blob: a4715735c3c442c8418ce9cc1e612b43f705a1bf [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Michal Simek6fc7c452011-10-06 20:35:35 +00002/*
Ashok Reddy Soma645aa762021-06-24 00:34:41 -06003 * Copyright (C) 2021 Waymo LLC
Michal Simek6fc7c452011-10-06 20:35:35 +00004 * Copyright (C) 2011 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2011 PetaLogix
6 * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
Michal Simek6fc7c452011-10-06 20:35:35 +00007 */
8
#include <config.h>
#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <net.h>
#include <malloc.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <phy.h>
#include <miiphy.h>
#include <wait_bit.h>
#include <linux/delay.h>
#include <linux/errno.h>
Michal Simek6fc7c452011-10-06 20:35:35 +000022
Michal Simek682baac2015-12-08 15:44:41 +010023DECLARE_GLOBAL_DATA_PTR;
24
/* Link setup */
#define XAE_EMMC_LINKSPEED_MASK	0xC0000000 /* Link speed */
#define XAE_EMMC_LINKSPD_10	0x00000000 /* Link Speed mask for 10 Mbit */
#define XAE_EMMC_LINKSPD_100	0x40000000 /* Link Speed mask for 100 Mbit */
#define XAE_EMMC_LINKSPD_1000	0x80000000 /* Link Speed mask for 1000 Mbit */

/* Interrupt Status/Enable/Mask Registers bit definitions */
#define XAE_INT_RXRJECT_MASK	0x00000008 /* Rx frame rejected */
#define XAE_INT_MGTRDY_MASK	0x00000080 /* MGT clock Lock */

/* Receive Configuration Word 1 (RCW1) Register bit definitions */
#define XAE_RCW1_RX_MASK	0x10000000 /* Receiver enable */

/* Transmitter Configuration (TC) Register bit definitions */
#define XAE_TC_TX_MASK		0x10000000 /* Transmitter enable */

/* Low 16 bits of UAW1 hold the upper two bytes of the unicast MAC address */
#define XAE_UAW1_UNICASTADDR_MASK	0x0000FFFF

/* MDIO Management Configuration (MC) Register bit definitions */
#define XAE_MDIO_MC_MDIOEN_MASK		0x00000040 /* MII management enable*/

/* MDIO Management Control Register (MCR) Register bit definitions */
#define XAE_MDIO_MCR_PHYAD_MASK		0x1F000000 /* Phy Address Mask */
#define XAE_MDIO_MCR_PHYAD_SHIFT	24	   /* Phy Address Shift */
#define XAE_MDIO_MCR_REGAD_MASK		0x001F0000 /* Reg Address Mask */
#define XAE_MDIO_MCR_REGAD_SHIFT	16	   /* Reg Address Shift */
#define XAE_MDIO_MCR_OP_READ_MASK	0x00008000 /* Op Code Read Mask */
#define XAE_MDIO_MCR_OP_WRITE_MASK	0x00004000 /* Op Code Write Mask */
#define XAE_MDIO_MCR_INITIATE_MASK	0x00000800 /* Initiate transaction */
#define XAE_MDIO_MCR_READY_MASK		0x00000080 /* Ready Mask */

#define XAE_MDIO_DIV_DFT	29	/* Default MDIO clock divisor */

#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK	0x007FFFFF /* Actual len */

/* DMA macros */
/* Bitmasks of XAXIDMA_CR_OFFSET register */
#define XAXIDMA_CR_RUNSTOP_MASK	0x00000001 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK	0x00000004 /* Reset DMA engine */

/* Bitmasks of XAXIDMA_SR_OFFSET register */
#define XAXIDMA_HALTED_MASK	0x00000001 /* DMA channel halted */

/* Bitmask for interrupts */
#define XAXIDMA_IRQ_IOC_MASK	0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK	0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ALL_MASK	0x00007000 /* All interrupts */

/* Bitmasks of XAXIDMA_BD_CTRL_OFFSET register */
#define XAXIDMA_BD_CTRL_TXSOF_MASK	0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK	0x04000000 /* Last tx packet */

/* Bitmasks for XXV Ethernet MAC (10G/25G variant) */
#define XXV_TC_TX_MASK		0x00000001
#define XXV_TC_FCS_MASK		0x00000002
#define XXV_RCW1_RX_MASK	0x00000001
#define XXV_RCW1_FCS_MASK	0x00000002

/* Alignment required by the AXI DMA for buffers and descriptors */
#define DMAALIGN	128
/* Minimum frame length the XXV (10G/25G) MAC will transmit */
#define XXV_MIN_PKT_SIZE	60

/* Single receive buffer - the driver handles one frame at a time */
static u8 rxframe[PKTSIZE_ALIGN] __attribute((aligned(DMAALIGN)));
/* Scratch buffer used to pad short XXV frames up to XXV_MIN_PKT_SIZE */
static u8 txminframe[XXV_MIN_PKT_SIZE] __attribute((aligned(DMAALIGN)));

/* MAC hardware flavours supported by this driver */
enum emac_variant {
	EMAC_1G = 0,
	EMAC_10G_25G = 1,
};
Michal Simek6fc7c452011-10-06 20:35:35 +000093
/* Register layout of one AXI DMA channel (MM2S or S2MM) */
struct axidma_reg {
	u32 control; /* DMACR */
	u32 status; /* DMASR */
	u32 current; /* CURDESC low 32 bit */
	u32 current_hi; /* CURDESC high 32 bit */
	u32 tail; /* TAILDESC low 32 bit */
	u32 tail_hi; /* TAILDESC high 32 bit */
};
103
/* Platform data structures */
struct axidma_plat {
	struct eth_pdata eth_pdata;	/* Generic U-Boot ethernet platdata */
	struct axidma_reg *dmatx;	/* TX DMA channel register base */
	struct axidma_reg *dmarx;	/* RX DMA channel register base */
	int phyaddr;			/* PHY address; -1 triggers autodetect */
	u8 eth_hasnobuf;		/* Non-processor ("no buffer") mode */
	int phy_of_handle;		/* DT offset of the attached PHY node */
	enum emac_variant mactype;	/* 1G vs 10G/25G MAC flavour */
};
114
/* Private driver state, mirrored from axidma_plat at probe time */
struct axidma_priv {
	struct axidma_reg *dmatx;	/* TX DMA channel register base */
	struct axidma_reg *dmarx;	/* RX DMA channel register base */
	int phyaddr;			/* PHY address; -1 triggers autodetect */
	struct axi_regs *iobase;	/* MAC register base */
	phy_interface_t interface;	/* PHY interface mode (e.g. SGMII) */
	struct phy_device *phydev;	/* Connected PHY, set in phy_init */
	struct mii_dev *bus;		/* MDIO bus used to reach the PHY */
	u8 eth_hasnobuf;		/* Non-processor ("no buffer") mode */
	int phy_of_handle;		/* DT offset of the attached PHY node */
	enum emac_variant mactype;	/* 1G vs 10G/25G MAC flavour */
};
128
/* AXI DMA buffer descriptor (BD) layout */
struct axidma_bd {
	u32 next_desc;	/* Next descriptor pointer */
	u32 next_desc_msb;
	u32 buf_addr;	/* Buffer address */
	u32 buf_addr_msb;
	u32 reserved3;
	u32 reserved4;
	u32 cntrl;	/* Control */
	u32 status;	/* Status */
	u32 app0;
	u32 app1;	/* TX start << 16 | insert */
	u32 app2;	/* TX csum seed */
	u32 app3;
	u32 app4;	/* RX: low 16 bits carry the received frame length */
	u32 sw_id_offset;
	u32 reserved5;
	u32 reserved6;
};

/* Static BDs - driver uses only one BD per direction, linked to itself */
static struct axidma_bd tx_bd __attribute((aligned(DMAALIGN)));
static struct axidma_bd rx_bd __attribute((aligned(DMAALIGN)));
152
/* Register layout of the 1G AXI Ethernet MAC (subset used by this driver) */
struct axi_regs {
	u32 reserved[3];
	u32 is; /* 0xC: Interrupt status */
	u32 reserved2;
	u32 ie; /* 0x14: Interrupt enable */
	u32 reserved3[251];
	u32 rcw1; /* 0x404: Rx Configuration Word 1 */
	u32 tc; /* 0x408: Tx Configuration */
	u32 reserved4;
	u32 emmc; /* 0x410: EMAC mode configuration */
	u32 reserved5[59];
	u32 mdio_mc; /* 0x500: MII Management Config */
	u32 mdio_mcr; /* 0x504: MII Management Control */
	u32 mdio_mwd; /* 0x508: MII Management Write Data */
	u32 mdio_mrd; /* 0x50C: MII Management Read Data */
	u32 reserved6[124];
	u32 uaw0; /* 0x700: Unicast address word 0 */
	u32 uaw1; /* 0x704: Unicast address word 1 */
};
172
/* Register layout of the XXV (10G/25G) Ethernet MAC subset used here */
struct xxv_axi_regs {
	u32 gt_reset; /* 0x0 */
	u32 reserved[2];
	u32 tc; /* 0xC: Tx Configuration */
	u32 reserved2;
	u32 rcw1; /* 0x14: Rx Configuration Word 1 */
};

/* Use MII register 1 (MII status register) to detect PHY */
#define PHY_DETECT_REG 1

/*
 * Mask used to verify certain PHY features (or register contents)
 * in the register above:
 * 0x1000: 10Mbps full duplex support
 * 0x0800: 10Mbps half duplex support
 * 0x0008: Auto-negotiation support
 */
#define PHY_DETECT_MASK 0x1808
192
Michal Simekf5221872015-12-09 14:36:31 +0100193static inline int mdio_wait(struct axi_regs *regs)
Michal Simek6fc7c452011-10-06 20:35:35 +0000194{
Michal Simek6fc7c452011-10-06 20:35:35 +0000195 u32 timeout = 200;
196
197 /* Wait till MDIO interface is ready to accept a new transaction. */
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530198 while (timeout && (!(readl(&regs->mdio_mcr)
Michal Simek6fc7c452011-10-06 20:35:35 +0000199 & XAE_MDIO_MCR_READY_MASK))) {
200 timeout--;
201 udelay(1);
202 }
203 if (!timeout) {
204 printf("%s: Timeout\n", __func__);
205 return 1;
206 }
207 return 0;
208}
209
/**
 * axienet_dma_write - Memory mapped Axi DMA register Buffer Descriptor write.
 * @bd: pointer to BD descriptor structure
 * @desc: Address offset of DMA descriptors
 *
 * Writes the physical address of @bd into the descriptor register at @desc.
 * On CONFIG_PHYS_64BIT builds a 64-bit write covers both the low and high
 * descriptor words; otherwise a single 32-bit write suffices.
 */
static inline void axienet_dma_write(struct axidma_bd *bd, u32 *desc)
{
#if defined(CONFIG_PHYS_64BIT)
	writeq((unsigned long)bd, desc);
#else
	writel((u32)bd, desc);
#endif
}
225
Michal Simek41beca12015-12-09 14:44:38 +0100226static u32 phyread(struct axidma_priv *priv, u32 phyaddress, u32 registernum,
227 u16 *val)
Michal Simek6fc7c452011-10-06 20:35:35 +0000228{
Michal Simek41beca12015-12-09 14:44:38 +0100229 struct axi_regs *regs = priv->iobase;
Michal Simek6fc7c452011-10-06 20:35:35 +0000230 u32 mdioctrlreg = 0;
231
Michal Simekf5221872015-12-09 14:36:31 +0100232 if (mdio_wait(regs))
Michal Simek6fc7c452011-10-06 20:35:35 +0000233 return 1;
234
235 mdioctrlreg = ((phyaddress << XAE_MDIO_MCR_PHYAD_SHIFT) &
236 XAE_MDIO_MCR_PHYAD_MASK) |
237 ((registernum << XAE_MDIO_MCR_REGAD_SHIFT)
238 & XAE_MDIO_MCR_REGAD_MASK) |
239 XAE_MDIO_MCR_INITIATE_MASK |
240 XAE_MDIO_MCR_OP_READ_MASK;
241
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530242 writel(mdioctrlreg, &regs->mdio_mcr);
Michal Simek6fc7c452011-10-06 20:35:35 +0000243
Michal Simekf5221872015-12-09 14:36:31 +0100244 if (mdio_wait(regs))
Michal Simek6fc7c452011-10-06 20:35:35 +0000245 return 1;
246
247 /* Read data */
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530248 *val = readl(&regs->mdio_mrd);
Michal Simek6fc7c452011-10-06 20:35:35 +0000249 return 0;
250}
251
Michal Simek41beca12015-12-09 14:44:38 +0100252static u32 phywrite(struct axidma_priv *priv, u32 phyaddress, u32 registernum,
253 u32 data)
Michal Simek6fc7c452011-10-06 20:35:35 +0000254{
Michal Simek41beca12015-12-09 14:44:38 +0100255 struct axi_regs *regs = priv->iobase;
Michal Simek6fc7c452011-10-06 20:35:35 +0000256 u32 mdioctrlreg = 0;
257
Michal Simekf5221872015-12-09 14:36:31 +0100258 if (mdio_wait(regs))
Michal Simek6fc7c452011-10-06 20:35:35 +0000259 return 1;
260
261 mdioctrlreg = ((phyaddress << XAE_MDIO_MCR_PHYAD_SHIFT) &
262 XAE_MDIO_MCR_PHYAD_MASK) |
263 ((registernum << XAE_MDIO_MCR_REGAD_SHIFT)
264 & XAE_MDIO_MCR_REGAD_MASK) |
265 XAE_MDIO_MCR_INITIATE_MASK |
266 XAE_MDIO_MCR_OP_WRITE_MASK;
267
268 /* Write data */
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530269 writel(data, &regs->mdio_mwd);
Michal Simek6fc7c452011-10-06 20:35:35 +0000270
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530271 writel(mdioctrlreg, &regs->mdio_mcr);
Michal Simek6fc7c452011-10-06 20:35:35 +0000272
Michal Simekf5221872015-12-09 14:36:31 +0100273 if (mdio_wait(regs))
Michal Simek6fc7c452011-10-06 20:35:35 +0000274 return 1;
275
276 return 0;
277}
278
Michal Simek0b13ee22015-12-08 16:10:05 +0100279static int axiemac_phy_init(struct udevice *dev)
Michal Simek6fc7c452011-10-06 20:35:35 +0000280{
281 u16 phyreg;
Patrick van Gelder70ab9432020-06-03 14:18:04 +0200282 int i;
283 u32 ret;
Michal Simek682baac2015-12-08 15:44:41 +0100284 struct axidma_priv *priv = dev_get_priv(dev);
Michal Simek6cb55e72015-12-09 14:39:42 +0100285 struct axi_regs *regs = priv->iobase;
Michal Simek6fc7c452011-10-06 20:35:35 +0000286 struct phy_device *phydev;
287
288 u32 supported = SUPPORTED_10baseT_Half |
289 SUPPORTED_10baseT_Full |
290 SUPPORTED_100baseT_Half |
291 SUPPORTED_100baseT_Full |
292 SUPPORTED_1000baseT_Half |
293 SUPPORTED_1000baseT_Full;
294
Michal Simek0b13ee22015-12-08 16:10:05 +0100295 /* Set default MDIO divisor */
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530296 writel(XAE_MDIO_DIV_DFT | XAE_MDIO_MC_MDIOEN_MASK, &regs->mdio_mc);
Michal Simek0b13ee22015-12-08 16:10:05 +0100297
Michal Simek6fc7c452011-10-06 20:35:35 +0000298 if (priv->phyaddr == -1) {
299 /* Detect the PHY address */
300 for (i = 31; i >= 0; i--) {
Michal Simek41beca12015-12-09 14:44:38 +0100301 ret = phyread(priv, i, PHY_DETECT_REG, &phyreg);
Michal Simek6fc7c452011-10-06 20:35:35 +0000302 if (!ret && (phyreg != 0xFFFF) &&
303 ((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
304 /* Found a valid PHY address */
305 priv->phyaddr = i;
306 debug("axiemac: Found valid phy address, %x\n",
Michal Simek2f1e0652015-12-09 10:54:53 +0100307 i);
Michal Simek6fc7c452011-10-06 20:35:35 +0000308 break;
309 }
310 }
311 }
312
313 /* Interface - look at tsec */
Siva Durga Prasad Paladugu49348512016-02-21 15:46:14 +0530314 phydev = phy_connect(priv->bus, priv->phyaddr, dev, priv->interface);
Michal Simek6fc7c452011-10-06 20:35:35 +0000315
316 phydev->supported &= supported;
317 phydev->advertising = phydev->supported;
318 priv->phydev = phydev;
Siva Durga Prasad Paladuguc3d94f62019-03-15 17:46:45 +0530319 if (priv->phy_of_handle)
320 priv->phydev->node = offset_to_ofnode(priv->phy_of_handle);
Michal Simek6fc7c452011-10-06 20:35:35 +0000321 phy_config(phydev);
Michal Simek0b13ee22015-12-08 16:10:05 +0100322
323 return 0;
324}
325
326/* Setting axi emac and phy to proper setting */
327static int setup_phy(struct udevice *dev)
328{
Siva Durga Prasad Paladugua1c6ed82016-02-21 15:46:15 +0530329 u16 temp;
330 u32 speed, emmc_reg, ret;
Michal Simek0b13ee22015-12-08 16:10:05 +0100331 struct axidma_priv *priv = dev_get_priv(dev);
332 struct axi_regs *regs = priv->iobase;
333 struct phy_device *phydev = priv->phydev;
334
Siva Durga Prasad Paladugua1c6ed82016-02-21 15:46:15 +0530335 if (priv->interface == PHY_INTERFACE_MODE_SGMII) {
336 /*
337 * In SGMII cases the isolate bit might set
338 * after DMA and ethernet resets and hence
339 * check and clear if set.
340 */
341 ret = phyread(priv, priv->phyaddr, MII_BMCR, &temp);
342 if (ret)
343 return 0;
344 if (temp & BMCR_ISOLATE) {
345 temp &= ~BMCR_ISOLATE;
346 ret = phywrite(priv, priv->phyaddr, MII_BMCR, temp);
347 if (ret)
348 return 0;
349 }
350 }
351
Timur Tabi42387462012-07-09 08:52:43 +0000352 if (phy_startup(phydev)) {
353 printf("axiemac: could not initialize PHY %s\n",
354 phydev->dev->name);
355 return 0;
356 }
Michal Simek5848f132013-11-21 16:15:51 +0100357 if (!phydev->link) {
358 printf("%s: No link.\n", phydev->dev->name);
359 return 0;
360 }
Michal Simek6fc7c452011-10-06 20:35:35 +0000361
362 switch (phydev->speed) {
363 case 1000:
364 speed = XAE_EMMC_LINKSPD_1000;
365 break;
366 case 100:
367 speed = XAE_EMMC_LINKSPD_100;
368 break;
369 case 10:
370 speed = XAE_EMMC_LINKSPD_10;
371 break;
372 default:
373 return 0;
374 }
375
376 /* Setup the emac for the phy speed */
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530377 emmc_reg = readl(&regs->emmc);
Michal Simek6fc7c452011-10-06 20:35:35 +0000378 emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
379 emmc_reg |= speed;
380
381 /* Write new speed setting out to Axi Ethernet */
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530382 writel(emmc_reg, &regs->emmc);
Michal Simek6fc7c452011-10-06 20:35:35 +0000383
384 /*
385 * Setting the operating speed of the MAC needs a delay. There
386 * doesn't seem to be register to poll, so please consider this
387 * during your application design.
388 */
389 udelay(1);
390
391 return 1;
392}
393
394/* STOP DMA transfers */
Michal Simek8fbf79f2015-12-16 09:18:12 +0100395static void axiemac_stop(struct udevice *dev)
Michal Simek6fc7c452011-10-06 20:35:35 +0000396{
Michal Simek682baac2015-12-08 15:44:41 +0100397 struct axidma_priv *priv = dev_get_priv(dev);
Michal Simek6fc7c452011-10-06 20:35:35 +0000398 u32 temp;
399
400 /* Stop the hardware */
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530401 temp = readl(&priv->dmatx->control);
Michal Simek6fc7c452011-10-06 20:35:35 +0000402 temp &= ~XAXIDMA_CR_RUNSTOP_MASK;
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530403 writel(temp, &priv->dmatx->control);
Michal Simek6fc7c452011-10-06 20:35:35 +0000404
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530405 temp = readl(&priv->dmarx->control);
Michal Simek6fc7c452011-10-06 20:35:35 +0000406 temp &= ~XAXIDMA_CR_RUNSTOP_MASK;
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530407 writel(temp, &priv->dmarx->control);
Michal Simek6fc7c452011-10-06 20:35:35 +0000408
409 debug("axiemac: Halted\n");
410}
411
Ashok Reddy Soma645aa762021-06-24 00:34:41 -0600412static int xxv_axi_ethernet_init(struct axidma_priv *priv)
413{
414 struct xxv_axi_regs *regs = (struct xxv_axi_regs *)priv->iobase;
415
416 writel(readl(&regs->rcw1) | XXV_RCW1_FCS_MASK, &regs->rcw1);
417 writel(readl(&regs->tc) | XXV_TC_FCS_MASK, &regs->tc);
418 writel(readl(&regs->tc) | XXV_TC_TX_MASK, &regs->tc);
419 writel(readl(&regs->rcw1) | XXV_RCW1_RX_MASK, &regs->rcw1);
420
421 return 0;
422}
423
/*
 * Bring the 1G AXI Ethernet MAC into a known state: wait for the MGT clock
 * (where interrupt registers exist), disable interrupts and the receiver,
 * clear any pending rx-reject indication, and program the default MDIO
 * divisor. Returns 0 on success, 1 on MGT-ready timeout.
 */
static int axi_ethernet_init(struct axidma_priv *priv)
{
	struct axi_regs *regs = priv->iobase;
	int err;

	/*
	 * Check the status of the MgtRdy bit in the interrupt status
	 * registers. This must be done to allow the MGT clock to become stable
	 * for the Sgmii and 1000BaseX PHY interfaces. No other register reads
	 * will be valid until this bit is valid.
	 * The bit is always a 1 for all other PHY interfaces.
	 * Interrupt status and enable registers are not available in non
	 * processor mode and hence bypass in this mode
	 */
	if (!priv->eth_hasnobuf) {
		err = wait_for_bit_le32(&regs->is, XAE_INT_MGTRDY_MASK,
					true, 200, false);
		if (err) {
			printf("%s: Timeout\n", __func__);
			return 1;
		}

		/*
		 * Stop the device and reset HW
		 * Disable interrupts
		 */
		writel(0, &regs->ie);
	}

	/* Disable the receiver */
	writel(readl(&regs->rcw1) & ~XAE_RCW1_RX_MASK, &regs->rcw1);

	/*
	 * Stopping the receiver in mid-packet causes a dropped packet
	 * indication from HW. Clear it.
	 */
	if (!priv->eth_hasnobuf) {
		/* Set the interrupt status register to clear the interrupt */
		writel(XAE_INT_RXRJECT_MASK, &regs->is);
	}

	/* Setup HW */
	/* Set default MDIO divisor */
	writel(XAE_MDIO_DIV_DFT | XAE_MDIO_MC_MDIOEN_MASK, &regs->mdio_mc);

	debug("axiemac: InitHw done\n");
	return 0;
}
472
Michal Simek8fbf79f2015-12-16 09:18:12 +0100473static int axiemac_write_hwaddr(struct udevice *dev)
Michal Simek6fc7c452011-10-06 20:35:35 +0000474{
Simon Glassfa20e932020-12-03 16:55:20 -0700475 struct eth_pdata *pdata = dev_get_plat(dev);
Michal Simek682baac2015-12-08 15:44:41 +0100476 struct axidma_priv *priv = dev_get_priv(dev);
477 struct axi_regs *regs = priv->iobase;
Michal Simek6fc7c452011-10-06 20:35:35 +0000478
Ashok Reddy Soma645aa762021-06-24 00:34:41 -0600479 if (priv->mactype != EMAC_1G)
480 return 0;
481
Michal Simek6fc7c452011-10-06 20:35:35 +0000482 /* Set the MAC address */
Michal Simek682baac2015-12-08 15:44:41 +0100483 int val = ((pdata->enetaddr[3] << 24) | (pdata->enetaddr[2] << 16) |
484 (pdata->enetaddr[1] << 8) | (pdata->enetaddr[0]));
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530485 writel(val, &regs->uaw0);
Michal Simek6fc7c452011-10-06 20:35:35 +0000486
Michal Simek682baac2015-12-08 15:44:41 +0100487 val = (pdata->enetaddr[5] << 8) | pdata->enetaddr[4];
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530488 val |= readl(&regs->uaw1) & ~XAE_UAW1_UNICASTADDR_MASK;
489 writel(val, &regs->uaw1);
Michal Simek6fc7c452011-10-06 20:35:35 +0000490 return 0;
491}
492
493/* Reset DMA engine */
Michal Simek638c0ef2015-12-09 14:53:51 +0100494static void axi_dma_init(struct axidma_priv *priv)
Michal Simek6fc7c452011-10-06 20:35:35 +0000495{
Michal Simek6fc7c452011-10-06 20:35:35 +0000496 u32 timeout = 500;
497
498 /* Reset the engine so the hardware starts from a known state */
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530499 writel(XAXIDMA_CR_RESET_MASK, &priv->dmatx->control);
500 writel(XAXIDMA_CR_RESET_MASK, &priv->dmarx->control);
Michal Simek6fc7c452011-10-06 20:35:35 +0000501
502 /* At the initialization time, hardware should finish reset quickly */
503 while (timeout--) {
504 /* Check transmit/receive channel */
505 /* Reset is done when the reset bit is low */
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530506 if (!((readl(&priv->dmatx->control) |
507 readl(&priv->dmarx->control))
Michal Simek5aa45392015-10-28 11:00:47 +0100508 & XAXIDMA_CR_RESET_MASK)) {
Michal Simek6fc7c452011-10-06 20:35:35 +0000509 break;
510 }
511 }
512 if (!timeout)
513 printf("%s: Timeout\n", __func__);
514}
515
/*
 * .start hook: reset DMA, initialize the MAC, arm the single RX buffer
 * descriptor, enable TX/RX and (1G only) bring up the PHY link.
 * Returns 0 on success, -1 on MAC init or PHY setup failure.
 */
static int axiemac_start(struct udevice *dev)
{
	struct axidma_priv *priv = dev_get_priv(dev);
	u32 temp;

	debug("axiemac: Init started\n");
	/*
	 * Initialize AXIDMA engine. AXIDMA engine must be initialized before
	 * AxiEthernet. During AXIDMA engine initialization, AXIDMA hardware is
	 * reset, and since AXIDMA reset line is connected to AxiEthernet, this
	 * would ensure a reset of AxiEthernet.
	 */
	axi_dma_init(priv);

	/* Initialize AxiEthernet hardware. */
	if (priv->mactype == EMAC_1G) {
		if (axi_ethernet_init(priv))
			return -1;
	} else {
		if (xxv_axi_ethernet_init(priv))
			return -1;
	}

	/* Disable all RX interrupts before RxBD space setup */
	temp = readl(&priv->dmarx->control);
	temp &= ~XAXIDMA_IRQ_ALL_MASK;
	writel(temp, &priv->dmarx->control);

	/* Start DMA RX channel. Now it's ready to receive data.*/
	axienet_dma_write(&rx_bd, &priv->dmarx->current);

	/* Setup the BD: self-linked ring of one descriptor around rxframe */
	memset(&rx_bd, 0, sizeof(rx_bd));
	rx_bd.next_desc = lower_32_bits((unsigned long)&rx_bd);
	rx_bd.buf_addr = lower_32_bits((unsigned long)&rxframe);
#if defined(CONFIG_PHYS_64BIT)
	rx_bd.next_desc_msb = upper_32_bits((unsigned long)&rx_bd);
	rx_bd.buf_addr_msb = upper_32_bits((unsigned long)&rxframe);
#endif
	rx_bd.cntrl = sizeof(rxframe);
	/* Flush the last BD so DMA core could see the updates */
	flush_cache((phys_addr_t)&rx_bd, sizeof(rx_bd));

	/* It is necessary to flush rxframe because if you don't do it
	 * then cache can contain uninitialized data */
	flush_cache((phys_addr_t)&rxframe, sizeof(rxframe));

	/* Start the hardware */
	temp = readl(&priv->dmarx->control);
	temp |= XAXIDMA_CR_RUNSTOP_MASK;
	writel(temp, &priv->dmarx->control);

	/* Rx BD is ready - start by writing the tail descriptor pointer */
	axienet_dma_write(&rx_bd, &priv->dmarx->tail);

	if (priv->mactype == EMAC_1G) {
		struct axi_regs *regs = priv->iobase;
		/* Enable TX */
		writel(XAE_TC_TX_MASK, &regs->tc);
		/* Enable RX */
		writel(XAE_RCW1_RX_MASK, &regs->rcw1);

		/* PHY setup; setup_phy() returns 0 on failure */
		if (!setup_phy(dev)) {
			axiemac_stop(dev);
			return -1;
		}
	} else {
		struct xxv_axi_regs *regs = (struct xxv_axi_regs *)priv->iobase;
		/* Enable TX */
		writel(readl(&regs->tc) | XXV_TC_TX_MASK, &regs->tc);

		/* Enable RX */
		writel(readl(&regs->rcw1) | XXV_RCW1_RX_MASK, &regs->rcw1);
	}

	debug("axiemac: Init complete\n");
	return 0;
}
595
/*
 * .send hook: transmit one frame through the single TX buffer descriptor
 * and busy-wait for completion. Returns 0 on success, 1 on TX timeout.
 */
static int axiemac_send(struct udevice *dev, void *ptr, int len)
{
	struct axidma_priv *priv = dev_get_priv(dev);
	u32 timeout;

	if (len > PKTSIZE_ALIGN)
		len = PKTSIZE_ALIGN;

	/* If size is less than min packet size, pad to min size (XXV MAC) */
	if (priv->mactype == EMAC_10G_25G && len < XXV_MIN_PKT_SIZE) {
		memset(txminframe, 0, XXV_MIN_PKT_SIZE);
		memcpy(txminframe, ptr, len);
		len = XXV_MIN_PKT_SIZE;
		ptr = txminframe;
	}

	/* Flush packet to main memory to be transferred by DMA */
	flush_cache((phys_addr_t)ptr, len);

	/* Setup Tx BD */
	memset(&tx_bd, 0, sizeof(tx_bd));
	/* At the end of the ring, link the last BD back to the top */
	tx_bd.next_desc = lower_32_bits((unsigned long)&tx_bd);
	tx_bd.buf_addr = lower_32_bits((unsigned long)ptr);
#if defined(CONFIG_PHYS_64BIT)
	tx_bd.next_desc_msb = upper_32_bits((unsigned long)&tx_bd);
	tx_bd.buf_addr_msb = upper_32_bits((unsigned long)ptr);
#endif
	/* Save len; SOF and EOF both set since the frame fits in one BD */
	tx_bd.cntrl = len | XAXIDMA_BD_CTRL_TXSOF_MASK |
		XAXIDMA_BD_CTRL_TXEOF_MASK;

	/* Flush the last BD so DMA core could see the updates */
	flush_cache((phys_addr_t)&tx_bd, sizeof(tx_bd));

	if (readl(&priv->dmatx->status) & XAXIDMA_HALTED_MASK) {
		u32 temp;
		axienet_dma_write(&tx_bd, &priv->dmatx->current);
		/* Start the hardware */
		temp = readl(&priv->dmatx->control);
		temp |= XAXIDMA_CR_RUNSTOP_MASK;
		writel(temp, &priv->dmatx->control);
	}

	/* Start transfer by updating the tail descriptor pointer */
	axienet_dma_write(&tx_bd, &priv->dmatx->tail);

	/* Wait for transmission to complete */
	debug("axiemac: Waiting for tx to be done\n");
	timeout = 200;
	while (timeout && (!(readl(&priv->dmatx->status) &
	    (XAXIDMA_IRQ_DELAY_MASK | XAXIDMA_IRQ_IOC_MASK)))) {
		timeout--;
		udelay(1);
	}
	if (!timeout) {
		printf("%s: Timeout\n", __func__);
		return 1;
	}

	debug("axiemac: Sending complete\n");
	return 0;
}
659
Michal Simek638c0ef2015-12-09 14:53:51 +0100660static int isrxready(struct axidma_priv *priv)
Michal Simek6fc7c452011-10-06 20:35:35 +0000661{
662 u32 status;
Michal Simek6fc7c452011-10-06 20:35:35 +0000663
664 /* Read pending interrupts */
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530665 status = readl(&priv->dmarx->status);
Michal Simek6fc7c452011-10-06 20:35:35 +0000666
667 /* Acknowledge pending interrupts */
Siva Durga Prasad Paladugud227c9a2017-11-23 12:23:12 +0530668 writel(status & XAXIDMA_IRQ_ALL_MASK, &priv->dmarx->status);
Michal Simek6fc7c452011-10-06 20:35:35 +0000669
670 /*
671 * If Reception done interrupt is asserted, call RX call back function
672 * to handle the processed BDs and then raise the according flag.
673 */
674 if ((status & (XAXIDMA_IRQ_DELAY_MASK | XAXIDMA_IRQ_IOC_MASK)))
675 return 1;
676
677 return 0;
678}
679
/*
 * .recv hook: if a frame has arrived, mask RX interrupts, extract the
 * frame length from the BD and hand rxframe back to the caller.
 * Returns the frame length, or -1 when nothing is pending.
 */
static int axiemac_recv(struct udevice *dev, int flags, uchar **packetp)
{
	u32 length;
	struct axidma_priv *priv = dev_get_priv(dev);
	u32 temp;

	/* Wait for an incoming packet */
	if (!isrxready(priv))
		return -1;

	debug("axiemac: RX data ready\n");

	/* Disable IRQ for a moment till packet is handled */
	temp = readl(&priv->dmarx->control);
	temp &= ~XAXIDMA_IRQ_ALL_MASK;
	writel(temp, &priv->dmarx->control);
	/* 1G MAC with buffers reports the length in app4; others in status */
	if (!priv->eth_hasnobuf && priv->mactype == EMAC_1G)
		length = rx_bd.app4 & 0xFFFF; /* max length mask */
	else
		length = rx_bd.status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;

#ifdef DEBUG
	print_buffer(&rxframe, &rxframe[0], 1, length, 16);
#endif

	*packetp = rxframe;
	return length;
}
708
/*
 * .free_pkt hook: re-arm the single RX buffer descriptor after the caller
 * is done with the received frame, then restart the RX channel.
 */
static int axiemac_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct axidma_priv *priv = dev_get_priv(dev);

#ifdef DEBUG
	/* It is useful to clear buffer to be sure that it is consistent */
	memset(rxframe, 0, sizeof(rxframe));
#endif
	/* Setup RxBD */
	/* Clear the whole buffer and setup it again - all flags are cleared */
	memset(&rx_bd, 0, sizeof(rx_bd));
	rx_bd.next_desc = lower_32_bits((unsigned long)&rx_bd);
	rx_bd.buf_addr = lower_32_bits((unsigned long)&rxframe);
#if defined(CONFIG_PHYS_64BIT)
	rx_bd.next_desc_msb = upper_32_bits((unsigned long)&rx_bd);
	rx_bd.buf_addr_msb = upper_32_bits((unsigned long)&rxframe);
#endif
	rx_bd.cntrl = sizeof(rxframe);

	/* Write bd to HW */
	flush_cache((phys_addr_t)&rx_bd, sizeof(rx_bd));

	/* It is necessary to flush rxframe because if you don't do it
	 * then cache will contain previous packet */
	flush_cache((phys_addr_t)&rxframe, sizeof(rxframe));

	/* Rx BD is ready - start again */
	axienet_dma_write(&rx_bd, &priv->dmarx->tail);

	debug("axiemac: RX completed, framelength = %d\n", length);

	return 0;
}
742
Michal Simek682baac2015-12-08 15:44:41 +0100743static int axiemac_miiphy_read(struct mii_dev *bus, int addr,
744 int devad, int reg)
Michal Simek6fc7c452011-10-06 20:35:35 +0000745{
Michal Simek682baac2015-12-08 15:44:41 +0100746 int ret;
747 u16 value;
Michal Simek6fc7c452011-10-06 20:35:35 +0000748
Michal Simek682baac2015-12-08 15:44:41 +0100749 ret = phyread(bus->priv, addr, reg, &value);
750 debug("axiemac: Read MII 0x%x, 0x%x, 0x%x, %d\n", addr, reg,
751 value, ret);
752 return value;
Michal Simek6fc7c452011-10-06 20:35:35 +0000753}
754
Michal Simek682baac2015-12-08 15:44:41 +0100755static int axiemac_miiphy_write(struct mii_dev *bus, int addr, int devad,
756 int reg, u16 value)
Michal Simek6fc7c452011-10-06 20:35:35 +0000757{
Michal Simek682baac2015-12-08 15:44:41 +0100758 debug("axiemac: Write MII 0x%x, 0x%x, 0x%x\n", addr, reg, value);
759 return phywrite(bus->priv, addr, reg, value);
Michal Simek6fc7c452011-10-06 20:35:35 +0000760}
761
Michal Simek682baac2015-12-08 15:44:41 +0100762static int axi_emac_probe(struct udevice *dev)
Michal Simek6fc7c452011-10-06 20:35:35 +0000763{
Ashok Reddy Soma3c4df952021-06-24 00:34:40 -0600764 struct axidma_plat *plat = dev_get_plat(dev);
765 struct eth_pdata *pdata = &plat->eth_pdata;
Michal Simek682baac2015-12-08 15:44:41 +0100766 struct axidma_priv *priv = dev_get_priv(dev);
767 int ret;
768
Ashok Reddy Soma3c4df952021-06-24 00:34:40 -0600769 priv->iobase = (struct axi_regs *)pdata->iobase;
770 priv->dmatx = plat->dmatx;
771 /* RX channel offset is 0x30 */
772 priv->dmarx = (struct axidma_reg *)((phys_addr_t)priv->dmatx + 0x30);
Ashok Reddy Soma645aa762021-06-24 00:34:41 -0600773 priv->mactype = plat->mactype;
Ashok Reddy Soma3c4df952021-06-24 00:34:40 -0600774
Ashok Reddy Soma645aa762021-06-24 00:34:41 -0600775 if (priv->mactype == EMAC_1G) {
776 priv->eth_hasnobuf = plat->eth_hasnobuf;
777 priv->phyaddr = plat->phyaddr;
778 priv->phy_of_handle = plat->phy_of_handle;
779 priv->interface = pdata->phy_interface;
Michal Simek682baac2015-12-08 15:44:41 +0100780
Ashok Reddy Soma645aa762021-06-24 00:34:41 -0600781 priv->bus = mdio_alloc();
782 priv->bus->read = axiemac_miiphy_read;
783 priv->bus->write = axiemac_miiphy_write;
784 priv->bus->priv = priv;
785
786 ret = mdio_register_seq(priv->bus, dev_seq(dev));
787 if (ret)
788 return ret;
Michal Simek682baac2015-12-08 15:44:41 +0100789
Ashok Reddy Soma645aa762021-06-24 00:34:41 -0600790 axiemac_phy_init(dev);
791 }
Michal Simek0b13ee22015-12-08 16:10:05 +0100792
Michal Simek6fc7c452011-10-06 20:35:35 +0000793 return 0;
794}
795
Michal Simek682baac2015-12-08 15:44:41 +0100796static int axi_emac_remove(struct udevice *dev)
Michal Simek6fc7c452011-10-06 20:35:35 +0000797{
Michal Simek682baac2015-12-08 15:44:41 +0100798 struct axidma_priv *priv = dev_get_priv(dev);
Michal Simek6fc7c452011-10-06 20:35:35 +0000799
Ashok Reddy Soma645aa762021-06-24 00:34:41 -0600800 if (priv->mactype == EMAC_1G) {
801 free(priv->phydev);
802 mdio_unregister(priv->bus);
803 mdio_free(priv->bus);
804 }
Michal Simek6fc7c452011-10-06 20:35:35 +0000805
Michal Simek682baac2015-12-08 15:44:41 +0100806 return 0;
807}
808
/* Driver-model ethernet operations for the AXI EMAC */
static const struct eth_ops axi_emac_ops = {
	.start = axiemac_start,
	.send = axiemac_send,
	.recv = axiemac_recv,
	.free_pkt = axiemac_free_pkt,
	.stop = axiemac_stop,
	.write_hwaddr = axiemac_write_hwaddr,
};
817
Simon Glassaad29ae2020-12-03 16:55:21 -0700818static int axi_emac_of_to_plat(struct udevice *dev)
Michal Simek682baac2015-12-08 15:44:41 +0100819{
Ashok Reddy Soma3c4df952021-06-24 00:34:40 -0600820 struct axidma_plat *plat = dev_get_plat(dev);
821 struct eth_pdata *pdata = &plat->eth_pdata;
Simon Glassdd79d6e2017-01-17 16:52:55 -0700822 int node = dev_of_offset(dev);
Michal Simek682baac2015-12-08 15:44:41 +0100823 int offset = 0;
Michal Simek6fc7c452011-10-06 20:35:35 +0000824
Masahiro Yamadaa89b4de2020-07-17 14:36:48 +0900825 pdata->iobase = dev_read_addr(dev);
Ashok Reddy Soma645aa762021-06-24 00:34:41 -0600826 plat->mactype = dev_get_driver_data(dev);
Michal Simek6fc7c452011-10-06 20:35:35 +0000827
Simon Glassdd79d6e2017-01-17 16:52:55 -0700828 offset = fdtdec_lookup_phandle(gd->fdt_blob, node,
Michal Simek682baac2015-12-08 15:44:41 +0100829 "axistream-connected");
830 if (offset <= 0) {
831 printf("%s: axistream is not found\n", __func__);
832 return -EINVAL;
833 }
Greentime Hue50c9ea2022-01-20 16:41:28 +0800834 plat->dmatx = (struct axidma_reg *)fdtdec_get_addr_size_auto_parent
835 (gd->fdt_blob, 0, offset, "reg", 0, NULL, false);
Ashok Reddy Soma3c4df952021-06-24 00:34:40 -0600836 if (!plat->dmatx) {
Michal Simek682baac2015-12-08 15:44:41 +0100837 printf("%s: axi_dma register space not found\n", __func__);
838 return -EINVAL;
839 }
Michal Simek6fc7c452011-10-06 20:35:35 +0000840
Ashok Reddy Soma645aa762021-06-24 00:34:41 -0600841 if (plat->mactype == EMAC_1G) {
842 plat->phyaddr = -1;
Michal Simek6fc7c452011-10-06 20:35:35 +0000843
Ashok Reddy Soma645aa762021-06-24 00:34:41 -0600844 offset = fdtdec_lookup_phandle(gd->fdt_blob, node,
845 "phy-handle");
846 if (offset > 0) {
847 plat->phyaddr = fdtdec_get_int(gd->fdt_blob, offset,
848 "reg", -1);
849 plat->phy_of_handle = offset;
850 }
Michal Simek6fc7c452011-10-06 20:35:35 +0000851
Marek BehĂșnbc194772022-04-07 00:33:01 +0200852 pdata->phy_interface = dev_read_phy_mode(dev);
Marek BehĂșn48631e42022-04-07 00:33:03 +0200853 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
Ashok Reddy Soma645aa762021-06-24 00:34:41 -0600854 return -EINVAL;
Michal Simek682baac2015-12-08 15:44:41 +0100855
Ashok Reddy Soma645aa762021-06-24 00:34:41 -0600856 plat->eth_hasnobuf = fdtdec_get_bool(gd->fdt_blob, node,
857 "xlnx,eth-hasnobuf");
858 }
Siva Durga Prasad Paladuguf8026032017-01-06 16:27:15 +0530859
Ashok Reddy Soma3c4df952021-06-24 00:34:40 -0600860 printf("AXI EMAC: %lx, phyaddr %d, interface %s\n", (ulong)pdata->iobase,
861 plat->phyaddr, phy_string_for_interface(pdata->phy_interface));
Michal Simek682baac2015-12-08 15:44:41 +0100862
863 return 0;
Michal Simek6fc7c452011-10-06 20:35:35 +0000864}
Michal Simek682baac2015-12-08 15:44:41 +0100865
/* Match table; .data selects the MAC flavour (1G vs 10G/25G XXV core) */
static const struct udevice_id axi_emac_ids[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", .data = (uintptr_t)EMAC_1G },
	{ .compatible = "xlnx,xxv-ethernet-1.0", .data = (uintptr_t)EMAC_10G_25G },
	{ }
};
871
/* Driver-model registration for the Xilinx AXI ethernet MAC */
U_BOOT_DRIVER(axi_emac) = {
	.name	= "axi_emac",
	.id	= UCLASS_ETH,
	.of_match = axi_emac_ids,
	.of_to_plat = axi_emac_of_to_plat,
	.probe	= axi_emac_probe,
	.remove	= axi_emac_remove,
	.ops	= &axi_emac_ops,
	.priv_auto = sizeof(struct axidma_priv),
	.plat_auto = sizeof(struct axidma_plat),
};