// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2021 Waymo LLC
 * Copyright (C) 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2011 PetaLogix
 * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
 */

#include <config.h>
#include <common.h>
#include <cpu_func.h>
#include <display_options.h>
#include <dm.h>
#include <log.h>
#include <net.h>
#include <malloc.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <phy.h>
#include <miiphy.h>
#include <wait_bit.h>
#include <linux/delay.h>
#include <eth_phy.h>

DECLARE_GLOBAL_DATA_PTR;

/* Link setup */
#define XAE_EMMC_LINKSPEED_MASK	0xC0000000 /* Link speed */
#define XAE_EMMC_LINKSPD_10	0x00000000 /* Link Speed mask for 10 Mbit */
#define XAE_EMMC_LINKSPD_100	0x40000000 /* Link Speed mask for 100 Mbit */
#define XAE_EMMC_LINKSPD_1000	0x80000000 /* Link Speed mask for 1000 Mbit */

/* Interrupt Status/Enable/Mask Registers bit definitions */
#define XAE_INT_RXRJECT_MASK	0x00000008 /* Rx frame rejected */
#define XAE_INT_MGTRDY_MASK	0x00000080 /* MGT clock Lock */

/* Receive Configuration Word 1 (RCW1) Register bit definitions */
#define XAE_RCW1_RX_MASK	0x10000000 /* Receiver enable */

/* Transmitter Configuration (TC) Register bit definitions */
#define XAE_TC_TX_MASK		0x10000000 /* Transmitter enable */

#define XAE_UAW1_UNICASTADDR_MASK	0x0000FFFF

/* MDIO Management Configuration (MC) Register bit definitions */
#define XAE_MDIO_MC_MDIOEN_MASK		0x00000040 /* MII management enable */

/* MDIO Management Control Register (MCR) Register bit definitions */
#define XAE_MDIO_MCR_PHYAD_MASK		0x1F000000 /* Phy Address Mask */
#define XAE_MDIO_MCR_PHYAD_SHIFT	24	   /* Phy Address Shift */
#define XAE_MDIO_MCR_REGAD_MASK		0x001F0000 /* Reg Address Mask */
#define XAE_MDIO_MCR_REGAD_SHIFT	16	   /* Reg Address Shift */
#define XAE_MDIO_MCR_OP_READ_MASK	0x00008000 /* Op Code Read Mask */
#define XAE_MDIO_MCR_OP_WRITE_MASK	0x00004000 /* Op Code Write Mask */
#define XAE_MDIO_MCR_INITIATE_MASK	0x00000800 /* Initiate Mask */
#define XAE_MDIO_MCR_READY_MASK		0x00000080 /* Ready Mask */

#define XAE_MDIO_DIV_DFT	29	/* Default MDIO clock divisor */

#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK	0x007FFFFF /* Actual len */

/* DMA macros */
/* Bitmasks of XAXIDMA_CR_OFFSET register */
#define XAXIDMA_CR_RUNSTOP_MASK	0x00000001 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK	0x00000004 /* Reset DMA engine */

/* Bitmasks of XAXIDMA_SR_OFFSET register */
#define XAXIDMA_HALTED_MASK	0x00000001 /* DMA channel halted */

/* Bitmask for interrupts */
#define XAXIDMA_IRQ_IOC_MASK	0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK	0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ALL_MASK	0x00007000 /* All interrupts */

/* Bitmasks of XAXIDMA_BD_CTRL_OFFSET register */
#define XAXIDMA_BD_CTRL_TXSOF_MASK	0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK	0x04000000 /* Last tx packet */

/* Bitmasks for XXV Ethernet MAC */
#define XXV_TC_TX_MASK		0x00000001
#define XXV_TC_FCS_MASK		0x00000002
#define XXV_RCW1_RX_MASK	0x00000001
#define XXV_RCW1_FCS_MASK	0x00000002

#define DMAALIGN		128
#define XXV_MIN_PKT_SIZE	60

static u8 rxframe[PKTSIZE_ALIGN] __attribute((aligned(DMAALIGN)));
static u8 txminframe[XXV_MIN_PKT_SIZE] __attribute((aligned(DMAALIGN)));

enum emac_variant {
	EMAC_1G = 0,
	EMAC_10G_25G = 1,
};

/* Reflect dma offsets */
struct axidma_reg {
	u32 control;	/* DMACR */
	u32 status;	/* DMASR */
	u32 current;	/* CURDESC low 32 bit */
	u32 current_hi;	/* CURDESC high 32 bit */
	u32 tail;	/* TAILDESC low 32 bit */
	u32 tail_hi;	/* TAILDESC high 32 bit */
};

/* Platform data structures */
struct axidma_plat {
	struct eth_pdata eth_pdata;
	struct axidma_reg *dmatx;
	struct axidma_reg *dmarx;
	int phyaddr;
	u8 eth_hasnobuf;
	int phy_of_handle;
	enum emac_variant mactype;
};

/* Private driver structures */
struct axidma_priv {
	struct axidma_reg *dmatx;
	struct axidma_reg *dmarx;
	int phyaddr;
	struct axi_regs *iobase;
	phy_interface_t interface;
	struct phy_device *phydev;
	struct mii_dev *bus;
	u8 eth_hasnobuf;
	int phy_of_handle;
	enum emac_variant mactype;
};

/* BD descriptors */
struct axidma_bd {
	u32 next_desc;	/* Next descriptor pointer */
	u32 next_desc_msb;
	u32 buf_addr;	/* Buffer address */
	u32 buf_addr_msb;
	u32 reserved3;
	u32 reserved4;
	u32 cntrl;	/* Control */
	u32 status;	/* Status */
	u32 app0;
	u32 app1;	/* TX start << 16 | insert */
	u32 app2;	/* TX csum seed */
	u32 app3;
	u32 app4;
	u32 sw_id_offset;
	u32 reserved5;
	u32 reserved6;
};

/* Static BDs - driver uses only one BD */
static struct axidma_bd tx_bd __attribute((aligned(DMAALIGN)));
static struct axidma_bd rx_bd __attribute((aligned(DMAALIGN)));

struct axi_regs {
	u32 reserved[3];
	u32 is;		/* 0xC: Interrupt status */
	u32 reserved2;
	u32 ie;		/* 0x14: Interrupt enable */
	u32 reserved3[251];
	u32 rcw1;	/* 0x404: Rx Configuration Word 1 */
	u32 tc;		/* 0x408: Tx Configuration */
	u32 reserved4;
	u32 emmc;	/* 0x410: EMAC mode configuration */
	u32 reserved5[59];
	u32 mdio_mc;	/* 0x500: MII Management Config */
	u32 mdio_mcr;	/* 0x504: MII Management Control */
	u32 mdio_mwd;	/* 0x508: MII Management Write Data */
	u32 mdio_mrd;	/* 0x50C: MII Management Read Data */
	u32 reserved6[124];
	u32 uaw0;	/* 0x700: Unicast address word 0 */
	u32 uaw1;	/* 0x704: Unicast address word 1 */
};

struct xxv_axi_regs {
	u32 gt_reset;	/* 0x0 */
	u32 reserved[2];
	u32 tc;		/* 0xC: Tx Configuration */
	u32 reserved2;
	u32 rcw1;	/* 0x14: Rx Configuration Word 1 */
};

/* Use MII register 1 (MII status register) to detect PHY */
#define PHY_DETECT_REG	1

/*
 * Mask used to verify certain PHY features (or register contents)
 * in the register above:
 *	0x1000: 10Mbps full duplex support
 *	0x0800: 10Mbps half duplex support
 *	0x0008: Auto-negotiation support
 */
#define PHY_DETECT_MASK	0x1808

static inline int mdio_wait(struct axi_regs *regs)
{
	u32 timeout = 200;

	/* Wait till MDIO interface is ready to accept a new transaction. */
	while (timeout && (!(readl(&regs->mdio_mcr)
			     & XAE_MDIO_MCR_READY_MASK))) {
		timeout--;
		udelay(1);
	}
	if (!timeout) {
		printf("%s: Timeout\n", __func__);
		return 1;
	}
	return 0;
}

/**
 * axienet_dma_write -	Memory mapped Axi DMA register Buffer Descriptor write.
 * @bd:		pointer to BD descriptor structure
 * @desc:	Address offset of DMA descriptors
 *
 * This function writes the value into the corresponding Axi DMA register.
 */
static inline void axienet_dma_write(struct axidma_bd *bd, u32 *desc)
{
#if defined(CONFIG_PHYS_64BIT)
	writeq((unsigned long)bd, desc);
#else
	writel((u32)bd, desc);
#endif
}

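/**
 * phyread - Read a PHY register over the MAC's MDIO interface
 * @priv:	driver private data with the MAC register base
 * @phyaddress:	address of the PHY on the MDIO bus
 * @registernum: PHY register number to read
 * @val:	buffer for the 16-bit value read back
 *
 * Return: 0 on success, 1 on MDIO timeout.
 */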
static u32 phyread(struct axidma_priv *priv, u32 phyaddress, u32 registernum,
		   u16 *val)
{
	struct axi_regs *regs = priv->iobase;
	u32 mdioctrlreg = 0;

	if (mdio_wait(regs))
		return 1;

	mdioctrlreg = ((phyaddress << XAE_MDIO_MCR_PHYAD_SHIFT) &
			XAE_MDIO_MCR_PHYAD_MASK) |
			((registernum << XAE_MDIO_MCR_REGAD_SHIFT)
			& XAE_MDIO_MCR_REGAD_MASK) |
			XAE_MDIO_MCR_INITIATE_MASK |
			XAE_MDIO_MCR_OP_READ_MASK;

	writel(mdioctrlreg, &regs->mdio_mcr);

	if (mdio_wait(regs))
		return 1;

	/* Read data */
	*val = readl(&regs->mdio_mrd);
	return 0;
}

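/**
 * phywrite - Write a PHY register over the MAC's MDIO interface
 * @priv:	driver private data with the MAC register base
 * @phyaddress:	address of the PHY on the MDIO bus
 * @registernum: PHY register number to write
 * @data:	value to write into the register
 *
 * Return: 0 on success, 1 on MDIO timeout.
 */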
static u32 phywrite(struct axidma_priv *priv, u32 phyaddress, u32 registernum,
		    u32 data)
{
	struct axi_regs *regs = priv->iobase;
	u32 mdioctrlreg = 0;

	if (mdio_wait(regs))
		return 1;

	mdioctrlreg = ((phyaddress << XAE_MDIO_MCR_PHYAD_SHIFT) &
			XAE_MDIO_MCR_PHYAD_MASK) |
			((registernum << XAE_MDIO_MCR_REGAD_SHIFT)
			& XAE_MDIO_MCR_REGAD_MASK) |
			XAE_MDIO_MCR_INITIATE_MASK |
			XAE_MDIO_MCR_OP_WRITE_MASK;

	/* Write data */
	writel(data, &regs->mdio_mwd);

	writel(mdioctrlreg, &regs->mdio_mcr);

	if (mdio_wait(regs))
		return 1;

	return 0;
}

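/*
 * Enable the MDIO interface, find the PHY (scanning the bus if no address
 * is known) and connect it with the 10/100/1000 features the MAC supports.
 */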
static int axiemac_phy_init(struct udevice *dev)
{
	u16 phyreg;
	int i;
	u32 ret;
	struct axidma_priv *priv = dev_get_priv(dev);
	struct axi_regs *regs = priv->iobase;
	struct phy_device *phydev;

	u32 supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Half |
			SUPPORTED_1000baseT_Full;

	/* Set default MDIO divisor */
	writel(XAE_MDIO_DIV_DFT | XAE_MDIO_MC_MDIOEN_MASK, &regs->mdio_mc);

	if (IS_ENABLED(CONFIG_DM_ETH_PHY))
		priv->phyaddr = eth_phy_get_addr(dev);

	if (priv->phyaddr == -1) {
		/* Detect the PHY address */
		for (i = 31; i >= 0; i--) {
			ret = phyread(priv, i, PHY_DETECT_REG, &phyreg);
			if (!ret && (phyreg != 0xFFFF) &&
			    ((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
				/* Found a valid PHY address */
				priv->phyaddr = i;
				debug("axiemac: Found valid phy address, %x\n",
				      i);
				break;
			}
		}
	}

	/* Interface - look at tsec */
	phydev = phy_connect(priv->bus, priv->phyaddr, dev, priv->interface);

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;
	priv->phydev = phydev;
	if (priv->phy_of_handle)
		priv->phydev->node = offset_to_ofnode(priv->phy_of_handle);
	phy_config(phydev);

	return 0;
}

/* Set up the axi emac and phy to the negotiated link settings */
static int setup_phy(struct udevice *dev)
{
	u16 temp;
	u32 speed, emmc_reg, ret;
	struct axidma_priv *priv = dev_get_priv(dev);
	struct axi_regs *regs = priv->iobase;
	struct phy_device *phydev = priv->phydev;

	if (priv->interface == PHY_INTERFACE_MODE_SGMII) {
		/*
		 * In SGMII cases the isolate bit might be set after DMA and
		 * ethernet resets, so check it and clear it if needed.
		 */
		ret = phyread(priv, priv->phyaddr, MII_BMCR, &temp);
		if (ret)
			return 0;
		if (temp & BMCR_ISOLATE) {
			temp &= ~BMCR_ISOLATE;
			ret = phywrite(priv, priv->phyaddr, MII_BMCR, temp);
			if (ret)
				return 0;
		}
	}

	if (phy_startup(phydev)) {
		printf("axiemac: could not initialize PHY %s\n",
		       phydev->dev->name);
		return 0;
	}
	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return 0;
	}

	switch (phydev->speed) {
	case 1000:
		speed = XAE_EMMC_LINKSPD_1000;
		break;
	case 100:
		speed = XAE_EMMC_LINKSPD_100;
		break;
	case 10:
		speed = XAE_EMMC_LINKSPD_10;
		break;
	default:
		return 0;
	}

	/* Setup the emac for the phy speed */
	emmc_reg = readl(&regs->emmc);
	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
	emmc_reg |= speed;

	/* Write new speed setting out to Axi Ethernet */
	writel(emmc_reg, &regs->emmc);

	/*
	 * Setting the operating speed of the MAC needs a delay. There
	 * doesn't seem to be a register to poll, so please consider this
	 * during your application design.
	 */
	udelay(1);

	return 1;
}

/* STOP DMA transfers */
static void axiemac_stop(struct udevice *dev)
{
	struct axidma_priv *priv = dev_get_priv(dev);
	u32 temp;

	/* Stop the hardware */
	temp = readl(&priv->dmatx->control);
	temp &= ~XAXIDMA_CR_RUNSTOP_MASK;
	writel(temp, &priv->dmatx->control);

	temp = readl(&priv->dmarx->control);
	temp &= ~XAXIDMA_CR_RUNSTOP_MASK;
	writel(temp, &priv->dmarx->control);

	debug("axiemac: Halted\n");
}

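/* Enable FCS handling and the transmitter/receiver of the 10G/25G (XXV) MAC */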
static int xxv_axi_ethernet_init(struct axidma_priv *priv)
{
	struct xxv_axi_regs *regs = (struct xxv_axi_regs *)priv->iobase;

	writel(readl(&regs->rcw1) | XXV_RCW1_FCS_MASK, &regs->rcw1);
	writel(readl(&regs->tc) | XXV_TC_FCS_MASK, &regs->tc);
	writel(readl(&regs->tc) | XXV_TC_TX_MASK, &regs->tc);
	writel(readl(&regs->rcw1) | XXV_RCW1_RX_MASK, &regs->rcw1);

	return 0;
}

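/*
 * Bring the 1G MAC into a known state: wait for the MGT clock to be ready,
 * disable interrupts and the receiver and set the default MDIO divisor.
 */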
static int axi_ethernet_init(struct axidma_priv *priv)
{
	struct axi_regs *regs = priv->iobase;
	int err;

	/*
	 * Check the status of the MgtRdy bit in the interrupt status
	 * registers. This must be done to allow the MGT clock to become stable
	 * for the Sgmii and 1000BaseX PHY interfaces. No other register reads
	 * will be valid until this bit is valid.
	 * The bit is always a 1 for all other PHY interfaces.
	 * Interrupt status and enable registers are not available in
	 * non-processor mode and are hence bypassed in this mode.
	 */
	if (!priv->eth_hasnobuf) {
		err = wait_for_bit_le32(&regs->is, XAE_INT_MGTRDY_MASK,
					true, 200, false);
		if (err) {
			printf("%s: Timeout\n", __func__);
			return 1;
		}

		/*
		 * Stop the device and reset HW
		 * Disable interrupts
		 */
		writel(0, &regs->ie);
	}

	/* Disable the receiver */
	writel(readl(&regs->rcw1) & ~XAE_RCW1_RX_MASK, &regs->rcw1);

	/*
	 * Stopping the receiver in mid-packet causes a dropped packet
	 * indication from HW. Clear it.
	 */
	if (!priv->eth_hasnobuf) {
		/* Set the interrupt status register to clear the interrupt */
		writel(XAE_INT_RXRJECT_MASK, &regs->is);
	}

	/* Setup HW */
	/* Set default MDIO divisor */
	writel(XAE_MDIO_DIV_DFT | XAE_MDIO_MC_MDIOEN_MASK, &regs->mdio_mc);

	debug("axiemac: InitHw done\n");
	return 0;
}

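/* Program the unicast MAC address into the UAW0/UAW1 registers (1G MAC only) */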
static int axiemac_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct axidma_priv *priv = dev_get_priv(dev);
	struct axi_regs *regs = priv->iobase;

	if (priv->mactype != EMAC_1G)
		return 0;

	/* Set the MAC address */
	int val = ((pdata->enetaddr[3] << 24) | (pdata->enetaddr[2] << 16) |
		   (pdata->enetaddr[1] << 8) | (pdata->enetaddr[0]));
	writel(val, &regs->uaw0);

	val = (pdata->enetaddr[5] << 8) | pdata->enetaddr[4];
	val |= readl(&regs->uaw1) & ~XAE_UAW1_UNICASTADDR_MASK;
	writel(val, &regs->uaw1);
	return 0;
}

/* Reset DMA engine */
static void axi_dma_init(struct axidma_priv *priv)
{
	u32 timeout = 500;

	/* Reset the engine so the hardware starts from a known state */
	writel(XAXIDMA_CR_RESET_MASK, &priv->dmatx->control);
	writel(XAXIDMA_CR_RESET_MASK, &priv->dmarx->control);

	/* At the initialization time, hardware should finish reset quickly */
	while (timeout--) {
		/* Check transmit/receive channel */
		/* Reset is done when the reset bit is low */
		if (!((readl(&priv->dmatx->control) |
				readl(&priv->dmarx->control))
						& XAXIDMA_CR_RESET_MASK)) {
			break;
		}
	}
	if (!timeout)
		printf("%s: Timeout\n", __func__);
}

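/*
 * Bring up the DMA engine and the MAC, post the single RX buffer descriptor
 * and, on the 1G MAC, bring up the link via the attached PHY.
 */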
static int axiemac_start(struct udevice *dev)
{
	struct axidma_priv *priv = dev_get_priv(dev);
	u32 temp;

	debug("axiemac: Init started\n");
	/*
	 * Initialize AXIDMA engine. AXIDMA engine must be initialized before
	 * AxiEthernet. During AXIDMA engine initialization, AXIDMA hardware is
	 * reset, and since AXIDMA reset line is connected to AxiEthernet, this
	 * would ensure a reset of AxiEthernet.
	 */
	axi_dma_init(priv);

	/* Initialize AxiEthernet hardware. */
	if (priv->mactype == EMAC_1G) {
		if (axi_ethernet_init(priv))
			return -1;
	} else {
		if (xxv_axi_ethernet_init(priv))
			return -1;
	}

	/* Disable all RX interrupts before RxBD space setup */
	temp = readl(&priv->dmarx->control);
	temp &= ~XAXIDMA_IRQ_ALL_MASK;
	writel(temp, &priv->dmarx->control);

	/* Start DMA RX channel. Now it's ready to receive data. */
	axienet_dma_write(&rx_bd, &priv->dmarx->current);

	/* Setup the BD. */
	memset(&rx_bd, 0, sizeof(rx_bd));
	rx_bd.next_desc = lower_32_bits((unsigned long)&rx_bd);
	rx_bd.buf_addr = lower_32_bits((unsigned long)&rxframe);
#if defined(CONFIG_PHYS_64BIT)
	rx_bd.next_desc_msb = upper_32_bits((unsigned long)&rx_bd);
	rx_bd.buf_addr_msb = upper_32_bits((unsigned long)&rxframe);
#endif
	rx_bd.cntrl = sizeof(rxframe);
	/* Flush the last BD so DMA core could see the updates */
	flush_cache((phys_addr_t)&rx_bd, sizeof(rx_bd));

	/* It is necessary to flush rxframe because if you don't do it
	 * then cache can contain uninitialized data */
	flush_cache((phys_addr_t)&rxframe, sizeof(rxframe));

	/* Start the hardware */
	temp = readl(&priv->dmarx->control);
	temp |= XAXIDMA_CR_RUNSTOP_MASK;
	writel(temp, &priv->dmarx->control);

	/* Rx BD is ready - start */
	axienet_dma_write(&rx_bd, &priv->dmarx->tail);

	if (priv->mactype == EMAC_1G) {
		struct axi_regs *regs = priv->iobase;
		/* Enable TX */
		writel(XAE_TC_TX_MASK, &regs->tc);
		/* Enable RX */
		writel(XAE_RCW1_RX_MASK, &regs->rcw1);

		/* PHY setup */
		if (!setup_phy(dev)) {
			axiemac_stop(dev);
			return -1;
		}
	} else {
		struct xxv_axi_regs *regs = (struct xxv_axi_regs *)priv->iobase;
		/* Enable TX */
		writel(readl(&regs->tc) | XXV_TC_TX_MASK, &regs->tc);

		/* Enable RX */
		writel(readl(&regs->rcw1) | XXV_RCW1_RX_MASK, &regs->rcw1);
	}

	debug("axiemac: Init complete\n");
	return 0;
}

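/* Send one packet through the single TX buffer descriptor and poll for completion */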
static int axiemac_send(struct udevice *dev, void *ptr, int len)
{
	struct axidma_priv *priv = dev_get_priv(dev);
	u32 timeout;

	if (len > PKTSIZE_ALIGN)
		len = PKTSIZE_ALIGN;

	/* If size is less than min packet size, pad to min size */
	if (priv->mactype == EMAC_10G_25G && len < XXV_MIN_PKT_SIZE) {
		memset(txminframe, 0, XXV_MIN_PKT_SIZE);
		memcpy(txminframe, ptr, len);
		len = XXV_MIN_PKT_SIZE;
		ptr = txminframe;
	}

	/* Flush packet to main memory to be transferred by DMA */
	flush_cache((phys_addr_t)ptr, len);

	/* Setup Tx BD */
	memset(&tx_bd, 0, sizeof(tx_bd));
	/* At the end of the ring, link the last BD back to the top */
	tx_bd.next_desc = lower_32_bits((unsigned long)&tx_bd);
	tx_bd.buf_addr = lower_32_bits((unsigned long)ptr);
#if defined(CONFIG_PHYS_64BIT)
	tx_bd.next_desc_msb = upper_32_bits((unsigned long)&tx_bd);
	tx_bd.buf_addr_msb = upper_32_bits((unsigned long)ptr);
#endif
	/* Save len */
	tx_bd.cntrl = len | XAXIDMA_BD_CTRL_TXSOF_MASK |
		XAXIDMA_BD_CTRL_TXEOF_MASK;

	/* Flush the last BD so DMA core could see the updates */
	flush_cache((phys_addr_t)&tx_bd, sizeof(tx_bd));

	if (readl(&priv->dmatx->status) & XAXIDMA_HALTED_MASK) {
		u32 temp;
		axienet_dma_write(&tx_bd, &priv->dmatx->current);
		/* Start the hardware */
		temp = readl(&priv->dmatx->control);
		temp |= XAXIDMA_CR_RUNSTOP_MASK;
		writel(temp, &priv->dmatx->control);
	}

	/* Start transfer */
	axienet_dma_write(&tx_bd, &priv->dmatx->tail);

	/* Wait for transmission to complete */
	debug("axiemac: Waiting for tx to be done\n");
	timeout = 200;
	while (timeout && (!(readl(&priv->dmatx->status) &
			(XAXIDMA_IRQ_DELAY_MASK | XAXIDMA_IRQ_IOC_MASK)))) {
		timeout--;
		udelay(1);
	}
	if (!timeout) {
		printf("%s: Timeout\n", __func__);
		return 1;
	}

	debug("axiemac: Sending complete\n");
	return 0;
}

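/* Check whether the RX DMA channel signalled a completed frame */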
static int isrxready(struct axidma_priv *priv)
{
	u32 status;

	/* Read pending interrupts */
	status = readl(&priv->dmarx->status);

	/* Acknowledge pending interrupts */
	writel(status & XAXIDMA_IRQ_ALL_MASK, &priv->dmarx->status);

	/*
	 * If the reception done interrupt is asserted, a frame has been
	 * received and is ready to be handled.
	 */
	if ((status & (XAXIDMA_IRQ_DELAY_MASK | XAXIDMA_IRQ_IOC_MASK)))
		return 1;

	return 0;
}

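/*
 * Hand a received frame to the network stack; the length comes from the
 * RX BD (app4 on the buffered 1G MAC, the status word otherwise).
 */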
static int axiemac_recv(struct udevice *dev, int flags, uchar **packetp)
{
	u32 length;
	struct axidma_priv *priv = dev_get_priv(dev);
	u32 temp;

	/* Wait for an incoming packet */
	if (!isrxready(priv))
		return -1;

	debug("axiemac: RX data ready\n");

	/* Disable IRQ for a moment till packet is handled */
	temp = readl(&priv->dmarx->control);
	temp &= ~XAXIDMA_IRQ_ALL_MASK;
	writel(temp, &priv->dmarx->control);
	if (!priv->eth_hasnobuf && priv->mactype == EMAC_1G)
		length = rx_bd.app4 & 0xFFFF; /* max length mask */
	else
		length = rx_bd.status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;

#ifdef DEBUG
	print_buffer(&rxframe, &rxframe[0], 1, length, 16);
#endif

	*packetp = rxframe;
	return length;
}

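/* Recycle the RX buffer descriptor so DMA can receive the next frame */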
static int axiemac_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct axidma_priv *priv = dev_get_priv(dev);

#ifdef DEBUG
	/* It is useful to clear buffer to be sure that it is consistent */
	memset(rxframe, 0, sizeof(rxframe));
#endif
	/* Setup RxBD */
	/* Clear the whole buffer and setup it again - all flags are cleared */
	memset(&rx_bd, 0, sizeof(rx_bd));
	rx_bd.next_desc = lower_32_bits((unsigned long)&rx_bd);
	rx_bd.buf_addr = lower_32_bits((unsigned long)&rxframe);
#if defined(CONFIG_PHYS_64BIT)
	rx_bd.next_desc_msb = upper_32_bits((unsigned long)&rx_bd);
	rx_bd.buf_addr_msb = upper_32_bits((unsigned long)&rxframe);
#endif
	rx_bd.cntrl = sizeof(rxframe);

	/* Write bd to HW */
	flush_cache((phys_addr_t)&rx_bd, sizeof(rx_bd));

	/* It is necessary to flush rxframe because if you don't do it
	 * then cache will contain previous packet */
	flush_cache((phys_addr_t)&rxframe, sizeof(rxframe));

	/* Rx BD is ready - start again */
	axienet_dma_write(&rx_bd, &priv->dmarx->tail);

	debug("axiemac: RX completed, framelength = %d\n", length);

	return 0;
}

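/* MII accessors used by the generic U-Boot MDIO/PHY layer */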
static int axiemac_miiphy_read(struct mii_dev *bus, int addr,
			       int devad, int reg)
{
	int ret;
	u16 value;

	ret = phyread(bus->priv, addr, reg, &value);
	debug("axiemac: Read MII 0x%x, 0x%x, 0x%x, %d\n", addr, reg,
	      value, ret);
	return value;
}

static int axiemac_miiphy_write(struct mii_dev *bus, int addr, int devad,
				int reg, u16 value)
{
	debug("axiemac: Write MII 0x%x, 0x%x, 0x%x\n", addr, reg, value);
	return phywrite(bus->priv, addr, reg, value);
}

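/*
 * Map the MAC and DMA register spaces and, for the 1G MAC, register the
 * MDIO bus and initialize the PHY.
 */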
static int axi_emac_probe(struct udevice *dev)
{
	struct axidma_plat *plat = dev_get_plat(dev);
	struct eth_pdata *pdata = &plat->eth_pdata;
	struct axidma_priv *priv = dev_get_priv(dev);
	int ret;

	priv->iobase = (struct axi_regs *)pdata->iobase;
	priv->dmatx = plat->dmatx;
	/* RX channel offset is 0x30 */
	priv->dmarx = (struct axidma_reg *)((phys_addr_t)priv->dmatx + 0x30);
	priv->mactype = plat->mactype;

	if (priv->mactype == EMAC_1G) {
		priv->eth_hasnobuf = plat->eth_hasnobuf;
		priv->phyaddr = plat->phyaddr;
		priv->phy_of_handle = plat->phy_of_handle;
		priv->interface = pdata->phy_interface;

		if (IS_ENABLED(CONFIG_DM_ETH_PHY))
			priv->bus = eth_phy_get_mdio_bus(dev);

		if (!priv->bus) {
			priv->bus = mdio_alloc();
			priv->bus->read = axiemac_miiphy_read;
			priv->bus->write = axiemac_miiphy_write;
			priv->bus->priv = priv;

			ret = mdio_register_seq(priv->bus, dev_seq(dev));
			if (ret)
				return ret;
		}

		if (IS_ENABLED(CONFIG_DM_ETH_PHY))
			eth_phy_set_mdio_bus(dev, priv->bus);

		axiemac_phy_init(dev);
	}

	printf("AXI EMAC: %lx, phyaddr %d, interface %s\n", (ulong)pdata->iobase,
	       priv->phyaddr, phy_string_for_interface(pdata->phy_interface));

	return 0;
}

static int axi_emac_remove(struct udevice *dev)
{
	struct axidma_priv *priv = dev_get_priv(dev);

	if (priv->mactype == EMAC_1G) {
		free(priv->phydev);
		mdio_unregister(priv->bus);
		mdio_free(priv->bus);
	}

	return 0;
}

static const struct eth_ops axi_emac_ops = {
	.start			= axiemac_start,
	.send			= axiemac_send,
	.recv			= axiemac_recv,
	.free_pkt		= axiemac_free_pkt,
	.stop			= axiemac_stop,
	.write_hwaddr		= axiemac_write_hwaddr,
};

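/* Parse register bases, MAC variant, PHY handle/mode and buffer mode from DT */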
static int axi_emac_of_to_plat(struct udevice *dev)
{
	struct axidma_plat *plat = dev_get_plat(dev);
	struct eth_pdata *pdata = &plat->eth_pdata;
	int node = dev_of_offset(dev);
	int offset = 0;

	pdata->iobase = dev_read_addr(dev);
	plat->mactype = dev_get_driver_data(dev);

	offset = fdtdec_lookup_phandle(gd->fdt_blob, node,
				       "axistream-connected");
	if (offset <= 0) {
		printf("%s: axistream is not found\n", __func__);
		return -EINVAL;
	}
	plat->dmatx = (struct axidma_reg *)fdtdec_get_addr_size_auto_parent
			(gd->fdt_blob, 0, offset, "reg", 0, NULL, false);
	if (!plat->dmatx) {
		printf("%s: axi_dma register space not found\n", __func__);
		return -EINVAL;
	}

	if (plat->mactype == EMAC_1G) {
		plat->phyaddr = -1;

		offset = fdtdec_lookup_phandle(gd->fdt_blob, node,
					       "phy-handle");
		if (offset > 0) {
			if (!(IS_ENABLED(CONFIG_DM_ETH_PHY)))
				plat->phyaddr = fdtdec_get_int(gd->fdt_blob,
							       offset,
							       "reg", -1);
			plat->phy_of_handle = offset;
		}

		pdata->phy_interface = dev_read_phy_mode(dev);
		if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
			return -EINVAL;

		plat->eth_hasnobuf = fdtdec_get_bool(gd->fdt_blob, node,
						     "xlnx,eth-hasnobuf");
	}

	return 0;
}

881static const struct udevice_id axi_emac_ids[] = {
Ashok Reddy Soma645aa762021-06-24 00:34:41 -0600882 { .compatible = "xlnx,axi-ethernet-1.00.a", .data = (uintptr_t)EMAC_1G },
883 { .compatible = "xlnx,xxv-ethernet-1.0", .data = (uintptr_t)EMAC_10G_25G },
Michal Simek682baac2015-12-08 15:44:41 +0100884 { }
885};
886
887U_BOOT_DRIVER(axi_emac) = {
888 .name = "axi_emac",
889 .id = UCLASS_ETH,
890 .of_match = axi_emac_ids,
Simon Glassaad29ae2020-12-03 16:55:21 -0700891 .of_to_plat = axi_emac_of_to_plat,
Michal Simek682baac2015-12-08 15:44:41 +0100892 .probe = axi_emac_probe,
893 .remove = axi_emac_remove,
894 .ops = &axi_emac_ops,
Simon Glass8a2b47f2020-12-03 16:55:17 -0700895 .priv_auto = sizeof(struct axidma_priv),
Ashok Reddy Soma3c4df952021-06-24 00:34:40 -0600896 .plat_auto = sizeof(struct axidma_plat),
Michal Simek682baac2015-12-08 15:44:41 +0100897};