// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023, Intel Corporation.
 *
 * Portions based on U-Boot's dwc_eth_qos.c.
 */

/*
 * This driver supports the Synopsys Designware Ethernet XGMAC (10G Ethernet
 * MAC) IP block. The IP supports multiple options for bus type, clocking/
 * reset structure, and feature list.
 *
 * The driver is written such that generic core logic is kept separate from
 * configuration-specific logic. Code that interacts with configuration-
 * specific resources is split out into separate functions to avoid polluting
 * common code. If/when this driver is enhanced to support multiple
 * configurations, the core code should be adapted to call all configuration-
 * specific functions through function pointers, with the definition of those
 * function pointers being supplied by struct udevice_id xgmac_ids[]'s .data
 * field.
 *
 * This configuration uses an AXI master/DMA bus, an AHB slave/register bus,
 * contains the DMA, MTL, and MAC sub-blocks, and supports a single RGMII PHY.
 * This configuration also has SW control over all clock and reset signals to
 * the HW block.
 */
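
/*
 * Sketch of the dispatch scheme described above, as already wired up at the
 * bottom of this file (struct xgmac_config and its ops table live in
 * dwc_eth_xgmac.h; the shape shown here is inferred from their use below):
 *
 *	xgmac->config = (void *)dev_get_driver_data(dev);
 *	ret = xgmac->config->ops->xgmac_start_resets(dev);
 *
 * A second configuration would add another xgmac_ids[] entry whose .data
 * points at its own struct xgmac_config with its own ops table.
 */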

#define LOG_CATEGORY UCLASS_ETH

#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <eth_phy.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <phy.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include "dwc_eth_xgmac.h"

static void *xgmac_alloc_descs(struct xgmac_priv *xgmac, unsigned int num)
{
	return memalign(ARCH_DMA_MINALIGN, num * xgmac->desc_size);
}

static void xgmac_free_descs(void *descs)
{
	free(descs);
}

static struct xgmac_desc *xgmac_get_desc(struct xgmac_priv *xgmac,
					 unsigned int num, bool rx)
{
	return (rx ? xgmac->rx_descs : xgmac->tx_descs) +
	       (num * xgmac->desc_size);
}

void xgmac_inval_desc_generic(void *desc)
{
	unsigned long start;
	unsigned long end;

	if (!desc) {
		pr_err("%s invalid input buffer\n", __func__);
		return;
	}

	start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN(start + sizeof(struct xgmac_desc),
		    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

void xgmac_flush_desc_generic(void *desc)
{
	unsigned long start;
	unsigned long end;

	if (!desc) {
		pr_err("%s invalid input buffer\n", __func__);
		return;
	}

	start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN(start + sizeof(struct xgmac_desc),
		    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

void xgmac_inval_buffer_generic(void *buf, size_t size)
{
	unsigned long start;
	unsigned long end;

	if (!buf) {
		pr_err("%s invalid input buffer\n", __func__);
		return;
	}

	start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN((unsigned long)buf + size,
		    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

void xgmac_flush_buffer_generic(void *buf, size_t size)
{
	unsigned long start;
	unsigned long end;

	if (!buf) {
		pr_err("%s invalid input buffer\n", __func__);
		return;
	}

	start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN((unsigned long)buf + size,
		    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}
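
/*
 * Worked example for the alignment math above, assuming 64-byte cache lines
 * (ARCH_DMA_MINALIGN == 64): for buf == 0x12345678 and size == 100, start is
 * rounded down to 0x12345640 and end is rounded up to 0x12345700, so whole
 * cache lines are maintained even though the buffer is not line-aligned.
 */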

static int xgmac_mdio_wait_idle(struct xgmac_priv *xgmac)
{
	return wait_for_bit_le32(&xgmac->mac_regs->mdio_data,
				 XGMAC_MAC_MDIO_ADDRESS_SBUSY, false,
				 XGMAC_TIMEOUT_100MS, true);
}

static int xgmac_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			   int mdio_reg)
{
	struct xgmac_priv *xgmac = bus->priv;
	u32 val;
	u32 hw_addr;
	int ret;

	debug("%s(dev=%p, addr=0x%x, reg=%d):\n", __func__, xgmac->dev, mdio_addr,
	      mdio_reg);

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("%s MDIO not idle at entry: %d\n",
		       xgmac->dev->name, ret);

		return ret;
	}

	/* Set clause 22 format */
	val = BIT(mdio_addr);
	writel(val, &xgmac->mac_regs->mdio_clause_22_port);

	hw_addr = (mdio_addr << XGMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
		  (mdio_reg & XGMAC_MAC_MDIO_REG_ADDR_C22P_MASK);

	val = xgmac->config->config_mac_mdio <<
	      XGMAC_MAC_MDIO_ADDRESS_CR_SHIFT;

	val |= XGMAC_MAC_MDIO_ADDRESS_SADDR |
	       XGMAC_MDIO_SINGLE_CMD_ADDR_CMD_READ |
	       XGMAC_MAC_MDIO_ADDRESS_SBUSY;

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("%s MDIO not idle at entry: %d\n",
		       xgmac->dev->name, ret);

		return ret;
	}

	writel(hw_addr, &xgmac->mac_regs->mdio_address);
	writel(val, &xgmac->mac_regs->mdio_data);

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("%s MDIO read didn't complete: %d\n",
		       xgmac->dev->name, ret);

		return ret;
	}

	val = readl(&xgmac->mac_regs->mdio_data);
	val &= XGMAC_MAC_MDIO_DATA_GD_MASK;

	debug("%s: val=0x%x\n", __func__, val);

	return val;
}
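
/*
 * Once registered in xgmac_probe(), this bus serves phylib accesses and, for
 * example, the U-Boot "mdio" shell command ("mdio list" shows the bus under
 * the name copied from dev->name during probe).
 */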

static int xgmac_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			    int mdio_reg, u16 mdio_val)
{
	struct xgmac_priv *xgmac = bus->priv;
	u32 val;
	u32 hw_addr;
	int ret;

	debug("%s(dev=%p, addr=0x%x, reg=%d, val=0x%x):\n", __func__, xgmac->dev,
	      mdio_addr, mdio_reg, mdio_val);

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("%s MDIO not idle at entry: %d\n",
		       xgmac->dev->name, ret);

		return ret;
	}

	/* Set clause 22 format */
	val = BIT(mdio_addr);
	writel(val, &xgmac->mac_regs->mdio_clause_22_port);

	hw_addr = (mdio_addr << XGMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
		  (mdio_reg & XGMAC_MAC_MDIO_REG_ADDR_C22P_MASK);

	hw_addr |= (mdio_reg >> XGMAC_MAC_MDIO_ADDRESS_PA_SHIFT) <<
		   XGMAC_MAC_MDIO_ADDRESS_DA_SHIFT;

	val = (xgmac->config->config_mac_mdio <<
	       XGMAC_MAC_MDIO_ADDRESS_CR_SHIFT);

	val |= XGMAC_MAC_MDIO_ADDRESS_SADDR |
	       mdio_val | XGMAC_MDIO_SINGLE_CMD_ADDR_CMD_WRITE |
	       XGMAC_MAC_MDIO_ADDRESS_SBUSY;

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("%s MDIO not idle at entry: %d\n",
		       xgmac->dev->name, ret);

		return ret;
	}

	writel(hw_addr, &xgmac->mac_regs->mdio_address);
	writel(val, &xgmac->mac_regs->mdio_data);

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("%s MDIO write didn't complete: %d\n",
		       xgmac->dev->name, ret);

		return ret;
	}

	return 0;
}

static int xgmac_set_full_duplex(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&xgmac->mac_regs->mac_extended_conf, XGMAC_MAC_EXT_CONF_HD);

	return 0;
}

static int xgmac_set_half_duplex(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&xgmac->mac_regs->mac_extended_conf, XGMAC_MAC_EXT_CONF_HD);

	/* WAR: Flush TX queue when switching to half-duplex */
	setbits_le32(&xgmac->mtl_regs->txq0_operation_mode,
		     XGMAC_MTL_TXQ0_OPERATION_MODE_FTQ);

	return 0;
}

static int xgmac_set_gmii_speed(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	u32 val;

	debug("%s(dev=%p):\n", __func__, dev);

	val = XGMAC_MAC_CONF_SS_1G_GMII << XGMAC_MAC_CONF_SS_SHIFT;
	writel(val, &xgmac->mac_regs->tx_configuration);

	return 0;
}

static int xgmac_set_mii_speed_100(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	u32 val;

	debug("%s(dev=%p):\n", __func__, dev);

	val = XGMAC_MAC_CONF_SS_100M_MII << XGMAC_MAC_CONF_SS_SHIFT;
	writel(val, &xgmac->mac_regs->tx_configuration);

	return 0;
}

static int xgmac_set_mii_speed_10(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	u32 val;

	debug("%s(dev=%p):\n", __func__, dev);

	val = XGMAC_MAC_CONF_SS_2_10M_MII << XGMAC_MAC_CONF_SS_SHIFT;
	writel(val, &xgmac->mac_regs->tx_configuration);

	return 0;
}

static int xgmac_adjust_link(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	int ret;
	bool en_calibration;

	debug("%s(dev=%p):\n", __func__, dev);

	if (xgmac->phy->duplex)
		ret = xgmac_set_full_duplex(dev);
	else
		ret = xgmac_set_half_duplex(dev);
	if (ret < 0) {
		pr_err("%s xgmac_set_*_duplex() failed: %d\n", dev->name, ret);
		return ret;
	}

	switch (xgmac->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = xgmac_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = xgmac_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = xgmac_set_mii_speed_10(dev);
		break;
	default:
		pr_err("%s invalid speed %d\n", dev->name, xgmac->phy->speed);
		return -EINVAL;
	}
	if (ret < 0) {
		pr_err("%s xgmac_set_*mii_speed*() failed: %d\n", dev->name, ret);
		return ret;
	}

	if (en_calibration) {
		ret = xgmac->config->ops->xgmac_calibrate_pads(dev);
		if (ret < 0) {
			pr_err("%s xgmac_calibrate_pads() failed: %d\n",
			       dev->name, ret);

			return ret;
		}
	} else {
		ret = xgmac->config->ops->xgmac_disable_calibration(dev);
		if (ret < 0) {
			pr_err("%s xgmac_disable_calibration() failed: %d\n",
			       dev->name, ret);

			return ret;
		}
	}

	return 0;
}

static int xgmac_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_plat(dev);
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	u32 val;

	/*
	 * This function may be called before start() or after stop(). At that
	 * time, on at least some configurations of the XGMAC HW, all clocks to
	 * the XGMAC HW block will be stopped, and a reset signal applied. If
	 * any register access is attempted in this state, bus timeouts or CPU
	 * hangs may occur. This check prevents that.
	 *
	 * A simple solution to this problem would be to not implement
	 * write_hwaddr(), since start() always writes the MAC address into HW
	 * anyway. However, it is desirable to implement write_hwaddr() to
	 * support the case of SW that runs subsequent to U-Boot which expects
	 * the MAC address to already be programmed into the XGMAC registers,
	 * which must happen irrespective of whether the U-Boot user (or
	 * scripts) actually made use of the XGMAC device, and hence
	 * irrespective of whether start() was ever called.
	 */
	if (!xgmac->config->reg_access_always_ok && !xgmac->reg_access_ok)
		return 0;

	/* Update the MAC address */
	val = (plat->enetaddr[5] << 8) |
	      (plat->enetaddr[4]);
	writel(val, &xgmac->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
	      (plat->enetaddr[2] << 16) |
	      (plat->enetaddr[1] << 8) |
	      (plat->enetaddr[0]);
	writel(val, &xgmac->mac_regs->address0_low);
	return 0;
}
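
/*
 * Example of the packing above: for address 02:11:22:33:44:55, address0_high
 * is written with 0x5544 (the last two octets) and address0_low with
 * 0x33221102 (the first four), i.e. the address is laid out in little-endian
 * byte order.
 */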

static int xgmac_read_rom_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	int ret;

	ret = xgmac->config->ops->xgmac_get_enetaddr(dev);
	if (ret < 0)
		return ret;

	return !is_valid_ethaddr(pdata->enetaddr);
}

static int xgmac_get_phy_addr(struct xgmac_priv *priv, struct udevice *dev)
{
	struct ofnode_phandle_args phandle_args;
	int reg;

	if (dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
				       &phandle_args)) {
		debug("Failed to find phy-handle\n");
		return -ENODEV;
	}

	priv->phy_of_node = phandle_args.node;

	reg = ofnode_read_u32_default(phandle_args.node, "reg", 0);

	return reg;
}

static int xgmac_start(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	int ret, i;
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;
	ulong desc_pad, address;

	struct xgmac_desc *tx_desc = NULL;
	struct xgmac_desc *rx_desc = NULL;
	int addr = -1;

	debug("%s(dev=%p):\n", __func__, dev);

	xgmac->tx_desc_idx = 0;
	xgmac->rx_desc_idx = 0;

	ret = xgmac->config->ops->xgmac_start_resets(dev);
	if (ret < 0) {
		pr_err("%s xgmac_start_resets() failed: %d\n", dev->name, ret);
		goto err;
	}

	xgmac->reg_access_ok = true;

	ret = wait_for_bit_le32(&xgmac->dma_regs->mode,
				XGMAC_DMA_MODE_SWR, false,
				xgmac->config->swr_wait, false);
	if (ret) {
		pr_err("%s XGMAC_DMA_MODE_SWR stuck: %d\n", dev->name, ret);
		goto err_stop_resets;
	}

	ret = xgmac->config->ops->xgmac_calibrate_pads(dev);
	if (ret < 0) {
		pr_err("%s xgmac_calibrate_pads() failed: %d\n", dev->name, ret);
		goto err_stop_resets;
	}

	/*
	 * If the PHY was already connected and configured, there is no need
	 * to reconnect/reconfigure it again.
	 */
	if (!xgmac->phy) {
		addr = xgmac_get_phy_addr(xgmac, dev);
		xgmac->phy = phy_connect(xgmac->mii, addr, dev,
					 xgmac->config->interface(dev));
		if (!xgmac->phy) {
			pr_err("%s phy_connect() failed\n", dev->name);
			ret = -ENODEV;
			goto err_stop_resets;
		}

		if (xgmac->max_speed) {
			ret = phy_set_supported(xgmac->phy, xgmac->max_speed);
			if (ret) {
				pr_err("%s phy_set_supported() failed: %d\n",
				       dev->name, ret);

				goto err_shutdown_phy;
			}
		}

		xgmac->phy->node = xgmac->phy_of_node;
		ret = phy_config(xgmac->phy);
		if (ret < 0) {
			pr_err("%s phy_config() failed: %d\n", dev->name, ret);
			goto err_shutdown_phy;
		}
	}

	ret = phy_startup(xgmac->phy);
	if (ret < 0) {
		pr_err("%s phy_startup() failed: %d\n", dev->name, ret);
		goto err_shutdown_phy;
	}

	if (!xgmac->phy->link) {
		pr_err("%s No link\n", dev->name);
		ret = -EIO;
		goto err_shutdown_phy;
	}

	ret = xgmac_adjust_link(dev);
	if (ret < 0) {
		pr_err("%s xgmac_adjust_link() failed: %d\n", dev->name, ret);
		goto err_shutdown_phy;
	}

	/* Configure MTL */

	/* Enable Store and Forward mode for TX */
	/* Program Tx operating mode */
	setbits_le32(&xgmac->mtl_regs->txq0_operation_mode,
		     XGMAC_MTL_TXQ0_OPERATION_MODE_TSF |
		     (XGMAC_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
		      XGMAC_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));

	/* Transmit Queue weight */
	writel(0x10, &xgmac->mtl_regs->txq0_quantum_weight);

	/* Enable Store and Forward mode for RX, since jumbo frames are not used */
	setbits_le32(&xgmac->mtl_regs->rxq0_operation_mode,
		     XGMAC_MTL_RXQ0_OPERATION_MODE_RSF);

	/* Transmit/Receive queue fifo size; use all RAM for 1 queue */
	val = readl(&xgmac->mac_regs->hw_feature1);
	tx_fifo_sz = (val >> XGMAC_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
		     XGMAC_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
	rx_fifo_sz = (val >> XGMAC_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
		     XGMAC_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

	/*
	 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
	 * r/tqs is encoded as (n / 256) - 1.
	 */
	tqs = (128 << tx_fifo_sz) / 256 - 1;
	rqs = (128 << rx_fifo_sz) / 256 - 1;
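	/*
	 * Worked example: a HW_FEATURE1 field value of 7 means a
	 * 128 << 7 == 16384 byte FIFO, which re-encodes as
	 * tqs/rqs == 16384 / 256 - 1 == 63.
	 */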

	clrsetbits_le32(&xgmac->mtl_regs->txq0_operation_mode,
			XGMAC_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			XGMAC_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << XGMAC_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&xgmac->mtl_regs->rxq0_operation_mode,
			XGMAC_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			XGMAC_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << XGMAC_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	setbits_le32(&xgmac->mtl_regs->rxq0_operation_mode,
		     XGMAC_MTL_RXQ0_OPERATION_MODE_EHFC);

	/* Configure MAC */
	clrsetbits_le32(&xgmac->mac_regs->rxq_ctrl0,
			XGMAC_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			XGMAC_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			xgmac->config->config_mac <<
			XGMAC_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/* Multicast and Broadcast Queue Enable */
	setbits_le32(&xgmac->mac_regs->rxq_ctrl1,
		     XGMAC_MAC_RXQ_CTRL1_MCBCQEN);

	/* Enable promiscuous mode and receive-all mode */
	setbits_le32(&xgmac->mac_regs->mac_packet_filter,
		     XGMAC_MAC_PACKET_FILTER_RA |
		     XGMAC_MAC_PACKET_FILTER_PR);

	/* Set TX flow control parameters */
	/* Set Pause Time */
	setbits_le32(&xgmac->mac_regs->q0_tx_flow_ctrl,
		     XGMAC_MAC_Q0_TX_FLOW_CTRL_PT_MASK <<
		     XGMAC_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);

	/* Assign priority for RX flow control */
	clrbits_le32(&xgmac->mac_regs->rxq_ctrl2,
		     XGMAC_MAC_RXQ_CTRL2_PSRQ0_MASK <<
		     XGMAC_MAC_RXQ_CTRL2_PSRQ0_SHIFT);

	/* Enable flow control */
	setbits_le32(&xgmac->mac_regs->q0_tx_flow_ctrl,
		     XGMAC_MAC_Q0_TX_FLOW_CTRL_TFE);
	setbits_le32(&xgmac->mac_regs->rx_flow_ctrl,
		     XGMAC_MAC_RX_FLOW_CTRL_RFE);

	clrbits_le32(&xgmac->mac_regs->tx_configuration,
		     XGMAC_MAC_CONF_JD);

	clrbits_le32(&xgmac->mac_regs->rx_configuration,
		     XGMAC_MAC_CONF_JE |
		     XGMAC_MAC_CONF_GPSLCE |
		     XGMAC_MAC_CONF_WD);

	setbits_le32(&xgmac->mac_regs->rx_configuration,
		     XGMAC_MAC_CONF_ACS |
		     XGMAC_MAC_CONF_CST);

	ret = xgmac_write_hwaddr(dev);
	if (ret < 0) {
		pr_err("%s xgmac_write_hwaddr() failed: %d\n", dev->name, ret);
		goto err_shutdown_phy;
	}

	/* Configure DMA */
	clrsetbits_le32(&xgmac->dma_regs->sysbus_mode,
			XGMAC_DMA_SYSBUS_MODE_AAL,
			XGMAC_DMA_SYSBUS_MODE_EAME |
			XGMAC_DMA_SYSBUS_MODE_UNDEF);

	/* Enable OSP mode */
	setbits_le32(&xgmac->dma_regs->ch0_tx_control,
		     XGMAC_DMA_CH0_TX_CONTROL_OSP);

	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&xgmac->dma_regs->ch0_rx_control,
			XGMAC_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			XGMAC_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			XGMAC_MAX_PACKET_SIZE <<
			XGMAC_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	desc_pad = (xgmac->desc_size - sizeof(struct xgmac_desc)) /
		   xgmac->config->axi_bus_width;

	setbits_le32(&xgmac->dma_regs->ch0_control,
		     XGMAC_DMA_CH0_CONTROL_PBLX8 |
		     (desc_pad << XGMAC_DMA_CH0_CONTROL_DSL_SHIFT));

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;
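	/*
	 * Example: a 16 KiB TX FIFO gives tqs == 63, so pbl starts at 64 and
	 * is clamped to 32, the largest burst length this driver programs.
	 */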

	clrsetbits_le32(&xgmac->dma_regs->ch0_tx_control,
			XGMAC_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			XGMAC_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << XGMAC_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);

	clrsetbits_le32(&xgmac->dma_regs->ch0_rx_control,
			XGMAC_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			XGMAC_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << XGMAC_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

	/* DMA performance configuration */
	val = (XGMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK <<
	       XGMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
	      (XGMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_MASK <<
	       XGMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_SHIFT) |
	      XGMAC_DMA_SYSBUS_MODE_EAME |
	      XGMAC_DMA_SYSBUS_MODE_BLEN16 |
	      XGMAC_DMA_SYSBUS_MODE_BLEN8 |
	      XGMAC_DMA_SYSBUS_MODE_BLEN4 |
	      XGMAC_DMA_SYSBUS_MODE_BLEN32;

	writel(val, &xgmac->dma_regs->sysbus_mode);

	/* Set up descriptors */

	memset(xgmac->tx_descs, 0, xgmac->desc_size * XGMAC_DESCRIPTORS_TX);
	memset(xgmac->rx_descs, 0, xgmac->desc_size * XGMAC_DESCRIPTORS_RX);

	for (i = 0; i < XGMAC_DESCRIPTORS_TX; i++) {
		tx_desc = (struct xgmac_desc *)xgmac_get_desc(xgmac, i, false);

		xgmac->config->ops->xgmac_flush_desc(tx_desc);
	}

	for (i = 0; i < XGMAC_DESCRIPTORS_RX; i++) {
		rx_desc = (struct xgmac_desc *)xgmac_get_desc(xgmac, i, true);

		address = (uintptr_t)(xgmac->rx_dma_buf +
				      (i * XGMAC_MAX_PACKET_SIZE));

		rx_desc->des0 = lower_32_bits(address);
		rx_desc->des1 = upper_32_bits(address);
		rx_desc->des3 = XGMAC_DESC3_OWN;
		/* Flush the cache to the memory */
		mb();
		xgmac->config->ops->xgmac_flush_desc(rx_desc);
		xgmac->config->ops->xgmac_inval_buffer(xgmac->rx_dma_buf +
						       (i * XGMAC_MAX_PACKET_SIZE),
						       XGMAC_MAX_PACKET_SIZE);
	}

	address = (ulong)xgmac_get_desc(xgmac, 0, false);
	writel(upper_32_bits(address),
	       &xgmac->dma_regs->ch0_txdesc_list_haddress);
	writel(lower_32_bits(address),
	       &xgmac->dma_regs->ch0_txdesc_list_address);
	writel(XGMAC_DESCRIPTORS_TX - 1,
	       &xgmac->dma_regs->ch0_txdesc_ring_length);
	address = (ulong)xgmac_get_desc(xgmac, 0, true);
	writel(upper_32_bits(address),
	       &xgmac->dma_regs->ch0_rxdesc_list_haddress);
	writel(lower_32_bits(address),
	       &xgmac->dma_regs->ch0_rxdesc_list_address);
	writel(XGMAC_DESCRIPTORS_RX - 1,
	       &xgmac->dma_regs->ch0_rxdesc_ring_length);

	/* Enable everything */
	setbits_le32(&xgmac->dma_regs->ch0_tx_control,
		     XGMAC_DMA_CH0_TX_CONTROL_ST);
	setbits_le32(&xgmac->dma_regs->ch0_rx_control,
		     XGMAC_DMA_CH0_RX_CONTROL_SR);
	setbits_le32(&xgmac->mac_regs->tx_configuration,
		     XGMAC_MAC_CONF_TE);
	setbits_le32(&xgmac->mac_regs->rx_configuration,
		     XGMAC_MAC_CONF_RE);

	/* TX tail pointer not written until we need to TX a packet */
	/*
	 * Point RX tail pointer at last descriptor. Ideally, we'd point at the
	 * first descriptor, implying all descriptors were available. However,
	 * that's not distinguishable from none of the descriptors being
	 * available.
	 */
	last_rx_desc = (ulong)xgmac_get_desc(xgmac, XGMAC_DESCRIPTORS_RX - 1, true);
	writel(last_rx_desc, &xgmac->dma_regs->ch0_rxdesc_tail_pointer);

	xgmac->started = true;

	debug("%s: OK\n", __func__);
	return 0;

err_shutdown_phy:
	phy_shutdown(xgmac->phy);
err_stop_resets:
	xgmac->config->ops->xgmac_stop_resets(dev);
err:
	pr_err("%s FAILED: %d\n", dev->name, ret);
	return ret;
}

static void xgmac_stop(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	unsigned long start_time;
	u32 val;
	u32 trcsts;
	u32 txqsts;
	u32 prxq;
	u32 rxqsts;

	debug("%s(dev=%p):\n", __func__, dev);

	if (!xgmac->started)
		return;
	xgmac->started = false;
	xgmac->reg_access_ok = false;

	/* Disable TX DMA */
	clrbits_le32(&xgmac->dma_regs->ch0_tx_control,
		     XGMAC_DMA_CH0_TX_CONTROL_ST);

	/* Wait for all TX packets to drain out of MTL */
	start_time = get_timer(0);

	while (get_timer(start_time) < XGMAC_TIMEOUT_100MS) {
		val = readl(&xgmac->mtl_regs->txq0_debug);

		trcsts = (val >> XGMAC_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			 XGMAC_MTL_TXQ0_DEBUG_TRCSTS_MASK;

		txqsts = val & XGMAC_MTL_TXQ0_DEBUG_TXQSTS;

		if (trcsts != XGMAC_MTL_TXQ0_DEBUG_TRCSTS_READ_STATE && !txqsts)
			break;
	}

	/* Turn off MAC TX and RX */
	clrbits_le32(&xgmac->mac_regs->tx_configuration,
		     XGMAC_MAC_CONF_TE);
	clrbits_le32(&xgmac->mac_regs->rx_configuration,
		     XGMAC_MAC_CONF_RE);

	/* Wait for all RX packets to drain out of MTL */
	start_time = get_timer(0);

	while (get_timer(start_time) < XGMAC_TIMEOUT_100MS) {
		val = readl(&xgmac->mtl_regs->rxq0_debug);

		prxq = (val >> XGMAC_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
		       XGMAC_MTL_RXQ0_DEBUG_PRXQ_MASK;

		rxqsts = (val >> XGMAC_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
			 XGMAC_MTL_RXQ0_DEBUG_RXQSTS_MASK;

		if (!prxq && !rxqsts)
			break;
	}

	/* Turn off RX DMA */
	clrbits_le32(&xgmac->dma_regs->ch0_rx_control,
		     XGMAC_DMA_CH0_RX_CONTROL_SR);

	if (xgmac->phy)
		phy_shutdown(xgmac->phy);

	xgmac->config->ops->xgmac_stop_resets(dev);

	debug("%s: OK\n", __func__);
}

static int xgmac_send(struct udevice *dev, void *packet, int length)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	struct xgmac_desc *tx_desc;
	unsigned long start_time;

	debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
	      length);

	memcpy(xgmac->tx_dma_buf, packet, length);
	xgmac->config->ops->xgmac_flush_buffer(xgmac->tx_dma_buf, length);

	tx_desc = xgmac_get_desc(xgmac, xgmac->tx_desc_idx, false);
	xgmac->tx_desc_idx++;
	xgmac->tx_desc_idx %= XGMAC_DESCRIPTORS_TX;

	tx_desc->des0 = lower_32_bits((ulong)xgmac->tx_dma_buf);
	tx_desc->des1 = upper_32_bits((ulong)xgmac->tx_dma_buf);
	tx_desc->des2 = length;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	tx_desc->des3 = XGMAC_DESC3_OWN | XGMAC_DESC3_FD | XGMAC_DESC3_LD | length;
	xgmac->config->ops->xgmac_flush_desc(tx_desc);

	writel((ulong)xgmac_get_desc(xgmac, xgmac->tx_desc_idx, false),
	       &xgmac->dma_regs->ch0_txdesc_tail_pointer);

	start_time = get_timer(0);

	while (get_timer(start_time) < XGMAC_TIMEOUT_100MS) {
		xgmac->config->ops->xgmac_inval_desc(tx_desc);
		if (!(readl(&tx_desc->des3) & XGMAC_DESC3_OWN))
			return 0;
	}
	debug("%s: TX timeout\n", __func__);

	return -ETIMEDOUT;
}

static int xgmac_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	struct xgmac_desc *rx_desc;
	int length;

	debug("%s(dev=%p, flags=0x%x):\n", __func__, dev, flags);

	rx_desc = xgmac_get_desc(xgmac, xgmac->rx_desc_idx, true);
	xgmac->config->ops->xgmac_inval_desc(rx_desc);
	if (rx_desc->des3 & XGMAC_DESC3_OWN) {
		debug("%s: RX packet not available\n", __func__);
		return -EAGAIN;
	}

	*packetp = xgmac->rx_dma_buf +
		   (xgmac->rx_desc_idx * XGMAC_MAX_PACKET_SIZE);
	length = rx_desc->des3 & XGMAC_RDES3_PKT_LENGTH_MASK;
	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);

	xgmac->config->ops->xgmac_inval_buffer(*packetp, length);

	return length;
}

static int xgmac_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	u32 idx, idx_mask = xgmac->desc_per_cacheline - 1;
	uchar *packet_expected;
	struct xgmac_desc *rx_desc;
	ulong address;

	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);

	packet_expected = xgmac->rx_dma_buf +
			  (xgmac->rx_desc_idx * XGMAC_MAX_PACKET_SIZE);
	if (packet != packet_expected) {
		debug("%s: Unexpected packet (expected %p)\n", __func__,
		      packet_expected);
		return -EINVAL;
	}

	xgmac->config->ops->xgmac_inval_buffer(packet, length);

	if ((xgmac->rx_desc_idx & idx_mask) == idx_mask) {
		for (idx = xgmac->rx_desc_idx - idx_mask;
		     idx <= xgmac->rx_desc_idx;
		     idx++) {
			rx_desc = xgmac_get_desc(xgmac, idx, true);
			rx_desc->des0 = 0;
			rx_desc->des1 = 0;
			/* Flush the cache to the memory */
			mb();
			xgmac->config->ops->xgmac_flush_desc(rx_desc);
			xgmac->config->ops->xgmac_inval_buffer(packet, length);
			address = (ulong)(xgmac->rx_dma_buf +
					  (idx * XGMAC_MAX_PACKET_SIZE));
			rx_desc->des0 = lower_32_bits(address);
			rx_desc->des1 = upper_32_bits(address);
			rx_desc->des2 = 0;
			/*
			 * Make sure that if HW sees the _OWN write below,
			 * it will see all the writes to the rest of the
			 * descriptor too.
			 */
			mb();
			rx_desc->des3 = XGMAC_DESC3_OWN;
			xgmac->config->ops->xgmac_flush_desc(rx_desc);
		}
		writel((ulong)rx_desc, &xgmac->dma_regs->ch0_rxdesc_tail_pointer);
	}

	xgmac->rx_desc_idx++;
	xgmac->rx_desc_idx %= XGMAC_DESCRIPTORS_RX;

	return 0;
}

static int xgmac_probe_resources_core(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	unsigned int desc_step;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	/* Maximum distance between neighboring descriptors, in bytes. */
	desc_step = sizeof(struct xgmac_desc);

	if (desc_step < ARCH_DMA_MINALIGN) {
		/*
		 * The hardware implementation cannot place one descriptor
		 * per cacheline, so multiple descriptors are placed per
		 * cacheline and cache management must be done carefully.
		 */
		xgmac->desc_size = BIT(fls(desc_step) - 1);
	} else {
		xgmac->desc_size = ALIGN(sizeof(struct xgmac_desc),
					 (unsigned int)ARCH_DMA_MINALIGN);
	}
	xgmac->desc_per_cacheline = ARCH_DMA_MINALIGN / xgmac->desc_size;
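	/*
	 * Example: assuming a 16-byte descriptor (four u32 words) and 64-byte
	 * cache lines, desc_size stays 16 and desc_per_cacheline is 4, so
	 * xgmac_free_pkt() above recycles RX descriptors four at a time, one
	 * full cache line per flush.
	 */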

	xgmac->tx_descs = xgmac_alloc_descs(xgmac, XGMAC_DESCRIPTORS_TX);
	if (!xgmac->tx_descs) {
		debug("%s: xgmac_alloc_descs(tx) failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	xgmac->rx_descs = xgmac_alloc_descs(xgmac, XGMAC_DESCRIPTORS_RX);
	if (!xgmac->rx_descs) {
		debug("%s: xgmac_alloc_descs(rx) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_descs;
	}

	xgmac->tx_dma_buf = memalign(XGMAC_BUFFER_ALIGN, XGMAC_MAX_PACKET_SIZE);
	if (!xgmac->tx_dma_buf) {
		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_descs;
	}
	debug("%s: tx_dma_buf=%p\n", __func__, xgmac->tx_dma_buf);

	xgmac->rx_dma_buf = memalign(XGMAC_BUFFER_ALIGN, XGMAC_RX_BUFFER_SIZE);
	if (!xgmac->rx_dma_buf) {
		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_dma_buf;
	}
	debug("%s: rx_dma_buf=%p\n", __func__, xgmac->rx_dma_buf);

	xgmac->rx_pkt = malloc(XGMAC_MAX_PACKET_SIZE);
	if (!xgmac->rx_pkt) {
		debug("%s: malloc(rx_pkt) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_rx_dma_buf;
	}
	debug("%s: rx_pkt=%p\n", __func__, xgmac->rx_pkt);

	xgmac->config->ops->xgmac_inval_buffer(xgmac->rx_dma_buf,
					       XGMAC_MAX_PACKET_SIZE * XGMAC_DESCRIPTORS_RX);

	debug("%s: OK\n", __func__);
	return 0;

err_free_rx_dma_buf:
	free(xgmac->rx_dma_buf);
err_free_tx_dma_buf:
	free(xgmac->tx_dma_buf);
err_free_descs:
	xgmac_free_descs(xgmac->rx_descs);
err_free_tx_descs:
	xgmac_free_descs(xgmac->tx_descs);
err:

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int xgmac_remove_resources_core(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	free(xgmac->rx_pkt);
	free(xgmac->rx_dma_buf);
	free(xgmac->tx_dma_buf);
	xgmac_free_descs(xgmac->rx_descs);
	xgmac_free_descs(xgmac->tx_descs);

	debug("%s: OK\n", __func__);
	return 0;
}

/* Board-specific Ethernet interface initialization. */
__weak int board_interface_eth_init(struct udevice *dev,
				    phy_interface_t interface_type)
{
	return 0;
}

static int xgmac_probe(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	xgmac->dev = dev;
	xgmac->config = (void *)dev_get_driver_data(dev);

	xgmac->regs = dev_read_addr(dev);
	if (xgmac->regs == FDT_ADDR_T_NONE) {
		pr_err("%s dev_read_addr() failed\n", dev->name);
		return -ENODEV;
	}
	xgmac->mac_regs = (void *)(xgmac->regs + XGMAC_MAC_REGS_BASE);
	xgmac->mtl_regs = (void *)(xgmac->regs + XGMAC_MTL_REGS_BASE);
	xgmac->dma_regs = (void *)(xgmac->regs + XGMAC_DMA_REGS_BASE);

	xgmac->max_speed = dev_read_u32_default(dev, "max-speed", 0);

	ret = xgmac_probe_resources_core(dev);
	if (ret < 0) {
		pr_err("%s xgmac_probe_resources_core() failed: %d\n",
		       dev->name, ret);

		return ret;
	}

	ret = xgmac->config->ops->xgmac_probe_resources(dev);
	if (ret < 0) {
		pr_err("%s xgmac_probe_resources() failed: %d\n",
		       dev->name, ret);

		goto err_remove_resources_core;
	}

	ret = xgmac->config->ops->xgmac_start_clks(dev);
	if (ret < 0) {
		pr_err("%s xgmac_start_clks() failed: %d\n", dev->name, ret);
		goto err_remove_resources_core;
	}

	if (IS_ENABLED(CONFIG_DM_ETH_PHY))
		xgmac->mii = eth_phy_get_mdio_bus(dev);

	if (!xgmac->mii) {
		xgmac->mii = mdio_alloc();
		if (!xgmac->mii) {
			pr_err("%s mdio_alloc() failed\n", dev->name);
			ret = -ENOMEM;
			goto err_stop_clks;
		}
		xgmac->mii->read = xgmac_mdio_read;
		xgmac->mii->write = xgmac_mdio_write;
		xgmac->mii->priv = xgmac;
		strcpy(xgmac->mii->name, dev->name);

		ret = mdio_register(xgmac->mii);
		if (ret < 0) {
			pr_err("%s mdio_register() failed: %d\n",
			       dev->name, ret);

			goto err_free_mdio;
		}
	}

	if (IS_ENABLED(CONFIG_DM_ETH_PHY))
		eth_phy_set_mdio_bus(dev, xgmac->mii);

	debug("%s: OK\n", __func__);
	return 0;

err_free_mdio:
	mdio_free(xgmac->mii);
err_stop_clks:
	xgmac->config->ops->xgmac_stop_clks(dev);
err_remove_resources_core:
	xgmac_remove_resources_core(dev);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int xgmac_remove(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	mdio_unregister(xgmac->mii);
	mdio_free(xgmac->mii);
	xgmac->config->ops->xgmac_stop_clks(dev);
	xgmac->config->ops->xgmac_remove_resources(dev);

	xgmac_remove_resources_core(dev);

	debug("%s: OK\n", __func__);
	return 0;
}

int xgmac_null_ops(struct udevice *dev)
{
	return 0;
}

static const struct eth_ops xgmac_ops = {
	.start = xgmac_start,
	.stop = xgmac_stop,
	.send = xgmac_send,
	.recv = xgmac_recv,
	.free_pkt = xgmac_free_pkt,
	.write_hwaddr = xgmac_write_hwaddr,
	.read_rom_hwaddr = xgmac_read_rom_hwaddr,
};

static const struct udevice_id xgmac_ids[] = {
	{
		.compatible = "intel,socfpga-dwxgmac",
		.data = (ulong)&xgmac_socfpga_config
	},
	{ }
};
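
/*
 * Hypothetical device tree node this driver would bind against; the property
 * values are illustrative, and only "compatible", "reg", "phy-handle" and
 * "max-speed" are consumed by the core code above (configuration-specific
 * code may parse more):
 *
 *	ethernet@10830000 {
 *		compatible = "intel,socfpga-dwxgmac";
 *		reg = <0x10830000 0x3500>;
 *		phy-handle = <&phy0>;
 *		max-speed = <1000>;
 *	};
 */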

U_BOOT_DRIVER(eth_xgmac) = {
	.name = "eth_xgmac",
	.id = UCLASS_ETH,
	.of_match = of_match_ptr(xgmac_ids),
	.probe = xgmac_probe,
	.remove = xgmac_remove,
	.ops = &xgmac_ops,
	.priv_auto = sizeof(struct xgmac_priv),
	.plat_auto = sizeof(struct eth_pdata),
};