// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023, Intel Corporation.
 *
 * Portions based on U-Boot's dwc_eth_qos.c.
 */

/*
 * This driver supports the Synopsys DesignWare Ethernet XGMAC (10G Ethernet
 * MAC) IP block. The IP supports multiple options for bus type, clocking/
 * reset structure, and feature list.
 *
 * The driver is written such that generic core logic is kept separate from
 * configuration-specific logic. Code that interacts with configuration-
 * specific resources is split out into separate functions to avoid polluting
 * common code. If/when this driver is enhanced to support multiple
 * configurations, the core code should be adapted to call all configuration-
 * specific functions through function pointers, with the definition of those
 * function pointers being supplied by struct udevice_id xgmac_ids[]'s .data
 * field.
 *
 * This configuration uses an AXI master/DMA bus and an AHB slave/register
 * bus, contains the DMA, MTL, and MAC sub-blocks, and supports a single
 * RGMII PHY. This configuration also has SW control over all clock and reset
 * signals to the HW block.
 */
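
/*
 * As a sketch of that pattern (illustrative only: the hook names below are
 * the ones this file already calls through xgmac->config->ops, and the
 * struct definitions live in dwc_eth_xgmac.h), a second configuration would
 * be wired up roughly like this:
 *
 *	static const struct xgmac_ops some_soc_xgmac_ops = {
 *		.xgmac_probe_resources = some_soc_xgmac_probe_resources,
 *		.xgmac_start_resets = some_soc_xgmac_start_resets,
 *		.xgmac_calibrate_pads = some_soc_xgmac_calibrate_pads,
 *		...
 *	};
 *
 *	static const struct xgmac_config some_soc_xgmac_config = {
 *		.ops = &some_soc_xgmac_ops,
 *		...
 *	};
 *
 *	{ .compatible = "vendor,some-soc-dwxgmac",
 *	  .data = (ulong)&some_soc_xgmac_config },
 *
 * The "some_soc" names are placeholders; see xgmac_ids[] at the bottom of
 * this file for the one real entry (intel,socfpga-dwxgmac).
 */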

#define LOG_CATEGORY UCLASS_ETH

#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <eth_phy.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <phy.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <linux/delay.h>
#include "dwc_eth_xgmac.h"

static void *xgmac_alloc_descs(struct xgmac_priv *xgmac, unsigned int num)
{
	return memalign(ARCH_DMA_MINALIGN, num * xgmac->desc_size);
}

static void xgmac_free_descs(void *descs)
{
	free(descs);
}

static struct xgmac_desc *xgmac_get_desc(struct xgmac_priv *xgmac,
					 unsigned int num, bool rx)
{
	return (rx ? xgmac->rx_descs : xgmac->tx_descs) +
	       (num * xgmac->desc_size);
}

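/*
 * Descriptors and packet buffers are shared with the DMA engine, so the CPU
 * data cache must be maintained explicitly: flush (clean) a region after the
 * CPU writes it so the HW observes the update, and invalidate it before the
 * CPU reads data the HW has written. All ranges are rounded out to
 * ARCH_DMA_MINALIGN boundaries, as the cache operations require.
 */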
void xgmac_inval_desc_generic(void *desc)
{
	unsigned long start;
	unsigned long end;

	if (!desc) {
		pr_err("%s invalid input descriptor\n", __func__);
		return;
	}

	start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN(start + sizeof(struct xgmac_desc),
		    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

void xgmac_flush_desc_generic(void *desc)
{
	unsigned long start;
	unsigned long end;

	if (!desc) {
		pr_err("%s invalid input descriptor\n", __func__);
		return;
	}

	start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN(start + sizeof(struct xgmac_desc),
		    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

void xgmac_inval_buffer_generic(void *buf, size_t size)
{
	unsigned long start;
	unsigned long end;

	if (!buf) {
		pr_err("%s invalid input buffer\n", __func__);
		return;
	}

	start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN((unsigned long)buf + size,
		    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

void xgmac_flush_buffer_generic(void *buf, size_t size)
{
	unsigned long start;
	unsigned long end;

	if (!buf) {
		pr_err("%s invalid input buffer\n", __func__);
		return;
	}

	start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN((unsigned long)buf + size,
		    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

static int xgmac_mdio_wait_idle(struct xgmac_priv *xgmac)
{
	return wait_for_bit_le32(&xgmac->mac_regs->mdio_data,
				 XGMAC_MAC_MDIO_ADDRESS_SBUSY, false,
				 XGMAC_TIMEOUT_100MS, true);
}

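/*
 * Clause 22 MDIO transactions are issued through the XGMAC's MDIO master:
 * flag the target PHY address as a clause 22 port in mdio_clause_22_port,
 * program the port/register address into mdio_address, then write the
 * command, data and the SBUSY (start/busy) bit into mdio_data, and poll
 * SBUSY until the transfer completes.
 */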
static int xgmac_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			   int mdio_reg)
{
	struct xgmac_priv *xgmac = bus->priv;
	u32 val;
	u32 hw_addr;
	int ret;

	debug("%s(dev=%p, addr=0x%x, reg=%d):\n", __func__, xgmac->dev,
	      mdio_addr, mdio_reg);

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("%s MDIO not idle at entry: %d\n",
		       xgmac->dev->name, ret);

		return ret;
	}

	/* Set clause 22 format */
	val = BIT(mdio_addr);
	writel(val, &xgmac->mac_regs->mdio_clause_22_port);

	hw_addr = (mdio_addr << XGMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
		  (mdio_reg & XGMAC_MAC_MDIO_REG_ADDR_C22P_MASK);

	val = xgmac->config->config_mac_mdio <<
	      XGMAC_MAC_MDIO_ADDRESS_CR_SHIFT;

	val |= XGMAC_MAC_MDIO_ADDRESS_SADDR |
	       XGMAC_MDIO_SINGLE_CMD_ADDR_CMD_READ |
	       XGMAC_MAC_MDIO_ADDRESS_SBUSY;

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("%s MDIO not idle before command: %d\n",
		       xgmac->dev->name, ret);

		return ret;
	}

	writel(hw_addr, &xgmac->mac_regs->mdio_address);
	writel(val, &xgmac->mac_regs->mdio_data);

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("%s MDIO read didn't complete: %d\n",
		       xgmac->dev->name, ret);

		return ret;
	}

	val = readl(&xgmac->mac_regs->mdio_data);
	val &= XGMAC_MAC_MDIO_DATA_GD_MASK;

	debug("%s: val=0x%x\n", __func__, val);

	return val;
}

static int xgmac_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			    int mdio_reg, u16 mdio_val)
{
	struct xgmac_priv *xgmac = bus->priv;
	u32 val;
	u32 hw_addr;
	int ret;

	debug("%s(dev=%p, addr=0x%x, reg=%d, val=0x%x):\n", __func__,
	      xgmac->dev, mdio_addr, mdio_reg, mdio_val);

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("%s MDIO not idle at entry: %d\n",
		       xgmac->dev->name, ret);

		return ret;
	}

	/* Set clause 22 format */
	val = BIT(mdio_addr);
	writel(val, &xgmac->mac_regs->mdio_clause_22_port);

	hw_addr = (mdio_addr << XGMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
		  (mdio_reg & XGMAC_MAC_MDIO_REG_ADDR_C22P_MASK);

	hw_addr |= (mdio_reg >> XGMAC_MAC_MDIO_ADDRESS_PA_SHIFT) <<
		   XGMAC_MAC_MDIO_ADDRESS_DA_SHIFT;

	val = (xgmac->config->config_mac_mdio <<
	       XGMAC_MAC_MDIO_ADDRESS_CR_SHIFT);

	val |= XGMAC_MAC_MDIO_ADDRESS_SADDR |
	       mdio_val | XGMAC_MDIO_SINGLE_CMD_ADDR_CMD_WRITE |
	       XGMAC_MAC_MDIO_ADDRESS_SBUSY;

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("%s MDIO not idle before command: %d\n",
		       xgmac->dev->name, ret);

		return ret;
	}

	writel(hw_addr, &xgmac->mac_regs->mdio_address);
	writel(val, &xgmac->mac_regs->mdio_data);

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("%s MDIO write didn't complete: %d\n",
		       xgmac->dev->name, ret);

		return ret;
	}

	return 0;
}

static int xgmac_set_full_duplex(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&xgmac->mac_regs->mac_extended_conf,
		     XGMAC_MAC_EXT_CONF_HD);

	return 0;
}

static int xgmac_set_half_duplex(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&xgmac->mac_regs->mac_extended_conf,
		     XGMAC_MAC_EXT_CONF_HD);

	/* Workaround: flush the TX queue when switching to half-duplex */
	setbits_le32(&xgmac->mtl_regs->txq0_operation_mode,
		     XGMAC_MTL_TXQ0_OPERATION_MODE_FTQ);

	return 0;
}

static int xgmac_set_gmii_speed(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	u32 val;

	debug("%s(dev=%p):\n", __func__, dev);

	val = XGMAC_MAC_CONF_SS_1G_GMII << XGMAC_MAC_CONF_SS_SHIFT;
	writel(val, &xgmac->mac_regs->tx_configuration);

	return 0;
}

static int xgmac_set_mii_speed_100(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	u32 val;

	debug("%s(dev=%p):\n", __func__, dev);

	val = XGMAC_MAC_CONF_SS_100M_MII << XGMAC_MAC_CONF_SS_SHIFT;
	writel(val, &xgmac->mac_regs->tx_configuration);

	return 0;
}

static int xgmac_set_mii_speed_10(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	u32 val;

	debug("%s(dev=%p):\n", __func__, dev);

	val = XGMAC_MAC_CONF_SS_2_10M_MII << XGMAC_MAC_CONF_SS_SHIFT;
	writel(val, &xgmac->mac_regs->tx_configuration);

	return 0;
}

static int xgmac_adjust_link(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	int ret;
	bool en_calibration;

	debug("%s(dev=%p):\n", __func__, dev);

	if (xgmac->phy->duplex)
		ret = xgmac_set_full_duplex(dev);
	else
		ret = xgmac_set_half_duplex(dev);
	if (ret < 0) {
		pr_err("%s xgmac_set_*_duplex() failed: %d\n", dev->name, ret);
		return ret;
	}

	switch (xgmac->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = xgmac_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = xgmac_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = xgmac_set_mii_speed_10(dev);
		break;
	default:
		pr_err("%s invalid speed %d\n", dev->name, xgmac->phy->speed);
		return -EINVAL;
	}
	if (ret < 0) {
		pr_err("%s xgmac_set_*mii_speed*() failed: %d\n", dev->name,
		       ret);
		return ret;
	}

	if (en_calibration) {
		ret = xgmac->config->ops->xgmac_calibrate_pads(dev);
		if (ret < 0) {
			pr_err("%s xgmac_calibrate_pads() failed: %d\n",
			       dev->name, ret);

			return ret;
		}
	} else {
		ret = xgmac->config->ops->xgmac_disable_calibration(dev);
		if (ret < 0) {
			pr_err("%s xgmac_disable_calibration() failed: %d\n",
			       dev->name, ret);

			return ret;
		}
	}

	return 0;
}

static int xgmac_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_plat(dev);
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	u32 val;

	/*
	 * This function may be called before start() or after stop(). At that
	 * time, on at least some configurations of the XGMAC HW, all clocks to
	 * the XGMAC HW block will be stopped, and a reset signal applied. If
	 * any register access is attempted in this state, bus timeouts or CPU
	 * hangs may occur. This check prevents that.
	 *
	 * A simple solution to this problem would be to not implement
	 * write_hwaddr(), since start() always writes the MAC address into HW
	 * anyway. However, it is desirable to implement write_hwaddr() to
	 * support the case of SW that runs subsequent to U-Boot which expects
	 * the MAC address to already be programmed into the XGMAC registers,
	 * which must happen irrespective of whether the U-Boot user (or
	 * scripts) actually made use of the XGMAC device, and hence
	 * irrespective of whether start() was ever called.
	 */
	if (!xgmac->config->reg_access_always_ok && !xgmac->reg_access_ok)
		return 0;

	/* Update the MAC address */
	val = (plat->enetaddr[5] << 8) |
	      (plat->enetaddr[4]);
	writel(val, &xgmac->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
	      (plat->enetaddr[2] << 16) |
	      (plat->enetaddr[1] << 8) |
	      (plat->enetaddr[0]);
	writel(val, &xgmac->mac_regs->address0_low);
	return 0;
}

static int xgmac_read_rom_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	int ret;

	ret = xgmac->config->ops->xgmac_get_enetaddr(dev);
	if (ret < 0)
		return ret;

	return !is_valid_ethaddr(pdata->enetaddr);
}

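/*
 * The attached PHY is located through the standard "phy-handle" device tree
 * binding. A minimal sketch of the expected wiring (node names and the MDIO
 * address are illustrative):
 *
 *	ethernet@0 {
 *		compatible = "intel,socfpga-dwxgmac";
 *		phy-handle = <&phy0>;
 *	};
 *
 *	phy0: ethernet-phy@0 {
 *		reg = <0>;
 *	};
 *
 * The "reg" property of the referenced PHY node is the MDIO address
 * returned by xgmac_get_phy_addr() below.
 */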
static int xgmac_get_phy_addr(struct xgmac_priv *priv, struct udevice *dev)
{
	struct ofnode_phandle_args phandle_args;
	int reg;

	if (dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
				       &phandle_args)) {
		debug("Failed to find phy-handle\n");
		return -ENODEV;
	}

	priv->phy_of_node = phandle_args.node;

	reg = ofnode_read_u32_default(phandle_args.node, "reg", 0);

	return reg;
}

static int xgmac_start(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	int ret, i;
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;
	ulong desc_pad;

	struct xgmac_desc *tx_desc = NULL;
	struct xgmac_desc *rx_desc = NULL;
	int addr = -1;

	debug("%s(dev=%p):\n", __func__, dev);

	xgmac->tx_desc_idx = 0;
	xgmac->rx_desc_idx = 0;

	ret = xgmac->config->ops->xgmac_start_resets(dev);
	if (ret < 0) {
		pr_err("%s xgmac_start_resets() failed: %d\n", dev->name, ret);
		goto err;
	}

	xgmac->reg_access_ok = true;

	ret = wait_for_bit_le32(&xgmac->dma_regs->mode,
				XGMAC_DMA_MODE_SWR, false,
				xgmac->config->swr_wait, false);
	if (ret) {
		pr_err("%s XGMAC_DMA_MODE_SWR stuck: %d\n", dev->name, ret);
		goto err_stop_resets;
	}

	ret = xgmac->config->ops->xgmac_calibrate_pads(dev);
	if (ret < 0) {
		pr_err("%s xgmac_calibrate_pads() failed: %d\n", dev->name,
		       ret);
		goto err_stop_resets;
	}

	/*
	 * If the PHY was already connected and configured, there is no need
	 * to reconnect/reconfigure it again.
	 */
	if (!xgmac->phy) {
		addr = xgmac_get_phy_addr(xgmac, dev);
		xgmac->phy = phy_connect(xgmac->mii, addr, dev,
					 xgmac->config->interface(dev));
		if (!xgmac->phy) {
			pr_err("%s phy_connect() failed\n", dev->name);
			ret = -ENODEV;
			goto err_stop_resets;
		}

		if (xgmac->max_speed) {
			ret = phy_set_supported(xgmac->phy, xgmac->max_speed);
			if (ret) {
				pr_err("%s phy_set_supported() failed: %d\n",
				       dev->name, ret);

				goto err_shutdown_phy;
			}
		}

		xgmac->phy->node = xgmac->phy_of_node;
		ret = phy_config(xgmac->phy);
		if (ret < 0) {
			pr_err("%s phy_config() failed: %d\n", dev->name, ret);
			goto err_shutdown_phy;
		}
	}

	ret = phy_startup(xgmac->phy);
	if (ret < 0) {
		pr_err("%s phy_startup() failed: %d\n", dev->name, ret);
		goto err_shutdown_phy;
	}

	if (!xgmac->phy->link) {
		pr_err("%s No link\n", dev->name);
		ret = -EAGAIN;
		goto err_shutdown_phy;
	}

	ret = xgmac_adjust_link(dev);
	if (ret < 0) {
		pr_err("%s xgmac_adjust_link() failed: %d\n", dev->name, ret);
		goto err_shutdown_phy;
	}

	/* Configure MTL */

	/* Enable Store and Forward mode for TX */
	/* Program Tx operating mode */
	setbits_le32(&xgmac->mtl_regs->txq0_operation_mode,
		     XGMAC_MTL_TXQ0_OPERATION_MODE_TSF |
		     (XGMAC_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
		      XGMAC_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));

	/* Transmit Queue weight */
	writel(0x10, &xgmac->mtl_regs->txq0_quantum_weight);

	/* Enable Store and Forward mode for RX, since no jumbo frame */
	setbits_le32(&xgmac->mtl_regs->rxq0_operation_mode,
		     XGMAC_MTL_RXQ0_OPERATION_MODE_RSF);

	/* Transmit/Receive queue FIFO size; use all RAM for 1 queue */
	val = readl(&xgmac->mac_regs->hw_feature1);
	tx_fifo_sz = (val >> XGMAC_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
		     XGMAC_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
	rx_fifo_sz = (val >> XGMAC_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
		     XGMAC_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

	/*
	 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
	 * r/tqs is encoded as (n / 256) - 1.
	 */
	tqs = (128 << tx_fifo_sz) / 256 - 1;
	rqs = (128 << rx_fifo_sz) / 256 - 1;
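	/*
	 * Worked example (illustrative numbers): a 4 KiB TX FIFO is
	 * advertised as tx_fifo_sz = 5, since 128 << 5 == 4096, and yields
	 * tqs = 4096 / 256 - 1 = 15.
	 */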

	clrsetbits_le32(&xgmac->mtl_regs->txq0_operation_mode,
			XGMAC_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			XGMAC_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << XGMAC_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&xgmac->mtl_regs->rxq0_operation_mode,
			XGMAC_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			XGMAC_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << XGMAC_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	setbits_le32(&xgmac->mtl_regs->rxq0_operation_mode,
		     XGMAC_MTL_RXQ0_OPERATION_MODE_EHFC);

	/* Configure MAC */
	clrsetbits_le32(&xgmac->mac_regs->rxq_ctrl0,
			XGMAC_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			XGMAC_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			xgmac->config->config_mac <<
			XGMAC_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/* Multicast and Broadcast Queue Enable */
	setbits_le32(&xgmac->mac_regs->rxq_ctrl1,
		     XGMAC_MAC_RXQ_CTRL1_MCBCQEN);

	/* Enable promiscuous mode and receive-all mode */
	setbits_le32(&xgmac->mac_regs->mac_packet_filter,
		     XGMAC_MAC_PACKET_FILTER_RA |
		     XGMAC_MAC_PACKET_FILTER_PR);

	/* Set TX flow control parameters */
	/* Set Pause Time */
	setbits_le32(&xgmac->mac_regs->q0_tx_flow_ctrl,
		     XGMAC_MAC_Q0_TX_FLOW_CTRL_PT_MASK <<
		     XGMAC_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);

	/* Assign priority for RX flow control */
	clrbits_le32(&xgmac->mac_regs->rxq_ctrl2,
		     XGMAC_MAC_RXQ_CTRL2_PSRQ0_MASK <<
		     XGMAC_MAC_RXQ_CTRL2_PSRQ0_SHIFT);

	/* Enable flow control */
	setbits_le32(&xgmac->mac_regs->q0_tx_flow_ctrl,
		     XGMAC_MAC_Q0_TX_FLOW_CTRL_TFE);
	setbits_le32(&xgmac->mac_regs->rx_flow_ctrl,
		     XGMAC_MAC_RX_FLOW_CTRL_RFE);

	clrbits_le32(&xgmac->mac_regs->tx_configuration,
		     XGMAC_MAC_CONF_JD);

	clrbits_le32(&xgmac->mac_regs->rx_configuration,
		     XGMAC_MAC_CONF_JE |
		     XGMAC_MAC_CONF_GPSLCE |
		     XGMAC_MAC_CONF_WD);

	setbits_le32(&xgmac->mac_regs->rx_configuration,
		     XGMAC_MAC_CONF_ACS |
		     XGMAC_MAC_CONF_CST);

	ret = xgmac_write_hwaddr(dev);
	if (ret < 0) {
		pr_err("%s xgmac_write_hwaddr() failed: %d\n", dev->name, ret);
		goto err_shutdown_phy;
	}

	/* Configure DMA */
	clrsetbits_le32(&xgmac->dma_regs->sysbus_mode,
			XGMAC_DMA_SYSBUS_MODE_AAL,
			XGMAC_DMA_SYSBUS_MODE_EAME |
			XGMAC_DMA_SYSBUS_MODE_UNDEF);

	/* Enable OSP mode */
	setbits_le32(&xgmac->dma_regs->ch0_tx_control,
		     XGMAC_DMA_CH0_TX_CONTROL_OSP);

	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&xgmac->dma_regs->ch0_rx_control,
			XGMAC_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			XGMAC_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			XGMAC_MAX_PACKET_SIZE <<
			XGMAC_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	desc_pad = (xgmac->desc_size - sizeof(struct xgmac_desc)) /
		   xgmac->config->axi_bus_width;

	setbits_le32(&xgmac->dma_regs->ch0_control,
		     XGMAC_DMA_CH0_CONTROL_PBLX8 |
		     (desc_pad << XGMAC_DMA_CH0_CONTROL_DSL_SHIFT));

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == n * 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;
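	/*
	 * E.g. with tqs = 15 (a 4 KiB FIFO) this gives pbl = 16, i.e.
	 * 16 * 8 (PBLX8) = 128 beats of 16 bytes (AXI width) == 2 KiB per
	 * burst, exactly half the FIFO.
	 */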

	clrsetbits_le32(&xgmac->dma_regs->ch0_tx_control,
			XGMAC_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			XGMAC_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << XGMAC_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);

	clrsetbits_le32(&xgmac->dma_regs->ch0_rx_control,
			XGMAC_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			XGMAC_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << XGMAC_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

	/* DMA performance configuration */
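	/*
	 * Allow 4/8/16/32-beat bursts and raise the read/write
	 * outstanding-request limits to their maximum values.
	 */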
	val = (XGMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK <<
	       XGMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
	      (XGMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_MASK <<
	       XGMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_SHIFT) |
	      XGMAC_DMA_SYSBUS_MODE_EAME |
	      XGMAC_DMA_SYSBUS_MODE_BLEN16 |
	      XGMAC_DMA_SYSBUS_MODE_BLEN8 |
	      XGMAC_DMA_SYSBUS_MODE_BLEN4 |
	      XGMAC_DMA_SYSBUS_MODE_BLEN32;

	writel(val, &xgmac->dma_regs->sysbus_mode);

	/* Set up descriptors */

	memset(xgmac->tx_descs, 0, xgmac->desc_size * XGMAC_DESCRIPTORS_TX);
	memset(xgmac->rx_descs, 0, xgmac->desc_size * XGMAC_DESCRIPTORS_RX);

	for (i = 0; i < XGMAC_DESCRIPTORS_TX; i++) {
		tx_desc = (struct xgmac_desc *)xgmac_get_desc(xgmac, i, false);

		xgmac->config->ops->xgmac_flush_desc(tx_desc);
	}

	for (i = 0; i < XGMAC_DESCRIPTORS_RX; i++) {
		rx_desc = (struct xgmac_desc *)xgmac_get_desc(xgmac, i, true);

		rx_desc->des0 = (uintptr_t)(xgmac->rx_dma_buf +
					    (i * XGMAC_MAX_PACKET_SIZE));
		rx_desc->des3 = XGMAC_DESC3_OWN;
		/* Flush the cache to the memory */
		mb();
		xgmac->config->ops->xgmac_flush_desc(rx_desc);
		xgmac->config->ops->xgmac_inval_buffer(xgmac->rx_dma_buf +
						       (i * XGMAC_MAX_PACKET_SIZE),
						       XGMAC_MAX_PACKET_SIZE);
	}

	writel(0, &xgmac->dma_regs->ch0_txdesc_list_haddress);
	writel((ulong)xgmac_get_desc(xgmac, 0, false),
	       &xgmac->dma_regs->ch0_txdesc_list_address);
	writel(XGMAC_DESCRIPTORS_TX - 1,
	       &xgmac->dma_regs->ch0_txdesc_ring_length);
	writel(0, &xgmac->dma_regs->ch0_rxdesc_list_haddress);
	writel((ulong)xgmac_get_desc(xgmac, 0, true),
	       &xgmac->dma_regs->ch0_rxdesc_list_address);
	writel(XGMAC_DESCRIPTORS_RX - 1,
	       &xgmac->dma_regs->ch0_rxdesc_ring_length);

	/* Enable everything */
	setbits_le32(&xgmac->dma_regs->ch0_tx_control,
		     XGMAC_DMA_CH0_TX_CONTROL_ST);
	setbits_le32(&xgmac->dma_regs->ch0_rx_control,
		     XGMAC_DMA_CH0_RX_CONTROL_SR);
	setbits_le32(&xgmac->mac_regs->tx_configuration,
		     XGMAC_MAC_CONF_TE);
	setbits_le32(&xgmac->mac_regs->rx_configuration,
		     XGMAC_MAC_CONF_RE);

	/* The TX tail pointer is not written until we need to TX a packet */
	/*
	 * Point the RX tail pointer at the last descriptor. Ideally, we'd
	 * point at the first descriptor, implying all descriptors were
	 * available. However, that's not distinguishable from none of the
	 * descriptors being available.
	 */
	last_rx_desc = (ulong)xgmac_get_desc(xgmac, XGMAC_DESCRIPTORS_RX - 1,
					     true);
	writel(last_rx_desc, &xgmac->dma_regs->ch0_rxdesc_tail_pointer);

	xgmac->started = true;

	debug("%s: OK\n", __func__);
	return 0;

err_shutdown_phy:
	phy_shutdown(xgmac->phy);
err_stop_resets:
	xgmac->config->ops->xgmac_stop_resets(dev);
err:
	pr_err("%s FAILED: %d\n", dev->name, ret);
	return ret;
}

static void xgmac_stop(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	unsigned long start_time;
	u32 val;
	u32 trcsts;
	u32 txqsts;
	u32 prxq;
	u32 rxqsts;

	debug("%s(dev=%p):\n", __func__, dev);

	if (!xgmac->started)
		return;
	xgmac->started = false;
	xgmac->reg_access_ok = false;

	/* Disable TX DMA */
	clrbits_le32(&xgmac->dma_regs->ch0_tx_control,
		     XGMAC_DMA_CH0_TX_CONTROL_ST);

	/* Wait for all TX packets to drain out of MTL */
	start_time = get_timer(0);

	while (get_timer(start_time) < XGMAC_TIMEOUT_100MS) {
		val = readl(&xgmac->mtl_regs->txq0_debug);

		trcsts = (val >> XGMAC_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			 XGMAC_MTL_TXQ0_DEBUG_TRCSTS_MASK;

		txqsts = val & XGMAC_MTL_TXQ0_DEBUG_TXQSTS;

		if (trcsts != XGMAC_MTL_TXQ0_DEBUG_TRCSTS_READ_STATE &&
		    !txqsts)
			break;
	}

	/* Turn off MAC TX and RX */
	clrbits_le32(&xgmac->mac_regs->tx_configuration,
		     XGMAC_MAC_CONF_TE);
	clrbits_le32(&xgmac->mac_regs->rx_configuration,
		     XGMAC_MAC_CONF_RE);

	/* Wait for all RX packets to drain out of MTL */
	start_time = get_timer(0);

	while (get_timer(start_time) < XGMAC_TIMEOUT_100MS) {
		val = readl(&xgmac->mtl_regs->rxq0_debug);

		prxq = (val >> XGMAC_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
		       XGMAC_MTL_RXQ0_DEBUG_PRXQ_MASK;

		rxqsts = (val >> XGMAC_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
			 XGMAC_MTL_RXQ0_DEBUG_RXQSTS_MASK;

		if (!prxq && !rxqsts)
			break;
	}

	/* Turn off RX DMA */
	clrbits_le32(&xgmac->dma_regs->ch0_rx_control,
		     XGMAC_DMA_CH0_RX_CONTROL_SR);

	if (xgmac->phy)
		phy_shutdown(xgmac->phy);

	xgmac->config->ops->xgmac_stop_resets(dev);

	debug("%s: OK\n", __func__);
}

static int xgmac_send(struct udevice *dev, void *packet, int length)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	struct xgmac_desc *tx_desc;
	unsigned long start_time;

	debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
	      length);

	memcpy(xgmac->tx_dma_buf, packet, length);
	xgmac->config->ops->xgmac_flush_buffer(xgmac->tx_dma_buf, length);

	tx_desc = xgmac_get_desc(xgmac, xgmac->tx_desc_idx, false);
	xgmac->tx_desc_idx++;
	xgmac->tx_desc_idx %= XGMAC_DESCRIPTORS_TX;

	tx_desc->des0 = (ulong)xgmac->tx_dma_buf;
	tx_desc->des1 = 0;
	tx_desc->des2 = length;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	tx_desc->des3 = XGMAC_DESC3_OWN | XGMAC_DESC3_FD | XGMAC_DESC3_LD |
			length;
	xgmac->config->ops->xgmac_flush_desc(tx_desc);

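	/*
	 * Advance the channel tail pointer past the descriptor just queued;
	 * this is the doorbell that tells the DMA engine to fetch and
	 * transmit it.
	 */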
	writel((ulong)xgmac_get_desc(xgmac, xgmac->tx_desc_idx, false),
	       &xgmac->dma_regs->ch0_txdesc_tail_pointer);

	start_time = get_timer(0);

	while (get_timer(start_time) < XGMAC_TIMEOUT_100MS) {
		xgmac->config->ops->xgmac_inval_desc(tx_desc);
		if (!(readl(&tx_desc->des3) & XGMAC_DESC3_OWN))
			return 0;
	}
	debug("%s: TX timeout\n", __func__);

	return -ETIMEDOUT;
}

static int xgmac_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	struct xgmac_desc *rx_desc;
	int length;

	debug("%s(dev=%p, flags=0x%x):\n", __func__, dev, flags);

	rx_desc = xgmac_get_desc(xgmac, xgmac->rx_desc_idx, true);
	xgmac->config->ops->xgmac_inval_desc(rx_desc);
	if (rx_desc->des3 & XGMAC_DESC3_OWN) {
		debug("%s: RX packet not available\n", __func__);
		return -EAGAIN;
	}

	*packetp = xgmac->rx_dma_buf +
		   (xgmac->rx_desc_idx * XGMAC_MAX_PACKET_SIZE);
	length = rx_desc->des3 & XGMAC_RDES3_PKT_LENGTH_MASK;
	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);

	xgmac->config->ops->xgmac_inval_buffer(*packetp, length);

	return length;
}

static int xgmac_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	u32 idx, idx_mask = xgmac->desc_per_cacheline - 1;
	uchar *packet_expected;
	struct xgmac_desc *rx_desc;

	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);

	packet_expected = xgmac->rx_dma_buf +
			  (xgmac->rx_desc_idx * XGMAC_MAX_PACKET_SIZE);
	if (packet != packet_expected) {
		debug("%s: Unexpected packet (expected %p)\n", __func__,
		      packet_expected);
		return -EINVAL;
	}

	xgmac->config->ops->xgmac_inval_buffer(packet, length);

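	/*
	 * Several descriptors can share one cacheline (see
	 * xgmac_probe_resources_core()), so RX descriptors are only recycled
	 * once a whole cacheline's worth has been consumed; flushing a
	 * partially recycled cacheline could clobber descriptors the HW
	 * still owns.
	 */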
	if ((xgmac->rx_desc_idx & idx_mask) == idx_mask) {
		for (idx = xgmac->rx_desc_idx - idx_mask;
		     idx <= xgmac->rx_desc_idx;
		     idx++) {
			rx_desc = xgmac_get_desc(xgmac, idx, true);
			rx_desc->des0 = 0;
			/* Flush the cache to the memory */
			mb();
			xgmac->config->ops->xgmac_flush_desc(rx_desc);
			xgmac->config->ops->xgmac_inval_buffer(packet, length);
			rx_desc->des0 = (u32)(ulong)(xgmac->rx_dma_buf +
					(idx * XGMAC_MAX_PACKET_SIZE));
			rx_desc->des1 = 0;
			rx_desc->des2 = 0;
			/*
			 * Make sure that if HW sees the _OWN write below,
			 * it will see all the writes to the rest of the
			 * descriptor too.
			 */
			mb();
			rx_desc->des3 = XGMAC_DESC3_OWN;
			xgmac->config->ops->xgmac_flush_desc(rx_desc);
		}
		writel((ulong)rx_desc,
		       &xgmac->dma_regs->ch0_rxdesc_tail_pointer);
	}

	xgmac->rx_desc_idx++;
	xgmac->rx_desc_idx %= XGMAC_DESCRIPTORS_RX;

	return 0;
}

static int xgmac_probe_resources_core(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	unsigned int desc_step;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	/* Maximum distance between neighboring descriptors, in bytes. */
	desc_step = sizeof(struct xgmac_desc);

	if (desc_step < ARCH_DMA_MINALIGN) {
		/*
		 * The hardware implementation cannot place one descriptor
		 * per cacheline, so it is necessary to place multiple
		 * descriptors per cacheline in memory and do cache
		 * management carefully.
		 */
		xgmac->desc_size = BIT(fls(desc_step) - 1);
	} else {
		xgmac->desc_size = ALIGN(sizeof(struct xgmac_desc),
					 (unsigned int)ARCH_DMA_MINALIGN);
	}
	xgmac->desc_per_cacheline = ARCH_DMA_MINALIGN / xgmac->desc_size;
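	/*
	 * For instance, with a 16-byte descriptor and a 64-byte cacheline
	 * (illustrative values), desc_size = 16 and desc_per_cacheline = 4,
	 * so xgmac_free_pkt() hands descriptors back to the HW four at a
	 * time.
	 */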

	xgmac->tx_descs = xgmac_alloc_descs(xgmac, XGMAC_DESCRIPTORS_TX);
	if (!xgmac->tx_descs) {
		debug("%s: xgmac_alloc_descs(tx) failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	xgmac->rx_descs = xgmac_alloc_descs(xgmac, XGMAC_DESCRIPTORS_RX);
	if (!xgmac->rx_descs) {
		debug("%s: xgmac_alloc_descs(rx) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_descs;
	}

	xgmac->tx_dma_buf = memalign(XGMAC_BUFFER_ALIGN, XGMAC_MAX_PACKET_SIZE);
	if (!xgmac->tx_dma_buf) {
		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_descs;
	}
	debug("%s: tx_dma_buf=%p\n", __func__, xgmac->tx_dma_buf);

	xgmac->rx_dma_buf = memalign(XGMAC_BUFFER_ALIGN, XGMAC_RX_BUFFER_SIZE);
	if (!xgmac->rx_dma_buf) {
		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_dma_buf;
	}
	debug("%s: rx_dma_buf=%p\n", __func__, xgmac->rx_dma_buf);

	xgmac->rx_pkt = malloc(XGMAC_MAX_PACKET_SIZE);
	if (!xgmac->rx_pkt) {
		debug("%s: malloc(rx_pkt) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_rx_dma_buf;
	}
	debug("%s: rx_pkt=%p\n", __func__, xgmac->rx_pkt);

	xgmac->config->ops->xgmac_inval_buffer(xgmac->rx_dma_buf,
					       XGMAC_MAX_PACKET_SIZE *
					       XGMAC_DESCRIPTORS_RX);

	debug("%s: OK\n", __func__);
	return 0;

err_free_rx_dma_buf:
	free(xgmac->rx_dma_buf);
err_free_tx_dma_buf:
	free(xgmac->tx_dma_buf);
err_free_descs:
	xgmac_free_descs(xgmac->rx_descs);
err_free_tx_descs:
	xgmac_free_descs(xgmac->tx_descs);
err:
	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int xgmac_remove_resources_core(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	free(xgmac->rx_pkt);
	free(xgmac->rx_dma_buf);
	free(xgmac->tx_dma_buf);
	xgmac_free_descs(xgmac->rx_descs);
	xgmac_free_descs(xgmac->tx_descs);

	debug("%s: OK\n", __func__);
	return 0;
}

/* Board-specific Ethernet interface initialization */
__weak int board_interface_eth_init(struct udevice *dev,
				    phy_interface_t interface_type)
{
	return 0;
}

static int xgmac_probe(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	xgmac->dev = dev;
	xgmac->config = (void *)dev_get_driver_data(dev);

	xgmac->regs = dev_read_addr(dev);
	if (xgmac->regs == FDT_ADDR_T_NONE) {
		pr_err("%s dev_read_addr() failed\n", dev->name);
		return -ENODEV;
	}
	xgmac->mac_regs = (void *)(xgmac->regs + XGMAC_MAC_REGS_BASE);
	xgmac->mtl_regs = (void *)(xgmac->regs + XGMAC_MTL_REGS_BASE);
	xgmac->dma_regs = (void *)(xgmac->regs + XGMAC_DMA_REGS_BASE);

	xgmac->max_speed = dev_read_u32_default(dev, "max-speed", 0);

	ret = xgmac_probe_resources_core(dev);
	if (ret < 0) {
		pr_err("%s xgmac_probe_resources_core() failed: %d\n",
		       dev->name, ret);

		return ret;
	}

	ret = xgmac->config->ops->xgmac_probe_resources(dev);
	if (ret < 0) {
		pr_err("%s xgmac_probe_resources() failed: %d\n",
		       dev->name, ret);

		goto err_remove_resources_core;
	}

	ret = xgmac->config->ops->xgmac_start_clks(dev);
	if (ret < 0) {
		pr_err("%s xgmac_start_clks() failed: %d\n", dev->name, ret);
		goto err_remove_resources_core;
	}

	if (IS_ENABLED(CONFIG_DM_ETH_PHY))
		xgmac->mii = eth_phy_get_mdio_bus(dev);

	if (!xgmac->mii) {
		xgmac->mii = mdio_alloc();
		if (!xgmac->mii) {
			pr_err("%s mdio_alloc() failed\n", dev->name);
			ret = -ENOMEM;
			goto err_stop_clks;
		}
		xgmac->mii->read = xgmac_mdio_read;
		xgmac->mii->write = xgmac_mdio_write;
		xgmac->mii->priv = xgmac;
		strcpy(xgmac->mii->name, dev->name);

		ret = mdio_register(xgmac->mii);
		if (ret < 0) {
			pr_err("%s mdio_register() failed: %d\n",
			       dev->name, ret);

			goto err_free_mdio;
		}
	}

	if (IS_ENABLED(CONFIG_DM_ETH_PHY))
		eth_phy_set_mdio_bus(dev, xgmac->mii);

	debug("%s: OK\n", __func__);
	return 0;

err_free_mdio:
	mdio_free(xgmac->mii);
err_stop_clks:
	xgmac->config->ops->xgmac_stop_clks(dev);
err_remove_resources_core:
	xgmac_remove_resources_core(dev);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int xgmac_remove(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	mdio_unregister(xgmac->mii);
	mdio_free(xgmac->mii);
	xgmac->config->ops->xgmac_stop_clks(dev);
	xgmac->config->ops->xgmac_remove_resources(dev);

	xgmac_remove_resources_core(dev);

	debug("%s: OK\n", __func__);
	return 0;
}

int xgmac_null_ops(struct udevice *dev)
{
	return 0;
}

static const struct eth_ops xgmac_ops = {
	.start = xgmac_start,
	.stop = xgmac_stop,
	.send = xgmac_send,
	.recv = xgmac_recv,
	.free_pkt = xgmac_free_pkt,
	.write_hwaddr = xgmac_write_hwaddr,
	.read_rom_hwaddr = xgmac_read_rom_hwaddr,
};

static const struct udevice_id xgmac_ids[] = {
	{
		.compatible = "intel,socfpga-dwxgmac",
		.data = (ulong)&xgmac_socfpga_config
	},
	{ }
};

U_BOOT_DRIVER(eth_xgmac) = {
	.name = "eth_xgmac",
	.id = UCLASS_ETH,
	.of_match = of_match_ptr(xgmac_ids),
	.probe = xgmac_probe,
	.remove = xgmac_remove,
	.ops = &xgmac_ops,
	.priv_auto = sizeof(struct xgmac_priv),
	.plat_auto = sizeof(struct eth_pdata),
};