blob: f4e58093805d2c1652728651ae0067fb4e34f78b [file] [log] [blame]
Keerthya00b95c2019-07-09 10:30:34 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
4 *
5 * Copyright (C) 2019, Texas Instruments, Incorporated
6 *
7 */
8
9#include <common.h>
Simon Glass9bc15642020-02-03 07:36:16 -070010#include <malloc.h>
Simon Glass274e0b02020-05-10 11:39:56 -060011#include <asm/cache.h>
Suman Anna18e40be2023-08-02 13:47:26 +053012#include <asm/gpio.h>
Keerthya00b95c2019-07-09 10:30:34 +053013#include <asm/io.h>
14#include <asm/processor.h>
15#include <clk.h>
16#include <dm.h>
Simon Glass9bc15642020-02-03 07:36:16 -070017#include <dm/device_compat.h>
Keerthya00b95c2019-07-09 10:30:34 +053018#include <dm/lists.h>
Maxime Ripard028849d2023-07-24 15:57:30 +020019#include <dm/pinctrl.h>
Keerthya00b95c2019-07-09 10:30:34 +053020#include <dma-uclass.h>
21#include <dm/of_access.h>
22#include <miiphy.h>
23#include <net.h>
24#include <phy.h>
25#include <power-domain.h>
Roger Quadroscb8f8ad2023-07-22 22:31:48 +030026#include <regmap.h>
Ravi Gunasekaran1eb61912022-09-22 15:21:24 +053027#include <soc.h>
Roger Quadroscb8f8ad2023-07-22 22:31:48 +030028#include <syscon.h>
Simon Glass4dcacfc2020-05-10 11:40:13 -060029#include <linux/bitops.h>
Suman Anna18e40be2023-08-02 13:47:26 +053030#include <linux/delay.h>
Keerthya00b95c2019-07-09 10:30:34 +053031#include <linux/soc/ti/ti-udma.h>
32
33#include "cpsw_mdio.h"
34
Vignesh Raghavendrac5a66132021-05-10 20:06:09 +053035#define AM65_CPSW_CPSWNU_MAX_PORTS 9
Keerthya00b95c2019-07-09 10:30:34 +053036
37#define AM65_CPSW_SS_BASE 0x0
38#define AM65_CPSW_SGMII_BASE 0x100
39#define AM65_CPSW_MDIO_BASE 0xf00
40#define AM65_CPSW_XGMII_BASE 0x2100
41#define AM65_CPSW_CPSW_NU_BASE 0x20000
42#define AM65_CPSW_CPSW_NU_ALE_BASE 0x1e000
43
44#define AM65_CPSW_CPSW_NU_PORTS_OFFSET 0x1000
45#define AM65_CPSW_CPSW_NU_PORT_MACSL_OFFSET 0x330
46
47#define AM65_CPSW_MDIO_BUS_FREQ_DEF 1000000
48
49#define AM65_CPSW_CTL_REG 0x4
50#define AM65_CPSW_STAT_PORT_EN_REG 0x14
51#define AM65_CPSW_PTYPE_REG 0x18
52
53#define AM65_CPSW_CTL_REG_P0_ENABLE BIT(2)
54#define AM65_CPSW_CTL_REG_P0_TX_CRC_REMOVE BIT(13)
55#define AM65_CPSW_CTL_REG_P0_RX_PAD BIT(14)
56
57#define AM65_CPSW_P0_FLOW_ID_REG 0x8
58#define AM65_CPSW_PN_RX_MAXLEN_REG 0x24
59#define AM65_CPSW_PN_REG_SA_L 0x308
60#define AM65_CPSW_PN_REG_SA_H 0x30c
61
Siddharth Vadapalli726fc0a2023-08-02 13:47:25 +053062#define AM65_CPSW_SGMII_CONTROL_REG 0x010
63#define AM65_CPSW_SGMII_MR_ADV_ABILITY_REG 0x018
64#define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE BIT(0)
65
66#define ADVERTISE_SGMII 0x1
67
Keerthya00b95c2019-07-09 10:30:34 +053068#define AM65_CPSW_ALE_CTL_REG 0x8
69#define AM65_CPSW_ALE_CTL_REG_ENABLE BIT(31)
70#define AM65_CPSW_ALE_CTL_REG_RESET_TBL BIT(30)
71#define AM65_CPSW_ALE_CTL_REG_BYPASS BIT(4)
72#define AM65_CPSW_ALE_PN_CTL_REG(x) (0x40 + (x) * 4)
73#define AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD 0x3
74#define AM65_CPSW_ALE_PN_CTL_REG_MAC_ONLY BIT(11)
75
Vignesh Raghavendra5cb8a0f2020-07-06 13:36:53 +053076#define AM65_CPSW_ALE_THREADMAPDEF_REG 0x134
77#define AM65_CPSW_ALE_DEFTHREAD_EN BIT(15)
78
Keerthya00b95c2019-07-09 10:30:34 +053079#define AM65_CPSW_MACSL_CTL_REG 0x0
80#define AM65_CPSW_MACSL_CTL_REG_IFCTL_A BIT(15)
Murali Karicheri6565e902020-04-17 11:12:09 -040081#define AM65_CPSW_MACSL_CTL_EXT_EN BIT(18)
Keerthya00b95c2019-07-09 10:30:34 +053082#define AM65_CPSW_MACSL_CTL_REG_GIG BIT(7)
83#define AM65_CPSW_MACSL_CTL_REG_GMII_EN BIT(5)
84#define AM65_CPSW_MACSL_CTL_REG_LOOPBACK BIT(1)
85#define AM65_CPSW_MACSL_CTL_REG_FULL_DUPLEX BIT(0)
86#define AM65_CPSW_MACSL_RESET_REG 0x8
87#define AM65_CPSW_MACSL_RESET_REG_RESET BIT(0)
88#define AM65_CPSW_MACSL_STATUS_REG 0x4
89#define AM65_CPSW_MACSL_RESET_REG_PN_IDLE BIT(31)
90#define AM65_CPSW_MACSL_RESET_REG_PN_E_IDLE BIT(30)
91#define AM65_CPSW_MACSL_RESET_REG_PN_P_IDLE BIT(29)
92#define AM65_CPSW_MACSL_RESET_REG_PN_TX_IDLE BIT(28)
93#define AM65_CPSW_MACSL_RESET_REG_IDLE_MASK \
94 (AM65_CPSW_MACSL_RESET_REG_PN_IDLE | \
95 AM65_CPSW_MACSL_RESET_REG_PN_E_IDLE | \
96 AM65_CPSW_MACSL_RESET_REG_PN_P_IDLE | \
97 AM65_CPSW_MACSL_RESET_REG_PN_TX_IDLE)
98
99#define AM65_CPSW_CPPI_PKT_TYPE 0x7
100
Suman Anna18e40be2023-08-02 13:47:26 +0530101#define DEFAULT_GPIO_RESET_DELAY 10
102
/* Per-port register view and cached link state for one CPSW slave port */
struct am65_cpsw_port {
	fdt_addr_t port_base;		/* port register block base */
	fdt_addr_t port_sgmii_base;	/* SGMII control register base */
	fdt_addr_t macsl_base;		/* MAC sliver (MACSL) register base */
	bool disabled;			/* true if the port node is disabled in DT */
	u32 mac_control;		/* last value written to MACSL_CTL */
};
110
/*
 * State shared by all ports of one CPSW NUSS instance. Lives in the
 * parent (UCLASS_MISC) device's priv and is reached by the per-port
 * drivers through dev_get_priv(dev->parent).
 */
struct am65_cpsw_common {
	struct udevice *dev;		/* parent NUSS udevice */
	fdt_addr_t ss_base;		/* subsystem register base */
	fdt_addr_t cpsw_base;		/* CPSW_NU register base */
	fdt_addr_t mdio_base;		/* MDIO controller register base */
	fdt_addr_t ale_base;		/* address lookup engine (ALE) base */

	struct clk fclk;		/* functional clock */
	struct power_domain pwrdmn;	/* power domain handle */

	u32 port_num;			/* number of port nodes found in DT */
	struct am65_cpsw_port ports[AM65_CPSW_CPSWNU_MAX_PORTS];

	struct mii_dev *bus;		/* shared MDIO bus, created on first use */
	u32 bus_freq;			/* MDIO bus frequency ("bus_freq" DT prop) */

	struct gpio_desc mdio_gpio_reset;	/* optional bus-level PHY reset GPIO */
	u32 reset_delay_us;		/* reset assert duration */
	u32 reset_post_delay_us;	/* settle time after deassert */

	struct dma dma_tx;		/* TX DMA channel ("tx0") */
	struct dma dma_rx;		/* RX DMA channel ("rx") */
	u32 rx_next;			/* next RX ring slot to recycle */
	u32 rx_pend;			/* NOTE(review): zeroed in start(), never read here */
	bool started;			/* interface is up */
};
137
/* Private data of one slave-port (UCLASS_ETH) device */
struct am65_cpsw_priv {
	struct udevice *dev;			/* this port's udevice */
	struct am65_cpsw_common *cpsw_common;	/* shared switch state (parent priv) */
	u32 port_id;				/* external port number, from "reg" */

	struct phy_device *phydev;		/* attached PHY, set in phy_init */
	bool has_phy;				/* DT provided a usable phy-handle */
	ofnode phy_node;			/* PHY device-tree node */
	u32 phy_addr;				/* PHY address on the MDIO bus */

	bool mdio_manual_mode;			/* manual MDIO mode on affected SoCs */
};
150
151#ifdef PKTSIZE_ALIGN
152#define UDMA_RX_BUF_SIZE PKTSIZE_ALIGN
153#else
154#define UDMA_RX_BUF_SIZE ALIGN(1522, ARCH_DMA_MINALIGN)
155#endif
156
157#ifdef PKTBUFSRX
158#define UDMA_RX_DESC_NUM PKTBUFSRX
159#else
160#define UDMA_RX_DESC_NUM 4
161#endif
162
163#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
164 ((mac)[2] << 16) | ((mac)[3] << 24))
165#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
166
167static void am65_cpsw_set_sl_mac(struct am65_cpsw_port *slave,
168 unsigned char *addr)
169{
170 writel(mac_hi(addr),
171 slave->port_base + AM65_CPSW_PN_REG_SA_H);
172 writel(mac_lo(addr),
173 slave->port_base + AM65_CPSW_PN_REG_SA_L);
174}
175
176int am65_cpsw_macsl_reset(struct am65_cpsw_port *slave)
177{
178 u32 i = 100;
179
180 /* Set the soft reset bit */
181 writel(AM65_CPSW_MACSL_RESET_REG_RESET,
182 slave->macsl_base + AM65_CPSW_MACSL_RESET_REG);
183
184 while ((readl(slave->macsl_base + AM65_CPSW_MACSL_RESET_REG) &
185 AM65_CPSW_MACSL_RESET_REG_RESET) && i--)
186 cpu_relax();
187
188 /* Timeout on the reset */
189 return i;
190}
191
192static int am65_cpsw_macsl_wait_for_idle(struct am65_cpsw_port *slave)
193{
194 u32 i = 100;
195
196 while ((readl(slave->macsl_base + AM65_CPSW_MACSL_STATUS_REG) &
197 AM65_CPSW_MACSL_RESET_REG_IDLE_MASK) && i--)
198 cpu_relax();
199
200 return i;
201}
202
/*
 * Mirror the PHY's negotiated state (speed/duplex/interface) into the
 * port's MACSL control register and cache the written value.
 *
 * Returns non-zero if the PHY reports link up, 0 otherwise.
 */
static int am65_cpsw_update_link(struct am65_cpsw_priv *priv)
{
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct am65_cpsw_port *port = &common->ports[priv->port_id];
	struct phy_device *phy = priv->phydev;
	u32 mac_control = 0;

	if (phy->link) { /* link up */
		mac_control = /*AM65_CPSW_MACSL_CTL_REG_LOOPBACK |*/
			      AM65_CPSW_MACSL_CTL_REG_GMII_EN;
		if (phy->speed == 1000)
			mac_control |= AM65_CPSW_MACSL_CTL_REG_GIG;
		if (phy->speed == 10 && phy_interface_is_rgmii(phy))
			/* Can be used with in band mode only */
			mac_control |= AM65_CPSW_MACSL_CTL_EXT_EN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= AM65_CPSW_MACSL_CTL_REG_FULL_DUPLEX;
		if (phy->speed == 100)
			mac_control |= AM65_CPSW_MACSL_CTL_REG_IFCTL_A;
		if (phy->interface == PHY_INTERFACE_MODE_SGMII)
			mac_control |= AM65_CPSW_MACSL_CTL_EXT_EN;
	}

	/* Nothing changed since the last update; skip the register write */
	if (mac_control == port->mac_control)
		goto out;

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
		       priv->port_id, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", priv->port_id);
	}

	writel(mac_control, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
	port->mac_control = mac_control;

out:
	return phy->link;
}
243
Andreas Dannenberg1dc2ee62023-06-14 17:28:53 -0500244#define AM65_GMII_SEL_PORT_OFFS(x) (0x4 * ((x) - 1))
245
Keerthya00b95c2019-07-09 10:30:34 +0530246#define AM65_GMII_SEL_MODE_MII 0
247#define AM65_GMII_SEL_MODE_RMII 1
248#define AM65_GMII_SEL_MODE_RGMII 2
Siddharth Vadapalli726fc0a2023-08-02 13:47:25 +0530249#define AM65_GMII_SEL_MODE_SGMII 3
Keerthya00b95c2019-07-09 10:30:34 +0530250
251#define AM65_GMII_SEL_RGMII_IDMODE BIT(4)
252
/*
 * Program the SoC-level GMII_SEL mux for this port's PHY interface
 * mode. The mux register address comes from the first "phys" phandle
 * of the port node; the second "phys" cell selects the port offset.
 *
 * Returns 0 on success or a negative error if the DT references
 * cannot be resolved.
 */
static int am65_cpsw_gmii_sel_k3(struct am65_cpsw_priv *priv,
				 phy_interface_t phy_mode)
{
	struct udevice *dev = priv->dev;
	u32 offset, reg, phandle;
	bool rgmii_id = false;
	fdt_addr_t gmii_sel;
	u32 mode = 0;
	ofnode node;
	int ret;

	/* "phys" = <&phy_gmii_sel port_idx>; fetch phandle then port index */
	ret = ofnode_read_u32(dev_ofnode(dev), "phys", &phandle);
	if (ret)
		return ret;

	ret = ofnode_read_u32_index(dev_ofnode(dev), "phys", 1, &offset);
	if (ret)
		return ret;

	node = ofnode_get_by_phandle(phandle);
	if (!ofnode_valid(node))
		return -ENODEV;

	gmii_sel = ofnode_get_addr(node);
	if (gmii_sel == FDT_ADDR_T_NONE)
		return -ENODEV;

	/* Per-port register: one 32-bit mux register per external port */
	gmii_sel += AM65_GMII_SEL_PORT_OFFS(offset);
	reg = readl(gmii_sel);

	dev_dbg(dev, "old gmii_sel: %08x\n", reg);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		mode = AM65_GMII_SEL_MODE_RMII;
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		mode = AM65_GMII_SEL_MODE_RGMII;
		break;

	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		/* TX (or both) internal delay: enable RGMII ID mode */
		mode = AM65_GMII_SEL_MODE_RGMII;
		rgmii_id = true;
		break;

	case PHY_INTERFACE_MODE_SGMII:
		mode = AM65_GMII_SEL_MODE_SGMII;
		break;

	default:
		dev_warn(dev,
			 "Unsupported PHY mode: %u. Defaulting to MII.\n",
			 phy_mode);
		/* fallthrough */
	case PHY_INTERFACE_MODE_MII:
		mode = AM65_GMII_SEL_MODE_MII;
		break;
	};

	if (rgmii_id)
		mode |= AM65_GMII_SEL_RGMII_IDMODE;

	reg = mode;
	dev_dbg(dev, "gmii_sel PHY mode: %u, new gmii_sel: %08x\n",
		phy_mode, reg);
	writel(reg, gmii_sel);

	/*
	 * Read back to verify the mux accepted the mode.
	 * NOTE(review): a mismatch is only logged; the function still
	 * returns 0 (success) in both branches — confirm intentional.
	 */
	reg = readl(gmii_sel);
	if (reg != mode) {
		dev_err(dev,
			"gmii_sel PHY mode NOT SET!: requested: %08x, gmii_sel: %08x\n",
			mode, reg);
		return 0;
	}

	return 0;
}
333
/*
 * eth_ops.start: bring the interface up.
 *
 * Sequence: power domain + clock on, acquire and prime the TX/RX DMA
 * channels, configure host port 0 and the slave port (MAC address,
 * max length, ALE forwarding), reset the MAC sliver, optionally start
 * SGMII autonegotiation, then start the PHY and wait for link.
 *
 * On any failure, unwinds everything acquired so far via the goto
 * cleanup chain. Returns 0 on success, negative error otherwise.
 */
static int am65_cpsw_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct am65_cpsw_port *port = &common->ports[priv->port_id];
	struct am65_cpsw_port *port0 = &common->ports[0];
	struct ti_udma_drv_chan_cfg_data *dma_rx_cfg_data;
	int ret, i;

	ret = power_domain_on(&common->pwrdmn);
	if (ret) {
		dev_err(dev, "power_domain_on() failed %d\n", ret);
		goto out;
	}

	ret = clk_enable(&common->fclk);
	if (ret) {
		dev_err(dev, "clk enabled failed %d\n", ret);
		goto err_off_pwrdm;
	}

	common->rx_next = 0;
	common->rx_pend = 0;
	ret = dma_get_by_name(common->dev, "tx0", &common->dma_tx);
	if (ret) {
		dev_err(dev, "TX dma get failed %d\n", ret);
		goto err_off_clk;
	}
	ret = dma_get_by_name(common->dev, "rx", &common->dma_rx);
	if (ret) {
		dev_err(dev, "RX dma get failed %d\n", ret);
		goto err_free_tx;
	}

	/* Pre-post all RX buffers before enabling the channels */
	for (i = 0; i < UDMA_RX_DESC_NUM; i++) {
		ret = dma_prepare_rcv_buf(&common->dma_rx,
					  net_rx_packets[i],
					  UDMA_RX_BUF_SIZE);
		if (ret) {
			dev_err(dev, "RX dma add buf failed %d\n", ret);
			goto err_free_tx;
		}
	}

	ret = dma_enable(&common->dma_tx);
	if (ret) {
		dev_err(dev, "TX dma_enable failed %d\n", ret);
		goto err_free_rx;
	}
	ret = dma_enable(&common->dma_rx);
	if (ret) {
		dev_err(dev, "RX dma_enable failed %d\n", ret);
		goto err_dis_tx;
	}

	/* Control register */
	writel(AM65_CPSW_CTL_REG_P0_ENABLE |
	       AM65_CPSW_CTL_REG_P0_TX_CRC_REMOVE |
	       AM65_CPSW_CTL_REG_P0_RX_PAD,
	       common->cpsw_base + AM65_CPSW_CTL_REG);

	/* disable priority elevation */
	writel(0, common->cpsw_base + AM65_CPSW_PTYPE_REG);

	/* enable statistics on host port 0 and this slave port */
	writel(BIT(0) | BIT(priv->port_id),
	       common->cpsw_base + AM65_CPSW_STAT_PORT_EN_REG);

	/* Port 0 length register */
	writel(PKTSIZE_ALIGN, port0->port_base + AM65_CPSW_PN_RX_MAXLEN_REG);

	/* set base flow_id reported by the RX DMA channel */
	dma_get_cfg(&common->dma_rx, 0, (void **)&dma_rx_cfg_data);
	writel(dma_rx_cfg_data->flow_id_base,
	       port0->port_base + AM65_CPSW_P0_FLOW_ID_REG);
	dev_info(dev, "K3 CPSW: rflow_id_base: %u\n",
		 dma_rx_cfg_data->flow_id_base);

	/* Reset and enable the ALE */
	writel(AM65_CPSW_ALE_CTL_REG_ENABLE | AM65_CPSW_ALE_CTL_REG_RESET_TBL |
	       AM65_CPSW_ALE_CTL_REG_BYPASS,
	       common->ale_base + AM65_CPSW_ALE_CTL_REG);

	/* port 0 put into forward mode */
	writel(AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD,
	       common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));

	writel(AM65_CPSW_ALE_DEFTHREAD_EN,
	       common->ale_base + AM65_CPSW_ALE_THREADMAPDEF_REG);

	/* PORT x configuration */

	/* Port x Max length register */
	writel(PKTSIZE_ALIGN, port->port_base + AM65_CPSW_PN_RX_MAXLEN_REG);

	/* Port x set mac */
	am65_cpsw_set_sl_mac(port, pdata->enetaddr);

	/* Port x ALE: mac_only, Forwarding */
	writel(AM65_CPSW_ALE_PN_CTL_REG_MAC_ONLY |
	       AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD,
	       common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));

	port->mac_control = 0;
	if (!am65_cpsw_macsl_reset(port)) {
		dev_err(dev, "mac_sl reset failed\n");
		ret = -EFAULT;
		goto err_dis_rx;
	}

	/* SGMII: advertise and kick off MR autonegotiation */
	if (priv->phydev->interface == PHY_INTERFACE_MODE_SGMII) {
		writel(ADVERTISE_SGMII,
		       port->port_sgmii_base + AM65_CPSW_SGMII_MR_ADV_ABILITY_REG);
		writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
		       port->port_sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
	}

	ret = phy_startup(priv->phydev);
	if (ret) {
		dev_err(dev, "phy_startup failed\n");
		goto err_dis_rx;
	}

	/* update_link returns non-zero only when the link is up */
	ret = am65_cpsw_update_link(priv);
	if (!ret) {
		ret = -ENODEV;
		goto err_phy_shutdown;
	}

	common->started = true;

	return 0;

err_phy_shutdown:
	phy_shutdown(priv->phydev);
err_dis_rx:
	/* disable ports */
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));
	if (!am65_cpsw_macsl_wait_for_idle(port))
		dev_err(dev, "mac_sl idle timeout\n");
	writel(0, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
	writel(0, common->ale_base + AM65_CPSW_ALE_CTL_REG);
	writel(0, common->cpsw_base + AM65_CPSW_CTL_REG);

	dma_disable(&common->dma_rx);
err_dis_tx:
	dma_disable(&common->dma_tx);
err_free_rx:
	dma_free(&common->dma_rx);
err_free_tx:
	dma_free(&common->dma_tx);
err_off_clk:
	clk_disable(&common->fclk);
err_off_pwrdm:
	power_domain_off(&common->pwrdmn);
out:
	dev_err(dev, "%s end error\n", __func__);

	return ret;
}
496
497static int am65_cpsw_send(struct udevice *dev, void *packet, int length)
498{
499 struct am65_cpsw_priv *priv = dev_get_priv(dev);
500 struct am65_cpsw_common *common = priv->cpsw_common;
501 struct ti_udma_drv_packet_data packet_data;
502 int ret;
503
504 packet_data.pkt_type = AM65_CPSW_CPPI_PKT_TYPE;
505 packet_data.dest_tag = priv->port_id;
506 ret = dma_send(&common->dma_tx, packet, length, &packet_data);
507 if (ret) {
508 dev_err(dev, "TX dma_send failed %d\n", ret);
509 return ret;
510 }
511
512 return 0;
513}
514
515static int am65_cpsw_recv(struct udevice *dev, int flags, uchar **packetp)
516{
517 struct am65_cpsw_priv *priv = dev_get_priv(dev);
518 struct am65_cpsw_common *common = priv->cpsw_common;
519
520 /* try to receive a new packet */
521 return dma_receive(&common->dma_rx, (void **)packetp, NULL);
522}
523
524static int am65_cpsw_free_pkt(struct udevice *dev, uchar *packet, int length)
525{
526 struct am65_cpsw_priv *priv = dev_get_priv(dev);
527 struct am65_cpsw_common *common = priv->cpsw_common;
528 int ret;
529
530 if (length > 0) {
531 u32 pkt = common->rx_next % UDMA_RX_DESC_NUM;
532
533 ret = dma_prepare_rcv_buf(&common->dma_rx,
534 net_rx_packets[pkt],
535 UDMA_RX_BUF_SIZE);
536 if (ret)
537 dev_err(dev, "RX dma free_pkt failed %d\n", ret);
538 common->rx_next++;
539 }
540
541 return 0;
542}
543
/*
 * eth_ops.stop: bring the interface down.
 *
 * Shuts down the PHY, takes both the slave port and host port 0 out
 * of ALE forwarding, waits for the MAC sliver to go idle, disables
 * the ALE and the switch control register, then tears down both DMA
 * channels. No-op if the interface was never started.
 */
static void am65_cpsw_stop(struct udevice *dev)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct am65_cpsw_port *port = &common->ports[priv->port_id];

	if (!common->started)
		return;

	phy_shutdown(priv->phydev);

	/* Stop forwarding on both ports before touching the MAC sliver */
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));
	if (!am65_cpsw_macsl_wait_for_idle(port))
		dev_err(dev, "mac_sl idle timeout\n");
	writel(0, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
	writel(0, common->ale_base + AM65_CPSW_ALE_CTL_REG);
	writel(0, common->cpsw_base + AM65_CPSW_CTL_REG);

	dma_disable(&common->dma_tx);
	dma_free(&common->dma_tx);

	dma_disable(&common->dma_rx);
	dma_free(&common->dma_rx);

	common->started = false;
}
571
Roger Quadroscb8f8ad2023-07-22 22:31:48 +0300572static int am65_cpsw_am654_get_efuse_macid(struct udevice *dev,
573 int slave, u8 *mac_addr)
574{
575 u32 mac_lo, mac_hi, offset;
576 struct regmap *syscon;
577 int ret;
578
579 syscon = syscon_regmap_lookup_by_phandle(dev, "ti,syscon-efuse");
580 if (IS_ERR(syscon)) {
581 if (PTR_ERR(syscon) == -ENODEV)
582 return 0;
583 return PTR_ERR(syscon);
584 }
585
586 ret = dev_read_u32_index(dev, "ti,syscon-efuse", 1, &offset);
587 if (ret)
588 return ret;
589
590 regmap_read(syscon, offset, &mac_lo);
591 regmap_read(syscon, offset + 4, &mac_hi);
592
593 mac_addr[0] = (mac_hi >> 8) & 0xff;
594 mac_addr[1] = mac_hi & 0xff;
595 mac_addr[2] = (mac_lo >> 24) & 0xff;
596 mac_addr[3] = (mac_lo >> 16) & 0xff;
597 mac_addr[4] = (mac_lo >> 8) & 0xff;
598 mac_addr[5] = mac_lo & 0xff;
599
600 return 0;
601}
602
Keerthya00b95c2019-07-09 10:30:34 +0530603static int am65_cpsw_read_rom_hwaddr(struct udevice *dev)
604{
605 struct am65_cpsw_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700606 struct eth_pdata *pdata = dev_get_plat(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530607
Roger Quadroscb8f8ad2023-07-22 22:31:48 +0300608 am65_cpsw_am654_get_efuse_macid(dev,
609 priv->port_id,
610 pdata->enetaddr);
Keerthya00b95c2019-07-09 10:30:34 +0530611
612 return 0;
613}
614
/* Driver-model ethernet operations for one CPSW NUSS slave port */
static const struct eth_ops am65_cpsw_ops = {
	.start = am65_cpsw_start,
	.send = am65_cpsw_send,
	.recv = am65_cpsw_recv,
	.free_pkt = am65_cpsw_free_pkt,
	.stop = am65_cpsw_stop,
	.read_rom_hwaddr = am65_cpsw_read_rom_hwaddr,
};
623
/*
 * SoC family/revision combinations on which the MDIO bus must be run
 * in manual mode (see priv->mdio_manual_mode); presumably tied to an
 * MDIO erratum on these silicon revisions — TODO confirm against TI
 * errata documentation.
 */
static const struct soc_attr k3_mdio_soc_data[] = {
	{ .family = "AM62X", .revision = "SR1.0" },
	{ .family = "AM64X", .revision = "SR1.0" },
	{ .family = "AM64X", .revision = "SR2.0" },
	{ .family = "AM65X", .revision = "SR1.0" },
	{ .family = "AM65X", .revision = "SR2.0" },
	{ .family = "J7200", .revision = "SR1.0" },
	{ .family = "J7200", .revision = "SR2.0" },
	{ .family = "J721E", .revision = "SR1.0" },
	{ .family = "J721E", .revision = "SR1.1" },
	{ .family = "J721S2", .revision = "SR1.0" },
	{ /* sentinel */ },
};
637
Maxime Ripard028849d2023-07-24 15:57:30 +0200638static ofnode am65_cpsw_find_mdio(ofnode parent)
639{
640 ofnode node;
641
642 ofnode_for_each_subnode(node, parent)
643 if (ofnode_device_is_compatible(node, "ti,cpsw-mdio"))
644 return node;
645
646 return ofnode_null();
647}
648
/*
 * Probe the dummy MDIO device bound to the "ti,cpsw-mdio" subnode so
 * that its pinctrl settings get applied. Returns 0 when no MDIO
 * subnode exists, or the uclass lookup error otherwise.
 */
static int am65_cpsw_mdio_setup(struct udevice *dev)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *cpsw_common = priv->cpsw_common;
	struct udevice *mdio_dev;
	ofnode mdio;
	int ret;

	mdio = am65_cpsw_find_mdio(dev_ofnode(cpsw_common->dev));
	if (!ofnode_valid(mdio))
		return 0;

	/*
	 * The MDIO controller is represented in the DT binding by a
	 * subnode of the MAC controller.
	 *
	 * We don't have a DM driver for the MDIO device yet, and thus any
	 * pinctrl setting on its node will be ignored.
	 *
	 * However, we do need to make sure the pins states tied to the
	 * MDIO node are configured properly. Fortunately, the core DM
	 * does that for us when we get a device, so we can work around
	 * that whole issue by just requesting a dummy MDIO driver to
	 * probe, and our pins will get muxed.
	 */
	ret = uclass_get_device_by_ofnode(UCLASS_MDIO, mdio, &mdio_dev);
	if (ret)
		return ret;

	return 0;
}
680
/*
 * Lazily create the shared MDIO bus for this CPSW instance.
 *
 * Skipped when the port has no PHY or the bus already exists. Before
 * the first bus access, pulses the optional bus-level PHY reset GPIO
 * (assert, reset_delay_us, deassert, reset_post_delay_us), then makes
 * sure the MDIO pins are muxed and registers the bus.
 */
static int am65_cpsw_mdio_init(struct udevice *dev)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *cpsw_common = priv->cpsw_common;
	int ret;

	if (!priv->has_phy || cpsw_common->bus)
		return 0;

	if (IS_ENABLED(CONFIG_DM_GPIO)) {
		if (dm_gpio_is_valid(&cpsw_common->mdio_gpio_reset)) {
			/* Active pulse, then release; delays come from DT */
			dm_gpio_set_value(&cpsw_common->mdio_gpio_reset, 1);
			udelay(cpsw_common->reset_delay_us);
			dm_gpio_set_value(&cpsw_common->mdio_gpio_reset, 0);
			if (cpsw_common->reset_post_delay_us > 0)
				udelay(cpsw_common->reset_post_delay_us);
		}
	}

	ret = am65_cpsw_mdio_setup(dev);
	if (ret)
		return ret;

	cpsw_common->bus = cpsw_mdio_init(dev->name,
					  cpsw_common->mdio_base,
					  cpsw_common->bus_freq,
					  clk_get_rate(&cpsw_common->fclk),
					  priv->mdio_manual_mode);
	if (!cpsw_common->bus)
		return -EFAULT;

	return 0;
}
714
715static int am65_cpsw_phy_init(struct udevice *dev)
716{
717 struct am65_cpsw_priv *priv = dev_get_priv(dev);
718 struct am65_cpsw_common *cpsw_common = priv->cpsw_common;
Simon Glassfa20e932020-12-03 16:55:20 -0700719 struct eth_pdata *pdata = dev_get_plat(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530720 struct phy_device *phydev;
721 u32 supported = PHY_GBIT_FEATURES;
722 int ret;
723
724 phydev = phy_connect(cpsw_common->bus,
725 priv->phy_addr,
726 priv->dev,
727 pdata->phy_interface);
728
729 if (!phydev) {
730 dev_err(dev, "phy_connect() failed\n");
731 return -ENODEV;
732 }
733
734 phydev->supported &= supported;
735 if (pdata->max_speed) {
736 ret = phy_set_supported(phydev, pdata->max_speed);
737 if (ret)
738 return ret;
739 }
740 phydev->advertising = phydev->supported;
741
742 if (ofnode_valid(priv->phy_node))
743 phydev->node = priv->phy_node;
744
745 priv->phydev = phydev;
746 ret = phy_config(phydev);
747 if (ret < 0)
748 pr_err("phy_config() failed: %d", ret);
749
750 return ret;
751}
752
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530753static int am65_cpsw_ofdata_parse_phy(struct udevice *dev)
Keerthya00b95c2019-07-09 10:30:34 +0530754{
Simon Glassfa20e932020-12-03 16:55:20 -0700755 struct eth_pdata *pdata = dev_get_plat(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530756 struct am65_cpsw_priv *priv = dev_get_priv(dev);
757 struct ofnode_phandle_args out_args;
Keerthya00b95c2019-07-09 10:30:34 +0530758 int ret = 0;
759
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530760 dev_read_u32(dev, "reg", &priv->port_id);
761
Marek Behúnbc194772022-04-07 00:33:01 +0200762 pdata->phy_interface = dev_read_phy_mode(dev);
Marek Behún48631e42022-04-07 00:33:03 +0200763 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA) {
Marek Behúnbc194772022-04-07 00:33:01 +0200764 dev_err(dev, "Invalid PHY mode, port %u\n", priv->port_id);
765 return -EINVAL;
Keerthya00b95c2019-07-09 10:30:34 +0530766 }
767
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530768 dev_read_u32(dev, "max-speed", (u32 *)&pdata->max_speed);
Keerthya00b95c2019-07-09 10:30:34 +0530769 if (pdata->max_speed)
770 dev_err(dev, "Port %u speed froced to %uMbit\n",
771 priv->port_id, pdata->max_speed);
772
773 priv->has_phy = true;
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530774 ret = ofnode_parse_phandle_with_args(dev_ofnode(dev), "phy-handle",
Keerthya00b95c2019-07-09 10:30:34 +0530775 NULL, 0, 0, &out_args);
776 if (ret) {
777 dev_err(dev, "can't parse phy-handle port %u (%d)\n",
778 priv->port_id, ret);
779 priv->has_phy = false;
780 ret = 0;
781 }
782
783 priv->phy_node = out_args.node;
784 if (priv->has_phy) {
785 ret = ofnode_read_u32(priv->phy_node, "reg", &priv->phy_addr);
786 if (ret) {
787 dev_err(dev, "failed to get phy_addr port %u (%d)\n",
788 priv->port_id, ret);
789 goto out;
790 }
791 }
792
793out:
794 return ret;
795}
796
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530797static int am65_cpsw_port_probe(struct udevice *dev)
Keerthya00b95c2019-07-09 10:30:34 +0530798{
799 struct am65_cpsw_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700800 struct eth_pdata *pdata = dev_get_plat(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530801 struct am65_cpsw_common *cpsw_common;
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530802 char portname[15];
803 int ret;
Keerthya00b95c2019-07-09 10:30:34 +0530804
805 priv->dev = dev;
806
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530807 cpsw_common = dev_get_priv(dev->parent);
Keerthya00b95c2019-07-09 10:30:34 +0530808 priv->cpsw_common = cpsw_common;
809
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530810 sprintf(portname, "%s%s", dev->parent->name, dev->name);
811 device_set_name(dev, portname);
812
Ravi Gunasekaran1eb61912022-09-22 15:21:24 +0530813 priv->mdio_manual_mode = false;
814 if (soc_device_match(k3_mdio_soc_data))
815 priv->mdio_manual_mode = true;
816
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530817 ret = am65_cpsw_ofdata_parse_phy(dev);
818 if (ret)
819 goto out;
820
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300821 ret = am65_cpsw_gmii_sel_k3(priv, pdata->phy_interface);
822 if (ret)
823 goto out;
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530824
825 ret = am65_cpsw_mdio_init(dev);
826 if (ret)
827 goto out;
828
829 ret = am65_cpsw_phy_init(dev);
830 if (ret)
831 goto out;
832out:
833 return ret;
834}
835
/*
 * Probe the top-level CPSW NUSS device.
 *
 * Resolves register bases, power domain and clock, reads the optional
 * MDIO reset-GPIO configuration, walks the "ethernet-ports" subnodes
 * binding an am65_cpsw_nuss_port ETH device for each enabled non-zero
 * port, and precomputes per-port register bases.
 */
static int am65_cpsw_probe_nuss(struct udevice *dev)
{
	struct am65_cpsw_common *cpsw_common = dev_get_priv(dev);
	ofnode ports_np, node, mdio_np;
	int ret, i;
	struct udevice *port_dev;

	cpsw_common->dev = dev;
	cpsw_common->ss_base = dev_read_addr(dev);
	if (cpsw_common->ss_base == FDT_ADDR_T_NONE)
		return -EINVAL;

	ret = power_domain_get_by_index(dev, &cpsw_common->pwrdmn, 0);
	if (ret) {
		dev_err(dev, "failed to get pwrdmn: %d\n", ret);
		return ret;
	}

	ret = clk_get_by_name(dev, "fck", &cpsw_common->fclk);
	if (ret) {
		power_domain_free(&cpsw_common->pwrdmn);
		dev_err(dev, "failed to get clock %d\n", ret);
		return ret;
	}

	/* All sub-blocks are fixed offsets from the subsystem base */
	cpsw_common->cpsw_base = cpsw_common->ss_base + AM65_CPSW_CPSW_NU_BASE;
	cpsw_common->ale_base = cpsw_common->cpsw_base +
				AM65_CPSW_CPSW_NU_ALE_BASE;
	cpsw_common->mdio_base = cpsw_common->ss_base + AM65_CPSW_MDIO_BASE;

	if (IS_ENABLED(CONFIG_DM_GPIO)) {
		/* get bus level PHY reset GPIO details */
		mdio_np = dev_read_subnode(dev, "mdio");
		if (!ofnode_valid(mdio_np)) {
			ret = -ENOENT;
			goto out;
		}

		cpsw_common->reset_delay_us = ofnode_read_u32_default(mdio_np, "reset-delay-us",
								      DEFAULT_GPIO_RESET_DELAY);
		cpsw_common->reset_post_delay_us = ofnode_read_u32_default(mdio_np,
									   "reset-post-delay-us",
									   0);
		/*
		 * NOTE(review): failure here is not acted on (ret is
		 * overwritten below) — presumably the reset GPIO is
		 * optional; confirm.
		 */
		ret = gpio_request_by_name_nodev(mdio_np, "reset-gpios", 0,
						 &cpsw_common->mdio_gpio_reset,
						 GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
	}

	ports_np = dev_read_subnode(dev, "ethernet-ports");
	if (!ofnode_valid(ports_np)) {
		ret = -ENOENT;
		goto out;
	}

	ofnode_for_each_subnode(node, ports_np) {
		const char *node_name;
		u32 port_id;
		bool disabled;

		node_name = ofnode_get_name(node);

		disabled = !ofnode_is_enabled(node);

		ret = ofnode_read_u32(node, "reg", &port_id);
		if (ret) {
			dev_err(dev, "%s: failed to get port_id (%d)\n",
				node_name, ret);
			goto out;
		}

		if (port_id >= AM65_CPSW_CPSWNU_MAX_PORTS) {
			dev_err(dev, "%s: invalid port_id (%d)\n",
				node_name, port_id);
			ret = -EINVAL;
			goto out;
		}
		cpsw_common->port_num++;

		/* port 0 is the host port; no ETH device is bound for it */
		if (!port_id)
			continue;

		cpsw_common->ports[port_id].disabled = disabled;
		if (disabled)
			continue;

		ret = device_bind_driver_to_node(dev, "am65_cpsw_nuss_port", ofnode_get_name(node), node, &port_dev);
		if (ret)
			dev_err(dev, "Failed to bind to %s node\n", ofnode_get_name(node));
	}

	for (i = 0; i < AM65_CPSW_CPSWNU_MAX_PORTS; i++) {
		struct am65_cpsw_port *port = &cpsw_common->ports[i];

		port->port_base = cpsw_common->cpsw_base +
				  AM65_CPSW_CPSW_NU_PORTS_OFFSET +
				  (i * AM65_CPSW_CPSW_NU_PORTS_OFFSET);
		port->port_sgmii_base = cpsw_common->ss_base +
					(i * AM65_CPSW_SGMII_BASE);
		port->macsl_base = port->port_base +
				   AM65_CPSW_CPSW_NU_PORT_MACSL_OFFSET;
	}

	cpsw_common->bus_freq =
			dev_read_u32_default(dev, "bus_freq",
					     AM65_CPSW_MDIO_BUS_FREQ_DEF);

	dev_info(dev, "K3 CPSW: nuss_ver: 0x%08X cpsw_ver: 0x%08X ale_ver: 0x%08X Ports:%u mdio_freq:%u\n",
		 readl(cpsw_common->ss_base),
		 readl(cpsw_common->cpsw_base),
		 readl(cpsw_common->ale_base),
		 cpsw_common->port_num,
		 cpsw_common->bus_freq);

out:
	clk_free(&cpsw_common->fclk);
	power_domain_free(&cpsw_common->pwrdmn);
	return ret;
}
954
/* Compatible strings for the top-level CPSW NUSS subsystem */
static const struct udevice_id am65_cpsw_nuss_ids[] = {
	{ .compatible = "ti,am654-cpsw-nuss" },
	{ .compatible = "ti,j721e-cpsw-nuss" },
	{ .compatible = "ti,am642-cpsw-nuss" },
	{ }
};

/* Parent driver: probes the shared switch and binds one ETH device per port */
U_BOOT_DRIVER(am65_cpsw_nuss) = {
	.name = "am65_cpsw_nuss",
	.id = UCLASS_MISC,
	.of_match = am65_cpsw_nuss_ids,
	.probe = am65_cpsw_probe_nuss,
	.priv_auto = sizeof(struct am65_cpsw_common),
};
969
/* Per-port ethernet driver; instances are bound by am65_cpsw_probe_nuss() */
U_BOOT_DRIVER(am65_cpsw_nuss_port) = {
	.name = "am65_cpsw_nuss_port",
	.id = UCLASS_ETH,
	.probe = am65_cpsw_port_probe,
	.ops = &am65_cpsw_ops,
	.priv_auto = sizeof(struct am65_cpsw_priv),
	.plat_auto = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA | DM_FLAG_OS_PREPARE,
};
Maxime Ripard028849d2023-07-24 15:57:30 +0200979
static const struct udevice_id am65_cpsw_mdio_ids[] = {
	{ .compatible = "ti,cpsw-mdio" },
	{ }
};

/*
 * Dummy MDIO driver with no ops: it exists only so that probing the
 * MDIO subnode applies its pinctrl (see am65_cpsw_mdio_setup()).
 */
U_BOOT_DRIVER(am65_cpsw_mdio) = {
	.name = "am65_cpsw_mdio",
	.id = UCLASS_MDIO,
	.of_match = am65_cpsw_mdio_ids,
};