blob: 18a33c4c0e3c7c0ddb7a46dba11360319ccd1310 [file] [log] [blame]
Keerthya00b95c2019-07-09 10:30:34 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
4 *
5 * Copyright (C) 2019, Texas Instruments, Incorporated
6 *
7 */
8
9#include <common.h>
Simon Glass9bc15642020-02-03 07:36:16 -070010#include <malloc.h>
Simon Glass274e0b02020-05-10 11:39:56 -060011#include <asm/cache.h>
Suman Anna18e40be2023-08-02 13:47:26 +053012#include <asm/gpio.h>
Keerthya00b95c2019-07-09 10:30:34 +053013#include <asm/io.h>
14#include <asm/processor.h>
15#include <clk.h>
16#include <dm.h>
Simon Glass9bc15642020-02-03 07:36:16 -070017#include <dm/device_compat.h>
Keerthya00b95c2019-07-09 10:30:34 +053018#include <dm/lists.h>
Maxime Ripard028849d2023-07-24 15:57:30 +020019#include <dm/pinctrl.h>
Keerthya00b95c2019-07-09 10:30:34 +053020#include <dma-uclass.h>
21#include <dm/of_access.h>
22#include <miiphy.h>
23#include <net.h>
24#include <phy.h>
25#include <power-domain.h>
Roger Quadroscb8f8ad2023-07-22 22:31:48 +030026#include <regmap.h>
Ravi Gunasekaran1eb61912022-09-22 15:21:24 +053027#include <soc.h>
Roger Quadroscb8f8ad2023-07-22 22:31:48 +030028#include <syscon.h>
Simon Glass4dcacfc2020-05-10 11:40:13 -060029#include <linux/bitops.h>
Suman Anna18e40be2023-08-02 13:47:26 +053030#include <linux/delay.h>
Simon Glassbdd5f812023-09-14 18:21:46 -060031#include <linux/printk.h>
Keerthya00b95c2019-07-09 10:30:34 +053032#include <linux/soc/ti/ti-udma.h>
33
34#include "cpsw_mdio.h"
35
/* Host port 0 plus up to 8 slave ports */
#define AM65_CPSW_CPSWNU_MAX_PORTS 9

/* Sub-module offsets inside the CPSW NUSS register space */
#define AM65_CPSW_SS_BASE 0x0
#define AM65_CPSW_SGMII_BASE 0x100
#define AM65_CPSW_MDIO_BASE 0xf00
#define AM65_CPSW_XGMII_BASE 0x2100
#define AM65_CPSW_CPSW_NU_BASE 0x20000
#define AM65_CPSW_CPSW_NU_ALE_BASE 0x1e000

/* Per-port register stride / MAC sliver offset within a port window */
#define AM65_CPSW_CPSW_NU_PORTS_OFFSET 0x1000
#define AM65_CPSW_CPSW_NU_PORT_MACSL_OFFSET 0x330

/* Default MDIO bus frequency (Hz) when DT gives no "bus_freq" */
#define AM65_CPSW_MDIO_BUS_FREQ_DEF 1000000

/* CPSW_NU global control registers */
#define AM65_CPSW_CTL_REG 0x4
#define AM65_CPSW_STAT_PORT_EN_REG 0x14
#define AM65_CPSW_PTYPE_REG 0x18

#define AM65_CPSW_CTL_REG_P0_ENABLE BIT(2)
#define AM65_CPSW_CTL_REG_P0_TX_CRC_REMOVE BIT(13)
#define AM65_CPSW_CTL_REG_P0_RX_PAD BIT(14)

/* Per-port registers (relative to the port window) */
#define AM65_CPSW_P0_FLOW_ID_REG 0x8
#define AM65_CPSW_PN_RX_MAXLEN_REG 0x24
#define AM65_CPSW_PN_REG_SA_L 0x308
#define AM65_CPSW_PN_REG_SA_H 0x30c

/* Per-port SGMII sub-module registers */
#define AM65_CPSW_SGMII_CONTROL_REG 0x010
#define AM65_CPSW_SGMII_MR_ADV_ABILITY_REG 0x018
#define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE BIT(0)

#define ADVERTISE_SGMII 0x1

/* ALE (address lookup engine) registers */
#define AM65_CPSW_ALE_CTL_REG 0x8
#define AM65_CPSW_ALE_CTL_REG_ENABLE BIT(31)
#define AM65_CPSW_ALE_CTL_REG_RESET_TBL BIT(30)
#define AM65_CPSW_ALE_CTL_REG_BYPASS BIT(4)
#define AM65_CPSW_ALE_PN_CTL_REG(x) (0x40 + (x) * 4)
#define AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD 0x3
#define AM65_CPSW_ALE_PN_CTL_REG_MAC_ONLY BIT(11)

#define AM65_CPSW_ALE_THREADMAPDEF_REG 0x134
#define AM65_CPSW_ALE_DEFTHREAD_EN BIT(15)

/* MAC sliver registers (one sliver per slave port) */
#define AM65_CPSW_MACSL_CTL_REG 0x0
#define AM65_CPSW_MACSL_CTL_REG_IFCTL_A BIT(15)
#define AM65_CPSW_MACSL_CTL_EXT_EN BIT(18)
#define AM65_CPSW_MACSL_CTL_REG_GIG BIT(7)
#define AM65_CPSW_MACSL_CTL_REG_GMII_EN BIT(5)
#define AM65_CPSW_MACSL_CTL_REG_LOOPBACK BIT(1)
#define AM65_CPSW_MACSL_CTL_REG_FULL_DUPLEX BIT(0)
#define AM65_CPSW_MACSL_RESET_REG 0x8
#define AM65_CPSW_MACSL_RESET_REG_RESET BIT(0)
#define AM65_CPSW_MACSL_STATUS_REG 0x4
#define AM65_CPSW_MACSL_RESET_REG_PN_IDLE BIT(31)
#define AM65_CPSW_MACSL_RESET_REG_PN_E_IDLE BIT(30)
#define AM65_CPSW_MACSL_RESET_REG_PN_P_IDLE BIT(29)
#define AM65_CPSW_MACSL_RESET_REG_PN_TX_IDLE BIT(28)
#define AM65_CPSW_MACSL_RESET_REG_IDLE_MASK \
	(AM65_CPSW_MACSL_RESET_REG_PN_IDLE | \
	 AM65_CPSW_MACSL_RESET_REG_PN_E_IDLE | \
	 AM65_CPSW_MACSL_RESET_REG_PN_P_IDLE | \
	 AM65_CPSW_MACSL_RESET_REG_PN_TX_IDLE)

/* CPPI packet type used for TX descriptors */
#define AM65_CPSW_CPPI_PKT_TYPE 0x7

/* Default MDIO PHY reset assert time (us) when DT gives no delay */
#define DEFAULT_GPIO_RESET_DELAY 10
103
/* Per-slave-port state: register windows plus the cached MAC control value */
struct am65_cpsw_port {
	fdt_addr_t port_base;		/* CPSW_NU per-port register window */
	fdt_addr_t port_sgmii_base;	/* per-port SGMII sub-module */
	fdt_addr_t macsl_base;		/* MAC sliver inside the port window */
	bool disabled;			/* DT node status is "disabled" */
	u32 mac_control;		/* last value written to MACSL_CTL */
};

/* State shared by all ports of one CPSW NUSS instance (parent device priv) */
struct am65_cpsw_common {
	struct udevice *dev;		/* top-level (UCLASS_MISC) device */
	fdt_addr_t ss_base;		/* subsystem base from DT "reg" */
	fdt_addr_t cpsw_base;		/* ss_base + CPSW_NU offset */
	fdt_addr_t mdio_base;		/* ss_base + MDIO offset */
	fdt_addr_t ale_base;		/* cpsw_base + ALE offset */

	struct clk fclk;		/* functional clock ("fck") */
	struct power_domain pwrdmn;

	u32 port_num;			/* number of port nodes found in DT */
	struct am65_cpsw_port ports[AM65_CPSW_CPSWNU_MAX_PORTS];

	struct mii_dev *bus;		/* MDIO bus, shared across ports */
	u32 bus_freq;

	/* Optional bus-level PHY reset GPIO (from the "mdio" subnode) */
	struct gpio_desc mdio_gpio_reset;
	u32 reset_delay_us;
	u32 reset_post_delay_us;

	struct dma dma_tx;
	struct dma dma_rx;
	u32 rx_next;			/* next RX ring slot to re-arm */
	u32 rx_pend;			/* NOTE(review): set in start() but not otherwise used here */
	bool started;
};

/* Per-port (UCLASS_ETH child) state */
struct am65_cpsw_priv {
	struct udevice *dev;
	struct am65_cpsw_common *cpsw_common;	/* parent's priv */
	u32 port_id;			/* DT "reg"; 0 is the host port */

	struct phy_device *phydev;
	bool has_phy;			/* false when no usable "phy-handle" */
	ofnode phy_node;
	u32 phy_addr;

	bool mdio_manual_mode;		/* workaround for affected SoC revisions */
};

/* RX buffer size: follow the network stack's packet sizing when available */
#ifdef PKTSIZE_ALIGN
#define UDMA_RX_BUF_SIZE PKTSIZE_ALIGN
#else
#define UDMA_RX_BUF_SIZE ALIGN(1522, ARCH_DMA_MINALIGN)
#endif

/* Number of RX descriptors: follow the stack's RX buffer count when available */
#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM 4
#endif

/* Pack a 6-byte MAC address into the SA_H (bytes 0-3) / SA_L (bytes 4-5) layout */
#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
		     ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
167
168static void am65_cpsw_set_sl_mac(struct am65_cpsw_port *slave,
169 unsigned char *addr)
170{
171 writel(mac_hi(addr),
172 slave->port_base + AM65_CPSW_PN_REG_SA_H);
173 writel(mac_lo(addr),
174 slave->port_base + AM65_CPSW_PN_REG_SA_L);
175}
176
177int am65_cpsw_macsl_reset(struct am65_cpsw_port *slave)
178{
179 u32 i = 100;
180
181 /* Set the soft reset bit */
182 writel(AM65_CPSW_MACSL_RESET_REG_RESET,
183 slave->macsl_base + AM65_CPSW_MACSL_RESET_REG);
184
185 while ((readl(slave->macsl_base + AM65_CPSW_MACSL_RESET_REG) &
186 AM65_CPSW_MACSL_RESET_REG_RESET) && i--)
187 cpu_relax();
188
189 /* Timeout on the reset */
190 return i;
191}
192
193static int am65_cpsw_macsl_wait_for_idle(struct am65_cpsw_port *slave)
194{
195 u32 i = 100;
196
197 while ((readl(slave->macsl_base + AM65_CPSW_MACSL_STATUS_REG) &
198 AM65_CPSW_MACSL_RESET_REG_IDLE_MASK) && i--)
199 cpu_relax();
200
201 return i;
202}
203
/*
 * Translate the PHY's negotiated state into a MACSL_CTL value and apply it
 * if it changed. Prints a link-up/down message on every change.
 *
 * Returns the PHY link state (non-zero = link up).
 */
static int am65_cpsw_update_link(struct am65_cpsw_priv *priv)
{
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct am65_cpsw_port *port = &common->ports[priv->port_id];
	struct phy_device *phy = priv->phydev;
	u32 mac_control = 0;

	if (phy->link) { /* link up */
		mac_control = AM65_CPSW_MACSL_CTL_REG_GMII_EN;
		if (phy->speed == 1000)
			mac_control |= AM65_CPSW_MACSL_CTL_REG_GIG;
		if (phy->speed == 10 && phy_interface_is_rgmii(phy))
			/* Can be used with in band mode only */
			mac_control |= AM65_CPSW_MACSL_CTL_EXT_EN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= AM65_CPSW_MACSL_CTL_REG_FULL_DUPLEX;
		if (phy->speed == 100)
			mac_control |= AM65_CPSW_MACSL_CTL_REG_IFCTL_A;
		/* SGMII always uses in-band (external) link control */
		if (phy->interface == PHY_INTERFACE_MODE_SGMII)
			mac_control |= AM65_CPSW_MACSL_CTL_EXT_EN;
	}
	/* link down leaves mac_control == 0, disabling the sliver below */

	if (mac_control == port->mac_control)
		goto out;

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
		       priv->port_id, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", priv->port_id);
	}

	writel(mac_control, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
	port->mac_control = mac_control;

out:
	return phy->link;
}
244
/* Per-port GMII_SEL register: port N lives at offset 4*(N-1) */
#define AM65_GMII_SEL_PORT_OFFS(x)	(0x4 * ((x) - 1))

#define AM65_GMII_SEL_MODE_MII		0
#define AM65_GMII_SEL_MODE_RMII		1
#define AM65_GMII_SEL_MODE_RGMII	2
#define AM65_GMII_SEL_MODE_SGMII	3

#define AM65_GMII_SEL_RGMII_IDMODE	BIT(4)

/*
 * Configure the SoC-level GMII_SEL mux for this port according to
 * @phy_mode. The mux register address comes from the port's "phys"
 * phandle (phandle target address + per-port offset).
 *
 * Returns 0 on success or a negative errno if the DT lookup fails.
 */
static int am65_cpsw_gmii_sel_k3(struct am65_cpsw_priv *priv,
				 phy_interface_t phy_mode)
{
	struct udevice *dev = priv->dev;
	u32 offset, reg, phandle;
	bool rgmii_id = false;
	fdt_addr_t gmii_sel;
	u32 mode = 0;
	ofnode node;
	int ret;

	/* "phys" is <&phandle port_index>: read both cells */
	ret = ofnode_read_u32(dev_ofnode(dev), "phys", &phandle);
	if (ret)
		return ret;

	ret = ofnode_read_u32_index(dev_ofnode(dev), "phys", 1, &offset);
	if (ret)
		return ret;

	node = ofnode_get_by_phandle(phandle);
	if (!ofnode_valid(node))
		return -ENODEV;

	gmii_sel = ofnode_get_addr(node);
	if (gmii_sel == FDT_ADDR_T_NONE)
		return -ENODEV;

	gmii_sel += AM65_GMII_SEL_PORT_OFFS(offset);
	reg = readl(gmii_sel);

	dev_dbg(dev, "old gmii_sel: %08x\n", reg);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		mode = AM65_GMII_SEL_MODE_RMII;
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		mode = AM65_GMII_SEL_MODE_RGMII;
		break;

	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		/* TX internal delay is applied by the MAC side */
		mode = AM65_GMII_SEL_MODE_RGMII;
		rgmii_id = true;
		break;

	case PHY_INTERFACE_MODE_SGMII:
		mode = AM65_GMII_SEL_MODE_SGMII;
		break;

	default:
		dev_warn(dev,
			 "Unsupported PHY mode: %u. Defaulting to MII.\n",
			 phy_mode);
		/* fallthrough */
	case PHY_INTERFACE_MODE_MII:
		mode = AM65_GMII_SEL_MODE_MII;
		break;
	};

	if (rgmii_id)
		mode |= AM65_GMII_SEL_RGMII_IDMODE;

	reg = mode;
	dev_dbg(dev, "gmii_sel PHY mode: %u, new gmii_sel: %08x\n",
		phy_mode, reg);
	writel(reg, gmii_sel);

	/* Read back to verify the mux accepted the mode */
	reg = readl(gmii_sel);
	if (reg != mode) {
		dev_err(dev,
			"gmii_sel PHY mode NOT SET!: requested: %08x, gmii_sel: %08x\n",
			mode, reg);
		/* NOTE(review): the mismatch is reported but deliberately
		 * not propagated as an error — confirm this is intended. */
		return 0;
	}

	return 0;
}
334
/*
 * eth_ops.start: full bring-up of the switch for one port.
 *
 * Sequence: power domain + clock on, acquire and arm the TX/RX DMA
 * channels, program the global CPSW control/ALE registers, configure the
 * slave port (MTU, MAC address, ALE mode, MAC sliver reset, optional
 * SGMII autoneg), then start the PHY and wait for link.
 *
 * Returns 0 on success; on any failure everything acquired so far is
 * unwound via the goto-cleanup chain and a negative errno is returned.
 */
static int am65_cpsw_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct am65_cpsw_port *port = &common->ports[priv->port_id];
	struct am65_cpsw_port *port0 = &common->ports[0];
	struct ti_udma_drv_chan_cfg_data *dma_rx_cfg_data;
	int ret, i;

	ret = power_domain_on(&common->pwrdmn);
	if (ret) {
		dev_err(dev, "power_domain_on() failed %d\n", ret);
		goto out;
	}

	ret = clk_enable(&common->fclk);
	if (ret) {
		dev_err(dev, "clk enabled failed %d\n", ret);
		goto err_off_pwrdm;
	}

	common->rx_next = 0;
	common->rx_pend = 0;
	ret = dma_get_by_name(common->dev, "tx0", &common->dma_tx);
	if (ret) {
		dev_err(dev, "TX dma get failed %d\n", ret);
		goto err_off_clk;
	}
	ret = dma_get_by_name(common->dev, "rx", &common->dma_rx);
	if (ret) {
		dev_err(dev, "RX dma get failed %d\n", ret);
		goto err_free_tx;
	}

	/* Pre-arm the RX ring with the stack's receive buffers */
	for (i = 0; i < UDMA_RX_DESC_NUM; i++) {
		ret = dma_prepare_rcv_buf(&common->dma_rx,
					  net_rx_packets[i],
					  UDMA_RX_BUF_SIZE);
		if (ret) {
			dev_err(dev, "RX dma add buf failed %d\n", ret);
			goto err_free_tx;
		}
	}

	ret = dma_enable(&common->dma_tx);
	if (ret) {
		dev_err(dev, "TX dma_enable failed %d\n", ret);
		goto err_free_rx;
	}
	ret = dma_enable(&common->dma_rx);
	if (ret) {
		dev_err(dev, "RX dma_enable failed %d\n", ret);
		goto err_dis_tx;
	}

	/* Control register: enable host port, strip CRC, pad short frames */
	writel(AM65_CPSW_CTL_REG_P0_ENABLE |
	       AM65_CPSW_CTL_REG_P0_TX_CRC_REMOVE |
	       AM65_CPSW_CTL_REG_P0_RX_PAD,
	       common->cpsw_base + AM65_CPSW_CTL_REG);

	/* disable priority elevation */
	writel(0, common->cpsw_base + AM65_CPSW_PTYPE_REG);

	/* enable statistics on host port and this slave port */
	writel(BIT(0) | BIT(priv->port_id),
	       common->cpsw_base + AM65_CPSW_STAT_PORT_EN_REG);

	/* Port 0 length register */
	writel(PKTSIZE_ALIGN, port0->port_base + AM65_CPSW_PN_RX_MAXLEN_REG);

	/* set base flow_id reported by the UDMA RX channel */
	dma_get_cfg(&common->dma_rx, 0, (void **)&dma_rx_cfg_data);
	writel(dma_rx_cfg_data->flow_id_base,
	       port0->port_base + AM65_CPSW_P0_FLOW_ID_REG);
	dev_info(dev, "K3 CPSW: rflow_id_base: %u\n",
		 dma_rx_cfg_data->flow_id_base);

	/* Reset and enable the ALE (bypass: no learning needed in U-Boot) */
	writel(AM65_CPSW_ALE_CTL_REG_ENABLE | AM65_CPSW_ALE_CTL_REG_RESET_TBL |
	       AM65_CPSW_ALE_CTL_REG_BYPASS,
	       common->ale_base + AM65_CPSW_ALE_CTL_REG);

	/* port 0 put into forward mode */
	writel(AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD,
	       common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));

	writel(AM65_CPSW_ALE_DEFTHREAD_EN,
	       common->ale_base + AM65_CPSW_ALE_THREADMAPDEF_REG);

	/* PORT x configuration */

	/* Port x Max length register */
	writel(PKTSIZE_ALIGN, port->port_base + AM65_CPSW_PN_RX_MAXLEN_REG);

	/* Port x set mac */
	am65_cpsw_set_sl_mac(port, pdata->enetaddr);

	/* Port x ALE: mac_only, Forwarding */
	writel(AM65_CPSW_ALE_PN_CTL_REG_MAC_ONLY |
	       AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD,
	       common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));

	port->mac_control = 0;
	if (!am65_cpsw_macsl_reset(port)) {
		dev_err(dev, "mac_sl reset failed\n");
		ret = -EFAULT;
		goto err_dis_rx;
	}

	/* For SGMII, advertise SGMII ability and start autonegotiation */
	if (priv->phydev->interface == PHY_INTERFACE_MODE_SGMII) {
		writel(ADVERTISE_SGMII,
		       port->port_sgmii_base + AM65_CPSW_SGMII_MR_ADV_ABILITY_REG);
		writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
		       port->port_sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
	}

	ret = phy_startup(priv->phydev);
	if (ret) {
		dev_err(dev, "phy_startup failed\n");
		goto err_dis_rx;
	}

	/* No link after autoneg means we cannot transmit: treat as error */
	ret = am65_cpsw_update_link(priv);
	if (!ret) {
		ret = -ENODEV;
		goto err_phy_shutdown;
	}

	common->started = true;

	return 0;

err_phy_shutdown:
	phy_shutdown(priv->phydev);
err_dis_rx:
	/* disable ports */
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));
	if (!am65_cpsw_macsl_wait_for_idle(port))
		dev_err(dev, "mac_sl idle timeout\n");
	writel(0, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
	writel(0, common->ale_base + AM65_CPSW_ALE_CTL_REG);
	writel(0, common->cpsw_base + AM65_CPSW_CTL_REG);

	dma_disable(&common->dma_rx);
err_dis_tx:
	dma_disable(&common->dma_tx);
err_free_rx:
	dma_free(&common->dma_rx);
err_free_tx:
	dma_free(&common->dma_tx);
err_off_clk:
	clk_disable(&common->fclk);
err_off_pwrdm:
	power_domain_off(&common->pwrdmn);
out:
	dev_err(dev, "%s end error\n", __func__);

	return ret;
}
497
498static int am65_cpsw_send(struct udevice *dev, void *packet, int length)
499{
500 struct am65_cpsw_priv *priv = dev_get_priv(dev);
501 struct am65_cpsw_common *common = priv->cpsw_common;
502 struct ti_udma_drv_packet_data packet_data;
503 int ret;
504
505 packet_data.pkt_type = AM65_CPSW_CPPI_PKT_TYPE;
506 packet_data.dest_tag = priv->port_id;
507 ret = dma_send(&common->dma_tx, packet, length, &packet_data);
508 if (ret) {
509 dev_err(dev, "TX dma_send failed %d\n", ret);
510 return ret;
511 }
512
513 return 0;
514}
515
516static int am65_cpsw_recv(struct udevice *dev, int flags, uchar **packetp)
517{
518 struct am65_cpsw_priv *priv = dev_get_priv(dev);
519 struct am65_cpsw_common *common = priv->cpsw_common;
520
521 /* try to receive a new packet */
522 return dma_receive(&common->dma_rx, (void **)packetp, NULL);
523}
524
525static int am65_cpsw_free_pkt(struct udevice *dev, uchar *packet, int length)
526{
527 struct am65_cpsw_priv *priv = dev_get_priv(dev);
528 struct am65_cpsw_common *common = priv->cpsw_common;
529 int ret;
530
531 if (length > 0) {
532 u32 pkt = common->rx_next % UDMA_RX_DESC_NUM;
533
534 ret = dma_prepare_rcv_buf(&common->dma_rx,
535 net_rx_packets[pkt],
536 UDMA_RX_BUF_SIZE);
537 if (ret)
538 dev_err(dev, "RX dma free_pkt failed %d\n", ret);
539 common->rx_next++;
540 }
541
542 return 0;
543}
544
/*
 * eth_ops.stop: tear down in reverse order of am65_cpsw_start():
 * stop the PHY, disable the port and host-port ALE entries, wait for the
 * MAC sliver to drain, disable the sliver/ALE/switch, then release DMA.
 * No-op if start never completed.
 */
static void am65_cpsw_stop(struct udevice *dev)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct am65_cpsw_port *port = &common->ports[priv->port_id];

	if (!common->started)
		return;

	phy_shutdown(priv->phydev);

	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));
	if (!am65_cpsw_macsl_wait_for_idle(port))
		dev_err(dev, "mac_sl idle timeout\n");
	writel(0, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
	writel(0, common->ale_base + AM65_CPSW_ALE_CTL_REG);
	writel(0, common->cpsw_base + AM65_CPSW_CTL_REG);

	dma_disable(&common->dma_tx);
	dma_free(&common->dma_tx);

	dma_disable(&common->dma_rx);
	dma_free(&common->dma_rx);

	common->started = false;
}
572
Roger Quadroscb8f8ad2023-07-22 22:31:48 +0300573static int am65_cpsw_am654_get_efuse_macid(struct udevice *dev,
574 int slave, u8 *mac_addr)
575{
576 u32 mac_lo, mac_hi, offset;
577 struct regmap *syscon;
578 int ret;
579
580 syscon = syscon_regmap_lookup_by_phandle(dev, "ti,syscon-efuse");
581 if (IS_ERR(syscon)) {
582 if (PTR_ERR(syscon) == -ENODEV)
583 return 0;
584 return PTR_ERR(syscon);
585 }
586
587 ret = dev_read_u32_index(dev, "ti,syscon-efuse", 1, &offset);
588 if (ret)
589 return ret;
590
591 regmap_read(syscon, offset, &mac_lo);
592 regmap_read(syscon, offset + 4, &mac_hi);
593
594 mac_addr[0] = (mac_hi >> 8) & 0xff;
595 mac_addr[1] = mac_hi & 0xff;
596 mac_addr[2] = (mac_lo >> 24) & 0xff;
597 mac_addr[3] = (mac_lo >> 16) & 0xff;
598 mac_addr[4] = (mac_lo >> 8) & 0xff;
599 mac_addr[5] = mac_lo & 0xff;
600
601 return 0;
602}
603
Keerthya00b95c2019-07-09 10:30:34 +0530604static int am65_cpsw_read_rom_hwaddr(struct udevice *dev)
605{
606 struct am65_cpsw_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700607 struct eth_pdata *pdata = dev_get_plat(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530608
Roger Quadroscb8f8ad2023-07-22 22:31:48 +0300609 am65_cpsw_am654_get_efuse_macid(dev,
610 priv->port_id,
611 pdata->enetaddr);
Keerthya00b95c2019-07-09 10:30:34 +0530612
613 return 0;
614}
615
/* UCLASS_ETH operations for each slave-port device */
static const struct eth_ops am65_cpsw_ops = {
	.start = am65_cpsw_start,
	.send = am65_cpsw_send,
	.recv = am65_cpsw_recv,
	.free_pkt = am65_cpsw_free_pkt,
	.stop = am65_cpsw_stop,
	.read_rom_hwaddr = am65_cpsw_read_rom_hwaddr,
};
624
/*
 * SoC revisions whose MDIO state machine is affected and therefore need
 * the manual-mode (bit-banged) MDIO workaround; matched at port probe.
 */
static const struct soc_attr k3_mdio_soc_data[] = {
	{ .family = "AM62X", .revision = "SR1.0" },
	{ .family = "AM64X", .revision = "SR1.0" },
	{ .family = "AM64X", .revision = "SR2.0" },
	{ .family = "AM65X", .revision = "SR1.0" },
	{ .family = "AM65X", .revision = "SR2.0" },
	{ .family = "J7200", .revision = "SR1.0" },
	{ .family = "J7200", .revision = "SR2.0" },
	{ .family = "J721E", .revision = "SR1.0" },
	{ .family = "J721E", .revision = "SR1.1" },
	{ .family = "J721S2", .revision = "SR1.0" },
	{ /* sentinel */ },
};
638
Maxime Ripard028849d2023-07-24 15:57:30 +0200639static ofnode am65_cpsw_find_mdio(ofnode parent)
640{
641 ofnode node;
642
643 ofnode_for_each_subnode(node, parent)
644 if (ofnode_device_is_compatible(node, "ti,cpsw-mdio"))
645 return node;
646
647 return ofnode_null();
648}
649
650static int am65_cpsw_mdio_setup(struct udevice *dev)
651{
652 struct am65_cpsw_priv *priv = dev_get_priv(dev);
653 struct am65_cpsw_common *cpsw_common = priv->cpsw_common;
654 struct udevice *mdio_dev;
655 ofnode mdio;
656 int ret;
657
658 mdio = am65_cpsw_find_mdio(dev_ofnode(cpsw_common->dev));
659 if (!ofnode_valid(mdio))
660 return 0;
661
662 /*
663 * The MDIO controller is represented in the DT binding by a
664 * subnode of the MAC controller.
665 *
666 * We don't have a DM driver for the MDIO device yet, and thus any
667 * pinctrl setting on its node will be ignored.
668 *
669 * However, we do need to make sure the pins states tied to the
670 * MDIO node are configured properly. Fortunately, the core DM
671 * does that for use when we get a device, so we can work around
672 * that whole issue by just requesting a dummy MDIO driver to
673 * probe, and our pins will get muxed.
674 */
675 ret = uclass_get_device_by_ofnode(UCLASS_MDIO, mdio, &mdio_dev);
676 if (ret)
677 return ret;
678
679 return 0;
680}
681
/*
 * Create the shared MDIO bus on first use. Pulses the optional bus-level
 * PHY reset GPIO (assert, delay, deassert, optional post-delay) before
 * touching the bus, then applies MDIO pinctrl and registers the bus.
 *
 * Returns 0 when no PHY is needed or the bus already exists; -EFAULT if
 * bus registration fails.
 */
static int am65_cpsw_mdio_init(struct udevice *dev)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *cpsw_common = priv->cpsw_common;
	int ret;

	if (!priv->has_phy || cpsw_common->bus)
		return 0;

	if (IS_ENABLED(CONFIG_DM_GPIO)) {
		if (dm_gpio_is_valid(&cpsw_common->mdio_gpio_reset)) {
			/* Hardware reset pulse for all PHYs on the bus */
			dm_gpio_set_value(&cpsw_common->mdio_gpio_reset, 1);
			udelay(cpsw_common->reset_delay_us);
			dm_gpio_set_value(&cpsw_common->mdio_gpio_reset, 0);
			if (cpsw_common->reset_post_delay_us > 0)
				udelay(cpsw_common->reset_post_delay_us);
		}
	}

	ret = am65_cpsw_mdio_setup(dev);
	if (ret)
		return ret;

	cpsw_common->bus = cpsw_mdio_init(dev->name,
					  cpsw_common->mdio_base,
					  cpsw_common->bus_freq,
					  clk_get_rate(&cpsw_common->fclk),
					  priv->mdio_manual_mode);
	if (!cpsw_common->bus)
		return -EFAULT;

	return 0;
}
715
716static int am65_cpsw_phy_init(struct udevice *dev)
717{
718 struct am65_cpsw_priv *priv = dev_get_priv(dev);
719 struct am65_cpsw_common *cpsw_common = priv->cpsw_common;
Simon Glassfa20e932020-12-03 16:55:20 -0700720 struct eth_pdata *pdata = dev_get_plat(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530721 struct phy_device *phydev;
722 u32 supported = PHY_GBIT_FEATURES;
723 int ret;
724
725 phydev = phy_connect(cpsw_common->bus,
726 priv->phy_addr,
727 priv->dev,
728 pdata->phy_interface);
729
730 if (!phydev) {
731 dev_err(dev, "phy_connect() failed\n");
732 return -ENODEV;
733 }
734
735 phydev->supported &= supported;
736 if (pdata->max_speed) {
737 ret = phy_set_supported(phydev, pdata->max_speed);
738 if (ret)
739 return ret;
740 }
741 phydev->advertising = phydev->supported;
742
743 if (ofnode_valid(priv->phy_node))
744 phydev->node = priv->phy_node;
745
746 priv->phydev = phydev;
747 ret = phy_config(phydev);
748 if (ret < 0)
749 pr_err("phy_config() failed: %d", ret);
750
751 return ret;
752}
753
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530754static int am65_cpsw_ofdata_parse_phy(struct udevice *dev)
Keerthya00b95c2019-07-09 10:30:34 +0530755{
Simon Glassfa20e932020-12-03 16:55:20 -0700756 struct eth_pdata *pdata = dev_get_plat(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530757 struct am65_cpsw_priv *priv = dev_get_priv(dev);
758 struct ofnode_phandle_args out_args;
Keerthya00b95c2019-07-09 10:30:34 +0530759 int ret = 0;
760
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530761 dev_read_u32(dev, "reg", &priv->port_id);
762
Marek Behúnbc194772022-04-07 00:33:01 +0200763 pdata->phy_interface = dev_read_phy_mode(dev);
Marek Behún48631e42022-04-07 00:33:03 +0200764 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA) {
Marek Behúnbc194772022-04-07 00:33:01 +0200765 dev_err(dev, "Invalid PHY mode, port %u\n", priv->port_id);
766 return -EINVAL;
Keerthya00b95c2019-07-09 10:30:34 +0530767 }
768
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530769 dev_read_u32(dev, "max-speed", (u32 *)&pdata->max_speed);
Keerthya00b95c2019-07-09 10:30:34 +0530770 if (pdata->max_speed)
771 dev_err(dev, "Port %u speed froced to %uMbit\n",
772 priv->port_id, pdata->max_speed);
773
774 priv->has_phy = true;
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530775 ret = ofnode_parse_phandle_with_args(dev_ofnode(dev), "phy-handle",
Keerthya00b95c2019-07-09 10:30:34 +0530776 NULL, 0, 0, &out_args);
777 if (ret) {
778 dev_err(dev, "can't parse phy-handle port %u (%d)\n",
779 priv->port_id, ret);
780 priv->has_phy = false;
781 ret = 0;
782 }
783
784 priv->phy_node = out_args.node;
785 if (priv->has_phy) {
786 ret = ofnode_read_u32(priv->phy_node, "reg", &priv->phy_addr);
787 if (ret) {
788 dev_err(dev, "failed to get phy_addr port %u (%d)\n",
789 priv->port_id, ret);
790 goto out;
791 }
792 }
793
794out:
795 return ret;
796}
797
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530798static int am65_cpsw_port_probe(struct udevice *dev)
Keerthya00b95c2019-07-09 10:30:34 +0530799{
800 struct am65_cpsw_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700801 struct eth_pdata *pdata = dev_get_plat(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530802 struct am65_cpsw_common *cpsw_common;
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530803 char portname[15];
804 int ret;
Keerthya00b95c2019-07-09 10:30:34 +0530805
806 priv->dev = dev;
807
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530808 cpsw_common = dev_get_priv(dev->parent);
Keerthya00b95c2019-07-09 10:30:34 +0530809 priv->cpsw_common = cpsw_common;
810
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530811 sprintf(portname, "%s%s", dev->parent->name, dev->name);
812 device_set_name(dev, portname);
813
Ravi Gunasekaran1eb61912022-09-22 15:21:24 +0530814 priv->mdio_manual_mode = false;
815 if (soc_device_match(k3_mdio_soc_data))
816 priv->mdio_manual_mode = true;
817
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530818 ret = am65_cpsw_ofdata_parse_phy(dev);
819 if (ret)
820 goto out;
821
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300822 ret = am65_cpsw_gmii_sel_k3(priv, pdata->phy_interface);
823 if (ret)
824 goto out;
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530825
826 ret = am65_cpsw_mdio_init(dev);
827 if (ret)
828 goto out;
829
830 ret = am65_cpsw_phy_init(dev);
831 if (ret)
832 goto out;
833out:
834 return ret;
835}
836
/*
 * Probe the top-level NUSS device: map register windows, acquire the
 * power domain and functional clock, read the optional MDIO reset GPIO
 * from the "mdio" subnode, then walk "ethernet-ports" and bind one
 * am65_cpsw_nuss_port child per enabled non-zero port.
 *
 * NOTE(review): with CONFIG_DM_GPIO enabled, a missing "mdio" subnode
 * makes probe fail with -ENOENT — confirm all supported board DTs carry
 * that subnode.
 */
static int am65_cpsw_probe_nuss(struct udevice *dev)
{
	struct am65_cpsw_common *cpsw_common = dev_get_priv(dev);
	ofnode ports_np, node, mdio_np;
	int ret, i;
	struct udevice *port_dev;

	cpsw_common->dev = dev;
	cpsw_common->ss_base = dev_read_addr(dev);
	if (cpsw_common->ss_base == FDT_ADDR_T_NONE)
		return -EINVAL;

	ret = power_domain_get_by_index(dev, &cpsw_common->pwrdmn, 0);
	if (ret) {
		dev_err(dev, "failed to get pwrdmn: %d\n", ret);
		return ret;
	}

	ret = clk_get_by_name(dev, "fck", &cpsw_common->fclk);
	if (ret) {
		power_domain_free(&cpsw_common->pwrdmn);
		dev_err(dev, "failed to get clock %d\n", ret);
		return ret;
	}

	/* Derive the fixed sub-module bases from the subsystem base */
	cpsw_common->cpsw_base = cpsw_common->ss_base + AM65_CPSW_CPSW_NU_BASE;
	cpsw_common->ale_base = cpsw_common->cpsw_base +
				AM65_CPSW_CPSW_NU_ALE_BASE;
	cpsw_common->mdio_base = cpsw_common->ss_base + AM65_CPSW_MDIO_BASE;

	if (IS_ENABLED(CONFIG_DM_GPIO)) {
		/* get bus level PHY reset GPIO details */
		mdio_np = dev_read_subnode(dev, "mdio");
		if (!ofnode_valid(mdio_np)) {
			ret = -ENOENT;
			goto out;
		}

		cpsw_common->reset_delay_us = ofnode_read_u32_default(mdio_np, "reset-delay-us",
								      DEFAULT_GPIO_RESET_DELAY);
		cpsw_common->reset_post_delay_us = ofnode_read_u32_default(mdio_np,
									   "reset-post-delay-us",
									   0);
		/* GPIO is optional; a failed request leaves the desc invalid
		 * and the reset pulse is simply skipped later */
		ret = gpio_request_by_name_nodev(mdio_np, "reset-gpios", 0,
						 &cpsw_common->mdio_gpio_reset,
						 GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
	}

	ports_np = dev_read_subnode(dev, "ethernet-ports");
	if (!ofnode_valid(ports_np)) {
		ret = -ENOENT;
		goto out;
	}

	ofnode_for_each_subnode(node, ports_np) {
		const char *node_name;
		u32 port_id;
		bool disabled;

		node_name = ofnode_get_name(node);

		disabled = !ofnode_is_enabled(node);

		ret = ofnode_read_u32(node, "reg", &port_id);
		if (ret) {
			dev_err(dev, "%s: failed to get port_id (%d)\n",
				node_name, ret);
			goto out;
		}

		if (port_id >= AM65_CPSW_CPSWNU_MAX_PORTS) {
			dev_err(dev, "%s: invalid port_id (%d)\n",
				node_name, port_id);
			ret = -EINVAL;
			goto out;
		}
		cpsw_common->port_num++;

		/* Port 0 is the host port: no child device needed */
		if (!port_id)
			continue;

		cpsw_common->ports[port_id].disabled = disabled;
		if (disabled)
			continue;

		ret = device_bind_driver_to_node(dev, "am65_cpsw_nuss_port", ofnode_get_name(node), node, &port_dev);
		if (ret)
			dev_err(dev, "Failed to bind to %s node\n", ofnode_get_name(node));
	}

	/* Pre-compute every port's register windows (including port 0) */
	for (i = 0; i < AM65_CPSW_CPSWNU_MAX_PORTS; i++) {
		struct am65_cpsw_port *port = &cpsw_common->ports[i];

		port->port_base = cpsw_common->cpsw_base +
				  AM65_CPSW_CPSW_NU_PORTS_OFFSET +
				  (i * AM65_CPSW_CPSW_NU_PORTS_OFFSET);
		port->port_sgmii_base = cpsw_common->ss_base +
					(i * AM65_CPSW_SGMII_BASE);
		port->macsl_base = port->port_base +
				   AM65_CPSW_CPSW_NU_PORT_MACSL_OFFSET;
	}

	cpsw_common->bus_freq =
			dev_read_u32_default(dev, "bus_freq",
					     AM65_CPSW_MDIO_BUS_FREQ_DEF);

	dev_info(dev, "K3 CPSW: nuss_ver: 0x%08X cpsw_ver: 0x%08X ale_ver: 0x%08X Ports:%u mdio_freq:%u\n",
		 readl(cpsw_common->ss_base),
		 readl(cpsw_common->cpsw_base),
		 readl(cpsw_common->ale_base),
		 cpsw_common->port_num,
		 cpsw_common->bus_freq);

out:
	/* Release the clk/power-domain handles only; the resources stay
	 * configured and are re-acquired per reference at start() time */
	clk_free(&cpsw_common->fclk);
	power_domain_free(&cpsw_common->pwrdmn);
	return ret;
}
955
/* Compatible strings for the top-level (UCLASS_MISC) NUSS device */
static const struct udevice_id am65_cpsw_nuss_ids[] = {
	{ .compatible = "ti,am654-cpsw-nuss" },
	{ .compatible = "ti,j721e-cpsw-nuss" },
	{ .compatible = "ti,am642-cpsw-nuss" },
	{ }
};

/* Parent driver: probes the subsystem and binds one child per port */
U_BOOT_DRIVER(am65_cpsw_nuss) = {
	.name = "am65_cpsw_nuss",
	.id = UCLASS_MISC,
	.of_match = am65_cpsw_nuss_ids,
	.probe = am65_cpsw_probe_nuss,
	.priv_auto = sizeof(struct am65_cpsw_common),
};

/* Per-port Ethernet driver; bound by name from am65_cpsw_probe_nuss() */
U_BOOT_DRIVER(am65_cpsw_nuss_port) = {
	.name = "am65_cpsw_nuss_port",
	.id = UCLASS_ETH,
	.probe = am65_cpsw_port_probe,
	.ops = &am65_cpsw_ops,
	.priv_auto = sizeof(struct am65_cpsw_priv),
	.plat_auto = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA | DM_FLAG_OS_PREPARE,
};
Maxime Ripard028849d2023-07-24 15:57:30 +0200980
static const struct udevice_id am65_cpsw_mdio_ids[] = {
	{ .compatible = "ti,cpsw-mdio" },
	{ }
};

/*
 * Dummy driver for the MDIO subnode: it implements no ops and exists
 * only so the DM core probes the node and applies its pinctrl states
 * (see am65_cpsw_mdio_setup()).
 */
U_BOOT_DRIVER(am65_cpsw_mdio) = {
	.name = "am65_cpsw_mdio",
	.id = UCLASS_MDIO,
	.of_match = am65_cpsw_mdio_ids,
};