blob: 9b69f36d04dc85234cc2518ea294b22b92ce39f5 [file] [log] [blame]
Keerthya00b95c2019-07-09 10:30:34 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
4 *
5 * Copyright (C) 2019, Texas Instruments, Incorporated
6 *
7 */
8
Simon Glass9bc15642020-02-03 07:36:16 -07009#include <malloc.h>
Simon Glass274e0b02020-05-10 11:39:56 -060010#include <asm/cache.h>
Suman Anna18e40be2023-08-02 13:47:26 +053011#include <asm/gpio.h>
Keerthya00b95c2019-07-09 10:30:34 +053012#include <asm/io.h>
13#include <asm/processor.h>
14#include <clk.h>
15#include <dm.h>
Simon Glass9bc15642020-02-03 07:36:16 -070016#include <dm/device_compat.h>
Keerthya00b95c2019-07-09 10:30:34 +053017#include <dm/lists.h>
Maxime Ripard028849d2023-07-24 15:57:30 +020018#include <dm/pinctrl.h>
Keerthya00b95c2019-07-09 10:30:34 +053019#include <dma-uclass.h>
20#include <dm/of_access.h>
21#include <miiphy.h>
22#include <net.h>
23#include <phy.h>
24#include <power-domain.h>
Roger Quadroscb8f8ad2023-07-22 22:31:48 +030025#include <regmap.h>
Ravi Gunasekaran1eb61912022-09-22 15:21:24 +053026#include <soc.h>
Roger Quadroscb8f8ad2023-07-22 22:31:48 +030027#include <syscon.h>
Simon Glass4dcacfc2020-05-10 11:40:13 -060028#include <linux/bitops.h>
Suman Anna18e40be2023-08-02 13:47:26 +053029#include <linux/delay.h>
Simon Glassbdd5f812023-09-14 18:21:46 -060030#include <linux/printk.h>
Keerthya00b95c2019-07-09 10:30:34 +053031#include <linux/soc/ti/ti-udma.h>
32
Vignesh Raghavendrac5a66132021-05-10 20:06:09 +053033#define AM65_CPSW_CPSWNU_MAX_PORTS 9
Keerthya00b95c2019-07-09 10:30:34 +053034
35#define AM65_CPSW_SS_BASE 0x0
36#define AM65_CPSW_SGMII_BASE 0x100
37#define AM65_CPSW_MDIO_BASE 0xf00
38#define AM65_CPSW_XGMII_BASE 0x2100
39#define AM65_CPSW_CPSW_NU_BASE 0x20000
40#define AM65_CPSW_CPSW_NU_ALE_BASE 0x1e000
41
42#define AM65_CPSW_CPSW_NU_PORTS_OFFSET 0x1000
43#define AM65_CPSW_CPSW_NU_PORT_MACSL_OFFSET 0x330
44
45#define AM65_CPSW_MDIO_BUS_FREQ_DEF 1000000
46
47#define AM65_CPSW_CTL_REG 0x4
48#define AM65_CPSW_STAT_PORT_EN_REG 0x14
49#define AM65_CPSW_PTYPE_REG 0x18
50
51#define AM65_CPSW_CTL_REG_P0_ENABLE BIT(2)
52#define AM65_CPSW_CTL_REG_P0_TX_CRC_REMOVE BIT(13)
53#define AM65_CPSW_CTL_REG_P0_RX_PAD BIT(14)
54
55#define AM65_CPSW_P0_FLOW_ID_REG 0x8
56#define AM65_CPSW_PN_RX_MAXLEN_REG 0x24
57#define AM65_CPSW_PN_REG_SA_L 0x308
58#define AM65_CPSW_PN_REG_SA_H 0x30c
59
Siddharth Vadapalli726fc0a2023-08-02 13:47:25 +053060#define AM65_CPSW_SGMII_CONTROL_REG 0x010
61#define AM65_CPSW_SGMII_MR_ADV_ABILITY_REG 0x018
62#define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE BIT(0)
63
64#define ADVERTISE_SGMII 0x1
65
Keerthya00b95c2019-07-09 10:30:34 +053066#define AM65_CPSW_ALE_CTL_REG 0x8
67#define AM65_CPSW_ALE_CTL_REG_ENABLE BIT(31)
68#define AM65_CPSW_ALE_CTL_REG_RESET_TBL BIT(30)
69#define AM65_CPSW_ALE_CTL_REG_BYPASS BIT(4)
70#define AM65_CPSW_ALE_PN_CTL_REG(x) (0x40 + (x) * 4)
71#define AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD 0x3
72#define AM65_CPSW_ALE_PN_CTL_REG_MAC_ONLY BIT(11)
73
Vignesh Raghavendra5cb8a0f2020-07-06 13:36:53 +053074#define AM65_CPSW_ALE_THREADMAPDEF_REG 0x134
75#define AM65_CPSW_ALE_DEFTHREAD_EN BIT(15)
76
Keerthya00b95c2019-07-09 10:30:34 +053077#define AM65_CPSW_MACSL_CTL_REG 0x0
78#define AM65_CPSW_MACSL_CTL_REG_IFCTL_A BIT(15)
Murali Karicheri6565e902020-04-17 11:12:09 -040079#define AM65_CPSW_MACSL_CTL_EXT_EN BIT(18)
Keerthya00b95c2019-07-09 10:30:34 +053080#define AM65_CPSW_MACSL_CTL_REG_GIG BIT(7)
81#define AM65_CPSW_MACSL_CTL_REG_GMII_EN BIT(5)
82#define AM65_CPSW_MACSL_CTL_REG_LOOPBACK BIT(1)
83#define AM65_CPSW_MACSL_CTL_REG_FULL_DUPLEX BIT(0)
84#define AM65_CPSW_MACSL_RESET_REG 0x8
85#define AM65_CPSW_MACSL_RESET_REG_RESET BIT(0)
86#define AM65_CPSW_MACSL_STATUS_REG 0x4
87#define AM65_CPSW_MACSL_RESET_REG_PN_IDLE BIT(31)
88#define AM65_CPSW_MACSL_RESET_REG_PN_E_IDLE BIT(30)
89#define AM65_CPSW_MACSL_RESET_REG_PN_P_IDLE BIT(29)
90#define AM65_CPSW_MACSL_RESET_REG_PN_TX_IDLE BIT(28)
91#define AM65_CPSW_MACSL_RESET_REG_IDLE_MASK \
92 (AM65_CPSW_MACSL_RESET_REG_PN_IDLE | \
93 AM65_CPSW_MACSL_RESET_REG_PN_E_IDLE | \
94 AM65_CPSW_MACSL_RESET_REG_PN_P_IDLE | \
95 AM65_CPSW_MACSL_RESET_REG_PN_TX_IDLE)
96
97#define AM65_CPSW_CPPI_PKT_TYPE 0x7
98
Suman Anna18e40be2023-08-02 13:47:26 +053099#define DEFAULT_GPIO_RESET_DELAY 10
100
/* Per-port register windows and cached link state for one CPSW NUSS port */
struct am65_cpsw_port {
	fdt_addr_t port_base;		/* CPSW_NU per-port register window */
	fdt_addr_t port_sgmii_base;	/* SGMII control registers for this port */
	fdt_addr_t macsl_base;		/* MAC-SL (sliver) registers, inside port_base */
	bool disabled;			/* port disabled in the device tree */
	u32 mac_control;		/* last value written to MACSL_CTL_REG */
};

/* State shared by all ports: register bases, clocks/power and DMA channels */
struct am65_cpsw_common {
	struct udevice *dev;		/* top-level NUSS (UCLASS_MISC) device */
	fdt_addr_t ss_base;		/* subsystem base from DT "reg" */
	fdt_addr_t cpsw_base;		/* ss_base + AM65_CPSW_CPSW_NU_BASE */
	fdt_addr_t ale_base;		/* cpsw_base + AM65_CPSW_CPSW_NU_ALE_BASE */

	struct clk fclk;		/* functional clock ("fck") */
	struct power_domain pwrdmn;

	u32 port_num;			/* number of ports found in DT (incl. host port 0) */
	struct am65_cpsw_port ports[AM65_CPSW_CPSWNU_MAX_PORTS];

	u32 bus_freq;			/* MDIO bus frequency from DT "bus_freq" */

	struct dma dma_tx;		/* "tx0" channel */
	struct dma dma_rx;		/* "rx" channel */
	u32 rx_next;			/* next RX descriptor slot to re-arm */
	u32 rx_pend;			/* NOTE(review): written in start(), never read — appears unused */
	bool started;			/* guards against double start/stop */
};

/* Per-port (UCLASS_ETH child device) private data */
struct am65_cpsw_priv {
	struct udevice *dev;		/* this port's eth device */
	struct am65_cpsw_common *cpsw_common;	/* parent's priv */
	u32 port_id;			/* DT "reg" of this port (1-based) */
	struct phy_device *phydev;
};
136
137#ifdef PKTSIZE_ALIGN
138#define UDMA_RX_BUF_SIZE PKTSIZE_ALIGN
139#else
140#define UDMA_RX_BUF_SIZE ALIGN(1522, ARCH_DMA_MINALIGN)
141#endif
142
143#ifdef PKTBUFSRX
144#define UDMA_RX_DESC_NUM PKTBUFSRX
145#else
146#define UDMA_RX_DESC_NUM 4
147#endif
148
149#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
150 ((mac)[2] << 16) | ((mac)[3] << 24))
151#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
152
153static void am65_cpsw_set_sl_mac(struct am65_cpsw_port *slave,
154 unsigned char *addr)
155{
156 writel(mac_hi(addr),
157 slave->port_base + AM65_CPSW_PN_REG_SA_H);
158 writel(mac_lo(addr),
159 slave->port_base + AM65_CPSW_PN_REG_SA_L);
160}
161
162int am65_cpsw_macsl_reset(struct am65_cpsw_port *slave)
163{
164 u32 i = 100;
165
166 /* Set the soft reset bit */
167 writel(AM65_CPSW_MACSL_RESET_REG_RESET,
168 slave->macsl_base + AM65_CPSW_MACSL_RESET_REG);
169
170 while ((readl(slave->macsl_base + AM65_CPSW_MACSL_RESET_REG) &
171 AM65_CPSW_MACSL_RESET_REG_RESET) && i--)
172 cpu_relax();
173
174 /* Timeout on the reset */
175 return i;
176}
177
178static int am65_cpsw_macsl_wait_for_idle(struct am65_cpsw_port *slave)
179{
180 u32 i = 100;
181
182 while ((readl(slave->macsl_base + AM65_CPSW_MACSL_STATUS_REG) &
183 AM65_CPSW_MACSL_RESET_REG_IDLE_MASK) && i--)
184 cpu_relax();
185
186 return i;
187}
188
/*
 * Translate the PHY's negotiated link state into a MAC-SL control value
 * and program it, printing a message only when the state changes.
 *
 * Returns the PHY link state (nonzero = link up), which am65_cpsw_start()
 * uses as a success/failure indication.
 */
static int am65_cpsw_update_link(struct am65_cpsw_priv *priv)
{
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct am65_cpsw_port *port = &common->ports[priv->port_id];
	struct phy_device *phy = priv->phydev;
	u32 mac_control = 0;

	if (phy->link) { /* link up */
		mac_control = /*AM65_CPSW_MACSL_CTL_REG_LOOPBACK |*/
			      AM65_CPSW_MACSL_CTL_REG_GMII_EN;
		if (phy->speed == 1000)
			mac_control |= AM65_CPSW_MACSL_CTL_REG_GIG;
		if (phy->speed == 10 && phy_interface_is_rgmii(phy))
			/* Can be used with in band mode only */
			mac_control |= AM65_CPSW_MACSL_CTL_EXT_EN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= AM65_CPSW_MACSL_CTL_REG_FULL_DUPLEX;
		if (phy->speed == 100)
			/* Force the MII TXFLOW control pin at 100 Mbit */
			mac_control |= AM65_CPSW_MACSL_CTL_REG_IFCTL_A;
		if (phy->interface == PHY_INTERFACE_MODE_SGMII)
			mac_control |= AM65_CPSW_MACSL_CTL_EXT_EN;
	}

	/* Nothing to do if the computed value matches what is programmed */
	if (mac_control == port->mac_control)
		goto out;

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
		       priv->port_id, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", priv->port_id);
	}

	/* mac_control == 0 (link down) disables the MAC entirely */
	writel(mac_control, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
	port->mac_control = mac_control;

out:
	return phy->link;
}
229
Andreas Dannenberg1dc2ee62023-06-14 17:28:53 -0500230#define AM65_GMII_SEL_PORT_OFFS(x) (0x4 * ((x) - 1))
231
Keerthya00b95c2019-07-09 10:30:34 +0530232#define AM65_GMII_SEL_MODE_MII 0
233#define AM65_GMII_SEL_MODE_RMII 1
234#define AM65_GMII_SEL_MODE_RGMII 2
Siddharth Vadapalli726fc0a2023-08-02 13:47:25 +0530235#define AM65_GMII_SEL_MODE_SGMII 3
Keerthya00b95c2019-07-09 10:30:34 +0530236
237#define AM65_GMII_SEL_RGMII_IDMODE BIT(4)
238
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300239static int am65_cpsw_gmii_sel_k3(struct am65_cpsw_priv *priv,
240 phy_interface_t phy_mode)
Keerthya00b95c2019-07-09 10:30:34 +0530241{
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300242 struct udevice *dev = priv->dev;
243 u32 offset, reg, phandle;
Keerthya00b95c2019-07-09 10:30:34 +0530244 bool rgmii_id = false;
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300245 fdt_addr_t gmii_sel;
246 u32 mode = 0;
247 ofnode node;
248 int ret;
249
250 ret = ofnode_read_u32(dev_ofnode(dev), "phys", &phandle);
251 if (ret)
252 return ret;
253
254 ret = ofnode_read_u32_index(dev_ofnode(dev), "phys", 1, &offset);
255 if (ret)
256 return ret;
257
258 node = ofnode_get_by_phandle(phandle);
259 if (!ofnode_valid(node))
260 return -ENODEV;
261
262 gmii_sel = ofnode_get_addr(node);
263 if (gmii_sel == FDT_ADDR_T_NONE)
264 return -ENODEV;
Keerthya00b95c2019-07-09 10:30:34 +0530265
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300266 gmii_sel += AM65_GMII_SEL_PORT_OFFS(offset);
Andreas Dannenberg1dc2ee62023-06-14 17:28:53 -0500267 reg = readl(gmii_sel);
Keerthya00b95c2019-07-09 10:30:34 +0530268
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300269 dev_dbg(dev, "old gmii_sel: %08x\n", reg);
Keerthya00b95c2019-07-09 10:30:34 +0530270
271 switch (phy_mode) {
272 case PHY_INTERFACE_MODE_RMII:
273 mode = AM65_GMII_SEL_MODE_RMII;
274 break;
275
276 case PHY_INTERFACE_MODE_RGMII:
Grygorii Strashkobf45d9b2019-09-19 11:16:41 +0300277 case PHY_INTERFACE_MODE_RGMII_RXID:
Keerthya00b95c2019-07-09 10:30:34 +0530278 mode = AM65_GMII_SEL_MODE_RGMII;
279 break;
280
281 case PHY_INTERFACE_MODE_RGMII_ID:
Keerthya00b95c2019-07-09 10:30:34 +0530282 case PHY_INTERFACE_MODE_RGMII_TXID:
283 mode = AM65_GMII_SEL_MODE_RGMII;
284 rgmii_id = true;
285 break;
286
Siddharth Vadapalli726fc0a2023-08-02 13:47:25 +0530287 case PHY_INTERFACE_MODE_SGMII:
288 mode = AM65_GMII_SEL_MODE_SGMII;
289 break;
290
Keerthya00b95c2019-07-09 10:30:34 +0530291 default:
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300292 dev_warn(dev,
Keerthya00b95c2019-07-09 10:30:34 +0530293 "Unsupported PHY mode: %u. Defaulting to MII.\n",
294 phy_mode);
295 /* fallthrough */
296 case PHY_INTERFACE_MODE_MII:
297 mode = AM65_GMII_SEL_MODE_MII;
298 break;
299 };
300
301 if (rgmii_id)
302 mode |= AM65_GMII_SEL_RGMII_IDMODE;
303
304 reg = mode;
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300305 dev_dbg(dev, "gmii_sel PHY mode: %u, new gmii_sel: %08x\n",
Keerthya00b95c2019-07-09 10:30:34 +0530306 phy_mode, reg);
Andreas Dannenberg1dc2ee62023-06-14 17:28:53 -0500307 writel(reg, gmii_sel);
Keerthya00b95c2019-07-09 10:30:34 +0530308
Andreas Dannenberg1dc2ee62023-06-14 17:28:53 -0500309 reg = readl(gmii_sel);
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300310 if (reg != mode) {
311 dev_err(dev,
Keerthya00b95c2019-07-09 10:30:34 +0530312 "gmii_sel PHY mode NOT SET!: requested: %08x, gmii_sel: %08x\n",
313 mode, reg);
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300314 return 0;
315 }
316
317 return 0;
Keerthya00b95c2019-07-09 10:30:34 +0530318}
319
/*
 * Bring up the switch for this port: power/clock, DMA channels, CPSW
 * control/ALE/port registers, SGMII autoneg (if applicable), then the
 * PHY. On any failure the goto chain unwinds in strict reverse order
 * of acquisition.
 *
 * Returns 0 on success or a negative error code.
 */
static int am65_cpsw_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct am65_cpsw_port *port = &common->ports[priv->port_id];
	struct am65_cpsw_port *port0 = &common->ports[0];
	struct ti_udma_drv_chan_cfg_data *dma_rx_cfg_data;
	int ret, i;

	/* The shared switch fabric is brought up once for all ports */
	if (common->started)
		return 0;

	ret = power_domain_on(&common->pwrdmn);
	if (ret) {
		dev_err(dev, "power_domain_on() failed %d\n", ret);
		goto out;
	}

	ret = clk_enable(&common->fclk);
	if (ret) {
		dev_err(dev, "clk enabled failed %d\n", ret);
		goto err_off_pwrdm;
	}

	common->rx_next = 0;
	common->rx_pend = 0;
	ret = dma_get_by_name(common->dev, "tx0", &common->dma_tx);
	if (ret) {
		dev_err(dev, "TX dma get failed %d\n", ret);
		goto err_off_clk;
	}
	ret = dma_get_by_name(common->dev, "rx", &common->dma_rx);
	if (ret) {
		dev_err(dev, "RX dma get failed %d\n", ret);
		goto err_free_tx;
	}

	/* Pre-arm every RX descriptor with a network buffer */
	for (i = 0; i < UDMA_RX_DESC_NUM; i++) {
		ret = dma_prepare_rcv_buf(&common->dma_rx,
					  net_rx_packets[i],
					  UDMA_RX_BUF_SIZE);
		if (ret) {
			dev_err(dev, "RX dma add buf failed %d\n", ret);
			goto err_free_rx;
		}
	}

	ret = dma_enable(&common->dma_tx);
	if (ret) {
		dev_err(dev, "TX dma_enable failed %d\n", ret);
		goto err_free_rx;
	}
	ret = dma_enable(&common->dma_rx);
	if (ret) {
		dev_err(dev, "RX dma_enable failed %d\n", ret);
		goto err_dis_tx;
	}

	/* Control register: enable host port 0, strip CRC, pad short frames */
	writel(AM65_CPSW_CTL_REG_P0_ENABLE |
	       AM65_CPSW_CTL_REG_P0_TX_CRC_REMOVE |
	       AM65_CPSW_CTL_REG_P0_RX_PAD,
	       common->cpsw_base + AM65_CPSW_CTL_REG);

	/* disable priority elevation */
	writel(0, common->cpsw_base + AM65_CPSW_PTYPE_REG);

	/* enable statistics on host port and this port */
	writel(BIT(0) | BIT(priv->port_id),
	       common->cpsw_base + AM65_CPSW_STAT_PORT_EN_REG);

	/* Port 0 length register */
	writel(PKTSIZE_ALIGN, port0->port_base + AM65_CPSW_PN_RX_MAXLEN_REG);

	/* set base flow_id from the RX channel's configuration */
	dma_get_cfg(&common->dma_rx, 0, (void **)&dma_rx_cfg_data);
	writel(dma_rx_cfg_data->flow_id_base,
	       port0->port_base + AM65_CPSW_P0_FLOW_ID_REG);
	dev_info(dev, "K3 CPSW: rflow_id_base: %u\n",
		 dma_rx_cfg_data->flow_id_base);

	/* Reset and enable the ALE; bypass mode: no address learning needed */
	writel(AM65_CPSW_ALE_CTL_REG_ENABLE | AM65_CPSW_ALE_CTL_REG_RESET_TBL |
	       AM65_CPSW_ALE_CTL_REG_BYPASS,
	       common->ale_base + AM65_CPSW_ALE_CTL_REG);

	/* port 0 put into forward mode */
	writel(AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD,
	       common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));

	/* route all unclassified traffic to the default thread */
	writel(AM65_CPSW_ALE_DEFTHREAD_EN,
	       common->ale_base + AM65_CPSW_ALE_THREADMAPDEF_REG);

	/* PORT x configuration */

	/* Port x Max length register */
	writel(PKTSIZE_ALIGN, port->port_base + AM65_CPSW_PN_RX_MAXLEN_REG);

	/* Port x set mac */
	am65_cpsw_set_sl_mac(port, pdata->enetaddr);

	/* Port x ALE: mac_only, Forwarding */
	writel(AM65_CPSW_ALE_PN_CTL_REG_MAC_ONLY |
	       AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD,
	       common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));

	port->mac_control = 0;
	if (!am65_cpsw_macsl_reset(port)) {
		dev_err(dev, "mac_sl reset failed\n");
		ret = -EFAULT;
		goto err_dis_rx;
	}

	/* For SGMII, advertise and kick off in-band autonegotiation */
	if (priv->phydev->interface == PHY_INTERFACE_MODE_SGMII) {
		writel(ADVERTISE_SGMII,
		       port->port_sgmii_base + AM65_CPSW_SGMII_MR_ADV_ABILITY_REG);
		writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
		       port->port_sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
	}

	ret = phy_config(priv->phydev);
	if (ret < 0) {
		dev_err(dev, "phy_config failed: %d", ret);
		goto err_dis_rx;
	}

	ret = phy_startup(priv->phydev);
	if (ret) {
		dev_err(dev, "phy_startup failed\n");
		goto err_dis_rx;
	}

	/* update_link() returns the PHY link state; no link is a failure */
	ret = am65_cpsw_update_link(priv);
	if (!ret) {
		ret = -ENODEV;
		goto err_phy_shutdown;
	}

	common->started = true;

	return 0;

err_phy_shutdown:
	phy_shutdown(priv->phydev);
err_dis_rx:
	/* disable ports */
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));
	if (!am65_cpsw_macsl_wait_for_idle(port))
		dev_err(dev, "mac_sl idle timeout\n");
	writel(0, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
	writel(0, common->ale_base + AM65_CPSW_ALE_CTL_REG);
	writel(0, common->cpsw_base + AM65_CPSW_CTL_REG);

	dma_disable(&common->dma_rx);
err_dis_tx:
	dma_disable(&common->dma_tx);
err_free_rx:
	dma_free(&common->dma_rx);
err_free_tx:
	dma_free(&common->dma_tx);
err_off_clk:
	clk_disable(&common->fclk);
err_off_pwrdm:
	power_domain_off(&common->pwrdmn);
out:
	dev_err(dev, "%s end error\n", __func__);

	return ret;
}
491
492static int am65_cpsw_send(struct udevice *dev, void *packet, int length)
493{
494 struct am65_cpsw_priv *priv = dev_get_priv(dev);
495 struct am65_cpsw_common *common = priv->cpsw_common;
496 struct ti_udma_drv_packet_data packet_data;
497 int ret;
498
Matthias Schifferac70b872024-04-26 10:02:25 +0200499 if (!common->started)
500 return -ENETDOWN;
501
Keerthya00b95c2019-07-09 10:30:34 +0530502 packet_data.pkt_type = AM65_CPSW_CPPI_PKT_TYPE;
503 packet_data.dest_tag = priv->port_id;
504 ret = dma_send(&common->dma_tx, packet, length, &packet_data);
505 if (ret) {
506 dev_err(dev, "TX dma_send failed %d\n", ret);
507 return ret;
508 }
509
510 return 0;
511}
512
513static int am65_cpsw_recv(struct udevice *dev, int flags, uchar **packetp)
514{
515 struct am65_cpsw_priv *priv = dev_get_priv(dev);
516 struct am65_cpsw_common *common = priv->cpsw_common;
517
Matthias Schifferac70b872024-04-26 10:02:25 +0200518 if (!common->started)
519 return -ENETDOWN;
520
Keerthya00b95c2019-07-09 10:30:34 +0530521 /* try to receive a new packet */
522 return dma_receive(&common->dma_rx, (void **)packetp, NULL);
523}
524
525static int am65_cpsw_free_pkt(struct udevice *dev, uchar *packet, int length)
526{
527 struct am65_cpsw_priv *priv = dev_get_priv(dev);
528 struct am65_cpsw_common *common = priv->cpsw_common;
529 int ret;
530
531 if (length > 0) {
532 u32 pkt = common->rx_next % UDMA_RX_DESC_NUM;
533
534 ret = dma_prepare_rcv_buf(&common->dma_rx,
535 net_rx_packets[pkt],
536 UDMA_RX_BUF_SIZE);
537 if (ret)
538 dev_err(dev, "RX dma free_pkt failed %d\n", ret);
539 common->rx_next++;
540 }
541
542 return 0;
543}
544
/*
 * Shut the port down: stop the PHY, quiesce the switch registers in the
 * reverse order of am65_cpsw_start(), then tear down the DMA channels.
 * Safe to call when not started.
 */
static void am65_cpsw_stop(struct udevice *dev)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct am65_cpsw_port *port = &common->ports[priv->port_id];

	if (!common->started)
		return;

	phy_shutdown(priv->phydev);

	/* Take this port and host port 0 out of forwarding */
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));
	/* Wait for in-flight traffic to drain before disabling the MAC */
	if (!am65_cpsw_macsl_wait_for_idle(port))
		dev_err(dev, "mac_sl idle timeout\n");
	writel(0, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
	writel(0, common->ale_base + AM65_CPSW_ALE_CTL_REG);
	writel(0, common->cpsw_base + AM65_CPSW_CTL_REG);

	dma_disable(&common->dma_tx);
	dma_free(&common->dma_tx);

	dma_disable(&common->dma_rx);
	dma_free(&common->dma_rx);

	common->started = false;
}
572
Roger Quadroscb8f8ad2023-07-22 22:31:48 +0300573static int am65_cpsw_am654_get_efuse_macid(struct udevice *dev,
574 int slave, u8 *mac_addr)
575{
576 u32 mac_lo, mac_hi, offset;
577 struct regmap *syscon;
578 int ret;
579
580 syscon = syscon_regmap_lookup_by_phandle(dev, "ti,syscon-efuse");
581 if (IS_ERR(syscon)) {
582 if (PTR_ERR(syscon) == -ENODEV)
583 return 0;
584 return PTR_ERR(syscon);
585 }
586
587 ret = dev_read_u32_index(dev, "ti,syscon-efuse", 1, &offset);
588 if (ret)
589 return ret;
590
591 regmap_read(syscon, offset, &mac_lo);
592 regmap_read(syscon, offset + 4, &mac_hi);
593
594 mac_addr[0] = (mac_hi >> 8) & 0xff;
595 mac_addr[1] = mac_hi & 0xff;
596 mac_addr[2] = (mac_lo >> 24) & 0xff;
597 mac_addr[3] = (mac_lo >> 16) & 0xff;
598 mac_addr[4] = (mac_lo >> 8) & 0xff;
599 mac_addr[5] = mac_lo & 0xff;
600
601 return 0;
602}
603
Keerthya00b95c2019-07-09 10:30:34 +0530604static int am65_cpsw_read_rom_hwaddr(struct udevice *dev)
605{
606 struct am65_cpsw_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700607 struct eth_pdata *pdata = dev_get_plat(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530608
Roger Quadroscb8f8ad2023-07-22 22:31:48 +0300609 am65_cpsw_am654_get_efuse_macid(dev,
610 priv->port_id,
611 pdata->enetaddr);
Keerthya00b95c2019-07-09 10:30:34 +0530612
613 return 0;
614}
615
/* UCLASS_ETH operations implemented by each CPSW NUSS port device */
static const struct eth_ops am65_cpsw_ops = {
	.start = am65_cpsw_start,
	.send = am65_cpsw_send,
	.recv = am65_cpsw_recv,
	.free_pkt = am65_cpsw_free_pkt,
	.stop = am65_cpsw_stop,
	.read_rom_hwaddr = am65_cpsw_read_rom_hwaddr,
};
624
Keerthya00b95c2019-07-09 10:30:34 +0530625static int am65_cpsw_phy_init(struct udevice *dev)
626{
627 struct am65_cpsw_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700628 struct eth_pdata *pdata = dev_get_plat(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530629 struct phy_device *phydev;
630 u32 supported = PHY_GBIT_FEATURES;
631 int ret;
632
Roger Quadrosfced6b62024-02-28 12:35:27 +0200633 phydev = dm_eth_phy_connect(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530634 if (!phydev) {
635 dev_err(dev, "phy_connect() failed\n");
636 return -ENODEV;
637 }
638
639 phydev->supported &= supported;
640 if (pdata->max_speed) {
641 ret = phy_set_supported(phydev, pdata->max_speed);
642 if (ret)
643 return ret;
644 }
645 phydev->advertising = phydev->supported;
646
Keerthya00b95c2019-07-09 10:30:34 +0530647 priv->phydev = phydev;
Keerthya00b95c2019-07-09 10:30:34 +0530648
649 return ret;
650}
651
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530652static int am65_cpsw_ofdata_parse_phy(struct udevice *dev)
Keerthya00b95c2019-07-09 10:30:34 +0530653{
Simon Glassfa20e932020-12-03 16:55:20 -0700654 struct eth_pdata *pdata = dev_get_plat(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530655 struct am65_cpsw_priv *priv = dev_get_priv(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530656
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530657 dev_read_u32(dev, "reg", &priv->port_id);
658
Marek Behúnbc194772022-04-07 00:33:01 +0200659 pdata->phy_interface = dev_read_phy_mode(dev);
Marek Behún48631e42022-04-07 00:33:03 +0200660 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA) {
Marek Behúnbc194772022-04-07 00:33:01 +0200661 dev_err(dev, "Invalid PHY mode, port %u\n", priv->port_id);
662 return -EINVAL;
Keerthya00b95c2019-07-09 10:30:34 +0530663 }
664
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530665 dev_read_u32(dev, "max-speed", (u32 *)&pdata->max_speed);
Keerthya00b95c2019-07-09 10:30:34 +0530666 if (pdata->max_speed)
Richard Genoud33a57a82025-03-11 15:14:30 +0100667 dev_err(dev, "Port %u speed forced to %uMbit\n",
Keerthya00b95c2019-07-09 10:30:34 +0530668 priv->port_id, pdata->max_speed);
669
Roger Quadrosfced6b62024-02-28 12:35:27 +0200670 return 0;
Keerthya00b95c2019-07-09 10:30:34 +0530671}
672
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530673static int am65_cpsw_port_probe(struct udevice *dev)
Keerthya00b95c2019-07-09 10:30:34 +0530674{
675 struct am65_cpsw_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700676 struct eth_pdata *pdata = dev_get_plat(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530677 struct am65_cpsw_common *cpsw_common;
Michael Walle2e9cacb2024-04-03 16:31:55 +0200678 char portname[32];
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530679 int ret;
Keerthya00b95c2019-07-09 10:30:34 +0530680
681 priv->dev = dev;
682
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530683 cpsw_common = dev_get_priv(dev->parent);
Keerthya00b95c2019-07-09 10:30:34 +0530684 priv->cpsw_common = cpsw_common;
685
Michael Walle2e9cacb2024-04-03 16:31:55 +0200686 snprintf(portname, sizeof(portname), "%s%s", dev->parent->name, dev->name);
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530687 device_set_name(dev, portname);
688
689 ret = am65_cpsw_ofdata_parse_phy(dev);
690 if (ret)
691 goto out;
692
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300693 ret = am65_cpsw_gmii_sel_k3(priv, pdata->phy_interface);
694 if (ret)
695 goto out;
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530696
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530697 ret = am65_cpsw_phy_init(dev);
Roger Quadrosfced6b62024-02-28 12:35:27 +0200698
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530699out:
700 return ret;
701}
702
/*
 * Probe the top-level NUSS device: map register bases, acquire power
 * domain and functional clock, walk the "ethernet-ports" DT subnodes and
 * bind one am65_cpsw_nuss_port child per enabled external port, then
 * pre-compute each port's register windows.
 *
 * Returns 0 on success or a negative error code.
 */
static int am65_cpsw_probe_nuss(struct udevice *dev)
{
	struct am65_cpsw_common *cpsw_common = dev_get_priv(dev);
	ofnode ports_np, node;
	int ret, i;
	struct udevice *port_dev;

	cpsw_common->dev = dev;
	cpsw_common->ss_base = dev_read_addr(dev);
	if (cpsw_common->ss_base == FDT_ADDR_T_NONE)
		return -EINVAL;

	ret = power_domain_get_by_index(dev, &cpsw_common->pwrdmn, 0);
	if (ret) {
		dev_err(dev, "failed to get pwrdmn: %d\n", ret);
		return ret;
	}

	ret = clk_get_by_name(dev, "fck", &cpsw_common->fclk);
	if (ret) {
		power_domain_free(&cpsw_common->pwrdmn);
		dev_err(dev, "failed to get clock %d\n", ret);
		return ret;
	}

	/* Fixed offsets within the subsystem register space */
	cpsw_common->cpsw_base = cpsw_common->ss_base + AM65_CPSW_CPSW_NU_BASE;
	cpsw_common->ale_base = cpsw_common->cpsw_base +
				AM65_CPSW_CPSW_NU_ALE_BASE;

	ports_np = dev_read_subnode(dev, "ethernet-ports");
	if (!ofnode_valid(ports_np)) {
		ret = -ENOENT;
		goto out;
	}

	ofnode_for_each_subnode(node, ports_np) {
		const char *node_name;
		u32 port_id;
		bool disabled;

		node_name = ofnode_get_name(node);

		disabled = !ofnode_is_enabled(node);

		ret = ofnode_read_u32(node, "reg", &port_id);
		if (ret) {
			dev_err(dev, "%s: failed to get port_id (%d)\n",
				node_name, ret);
			goto out;
		}

		if (port_id >= AM65_CPSW_CPSWNU_MAX_PORTS) {
			dev_err(dev, "%s: invalid port_id (%d)\n",
				node_name, port_id);
			ret = -EINVAL;
			goto out;
		}
		cpsw_common->port_num++;

		/* Port 0 is the internal host port: counted but not bound */
		if (!port_id)
			continue;

		cpsw_common->ports[port_id].disabled = disabled;
		if (disabled)
			continue;

		/* Bind failure is logged but does not abort the other ports */
		ret = device_bind_driver_to_node(dev, "am65_cpsw_nuss_port", ofnode_get_name(node), node, &port_dev);
		if (ret)
			dev_err(dev, "Failed to bind to %s node\n", ofnode_get_name(node));
	}

	/* Pre-compute per-port register windows for all possible ports */
	for (i = 0; i < AM65_CPSW_CPSWNU_MAX_PORTS; i++) {
		struct am65_cpsw_port *port = &cpsw_common->ports[i];

		port->port_base = cpsw_common->cpsw_base +
				  AM65_CPSW_CPSW_NU_PORTS_OFFSET +
				  (i * AM65_CPSW_CPSW_NU_PORTS_OFFSET);
		port->port_sgmii_base = cpsw_common->ss_base +
					(i * AM65_CPSW_SGMII_BASE);
		port->macsl_base = port->port_base +
				   AM65_CPSW_CPSW_NU_PORT_MACSL_OFFSET;
	}

	cpsw_common->bus_freq =
			dev_read_u32_default(dev, "bus_freq",
					     AM65_CPSW_MDIO_BUS_FREQ_DEF);

	/* NOTE(review): these reads assume the domain is already powered;
	 * presumably it is left on by R5 SPL/firmware — confirm per SoC. */
	dev_info(dev, "K3 CPSW: nuss_ver: 0x%08X cpsw_ver: 0x%08X ale_ver: 0x%08X Ports:%u\n",
		 readl(cpsw_common->ss_base),
		 readl(cpsw_common->cpsw_base),
		 readl(cpsw_common->ale_base),
		 cpsw_common->port_num);

out:
	/* NOTE(review): the pwrdmn handle is freed on the success path too;
	 * am65_cpsw_start() still calls power_domain_on() on it — looks
	 * intentional (handle-only release) but verify against the
	 * power-domain uclass semantics. */
	power_domain_free(&cpsw_common->pwrdmn);
	return ret;
}
800
/* SoCs sharing this CPSW NUSS programming model */
static const struct udevice_id am65_cpsw_nuss_ids[] = {
	{ .compatible = "ti,am654-cpsw-nuss" },
	{ .compatible = "ti,j721e-cpsw-nuss" },
	{ .compatible = "ti,am642-cpsw-nuss" },
	{ }
};

/* Top-level switch wrapper: binds one eth child device per enabled port */
U_BOOT_DRIVER(am65_cpsw_nuss) = {
	.name = "am65_cpsw_nuss",
	.id = UCLASS_MISC,
	.of_match = am65_cpsw_nuss_ids,
	.probe = am65_cpsw_probe_nuss,
	.priv_auto = sizeof(struct am65_cpsw_common),
};

/* Per-port network device, bound by name from am65_cpsw_probe_nuss() */
U_BOOT_DRIVER(am65_cpsw_nuss_port) = {
	.name = "am65_cpsw_nuss_port",
	.id = UCLASS_ETH,
	.probe = am65_cpsw_port_probe,
	.ops = &am65_cpsw_ops,
	.priv_auto = sizeof(struct am65_cpsw_priv),
	.plat_auto = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA | DM_FLAG_OS_PREPARE,
};