// SPDX-License-Identifier: GPL-2.0+
/*
 * Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
 *
 * Copyright (C) 2019, Texas Instruments, Incorporated
 *
 */

#include <malloc.h>
#include <asm/cache.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <clk.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/lists.h>
#include <dm/pinctrl.h>
#include <dma-uclass.h>
#include <dm/of_access.h>
#include <miiphy.h>
#include <net.h>
#include <phy.h>
#include <power-domain.h>
#include <regmap.h>
#include <soc.h>
#include <syscon.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/printk.h>
#include <linux/soc/ti/ti-udma.h>

#define AM65_CPSW_CPSWNU_MAX_PORTS		9

#define AM65_CPSW_SS_BASE			0x0
#define AM65_CPSW_SGMII_BASE			0x100
#define AM65_CPSW_MDIO_BASE			0xf00
#define AM65_CPSW_XGMII_BASE			0x2100
#define AM65_CPSW_CPSW_NU_BASE			0x20000
#define AM65_CPSW_CPSW_NU_ALE_BASE		0x1e000

#define AM65_CPSW_CPSW_NU_PORTS_OFFSET		0x1000
#define AM65_CPSW_CPSW_NU_PORT_MACSL_OFFSET	0x330

#define AM65_CPSW_MDIO_BUS_FREQ_DEF		1000000

#define AM65_CPSW_CTL_REG			0x4
#define AM65_CPSW_STAT_PORT_EN_REG		0x14
#define AM65_CPSW_PTYPE_REG			0x18

#define AM65_CPSW_CTL_REG_P0_ENABLE		BIT(2)
#define AM65_CPSW_CTL_REG_P0_TX_CRC_REMOVE	BIT(13)
#define AM65_CPSW_CTL_REG_P0_RX_PAD		BIT(14)

#define AM65_CPSW_P0_FLOW_ID_REG		0x8
#define AM65_CPSW_PN_RX_MAXLEN_REG		0x24
#define AM65_CPSW_PN_REG_SA_L			0x308
#define AM65_CPSW_PN_REG_SA_H			0x30c

#define AM65_CPSW_SGMII_CONTROL_REG		0x010
#define AM65_CPSW_SGMII_MR_ADV_ABILITY_REG	0x018
#define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE	BIT(0)

#define ADVERTISE_SGMII				0x1

#define AM65_CPSW_ALE_CTL_REG			0x8
#define AM65_CPSW_ALE_CTL_REG_ENABLE		BIT(31)
#define AM65_CPSW_ALE_CTL_REG_RESET_TBL		BIT(30)
#define AM65_CPSW_ALE_CTL_REG_BYPASS		BIT(4)
#define AM65_CPSW_ALE_PN_CTL_REG(x)		(0x40 + (x) * 4)
#define AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD	0x3
#define AM65_CPSW_ALE_PN_CTL_REG_MAC_ONLY	BIT(11)

#define AM65_CPSW_ALE_THREADMAPDEF_REG		0x134
#define AM65_CPSW_ALE_DEFTHREAD_EN		BIT(15)

#define AM65_CPSW_MACSL_CTL_REG			0x0
#define AM65_CPSW_MACSL_CTL_REG_IFCTL_A		BIT(15)
#define AM65_CPSW_MACSL_CTL_EXT_EN		BIT(18)
#define AM65_CPSW_MACSL_CTL_REG_GIG		BIT(7)
#define AM65_CPSW_MACSL_CTL_REG_GMII_EN		BIT(5)
#define AM65_CPSW_MACSL_CTL_REG_LOOPBACK	BIT(1)
#define AM65_CPSW_MACSL_CTL_REG_FULL_DUPLEX	BIT(0)
#define AM65_CPSW_MACSL_RESET_REG		0x8
#define AM65_CPSW_MACSL_RESET_REG_RESET		BIT(0)
#define AM65_CPSW_MACSL_STATUS_REG		0x4
#define AM65_CPSW_MACSL_RESET_REG_PN_IDLE	BIT(31)
#define AM65_CPSW_MACSL_RESET_REG_PN_E_IDLE	BIT(30)
#define AM65_CPSW_MACSL_RESET_REG_PN_P_IDLE	BIT(29)
#define AM65_CPSW_MACSL_RESET_REG_PN_TX_IDLE	BIT(28)
#define AM65_CPSW_MACSL_RESET_REG_IDLE_MASK \
	(AM65_CPSW_MACSL_RESET_REG_PN_IDLE | \
	 AM65_CPSW_MACSL_RESET_REG_PN_E_IDLE | \
	 AM65_CPSW_MACSL_RESET_REG_PN_P_IDLE | \
	 AM65_CPSW_MACSL_RESET_REG_PN_TX_IDLE)

#define AM65_CPSW_CPPI_PKT_TYPE			0x7

#define DEFAULT_GPIO_RESET_DELAY		10

struct am65_cpsw_port {
	fdt_addr_t port_base;
	fdt_addr_t port_sgmii_base;
	fdt_addr_t macsl_base;
	bool disabled;
	u32 mac_control;
};

struct am65_cpsw_common {
	struct udevice *dev;
	fdt_addr_t ss_base;
	fdt_addr_t cpsw_base;
	fdt_addr_t ale_base;

	struct clk fclk;
	struct power_domain pwrdmn;

	u32 port_num;
	struct am65_cpsw_port ports[AM65_CPSW_CPSWNU_MAX_PORTS];

	u32 bus_freq;

	struct dma dma_tx;
	struct dma dma_rx;
	u32 rx_next;
	u32 rx_pend;
	bool started;
};

struct am65_cpsw_priv {
	struct udevice *dev;
	struct am65_cpsw_common *cpsw_common;
	u32 port_id;
	struct phy_device *phydev;
};

#ifdef PKTSIZE_ALIGN
#define UDMA_RX_BUF_SIZE	PKTSIZE_ALIGN
#else
#define UDMA_RX_BUF_SIZE	ALIGN(1522, ARCH_DMA_MINALIGN)
#endif

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM	PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM	4
#endif

#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))

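/* Program the port's source MAC address into the PN_REG_SA_H/SA_L registers */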
static void am65_cpsw_set_sl_mac(struct am65_cpsw_port *slave,
				 unsigned char *addr)
{
	writel(mac_hi(addr),
	       slave->port_base + AM65_CPSW_PN_REG_SA_H);
	writel(mac_lo(addr),
	       slave->port_base + AM65_CPSW_PN_REG_SA_L);
}

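/*
 * Soft-reset the port MAC sliver and poll for the reset bit to clear.
 * Returns the remaining poll count: 0 means the reset timed out.
 */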
int am65_cpsw_macsl_reset(struct am65_cpsw_port *slave)
{
	u32 i = 100;

	/* Set the soft reset bit */
	writel(AM65_CPSW_MACSL_RESET_REG_RESET,
	       slave->macsl_base + AM65_CPSW_MACSL_RESET_REG);

	while ((readl(slave->macsl_base + AM65_CPSW_MACSL_RESET_REG) &
		AM65_CPSW_MACSL_RESET_REG_RESET) && i--)
		cpu_relax();

	/* Timeout on the reset */
	return i;
}

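/* Poll for the MAC sliver to report idle; returns 0 on timeout */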
static int am65_cpsw_macsl_wait_for_idle(struct am65_cpsw_port *slave)
{
	u32 i = 100;

	while ((readl(slave->macsl_base + AM65_CPSW_MACSL_STATUS_REG) &
		AM65_CPSW_MACSL_RESET_REG_IDLE_MASK) && i--)
		cpu_relax();

	return i;
}

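/*
 * Translate the current PHY state (link, speed, duplex, interface mode)
 * into a MAC sliver control value and apply it if it changed.
 * Returns the PHY link state, i.e. non-zero while the link is up.
 */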
static int am65_cpsw_update_link(struct am65_cpsw_priv *priv)
{
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct am65_cpsw_port *port = &common->ports[priv->port_id];
	struct phy_device *phy = priv->phydev;
	u32 mac_control = 0;

	if (phy->link) { /* link up */
		mac_control = /*AM65_CPSW_MACSL_CTL_REG_LOOPBACK |*/
			      AM65_CPSW_MACSL_CTL_REG_GMII_EN;
		if (phy->speed == 1000)
			mac_control |= AM65_CPSW_MACSL_CTL_REG_GIG;
		if (phy->speed == 10 && phy_interface_is_rgmii(phy))
			/* Can be used with in band mode only */
			mac_control |= AM65_CPSW_MACSL_CTL_EXT_EN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= AM65_CPSW_MACSL_CTL_REG_FULL_DUPLEX;
		if (phy->speed == 100)
			mac_control |= AM65_CPSW_MACSL_CTL_REG_IFCTL_A;
		if (phy->interface == PHY_INTERFACE_MODE_SGMII)
			mac_control |= AM65_CPSW_MACSL_CTL_EXT_EN;
	}

	if (mac_control == port->mac_control)
		goto out;

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
		       priv->port_id, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", priv->port_id);
	}

	writel(mac_control, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
	port->mac_control = mac_control;

out:
	return phy->link;
}

#define AM65_GMII_SEL_PORT_OFFS(x)	(0x4 * ((x) - 1))

#define AM65_GMII_SEL_MODE_MII		0
#define AM65_GMII_SEL_MODE_RMII		1
#define AM65_GMII_SEL_MODE_RGMII	2
#define AM65_GMII_SEL_MODE_SGMII	3

#define AM65_GMII_SEL_RGMII_IDMODE	BIT(4)

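/*
 * Program the per-port GMII_SEL register, located via the port's "phys"
 * phandle, to match the requested PHY interface mode.
 */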
static int am65_cpsw_gmii_sel_k3(struct am65_cpsw_priv *priv,
				 phy_interface_t phy_mode)
{
	struct udevice *dev = priv->dev;
	u32 offset, reg, phandle;
	bool rgmii_id = false;
	fdt_addr_t gmii_sel;
	u32 mode = 0;
	ofnode node;
	int ret;

	ret = ofnode_read_u32(dev_ofnode(dev), "phys", &phandle);
	if (ret)
		return ret;

	ret = ofnode_read_u32_index(dev_ofnode(dev), "phys", 1, &offset);
	if (ret)
		return ret;

	node = ofnode_get_by_phandle(phandle);
	if (!ofnode_valid(node))
		return -ENODEV;

	gmii_sel = ofnode_get_addr(node);
	if (gmii_sel == FDT_ADDR_T_NONE)
		return -ENODEV;

	gmii_sel += AM65_GMII_SEL_PORT_OFFS(offset);
	reg = readl(gmii_sel);

	dev_dbg(dev, "old gmii_sel: %08x\n", reg);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		mode = AM65_GMII_SEL_MODE_RMII;
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		mode = AM65_GMII_SEL_MODE_RGMII;
		break;

	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		mode = AM65_GMII_SEL_MODE_RGMII;
		rgmii_id = true;
		break;

	case PHY_INTERFACE_MODE_SGMII:
		mode = AM65_GMII_SEL_MODE_SGMII;
		break;

	default:
		dev_warn(dev,
			 "Unsupported PHY mode: %u. Defaulting to MII.\n",
			 phy_mode);
		/* fallthrough */
	case PHY_INTERFACE_MODE_MII:
		mode = AM65_GMII_SEL_MODE_MII;
		break;
	};

	if (rgmii_id)
		mode |= AM65_GMII_SEL_RGMII_IDMODE;

	reg = mode;
	dev_dbg(dev, "gmii_sel PHY mode: %u, new gmii_sel: %08x\n",
		phy_mode, reg);
	writel(reg, gmii_sel);

	reg = readl(gmii_sel);
	if (reg != mode) {
		dev_err(dev,
			"gmii_sel PHY mode NOT SET!: requested: %08x, gmii_sel: %08x\n",
			mode, reg);
		return 0;
	}

	return 0;
}

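/*
 * Bring the switch up for one slave port: enable power and clock, set up
 * the TX/RX UDMA channels and RX buffers, configure host port 0 and the
 * slave port (ALE, MAC address, RX max length), then start the PHY and
 * wait for link. Errors unwind through the goto labels at the bottom.
 */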
static int am65_cpsw_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct am65_cpsw_port *port = &common->ports[priv->port_id];
	struct am65_cpsw_port *port0 = &common->ports[0];
	struct ti_udma_drv_chan_cfg_data *dma_rx_cfg_data;
	int ret, i;

	ret = power_domain_on(&common->pwrdmn);
	if (ret) {
		dev_err(dev, "power_domain_on() failed %d\n", ret);
		goto out;
	}

	ret = clk_enable(&common->fclk);
	if (ret) {
		dev_err(dev, "clk enable failed %d\n", ret);
		goto err_off_pwrdm;
	}

	common->rx_next = 0;
	common->rx_pend = 0;
	ret = dma_get_by_name(common->dev, "tx0", &common->dma_tx);
	if (ret) {
		dev_err(dev, "TX dma get failed %d\n", ret);
		goto err_off_clk;
	}
	ret = dma_get_by_name(common->dev, "rx", &common->dma_rx);
	if (ret) {
		dev_err(dev, "RX dma get failed %d\n", ret);
		goto err_free_tx;
	}

	for (i = 0; i < UDMA_RX_DESC_NUM; i++) {
		ret = dma_prepare_rcv_buf(&common->dma_rx,
					  net_rx_packets[i],
					  UDMA_RX_BUF_SIZE);
		if (ret) {
			dev_err(dev, "RX dma add buf failed %d\n", ret);
			goto err_free_tx;
		}
	}

	ret = dma_enable(&common->dma_tx);
	if (ret) {
		dev_err(dev, "TX dma_enable failed %d\n", ret);
		goto err_free_rx;
	}
	ret = dma_enable(&common->dma_rx);
	if (ret) {
		dev_err(dev, "RX dma_enable failed %d\n", ret);
		goto err_dis_tx;
	}

	/* Control register */
	writel(AM65_CPSW_CTL_REG_P0_ENABLE |
	       AM65_CPSW_CTL_REG_P0_TX_CRC_REMOVE |
	       AM65_CPSW_CTL_REG_P0_RX_PAD,
	       common->cpsw_base + AM65_CPSW_CTL_REG);

	/* disable priority elevation */
	writel(0, common->cpsw_base + AM65_CPSW_PTYPE_REG);

	/* enable statistics */
	writel(BIT(0) | BIT(priv->port_id),
	       common->cpsw_base + AM65_CPSW_STAT_PORT_EN_REG);

	/* Port 0 length register */
	writel(PKTSIZE_ALIGN, port0->port_base + AM65_CPSW_PN_RX_MAXLEN_REG);

	/* set base flow_id */
	dma_get_cfg(&common->dma_rx, 0, (void **)&dma_rx_cfg_data);
	writel(dma_rx_cfg_data->flow_id_base,
	       port0->port_base + AM65_CPSW_P0_FLOW_ID_REG);
	dev_info(dev, "K3 CPSW: rflow_id_base: %u\n",
		 dma_rx_cfg_data->flow_id_base);

	/* Reset and enable the ALE */
	writel(AM65_CPSW_ALE_CTL_REG_ENABLE | AM65_CPSW_ALE_CTL_REG_RESET_TBL |
	       AM65_CPSW_ALE_CTL_REG_BYPASS,
	       common->ale_base + AM65_CPSW_ALE_CTL_REG);

	/* port 0 put into forward mode */
	writel(AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD,
	       common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));

	writel(AM65_CPSW_ALE_DEFTHREAD_EN,
	       common->ale_base + AM65_CPSW_ALE_THREADMAPDEF_REG);

	/* PORT x configuration */

	/* Port x Max length register */
	writel(PKTSIZE_ALIGN, port->port_base + AM65_CPSW_PN_RX_MAXLEN_REG);

	/* Port x set mac */
	am65_cpsw_set_sl_mac(port, pdata->enetaddr);

	/* Port x ALE: mac_only, Forwarding */
	writel(AM65_CPSW_ALE_PN_CTL_REG_MAC_ONLY |
	       AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD,
	       common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));

	port->mac_control = 0;
	if (!am65_cpsw_macsl_reset(port)) {
		dev_err(dev, "mac_sl reset failed\n");
		ret = -EFAULT;
		goto err_dis_rx;
	}

	if (priv->phydev->interface == PHY_INTERFACE_MODE_SGMII) {
		writel(ADVERTISE_SGMII,
		       port->port_sgmii_base + AM65_CPSW_SGMII_MR_ADV_ABILITY_REG);
		writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
		       port->port_sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
	}

	ret = phy_startup(priv->phydev);
	if (ret) {
		dev_err(dev, "phy_startup failed\n");
		goto err_dis_rx;
	}

	ret = am65_cpsw_update_link(priv);
	if (!ret) {
		ret = -ENODEV;
		goto err_phy_shutdown;
	}

	common->started = true;

	return 0;

err_phy_shutdown:
	phy_shutdown(priv->phydev);
err_dis_rx:
	/* disable ports */
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));
	if (!am65_cpsw_macsl_wait_for_idle(port))
		dev_err(dev, "mac_sl idle timeout\n");
	writel(0, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
	writel(0, common->ale_base + AM65_CPSW_ALE_CTL_REG);
	writel(0, common->cpsw_base + AM65_CPSW_CTL_REG);

	dma_disable(&common->dma_rx);
err_dis_tx:
	dma_disable(&common->dma_tx);
err_free_rx:
	dma_free(&common->dma_rx);
err_free_tx:
	dma_free(&common->dma_tx);
err_off_clk:
	clk_disable(&common->fclk);
err_off_pwrdm:
	power_domain_off(&common->pwrdmn);
out:
	dev_err(dev, "%s end error\n", __func__);

	return ret;
}

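/* Queue one packet on the TX DMA channel, tagged with this port as destination */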
static int am65_cpsw_send(struct udevice *dev, void *packet, int length)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct ti_udma_drv_packet_data packet_data;
	int ret;

	packet_data.pkt_type = AM65_CPSW_CPPI_PKT_TYPE;
	packet_data.dest_tag = priv->port_id;
	ret = dma_send(&common->dma_tx, packet, length, &packet_data);
	if (ret) {
		dev_err(dev, "TX dma_send failed %d\n", ret);
		return ret;
	}

	return 0;
}

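/* Poll the RX DMA channel for a received packet */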
static int am65_cpsw_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;

	/* try to receive a new packet */
	return dma_receive(&common->dma_rx, (void **)packetp, NULL);
}

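/* Return a consumed RX buffer to the RX DMA channel so it can be reused */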
static int am65_cpsw_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;
	int ret;

	if (length > 0) {
		u32 pkt = common->rx_next % UDMA_RX_DESC_NUM;

		ret = dma_prepare_rcv_buf(&common->dma_rx,
					  net_rx_packets[pkt],
					  UDMA_RX_BUF_SIZE);
		if (ret)
			dev_err(dev, "RX dma free_pkt failed %d\n", ret);
		common->rx_next++;
	}

	return 0;
}

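/*
 * Stop the port: shut down the PHY, disable the ALE, MAC sliver and
 * switch control registers, then tear down the TX/RX DMA channels.
 */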
static void am65_cpsw_stop(struct udevice *dev)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct am65_cpsw_port *port = &common->ports[priv->port_id];

	if (!common->started)
		return;

	phy_shutdown(priv->phydev);

	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));
	if (!am65_cpsw_macsl_wait_for_idle(port))
		dev_err(dev, "mac_sl idle timeout\n");
	writel(0, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
	writel(0, common->ale_base + AM65_CPSW_ALE_CTL_REG);
	writel(0, common->cpsw_base + AM65_CPSW_CTL_REG);

	dma_disable(&common->dma_tx);
	dma_free(&common->dma_tx);

	dma_disable(&common->dma_rx);
	dma_free(&common->dma_rx);

	common->started = false;
}

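/*
 * Fetch the MAC address for this port from the efuse area referenced by
 * the "ti,syscon-efuse" phandle; returns 0 without filling mac_addr when
 * the syscon lookup yields -ENODEV.
 */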
static int am65_cpsw_am654_get_efuse_macid(struct udevice *dev,
					   int slave, u8 *mac_addr)
{
	u32 mac_lo, mac_hi, offset;
	struct regmap *syscon;
	int ret;

	syscon = syscon_regmap_lookup_by_phandle(dev, "ti,syscon-efuse");
	if (IS_ERR(syscon)) {
		if (PTR_ERR(syscon) == -ENODEV)
			return 0;
		return PTR_ERR(syscon);
	}

	ret = dev_read_u32_index(dev, "ti,syscon-efuse", 1, &offset);
	if (ret)
		return ret;

	regmap_read(syscon, offset, &mac_lo);
	regmap_read(syscon, offset + 4, &mac_hi);

	mac_addr[0] = (mac_hi >> 8) & 0xff;
	mac_addr[1] = mac_hi & 0xff;
	mac_addr[2] = (mac_lo >> 24) & 0xff;
	mac_addr[3] = (mac_lo >> 16) & 0xff;
	mac_addr[4] = (mac_lo >> 8) & 0xff;
	mac_addr[5] = mac_lo & 0xff;

	return 0;
}

static int am65_cpsw_read_rom_hwaddr(struct udevice *dev)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);

	am65_cpsw_am654_get_efuse_macid(dev,
					priv->port_id,
					pdata->enetaddr);

	return 0;
}

static const struct eth_ops am65_cpsw_ops = {
	.start		= am65_cpsw_start,
	.send		= am65_cpsw_send,
	.recv		= am65_cpsw_recv,
	.free_pkt	= am65_cpsw_free_pkt,
	.stop		= am65_cpsw_stop,
	.read_rom_hwaddr = am65_cpsw_read_rom_hwaddr,
};

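/* Connect the port to its PHY, cap it to the supported/requested speed and run phy_config() */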
static int am65_cpsw_phy_init(struct udevice *dev)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct phy_device *phydev;
	u32 supported = PHY_GBIT_FEATURES;
	int ret;

	phydev = dm_eth_phy_connect(dev);
	if (!phydev) {
		dev_err(dev, "phy_connect() failed\n");
		return -ENODEV;
	}

	phydev->supported &= supported;
	if (pdata->max_speed) {
		ret = phy_set_supported(phydev, pdata->max_speed);
		if (ret)
			return ret;
	}
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	ret = phy_config(phydev);
	if (ret < 0)
		dev_err(dev, "phy_config() failed: %d\n", ret);

	return ret;
}

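/* Parse the port DT node: port number ("reg"), PHY interface mode and optional "max-speed" */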
static int am65_cpsw_ofdata_parse_phy(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct am65_cpsw_priv *priv = dev_get_priv(dev);

	dev_read_u32(dev, "reg", &priv->port_id);

	pdata->phy_interface = dev_read_phy_mode(dev);
	if (pdata->phy_interface == PHY_INTERFACE_MODE_NA) {
		dev_err(dev, "Invalid PHY mode, port %u\n", priv->port_id);
		return -EINVAL;
	}

	dev_read_u32(dev, "max-speed", (u32 *)&pdata->max_speed);
	if (pdata->max_speed)
		dev_err(dev, "Port %u speed forced to %uMbit\n",
			priv->port_id, pdata->max_speed);

	return 0;
}

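/*
 * Per-port probe: rename the device after its parent, parse the port DT
 * node, program GMII_SEL and connect the PHY.
 */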
static int am65_cpsw_port_probe(struct udevice *dev)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct am65_cpsw_common *cpsw_common;
	char portname[32];
	int ret;

	priv->dev = dev;

	cpsw_common = dev_get_priv(dev->parent);
	priv->cpsw_common = cpsw_common;

	snprintf(portname, sizeof(portname), "%s%s", dev->parent->name, dev->name);
	device_set_name(dev, portname);

	ret = am65_cpsw_ofdata_parse_phy(dev);
	if (ret)
		goto out;

	ret = am65_cpsw_gmii_sel_k3(priv, pdata->phy_interface);
	if (ret)
		goto out;

	ret = am65_cpsw_phy_init(dev);

out:
	return ret;
}

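/*
 * Top-level probe: map the switch subsystem, acquire the power domain and
 * functional clock, then walk the "ethernet-ports" subnodes and bind a
 * separate ethernet device for each enabled slave port.
 */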
static int am65_cpsw_probe_nuss(struct udevice *dev)
{
	struct am65_cpsw_common *cpsw_common = dev_get_priv(dev);
	ofnode ports_np, node;
	int ret, i;
	struct udevice *port_dev;

	cpsw_common->dev = dev;
	cpsw_common->ss_base = dev_read_addr(dev);
	if (cpsw_common->ss_base == FDT_ADDR_T_NONE)
		return -EINVAL;

	ret = power_domain_get_by_index(dev, &cpsw_common->pwrdmn, 0);
	if (ret) {
		dev_err(dev, "failed to get pwrdmn: %d\n", ret);
		return ret;
	}

	ret = clk_get_by_name(dev, "fck", &cpsw_common->fclk);
	if (ret) {
		power_domain_free(&cpsw_common->pwrdmn);
		dev_err(dev, "failed to get clock %d\n", ret);
		return ret;
	}

	cpsw_common->cpsw_base = cpsw_common->ss_base + AM65_CPSW_CPSW_NU_BASE;
	cpsw_common->ale_base = cpsw_common->cpsw_base +
				AM65_CPSW_CPSW_NU_ALE_BASE;

	ports_np = dev_read_subnode(dev, "ethernet-ports");
	if (!ofnode_valid(ports_np)) {
		ret = -ENOENT;
		goto out;
	}

	ofnode_for_each_subnode(node, ports_np) {
		const char *node_name;
		u32 port_id;
		bool disabled;

		node_name = ofnode_get_name(node);

		disabled = !ofnode_is_enabled(node);

		ret = ofnode_read_u32(node, "reg", &port_id);
		if (ret) {
			dev_err(dev, "%s: failed to get port_id (%d)\n",
				node_name, ret);
			goto out;
		}

		if (port_id >= AM65_CPSW_CPSWNU_MAX_PORTS) {
			dev_err(dev, "%s: invalid port_id (%d)\n",
				node_name, port_id);
			ret = -EINVAL;
			goto out;
		}
		cpsw_common->port_num++;

		if (!port_id)
			continue;

		cpsw_common->ports[port_id].disabled = disabled;
		if (disabled)
			continue;

		ret = device_bind_driver_to_node(dev, "am65_cpsw_nuss_port", ofnode_get_name(node), node, &port_dev);
		if (ret)
			dev_err(dev, "Failed to bind to %s node\n", ofnode_get_name(node));
	}

	for (i = 0; i < AM65_CPSW_CPSWNU_MAX_PORTS; i++) {
		struct am65_cpsw_port *port = &cpsw_common->ports[i];

		port->port_base = cpsw_common->cpsw_base +
				  AM65_CPSW_CPSW_NU_PORTS_OFFSET +
				  (i * AM65_CPSW_CPSW_NU_PORTS_OFFSET);
		port->port_sgmii_base = cpsw_common->ss_base +
					(i * AM65_CPSW_SGMII_BASE);
		port->macsl_base = port->port_base +
				   AM65_CPSW_CPSW_NU_PORT_MACSL_OFFSET;
	}

	cpsw_common->bus_freq =
			dev_read_u32_default(dev, "bus_freq",
					     AM65_CPSW_MDIO_BUS_FREQ_DEF);

	dev_info(dev, "K3 CPSW: nuss_ver: 0x%08X cpsw_ver: 0x%08X ale_ver: 0x%08X Ports:%u\n",
		 readl(cpsw_common->ss_base),
		 readl(cpsw_common->cpsw_base),
		 readl(cpsw_common->ale_base),
		 cpsw_common->port_num);

out:
	power_domain_free(&cpsw_common->pwrdmn);
	return ret;
}

static const struct udevice_id am65_cpsw_nuss_ids[] = {
	{ .compatible = "ti,am654-cpsw-nuss" },
	{ .compatible = "ti,j721e-cpsw-nuss" },
	{ .compatible = "ti,am642-cpsw-nuss" },
	{ }
};

U_BOOT_DRIVER(am65_cpsw_nuss) = {
	.name	= "am65_cpsw_nuss",
	.id	= UCLASS_MISC,
	.of_match = am65_cpsw_nuss_ids,
	.probe	= am65_cpsw_probe_nuss,
	.priv_auto = sizeof(struct am65_cpsw_common),
};

U_BOOT_DRIVER(am65_cpsw_nuss_port) = {
	.name	= "am65_cpsw_nuss_port",
	.id	= UCLASS_ETH,
	.probe	= am65_cpsw_port_probe,
	.ops	= &am65_cpsw_ops,
	.priv_auto	= sizeof(struct am65_cpsw_priv),
	.plat_auto	= sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA | DM_FLAG_OS_PREPARE,
};