blob: c70b42f6bcc249ff9124c2952a38311f32ad19f9 [file] [log] [blame]
Keerthya00b95c2019-07-09 10:30:34 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
4 *
5 * Copyright (C) 2019, Texas Instruments, Incorporated
6 *
7 */
8
Simon Glass9bc15642020-02-03 07:36:16 -07009#include <malloc.h>
Simon Glass274e0b02020-05-10 11:39:56 -060010#include <asm/cache.h>
Suman Anna18e40be2023-08-02 13:47:26 +053011#include <asm/gpio.h>
Keerthya00b95c2019-07-09 10:30:34 +053012#include <asm/io.h>
13#include <asm/processor.h>
14#include <clk.h>
15#include <dm.h>
Simon Glass9bc15642020-02-03 07:36:16 -070016#include <dm/device_compat.h>
Keerthya00b95c2019-07-09 10:30:34 +053017#include <dm/lists.h>
Maxime Ripard028849d2023-07-24 15:57:30 +020018#include <dm/pinctrl.h>
Keerthya00b95c2019-07-09 10:30:34 +053019#include <dma-uclass.h>
20#include <dm/of_access.h>
21#include <miiphy.h>
22#include <net.h>
23#include <phy.h>
24#include <power-domain.h>
Roger Quadroscb8f8ad2023-07-22 22:31:48 +030025#include <regmap.h>
Ravi Gunasekaran1eb61912022-09-22 15:21:24 +053026#include <soc.h>
Roger Quadroscb8f8ad2023-07-22 22:31:48 +030027#include <syscon.h>
Simon Glass4dcacfc2020-05-10 11:40:13 -060028#include <linux/bitops.h>
Suman Anna18e40be2023-08-02 13:47:26 +053029#include <linux/delay.h>
Simon Glassbdd5f812023-09-14 18:21:46 -060030#include <linux/printk.h>
Keerthya00b95c2019-07-09 10:30:34 +053031#include <linux/soc/ti/ti-udma.h>
32
Vignesh Raghavendrac5a66132021-05-10 20:06:09 +053033#define AM65_CPSW_CPSWNU_MAX_PORTS 9
Keerthya00b95c2019-07-09 10:30:34 +053034
35#define AM65_CPSW_SS_BASE 0x0
36#define AM65_CPSW_SGMII_BASE 0x100
37#define AM65_CPSW_MDIO_BASE 0xf00
38#define AM65_CPSW_XGMII_BASE 0x2100
39#define AM65_CPSW_CPSW_NU_BASE 0x20000
40#define AM65_CPSW_CPSW_NU_ALE_BASE 0x1e000
41
42#define AM65_CPSW_CPSW_NU_PORTS_OFFSET 0x1000
43#define AM65_CPSW_CPSW_NU_PORT_MACSL_OFFSET 0x330
44
45#define AM65_CPSW_MDIO_BUS_FREQ_DEF 1000000
46
47#define AM65_CPSW_CTL_REG 0x4
48#define AM65_CPSW_STAT_PORT_EN_REG 0x14
49#define AM65_CPSW_PTYPE_REG 0x18
50
51#define AM65_CPSW_CTL_REG_P0_ENABLE BIT(2)
52#define AM65_CPSW_CTL_REG_P0_TX_CRC_REMOVE BIT(13)
53#define AM65_CPSW_CTL_REG_P0_RX_PAD BIT(14)
54
55#define AM65_CPSW_P0_FLOW_ID_REG 0x8
56#define AM65_CPSW_PN_RX_MAXLEN_REG 0x24
57#define AM65_CPSW_PN_REG_SA_L 0x308
58#define AM65_CPSW_PN_REG_SA_H 0x30c
59
Siddharth Vadapalli726fc0a2023-08-02 13:47:25 +053060#define AM65_CPSW_SGMII_CONTROL_REG 0x010
61#define AM65_CPSW_SGMII_MR_ADV_ABILITY_REG 0x018
62#define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE BIT(0)
63
64#define ADVERTISE_SGMII 0x1
65
Keerthya00b95c2019-07-09 10:30:34 +053066#define AM65_CPSW_ALE_CTL_REG 0x8
67#define AM65_CPSW_ALE_CTL_REG_ENABLE BIT(31)
68#define AM65_CPSW_ALE_CTL_REG_RESET_TBL BIT(30)
69#define AM65_CPSW_ALE_CTL_REG_BYPASS BIT(4)
70#define AM65_CPSW_ALE_PN_CTL_REG(x) (0x40 + (x) * 4)
71#define AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD 0x3
72#define AM65_CPSW_ALE_PN_CTL_REG_MAC_ONLY BIT(11)
73
Vignesh Raghavendra5cb8a0f2020-07-06 13:36:53 +053074#define AM65_CPSW_ALE_THREADMAPDEF_REG 0x134
75#define AM65_CPSW_ALE_DEFTHREAD_EN BIT(15)
76
Keerthya00b95c2019-07-09 10:30:34 +053077#define AM65_CPSW_MACSL_CTL_REG 0x0
78#define AM65_CPSW_MACSL_CTL_REG_IFCTL_A BIT(15)
Murali Karicheri6565e902020-04-17 11:12:09 -040079#define AM65_CPSW_MACSL_CTL_EXT_EN BIT(18)
Keerthya00b95c2019-07-09 10:30:34 +053080#define AM65_CPSW_MACSL_CTL_REG_GIG BIT(7)
81#define AM65_CPSW_MACSL_CTL_REG_GMII_EN BIT(5)
82#define AM65_CPSW_MACSL_CTL_REG_LOOPBACK BIT(1)
83#define AM65_CPSW_MACSL_CTL_REG_FULL_DUPLEX BIT(0)
84#define AM65_CPSW_MACSL_RESET_REG 0x8
85#define AM65_CPSW_MACSL_RESET_REG_RESET BIT(0)
86#define AM65_CPSW_MACSL_STATUS_REG 0x4
87#define AM65_CPSW_MACSL_RESET_REG_PN_IDLE BIT(31)
88#define AM65_CPSW_MACSL_RESET_REG_PN_E_IDLE BIT(30)
89#define AM65_CPSW_MACSL_RESET_REG_PN_P_IDLE BIT(29)
90#define AM65_CPSW_MACSL_RESET_REG_PN_TX_IDLE BIT(28)
91#define AM65_CPSW_MACSL_RESET_REG_IDLE_MASK \
92 (AM65_CPSW_MACSL_RESET_REG_PN_IDLE | \
93 AM65_CPSW_MACSL_RESET_REG_PN_E_IDLE | \
94 AM65_CPSW_MACSL_RESET_REG_PN_P_IDLE | \
95 AM65_CPSW_MACSL_RESET_REG_PN_TX_IDLE)
96
97#define AM65_CPSW_CPPI_PKT_TYPE 0x7
98
Suman Anna18e40be2023-08-02 13:47:26 +053099#define DEFAULT_GPIO_RESET_DELAY 10
100
/* Per-slave-port state: register windows and the cached MAC control value */
struct am65_cpsw_port {
	fdt_addr_t port_base;		/* CPSW_NU per-port register block */
	fdt_addr_t port_sgmii_base;	/* per-port SGMII register block */
	fdt_addr_t macsl_base;		/* MAC sliver regs (inside port_base) */
	bool disabled;			/* port marked disabled in DT */
	u32 mac_control;		/* last value written to MACSL_CTL_REG */
};

/* State shared by all ports; one instance per CPSW NUSS subsystem (parent) */
struct am65_cpsw_common {
	struct udevice *dev;		/* the UCLASS_MISC parent device */
	fdt_addr_t ss_base;		/* subsystem register base */
	fdt_addr_t cpsw_base;		/* CPSW_NU core registers */
	fdt_addr_t ale_base;		/* address lookup engine registers */

	struct clk fclk;		/* functional clock */
	struct power_domain pwrdmn;

	u32 port_num;			/* number of port nodes found in DT */
	struct am65_cpsw_port ports[AM65_CPSW_CPSWNU_MAX_PORTS];

	u32 bus_freq;			/* MDIO bus frequency, Hz */

	struct dma dma_tx;		/* "tx0" UDMA channel */
	struct dma dma_rx;		/* "rx" UDMA channel */
	u32 rx_next;			/* next RX ring slot to re-arm */
	u32 rx_pend;
	bool started;			/* guards start/stop re-entry */
};

/* Per-port private data for each child UCLASS_ETH device */
struct am65_cpsw_priv {
	struct udevice *dev;
	struct am65_cpsw_common *cpsw_common;	/* parent's private data */
	u32 port_id;			/* slave port number from DT "reg" */
	struct phy_device *phydev;
};
136
/* RX buffer geometry: follow the network stack's settings when available */
#ifdef PKTSIZE_ALIGN
#define UDMA_RX_BUF_SIZE PKTSIZE_ALIGN
#else
#define UDMA_RX_BUF_SIZE ALIGN(1522, ARCH_DMA_MINALIGN)
#endif

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM 4
#endif

/*
 * Pack a 6-byte MAC address into the SA_H (bytes 0-3) / SA_L (bytes 4-5)
 * register layout used by AM65_CPSW_PN_REG_SA_H / AM65_CPSW_PN_REG_SA_L.
 */
#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
152
153static void am65_cpsw_set_sl_mac(struct am65_cpsw_port *slave,
154 unsigned char *addr)
155{
156 writel(mac_hi(addr),
157 slave->port_base + AM65_CPSW_PN_REG_SA_H);
158 writel(mac_lo(addr),
159 slave->port_base + AM65_CPSW_PN_REG_SA_L);
160}
161
162int am65_cpsw_macsl_reset(struct am65_cpsw_port *slave)
163{
164 u32 i = 100;
165
166 /* Set the soft reset bit */
167 writel(AM65_CPSW_MACSL_RESET_REG_RESET,
168 slave->macsl_base + AM65_CPSW_MACSL_RESET_REG);
169
170 while ((readl(slave->macsl_base + AM65_CPSW_MACSL_RESET_REG) &
171 AM65_CPSW_MACSL_RESET_REG_RESET) && i--)
172 cpu_relax();
173
174 /* Timeout on the reset */
175 return i;
176}
177
178static int am65_cpsw_macsl_wait_for_idle(struct am65_cpsw_port *slave)
179{
180 u32 i = 100;
181
182 while ((readl(slave->macsl_base + AM65_CPSW_MACSL_STATUS_REG) &
183 AM65_CPSW_MACSL_RESET_REG_IDLE_MASK) && i--)
184 cpu_relax();
185
186 return i;
187}
188
189static int am65_cpsw_update_link(struct am65_cpsw_priv *priv)
190{
191 struct am65_cpsw_common *common = priv->cpsw_common;
192 struct am65_cpsw_port *port = &common->ports[priv->port_id];
193 struct phy_device *phy = priv->phydev;
194 u32 mac_control = 0;
195
196 if (phy->link) { /* link up */
197 mac_control = /*AM65_CPSW_MACSL_CTL_REG_LOOPBACK |*/
198 AM65_CPSW_MACSL_CTL_REG_GMII_EN;
199 if (phy->speed == 1000)
200 mac_control |= AM65_CPSW_MACSL_CTL_REG_GIG;
Murali Karicheri6565e902020-04-17 11:12:09 -0400201 if (phy->speed == 10 && phy_interface_is_rgmii(phy))
202 /* Can be used with in band mode only */
203 mac_control |= AM65_CPSW_MACSL_CTL_EXT_EN;
Keerthya00b95c2019-07-09 10:30:34 +0530204 if (phy->duplex == DUPLEX_FULL)
205 mac_control |= AM65_CPSW_MACSL_CTL_REG_FULL_DUPLEX;
206 if (phy->speed == 100)
207 mac_control |= AM65_CPSW_MACSL_CTL_REG_IFCTL_A;
Siddharth Vadapalli726fc0a2023-08-02 13:47:25 +0530208 if (phy->interface == PHY_INTERFACE_MODE_SGMII)
209 mac_control |= AM65_CPSW_MACSL_CTL_EXT_EN;
Keerthya00b95c2019-07-09 10:30:34 +0530210 }
211
212 if (mac_control == port->mac_control)
213 goto out;
214
215 if (mac_control) {
216 printf("link up on port %d, speed %d, %s duplex\n",
217 priv->port_id, phy->speed,
218 (phy->duplex == DUPLEX_FULL) ? "full" : "half");
219 } else {
220 printf("link down on port %d\n", priv->port_id);
221 }
222
223 writel(mac_control, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
224 port->mac_control = mac_control;
225
226out:
227 return phy->link;
228}
229
/* Byte offset of port (x)'s slot in the gmii_sel syscon; ports are 1-based */
#define AM65_GMII_SEL_PORT_OFFS(x)	(0x4 * ((x) - 1))

/* gmii_sel interface-mode field values */
#define AM65_GMII_SEL_MODE_MII		0
#define AM65_GMII_SEL_MODE_RMII		1
#define AM65_GMII_SEL_MODE_RGMII	2
#define AM65_GMII_SEL_MODE_SGMII	3

/* Enable the internal RGMII TX clock delay */
#define AM65_GMII_SEL_RGMII_IDMODE	BIT(4)
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300239static int am65_cpsw_gmii_sel_k3(struct am65_cpsw_priv *priv,
240 phy_interface_t phy_mode)
Keerthya00b95c2019-07-09 10:30:34 +0530241{
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300242 struct udevice *dev = priv->dev;
243 u32 offset, reg, phandle;
Keerthya00b95c2019-07-09 10:30:34 +0530244 bool rgmii_id = false;
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300245 fdt_addr_t gmii_sel;
246 u32 mode = 0;
247 ofnode node;
248 int ret;
249
250 ret = ofnode_read_u32(dev_ofnode(dev), "phys", &phandle);
251 if (ret)
252 return ret;
253
254 ret = ofnode_read_u32_index(dev_ofnode(dev), "phys", 1, &offset);
255 if (ret)
256 return ret;
257
258 node = ofnode_get_by_phandle(phandle);
259 if (!ofnode_valid(node))
260 return -ENODEV;
261
262 gmii_sel = ofnode_get_addr(node);
263 if (gmii_sel == FDT_ADDR_T_NONE)
264 return -ENODEV;
Keerthya00b95c2019-07-09 10:30:34 +0530265
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300266 gmii_sel += AM65_GMII_SEL_PORT_OFFS(offset);
Andreas Dannenberg1dc2ee62023-06-14 17:28:53 -0500267 reg = readl(gmii_sel);
Keerthya00b95c2019-07-09 10:30:34 +0530268
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300269 dev_dbg(dev, "old gmii_sel: %08x\n", reg);
Keerthya00b95c2019-07-09 10:30:34 +0530270
271 switch (phy_mode) {
272 case PHY_INTERFACE_MODE_RMII:
273 mode = AM65_GMII_SEL_MODE_RMII;
274 break;
275
276 case PHY_INTERFACE_MODE_RGMII:
Grygorii Strashkobf45d9b2019-09-19 11:16:41 +0300277 case PHY_INTERFACE_MODE_RGMII_RXID:
Keerthya00b95c2019-07-09 10:30:34 +0530278 mode = AM65_GMII_SEL_MODE_RGMII;
279 break;
280
281 case PHY_INTERFACE_MODE_RGMII_ID:
Keerthya00b95c2019-07-09 10:30:34 +0530282 case PHY_INTERFACE_MODE_RGMII_TXID:
283 mode = AM65_GMII_SEL_MODE_RGMII;
284 rgmii_id = true;
285 break;
286
Siddharth Vadapalli726fc0a2023-08-02 13:47:25 +0530287 case PHY_INTERFACE_MODE_SGMII:
288 mode = AM65_GMII_SEL_MODE_SGMII;
289 break;
290
Keerthya00b95c2019-07-09 10:30:34 +0530291 default:
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300292 dev_warn(dev,
Keerthya00b95c2019-07-09 10:30:34 +0530293 "Unsupported PHY mode: %u. Defaulting to MII.\n",
294 phy_mode);
295 /* fallthrough */
296 case PHY_INTERFACE_MODE_MII:
297 mode = AM65_GMII_SEL_MODE_MII;
298 break;
299 };
300
301 if (rgmii_id)
302 mode |= AM65_GMII_SEL_RGMII_IDMODE;
303
304 reg = mode;
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300305 dev_dbg(dev, "gmii_sel PHY mode: %u, new gmii_sel: %08x\n",
Keerthya00b95c2019-07-09 10:30:34 +0530306 phy_mode, reg);
Andreas Dannenberg1dc2ee62023-06-14 17:28:53 -0500307 writel(reg, gmii_sel);
Keerthya00b95c2019-07-09 10:30:34 +0530308
Andreas Dannenberg1dc2ee62023-06-14 17:28:53 -0500309 reg = readl(gmii_sel);
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300310 if (reg != mode) {
311 dev_err(dev,
Keerthya00b95c2019-07-09 10:30:34 +0530312 "gmii_sel PHY mode NOT SET!: requested: %08x, gmii_sel: %08x\n",
313 mode, reg);
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300314 return 0;
315 }
316
317 return 0;
Keerthya00b95c2019-07-09 10:30:34 +0530318}
319
/*
 * Bring the port up: power and clock the subsystem, acquire and arm the
 * TX/RX DMA channels, configure the switch core, host port 0 and this
 * slave port, then start the PHY and wait for link.
 *
 * Called by the eth uclass; a second call while already running is a
 * no-op.  On any failure the goto chain below unwinds exactly the
 * resources acquired so far, in reverse order.
 *
 * Returns 0 on success or a negative error code.
 */
static int am65_cpsw_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct am65_cpsw_port *port = &common->ports[priv->port_id];
	struct am65_cpsw_port *port0 = &common->ports[0];
	struct ti_udma_drv_chan_cfg_data *dma_rx_cfg_data;
	int ret, i;

	if (common->started)
		return 0;

	ret = power_domain_on(&common->pwrdmn);
	if (ret) {
		dev_err(dev, "power_domain_on() failed %d\n", ret);
		goto out;
	}

	ret = clk_enable(&common->fclk);
	if (ret) {
		dev_err(dev, "clk enabled failed %d\n", ret);
		goto err_off_pwrdm;
	}

	common->rx_next = 0;
	common->rx_pend = 0;
	ret = dma_get_by_name(common->dev, "tx0", &common->dma_tx);
	if (ret) {
		dev_err(dev, "TX dma get failed %d\n", ret);
		goto err_off_clk;
	}
	ret = dma_get_by_name(common->dev, "rx", &common->dma_rx);
	if (ret) {
		dev_err(dev, "RX dma get failed %d\n", ret);
		goto err_free_tx;
	}

	/* Pre-arm the RX ring with the uclass-provided packet buffers */
	for (i = 0; i < UDMA_RX_DESC_NUM; i++) {
		ret = dma_prepare_rcv_buf(&common->dma_rx,
					  net_rx_packets[i],
					  UDMA_RX_BUF_SIZE);
		if (ret) {
			dev_err(dev, "RX dma add buf failed %d\n", ret);
			goto err_free_rx;
		}
	}

	ret = dma_enable(&common->dma_tx);
	if (ret) {
		dev_err(dev, "TX dma_enable failed %d\n", ret);
		goto err_free_rx;
	}
	ret = dma_enable(&common->dma_rx);
	if (ret) {
		dev_err(dev, "RX dma_enable failed %d\n", ret);
		goto err_dis_tx;
	}

	/* Control register */
	writel(AM65_CPSW_CTL_REG_P0_ENABLE |
	       AM65_CPSW_CTL_REG_P0_TX_CRC_REMOVE |
	       AM65_CPSW_CTL_REG_P0_RX_PAD,
	       common->cpsw_base + AM65_CPSW_CTL_REG);

	/* disable priority elevation */
	writel(0, common->cpsw_base + AM65_CPSW_PTYPE_REG);

	/* enable statistics */
	writel(BIT(0) | BIT(priv->port_id),
	       common->cpsw_base + AM65_CPSW_STAT_PORT_EN_REG);

	/* Port 0 length register */
	writel(PKTSIZE_ALIGN, port0->port_base + AM65_CPSW_PN_RX_MAXLEN_REG);

	/* set base flow_id */
	dma_get_cfg(&common->dma_rx, 0, (void **)&dma_rx_cfg_data);
	writel(dma_rx_cfg_data->flow_id_base,
	       port0->port_base + AM65_CPSW_P0_FLOW_ID_REG);
	dev_info(dev, "K3 CPSW: rflow_id_base: %u\n",
		 dma_rx_cfg_data->flow_id_base);

	/* Reset and enable the ALE */
	writel(AM65_CPSW_ALE_CTL_REG_ENABLE | AM65_CPSW_ALE_CTL_REG_RESET_TBL |
	       AM65_CPSW_ALE_CTL_REG_BYPASS,
	       common->ale_base + AM65_CPSW_ALE_CTL_REG);

	/* port 0 put into forward mode */
	writel(AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD,
	       common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));

	writel(AM65_CPSW_ALE_DEFTHREAD_EN,
	       common->ale_base + AM65_CPSW_ALE_THREADMAPDEF_REG);

	/* PORT x configuration */

	/* Port x Max length register */
	writel(PKTSIZE_ALIGN, port->port_base + AM65_CPSW_PN_RX_MAXLEN_REG);

	/* Port x set mac */
	am65_cpsw_set_sl_mac(port, pdata->enetaddr);

	/* Port x ALE: mac_only, Forwarding */
	writel(AM65_CPSW_ALE_PN_CTL_REG_MAC_ONLY |
	       AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD,
	       common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));

	port->mac_control = 0;
	if (!am65_cpsw_macsl_reset(port)) {
		dev_err(dev, "mac_sl reset failed\n");
		ret = -EFAULT;
		goto err_dis_rx;
	}

	/* SGMII ports auto-negotiate with the link partner */
	if (priv->phydev->interface == PHY_INTERFACE_MODE_SGMII) {
		writel(ADVERTISE_SGMII,
		       port->port_sgmii_base + AM65_CPSW_SGMII_MR_ADV_ABILITY_REG);
		writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
		       port->port_sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
	}

	ret = phy_startup(priv->phydev);
	if (ret) {
		dev_err(dev, "phy_startup failed\n");
		goto err_dis_rx;
	}

	/* am65_cpsw_update_link() returns the link state; no link is fatal */
	ret = am65_cpsw_update_link(priv);
	if (!ret) {
		ret = -ENODEV;
		goto err_phy_shutdown;
	}

	common->started = true;

	return 0;

err_phy_shutdown:
	phy_shutdown(priv->phydev);
err_dis_rx:
	/* disable ports */
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));
	if (!am65_cpsw_macsl_wait_for_idle(port))
		dev_err(dev, "mac_sl idle timeout\n");
	writel(0, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
	writel(0, common->ale_base + AM65_CPSW_ALE_CTL_REG);
	writel(0, common->cpsw_base + AM65_CPSW_CTL_REG);

	dma_disable(&common->dma_rx);
err_dis_tx:
	dma_disable(&common->dma_tx);
err_free_rx:
	dma_free(&common->dma_rx);
err_free_tx:
	dma_free(&common->dma_tx);
err_off_clk:
	clk_disable(&common->fclk);
err_off_pwrdm:
	power_domain_off(&common->pwrdmn);
out:
	dev_err(dev, "%s end error\n", __func__);

	return ret;
}
485
486static int am65_cpsw_send(struct udevice *dev, void *packet, int length)
487{
488 struct am65_cpsw_priv *priv = dev_get_priv(dev);
489 struct am65_cpsw_common *common = priv->cpsw_common;
490 struct ti_udma_drv_packet_data packet_data;
491 int ret;
492
Matthias Schifferac70b872024-04-26 10:02:25 +0200493 if (!common->started)
494 return -ENETDOWN;
495
Keerthya00b95c2019-07-09 10:30:34 +0530496 packet_data.pkt_type = AM65_CPSW_CPPI_PKT_TYPE;
497 packet_data.dest_tag = priv->port_id;
498 ret = dma_send(&common->dma_tx, packet, length, &packet_data);
499 if (ret) {
500 dev_err(dev, "TX dma_send failed %d\n", ret);
501 return ret;
502 }
503
504 return 0;
505}
506
507static int am65_cpsw_recv(struct udevice *dev, int flags, uchar **packetp)
508{
509 struct am65_cpsw_priv *priv = dev_get_priv(dev);
510 struct am65_cpsw_common *common = priv->cpsw_common;
511
Matthias Schifferac70b872024-04-26 10:02:25 +0200512 if (!common->started)
513 return -ENETDOWN;
514
Keerthya00b95c2019-07-09 10:30:34 +0530515 /* try to receive a new packet */
516 return dma_receive(&common->dma_rx, (void **)packetp, NULL);
517}
518
519static int am65_cpsw_free_pkt(struct udevice *dev, uchar *packet, int length)
520{
521 struct am65_cpsw_priv *priv = dev_get_priv(dev);
522 struct am65_cpsw_common *common = priv->cpsw_common;
523 int ret;
524
525 if (length > 0) {
526 u32 pkt = common->rx_next % UDMA_RX_DESC_NUM;
527
528 ret = dma_prepare_rcv_buf(&common->dma_rx,
529 net_rx_packets[pkt],
530 UDMA_RX_BUF_SIZE);
531 if (ret)
532 dev_err(dev, "RX dma free_pkt failed %d\n", ret);
533 common->rx_next++;
534 }
535
536 return 0;
537}
538
539static void am65_cpsw_stop(struct udevice *dev)
540{
541 struct am65_cpsw_priv *priv = dev_get_priv(dev);
542 struct am65_cpsw_common *common = priv->cpsw_common;
543 struct am65_cpsw_port *port = &common->ports[priv->port_id];
544
545 if (!common->started)
546 return;
547
548 phy_shutdown(priv->phydev);
549
550 writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));
551 writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));
552 if (!am65_cpsw_macsl_wait_for_idle(port))
553 dev_err(dev, "mac_sl idle timeout\n");
554 writel(0, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
555 writel(0, common->ale_base + AM65_CPSW_ALE_CTL_REG);
556 writel(0, common->cpsw_base + AM65_CPSW_CTL_REG);
557
558 dma_disable(&common->dma_tx);
559 dma_free(&common->dma_tx);
560
561 dma_disable(&common->dma_rx);
562 dma_free(&common->dma_rx);
563
564 common->started = false;
565}
566
Roger Quadroscb8f8ad2023-07-22 22:31:48 +0300567static int am65_cpsw_am654_get_efuse_macid(struct udevice *dev,
568 int slave, u8 *mac_addr)
569{
570 u32 mac_lo, mac_hi, offset;
571 struct regmap *syscon;
572 int ret;
573
574 syscon = syscon_regmap_lookup_by_phandle(dev, "ti,syscon-efuse");
575 if (IS_ERR(syscon)) {
576 if (PTR_ERR(syscon) == -ENODEV)
577 return 0;
578 return PTR_ERR(syscon);
579 }
580
581 ret = dev_read_u32_index(dev, "ti,syscon-efuse", 1, &offset);
582 if (ret)
583 return ret;
584
585 regmap_read(syscon, offset, &mac_lo);
586 regmap_read(syscon, offset + 4, &mac_hi);
587
588 mac_addr[0] = (mac_hi >> 8) & 0xff;
589 mac_addr[1] = mac_hi & 0xff;
590 mac_addr[2] = (mac_lo >> 24) & 0xff;
591 mac_addr[3] = (mac_lo >> 16) & 0xff;
592 mac_addr[4] = (mac_lo >> 8) & 0xff;
593 mac_addr[5] = mac_lo & 0xff;
594
595 return 0;
596}
597
Keerthya00b95c2019-07-09 10:30:34 +0530598static int am65_cpsw_read_rom_hwaddr(struct udevice *dev)
599{
600 struct am65_cpsw_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700601 struct eth_pdata *pdata = dev_get_plat(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530602
Roger Quadroscb8f8ad2023-07-22 22:31:48 +0300603 am65_cpsw_am654_get_efuse_macid(dev,
604 priv->port_id,
605 pdata->enetaddr);
Keerthya00b95c2019-07-09 10:30:34 +0530606
607 return 0;
608}
609
/* eth uclass operations for a single CPSW NUSS slave port */
static const struct eth_ops am65_cpsw_ops = {
	.start = am65_cpsw_start,
	.send = am65_cpsw_send,
	.recv = am65_cpsw_recv,
	.free_pkt = am65_cpsw_free_pkt,
	.stop = am65_cpsw_stop,
	.read_rom_hwaddr = am65_cpsw_read_rom_hwaddr,
};
618
Keerthya00b95c2019-07-09 10:30:34 +0530619static int am65_cpsw_phy_init(struct udevice *dev)
620{
621 struct am65_cpsw_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700622 struct eth_pdata *pdata = dev_get_plat(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530623 struct phy_device *phydev;
624 u32 supported = PHY_GBIT_FEATURES;
625 int ret;
626
Roger Quadrosfced6b62024-02-28 12:35:27 +0200627 phydev = dm_eth_phy_connect(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530628 if (!phydev) {
629 dev_err(dev, "phy_connect() failed\n");
630 return -ENODEV;
631 }
632
633 phydev->supported &= supported;
634 if (pdata->max_speed) {
635 ret = phy_set_supported(phydev, pdata->max_speed);
636 if (ret)
637 return ret;
638 }
639 phydev->advertising = phydev->supported;
640
Keerthya00b95c2019-07-09 10:30:34 +0530641 priv->phydev = phydev;
642 ret = phy_config(phydev);
643 if (ret < 0)
Roger Quadrosfced6b62024-02-28 12:35:27 +0200644 dev_err(dev, "phy_config() failed: %d", ret);
Keerthya00b95c2019-07-09 10:30:34 +0530645
646 return ret;
647}
648
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530649static int am65_cpsw_ofdata_parse_phy(struct udevice *dev)
Keerthya00b95c2019-07-09 10:30:34 +0530650{
Simon Glassfa20e932020-12-03 16:55:20 -0700651 struct eth_pdata *pdata = dev_get_plat(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530652 struct am65_cpsw_priv *priv = dev_get_priv(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530653
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530654 dev_read_u32(dev, "reg", &priv->port_id);
655
Marek Behúnbc194772022-04-07 00:33:01 +0200656 pdata->phy_interface = dev_read_phy_mode(dev);
Marek Behún48631e42022-04-07 00:33:03 +0200657 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA) {
Marek Behúnbc194772022-04-07 00:33:01 +0200658 dev_err(dev, "Invalid PHY mode, port %u\n", priv->port_id);
659 return -EINVAL;
Keerthya00b95c2019-07-09 10:30:34 +0530660 }
661
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530662 dev_read_u32(dev, "max-speed", (u32 *)&pdata->max_speed);
Keerthya00b95c2019-07-09 10:30:34 +0530663 if (pdata->max_speed)
664 dev_err(dev, "Port %u speed froced to %uMbit\n",
665 priv->port_id, pdata->max_speed);
666
Roger Quadrosfced6b62024-02-28 12:35:27 +0200667 return 0;
Keerthya00b95c2019-07-09 10:30:34 +0530668}
669
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530670static int am65_cpsw_port_probe(struct udevice *dev)
Keerthya00b95c2019-07-09 10:30:34 +0530671{
672 struct am65_cpsw_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700673 struct eth_pdata *pdata = dev_get_plat(dev);
Keerthya00b95c2019-07-09 10:30:34 +0530674 struct am65_cpsw_common *cpsw_common;
Michael Walle2e9cacb2024-04-03 16:31:55 +0200675 char portname[32];
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530676 int ret;
Keerthya00b95c2019-07-09 10:30:34 +0530677
678 priv->dev = dev;
679
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530680 cpsw_common = dev_get_priv(dev->parent);
Keerthya00b95c2019-07-09 10:30:34 +0530681 priv->cpsw_common = cpsw_common;
682
Michael Walle2e9cacb2024-04-03 16:31:55 +0200683 snprintf(portname, sizeof(portname), "%s%s", dev->parent->name, dev->name);
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530684 device_set_name(dev, portname);
685
686 ret = am65_cpsw_ofdata_parse_phy(dev);
687 if (ret)
688 goto out;
689
Roger Quadrosbe0619b2023-07-22 22:31:49 +0300690 ret = am65_cpsw_gmii_sel_k3(priv, pdata->phy_interface);
691 if (ret)
692 goto out;
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530693
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530694 ret = am65_cpsw_phy_init(dev);
Roger Quadrosfced6b62024-02-28 12:35:27 +0200695
Vignesh Raghavendrabbedbbb2021-12-24 12:55:30 +0530696out:
697 return ret;
698}
699
/*
 * Probe the CPSW NUSS subsystem (the UCLASS_MISC parent): map register
 * regions, acquire power domain and functional clock, walk the DT
 * "ethernet-ports" children and bind one am65_cpsw_nuss_port eth device
 * per enabled, non-zero port, then pre-compute each port's register
 * bases.
 *
 * Returns 0 on success or a negative error code.
 */
static int am65_cpsw_probe_nuss(struct udevice *dev)
{
	struct am65_cpsw_common *cpsw_common = dev_get_priv(dev);
	ofnode ports_np, node;
	int ret, i;
	struct udevice *port_dev;

	cpsw_common->dev = dev;
	cpsw_common->ss_base = dev_read_addr(dev);
	if (cpsw_common->ss_base == FDT_ADDR_T_NONE)
		return -EINVAL;

	ret = power_domain_get_by_index(dev, &cpsw_common->pwrdmn, 0);
	if (ret) {
		dev_err(dev, "failed to get pwrdmn: %d\n", ret);
		return ret;
	}

	ret = clk_get_by_name(dev, "fck", &cpsw_common->fclk);
	if (ret) {
		power_domain_free(&cpsw_common->pwrdmn);
		dev_err(dev, "failed to get clock %d\n", ret);
		return ret;
	}

	/* fixed offsets of the switch core and ALE within the subsystem */
	cpsw_common->cpsw_base = cpsw_common->ss_base + AM65_CPSW_CPSW_NU_BASE;
	cpsw_common->ale_base = cpsw_common->cpsw_base +
				AM65_CPSW_CPSW_NU_ALE_BASE;

	ports_np = dev_read_subnode(dev, "ethernet-ports");
	if (!ofnode_valid(ports_np)) {
		ret = -ENOENT;
		goto out;
	}

	ofnode_for_each_subnode(node, ports_np) {
		const char *node_name;
		u32 port_id;
		bool disabled;

		node_name = ofnode_get_name(node);

		disabled = !ofnode_is_enabled(node);

		ret = ofnode_read_u32(node, "reg", &port_id);
		if (ret) {
			dev_err(dev, "%s: failed to get port_id (%d)\n",
				node_name, ret);
			goto out;
		}

		if (port_id >= AM65_CPSW_CPSWNU_MAX_PORTS) {
			dev_err(dev, "%s: invalid port_id (%d)\n",
				node_name, port_id);
			ret = -EINVAL;
			goto out;
		}
		cpsw_common->port_num++;

		/* port 0 is the host port: counted, but no eth device */
		if (!port_id)
			continue;

		cpsw_common->ports[port_id].disabled = disabled;
		if (disabled)
			continue;

		ret = device_bind_driver_to_node(dev, "am65_cpsw_nuss_port", ofnode_get_name(node), node, &port_dev);
		if (ret)
			dev_err(dev, "Failed to bind to %s node\n", ofnode_get_name(node));
	}

	/* pre-compute per-port register bases (including unused slots) */
	for (i = 0; i < AM65_CPSW_CPSWNU_MAX_PORTS; i++) {
		struct am65_cpsw_port *port = &cpsw_common->ports[i];

		port->port_base = cpsw_common->cpsw_base +
				  AM65_CPSW_CPSW_NU_PORTS_OFFSET +
				  (i * AM65_CPSW_CPSW_NU_PORTS_OFFSET);
		port->port_sgmii_base = cpsw_common->ss_base +
					(i * AM65_CPSW_SGMII_BASE);
		port->macsl_base = port->port_base +
				   AM65_CPSW_CPSW_NU_PORT_MACSL_OFFSET;
	}

	cpsw_common->bus_freq =
			dev_read_u32_default(dev, "bus_freq",
					     AM65_CPSW_MDIO_BUS_FREQ_DEF);

	dev_info(dev, "K3 CPSW: nuss_ver: 0x%08X cpsw_ver: 0x%08X ale_ver: 0x%08X Ports:%u\n",
		 readl(cpsw_common->ss_base),
		 readl(cpsw_common->cpsw_base),
		 readl(cpsw_common->ale_base),
		 cpsw_common->port_num);

out:
	/*
	 * NOTE(review): the power-domain handle is freed on the success path
	 * as well, yet am65_cpsw_start() later calls power_domain_on() on
	 * the same handle — confirm this is intentional.
	 */
	power_domain_free(&cpsw_common->pwrdmn);
	return ret;
}
797
/* DT match table: all K3 SoCs carrying the CPSW NUSS subsystem */
static const struct udevice_id am65_cpsw_nuss_ids[] = {
	{ .compatible = "ti,am654-cpsw-nuss" },
	{ .compatible = "ti,j721e-cpsw-nuss" },
	{ .compatible = "ti,am642-cpsw-nuss" },
	{ }
};
804
/* Top-level subsystem driver; binds one child eth device per DT port node */
U_BOOT_DRIVER(am65_cpsw_nuss) = {
	.name = "am65_cpsw_nuss",
	.id = UCLASS_MISC,
	.of_match = am65_cpsw_nuss_ids,
	.probe = am65_cpsw_probe_nuss,
	.priv_auto = sizeof(struct am65_cpsw_common),
};
812
/* Per-port eth driver; bound by the parent, so no of_match table here */
U_BOOT_DRIVER(am65_cpsw_nuss_port) = {
	.name = "am65_cpsw_nuss_port",
	.id = UCLASS_ETH,
	.probe = am65_cpsw_port_probe,
	.ops = &am65_cpsw_ops,
	.priv_auto = sizeof(struct am65_cpsw_priv),
	.plat_auto = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA | DM_FLAG_OS_PREPARE,
};