// SPDX-License-Identifier: GPL-2.0+
/*
 * Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
 *
 * Copyright (C) 2019, Texas Instruments, Incorporated
 *
 */

#include <common.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <clk.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/lists.h>
#include <dma-uclass.h>
#include <dm/of_access.h>
#include <miiphy.h>
#include <net.h>
#include <phy.h>
#include <power-domain.h>
#include <soc.h>
#include <linux/bitops.h>
#include <linux/soc/ti/ti-udma.h>

#include "cpsw_mdio.h"

#define AM65_CPSW_CPSWNU_MAX_PORTS		9

#define AM65_CPSW_SS_BASE			0x0
#define AM65_CPSW_SGMII_BASE			0x100
#define AM65_CPSW_MDIO_BASE			0xf00
#define AM65_CPSW_XGMII_BASE			0x2100
#define AM65_CPSW_CPSW_NU_BASE			0x20000
#define AM65_CPSW_CPSW_NU_ALE_BASE		0x1e000

#define AM65_CPSW_CPSW_NU_PORTS_OFFSET		0x1000
#define AM65_CPSW_CPSW_NU_PORT_MACSL_OFFSET	0x330

#define AM65_CPSW_MDIO_BUS_FREQ_DEF		1000000

#define AM65_CPSW_CTL_REG			0x4
#define AM65_CPSW_STAT_PORT_EN_REG		0x14
#define AM65_CPSW_PTYPE_REG			0x18

#define AM65_CPSW_CTL_REG_P0_ENABLE		BIT(2)
#define AM65_CPSW_CTL_REG_P0_TX_CRC_REMOVE	BIT(13)
#define AM65_CPSW_CTL_REG_P0_RX_PAD		BIT(14)

#define AM65_CPSW_P0_FLOW_ID_REG		0x8
#define AM65_CPSW_PN_RX_MAXLEN_REG		0x24
#define AM65_CPSW_PN_REG_SA_L			0x308
#define AM65_CPSW_PN_REG_SA_H			0x30c

#define AM65_CPSW_ALE_CTL_REG			0x8
#define AM65_CPSW_ALE_CTL_REG_ENABLE		BIT(31)
#define AM65_CPSW_ALE_CTL_REG_RESET_TBL		BIT(30)
#define AM65_CPSW_ALE_CTL_REG_BYPASS		BIT(4)
#define AM65_CPSW_ALE_PN_CTL_REG(x)		(0x40 + (x) * 4)
#define AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD	0x3
#define AM65_CPSW_ALE_PN_CTL_REG_MAC_ONLY	BIT(11)

#define AM65_CPSW_ALE_THREADMAPDEF_REG		0x134
#define AM65_CPSW_ALE_DEFTHREAD_EN		BIT(15)

#define AM65_CPSW_MACSL_CTL_REG			0x0
#define AM65_CPSW_MACSL_CTL_REG_IFCTL_A		BIT(15)
#define AM65_CPSW_MACSL_CTL_EXT_EN		BIT(18)
#define AM65_CPSW_MACSL_CTL_REG_GIG		BIT(7)
#define AM65_CPSW_MACSL_CTL_REG_GMII_EN		BIT(5)
#define AM65_CPSW_MACSL_CTL_REG_LOOPBACK	BIT(1)
#define AM65_CPSW_MACSL_CTL_REG_FULL_DUPLEX	BIT(0)
#define AM65_CPSW_MACSL_RESET_REG		0x8
#define AM65_CPSW_MACSL_RESET_REG_RESET		BIT(0)
#define AM65_CPSW_MACSL_STATUS_REG		0x4
#define AM65_CPSW_MACSL_RESET_REG_PN_IDLE	BIT(31)
#define AM65_CPSW_MACSL_RESET_REG_PN_E_IDLE	BIT(30)
#define AM65_CPSW_MACSL_RESET_REG_PN_P_IDLE	BIT(29)
#define AM65_CPSW_MACSL_RESET_REG_PN_TX_IDLE	BIT(28)
#define AM65_CPSW_MACSL_RESET_REG_IDLE_MASK	\
	(AM65_CPSW_MACSL_RESET_REG_PN_IDLE | \
	 AM65_CPSW_MACSL_RESET_REG_PN_E_IDLE | \
	 AM65_CPSW_MACSL_RESET_REG_PN_P_IDLE | \
	 AM65_CPSW_MACSL_RESET_REG_PN_TX_IDLE)

#define AM65_CPSW_CPPI_PKT_TYPE			0x7

struct am65_cpsw_port {
	fdt_addr_t port_base;
	fdt_addr_t macsl_base;
	bool disabled;
	u32 mac_control;
};

struct am65_cpsw_common {
	struct udevice *dev;
	fdt_addr_t ss_base;
	fdt_addr_t cpsw_base;
	fdt_addr_t mdio_base;
	fdt_addr_t ale_base;
	fdt_addr_t gmii_sel;
	fdt_addr_t mac_efuse;

	struct clk fclk;
	struct power_domain pwrdmn;

	u32 port_num;
	struct am65_cpsw_port ports[AM65_CPSW_CPSWNU_MAX_PORTS];

	struct mii_dev *bus;
	u32 bus_freq;

	struct dma dma_tx;
	struct dma dma_rx;
	u32 rx_next;
	u32 rx_pend;
	bool started;
};

struct am65_cpsw_priv {
	struct udevice *dev;
	struct am65_cpsw_common *cpsw_common;
	u32 port_id;

	struct phy_device *phydev;
	bool has_phy;
	ofnode phy_node;
	u32 phy_addr;

	bool mdio_manual_mode;
};

#ifdef PKTSIZE_ALIGN
#define UDMA_RX_BUF_SIZE PKTSIZE_ALIGN
#else
#define UDMA_RX_BUF_SIZE ALIGN(1522, ARCH_DMA_MINALIGN)
#endif

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM 4
#endif

#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))

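/* Program the port's source MAC address into its SA_H/SA_L registers */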
static void am65_cpsw_set_sl_mac(struct am65_cpsw_port *slave,
				 unsigned char *addr)
{
	writel(mac_hi(addr),
	       slave->port_base + AM65_CPSW_PN_REG_SA_H);
	writel(mac_lo(addr),
	       slave->port_base + AM65_CPSW_PN_REG_SA_L);
}

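/*
 * Soft-reset the port's MAC SL and poll for the reset bit to self-clear.
 * Returns 0 if the reset did not complete in time.
 */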
int am65_cpsw_macsl_reset(struct am65_cpsw_port *slave)
{
	u32 i = 100;

	/* Set the soft reset bit */
	writel(AM65_CPSW_MACSL_RESET_REG_RESET,
	       slave->macsl_base + AM65_CPSW_MACSL_RESET_REG);

	/* Wait for the reset bit to self-clear */
	while ((readl(slave->macsl_base + AM65_CPSW_MACSL_RESET_REG) &
		AM65_CPSW_MACSL_RESET_REG_RESET) && --i)
		cpu_relax();

	/* 0 means the reset timed out */
	return i;
}

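/*
 * Poll the MAC SL status register until its TX/RX state machines report
 * idle. Returns 0 if the wait timed out.
 */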
static int am65_cpsw_macsl_wait_for_idle(struct am65_cpsw_port *slave)
{
	u32 i = 100;

	while ((readl(slave->macsl_base + AM65_CPSW_MACSL_STATUS_REG) &
		AM65_CPSW_MACSL_RESET_REG_IDLE_MASK) && --i)
		cpu_relax();

	/* 0 means the wait timed out */
	return i;
}

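/*
 * Translate the current PHY state (link/speed/duplex) into a MAC SL
 * control value and write it out when it changes. Returns the link state.
 */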
static int am65_cpsw_update_link(struct am65_cpsw_priv *priv)
{
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct am65_cpsw_port *port = &common->ports[priv->port_id];
	struct phy_device *phy = priv->phydev;
	u32 mac_control = 0;

	if (phy->link) { /* link up */
		mac_control = /*AM65_CPSW_MACSL_CTL_REG_LOOPBACK |*/
			      AM65_CPSW_MACSL_CTL_REG_GMII_EN;
		if (phy->speed == 1000)
			mac_control |= AM65_CPSW_MACSL_CTL_REG_GIG;
		if (phy->speed == 10 && phy_interface_is_rgmii(phy))
			/* Can be used with in band mode only */
			mac_control |= AM65_CPSW_MACSL_CTL_EXT_EN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= AM65_CPSW_MACSL_CTL_REG_FULL_DUPLEX;
		if (phy->speed == 100)
			mac_control |= AM65_CPSW_MACSL_CTL_REG_IFCTL_A;
	}

	if (mac_control == port->mac_control)
		goto out;

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
		       priv->port_id, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", priv->port_id);
	}

	writel(mac_control, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
	port->mac_control = mac_control;

out:
	return phy->link;
}

#define AM65_GMII_SEL_PORT_OFFS(x)	(0x4 * ((x) - 1))

#define AM65_GMII_SEL_MODE_MII		0
#define AM65_GMII_SEL_MODE_RMII		1
#define AM65_GMII_SEL_MODE_RGMII	2

#define AM65_GMII_SEL_RGMII_IDMODE	BIT(4)

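/*
 * Program the gmii_sel register of this port so the MAC is muxed to the
 * PHY interface mode given in the device tree, then read it back to
 * verify that the requested mode was accepted.
 */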
static void am65_cpsw_gmii_sel_k3(struct am65_cpsw_priv *priv,
				  phy_interface_t phy_mode, int slave)
{
	struct am65_cpsw_common *common = priv->cpsw_common;
	fdt_addr_t gmii_sel = common->gmii_sel + AM65_GMII_SEL_PORT_OFFS(slave);
	u32 reg;
	u32 mode = 0;
	bool rgmii_id = false;

	reg = readl(gmii_sel);

	dev_dbg(common->dev, "old gmii_sel: %08x\n", reg);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		mode = AM65_GMII_SEL_MODE_RMII;
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		mode = AM65_GMII_SEL_MODE_RGMII;
		break;

	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		mode = AM65_GMII_SEL_MODE_RGMII;
		rgmii_id = true;
		break;

	default:
		dev_warn(common->dev,
			 "Unsupported PHY mode: %u. Defaulting to MII.\n",
			 phy_mode);
		/* fallthrough */
	case PHY_INTERFACE_MODE_MII:
		mode = AM65_GMII_SEL_MODE_MII;
		break;
	}

	if (rgmii_id)
		mode |= AM65_GMII_SEL_RGMII_IDMODE;

	reg = mode;
	dev_dbg(common->dev, "gmii_sel PHY mode: %u, new gmii_sel: %08x\n",
		phy_mode, reg);
	writel(reg, gmii_sel);

	reg = readl(gmii_sel);
	if (reg != mode)
		dev_err(common->dev,
			"gmii_sel PHY mode NOT SET!: requested: %08x, gmii_sel: %08x\n",
			mode, reg);
}

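/*
 * eth_ops.start: power on and clock the subsystem, set up the TX/RX DMA
 * channels, configure host port 0 and this slave port (ALE forwarding,
 * MAC address, RX max length), then bring up the PHY and the MAC SL.
 */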
static int am65_cpsw_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct am65_cpsw_port *port = &common->ports[priv->port_id];
	struct am65_cpsw_port *port0 = &common->ports[0];
	struct ti_udma_drv_chan_cfg_data *dma_rx_cfg_data;
	int ret, i;

	ret = power_domain_on(&common->pwrdmn);
	if (ret) {
		dev_err(dev, "power_domain_on() failed %d\n", ret);
		goto out;
	}

	ret = clk_enable(&common->fclk);
	if (ret) {
		dev_err(dev, "clk enable failed %d\n", ret);
		goto err_off_pwrdm;
	}

	common->rx_next = 0;
	common->rx_pend = 0;
	ret = dma_get_by_name(common->dev, "tx0", &common->dma_tx);
	if (ret) {
		dev_err(dev, "TX dma get failed %d\n", ret);
		goto err_off_clk;
	}
	ret = dma_get_by_name(common->dev, "rx", &common->dma_rx);
	if (ret) {
		dev_err(dev, "RX dma get failed %d\n", ret);
		goto err_free_tx;
	}

	for (i = 0; i < UDMA_RX_DESC_NUM; i++) {
		ret = dma_prepare_rcv_buf(&common->dma_rx,
					  net_rx_packets[i],
					  UDMA_RX_BUF_SIZE);
		if (ret) {
			dev_err(dev, "RX dma add buf failed %d\n", ret);
			goto err_free_tx;
		}
	}

	ret = dma_enable(&common->dma_tx);
	if (ret) {
		dev_err(dev, "TX dma_enable failed %d\n", ret);
		goto err_free_rx;
	}
	ret = dma_enable(&common->dma_rx);
	if (ret) {
		dev_err(dev, "RX dma_enable failed %d\n", ret);
		goto err_dis_tx;
	}

	/* Control register */
	writel(AM65_CPSW_CTL_REG_P0_ENABLE |
	       AM65_CPSW_CTL_REG_P0_TX_CRC_REMOVE |
	       AM65_CPSW_CTL_REG_P0_RX_PAD,
	       common->cpsw_base + AM65_CPSW_CTL_REG);

	/* disable priority elevation */
	writel(0, common->cpsw_base + AM65_CPSW_PTYPE_REG);

	/* enable statistics */
	writel(BIT(0) | BIT(priv->port_id),
	       common->cpsw_base + AM65_CPSW_STAT_PORT_EN_REG);

	/* Port 0 length register */
	writel(PKTSIZE_ALIGN, port0->port_base + AM65_CPSW_PN_RX_MAXLEN_REG);

	/* set base flow_id */
	dma_get_cfg(&common->dma_rx, 0, (void **)&dma_rx_cfg_data);
	writel(dma_rx_cfg_data->flow_id_base,
	       port0->port_base + AM65_CPSW_P0_FLOW_ID_REG);
	dev_info(dev, "K3 CPSW: rflow_id_base: %u\n",
		 dma_rx_cfg_data->flow_id_base);

	/* Reset and enable the ALE */
	writel(AM65_CPSW_ALE_CTL_REG_ENABLE | AM65_CPSW_ALE_CTL_REG_RESET_TBL |
	       AM65_CPSW_ALE_CTL_REG_BYPASS,
	       common->ale_base + AM65_CPSW_ALE_CTL_REG);

	/* port 0 put into forward mode */
	writel(AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD,
	       common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));

	writel(AM65_CPSW_ALE_DEFTHREAD_EN,
	       common->ale_base + AM65_CPSW_ALE_THREADMAPDEF_REG);

	/* PORT x configuration */

	/* Port x Max length register */
	writel(PKTSIZE_ALIGN, port->port_base + AM65_CPSW_PN_RX_MAXLEN_REG);

	/* Port x set mac */
	am65_cpsw_set_sl_mac(port, pdata->enetaddr);

	/* Port x ALE: mac_only, Forwarding */
	writel(AM65_CPSW_ALE_PN_CTL_REG_MAC_ONLY |
	       AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD,
	       common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));

	port->mac_control = 0;
	if (!am65_cpsw_macsl_reset(port)) {
		dev_err(dev, "mac_sl reset failed\n");
		ret = -EFAULT;
		goto err_dis_rx;
	}

	ret = phy_startup(priv->phydev);
	if (ret) {
		dev_err(dev, "phy_startup failed\n");
		goto err_dis_rx;
	}

	ret = am65_cpsw_update_link(priv);
	if (!ret) {
		ret = -ENODEV;
		goto err_phy_shutdown;
	}

	common->started = true;

	return 0;

err_phy_shutdown:
	phy_shutdown(priv->phydev);
err_dis_rx:
	/* disable ports */
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));
	if (!am65_cpsw_macsl_wait_for_idle(port))
		dev_err(dev, "mac_sl idle timeout\n");
	writel(0, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
	writel(0, common->ale_base + AM65_CPSW_ALE_CTL_REG);
	writel(0, common->cpsw_base + AM65_CPSW_CTL_REG);

	dma_disable(&common->dma_rx);
err_dis_tx:
	dma_disable(&common->dma_tx);
err_free_rx:
	dma_free(&common->dma_rx);
err_free_tx:
	dma_free(&common->dma_tx);
err_off_clk:
	clk_disable(&common->fclk);
err_off_pwrdm:
	power_domain_off(&common->pwrdmn);
out:
	dev_err(dev, "%s end error\n", __func__);

	return ret;
}

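/* eth_ops.send: queue one packet on the TX DMA channel, tagged with the port ID */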
static int am65_cpsw_send(struct udevice *dev, void *packet, int length)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct ti_udma_drv_packet_data packet_data;
	int ret;

	packet_data.pkt_type = AM65_CPSW_CPPI_PKT_TYPE;
	packet_data.dest_tag = priv->port_id;
	ret = dma_send(&common->dma_tx, packet, length, &packet_data);
	if (ret) {
		dev_err(dev, "TX dma_send failed %d\n", ret);
		return ret;
	}

	return 0;
}

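/* eth_ops.recv: poll the RX DMA channel for a completed packet */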
static int am65_cpsw_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;

	/* try to receive a new packet */
	return dma_receive(&common->dma_rx, (void **)packetp, NULL);
}

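/* eth_ops.free_pkt: hand the consumed RX buffer back to the DMA ring */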
static int am65_cpsw_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;
	int ret;

	if (length > 0) {
		u32 pkt = common->rx_next % UDMA_RX_DESC_NUM;

		ret = dma_prepare_rcv_buf(&common->dma_rx,
					  net_rx_packets[pkt],
					  UDMA_RX_BUF_SIZE);
		if (ret)
			dev_err(dev, "RX dma free_pkt failed %d\n", ret);
		common->rx_next++;
	}

	return 0;
}

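/*
 * eth_ops.stop: shut down the PHY, put the ALE, port and MAC SL back into
 * their disabled state and tear down the DMA channels.
 */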
static void am65_cpsw_stop(struct udevice *dev)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct am65_cpsw_port *port = &common->ports[priv->port_id];

	if (!common->started)
		return;

	phy_shutdown(priv->phydev);

	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));
	writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));
	if (!am65_cpsw_macsl_wait_for_idle(port))
		dev_err(dev, "mac_sl idle timeout\n");
	writel(0, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
	writel(0, common->ale_base + AM65_CPSW_ALE_CTL_REG);
	writel(0, common->cpsw_base + AM65_CPSW_CTL_REG);

	dma_disable(&common->dma_tx);
	dma_free(&common->dma_tx);

	dma_disable(&common->dma_rx);
	dma_free(&common->dma_rx);

	common->started = false;
}

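/* eth_ops.read_rom_hwaddr: recover the factory MAC address from the efuse registers */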
static int am65_cpsw_read_rom_hwaddr(struct udevice *dev)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *common = priv->cpsw_common;
	struct eth_pdata *pdata = dev_get_plat(dev);
	u32 mac_hi, mac_lo;

	if (common->mac_efuse == FDT_ADDR_T_NONE)
		return -1;

	mac_lo = readl(common->mac_efuse);
	mac_hi = readl(common->mac_efuse + 4);
	pdata->enetaddr[0] = (mac_hi >> 8) & 0xff;
	pdata->enetaddr[1] = mac_hi & 0xff;
	pdata->enetaddr[2] = (mac_lo >> 24) & 0xff;
	pdata->enetaddr[3] = (mac_lo >> 16) & 0xff;
	pdata->enetaddr[4] = (mac_lo >> 8) & 0xff;
	pdata->enetaddr[5] = mac_lo & 0xff;

	return 0;
}

static const struct eth_ops am65_cpsw_ops = {
	.start		= am65_cpsw_start,
	.send		= am65_cpsw_send,
	.recv		= am65_cpsw_recv,
	.free_pkt	= am65_cpsw_free_pkt,
	.stop		= am65_cpsw_stop,
	.read_rom_hwaddr = am65_cpsw_read_rom_hwaddr,
};

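/* SoCs on which the CPSW MDIO bus is driven in manual mode (passed to cpsw_mdio_init()) */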
static const struct soc_attr k3_mdio_soc_data[] = {
	{ .family = "AM62X", .revision = "SR1.0" },
	{ .family = "AM64X", .revision = "SR1.0" },
	{ .family = "AM64X", .revision = "SR2.0" },
	{ .family = "AM65X", .revision = "SR1.0" },
	{ .family = "AM65X", .revision = "SR2.0" },
	{ .family = "J7200", .revision = "SR1.0" },
	{ .family = "J7200", .revision = "SR2.0" },
	{ .family = "J721E", .revision = "SR1.0" },
	{ .family = "J721E", .revision = "SR1.1" },
	{ .family = "J721S2", .revision = "SR1.0" },
	{ /* sentinel */ },
};

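/* Create the shared MDIO bus once, on the first port that has a PHY attached */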
static int am65_cpsw_mdio_init(struct udevice *dev)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *cpsw_common = priv->cpsw_common;

	if (!priv->has_phy || cpsw_common->bus)
		return 0;

	cpsw_common->bus = cpsw_mdio_init(dev->name,
					  cpsw_common->mdio_base,
					  cpsw_common->bus_freq,
					  clk_get_rate(&cpsw_common->fclk),
					  priv->mdio_manual_mode);
	if (!cpsw_common->bus)
		return -EFAULT;

	return 0;
}

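/*
 * Connect to the port's PHY, clamp the advertised features to the
 * supported/max speed and run phy_config().
 */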
static int am65_cpsw_phy_init(struct udevice *dev)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct am65_cpsw_common *cpsw_common = priv->cpsw_common;
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct phy_device *phydev;
	u32 supported = PHY_GBIT_FEATURES;
	int ret;

	phydev = phy_connect(cpsw_common->bus,
			     priv->phy_addr,
			     priv->dev,
			     pdata->phy_interface);

	if (!phydev) {
		dev_err(dev, "phy_connect() failed\n");
		return -ENODEV;
	}

	phydev->supported &= supported;
	if (pdata->max_speed) {
		ret = phy_set_supported(phydev, pdata->max_speed);
		if (ret)
			return ret;
	}
	phydev->advertising = phydev->supported;

	if (ofnode_valid(priv->phy_node))
		phydev->node = priv->phy_node;

	priv->phydev = phydev;
	ret = phy_config(phydev);
	if (ret < 0)
		pr_err("phy_config() failed: %d\n", ret);

	return ret;
}

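/*
 * Parse the port DT node: port number ("reg"), PHY interface mode,
 * optional "max-speed" limit and the PHY address behind "phy-handle".
 */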
static int am65_cpsw_ofdata_parse_phy(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args out_args;
	int ret = 0;

	dev_read_u32(dev, "reg", &priv->port_id);

	pdata->phy_interface = dev_read_phy_mode(dev);
	if (pdata->phy_interface == PHY_INTERFACE_MODE_NA) {
		dev_err(dev, "Invalid PHY mode, port %u\n", priv->port_id);
		return -EINVAL;
	}

	dev_read_u32(dev, "max-speed", (u32 *)&pdata->max_speed);
	if (pdata->max_speed)
		dev_err(dev, "Port %u speed forced to %uMbit\n",
			priv->port_id, pdata->max_speed);

	priv->has_phy = true;
	ret = ofnode_parse_phandle_with_args(dev_ofnode(dev), "phy-handle",
					     NULL, 0, 0, &out_args);
	if (ret) {
		dev_err(dev, "can't parse phy-handle port %u (%d)\n",
			priv->port_id, ret);
		priv->has_phy = false;
		ret = 0;
	}

	priv->phy_node = out_args.node;
	if (priv->has_phy) {
		ret = ofnode_read_u32(priv->phy_node, "reg", &priv->phy_addr);
		if (ret) {
			dev_err(dev, "failed to get phy_addr port %u (%d)\n",
				priv->port_id, ret);
			goto out;
		}
	}

out:
	return ret;
}

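/*
 * Per-port (UCLASS_ETH) probe: rename the device after its parent and
 * port node, parse the PHY description, program gmii_sel and set up the
 * MDIO bus and PHY.
 */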
static int am65_cpsw_port_probe(struct udevice *dev)
{
	struct am65_cpsw_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct am65_cpsw_common *cpsw_common;
	char portname[15];
	int ret;

	priv->dev = dev;

	cpsw_common = dev_get_priv(dev->parent);
	priv->cpsw_common = cpsw_common;

	sprintf(portname, "%s%s", dev->parent->name, dev->name);
	device_set_name(dev, portname);

	priv->mdio_manual_mode = false;
	if (soc_device_match(k3_mdio_soc_data))
		priv->mdio_manual_mode = true;

	ret = am65_cpsw_ofdata_parse_phy(dev);
	if (ret)
		goto out;

	am65_cpsw_gmii_sel_k3(priv, pdata->phy_interface, priv->port_id);

	ret = am65_cpsw_mdio_init(dev);
	if (ret)
		goto out;

	ret = am65_cpsw_phy_init(dev);
	if (ret)
		goto out;
out:
	return ret;
}

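/*
 * Top-level (UCLASS_MISC) probe: map the subsystem regions, walk the
 * "ethernet-ports" subnodes and bind an ethernet device to every enabled
 * slave port, then pick up the gmii_sel base from the "cpsw-phy-sel"
 * subnode. Illustrative DT layout (addresses, node names and PHY details
 * are board-specific and shown here only as an example):
 *
 *	ethernet@46000000 {
 *		compatible = "ti,am654-cpsw-nuss";
 *		...
 *		ethernet-ports {
 *			port@1 {
 *				reg = <1>;
 *				phy-mode = "rgmii-rxid";
 *				phy-handle = <&ethphy0>;
 *			};
 *		};
 *		cpsw-phy-sel {
 *			...
 *		};
 *	};
 */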
static int am65_cpsw_probe_nuss(struct udevice *dev)
{
	struct am65_cpsw_common *cpsw_common = dev_get_priv(dev);
	ofnode ports_np, node;
	int ret, i;
	struct udevice *port_dev;

	cpsw_common->dev = dev;
	cpsw_common->ss_base = dev_read_addr(dev);
	if (cpsw_common->ss_base == FDT_ADDR_T_NONE)
		return -EINVAL;
	cpsw_common->mac_efuse = devfdt_get_addr_name(dev, "mac_efuse");
	/* no err check - optional */

	ret = power_domain_get_by_index(dev, &cpsw_common->pwrdmn, 0);
	if (ret) {
		dev_err(dev, "failed to get pwrdmn: %d\n", ret);
		return ret;
	}

	ret = clk_get_by_name(dev, "fck", &cpsw_common->fclk);
	if (ret) {
		power_domain_free(&cpsw_common->pwrdmn);
		dev_err(dev, "failed to get clock %d\n", ret);
		return ret;
	}

	cpsw_common->cpsw_base = cpsw_common->ss_base + AM65_CPSW_CPSW_NU_BASE;
	cpsw_common->ale_base = cpsw_common->cpsw_base +
				AM65_CPSW_CPSW_NU_ALE_BASE;
	cpsw_common->mdio_base = cpsw_common->ss_base + AM65_CPSW_MDIO_BASE;

	ports_np = dev_read_subnode(dev, "ethernet-ports");
	if (!ofnode_valid(ports_np)) {
		ret = -ENOENT;
		goto out;
	}

	ofnode_for_each_subnode(node, ports_np) {
		const char *node_name;
		u32 port_id;
		bool disabled;

		node_name = ofnode_get_name(node);

		disabled = !ofnode_is_enabled(node);

		ret = ofnode_read_u32(node, "reg", &port_id);
		if (ret) {
			dev_err(dev, "%s: failed to get port_id (%d)\n",
				node_name, ret);
			goto out;
		}

		if (port_id >= AM65_CPSW_CPSWNU_MAX_PORTS) {
			dev_err(dev, "%s: invalid port_id (%d)\n",
				node_name, port_id);
			ret = -EINVAL;
			goto out;
		}
		cpsw_common->port_num++;

		if (!port_id)
			continue;

		cpsw_common->ports[port_id].disabled = disabled;
		if (disabled)
			continue;

		ret = device_bind_driver_to_node(dev, "am65_cpsw_nuss_port",
						 ofnode_get_name(node), node,
						 &port_dev);
		if (ret)
			dev_err(dev, "Failed to bind to %s node\n",
				ofnode_get_name(node));
	}

	for (i = 0; i < AM65_CPSW_CPSWNU_MAX_PORTS; i++) {
		struct am65_cpsw_port *port = &cpsw_common->ports[i];

		port->port_base = cpsw_common->cpsw_base +
				  AM65_CPSW_CPSW_NU_PORTS_OFFSET +
				  (i * AM65_CPSW_CPSW_NU_PORTS_OFFSET);
		port->macsl_base = port->port_base +
				   AM65_CPSW_CPSW_NU_PORT_MACSL_OFFSET;
	}

	node = dev_read_subnode(dev, "cpsw-phy-sel");
	if (!ofnode_valid(node)) {
		dev_err(dev, "can't find cpsw-phy-sel\n");
		ret = -ENOENT;
		goto out;
	}

	cpsw_common->gmii_sel = ofnode_get_addr(node);
	if (cpsw_common->gmii_sel == FDT_ADDR_T_NONE) {
		dev_err(dev, "failed to get gmii_sel base\n");
		ret = -ENODEV;
		goto out;
	}

	cpsw_common->bus_freq =
			dev_read_u32_default(dev, "bus_freq",
					     AM65_CPSW_MDIO_BUS_FREQ_DEF);

	dev_info(dev, "K3 CPSW: nuss_ver: 0x%08X cpsw_ver: 0x%08X ale_ver: 0x%08X Ports:%u mdio_freq:%u\n",
		 readl(cpsw_common->ss_base),
		 readl(cpsw_common->cpsw_base),
		 readl(cpsw_common->ale_base),
		 cpsw_common->port_num,
		 cpsw_common->bus_freq);

out:
	clk_free(&cpsw_common->fclk);
	power_domain_free(&cpsw_common->pwrdmn);
	return ret;
}

static const struct udevice_id am65_cpsw_nuss_ids[] = {
	{ .compatible = "ti,am654-cpsw-nuss" },
	{ .compatible = "ti,j721e-cpsw-nuss" },
	{ .compatible = "ti,am642-cpsw-nuss" },
	{ }
};

U_BOOT_DRIVER(am65_cpsw_nuss) = {
	.name	= "am65_cpsw_nuss",
	.id	= UCLASS_MISC,
	.of_match = am65_cpsw_nuss_ids,
	.probe	= am65_cpsw_probe_nuss,
	.priv_auto = sizeof(struct am65_cpsw_common),
};

U_BOOT_DRIVER(am65_cpsw_nuss_port) = {
	.name	= "am65_cpsw_nuss_port",
	.id	= UCLASS_ETH,
	.probe	= am65_cpsw_port_probe,
	.ops	= &am65_cpsw_ops,
	.priv_auto	= sizeof(struct am65_cpsw_priv),
	.plat_auto	= sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA | DM_FLAG_OS_PREPARE,
};