// SPDX-License-Identifier: GPL-2.0+
/*
 * Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
 *
 * Copyright (C) 2019, Texas Instruments, Incorporated
 *
 */

#include <common.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <clk.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/lists.h>
#include <dma-uclass.h>
#include <dm/of_access.h>
#include <miiphy.h>
#include <net.h>
#include <phy.h>
#include <power-domain.h>
#include <linux/bitops.h>
#include <linux/soc/ti/ti-udma.h>

#include "cpsw_mdio.h"

#define AM65_CPSW_CPSWNU_MAX_PORTS 2

#define AM65_CPSW_SS_BASE 0x0
#define AM65_CPSW_SGMII_BASE 0x100
#define AM65_CPSW_MDIO_BASE 0xf00
#define AM65_CPSW_XGMII_BASE 0x2100
#define AM65_CPSW_CPSW_NU_BASE 0x20000
#define AM65_CPSW_CPSW_NU_ALE_BASE 0x1e000

#define AM65_CPSW_CPSW_NU_PORTS_OFFSET 0x1000
#define AM65_CPSW_CPSW_NU_PORT_MACSL_OFFSET 0x330

#define AM65_CPSW_MDIO_BUS_FREQ_DEF 1000000

#define AM65_CPSW_CTL_REG 0x4
#define AM65_CPSW_STAT_PORT_EN_REG 0x14
#define AM65_CPSW_PTYPE_REG 0x18

#define AM65_CPSW_CTL_REG_P0_ENABLE BIT(2)
#define AM65_CPSW_CTL_REG_P0_TX_CRC_REMOVE BIT(13)
#define AM65_CPSW_CTL_REG_P0_RX_PAD BIT(14)

#define AM65_CPSW_P0_FLOW_ID_REG 0x8
#define AM65_CPSW_PN_RX_MAXLEN_REG 0x24
#define AM65_CPSW_PN_REG_SA_L 0x308
#define AM65_CPSW_PN_REG_SA_H 0x30c

#define AM65_CPSW_ALE_CTL_REG 0x8
#define AM65_CPSW_ALE_CTL_REG_ENABLE BIT(31)
#define AM65_CPSW_ALE_CTL_REG_RESET_TBL BIT(30)
#define AM65_CPSW_ALE_CTL_REG_BYPASS BIT(4)
#define AM65_CPSW_ALE_PN_CTL_REG(x) (0x40 + (x) * 4)
#define AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD 0x3
#define AM65_CPSW_ALE_PN_CTL_REG_MAC_ONLY BIT(11)

#define AM65_CPSW_ALE_THREADMAPDEF_REG 0x134
#define AM65_CPSW_ALE_DEFTHREAD_EN BIT(15)

#define AM65_CPSW_MACSL_CTL_REG 0x0
#define AM65_CPSW_MACSL_CTL_REG_IFCTL_A BIT(15)
#define AM65_CPSW_MACSL_CTL_EXT_EN BIT(18)
#define AM65_CPSW_MACSL_CTL_REG_GIG BIT(7)
#define AM65_CPSW_MACSL_CTL_REG_GMII_EN BIT(5)
#define AM65_CPSW_MACSL_CTL_REG_LOOPBACK BIT(1)
#define AM65_CPSW_MACSL_CTL_REG_FULL_DUPLEX BIT(0)
#define AM65_CPSW_MACSL_RESET_REG 0x8
#define AM65_CPSW_MACSL_RESET_REG_RESET BIT(0)
#define AM65_CPSW_MACSL_STATUS_REG 0x4
#define AM65_CPSW_MACSL_RESET_REG_PN_IDLE BIT(31)
#define AM65_CPSW_MACSL_RESET_REG_PN_E_IDLE BIT(30)
#define AM65_CPSW_MACSL_RESET_REG_PN_P_IDLE BIT(29)
#define AM65_CPSW_MACSL_RESET_REG_PN_TX_IDLE BIT(28)
#define AM65_CPSW_MACSL_RESET_REG_IDLE_MASK \
        (AM65_CPSW_MACSL_RESET_REG_PN_IDLE | \
         AM65_CPSW_MACSL_RESET_REG_PN_E_IDLE | \
         AM65_CPSW_MACSL_RESET_REG_PN_P_IDLE | \
         AM65_CPSW_MACSL_RESET_REG_PN_TX_IDLE)

#define AM65_CPSW_CPPI_PKT_TYPE 0x7

struct am65_cpsw_port {
        fdt_addr_t port_base;
        fdt_addr_t macsl_base;
        bool disabled;
        u32 mac_control;
};

struct am65_cpsw_common {
        struct udevice *dev;
        fdt_addr_t ss_base;
        fdt_addr_t cpsw_base;
        fdt_addr_t mdio_base;
        fdt_addr_t ale_base;
        fdt_addr_t gmii_sel;
        fdt_addr_t mac_efuse;

        struct clk fclk;
        struct power_domain pwrdmn;

        u32 port_num;
        struct am65_cpsw_port ports[AM65_CPSW_CPSWNU_MAX_PORTS];

        struct mii_dev *bus;
        u32 bus_freq;

        struct dma dma_tx;
        struct dma dma_rx;
        u32 rx_next;
        u32 rx_pend;
        bool started;
};

struct am65_cpsw_priv {
        struct udevice *dev;
        struct am65_cpsw_common *cpsw_common;
        u32 port_id;

        struct phy_device *phydev;
        bool has_phy;
        ofnode phy_node;
        u32 phy_addr;
};

#ifdef PKTSIZE_ALIGN
#define UDMA_RX_BUF_SIZE PKTSIZE_ALIGN
#else
#define UDMA_RX_BUF_SIZE ALIGN(1522, ARCH_DMA_MINALIGN)
#endif

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM 4
#endif

#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
                     ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))

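/* Program the port's source MAC address into the SA_H/SA_L registers */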
static void am65_cpsw_set_sl_mac(struct am65_cpsw_port *slave,
                                 unsigned char *addr)
{
        writel(mac_hi(addr),
               slave->port_base + AM65_CPSW_PN_REG_SA_H);
        writel(mac_lo(addr),
               slave->port_base + AM65_CPSW_PN_REG_SA_L);
}

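/*
 * Soft-reset the MAC SL of a port and poll until the reset bit clears;
 * returns the remaining poll count, so 0 indicates a timeout.
 */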
int am65_cpsw_macsl_reset(struct am65_cpsw_port *slave)
{
        u32 i = 100;

        /* Set the soft reset bit */
        writel(AM65_CPSW_MACSL_RESET_REG_RESET,
               slave->macsl_base + AM65_CPSW_MACSL_RESET_REG);

        while ((readl(slave->macsl_base + AM65_CPSW_MACSL_RESET_REG) &
                AM65_CPSW_MACSL_RESET_REG_RESET) && i--)
                cpu_relax();

        /* Timeout on the reset */
        return i;
}

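/*
 * Poll the MAC SL status register until the IDLE bits clear or the poll
 * budget runs out; returns the remaining count (0 on timeout).
 */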
static int am65_cpsw_macsl_wait_for_idle(struct am65_cpsw_port *slave)
{
        u32 i = 100;

        while ((readl(slave->macsl_base + AM65_CPSW_MACSL_STATUS_REG) &
                AM65_CPSW_MACSL_RESET_REG_IDLE_MASK) && i--)
                cpu_relax();

        return i;
}

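/*
 * Translate the negotiated PHY state (link, speed, duplex) into a MAC SL
 * control value and write it only if it changed; returns the link state.
 */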
static int am65_cpsw_update_link(struct am65_cpsw_priv *priv)
{
        struct am65_cpsw_common *common = priv->cpsw_common;
        struct am65_cpsw_port *port = &common->ports[priv->port_id];
        struct phy_device *phy = priv->phydev;
        u32 mac_control = 0;

        if (phy->link) { /* link up */
                mac_control = /*AM65_CPSW_MACSL_CTL_REG_LOOPBACK |*/
                              AM65_CPSW_MACSL_CTL_REG_GMII_EN;
                if (phy->speed == 1000)
                        mac_control |= AM65_CPSW_MACSL_CTL_REG_GIG;
                if (phy->speed == 10 && phy_interface_is_rgmii(phy))
                        /* Can be used with in band mode only */
                        mac_control |= AM65_CPSW_MACSL_CTL_EXT_EN;
                if (phy->duplex == DUPLEX_FULL)
                        mac_control |= AM65_CPSW_MACSL_CTL_REG_FULL_DUPLEX;
                if (phy->speed == 100)
                        mac_control |= AM65_CPSW_MACSL_CTL_REG_IFCTL_A;
        }

        if (mac_control == port->mac_control)
                goto out;

        if (mac_control) {
                printf("link up on port %d, speed %d, %s duplex\n",
                       priv->port_id, phy->speed,
                       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
        } else {
                printf("link down on port %d\n", priv->port_id);
        }

        writel(mac_control, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
        port->mac_control = mac_control;

out:
        return phy->link;
}

#define AM65_GMII_SEL_MODE_MII 0
#define AM65_GMII_SEL_MODE_RMII 1
#define AM65_GMII_SEL_MODE_RGMII 2

#define AM65_GMII_SEL_RGMII_IDMODE BIT(4)

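/*
 * Program the gmii_sel register for the port so the MAC interface matches
 * the requested PHY mode (MII/RMII/RGMII), flagging the RGMII ID-mode
 * variants, and verify that the value was accepted.
 */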
static void am65_cpsw_gmii_sel_k3(struct am65_cpsw_priv *priv,
                                  phy_interface_t phy_mode, int slave)
{
        struct am65_cpsw_common *common = priv->cpsw_common;
        u32 reg;
        u32 mode = 0;
        bool rgmii_id = false;

        reg = readl(common->gmii_sel);

        dev_dbg(common->dev, "old gmii_sel: %08x\n", reg);

        switch (phy_mode) {
        case PHY_INTERFACE_MODE_RMII:
                mode = AM65_GMII_SEL_MODE_RMII;
                break;

        case PHY_INTERFACE_MODE_RGMII:
        case PHY_INTERFACE_MODE_RGMII_RXID:
                mode = AM65_GMII_SEL_MODE_RGMII;
                break;

        case PHY_INTERFACE_MODE_RGMII_ID:
        case PHY_INTERFACE_MODE_RGMII_TXID:
                mode = AM65_GMII_SEL_MODE_RGMII;
                rgmii_id = true;
                break;

        default:
                dev_warn(common->dev,
                         "Unsupported PHY mode: %u. Defaulting to MII.\n",
                         phy_mode);
                /* fallthrough */
        case PHY_INTERFACE_MODE_MII:
                mode = AM65_GMII_SEL_MODE_MII;
                break;
        }

        if (rgmii_id)
                mode |= AM65_GMII_SEL_RGMII_IDMODE;

        reg = mode;
        dev_dbg(common->dev, "gmii_sel PHY mode: %u, new gmii_sel: %08x\n",
                phy_mode, reg);
        writel(reg, common->gmii_sel);

        reg = readl(common->gmii_sel);
        if (reg != mode)
                dev_err(common->dev,
                        "gmii_sel PHY mode NOT SET!: requested: %08x, gmii_sel: %08x\n",
                        mode, reg);
}

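/*
 * eth_ops.start: power and clock the subsystem, set up the TX/RX UDMA
 * channels, configure host port 0 and the slave port (ALE, MAC address,
 * RX max length), then bring up the PHY link. Unwinds everything on error.
 */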
static int am65_cpsw_start(struct udevice *dev)
{
        struct eth_pdata *pdata = dev_get_plat(dev);
        struct am65_cpsw_priv *priv = dev_get_priv(dev);
        struct am65_cpsw_common *common = priv->cpsw_common;
        struct am65_cpsw_port *port = &common->ports[priv->port_id];
        struct am65_cpsw_port *port0 = &common->ports[0];
        struct ti_udma_drv_chan_cfg_data *dma_rx_cfg_data;
        int ret, i;

        ret = power_domain_on(&common->pwrdmn);
        if (ret) {
                dev_err(dev, "power_domain_on() failed %d\n", ret);
                goto out;
        }

        ret = clk_enable(&common->fclk);
        if (ret) {
                dev_err(dev, "clk enable failed %d\n", ret);
                goto err_off_pwrdm;
        }

        common->rx_next = 0;
        common->rx_pend = 0;
        ret = dma_get_by_name(common->dev, "tx0", &common->dma_tx);
        if (ret) {
                dev_err(dev, "TX dma get failed %d\n", ret);
                goto err_off_clk;
        }
        ret = dma_get_by_name(common->dev, "rx", &common->dma_rx);
        if (ret) {
                dev_err(dev, "RX dma get failed %d\n", ret);
                goto err_free_tx;
        }

        for (i = 0; i < UDMA_RX_DESC_NUM; i++) {
                ret = dma_prepare_rcv_buf(&common->dma_rx,
                                          net_rx_packets[i],
                                          UDMA_RX_BUF_SIZE);
                if (ret) {
                        dev_err(dev, "RX dma add buf failed %d\n", ret);
                        goto err_free_tx;
                }
        }

        ret = dma_enable(&common->dma_tx);
        if (ret) {
                dev_err(dev, "TX dma_enable failed %d\n", ret);
                goto err_free_rx;
        }
        ret = dma_enable(&common->dma_rx);
        if (ret) {
                dev_err(dev, "RX dma_enable failed %d\n", ret);
                goto err_dis_tx;
        }

        /* Control register */
        writel(AM65_CPSW_CTL_REG_P0_ENABLE |
               AM65_CPSW_CTL_REG_P0_TX_CRC_REMOVE |
               AM65_CPSW_CTL_REG_P0_RX_PAD,
               common->cpsw_base + AM65_CPSW_CTL_REG);

        /* disable priority elevation */
        writel(0, common->cpsw_base + AM65_CPSW_PTYPE_REG);

        /* enable statistics */
        writel(BIT(0) | BIT(priv->port_id),
               common->cpsw_base + AM65_CPSW_STAT_PORT_EN_REG);

        /* Port 0 length register */
        writel(PKTSIZE_ALIGN, port0->port_base + AM65_CPSW_PN_RX_MAXLEN_REG);

        /* set base flow_id */
        dma_get_cfg(&common->dma_rx, 0, (void **)&dma_rx_cfg_data);
        writel(dma_rx_cfg_data->flow_id_base,
               port0->port_base + AM65_CPSW_P0_FLOW_ID_REG);
        dev_info(dev, "K3 CPSW: rflow_id_base: %u\n",
                 dma_rx_cfg_data->flow_id_base);

        /* Reset and enable the ALE */
        writel(AM65_CPSW_ALE_CTL_REG_ENABLE | AM65_CPSW_ALE_CTL_REG_RESET_TBL |
               AM65_CPSW_ALE_CTL_REG_BYPASS,
               common->ale_base + AM65_CPSW_ALE_CTL_REG);

        /* port 0 put into forward mode */
        writel(AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD,
               common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));

        writel(AM65_CPSW_ALE_DEFTHREAD_EN,
               common->ale_base + AM65_CPSW_ALE_THREADMAPDEF_REG);

        /* PORT x configuration */

        /* Port x Max length register */
        writel(PKTSIZE_ALIGN, port->port_base + AM65_CPSW_PN_RX_MAXLEN_REG);

        /* Port x set mac */
        am65_cpsw_set_sl_mac(port, pdata->enetaddr);

        /* Port x ALE: mac_only, Forwarding */
        writel(AM65_CPSW_ALE_PN_CTL_REG_MAC_ONLY |
               AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD,
               common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));

        port->mac_control = 0;
        if (!am65_cpsw_macsl_reset(port)) {
                dev_err(dev, "mac_sl reset failed\n");
                ret = -EFAULT;
                goto err_dis_rx;
        }

        ret = phy_startup(priv->phydev);
        if (ret) {
                dev_err(dev, "phy_startup failed\n");
                goto err_dis_rx;
        }

        ret = am65_cpsw_update_link(priv);
        if (!ret) {
                ret = -ENODEV;
                goto err_phy_shutdown;
        }

        common->started = true;

        return 0;

err_phy_shutdown:
        phy_shutdown(priv->phydev);
err_dis_rx:
        /* disable ports */
        writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));
        writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));
        if (!am65_cpsw_macsl_wait_for_idle(port))
                dev_err(dev, "mac_sl idle timeout\n");
        writel(0, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
        writel(0, common->ale_base + AM65_CPSW_ALE_CTL_REG);
        writel(0, common->cpsw_base + AM65_CPSW_CTL_REG);

        dma_disable(&common->dma_rx);
err_dis_tx:
        dma_disable(&common->dma_tx);
err_free_rx:
        dma_free(&common->dma_rx);
err_free_tx:
        dma_free(&common->dma_tx);
err_off_clk:
        clk_disable(&common->fclk);
err_off_pwrdm:
        power_domain_off(&common->pwrdmn);
out:
        dev_err(dev, "%s end error\n", __func__);

        return ret;
}

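/* eth_ops.send: queue one frame on the TX UDMA channel, tagged for this port */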
static int am65_cpsw_send(struct udevice *dev, void *packet, int length)
{
        struct am65_cpsw_priv *priv = dev_get_priv(dev);
        struct am65_cpsw_common *common = priv->cpsw_common;
        struct ti_udma_drv_packet_data packet_data;
        int ret;

        packet_data.pkt_type = AM65_CPSW_CPPI_PKT_TYPE;
        packet_data.dest_tag = priv->port_id;
        ret = dma_send(&common->dma_tx, packet, length, &packet_data);
        if (ret) {
                dev_err(dev, "TX dma_send failed %d\n", ret);
                return ret;
        }

        return 0;
}

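/* eth_ops.recv: poll the RX UDMA channel for a completed packet */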
static int am65_cpsw_recv(struct udevice *dev, int flags, uchar **packetp)
{
        struct am65_cpsw_priv *priv = dev_get_priv(dev);
        struct am65_cpsw_common *common = priv->cpsw_common;

        /* try to receive a new packet */
        return dma_receive(&common->dma_rx, (void **)packetp, NULL);
}

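/* eth_ops.free_pkt: recycle the consumed RX buffer back to the RX UDMA channel */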
static int am65_cpsw_free_pkt(struct udevice *dev, uchar *packet, int length)
{
        struct am65_cpsw_priv *priv = dev_get_priv(dev);
        struct am65_cpsw_common *common = priv->cpsw_common;
        int ret;

        if (length > 0) {
                u32 pkt = common->rx_next % UDMA_RX_DESC_NUM;

                ret = dma_prepare_rcv_buf(&common->dma_rx,
                                          net_rx_packets[pkt],
                                          UDMA_RX_BUF_SIZE);
                if (ret)
                        dev_err(dev, "RX dma free_pkt failed %d\n", ret);
                common->rx_next++;
        }

        return 0;
}

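/* eth_ops.stop: shut down the PHY, disable the ports and ALE, and tear down DMA */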
static void am65_cpsw_stop(struct udevice *dev)
{
        struct am65_cpsw_priv *priv = dev_get_priv(dev);
        struct am65_cpsw_common *common = priv->cpsw_common;
        struct am65_cpsw_port *port = &common->ports[priv->port_id];

        if (!common->started)
                return;

        phy_shutdown(priv->phydev);

        writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));
        writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));
        if (!am65_cpsw_macsl_wait_for_idle(port))
                dev_err(dev, "mac_sl idle timeout\n");
        writel(0, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
        writel(0, common->ale_base + AM65_CPSW_ALE_CTL_REG);
        writel(0, common->cpsw_base + AM65_CPSW_CTL_REG);

        dma_disable(&common->dma_tx);
        dma_free(&common->dma_tx);

        dma_disable(&common->dma_rx);
        dma_free(&common->dma_rx);

        common->started = false;
}

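/* eth_ops.read_rom_hwaddr: read the factory MAC address from the efuse registers */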
static int am65_cpsw_read_rom_hwaddr(struct udevice *dev)
{
        struct am65_cpsw_priv *priv = dev_get_priv(dev);
        struct am65_cpsw_common *common = priv->cpsw_common;
        struct eth_pdata *pdata = dev_get_plat(dev);
        u32 mac_hi, mac_lo;

        if (common->mac_efuse == FDT_ADDR_T_NONE)
                return -1;

        mac_lo = readl(common->mac_efuse);
        mac_hi = readl(common->mac_efuse + 4);
        pdata->enetaddr[0] = (mac_hi >> 8) & 0xff;
        pdata->enetaddr[1] = mac_hi & 0xff;
        pdata->enetaddr[2] = (mac_lo >> 24) & 0xff;
        pdata->enetaddr[3] = (mac_lo >> 16) & 0xff;
        pdata->enetaddr[4] = (mac_lo >> 8) & 0xff;
        pdata->enetaddr[5] = mac_lo & 0xff;

        return 0;
}

static const struct eth_ops am65_cpsw_ops = {
        .start = am65_cpsw_start,
        .send = am65_cpsw_send,
        .recv = am65_cpsw_recv,
        .free_pkt = am65_cpsw_free_pkt,
        .stop = am65_cpsw_stop,
        .read_rom_hwaddr = am65_cpsw_read_rom_hwaddr,
};

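/* Register the shared MDIO bus once, and only if at least one port has a PHY */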
static int am65_cpsw_mdio_init(struct udevice *dev)
{
        struct am65_cpsw_priv *priv = dev_get_priv(dev);
        struct am65_cpsw_common *cpsw_common = priv->cpsw_common;

        if (!priv->has_phy || cpsw_common->bus)
                return 0;

        cpsw_common->bus = cpsw_mdio_init(dev->name,
                                          cpsw_common->mdio_base,
                                          cpsw_common->bus_freq,
                                          clk_get_rate(&cpsw_common->fclk));
        if (!cpsw_common->bus)
                return -EFAULT;

        return 0;
}

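/* Connect to and configure the port's PHY, honouring an optional max-speed limit */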
static int am65_cpsw_phy_init(struct udevice *dev)
{
        struct am65_cpsw_priv *priv = dev_get_priv(dev);
        struct am65_cpsw_common *cpsw_common = priv->cpsw_common;
        struct eth_pdata *pdata = dev_get_plat(dev);
        struct phy_device *phydev;
        u32 supported = PHY_GBIT_FEATURES;
        int ret;

        phydev = phy_connect(cpsw_common->bus,
                             priv->phy_addr,
                             priv->dev,
                             pdata->phy_interface);

        if (!phydev) {
                dev_err(dev, "phy_connect() failed\n");
                return -ENODEV;
        }

        phydev->supported &= supported;
        if (pdata->max_speed) {
                ret = phy_set_supported(phydev, pdata->max_speed);
                if (ret)
                        return ret;
        }
        phydev->advertising = phydev->supported;

        if (ofnode_valid(priv->phy_node))
                phydev->node = priv->phy_node;

        priv->phydev = phydev;
        ret = phy_config(phydev);
        if (ret < 0)
                pr_err("phy_config() failed: %d\n", ret);

        return ret;
}

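/* Parse phy-mode, max-speed and phy-handle from the port's device tree node */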
static int am65_cpsw_ofdata_parse_phy(struct udevice *dev, ofnode port_np)
{
        struct eth_pdata *pdata = dev_get_plat(dev);
        struct am65_cpsw_priv *priv = dev_get_priv(dev);
        struct ofnode_phandle_args out_args;
        const char *phy_mode;
        int ret = 0;

        phy_mode = ofnode_read_string(port_np, "phy-mode");
        if (phy_mode) {
                pdata->phy_interface =
                                phy_get_interface_by_name(phy_mode);
                if (pdata->phy_interface == -1) {
                        dev_err(dev, "Invalid PHY mode '%s', port %u\n",
                                phy_mode, priv->port_id);
                        ret = -EINVAL;
                        goto out;
                }
        }

        ofnode_read_u32(port_np, "max-speed", (u32 *)&pdata->max_speed);
        if (pdata->max_speed)
                dev_err(dev, "Port %u speed forced to %uMbit\n",
                        priv->port_id, pdata->max_speed);

        priv->has_phy = true;
        ret = ofnode_parse_phandle_with_args(port_np, "phy-handle",
                                             NULL, 0, 0, &out_args);
        if (ret) {
                dev_err(dev, "can't parse phy-handle port %u (%d)\n",
                        priv->port_id, ret);
                priv->has_phy = false;
                ret = 0;
        }

        priv->phy_node = out_args.node;
        if (priv->has_phy) {
                ret = ofnode_read_u32(priv->phy_node, "reg", &priv->phy_addr);
                if (ret) {
                        dev_err(dev, "failed to get phy_addr port %u (%d)\n",
                                priv->port_id, ret);
                        goto out;
                }
        }

out:
        return ret;
}

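/*
 * Probe: map the subsystem regions, parse the ethernet-ports and
 * cpsw-phy-sel nodes, select the PHY interface mode and set up MDIO/PHY.
 */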
static int am65_cpsw_probe_cpsw(struct udevice *dev)
{
        struct am65_cpsw_priv *priv = dev_get_priv(dev);
        struct eth_pdata *pdata = dev_get_plat(dev);
        struct am65_cpsw_common *cpsw_common;
        ofnode ports_np, node;
        int ret, i;

        priv->dev = dev;

        cpsw_common = calloc(1, sizeof(*priv->cpsw_common));
        if (!cpsw_common)
                return -ENOMEM;
        priv->cpsw_common = cpsw_common;

        cpsw_common->dev = dev;
        cpsw_common->ss_base = dev_read_addr(dev);
        if (cpsw_common->ss_base == FDT_ADDR_T_NONE)
                return -EINVAL;
        cpsw_common->mac_efuse = devfdt_get_addr_name(dev, "mac_efuse");
        /* no err check - optional */

        ret = power_domain_get_by_index(dev, &cpsw_common->pwrdmn, 0);
        if (ret) {
                dev_err(dev, "failed to get pwrdmn: %d\n", ret);
                return ret;
        }

        ret = clk_get_by_name(dev, "fck", &cpsw_common->fclk);
        if (ret) {
                power_domain_free(&cpsw_common->pwrdmn);
                dev_err(dev, "failed to get clock %d\n", ret);
                return ret;
        }

        cpsw_common->cpsw_base = cpsw_common->ss_base + AM65_CPSW_CPSW_NU_BASE;
        cpsw_common->ale_base = cpsw_common->cpsw_base +
                                AM65_CPSW_CPSW_NU_ALE_BASE;
        cpsw_common->mdio_base = cpsw_common->ss_base + AM65_CPSW_MDIO_BASE;

        ports_np = dev_read_subnode(dev, "ethernet-ports");
        if (!ofnode_valid(ports_np)) {
                ret = -ENOENT;
                goto out;
        }

        ofnode_for_each_subnode(node, ports_np) {
                const char *node_name;
                u32 port_id;
                bool disabled;

                node_name = ofnode_get_name(node);

                disabled = !ofnode_is_available(node);

                ret = ofnode_read_u32(node, "reg", &port_id);
                if (ret) {
                        dev_err(dev, "%s: failed to get port_id (%d)\n",
                                node_name, ret);
                        goto out;
                }

                if (port_id >= AM65_CPSW_CPSWNU_MAX_PORTS) {
                        dev_err(dev, "%s: invalid port_id (%d)\n",
                                node_name, port_id);
                        ret = -EINVAL;
                        goto out;
                }
                cpsw_common->port_num++;

                if (!port_id)
                        continue;

                priv->port_id = port_id;
                cpsw_common->ports[port_id].disabled = disabled;
                if (disabled)
                        continue;

                ret = am65_cpsw_ofdata_parse_phy(dev, node);
                if (ret)
                        goto out;
        }

        for (i = 0; i < AM65_CPSW_CPSWNU_MAX_PORTS; i++) {
                struct am65_cpsw_port *port = &cpsw_common->ports[i];

                port->port_base = cpsw_common->cpsw_base +
                                  AM65_CPSW_CPSW_NU_PORTS_OFFSET +
                                  (i * AM65_CPSW_CPSW_NU_PORTS_OFFSET);
                port->macsl_base = port->port_base +
                                   AM65_CPSW_CPSW_NU_PORT_MACSL_OFFSET;
        }

        node = dev_read_subnode(dev, "cpsw-phy-sel");
        if (!ofnode_valid(node)) {
                dev_err(dev, "can't find cpsw-phy-sel\n");
                ret = -ENOENT;
                goto out;
        }

        cpsw_common->gmii_sel = ofnode_get_addr(node);
        if (cpsw_common->gmii_sel == FDT_ADDR_T_NONE) {
                dev_err(dev, "failed to get gmii_sel base\n");
                ret = -ENODEV;
                goto out;
        }

        cpsw_common->bus_freq =
                        dev_read_u32_default(dev, "bus_freq",
                                             AM65_CPSW_MDIO_BUS_FREQ_DEF);

        am65_cpsw_gmii_sel_k3(priv, pdata->phy_interface, priv->port_id);

        ret = am65_cpsw_mdio_init(dev);
        if (ret)
                goto out;

        ret = am65_cpsw_phy_init(dev);
        if (ret)
                goto out;

        dev_info(dev, "K3 CPSW: nuss_ver: 0x%08X cpsw_ver: 0x%08X ale_ver: 0x%08X Ports:%u mdio_freq:%u\n",
                 readl(cpsw_common->ss_base),
                 readl(cpsw_common->cpsw_base),
                 readl(cpsw_common->ale_base),
                 cpsw_common->port_num,
                 cpsw_common->bus_freq);

out:
        clk_free(&cpsw_common->fclk);
        power_domain_free(&cpsw_common->pwrdmn);
        return ret;
}

static const struct udevice_id am65_cpsw_nuss_ids[] = {
        { .compatible = "ti,am654-cpsw-nuss" },
        { .compatible = "ti,j721e-cpsw-nuss" },
        { }
};

U_BOOT_DRIVER(am65_cpsw_nuss_slave) = {
        .name = "am65_cpsw_nuss_slave",
        .id = UCLASS_ETH,
        .of_match = am65_cpsw_nuss_ids,
        .probe = am65_cpsw_probe_cpsw,
        .ops = &am65_cpsw_ops,
        .priv_auto = sizeof(struct am65_cpsw_priv),
        .plat_auto = sizeof(struct eth_pdata),
        .flags = DM_FLAG_ALLOC_PRIV_DMA,
};