blob: 75e7bcf83b768ac0d4f14927b4cfd83157296ae0 [file] [log] [blame]
developerc3ac93d2018-12-20 16:12:53 +08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2018 MediaTek Inc.
4 *
5 * Author: Weijie Gao <weijie.gao@mediatek.com>
6 * Author: Mark Lee <mark-mc.lee@mediatek.com>
7 */
8
9#include <common.h>
Simon Glass63334482019-11-14 12:57:39 -070010#include <cpu_func.h>
developerc3ac93d2018-12-20 16:12:53 +080011#include <dm.h>
Simon Glass0f2af882020-05-10 11:40:05 -060012#include <log.h>
developerc3ac93d2018-12-20 16:12:53 +080013#include <malloc.h>
14#include <miiphy.h>
Simon Glass274e0b02020-05-10 11:39:56 -060015#include <net.h>
developerc3ac93d2018-12-20 16:12:53 +080016#include <regmap.h>
17#include <reset.h>
18#include <syscon.h>
19#include <wait_bit.h>
Simon Glass274e0b02020-05-10 11:39:56 -060020#include <asm/cache.h>
developerc3ac93d2018-12-20 16:12:53 +080021#include <asm/gpio.h>
22#include <asm/io.h>
Simon Glass9bc15642020-02-03 07:36:16 -070023#include <dm/device_compat.h>
Simon Glassdbd79542020-05-10 11:40:11 -060024#include <linux/delay.h>
developerc3ac93d2018-12-20 16:12:53 +080025#include <linux/err.h>
26#include <linux/ioport.h>
27#include <linux/mdio.h>
28#include <linux/mii.h>
Simon Glassbdd5f812023-09-14 18:21:46 -060029#include <linux/printk.h>
developerc3ac93d2018-12-20 16:12:53 +080030
31#include "mtk_eth.h"
32
33#define NUM_TX_DESC 24
34#define NUM_RX_DESC 24
35#define TX_TOTAL_BUF_SIZE (NUM_TX_DESC * PKTSIZE_ALIGN)
36#define RX_TOTAL_BUF_SIZE (NUM_RX_DESC * PKTSIZE_ALIGN)
37#define TOTAL_PKT_BUF_SIZE (TX_TOTAL_BUF_SIZE + RX_TOTAL_BUF_SIZE)
38
developerd5d73952020-02-18 16:49:37 +080039#define MT753X_NUM_PHYS 5
40#define MT753X_NUM_PORTS 7
41#define MT753X_DFL_SMI_ADDR 31
42#define MT753X_SMI_ADDR_MASK 0x1f
developerc3ac93d2018-12-20 16:12:53 +080043
developerd5d73952020-02-18 16:49:37 +080044#define MT753X_PHY_ADDR(base, addr) \
developerc3ac93d2018-12-20 16:12:53 +080045 (((base) + (addr)) & 0x1f)
46
47#define GDMA_FWD_TO_CPU \
48 (0x20000000 | \
49 GDM_ICS_EN | \
50 GDM_TCS_EN | \
51 GDM_UCS_EN | \
52 STRP_CRC | \
53 (DP_PDMA << MYMAC_DP_S) | \
54 (DP_PDMA << BC_DP_S) | \
55 (DP_PDMA << MC_DP_S) | \
56 (DP_PDMA << UN_DP_S))
57
developer76e14722023-07-19 17:17:41 +080058#define GDMA_BRIDGE_TO_CPU \
59 (0xC0000000 | \
60 GDM_ICS_EN | \
61 GDM_TCS_EN | \
62 GDM_UCS_EN | \
63 (DP_PDMA << MYMAC_DP_S) | \
64 (DP_PDMA << BC_DP_S) | \
65 (DP_PDMA << MC_DP_S) | \
66 (DP_PDMA << UN_DP_S))
67
developerc3ac93d2018-12-20 16:12:53 +080068#define GDMA_FWD_DISCARD \
69 (0x20000000 | \
70 GDM_ICS_EN | \
71 GDM_TCS_EN | \
72 GDM_UCS_EN | \
73 STRP_CRC | \
74 (DP_DISCARD << MYMAC_DP_S) | \
75 (DP_DISCARD << BC_DP_S) | \
76 (DP_DISCARD << MC_DP_S) | \
77 (DP_DISCARD << UN_DP_S))
78
/* Model of the Ethernet switch attached to (or integrated with) the MAC */
enum mtk_switch {
	SW_NONE,	/* no switch: GMAC is wired directly to a PHY */
	SW_MT7530,
	SW_MT7531,
	SW_MT7988,	/* MT7988 SoC built-in switch, memory-mapped */
};
85
/* struct mtk_soc_data - This is the structure holding all differences
 * among various platforms
 * @caps	Flags showing the extra capabilities of the SoC
 * @ana_rgc3:	The offset for register ANA_RGC3 related to
 *		sgmiisys syscon
 * @gdma_count:	Number of GDMAs
 * @pdma_base:	Register base of PDMA block
 * @txd_size:	Tx DMA descriptor size, in bytes
 * @rxd_size:	Rx DMA descriptor size, in bytes
 */
struct mtk_soc_data {
	u32 caps;
	u32 ana_rgc3;
	u32 gdma_count;
	u32 pdma_base;
	u32 txd_size;
	u32 rxd_size;
};
104
struct mtk_eth_priv {
	/* DMA packet buffer pool shared by the Tx and Rx rings */
	char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);

	void *tx_ring_noc;		/* Tx descriptor ring (uncached view) */
	void *rx_ring_noc;		/* Rx descriptor ring (uncached view) */

	int rx_dma_owner_idx0;		/* next Rx descriptor index */
	int tx_cpu_owner_idx0;		/* next Tx descriptor index */

	void __iomem *fe_base;		/* frame engine register base */
	void __iomem *gmac_base;	/* GMAC register base */
	void __iomem *sgmii_base;	/* SGMII syscon base */
	void __iomem *gsw_base;		/* built-in switch base (SW_MT7988) */

	struct regmap *ethsys_regmap;	/* ethsys syscon */

	struct regmap *infra_regmap;	/* infracfg syscon */

	struct regmap *usxgmii_regmap;
	struct regmap *xfi_pextp_regmap;
	struct regmap *xfi_pll_regmap;
	struct regmap *toprgu_regmap;

	struct mii_dev *mdio_bus;
	/* MDIO accessors; implementation selected by switch/PHY type */
	int (*mii_read)(struct mtk_eth_priv *priv, u8 phy, u8 reg);
	int (*mii_write)(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 val);
	int (*mmd_read)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg);
	int (*mmd_write)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg,
			 u16 val);

	const struct mtk_soc_data *soc;	/* per-SoC parameters */
	int gmac_id;
	int force_mode;			/* non-zero: fixed speed/duplex link */
	int speed;
	int duplex;
	int mdc;			/* MDC clock setting — TODO(review): confirm units */
	bool pn_swap;			/* NOTE(review): presumably SGMII P/N lane swap */

	struct phy_device *phydev;
	int phy_interface;
	int phy_addr;

	enum mtk_switch sw;		/* attached switch model */
	int (*switch_init)(struct mtk_eth_priv *priv);
	void (*switch_mac_control)(struct mtk_eth_priv *priv, bool enable);
	u32 mt753x_smi_addr;		/* SMI address of the MT753x switch */
	u32 mt753x_phy_base;		/* first PHY address of the switch */
	u32 mt753x_pmcr;		/* PMCR value to apply on link-up */
	u32 mt753x_reset_wait_time;

	struct gpio_desc rst_gpio;	/* switch hardware-reset GPIO */
	int mcm;			/* non-zero: switch is MCM-packaged */

	struct reset_ctl rst_fe;	/* frame engine reset control */
	struct reset_ctl rst_mcm;	/* MCM switch reset control */
};
161
162static void mtk_pdma_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
163{
developera7cdebf2022-09-09 19:59:26 +0800164 writel(val, priv->fe_base + priv->soc->pdma_base + reg);
developerc3ac93d2018-12-20 16:12:53 +0800165}
166
167static void mtk_pdma_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
168 u32 set)
169{
developera7cdebf2022-09-09 19:59:26 +0800170 clrsetbits_le32(priv->fe_base + priv->soc->pdma_base + reg, clr, set);
developerc3ac93d2018-12-20 16:12:53 +0800171}
172
173static void mtk_gdma_write(struct mtk_eth_priv *priv, int no, u32 reg,
174 u32 val)
175{
176 u32 gdma_base;
177
developer78fed682023-07-19 17:17:37 +0800178 if (no == 2)
179 gdma_base = GDMA3_BASE;
180 else if (no == 1)
developerc3ac93d2018-12-20 16:12:53 +0800181 gdma_base = GDMA2_BASE;
182 else
183 gdma_base = GDMA1_BASE;
184
185 writel(val, priv->fe_base + gdma_base + reg);
186}
187
/* Read-modify-write a frame engine register: clear @clr, then set @set. */
static void mtk_fe_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
{
	clrsetbits_le32(priv->fe_base + reg, clr, set);
}
192
/* Read a 32-bit GMAC register. */
static u32 mtk_gmac_read(struct mtk_eth_priv *priv, u32 reg)
{
	return readl(priv->gmac_base + reg);
}
197
/* Write a 32-bit GMAC register. */
static void mtk_gmac_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->gmac_base + reg);
}
202
/* Read-modify-write a GMAC register: clear @clr, then set @set. */
static void mtk_gmac_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
{
	clrsetbits_le32(priv->gmac_base + reg, clr, set);
}
207
208static void mtk_ethsys_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
209 u32 set)
210{
developera182b7e2022-05-20 11:23:37 +0800211 uint val;
212
213 regmap_read(priv->ethsys_regmap, reg, &val);
214 val &= ~clr;
215 val |= set;
216 regmap_write(priv->ethsys_regmap, reg, val);
developerc3ac93d2018-12-20 16:12:53 +0800217}
218
developera5d712a2023-07-19 17:17:22 +0800219static void mtk_infra_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
220 u32 set)
221{
222 uint val;
223
224 regmap_read(priv->infra_regmap, reg, &val);
225 val &= ~clr;
226 val |= set;
227 regmap_write(priv->infra_regmap, reg, val);
228}
229
/* Read a register of the memory-mapped built-in switch (MT7988). */
static u32 mtk_gsw_read(struct mtk_eth_priv *priv, u32 reg)
{
	return readl(priv->gsw_base + reg);
}
234
/* Write a register of the memory-mapped built-in switch (MT7988). */
static void mtk_gsw_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->gsw_base + reg);
}
239
developerc3ac93d2018-12-20 16:12:53 +0800240/* Direct MDIO clause 22/45 access via SoC */
241static int mtk_mii_rw(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data,
242 u32 cmd, u32 st)
243{
244 int ret;
245 u32 val;
246
247 val = (st << MDIO_ST_S) |
248 ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
249 (((u32)phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
250 (((u32)reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);
251
developer4781c6e2023-07-19 17:17:03 +0800252 if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
developerc3ac93d2018-12-20 16:12:53 +0800253 val |= data & MDIO_RW_DATA_M;
254
255 mtk_gmac_write(priv, GMAC_PIAC_REG, val | PHY_ACS_ST);
256
257 ret = wait_for_bit_le32(priv->gmac_base + GMAC_PIAC_REG,
258 PHY_ACS_ST, 0, 5000, 0);
259 if (ret) {
260 pr_warn("MDIO access timeout\n");
261 return ret;
262 }
263
developer4781c6e2023-07-19 17:17:03 +0800264 if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
developerc3ac93d2018-12-20 16:12:53 +0800265 val = mtk_gmac_read(priv, GMAC_PIAC_REG);
266 return val & MDIO_RW_DATA_M;
267 }
268
269 return 0;
270}
271
/* Direct MDIO clause 22 read via SoC; returns data or negative error. */
static int mtk_mii_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
{
	return mtk_mii_rw(priv, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
}
277
/* Direct MDIO clause 22 write via SoC; returns 0 or negative error. */
static int mtk_mii_write(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data)
{
	return mtk_mii_rw(priv, phy, reg, data, MDIO_CMD_WRITE, MDIO_ST_C22);
}
283
284/* Direct MDIO clause 45 read via SoC */
285static int mtk_mmd_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
286{
287 int ret;
288
289 ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
290 if (ret)
291 return ret;
292
293 return mtk_mii_rw(priv, addr, devad, 0, MDIO_CMD_READ_C45,
294 MDIO_ST_C45);
295}
296
297/* Direct MDIO clause 45 write via SoC */
298static int mtk_mmd_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
299 u16 reg, u16 val)
300{
301 int ret;
302
303 ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
304 if (ret)
305 return ret;
306
307 return mtk_mii_rw(priv, addr, devad, val, MDIO_CMD_WRITE,
308 MDIO_ST_C45);
309}
310
311/* Indirect MDIO clause 45 read via MII registers */
312static int mtk_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
313 u16 reg)
314{
315 int ret;
316
317 ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
318 (MMD_ADDR << MMD_CMD_S) |
319 ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
320 if (ret)
321 return ret;
322
323 ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
324 if (ret)
325 return ret;
326
327 ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
328 (MMD_DATA << MMD_CMD_S) |
329 ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
330 if (ret)
331 return ret;
332
333 return priv->mii_read(priv, addr, MII_MMD_ADDR_DATA_REG);
334}
335
336/* Indirect MDIO clause 45 write via MII registers */
337static int mtk_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
338 u16 reg, u16 val)
339{
340 int ret;
341
342 ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
343 (MMD_ADDR << MMD_CMD_S) |
344 ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
345 if (ret)
346 return ret;
347
348 ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
349 if (ret)
350 return ret;
351
352 ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
353 (MMD_DATA << MMD_CMD_S) |
354 ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
355 if (ret)
356 return ret;
357
358 return priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, val);
359}
360
developerd5d73952020-02-18 16:49:37 +0800361/*
362 * MT7530 Internal Register Address Bits
363 * -------------------------------------------------------------------
364 * | 15 14 13 12 11 10 9 8 7 6 | 5 4 3 2 | 1 0 |
365 * |----------------------------------------|---------------|--------|
366 * | Page Address | Reg Address | Unused |
367 * -------------------------------------------------------------------
368 */
369
370static int mt753x_reg_read(struct mtk_eth_priv *priv, u32 reg, u32 *data)
371{
372 int ret, low_word, high_word;
373
developer76e14722023-07-19 17:17:41 +0800374 if (priv->sw == SW_MT7988) {
375 *data = mtk_gsw_read(priv, reg);
376 return 0;
377 }
378
developerd5d73952020-02-18 16:49:37 +0800379 /* Write page address */
380 ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
381 if (ret)
382 return ret;
383
384 /* Read low word */
385 low_word = mtk_mii_read(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf);
386 if (low_word < 0)
387 return low_word;
388
389 /* Read high word */
390 high_word = mtk_mii_read(priv, priv->mt753x_smi_addr, 0x10);
391 if (high_word < 0)
392 return high_word;
393
394 if (data)
395 *data = ((u32)high_word << 16) | (low_word & 0xffff);
396
397 return 0;
398}
399
400static int mt753x_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 data)
401{
402 int ret;
403
developer76e14722023-07-19 17:17:41 +0800404 if (priv->sw == SW_MT7988) {
405 mtk_gsw_write(priv, reg, data);
406 return 0;
407 }
408
developerd5d73952020-02-18 16:49:37 +0800409 /* Write page address */
410 ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
411 if (ret)
412 return ret;
413
414 /* Write low word */
415 ret = mtk_mii_write(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf,
416 data & 0xffff);
417 if (ret)
418 return ret;
419
420 /* Write high word */
421 return mtk_mii_write(priv, priv->mt753x_smi_addr, 0x10, data >> 16);
422}
423
424static void mt753x_reg_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
425 u32 set)
426{
427 u32 val;
428
429 mt753x_reg_read(priv, reg, &val);
430 val &= ~clr;
431 val |= set;
432 mt753x_reg_write(priv, reg, val);
433}
434
/* Indirect MDIO clause 22/45 access */
/*
 * Perform an MDIO transaction through the MT7531 switch's PHY indirect
 * access control register (PHY_IAC), polling for completion with a
 * 100 ms timeout. Returns read data for read commands, 0 for writes,
 * or -ETIMEDOUT.
 */
static int mt7531_mii_rw(struct mtk_eth_priv *priv, int phy, int reg, u16 data,
			 u32 cmd, u32 st)
{
	ulong timeout;
	u32 val, timeout_ms;
	int ret = 0;

	/* Assemble the access command word */
	val = (st << MDIO_ST_S) |
	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
	      ((phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
	      ((reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);

	if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
		val |= data & MDIO_RW_DATA_M;

	/* Start the transaction */
	mt753x_reg_write(priv, MT7531_PHY_IAC, val | PHY_ACS_ST);

	/* Poll until the busy bit clears or the timeout expires */
	timeout_ms = 100;
	timeout = get_timer(0);
	while (1) {
		mt753x_reg_read(priv, MT7531_PHY_IAC, &val);

		if ((val & PHY_ACS_ST) == 0)
			break;

		if (get_timer(timeout) > timeout_ms)
			return -ETIMEDOUT;
	}

	/* For reads, fetch the returned data word */
	if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
		mt753x_reg_read(priv, MT7531_PHY_IAC, &val);
		ret = val & MDIO_RW_DATA_M;
	}

	return ret;
}
472
473static int mt7531_mii_ind_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
474{
475 u8 phy_addr;
476
477 if (phy >= MT753X_NUM_PHYS)
478 return -EINVAL;
479
480 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);
481
482 return mt7531_mii_rw(priv, phy_addr, reg, 0, MDIO_CMD_READ,
483 MDIO_ST_C22);
484}
485
486static int mt7531_mii_ind_write(struct mtk_eth_priv *priv, u8 phy, u8 reg,
487 u16 val)
488{
489 u8 phy_addr;
490
491 if (phy >= MT753X_NUM_PHYS)
492 return -EINVAL;
493
494 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);
495
496 return mt7531_mii_rw(priv, phy_addr, reg, val, MDIO_CMD_WRITE,
497 MDIO_ST_C22);
498}
499
developerdd6243f2023-07-19 17:17:07 +0800500static int mt7531_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
501 u16 reg)
developerd5d73952020-02-18 16:49:37 +0800502{
503 u8 phy_addr;
504 int ret;
505
506 if (addr >= MT753X_NUM_PHYS)
507 return -EINVAL;
508
509 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);
510
511 ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
512 MDIO_ST_C45);
513 if (ret)
514 return ret;
515
516 return mt7531_mii_rw(priv, phy_addr, devad, 0, MDIO_CMD_READ_C45,
517 MDIO_ST_C45);
518}
519
520static int mt7531_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
521 u16 reg, u16 val)
522{
523 u8 phy_addr;
524 int ret;
525
526 if (addr >= MT753X_NUM_PHYS)
527 return 0;
528
529 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);
530
531 ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
532 MDIO_ST_C45);
533 if (ret)
534 return ret;
535
536 return mt7531_mii_rw(priv, phy_addr, devad, val, MDIO_CMD_WRITE,
537 MDIO_ST_C45);
538}
539
developerc3ac93d2018-12-20 16:12:53 +0800540static int mtk_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
541{
542 struct mtk_eth_priv *priv = bus->priv;
543
544 if (devad < 0)
545 return priv->mii_read(priv, addr, reg);
546 else
547 return priv->mmd_read(priv, addr, devad, reg);
548}
549
550static int mtk_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
551 u16 val)
552{
553 struct mtk_eth_priv *priv = bus->priv;
554
555 if (devad < 0)
556 return priv->mii_write(priv, addr, reg, val);
557 else
558 return priv->mmd_write(priv, addr, devad, reg, val);
559}
560
561static int mtk_mdio_register(struct udevice *dev)
562{
563 struct mtk_eth_priv *priv = dev_get_priv(dev);
564 struct mii_dev *mdio_bus = mdio_alloc();
565 int ret;
566
567 if (!mdio_bus)
568 return -ENOMEM;
569
570 /* Assign MDIO access APIs according to the switch/phy */
571 switch (priv->sw) {
572 case SW_MT7530:
573 priv->mii_read = mtk_mii_read;
574 priv->mii_write = mtk_mii_write;
575 priv->mmd_read = mtk_mmd_ind_read;
576 priv->mmd_write = mtk_mmd_ind_write;
577 break;
developerd5d73952020-02-18 16:49:37 +0800578 case SW_MT7531:
developer76e14722023-07-19 17:17:41 +0800579 case SW_MT7988:
developerd5d73952020-02-18 16:49:37 +0800580 priv->mii_read = mt7531_mii_ind_read;
581 priv->mii_write = mt7531_mii_ind_write;
582 priv->mmd_read = mt7531_mmd_ind_read;
583 priv->mmd_write = mt7531_mmd_ind_write;
584 break;
developerc3ac93d2018-12-20 16:12:53 +0800585 default:
586 priv->mii_read = mtk_mii_read;
587 priv->mii_write = mtk_mii_write;
588 priv->mmd_read = mtk_mmd_read;
589 priv->mmd_write = mtk_mmd_write;
590 }
591
592 mdio_bus->read = mtk_mdio_read;
593 mdio_bus->write = mtk_mdio_write;
594 snprintf(mdio_bus->name, sizeof(mdio_bus->name), dev->name);
595
596 mdio_bus->priv = (void *)priv;
597
598 ret = mdio_register(mdio_bus);
599
600 if (ret)
601 return ret;
602
603 priv->mdio_bus = mdio_bus;
604
605 return 0;
606}
607
/* Read an MT753x switch core register via MMD device 0x1f of PHY 0. */
static int mt753x_core_reg_read(struct mtk_eth_priv *priv, u32 reg)
{
	u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);

	return priv->mmd_read(priv, phy_addr, 0x1f, reg);
}
614
/* Write an MT753x switch core register via MMD device 0x1f of PHY 0. */
static void mt753x_core_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);

	priv->mmd_write(priv, phy_addr, 0x1f, reg, val);
}
621
/*
 * mt7530_pad_clk_setup() - bring up the MT7530 core and TRGMII Tx clocks
 * @priv: driver private data
 * @mode: xMII interface mode; only PHY_INTERFACE_MODE_RGMII is supported
 *
 * The PLL programming sequence below is order-sensitive: core clock and
 * PLL are disabled before reprogramming, and re-enabled at the end.
 *
 * Return: 0 on success, -EINVAL for an unsupported @mode.
 */
static int mt7530_pad_clk_setup(struct mtk_eth_priv *priv, int mode)
{
	u32 ncpo1, ssc_delta;

	switch (mode) {
	case PHY_INTERFACE_MODE_RGMII:
		ncpo1 = 0x0c80;
		ssc_delta = 0x87;
		break;
	default:
		printf("error: xMII mode %d not supported\n", mode);
		return -EINVAL;
	}

	/* Disable MT7530 core clock */
	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, 0);

	/* Disable MT7530 PLL */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S));

	/* For MT7530 core clock = 500Mhz */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP2,
			      (1 << RG_GSWPLL_POSDIV_500M_S) |
			      (25 << RG_GSWPLL_FBKDIV_500M_S));

	/* Enable MT7530 PLL */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S) |
			      RG_GSWPLL_EN_PRE);

	udelay(20);

	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);

	/* Setup the MT7530 TRGMII Tx Clock */
	mt753x_core_reg_write(priv, CORE_PLL_GROUP5, ncpo1);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP6, 0);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP10, ssc_delta);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP11, ssc_delta);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
			      RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);

	mt753x_core_reg_write(priv, CORE_PLL_GROUP2,
			      RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
			      (1 << RG_SYSPLL_POSDIV_S));

	mt753x_core_reg_write(priv, CORE_PLL_GROUP7,
			      RG_LCDDS_PCW_NCPO_CHG | (3 << RG_LCCDS_C_S) |
			      RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);

	/* Enable MT7530 core clock */
	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG,
			      REG_GSWCK_EN | REG_TRGMIICK_EN);

	return 0;
}
681
developer08849652023-07-19 17:16:54 +0800682static void mt7530_mac_control(struct mtk_eth_priv *priv, bool enable)
683{
684 u32 pmcr = FORCE_MODE;
685
686 if (enable)
687 pmcr = priv->mt753x_pmcr;
688
689 mt753x_reg_write(priv, PMCR_REG(6), pmcr);
690}
691
/*
 * One-time MT7530 switch initialization: trap/PHY-base discovery, switch
 * reset, CPU port (port 6) RGMII setup, core PLL bring-up and TRGMII Tx
 * driving adjustment. The sequence is order-sensitive (PHYs are powered
 * down across the reset and re-enabled at the end).
 *
 * Return: 0 on success.
 */
static int mt7530_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val, txdrv;
	int i;

	if (!MTK_HAS_CAPS(priv->soc->caps, MTK_TRGMII_MT7621_CLK)) {
		/* Select 250MHz clk for RGMII mode */
		mtk_ethsys_rmw(priv, ETHSYS_CLKCFG0_REG,
			       ETHSYS_TRGMII_CLK_SEL362_5, 0);

		txdrv = 8;
	} else {
		txdrv = 4;
	}

	/* Modify HWTRAP first to allow direct access to internal PHYs */
	mt753x_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP;
	val &= ~C_MDIO_BPS;
	mt753x_reg_write(priv, MHWTRAP_REG, val);

	/* Calculate the phy base address */
	val = ((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3;
	priv->mt753x_phy_base = (val | 0x7) + 1;

	/* Turn off PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE);

	/* MT7530 reset */
	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	val = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	      MAC_MODE | FORCE_MODE |
	      MAC_TX_EN | MAC_RX_EN |
	      BKOFF_EN | BACKPR_EN |
	      (SPEED_1000M << FORCE_SPD_S) |
	      FORCE_DPX | FORCE_LINK;

	/* MT7530 Port6: Forced 1000M/FD, FC disabled */
	priv->mt753x_pmcr = val;

	/* MT7530 Port5: Forced link down */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);

	/* Keep MAC link down before starting eth */
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE);

	/* MT7530 Port6: Set to RGMII */
	mt753x_reg_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_M, P6_INTF_MODE_RGMII);

	/* Hardware Trap: Enable Port6, Disable Port5 */
	mt753x_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP | LOOPDET_DIS | P5_INTF_DIS |
	       (P5_INTF_SEL_GMAC5 << P5_INTF_SEL_S) |
	       (P5_INTF_MODE_RGMII << P5_INTF_MODE_S);
	val &= ~(C_MDIO_BPS | P6_INTF_DIS);
	mt753x_reg_write(priv, MHWTRAP_REG, val);

	/* Setup switch core pll */
	mt7530_pad_clk_setup(priv, priv->phy_interface);

	/* Lower Tx Driving for TRGMII path */
	for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
		mt753x_reg_write(priv, MT7530_TRGMII_TD_ODT(i),
				 (txdrv << TD_DM_DRVP_S) |
				 (txdrv << TD_DM_DRVN_S));

	for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
		mt753x_reg_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_M, 16);

	/* Turn on PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	return 0;
}
783
/*
 * mt7531_core_pll_setup() - bring up the MT7531 core PLL (COREPLL)
 * @priv: driver private data
 * @mcm: MCM-package flag; currently unused here — NOTE(review): confirm
 *       whether a package-specific step is missing or the parameter is
 *       kept for interface symmetry
 *
 * Order-sensitive sequence: the PLL is disabled and switched to the XTAL
 * reference before reprogramming, then re-enabled at the end. Only a
 * 25 MHz XTAL is supported.
 */
static void mt7531_core_pll_setup(struct mtk_eth_priv *priv, int mcm)
{
	/* Step 1 : Disable MT7531 COREPLL */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, 0);

	/* Step 2: switch to XTAL output */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_CLKSW, SW_CLKSW);

	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, 0);

	/* Step 3: disable PLLGP and enable program PLLGP */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_PLLGP, SW_PLLGP);

	/* Step 4: program COREPLL output frequency to 500MHz */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_POSDIV_M,
		       2 << RG_COREPLL_POSDIV_S);
	udelay(25);

	/* Currently, support XTAL 25Mhz only */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_M,
		       0x140000 << RG_COREPLL_SDM_PCW_S);

	/* Set feedback divide ratio update signal to high */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG,
		       RG_COREPLL_SDM_PCW_CHG);

	/* Wait for at least 16 XTAL clocks */
	udelay(10);

	/* Step 5: set feedback divide ratio update signal to low */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG, 0);

	/* add enable 325M clock for SGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);

	/* add enable 250SSC clock for RGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);

	/* Step 6: Enable MT7531 PLL */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, RG_COREPLL_EN);

	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, EN_COREPLL);

	udelay(25);
}
829
/*
 * Configure an MT7531 SGMII-capable port (5 or 6) for forced 2.5G
 * (SGMII GEN2) operation with autonegotiation disabled, then release
 * the PHYA power-down state.
 *
 * Return: 0 on success, -EINVAL if @port is not an SGMII port.
 */
static int mt7531_port_sgmii_init(struct mtk_eth_priv *priv,
				  u32 port)
{
	if (port != 5 && port != 6) {
		printf("mt7531: port %d is not a SGMII port\n", port);
		return -EINVAL;
	}

	/* Set SGMII GEN2 speed(2.5G) */
	mt753x_reg_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port),
		       SGMSYS_SPEED_2500, SGMSYS_SPEED_2500);

	/* Disable SGMII AN */
	mt753x_reg_rmw(priv, MT7531_PCS_CONTROL_1(port),
		       SGMII_AN_ENABLE, 0);

	/* SGMII force mode setting */
	mt753x_reg_write(priv, MT7531_SGMII_MODE(port), SGMII_FORCE_MODE);

	/* Release PHYA power down state */
	mt753x_reg_rmw(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
		       SGMII_PHYA_PWD, 0);

	return 0;
}
855
856static int mt7531_port_rgmii_init(struct mtk_eth_priv *priv, u32 port)
857{
858 u32 val;
859
860 if (port != 5) {
861 printf("error: RGMII mode is not available for port %d\n",
862 port);
863 return -EINVAL;
864 }
865
866 mt753x_reg_read(priv, MT7531_CLKGEN_CTRL, &val);
867 val |= GP_CLK_EN;
868 val &= ~GP_MODE_M;
869 val |= GP_MODE_RGMII << GP_MODE_S;
870 val |= TXCLK_NO_REVERSE;
871 val |= RXCLK_NO_DELAY;
872 val &= ~CLK_SKEW_IN_M;
873 val |= CLK_SKEW_IN_NO_CHANGE << CLK_SKEW_IN_S;
874 val &= ~CLK_SKEW_OUT_M;
875 val |= CLK_SKEW_OUT_NO_CHANGE << CLK_SKEW_OUT_S;
876 mt753x_reg_write(priv, MT7531_CLKGEN_CTRL, val);
877
878 return 0;
879}
880
/*
 * Tune all MT7531 internal PHYs: enable hardware auto-downshift,
 * link-down power saving, and Tx power saving.
 */
static void mt7531_phy_setting(struct mtk_eth_priv *priv)
{
	int i;
	u32 val;

	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		/* Enable HW auto downshift */
		priv->mii_write(priv, i, 0x1f, 0x1);	/* select page 1 */
		val = priv->mii_read(priv, i, PHY_EXT_REG_14);
		val |= PHY_EN_DOWN_SHFIT;
		priv->mii_write(priv, i, PHY_EXT_REG_14, val);

		/* PHY link down power saving enable */
		val = priv->mii_read(priv, i, PHY_EXT_REG_17);
		val |= PHY_LINKDOWN_POWER_SAVING_EN;
		priv->mii_write(priv, i, PHY_EXT_REG_17, val);

		val = priv->mmd_read(priv, i, 0x1e, PHY_DEV1E_REG_0C6);
		val &= ~PHY_POWER_SAVING_M;
		val |= PHY_POWER_SAVING_TX << PHY_POWER_SAVING_S;
		priv->mmd_write(priv, i, 0x1e, PHY_DEV1E_REG_0C6, val);
	}
}
904
developer08849652023-07-19 17:16:54 +0800905static void mt7531_mac_control(struct mtk_eth_priv *priv, bool enable)
906{
907 u32 pmcr = FORCE_MODE_LNK;
908
909 if (enable)
910 pmcr = priv->mt753x_pmcr;
911
912 mt753x_reg_write(priv, PMCR_REG(5), pmcr);
913 mt753x_reg_write(priv, PMCR_REG(6), pmcr);
914}
915
/*
 * One-time MT7531 switch initialization: soft reset, core PLL bring-up,
 * CPU port (RGMII or SGMII) configuration and internal PHY tuning. The
 * sequence is order-sensitive (PHYs are powered down across the reset
 * and re-enabled near the end).
 *
 * Return: 0 on success.
 */
static int mt7531_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val;
	u32 pmcr;
	u32 port5_sgmii;
	int i;

	/* Internal PHYs sit right after the switch's own SMI address */
	priv->mt753x_phy_base = (priv->mt753x_smi_addr + 1) &
				MT753X_SMI_ADDR_MASK;

	/* Turn off PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE_LNK);
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);

	/* Switch soft reset */
	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	/* Enable MDC input Schmitt Trigger */
	mt753x_reg_rmw(priv, MT7531_SMT0_IOLB, SMT_IOLB_5_SMI_MDC_EN,
		       SMT_IOLB_5_SMI_MDC_EN);

	mt7531_core_pll_setup(priv, priv->mcm);

	/* Strap pin tells whether port 5 is wired for dual SGMII */
	mt753x_reg_read(priv, MT7531_TOP_SIG_SR, &val);
	port5_sgmii = !!(val & PAD_DUAL_SGMII_EN);

	/* port5 support either RGMII or SGMII, port6 only support SGMII. */
	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!port5_sgmii)
			mt7531_port_rgmii_init(priv, 5);
		break;
	case PHY_INTERFACE_MODE_2500BASEX:
		mt7531_port_sgmii_init(priv, 6);
		if (port5_sgmii)
			mt7531_port_sgmii_init(priv, 5);
		break;
	default:
		break;
	}

	/* Forced 1000M/FD with Tx/Rx flow control for the CPU ports */
	pmcr = MT7531_FORCE_MODE |
	       (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	       MAC_MODE | MAC_TX_EN | MAC_RX_EN |
	       BKOFF_EN | BACKPR_EN |
	       FORCE_RX_FC | FORCE_TX_FC |
	       (SPEED_1000M << FORCE_SPD_S) | FORCE_DPX |
	       FORCE_LINK;

	priv->mt753x_pmcr = pmcr;

	/* Keep MAC link down before starting eth */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE_LNK);
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);

	/* Turn on PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	mt7531_phy_setting(priv);

	/* Enable Internal PHYs */
	val = mt753x_core_reg_read(priv, CORE_PLL_GROUP4);
	val |= MT7531_BYPASS_MODE;
	val &= ~MT7531_POWER_ON_OFF;
	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, val);

	return 0;
}
999
/*
 * mt7988_phy_setting - Apply tweaks to the MT7988 GSW's embedded PHYs
 *
 * For each internal PHY: enable hardware auto downshift and link-down
 * power saving.  Same as mt7531_phy_setting() minus the MMD power-saving
 * mode programming.
 */
static void mt7988_phy_setting(struct mtk_eth_priv *priv)
{
	u16 val;
	u32 i;

	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		/* Enable HW auto downshift */
		priv->mii_write(priv, i, 0x1f, 0x1); /* reg 0x1f: page select — presumably ext page 1, TODO confirm */
		val = priv->mii_read(priv, i, PHY_EXT_REG_14);
		val |= PHY_EN_DOWN_SHFIT;
		priv->mii_write(priv, i, PHY_EXT_REG_14, val);

		/* PHY link down power saving enable */
		val = priv->mii_read(priv, i, PHY_EXT_REG_17);
		val |= PHY_LINKDOWN_POWER_SAVING_EN;
		priv->mii_write(priv, i, PHY_EXT_REG_17, val);
	}
}
1018
1019static void mt7988_mac_control(struct mtk_eth_priv *priv, bool enable)
1020{
1021 u32 pmcr = FORCE_MODE_LNK;
1022
1023 if (enable)
1024 pmcr = priv->mt753x_pmcr;
1025
1026 mt753x_reg_write(priv, PMCR_REG(6), pmcr);
1027}
1028
/*
 * mt7988_setup - One-time initialization of the MT7988 built-in switch
 *
 * Powers the embedded PHYs down, routes GDM1 traffic to the GSW through
 * the internal CPU bridge (only the USXGMII-bridged configuration is
 * supported; anything else prints an error), caches the CPU-port PMCR in
 * priv->mt753x_pmcr, then powers the PHYs back up and tunes them.  Port 6
 * is left forced link-down until mt7988_mac_control(priv, true).
 *
 * Always returns 0.
 */
static int mt7988_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 pmcr;
	int i;

	priv->gsw_base = regmap_get_range(priv->ethsys_regmap, 0) + GSW_BASE;

	/* Internal PHYs live at the SMI addresses right after the switch */
	priv->mt753x_phy_base = (priv->mt753x_smi_addr + 1) &
				MT753X_SMI_ADDR_MASK;

	/* Turn off PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_USXGMII:
		/* Use CPU bridge instead of actual USXGMII path */

		/* Set GDM1 no drop */
		mtk_fe_rmw(priv, PSE_NO_DROP_CFG_REG, 0, PSE_NO_DROP_GDM1);

		/* Enable GDM1 to GSW CPU bridge */
		mtk_gmac_rmw(priv, GMAC_MAC_MISC_REG, 0, BIT(0));

		/* XGMAC force link up */
		mtk_gmac_rmw(priv, GMAC_XGMAC_STS_REG, 0, P1_XGMAC_FORCE_LINK);

		/* Setup GSW CPU bridge IPG */
		mtk_gmac_rmw(priv, GMAC_GSW_CFG_REG, GSWTX_IPG_M | GSWRX_IPG_M,
			     (0xB << GSWTX_IPG_S) | (0xB << GSWRX_IPG_S));
		break;
	default:
		printf("Error: MT7988 GSW does not support %s interface\n",
		       phy_string_for_interface(priv->phy_interface));
		break;
	}

	/* CPU-port PMCR: forced 1G full duplex with TX/RX flow control */
	pmcr = MT7988_FORCE_MODE |
	       (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	       MAC_MODE | MAC_TX_EN | MAC_RX_EN |
	       BKOFF_EN | BACKPR_EN |
	       FORCE_RX_FC | FORCE_TX_FC |
	       (SPEED_1000M << FORCE_SPD_S) | FORCE_DPX |
	       FORCE_LINK;

	/* Cached value consumed by mt7988_mac_control() */
	priv->mt753x_pmcr = pmcr;

	/* Keep MAC link down before starting eth */
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);

	/* Turn on PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	mt7988_phy_setting(priv);

	return 0;
}
1096
/*
 * mt753x_switch_init - Common MT753x switch bring-up
 *
 * Hard-resets the switch (via the MCM reset line for an integrated switch,
 * or the reset GPIO for an external one), runs the model-specific
 * priv->switch_init() hook, then programs port isolation: each user port
 * (0-5) may forward only to CPU port 6 (matrix 0x40), while port 6 may
 * forward to all user ports (matrix 0x3f).  Every port is put in user-port
 * VLAN mode with the 0x8100 service tag VPID.
 *
 * Returns 0 on success or the error from switch_init().
 */
static int mt753x_switch_init(struct mtk_eth_priv *priv)
{
	int ret;
	int i;

	/* Global reset switch */
	if (priv->mcm) {
		reset_assert(&priv->rst_mcm);
		udelay(1000);
		reset_deassert(&priv->rst_mcm);
		mdelay(priv->mt753x_reset_wait_time);
	} else if (dm_gpio_is_valid(&priv->rst_gpio)) {
		dm_gpio_set_value(&priv->rst_gpio, 0);
		udelay(1000);
		dm_gpio_set_value(&priv->rst_gpio, 1);
		mdelay(priv->mt753x_reset_wait_time);
	}

	ret = priv->switch_init(priv);
	if (ret)
		return ret;

	/* Set port isolation */
	for (i = 0; i < MT753X_NUM_PORTS; i++) {
		/* Set port matrix mode */
		if (i != 6)
			mt753x_reg_write(priv, PCR_REG(i),
					 (0x40 << PORT_MATRIX_S));
		else
			mt753x_reg_write(priv, PCR_REG(i),
					 (0x3f << PORT_MATRIX_S));

		/* Set port mode to user port */
		mt753x_reg_write(priv, PVC_REG(i),
				 (0x8100 << STAG_VPID_S) |
				 (VLAN_ATTR_USER << VLAN_ATTR_S));
	}

	return 0;
}
1137
/*
 * mtk_xphy_link_adjust - Program the XGMAC port MCR from the PHY state
 *
 * Resolves pause flow control from the local and remote advertisements
 * (full duplex only) into the XGMAC force-FC bits, then clears the TRX
 * disable bit to un-gate the data path.
 */
static void mtk_xphy_link_adjust(struct mtk_eth_priv *priv)
{
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr;

	mcr = mtk_gmac_read(priv, XGMAC_PORT_MCR(priv->gmac_id));
	mcr &= ~(XGMAC_FORCE_TX_FC | XGMAC_FORCE_RX_FC);

	/* Pause is only meaningful in full duplex */
	if (priv->phydev->duplex) {
		if (priv->phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (priv->phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		if (priv->phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= XGMAC_FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= XGMAC_FORCE_RX_FC;

		debug("rx pause %s, tx pause %s\n",
		      flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
		      flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	/* Un-gate the TX/RX path */
	mcr &= ~(XGMAC_TRX_DISABLE);
	mtk_gmac_write(priv, XGMAC_PORT_MCR(priv->gmac_id), mcr);
}
1173
developerc3ac93d2018-12-20 16:12:53 +08001174static void mtk_phy_link_adjust(struct mtk_eth_priv *priv)
1175{
1176 u16 lcl_adv = 0, rmt_adv = 0;
1177 u8 flowctrl;
1178 u32 mcr;
1179
developerd5d73952020-02-18 16:49:37 +08001180 mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
developerc3ac93d2018-12-20 16:12:53 +08001181 (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
1182 MAC_MODE | FORCE_MODE |
1183 MAC_TX_EN | MAC_RX_EN |
developer4aafc992023-07-19 17:17:13 +08001184 DEL_RXFIFO_CLR |
developerc3ac93d2018-12-20 16:12:53 +08001185 BKOFF_EN | BACKPR_EN;
1186
1187 switch (priv->phydev->speed) {
1188 case SPEED_10:
1189 mcr |= (SPEED_10M << FORCE_SPD_S);
1190 break;
1191 case SPEED_100:
1192 mcr |= (SPEED_100M << FORCE_SPD_S);
1193 break;
1194 case SPEED_1000:
developer4aafc992023-07-19 17:17:13 +08001195 case SPEED_2500:
developerc3ac93d2018-12-20 16:12:53 +08001196 mcr |= (SPEED_1000M << FORCE_SPD_S);
1197 break;
1198 };
1199
1200 if (priv->phydev->link)
1201 mcr |= FORCE_LINK;
1202
1203 if (priv->phydev->duplex) {
1204 mcr |= FORCE_DPX;
1205
1206 if (priv->phydev->pause)
1207 rmt_adv = LPA_PAUSE_CAP;
1208 if (priv->phydev->asym_pause)
1209 rmt_adv |= LPA_PAUSE_ASYM;
1210
1211 if (priv->phydev->advertising & ADVERTISED_Pause)
1212 lcl_adv |= ADVERTISE_PAUSE_CAP;
1213 if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
1214 lcl_adv |= ADVERTISE_PAUSE_ASYM;
1215
1216 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
1217
1218 if (flowctrl & FLOW_CTRL_TX)
1219 mcr |= FORCE_TX_FC;
1220 if (flowctrl & FLOW_CTRL_RX)
1221 mcr |= FORCE_RX_FC;
1222
1223 debug("rx pause %s, tx pause %s\n",
1224 flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
1225 flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
1226 }
1227
1228 mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
1229}
1230
/*
 * mtk_phy_start - Start the attached PHY and sync the MAC to the link
 *
 * Runs phy_startup(); if the link came up and the device is not in fixed
 * force mode, programs the (X)GMAC according to the negotiated
 * parameters via the appropriate link-adjust helper.
 *
 * Returns 0 when the PHY started (including the link-down case), or the
 * error from phy_startup().
 */
static int mtk_phy_start(struct mtk_eth_priv *priv)
{
	struct phy_device *phydev = priv->phydev;
	int ret;

	ret = phy_startup(phydev);

	if (ret) {
		debug("Could not initialize PHY %s\n", phydev->dev->name);
		return ret;
	}

	/* Link down is not an error; just leave the MAC unconfigured */
	if (!phydev->link) {
		debug("%s: link down.\n", phydev->dev->name);
		return 0;
	}

	if (!priv->force_mode) {
		/* 10G-class interfaces use the XGMAC register set */
		if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII ||
		    priv->phy_interface == PHY_INTERFACE_MODE_XGMII)
			mtk_xphy_link_adjust(priv);
		else
			mtk_phy_link_adjust(priv);
	}

	debug("Speed: %d, %s duplex%s\n", phydev->speed,
	      (phydev->duplex) ? "full" : "half",
	      (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");

	return 0;
}
1262
/*
 * mtk_phy_probe - Connect and configure the external PHY for this GMAC
 *
 * Attaches the PHY at priv->phy_addr on the driver's MDIO bus, caps the
 * advertised capabilities to 1G (PHY_GBIT_FEATURES) and runs phy_config().
 *
 * Returns 0 on success, -ENODEV when no PHY device could be connected.
 */
static int mtk_phy_probe(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct phy_device *phydev;

	phydev = phy_connect(priv->mdio_bus, priv->phy_addr, dev,
			     priv->phy_interface);
	if (!phydev)
		return -ENODEV;

	/* Advertise at most gigabit capabilities */
	phydev->supported &= PHY_GBIT_FEATURES;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}
1281
/*
 * mtk_sgmii_an_init - Configure the SGMII PCS for 1G autonegotiation
 *
 * Selects GEN1 (1G) speed, enables SGMII autonegotiation, applies the
 * optional TX/RX polarity swap (priv->pn_swap), and finally releases the
 * PHYA from power-down.
 */
static void mtk_sgmii_an_init(struct mtk_eth_priv *priv)
{
	/* Set SGMII GEN1 speed(1G) */
	clrsetbits_le32(priv->sgmii_base + priv->soc->ana_rgc3,
			SGMSYS_SPEED_2500, 0);

	/* Enable SGMII AN */
	setbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
		     SGMII_AN_ENABLE);

	/* SGMII AN mode setting */
	writel(SGMII_AN_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);

	/* SGMII PN SWAP setting */
	if (priv->pn_swap) {
		setbits_le32(priv->sgmii_base + SGMSYS_QPHY_WRAP_CTRL,
			     SGMII_PN_SWAP_TX_RX);
	}

	/* Release PHYA power down state */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
			SGMII_PHYA_PWD, 0);
}
1305
/*
 * mtk_sgmii_force_init - Configure the SGMII PCS for forced 2.5G
 *
 * Selects GEN2 (2.5G) speed with autonegotiation disabled (2500Base-X),
 * applies the optional TX/RX polarity swap (priv->pn_swap), and finally
 * releases the PHYA from power-down.
 */
static void mtk_sgmii_force_init(struct mtk_eth_priv *priv)
{
	/* Set SGMII GEN2 speed(2.5G) */
	setbits_le32(priv->sgmii_base + priv->soc->ana_rgc3,
		     SGMSYS_SPEED_2500);

	/* Disable SGMII AN */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
			SGMII_AN_ENABLE, 0);

	/* SGMII force mode setting */
	writel(SGMII_FORCE_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);

	/* SGMII PN SWAP setting */
	if (priv->pn_swap) {
		setbits_le32(priv->sgmii_base + SGMSYS_QPHY_WRAP_CTRL,
			     SGMII_PN_SWAP_TX_RX);
	}

	/* Release PHYA power down state */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
			SGMII_PHYA_PWD, 0);
}
1329
/*
 * mtk_xfi_pll_enable - Enable the XFI PLL used by the USXGMII path
 *
 * Writes the documented software workaround for the USXGMII PLL TCL
 * issue, then sets the PLL enable bit.
 */
static void mtk_xfi_pll_enable(struct mtk_eth_priv *priv)
{
	u32 val = 0;

	/* Add software workaround for USXGMII PLL TCL issue */
	regmap_write(priv->xfi_pll_regmap, XFI_PLL_ANA_GLB8,
		     RG_XFI_PLL_ANA_SWWA);

	/* Set the PLL enable bit, preserving the rest of the register */
	regmap_read(priv->xfi_pll_regmap, XFI_PLL_DIG_GLB8, &val);
	val |= RG_XFI_PLL_EN;
	regmap_write(priv->xfi_pll_regmap, XFI_PLL_DIG_GLB8, val);
}
1342
/*
 * mtk_usxgmii_reset - Reset the per-GMAC USXGMII block via TOPRGU
 *
 * Pulses the USXGMII-related reset bits for GMAC 1 or 2 through the
 * top-level reset controller.  The 0xFC/0x18 offsets and their values are
 * opaque vendor magic (MediaTek SDK); do not reorder the writes.  GMAC
 * ids other than 1 and 2 are silently ignored.
 */
static void mtk_usxgmii_reset(struct mtk_eth_priv *priv)
{
	switch (priv->gmac_id) {
	case 1:
		regmap_write(priv->toprgu_regmap, 0xFC, 0x0000A004);
		regmap_write(priv->toprgu_regmap, 0x18, 0x88F0A004);
		regmap_write(priv->toprgu_regmap, 0xFC, 0x00000000);
		regmap_write(priv->toprgu_regmap, 0x18, 0x88F00000);
		regmap_write(priv->toprgu_regmap, 0x18, 0x00F00000);
		break;
	case 2:
		regmap_write(priv->toprgu_regmap, 0xFC, 0x00005002);
		regmap_write(priv->toprgu_regmap, 0x18, 0x88F05002);
		regmap_write(priv->toprgu_regmap, 0xFC, 0x00000000);
		regmap_write(priv->toprgu_regmap, 0x18, 0x88F00000);
		regmap_write(priv->toprgu_regmap, 0x18, 0x00F00000);
		break;
	}

	/* Let the reset settle */
	mdelay(10);
}
1364
/*
 * mtk_usxgmii_setup_phya_an_10000 - Program the PHYA for USXGMII 10G AN
 *
 * Programs the USXGMII PCS and XFI PEXTP analog front-end for 10G
 * autonegotiation.  The entire offset/value sequence, including the
 * delays between writes, is opaque vendor initialization code taken from
 * the MediaTek SDK — do not reorder or "clean up" individual writes.
 */
static void mtk_usxgmii_setup_phya_an_10000(struct mtk_eth_priv *priv)
{
	regmap_write(priv->usxgmii_regmap, 0x810, 0x000FFE6D);
	regmap_write(priv->usxgmii_regmap, 0x818, 0x07B1EC7B);
	regmap_write(priv->usxgmii_regmap, 0x80C, 0x30000000);
	ndelay(1020);
	regmap_write(priv->usxgmii_regmap, 0x80C, 0x10000000);
	ndelay(1020);
	regmap_write(priv->usxgmii_regmap, 0x80C, 0x00000000);

	regmap_write(priv->xfi_pextp_regmap, 0x9024, 0x00C9071C);
	regmap_write(priv->xfi_pextp_regmap, 0x2020, 0xAA8585AA);
	regmap_write(priv->xfi_pextp_regmap, 0x2030, 0x0C020707);
	regmap_write(priv->xfi_pextp_regmap, 0x2034, 0x0E050F0F);
	regmap_write(priv->xfi_pextp_regmap, 0x2040, 0x00140032);
	regmap_write(priv->xfi_pextp_regmap, 0x50F0, 0x00C014AA);
	regmap_write(priv->xfi_pextp_regmap, 0x50E0, 0x3777C12B);
	regmap_write(priv->xfi_pextp_regmap, 0x506C, 0x005F9CFF);
	regmap_write(priv->xfi_pextp_regmap, 0x5070, 0x9D9DFAFA);
	regmap_write(priv->xfi_pextp_regmap, 0x5074, 0x27273F3F);
	regmap_write(priv->xfi_pextp_regmap, 0x5078, 0xA7883C68);
	regmap_write(priv->xfi_pextp_regmap, 0x507C, 0x11661166);
	regmap_write(priv->xfi_pextp_regmap, 0x5080, 0x0E000AAF);
	regmap_write(priv->xfi_pextp_regmap, 0x5084, 0x08080D0D);
	regmap_write(priv->xfi_pextp_regmap, 0x5088, 0x02030909);
	regmap_write(priv->xfi_pextp_regmap, 0x50E4, 0x0C0C0000);
	regmap_write(priv->xfi_pextp_regmap, 0x50E8, 0x04040000);
	regmap_write(priv->xfi_pextp_regmap, 0x50EC, 0x0F0F0C06);
	regmap_write(priv->xfi_pextp_regmap, 0x50A8, 0x506E8C8C);
	regmap_write(priv->xfi_pextp_regmap, 0x6004, 0x18190000);
	regmap_write(priv->xfi_pextp_regmap, 0x00F8, 0x01423342);
	regmap_write(priv->xfi_pextp_regmap, 0x00F4, 0x80201F20);
	regmap_write(priv->xfi_pextp_regmap, 0x0030, 0x00050C00);
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x02002800);
	ndelay(1020);
	regmap_write(priv->xfi_pextp_regmap, 0x30B0, 0x00000020);
	regmap_write(priv->xfi_pextp_regmap, 0x3028, 0x00008A01);
	regmap_write(priv->xfi_pextp_regmap, 0x302C, 0x0000A884);
	regmap_write(priv->xfi_pextp_regmap, 0x3024, 0x00083002);
	regmap_write(priv->xfi_pextp_regmap, 0x3010, 0x00022220);
	regmap_write(priv->xfi_pextp_regmap, 0x5064, 0x0F020A01);
	regmap_write(priv->xfi_pextp_regmap, 0x50B4, 0x06100600);
	regmap_write(priv->xfi_pextp_regmap, 0x3048, 0x40704000);
	regmap_write(priv->xfi_pextp_regmap, 0x3050, 0xA8000000);
	regmap_write(priv->xfi_pextp_regmap, 0x3054, 0x000000AA);
	regmap_write(priv->xfi_pextp_regmap, 0x306C, 0x00000F00);
	regmap_write(priv->xfi_pextp_regmap, 0xA060, 0x00040000);
	regmap_write(priv->xfi_pextp_regmap, 0x90D0, 0x00000001);
	/* Staged release at offset 0x70; the delays are part of the sequence */
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200E800);
	udelay(150);
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200C111);
	ndelay(1020);
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200C101);
	udelay(15);
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0202C111);
	ndelay(1020);
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0202C101);
	udelay(100);
	regmap_write(priv->xfi_pextp_regmap, 0x30B0, 0x00000030);
	regmap_write(priv->xfi_pextp_regmap, 0x00F4, 0x80201F00);
	regmap_write(priv->xfi_pextp_regmap, 0x3040, 0x30000000);
	udelay(400);
}
1428
/*
 * mtk_usxgmii_an_init - Full USXGMII autonegotiation bring-up
 *
 * Order matters: enable the XFI PLL first, reset the USXGMII block, then
 * program the PHYA for 10G AN.
 */
static void mtk_usxgmii_an_init(struct mtk_eth_priv *priv)
{
	mtk_xfi_pll_enable(priv);
	mtk_usxgmii_reset(priv);
	mtk_usxgmii_setup_phya_an_10000(priv);
}
1435
/*
 * mtk_mac_init - Configure the GMAC for non-XGMII interfaces
 *
 * Selects the GE mode mux in ETHSYS according to priv->phy_interface,
 * brings up the SGMII PCS for SGMII/2500Base-X, programs a fixed-link
 * MCR when force mode is requested, and applies the TRGMII TX-driving /
 * RX-clock workaround on SoCs that need it.
 */
static void mtk_mac_init(struct mtk_eth_priv *priv)
{
	int i, ge_mode = 0;
	u32 mcr;

	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII:
		ge_mode = GE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_2500BASEX:
		/* Steer the shared USB3/SGMII QPHY to SGMII where present */
		if (MTK_HAS_CAPS(priv->soc->caps, MTK_GMAC2_U3_QPHY)) {
			mtk_infra_rmw(priv, USB_PHY_SWITCH_REG, QPHY_SEL_MASK,
				      SGMII_QPHY_SEL);
		}

		/* SGMII paths still use the RGMII GE mode encoding */
		ge_mode = GE_MODE_RGMII;
		mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG, SYSCFG0_SGMII_SEL_M,
			       SYSCFG0_SGMII_SEL(priv->gmac_id));
		/* SGMII autonegotiates at 1G; 2500Base-X is forced 2.5G */
		if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
			mtk_sgmii_an_init(priv);
		else
			mtk_sgmii_force_init(priv);
		break;
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		ge_mode = GE_MODE_MII;
		break;
	case PHY_INTERFACE_MODE_RMII:
		ge_mode = GE_MODE_RMII;
		break;
	default:
		break;
	}

	/* set the gmac to the right mode */
	mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
		       SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
		       ge_mode << SYSCFG0_GE_MODE_S(priv->gmac_id));

	if (priv->force_mode) {
		/* Fixed link: build the MCR from DT-provided speed/duplex */
		mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
		      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
		      MAC_MODE | FORCE_MODE |
		      MAC_TX_EN | MAC_RX_EN |
		      BKOFF_EN | BACKPR_EN |
		      FORCE_LINK;

		switch (priv->speed) {
		case SPEED_10:
			mcr |= SPEED_10M << FORCE_SPD_S;
			break;
		case SPEED_100:
			mcr |= SPEED_100M << FORCE_SPD_S;
			break;
		case SPEED_1000:
		case SPEED_2500:
			/* 2.5G shares the 1G speed encoding in this field */
			mcr |= SPEED_1000M << FORCE_SPD_S;
			break;
		}

		if (priv->duplex)
			mcr |= FORCE_DPX;

		mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
	}

	if (MTK_HAS_CAPS(priv->soc->caps, MTK_GMAC1_TRGMII) &&
	    !MTK_HAS_CAPS(priv->soc->caps, MTK_TRGMII_MT7621_CLK)) {
		/* Lower Tx Driving for TRGMII path */
		for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
			mtk_gmac_write(priv, GMAC_TRGMII_TD_ODT(i),
				       (8 << TD_DM_DRVP_S) |
				       (8 << TD_DM_DRVN_S));

		/* Pulse the TRGMII RX clock reset with DQS input select */
		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, 0,
			     RX_RST | RXC_DQSISEL);
		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, RX_RST, 0);
	}
}
1517
/*
 * mtk_xmac_init - Configure the GMAC for 10G-class interfaces
 *
 * For USXGMII, runs the full PCS/PHYA bring-up.  Clears the GE mode mux,
 * selects the GMAC2-to-USXGMII path mux when applicable, optionally sets
 * the XGMAC force-link bit (XGMII, or GMAC2), and leaves the legacy GMAC
 * MCR in force mode with the link bit clear.
 */
static void mtk_xmac_init(struct mtk_eth_priv *priv)
{
	u32 force_link = 0;

	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_USXGMII:
		mtk_usxgmii_an_init(priv);
		break;
	default:
		break;
	}

	/* Set GMAC to the correct mode */
	mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
		       SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
		       0);

	/* Route GMAC2 to the USXGMII PCS */
	if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII &&
	    priv->gmac_id == 1) {
		mtk_infra_rmw(priv, TOPMISC_NETSYS_PCS_MUX,
			      NETSYS_PCS_MUX_MASK, MUX_G2_USXGMII_SEL);
	}

	/* XGMII and GMAC2 force the XGMAC link up; USXGMII relies on AN */
	if (priv->phy_interface == PHY_INTERFACE_MODE_XGMII ||
	    priv->gmac_id == 2)
		force_link = XGMAC_FORCE_LINK(priv->gmac_id);

	mtk_gmac_rmw(priv, XGMAC_STS(priv->gmac_id),
		     XGMAC_FORCE_LINK(priv->gmac_id), force_link);

	/* Force GMAC link down */
	mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), FORCE_MODE);
}
1551
/*
 * mtk_eth_fifo_init - Initialize PDMA descriptor rings and packet buffers
 *
 * Gives every TX/RX descriptor a fixed PKTSIZE_ALIGN-sized buffer carved
 * out of priv->pkt_pool.  TX descriptors start with DDONE set (owned by
 * the CPU); RX descriptors start with DDONE clear and their buffer length
 * programmed (owned by DMA).  The rings themselves live in non-cached
 * memory, so only the packet pool needs a cache flush.  Finally the ring
 * base/size/index registers are programmed and the DMA indexes reset.
 */
static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
{
	char *pkt_base = priv->pkt_pool;
	struct mtk_tx_dma_v2 *txd;
	struct mtk_rx_dma_v2 *rxd;
	int i;

	/* Stop/clear DMA config bits before touching the rings */
	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
	udelay(500);

	memset(priv->tx_ring_noc, 0, NUM_TX_DESC * priv->soc->txd_size);
	memset(priv->rx_ring_noc, 0, NUM_RX_DESC * priv->soc->rxd_size);
	memset(priv->pkt_pool, 0xff, TOTAL_PKT_BUF_SIZE);

	flush_dcache_range((ulong)pkt_base,
			   (ulong)(pkt_base + TOTAL_PKT_BUF_SIZE));

	priv->rx_dma_owner_idx0 = 0;
	priv->tx_cpu_owner_idx0 = 0;

	for (i = 0; i < NUM_TX_DESC; i++) {
		txd = priv->tx_ring_noc + i * priv->soc->txd_size;

		txd->txd1 = virt_to_phys(pkt_base);
		txd->txd2 = PDMA_TXD2_DDONE | PDMA_TXD2_LS0;

		/* Forward-port field moved/changed per NETSYS generation;
		 * on V3 the third GMAC uses forward port 15
		 */
		if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
			txd->txd5 = PDMA_V2_TXD5_FPORT_SET(priv->gmac_id == 2 ?
							   15 : priv->gmac_id + 1);
		else if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
			txd->txd5 = PDMA_V2_TXD5_FPORT_SET(priv->gmac_id + 1);
		else
			txd->txd4 = PDMA_V1_TXD4_FPORT_SET(priv->gmac_id + 1);

		pkt_base += PKTSIZE_ALIGN;
	}

	for (i = 0; i < NUM_RX_DESC; i++) {
		rxd = priv->rx_ring_noc + i * priv->soc->rxd_size;

		rxd->rxd1 = virt_to_phys(pkt_base);

		if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
			rxd->rxd2 = PDMA_V2_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
		else
			rxd->rxd2 = PDMA_V1_RXD2_PLEN0_SET(PKTSIZE_ALIGN);

		pkt_base += PKTSIZE_ALIGN;
	}

	mtk_pdma_write(priv, TX_BASE_PTR_REG(0),
		       virt_to_phys(priv->tx_ring_noc));
	mtk_pdma_write(priv, TX_MAX_CNT_REG(0), NUM_TX_DESC);
	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);

	mtk_pdma_write(priv, RX_BASE_PTR_REG(0),
		       virt_to_phys(priv->rx_ring_noc));
	mtk_pdma_write(priv, RX_MAX_CNT_REG(0), NUM_RX_DESC);
	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), NUM_RX_DESC - 1);

	mtk_pdma_write(priv, PDMA_RST_IDX_REG, RST_DTX_IDX0 | RST_DRX_IDX0);
}
1615
/*
 * mtk_eth_mdc_init - Program the MDIO bus MDC clock
 *
 * priv->mdc holds the requested MDC frequency; 0 means leave the hardware
 * default untouched.  The divider is derived from MDC_MAX_FREQ and
 * clamped to MDC_MAX_DIVIDER.  MDC turbo mode is also enabled — its
 * register location differs on NETSYS v3.
 */
static void mtk_eth_mdc_init(struct mtk_eth_priv *priv)
{
	u32 divider;

	if (priv->mdc == 0)
		return;

	divider = min_t(u32, DIV_ROUND_UP(MDC_MAX_FREQ, priv->mdc), MDC_MAX_DIVIDER);

	/* Configure MDC turbo mode */
	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
		mtk_gmac_rmw(priv, GMAC_MAC_MISC_REG, 0, MISC_MDC_TURBO);
	else
		mtk_gmac_rmw(priv, GMAC_PPSC_REG, 0, MISC_MDC_TURBO);

	/* Configure MDC divider */
	mtk_gmac_rmw(priv, GMAC_PPSC_REG, PHY_MDC_CFG,
		     FIELD_PREP(PHY_MDC_CFG, divider));
}
1635
/*
 * mtk_eth_start - Bring up the interface (U-Boot eth_ops.start)
 *
 * Resets the frame engine, routes ingress traffic of this GMAC's GDMA to
 * the PDMA (and tells every other GDMA to discard), sets up the DMA
 * rings, re-enables the switch CPU MAC if a switch is attached, starts
 * the PHY otherwise, and finally enables the PDMA TX/RX engines.
 *
 * Returns 0 on success or a negative error from PHY startup.
 */
static int mtk_eth_start(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	int i, ret;

	/* Reset FE */
	reset_assert(&priv->rst_fe);
	udelay(1000);
	reset_deassert(&priv->rst_fe);
	mdelay(10);

	/* NETSYS v2/v3 use the v2 PDMA descriptor layout */
	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
		setbits_le32(priv->fe_base + FE_GLO_MISC_REG, PDMA_VER_V2);

	/* Packets forward to PDMA */
	mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG, GDMA_FWD_TO_CPU);

	/* All other GDMAs drop their ingress traffic */
	for (i = 0; i < priv->soc->gdma_count; i++) {
		if (i == priv->gmac_id)
			continue;

		mtk_gdma_write(priv, i, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);
	}

	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3)) {
		/* MT7988 GSW on GMAC0 receives through the CPU bridge */
		if (priv->sw == SW_MT7988 && priv->gmac_id == 0) {
			mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG,
				       GDMA_BRIDGE_TO_CPU);
		}

		mtk_gdma_write(priv, priv->gmac_id, GDMA_EG_CTRL_REG,
			       GDMA_CPU_BRIDGE_EN);
	}

	udelay(500);

	mtk_eth_fifo_init(priv);

	/* Re-enable the switch CPU-port MAC, if any */
	if (priv->switch_mac_control)
		priv->switch_mac_control(priv, true);

	/* Start PHY */
	if (priv->sw == SW_NONE) {
		ret = mtk_phy_start(priv);
		if (ret)
			return ret;
	}

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0,
		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
	udelay(500);

	return 0;
}
1691
/*
 * mtk_eth_stop - Quiesce the interface (U-Boot eth_ops.stop)
 *
 * Forces the switch CPU MAC down (if any), disables the PDMA TX/RX
 * engines and waits for their busy flags to clear.
 */
static void mtk_eth_stop(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);

	if (priv->switch_mac_control)
		priv->switch_mac_control(priv, false);

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG,
		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN, 0);
	udelay(500);

	/* Wait (up to 5 ms) for in-flight DMA to drain */
	wait_for_bit_le32(priv->fe_base + priv->soc->pdma_base + PDMA_GLO_CFG_REG,
			  RX_DMA_BUSY | TX_DMA_BUSY, 0, 5000, 0);
}
1706
1707static int mtk_eth_write_hwaddr(struct udevice *dev)
1708{
Simon Glassfa20e932020-12-03 16:55:20 -07001709 struct eth_pdata *pdata = dev_get_plat(dev);
developerc3ac93d2018-12-20 16:12:53 +08001710 struct mtk_eth_priv *priv = dev_get_priv(dev);
1711 unsigned char *mac = pdata->enetaddr;
1712 u32 macaddr_lsb, macaddr_msb;
1713
1714 macaddr_msb = ((u32)mac[0] << 8) | (u32)mac[1];
1715 macaddr_lsb = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
1716 ((u32)mac[4] << 8) | (u32)mac[5];
1717
1718 mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_MSB_REG, macaddr_msb);
1719 mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_LSB_REG, macaddr_lsb);
1720
1721 return 0;
1722}
1723
/*
 * mtk_eth_send - Transmit one packet (U-Boot eth_ops.send)
 *
 * Copies @packet into the current TX descriptor's buffer, flushes the
 * cache over it, hands the descriptor to the DMA engine (clearing DDONE
 * and setting the length) and advances the CPU index register.
 *
 * Returns 0 on success, -EPERM when the TX ring is full (current
 * descriptor still owned by DMA).
 */
static int mtk_eth_send(struct udevice *dev, void *packet, int length)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->tx_cpu_owner_idx0;
	struct mtk_tx_dma_v2 *txd;
	void *pkt_base;

	txd = priv->tx_ring_noc + idx * priv->soc->txd_size;

	if (!(txd->txd2 & PDMA_TXD2_DDONE)) {
		debug("mtk-eth: TX DMA descriptor ring is full\n");
		return -EPERM;
	}

	pkt_base = (void *)phys_to_virt(txd->txd1);
	memcpy(pkt_base, packet, length);
	flush_dcache_range((ulong)pkt_base, (ulong)pkt_base +
			   roundup(length, ARCH_DMA_MINALIGN));

	/* Length field layout differs between PDMA v1 and v2/v3 */
	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
		txd->txd2 = PDMA_TXD2_LS0 | PDMA_V2_TXD2_SDL0_SET(length);
	else
		txd->txd2 = PDMA_TXD2_LS0 | PDMA_V1_TXD2_SDL0_SET(length);

	priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);

	return 0;
}
1754
/*
 * mtk_eth_recv - Receive one packet, non-blocking (U-Boot eth_ops.recv)
 *
 * Checks the current RX descriptor's DDONE bit; when a packet is pending,
 * invalidates the cache over its buffer and returns the packet length
 * with *packetp pointing into the DMA buffer.  The descriptor is recycled
 * later by mtk_eth_free_pkt(), which also advances the ring index.
 *
 * Returns the packet length, or -EAGAIN when no packet is pending.
 */
static int mtk_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->rx_dma_owner_idx0;
	struct mtk_rx_dma_v2 *rxd;
	uchar *pkt_base;
	u32 length;

	rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;

	if (!(rxd->rxd2 & PDMA_RXD2_DDONE)) {
		debug("mtk-eth: RX DMA descriptor ring is empty\n");
		return -EAGAIN;
	}

	/* Length field layout differs between PDMA v1 and v2/v3 */
	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
		length = PDMA_V2_RXD2_PLEN0_GET(rxd->rxd2);
	else
		length = PDMA_V1_RXD2_PLEN0_GET(rxd->rxd2);

	pkt_base = (void *)phys_to_virt(rxd->rxd1);
	invalidate_dcache_range((ulong)pkt_base, (ulong)pkt_base +
				roundup(length, ARCH_DMA_MINALIGN));

	if (packetp)
		*packetp = pkt_base;

	return length;
}
1785
/*
 * mtk_eth_free_pkt - Recycle the RX descriptor (U-Boot eth_ops.free_pkt)
 *
 * Called after the packet returned by mtk_eth_recv() has been consumed:
 * re-arms the descriptor (resetting its buffer length, which also clears
 * DDONE so DMA owns it again), updates the hardware CPU index and
 * advances the software ring index.
 *
 * Always returns 0.
 */
static int mtk_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->rx_dma_owner_idx0;
	struct mtk_rx_dma_v2 *rxd;

	rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;

	/* Length field layout differs between PDMA v1 and v2/v3 */
	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
		rxd->rxd2 = PDMA_V2_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
	else
		rxd->rxd2 = PDMA_V1_RXD2_PLEN0_SET(PKTSIZE_ALIGN);

	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);
	priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;

	return 0;
}
1805
1806static int mtk_eth_probe(struct udevice *dev)
1807{
Simon Glassfa20e932020-12-03 16:55:20 -07001808 struct eth_pdata *pdata = dev_get_plat(dev);
developerc3ac93d2018-12-20 16:12:53 +08001809 struct mtk_eth_priv *priv = dev_get_priv(dev);
Frank Wunderlich44350182020-01-31 10:23:29 +01001810 ulong iobase = pdata->iobase;
developerc3ac93d2018-12-20 16:12:53 +08001811 int ret;
1812
1813 /* Frame Engine Register Base */
1814 priv->fe_base = (void *)iobase;
1815
1816 /* GMAC Register Base */
1817 priv->gmac_base = (void *)(iobase + GMAC_BASE);
1818
1819 /* MDIO register */
1820 ret = mtk_mdio_register(dev);
1821 if (ret)
1822 return ret;
1823
1824 /* Prepare for tx/rx rings */
developer65089f72022-09-09 19:59:24 +08001825 priv->tx_ring_noc = (void *)
1826 noncached_alloc(priv->soc->txd_size * NUM_TX_DESC,
developerc3ac93d2018-12-20 16:12:53 +08001827 ARCH_DMA_MINALIGN);
developer65089f72022-09-09 19:59:24 +08001828 priv->rx_ring_noc = (void *)
1829 noncached_alloc(priv->soc->rxd_size * NUM_RX_DESC,
developerc3ac93d2018-12-20 16:12:53 +08001830 ARCH_DMA_MINALIGN);
1831
developer4843ad32024-01-22 10:08:11 +08001832 /* Set MDC divider */
1833 mtk_eth_mdc_init(priv);
1834
developerc3ac93d2018-12-20 16:12:53 +08001835 /* Set MAC mode */
developeref7b6502024-01-22 10:08:16 +08001836 if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII ||
1837 priv->phy_interface == PHY_INTERFACE_MODE_XGMII)
developer03ce27b2023-07-19 17:17:31 +08001838 mtk_xmac_init(priv);
1839 else
1840 mtk_mac_init(priv);
developerc3ac93d2018-12-20 16:12:53 +08001841
1842 /* Probe phy if switch is not specified */
1843 if (priv->sw == SW_NONE)
1844 return mtk_phy_probe(dev);
1845
1846 /* Initialize switch */
developerd5d73952020-02-18 16:49:37 +08001847 return mt753x_switch_init(priv);
developerc3ac93d2018-12-20 16:12:53 +08001848}
1849
1850static int mtk_eth_remove(struct udevice *dev)
1851{
1852 struct mtk_eth_priv *priv = dev_get_priv(dev);
1853
1854 /* MDIO unregister */
1855 mdio_unregister(priv->mdio_bus);
1856 mdio_free(priv->mdio_bus);
1857
1858 /* Stop possibly started DMA */
1859 mtk_eth_stop(dev);
1860
1861 return 0;
1862}
1863
Simon Glassaad29ae2020-12-03 16:55:21 -07001864static int mtk_eth_of_to_plat(struct udevice *dev)
developerc3ac93d2018-12-20 16:12:53 +08001865{
Simon Glassfa20e932020-12-03 16:55:20 -07001866 struct eth_pdata *pdata = dev_get_plat(dev);
developerc3ac93d2018-12-20 16:12:53 +08001867 struct mtk_eth_priv *priv = dev_get_priv(dev);
1868 struct ofnode_phandle_args args;
1869 struct regmap *regmap;
1870 const char *str;
1871 ofnode subnode;
1872 int ret;
1873
developer1d3b1f62022-09-09 19:59:21 +08001874 priv->soc = (const struct mtk_soc_data *)dev_get_driver_data(dev);
1875 if (!priv->soc) {
1876 dev_err(dev, "missing soc compatible data\n");
1877 return -EINVAL;
1878 }
developerc3ac93d2018-12-20 16:12:53 +08001879
developerafa74c22022-05-20 11:23:31 +08001880 pdata->iobase = (phys_addr_t)dev_remap_addr(dev);
developerc3ac93d2018-12-20 16:12:53 +08001881
1882 /* get corresponding ethsys phandle */
1883 ret = dev_read_phandle_with_args(dev, "mediatek,ethsys", NULL, 0, 0,
1884 &args);
1885 if (ret)
1886 return ret;
1887
developera182b7e2022-05-20 11:23:37 +08001888 priv->ethsys_regmap = syscon_node_to_regmap(args.node);
1889 if (IS_ERR(priv->ethsys_regmap))
1890 return PTR_ERR(priv->ethsys_regmap);
developerc3ac93d2018-12-20 16:12:53 +08001891
developera5d712a2023-07-19 17:17:22 +08001892 if (MTK_HAS_CAPS(priv->soc->caps, MTK_INFRA)) {
1893 /* get corresponding infracfg phandle */
1894 ret = dev_read_phandle_with_args(dev, "mediatek,infracfg",
1895 NULL, 0, 0, &args);
1896
1897 if (ret)
1898 return ret;
1899
1900 priv->infra_regmap = syscon_node_to_regmap(args.node);
1901 if (IS_ERR(priv->infra_regmap))
1902 return PTR_ERR(priv->infra_regmap);
1903 }
1904
developerc3ac93d2018-12-20 16:12:53 +08001905 /* Reset controllers */
1906 ret = reset_get_by_name(dev, "fe", &priv->rst_fe);
1907 if (ret) {
1908 printf("error: Unable to get reset ctrl for frame engine\n");
1909 return ret;
1910 }
1911
1912 priv->gmac_id = dev_read_u32_default(dev, "mediatek,gmac-id", 0);
1913
developer4843ad32024-01-22 10:08:11 +08001914 priv->mdc = 0;
1915 subnode = ofnode_find_subnode(dev_ofnode(dev), "mdio");
1916 if (ofnode_valid(subnode)) {
1917 priv->mdc = ofnode_read_u32_default(subnode, "clock-frequency", 2500000);
1918 if (priv->mdc > MDC_MAX_FREQ ||
1919 priv->mdc < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
1920 printf("error: MDIO clock frequency out of range\n");
1921 return -EINVAL;
1922 }
1923 }
1924
developerc3ac93d2018-12-20 16:12:53 +08001925 /* Interface mode is required */
Marek Behúnbc194772022-04-07 00:33:01 +02001926 pdata->phy_interface = dev_read_phy_mode(dev);
1927 priv->phy_interface = pdata->phy_interface;
Marek Behún48631e42022-04-07 00:33:03 +02001928 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA) {
developerc3ac93d2018-12-20 16:12:53 +08001929 printf("error: phy-mode is not set\n");
1930 return -EINVAL;
1931 }
1932
1933 /* Force mode or autoneg */
1934 subnode = ofnode_find_subnode(dev_ofnode(dev), "fixed-link");
1935 if (ofnode_valid(subnode)) {
1936 priv->force_mode = 1;
1937 priv->speed = ofnode_read_u32_default(subnode, "speed", 0);
1938 priv->duplex = ofnode_read_bool(subnode, "full-duplex");
1939
1940 if (priv->speed != SPEED_10 && priv->speed != SPEED_100 &&
developer4aafc992023-07-19 17:17:13 +08001941 priv->speed != SPEED_1000 && priv->speed != SPEED_2500 &&
1942 priv->speed != SPEED_10000) {
developerc3ac93d2018-12-20 16:12:53 +08001943 printf("error: no valid speed set in fixed-link\n");
1944 return -EINVAL;
1945 }
1946 }
1947
developer4aafc992023-07-19 17:17:13 +08001948 if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII ||
1949 priv->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
developer9a12c242020-01-21 19:31:57 +08001950 /* get corresponding sgmii phandle */
1951 ret = dev_read_phandle_with_args(dev, "mediatek,sgmiisys",
1952 NULL, 0, 0, &args);
1953 if (ret)
1954 return ret;
1955
1956 regmap = syscon_node_to_regmap(args.node);
1957
1958 if (IS_ERR(regmap))
1959 return PTR_ERR(regmap);
1960
1961 priv->sgmii_base = regmap_get_range(regmap, 0);
1962
1963 if (!priv->sgmii_base) {
1964 dev_err(dev, "Unable to find sgmii\n");
1965 return -ENODEV;
1966 }
developer053929c2022-09-09 19:59:28 +08001967
1968 priv->pn_swap = ofnode_read_bool(args.node, "pn_swap");
developer03ce27b2023-07-19 17:17:31 +08001969 } else if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII) {
1970 /* get corresponding usxgmii phandle */
1971 ret = dev_read_phandle_with_args(dev, "mediatek,usxgmiisys",
1972 NULL, 0, 0, &args);
1973 if (ret)
1974 return ret;
1975
1976 priv->usxgmii_regmap = syscon_node_to_regmap(args.node);
1977 if (IS_ERR(priv->usxgmii_regmap))
1978 return PTR_ERR(priv->usxgmii_regmap);
1979
1980 /* get corresponding xfi_pextp phandle */
1981 ret = dev_read_phandle_with_args(dev, "mediatek,xfi_pextp",
1982 NULL, 0, 0, &args);
1983 if (ret)
1984 return ret;
1985
1986 priv->xfi_pextp_regmap = syscon_node_to_regmap(args.node);
1987 if (IS_ERR(priv->xfi_pextp_regmap))
1988 return PTR_ERR(priv->xfi_pextp_regmap);
1989
1990 /* get corresponding xfi_pll phandle */
1991 ret = dev_read_phandle_with_args(dev, "mediatek,xfi_pll",
1992 NULL, 0, 0, &args);
1993 if (ret)
1994 return ret;
1995
1996 priv->xfi_pll_regmap = syscon_node_to_regmap(args.node);
1997 if (IS_ERR(priv->xfi_pll_regmap))
1998 return PTR_ERR(priv->xfi_pll_regmap);
1999
2000 /* get corresponding toprgu phandle */
2001 ret = dev_read_phandle_with_args(dev, "mediatek,toprgu",
2002 NULL, 0, 0, &args);
2003 if (ret)
2004 return ret;
2005
2006 priv->toprgu_regmap = syscon_node_to_regmap(args.node);
2007 if (IS_ERR(priv->toprgu_regmap))
2008 return PTR_ERR(priv->toprgu_regmap);
developer9a12c242020-01-21 19:31:57 +08002009 }
2010
developerc3ac93d2018-12-20 16:12:53 +08002011 /* check for switch first, otherwise phy will be used */
2012 priv->sw = SW_NONE;
2013 priv->switch_init = NULL;
developer08849652023-07-19 17:16:54 +08002014 priv->switch_mac_control = NULL;
developerc3ac93d2018-12-20 16:12:53 +08002015 str = dev_read_string(dev, "mediatek,switch");
2016
2017 if (str) {
2018 if (!strcmp(str, "mt7530")) {
2019 priv->sw = SW_MT7530;
2020 priv->switch_init = mt7530_setup;
developer08849652023-07-19 17:16:54 +08002021 priv->switch_mac_control = mt7530_mac_control;
developerd5d73952020-02-18 16:49:37 +08002022 priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
developer3a46a672023-07-19 17:16:59 +08002023 priv->mt753x_reset_wait_time = 1000;
developerd5d73952020-02-18 16:49:37 +08002024 } else if (!strcmp(str, "mt7531")) {
2025 priv->sw = SW_MT7531;
2026 priv->switch_init = mt7531_setup;
developer08849652023-07-19 17:16:54 +08002027 priv->switch_mac_control = mt7531_mac_control;
developerd5d73952020-02-18 16:49:37 +08002028 priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
developer3a46a672023-07-19 17:16:59 +08002029 priv->mt753x_reset_wait_time = 200;
developer76e14722023-07-19 17:17:41 +08002030 } else if (!strcmp(str, "mt7988")) {
2031 priv->sw = SW_MT7988;
2032 priv->switch_init = mt7988_setup;
2033 priv->switch_mac_control = mt7988_mac_control;
2034 priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
2035 priv->mt753x_reset_wait_time = 50;
developerc3ac93d2018-12-20 16:12:53 +08002036 } else {
2037 printf("error: unsupported switch\n");
2038 return -EINVAL;
2039 }
2040
2041 priv->mcm = dev_read_bool(dev, "mediatek,mcm");
2042 if (priv->mcm) {
2043 ret = reset_get_by_name(dev, "mcm", &priv->rst_mcm);
2044 if (ret) {
2045 printf("error: no reset ctrl for mcm\n");
2046 return ret;
2047 }
2048 } else {
2049 gpio_request_by_name(dev, "reset-gpios", 0,
2050 &priv->rst_gpio, GPIOD_IS_OUT);
2051 }
2052 } else {
developera19b69d2019-04-28 15:08:57 +08002053 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0,
2054 0, &args);
2055 if (ret) {
developerc3ac93d2018-12-20 16:12:53 +08002056 printf("error: phy-handle is not specified\n");
2057 return ret;
2058 }
2059
developera19b69d2019-04-28 15:08:57 +08002060 priv->phy_addr = ofnode_read_s32_default(args.node, "reg", -1);
developerc3ac93d2018-12-20 16:12:53 +08002061 if (priv->phy_addr < 0) {
2062 printf("error: phy address is not specified\n");
2063 return ret;
2064 }
2065 }
2066
2067 return 0;
2068}
2069
developer76e14722023-07-19 17:17:41 +08002070static const struct mtk_soc_data mt7988_data = {
2071 .caps = MT7988_CAPS,
2072 .ana_rgc3 = 0x128,
2073 .gdma_count = 3,
2074 .pdma_base = PDMA_V3_BASE,
2075 .txd_size = sizeof(struct mtk_tx_dma_v2),
2076 .rxd_size = sizeof(struct mtk_rx_dma_v2),
2077};
2078
developer053929c2022-09-09 19:59:28 +08002079static const struct mtk_soc_data mt7986_data = {
2080 .caps = MT7986_CAPS,
2081 .ana_rgc3 = 0x128,
developer78fed682023-07-19 17:17:37 +08002082 .gdma_count = 2,
developer053929c2022-09-09 19:59:28 +08002083 .pdma_base = PDMA_V2_BASE,
2084 .txd_size = sizeof(struct mtk_tx_dma_v2),
2085 .rxd_size = sizeof(struct mtk_rx_dma_v2),
2086};
2087
/* Per-SoC data for MT7981: 2 GDMA ports, v2 PDMA base, v2 descriptor layout */
static const struct mtk_soc_data mt7981_data = {
	.caps = MT7981_CAPS,
	.ana_rgc3 = 0x128,
	.gdma_count = 2,
	.pdma_base = PDMA_V2_BASE,
	.txd_size = sizeof(struct mtk_tx_dma_v2),
	.rxd_size = sizeof(struct mtk_rx_dma_v2),
};
2096
developer1d3b1f62022-09-09 19:59:21 +08002097static const struct mtk_soc_data mt7629_data = {
2098 .ana_rgc3 = 0x128,
developer78fed682023-07-19 17:17:37 +08002099 .gdma_count = 2,
developera7cdebf2022-09-09 19:59:26 +08002100 .pdma_base = PDMA_V1_BASE,
developer65089f72022-09-09 19:59:24 +08002101 .txd_size = sizeof(struct mtk_tx_dma),
2102 .rxd_size = sizeof(struct mtk_rx_dma),
developer1d3b1f62022-09-09 19:59:21 +08002103};
2104
/* Per-SoC data for MT7623: v1 PDMA base, legacy descriptor layout */
static const struct mtk_soc_data mt7623_data = {
	.caps = MT7623_CAPS,
	.gdma_count = 2,
	.pdma_base = PDMA_V1_BASE,
	.txd_size = sizeof(struct mtk_tx_dma),
	.rxd_size = sizeof(struct mtk_rx_dma),
};
2112
/* Per-SoC data for MT7622: distinct SGMII ANA_RGC3 offset, v1 PDMA base */
static const struct mtk_soc_data mt7622_data = {
	.ana_rgc3 = 0x2028,
	.gdma_count = 2,
	.pdma_base = PDMA_V1_BASE,
	.txd_size = sizeof(struct mtk_tx_dma),
	.rxd_size = sizeof(struct mtk_rx_dma),
};
2120
/* Per-SoC data for MT7621: v1 PDMA base, legacy descriptor layout */
static const struct mtk_soc_data mt7621_data = {
	.caps = MT7621_CAPS,
	.gdma_count = 2,
	.pdma_base = PDMA_V1_BASE,
	.txd_size = sizeof(struct mtk_tx_dma),
	.rxd_size = sizeof(struct mtk_rx_dma),
};
2128
developerc3ac93d2018-12-20 16:12:53 +08002129static const struct udevice_id mtk_eth_ids[] = {
developer76e14722023-07-19 17:17:41 +08002130 { .compatible = "mediatek,mt7988-eth", .data = (ulong)&mt7988_data },
developer053929c2022-09-09 19:59:28 +08002131 { .compatible = "mediatek,mt7986-eth", .data = (ulong)&mt7986_data },
2132 { .compatible = "mediatek,mt7981-eth", .data = (ulong)&mt7981_data },
developer1d3b1f62022-09-09 19:59:21 +08002133 { .compatible = "mediatek,mt7629-eth", .data = (ulong)&mt7629_data },
2134 { .compatible = "mediatek,mt7623-eth", .data = (ulong)&mt7623_data },
2135 { .compatible = "mediatek,mt7622-eth", .data = (ulong)&mt7622_data },
2136 { .compatible = "mediatek,mt7621-eth", .data = (ulong)&mt7621_data },
developerc3ac93d2018-12-20 16:12:53 +08002137 {}
2138};
2139
/* UCLASS_ETH operations implemented by this driver */
static const struct eth_ops mtk_eth_ops = {
	.start = mtk_eth_start,
	.stop = mtk_eth_stop,
	.send = mtk_eth_send,
	.recv = mtk_eth_recv,
	.free_pkt = mtk_eth_free_pkt,
	.write_hwaddr = mtk_eth_write_hwaddr,
};
2148
/*
 * Driver model registration. DM_FLAG_ALLOC_PRIV_DMA requests that the
 * private data be allocated suitably aligned for DMA use.
 */
U_BOOT_DRIVER(mtk_eth) = {
	.name = "mtk-eth",
	.id = UCLASS_ETH,
	.of_match = mtk_eth_ids,
	.of_to_plat = mtk_eth_of_to_plat,
	.plat_auto = sizeof(struct eth_pdata),
	.probe = mtk_eth_probe,
	.remove = mtk_eth_remove,
	.ops = &mtk_eth_ops,
	.priv_auto = sizeof(struct mtk_eth_priv),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};