developerc3ac93d2018-12-20 16:12:53 +08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2018 MediaTek Inc.
4 *
5 * Author: Weijie Gao <weijie.gao@mediatek.com>
6 * Author: Mark Lee <mark-mc.lee@mediatek.com>
7 */
8
Simon Glass63334482019-11-14 12:57:39 -07009#include <cpu_func.h>
developerc3ac93d2018-12-20 16:12:53 +080010#include <dm.h>
Simon Glass0f2af882020-05-10 11:40:05 -060011#include <log.h>
developerc3ac93d2018-12-20 16:12:53 +080012#include <malloc.h>
13#include <miiphy.h>
Simon Glass274e0b02020-05-10 11:39:56 -060014#include <net.h>
developerc3ac93d2018-12-20 16:12:53 +080015#include <regmap.h>
16#include <reset.h>
17#include <syscon.h>
18#include <wait_bit.h>
Simon Glass274e0b02020-05-10 11:39:56 -060019#include <asm/cache.h>
developerc3ac93d2018-12-20 16:12:53 +080020#include <asm/gpio.h>
21#include <asm/io.h>
Simon Glass9bc15642020-02-03 07:36:16 -070022#include <dm/device_compat.h>
Simon Glassdbd79542020-05-10 11:40:11 -060023#include <linux/delay.h>
developerc3ac93d2018-12-20 16:12:53 +080024#include <linux/err.h>
25#include <linux/ioport.h>
26#include <linux/mdio.h>
27#include <linux/mii.h>
Simon Glassbdd5f812023-09-14 18:21:46 -060028#include <linux/printk.h>
developerc3ac93d2018-12-20 16:12:53 +080029
30#include "mtk_eth.h"
31
32#define NUM_TX_DESC 24
33#define NUM_RX_DESC 24
34#define TX_TOTAL_BUF_SIZE (NUM_TX_DESC * PKTSIZE_ALIGN)
35#define RX_TOTAL_BUF_SIZE (NUM_RX_DESC * PKTSIZE_ALIGN)
36#define TOTAL_PKT_BUF_SIZE (TX_TOTAL_BUF_SIZE + RX_TOTAL_BUF_SIZE)
37
developerd5d73952020-02-18 16:49:37 +080038#define MT753X_NUM_PHYS 5
39#define MT753X_NUM_PORTS 7
40#define MT753X_DFL_SMI_ADDR 31
41#define MT753X_SMI_ADDR_MASK 0x1f
developerc3ac93d2018-12-20 16:12:53 +080042
developerd5d73952020-02-18 16:49:37 +080043#define MT753X_PHY_ADDR(base, addr) \
developerc3ac93d2018-12-20 16:12:53 +080044 (((base) + (addr)) & 0x1f)
45
46#define GDMA_FWD_TO_CPU \
47 (0x20000000 | \
48 GDM_ICS_EN | \
49 GDM_TCS_EN | \
50 GDM_UCS_EN | \
51 STRP_CRC | \
52 (DP_PDMA << MYMAC_DP_S) | \
53 (DP_PDMA << BC_DP_S) | \
54 (DP_PDMA << MC_DP_S) | \
55 (DP_PDMA << UN_DP_S))
56
developer76e14722023-07-19 17:17:41 +080057#define GDMA_BRIDGE_TO_CPU \
58 (0xC0000000 | \
59 GDM_ICS_EN | \
60 GDM_TCS_EN | \
61 GDM_UCS_EN | \
62 (DP_PDMA << MYMAC_DP_S) | \
63 (DP_PDMA << BC_DP_S) | \
64 (DP_PDMA << MC_DP_S) | \
65 (DP_PDMA << UN_DP_S))
66
developerc3ac93d2018-12-20 16:12:53 +080067#define GDMA_FWD_DISCARD \
68 (0x20000000 | \
69 GDM_ICS_EN | \
70 GDM_TCS_EN | \
71 GDM_UCS_EN | \
72 STRP_CRC | \
73 (DP_DISCARD << MYMAC_DP_S) | \
74 (DP_DISCARD << BC_DP_S) | \
75 (DP_DISCARD << MC_DP_S) | \
76 (DP_DISCARD << UN_DP_S))
77
developerc3ac93d2018-12-20 16:12:53 +080078enum mtk_switch {
79 SW_NONE,
developerd5d73952020-02-18 16:49:37 +080080 SW_MT7530,
developer76e14722023-07-19 17:17:41 +080081 SW_MT7531,
82 SW_MT7988,
developerc3ac93d2018-12-20 16:12:53 +080083};
84
developer1d3b1f62022-09-09 19:59:21 +080085/* struct mtk_soc_data - This is the structure holding all differences
 86 * among various platforms
 87 * @caps: Flags showing the extra capabilities of the SoC
88 * @ana_rgc3: The offset for register ANA_RGC3 related to
89 * sgmiisys syscon
developer78fed682023-07-19 17:17:37 +080090 * @gdma_count: Number of GDMAs
developera7cdebf2022-09-09 19:59:26 +080091 * @pdma_base: Register base of PDMA block
92 * @txd_size: Tx DMA descriptor size.
93 * @rxd_size: Rx DMA descriptor size.
developer1d3b1f62022-09-09 19:59:21 +080094 */
95struct mtk_soc_data {
96 u32 caps;
97 u32 ana_rgc3;
developer78fed682023-07-19 17:17:37 +080098 u32 gdma_count;
developera7cdebf2022-09-09 19:59:26 +080099 u32 pdma_base;
developer65089f72022-09-09 19:59:24 +0800100 u32 txd_size;
101 u32 rxd_size;
developerc3ac93d2018-12-20 16:12:53 +0800102};
103
104struct mtk_eth_priv {
105 char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);
106
developer65089f72022-09-09 19:59:24 +0800107 void *tx_ring_noc;
108 void *rx_ring_noc;
developerc3ac93d2018-12-20 16:12:53 +0800109
110 int rx_dma_owner_idx0;
111 int tx_cpu_owner_idx0;
112
113 void __iomem *fe_base;
114 void __iomem *gmac_base;
developer9a12c242020-01-21 19:31:57 +0800115 void __iomem *sgmii_base;
developer76e14722023-07-19 17:17:41 +0800116 void __iomem *gsw_base;
developerc3ac93d2018-12-20 16:12:53 +0800117
developera182b7e2022-05-20 11:23:37 +0800118 struct regmap *ethsys_regmap;
119
developera5d712a2023-07-19 17:17:22 +0800120 struct regmap *infra_regmap;
121
developer03ce27b2023-07-19 17:17:31 +0800122 struct regmap *usxgmii_regmap;
123 struct regmap *xfi_pextp_regmap;
124 struct regmap *xfi_pll_regmap;
125 struct regmap *toprgu_regmap;
126
developerc3ac93d2018-12-20 16:12:53 +0800127 struct mii_dev *mdio_bus;
128 int (*mii_read)(struct mtk_eth_priv *priv, u8 phy, u8 reg);
129 int (*mii_write)(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 val);
130 int (*mmd_read)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg);
131 int (*mmd_write)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg,
132 u16 val);
133
developer1d3b1f62022-09-09 19:59:21 +0800134 const struct mtk_soc_data *soc;
developerc3ac93d2018-12-20 16:12:53 +0800135 int gmac_id;
136 int force_mode;
137 int speed;
138 int duplex;
developer4843ad32024-01-22 10:08:11 +0800139 int mdc;
developer053929c2022-09-09 19:59:28 +0800140 bool pn_swap;
developerc3ac93d2018-12-20 16:12:53 +0800141
142 struct phy_device *phydev;
143 int phy_interface;
144 int phy_addr;
145
146 enum mtk_switch sw;
147 int (*switch_init)(struct mtk_eth_priv *priv);
developer08849652023-07-19 17:16:54 +0800148 void (*switch_mac_control)(struct mtk_eth_priv *priv, bool enable);
developerd5d73952020-02-18 16:49:37 +0800149 u32 mt753x_smi_addr;
150 u32 mt753x_phy_base;
developer08849652023-07-19 17:16:54 +0800151 u32 mt753x_pmcr;
developer3a46a672023-07-19 17:16:59 +0800152 u32 mt753x_reset_wait_time;
developerc3ac93d2018-12-20 16:12:53 +0800153
154 struct gpio_desc rst_gpio;
155 int mcm;
156
157 struct reset_ctl rst_fe;
158 struct reset_ctl rst_mcm;
159};
160
161static void mtk_pdma_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
162{
developera7cdebf2022-09-09 19:59:26 +0800163 writel(val, priv->fe_base + priv->soc->pdma_base + reg);
developerc3ac93d2018-12-20 16:12:53 +0800164}
165
166static void mtk_pdma_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
167 u32 set)
168{
developera7cdebf2022-09-09 19:59:26 +0800169 clrsetbits_le32(priv->fe_base + priv->soc->pdma_base + reg, clr, set);
developerc3ac93d2018-12-20 16:12:53 +0800170}
171
172static void mtk_gdma_write(struct mtk_eth_priv *priv, int no, u32 reg,
173 u32 val)
174{
175 u32 gdma_base;
176
developer78fed682023-07-19 17:17:37 +0800177 if (no == 2)
178 gdma_base = GDMA3_BASE;
179 else if (no == 1)
developerc3ac93d2018-12-20 16:12:53 +0800180 gdma_base = GDMA2_BASE;
181 else
182 gdma_base = GDMA1_BASE;
183
184 writel(val, priv->fe_base + gdma_base + reg);
185}
186
developer76e14722023-07-19 17:17:41 +0800187static void mtk_fe_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
188{
189 clrsetbits_le32(priv->fe_base + reg, clr, set);
190}
191
developerc3ac93d2018-12-20 16:12:53 +0800192static u32 mtk_gmac_read(struct mtk_eth_priv *priv, u32 reg)
193{
194 return readl(priv->gmac_base + reg);
195}
196
197static void mtk_gmac_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
198{
199 writel(val, priv->gmac_base + reg);
200}
201
202static void mtk_gmac_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
203{
204 clrsetbits_le32(priv->gmac_base + reg, clr, set);
205}
206
207static void mtk_ethsys_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
208 u32 set)
209{
developera182b7e2022-05-20 11:23:37 +0800210 uint val;
211
212 regmap_read(priv->ethsys_regmap, reg, &val);
213 val &= ~clr;
214 val |= set;
215 regmap_write(priv->ethsys_regmap, reg, val);
developerc3ac93d2018-12-20 16:12:53 +0800216}
217
developera5d712a2023-07-19 17:17:22 +0800218static void mtk_infra_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
219 u32 set)
220{
221 uint val;
222
223 regmap_read(priv->infra_regmap, reg, &val);
224 val &= ~clr;
225 val |= set;
226 regmap_write(priv->infra_regmap, reg, val);
227}
228
developer76e14722023-07-19 17:17:41 +0800229static u32 mtk_gsw_read(struct mtk_eth_priv *priv, u32 reg)
230{
231 return readl(priv->gsw_base + reg);
232}
233
234static void mtk_gsw_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
235{
236 writel(val, priv->gsw_base + reg);
237}
238
developerc3ac93d2018-12-20 16:12:53 +0800239/* Direct MDIO clause 22/45 access via SoC */
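/*
 * An access cycle is started by writing the ST/CMD/PHY/REG fields (plus the
 * data word for write/address cycles) to GMAC_PIAC_REG together with
 * PHY_ACS_ST, then polling until the hardware clears PHY_ACS_ST. For read
 * cycles the result is fetched from the same register afterwards.
 */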
240static int mtk_mii_rw(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data,
241 u32 cmd, u32 st)
242{
243 int ret;
244 u32 val;
245
246 val = (st << MDIO_ST_S) |
247 ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
248 (((u32)phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
249 (((u32)reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);
250
developer4781c6e2023-07-19 17:17:03 +0800251 if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
developerc3ac93d2018-12-20 16:12:53 +0800252 val |= data & MDIO_RW_DATA_M;
253
254 mtk_gmac_write(priv, GMAC_PIAC_REG, val | PHY_ACS_ST);
255
256 ret = wait_for_bit_le32(priv->gmac_base + GMAC_PIAC_REG,
257 PHY_ACS_ST, 0, 5000, 0);
258 if (ret) {
259 pr_warn("MDIO access timeout\n");
260 return ret;
261 }
262
developer4781c6e2023-07-19 17:17:03 +0800263 if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
developerc3ac93d2018-12-20 16:12:53 +0800264 val = mtk_gmac_read(priv, GMAC_PIAC_REG);
265 return val & MDIO_RW_DATA_M;
266 }
267
268 return 0;
269}
270
271/* Direct MDIO clause 22 read via SoC */
272static int mtk_mii_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
273{
274 return mtk_mii_rw(priv, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
275}
276
277/* Direct MDIO clause 22 write via SoC */
278static int mtk_mii_write(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data)
279{
280 return mtk_mii_rw(priv, phy, reg, data, MDIO_CMD_WRITE, MDIO_ST_C22);
281}
282
283/* Direct MDIO clause 45 read via SoC */
284static int mtk_mmd_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
285{
286 int ret;
287
288 ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
289 if (ret)
290 return ret;
291
292 return mtk_mii_rw(priv, addr, devad, 0, MDIO_CMD_READ_C45,
293 MDIO_ST_C45);
294}
295
296/* Direct MDIO clause 45 write via SoC */
297static int mtk_mmd_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
298 u16 reg, u16 val)
299{
300 int ret;
301
302 ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
303 if (ret)
304 return ret;
305
306 return mtk_mii_rw(priv, addr, devad, val, MDIO_CMD_WRITE,
307 MDIO_ST_C45);
308}
309
310/* Indirect MDIO clause 45 read via MII registers */
311static int mtk_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
312 u16 reg)
313{
314 int ret;
315
316 ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
317 (MMD_ADDR << MMD_CMD_S) |
318 ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
319 if (ret)
320 return ret;
321
322 ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
323 if (ret)
324 return ret;
325
326 ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
327 (MMD_DATA << MMD_CMD_S) |
328 ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
329 if (ret)
330 return ret;
331
332 return priv->mii_read(priv, addr, MII_MMD_ADDR_DATA_REG);
333}
334
335/* Indirect MDIO clause 45 write via MII registers */
336static int mtk_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
337 u16 reg, u16 val)
338{
339 int ret;
340
341 ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
342 (MMD_ADDR << MMD_CMD_S) |
343 ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
344 if (ret)
345 return ret;
346
347 ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
348 if (ret)
349 return ret;
350
351 ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
352 (MMD_DATA << MMD_CMD_S) |
353 ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
354 if (ret)
355 return ret;
356
357 return priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, val);
358}
359
developerd5d73952020-02-18 16:49:37 +0800360/*
361 * MT7530 Internal Register Address Bits
362 * -------------------------------------------------------------------
363 * | 15 14 13 12 11 10 9 8 7 6 | 5 4 3 2 | 1 0 |
364 * |----------------------------------------|---------------|--------|
365 * | Page Address | Reg Address | Unused |
366 * -------------------------------------------------------------------
367 */
368
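/*
 * A 32-bit switch register access is split into three MDIO transactions:
 * the page address is written to register 0x1f of the switch SMI address,
 * the low 16 bits go through the 4-bit register window (0x0-0xf), and the
 * high 16 bits go through register 0x10. MT7988 bypasses SMI and accesses
 * the switch registers directly through the memory-mapped GSW block.
 */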
369static int mt753x_reg_read(struct mtk_eth_priv *priv, u32 reg, u32 *data)
370{
371 int ret, low_word, high_word;
372
developer76e14722023-07-19 17:17:41 +0800373 if (priv->sw == SW_MT7988) {
374 *data = mtk_gsw_read(priv, reg);
375 return 0;
376 }
377
developerd5d73952020-02-18 16:49:37 +0800378 /* Write page address */
379 ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
380 if (ret)
381 return ret;
382
383 /* Read low word */
384 low_word = mtk_mii_read(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf);
385 if (low_word < 0)
386 return low_word;
387
388 /* Read high word */
389 high_word = mtk_mii_read(priv, priv->mt753x_smi_addr, 0x10);
390 if (high_word < 0)
391 return high_word;
392
393 if (data)
394 *data = ((u32)high_word << 16) | (low_word & 0xffff);
395
396 return 0;
397}
398
399static int mt753x_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 data)
400{
401 int ret;
402
developer76e14722023-07-19 17:17:41 +0800403 if (priv->sw == SW_MT7988) {
404 mtk_gsw_write(priv, reg, data);
405 return 0;
406 }
407
developerd5d73952020-02-18 16:49:37 +0800408 /* Write page address */
409 ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
410 if (ret)
411 return ret;
412
413 /* Write low word */
414 ret = mtk_mii_write(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf,
415 data & 0xffff);
416 if (ret)
417 return ret;
418
419 /* Write high word */
420 return mtk_mii_write(priv, priv->mt753x_smi_addr, 0x10, data >> 16);
421}
422
423static void mt753x_reg_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
424 u32 set)
425{
426 u32 val;
427
428 mt753x_reg_read(priv, reg, &val);
429 val &= ~clr;
430 val |= set;
431 mt753x_reg_write(priv, reg, val);
432}
433
434/* Indirect MDIO clause 22/45 access */
435static int mt7531_mii_rw(struct mtk_eth_priv *priv, int phy, int reg, u16 data,
436 u32 cmd, u32 st)
437{
438 ulong timeout;
439 u32 val, timeout_ms;
440 int ret = 0;
441
442 val = (st << MDIO_ST_S) |
443 ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
444 ((phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
445 ((reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);
446
447 if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
448 val |= data & MDIO_RW_DATA_M;
449
450 mt753x_reg_write(priv, MT7531_PHY_IAC, val | PHY_ACS_ST);
451
452 timeout_ms = 100;
453 timeout = get_timer(0);
454 while (1) {
455 mt753x_reg_read(priv, MT7531_PHY_IAC, &val);
456
457 if ((val & PHY_ACS_ST) == 0)
458 break;
459
460 if (get_timer(timeout) > timeout_ms)
461 return -ETIMEDOUT;
462 }
463
464 if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
465 mt753x_reg_read(priv, MT7531_PHY_IAC, &val);
466 ret = val & MDIO_RW_DATA_M;
467 }
468
469 return ret;
470}
471
472static int mt7531_mii_ind_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
473{
474 u8 phy_addr;
475
476 if (phy >= MT753X_NUM_PHYS)
477 return -EINVAL;
478
479 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);
480
481 return mt7531_mii_rw(priv, phy_addr, reg, 0, MDIO_CMD_READ,
482 MDIO_ST_C22);
483}
484
485static int mt7531_mii_ind_write(struct mtk_eth_priv *priv, u8 phy, u8 reg,
486 u16 val)
487{
488 u8 phy_addr;
489
490 if (phy >= MT753X_NUM_PHYS)
491 return -EINVAL;
492
493 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);
494
495 return mt7531_mii_rw(priv, phy_addr, reg, val, MDIO_CMD_WRITE,
496 MDIO_ST_C22);
497}
498
developerdd6243f2023-07-19 17:17:07 +0800499static int mt7531_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
500 u16 reg)
developerd5d73952020-02-18 16:49:37 +0800501{
502 u8 phy_addr;
503 int ret;
504
505 if (addr >= MT753X_NUM_PHYS)
506 return -EINVAL;
507
508 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);
509
510 ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
511 MDIO_ST_C45);
512 if (ret)
513 return ret;
514
515 return mt7531_mii_rw(priv, phy_addr, devad, 0, MDIO_CMD_READ_C45,
516 MDIO_ST_C45);
517}
518
519static int mt7531_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
520 u16 reg, u16 val)
521{
522 u8 phy_addr;
523 int ret;
524
525 if (addr >= MT753X_NUM_PHYS)
526 return 0;
527
528 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);
529
530 ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
531 MDIO_ST_C45);
532 if (ret)
533 return ret;
534
535 return mt7531_mii_rw(priv, phy_addr, devad, val, MDIO_CMD_WRITE,
536 MDIO_ST_C45);
537}
538
developerc3ac93d2018-12-20 16:12:53 +0800539static int mtk_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
540{
541 struct mtk_eth_priv *priv = bus->priv;
542
543 if (devad < 0)
544 return priv->mii_read(priv, addr, reg);
545 else
546 return priv->mmd_read(priv, addr, devad, reg);
547}
548
549static int mtk_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
550 u16 val)
551{
552 struct mtk_eth_priv *priv = bus->priv;
553
554 if (devad < 0)
555 return priv->mii_write(priv, addr, reg, val);
556 else
557 return priv->mmd_write(priv, addr, devad, reg, val);
558}
559
560static int mtk_mdio_register(struct udevice *dev)
561{
562 struct mtk_eth_priv *priv = dev_get_priv(dev);
563 struct mii_dev *mdio_bus = mdio_alloc();
564 int ret;
565
566 if (!mdio_bus)
567 return -ENOMEM;
568
569 /* Assign MDIO access APIs according to the switch/phy */
570 switch (priv->sw) {
571 case SW_MT7530:
572 priv->mii_read = mtk_mii_read;
573 priv->mii_write = mtk_mii_write;
574 priv->mmd_read = mtk_mmd_ind_read;
575 priv->mmd_write = mtk_mmd_ind_write;
576 break;
developerd5d73952020-02-18 16:49:37 +0800577 case SW_MT7531:
developer76e14722023-07-19 17:17:41 +0800578 case SW_MT7988:
developerd5d73952020-02-18 16:49:37 +0800579 priv->mii_read = mt7531_mii_ind_read;
580 priv->mii_write = mt7531_mii_ind_write;
581 priv->mmd_read = mt7531_mmd_ind_read;
582 priv->mmd_write = mt7531_mmd_ind_write;
583 break;
developerc3ac93d2018-12-20 16:12:53 +0800584 default:
585 priv->mii_read = mtk_mii_read;
586 priv->mii_write = mtk_mii_write;
587 priv->mmd_read = mtk_mmd_read;
588 priv->mmd_write = mtk_mmd_write;
589 }
590
591 mdio_bus->read = mtk_mdio_read;
592 mdio_bus->write = mtk_mdio_write;
 593 snprintf(mdio_bus->name, sizeof(mdio_bus->name), "%s", dev->name);
594
595 mdio_bus->priv = (void *)priv;
596
597 ret = mdio_register(mdio_bus);
598
599 if (ret)
600 return ret;
601
602 priv->mdio_bus = mdio_bus;
603
604 return 0;
605}
606
developerd5d73952020-02-18 16:49:37 +0800607static int mt753x_core_reg_read(struct mtk_eth_priv *priv, u32 reg)
developerc3ac93d2018-12-20 16:12:53 +0800608{
developerd5d73952020-02-18 16:49:37 +0800609 u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);
developerc3ac93d2018-12-20 16:12:53 +0800610
developerd5d73952020-02-18 16:49:37 +0800611 return priv->mmd_read(priv, phy_addr, 0x1f, reg);
developerc3ac93d2018-12-20 16:12:53 +0800612}
613
developerd5d73952020-02-18 16:49:37 +0800614static void mt753x_core_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
developerc3ac93d2018-12-20 16:12:53 +0800615{
developerd5d73952020-02-18 16:49:37 +0800616 u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);
developerc3ac93d2018-12-20 16:12:53 +0800617
developerd5d73952020-02-18 16:49:37 +0800618 priv->mmd_write(priv, phy_addr, 0x1f, reg, val);
developerc3ac93d2018-12-20 16:12:53 +0800619}
620
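/*
 * Reconfigure the MT7530 core PLL and TRGMII Tx clock for the selected xMII
 * mode: the core clock is gated, the GSWPLL is reprogrammed for a 500MHz
 * core clock, the TRGMII Tx clock NCPO/SSC parameters are applied, and the
 * core and TRGMII clocks are finally re-enabled.
 */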
621static int mt7530_pad_clk_setup(struct mtk_eth_priv *priv, int mode)
622{
623 u32 ncpo1, ssc_delta;
624
625 switch (mode) {
626 case PHY_INTERFACE_MODE_RGMII:
627 ncpo1 = 0x0c80;
628 ssc_delta = 0x87;
629 break;
630 default:
631 printf("error: xMII mode %d not supported\n", mode);
632 return -EINVAL;
633 }
634
635 /* Disable MT7530 core clock */
developerd5d73952020-02-18 16:49:37 +0800636 mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, 0);
developerc3ac93d2018-12-20 16:12:53 +0800637
638 /* Disable MT7530 PLL */
developerd5d73952020-02-18 16:49:37 +0800639 mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
developerc3ac93d2018-12-20 16:12:53 +0800640 (2 << RG_GSWPLL_POSDIV_200M_S) |
641 (32 << RG_GSWPLL_FBKDIV_200M_S));
642
 643 /* For MT7530 core clock = 500MHz */
developerd5d73952020-02-18 16:49:37 +0800644 mt753x_core_reg_write(priv, CORE_GSWPLL_GRP2,
developerc3ac93d2018-12-20 16:12:53 +0800645 (1 << RG_GSWPLL_POSDIV_500M_S) |
646 (25 << RG_GSWPLL_FBKDIV_500M_S));
647
648 /* Enable MT7530 PLL */
developerd5d73952020-02-18 16:49:37 +0800649 mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
developerc3ac93d2018-12-20 16:12:53 +0800650 (2 << RG_GSWPLL_POSDIV_200M_S) |
651 (32 << RG_GSWPLL_FBKDIV_200M_S) |
652 RG_GSWPLL_EN_PRE);
653
654 udelay(20);
655
developerd5d73952020-02-18 16:49:37 +0800656 mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
developerc3ac93d2018-12-20 16:12:53 +0800657
658 /* Setup the MT7530 TRGMII Tx Clock */
developerd5d73952020-02-18 16:49:37 +0800659 mt753x_core_reg_write(priv, CORE_PLL_GROUP5, ncpo1);
660 mt753x_core_reg_write(priv, CORE_PLL_GROUP6, 0);
661 mt753x_core_reg_write(priv, CORE_PLL_GROUP10, ssc_delta);
662 mt753x_core_reg_write(priv, CORE_PLL_GROUP11, ssc_delta);
663 mt753x_core_reg_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
developerc3ac93d2018-12-20 16:12:53 +0800664 RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);
665
developerd5d73952020-02-18 16:49:37 +0800666 mt753x_core_reg_write(priv, CORE_PLL_GROUP2,
developerc3ac93d2018-12-20 16:12:53 +0800667 RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
668 (1 << RG_SYSPLL_POSDIV_S));
669
developerd5d73952020-02-18 16:49:37 +0800670 mt753x_core_reg_write(priv, CORE_PLL_GROUP7,
developerc3ac93d2018-12-20 16:12:53 +0800671 RG_LCDDS_PCW_NCPO_CHG | (3 << RG_LCCDS_C_S) |
672 RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
673
674 /* Enable MT7530 core clock */
developerd5d73952020-02-18 16:49:37 +0800675 mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG,
developerc3ac93d2018-12-20 16:12:53 +0800676 REG_GSWCK_EN | REG_TRGMIICK_EN);
677
678 return 0;
679}
680
developer08849652023-07-19 17:16:54 +0800681static void mt7530_mac_control(struct mtk_eth_priv *priv, bool enable)
682{
683 u32 pmcr = FORCE_MODE;
684
685 if (enable)
686 pmcr = priv->mt753x_pmcr;
687
688 mt753x_reg_write(priv, PMCR_REG(6), pmcr);
689}
690
developerc3ac93d2018-12-20 16:12:53 +0800691static int mt7530_setup(struct mtk_eth_priv *priv)
692{
693 u16 phy_addr, phy_val;
developer2f866c42022-05-20 11:23:42 +0800694 u32 val, txdrv;
developerc3ac93d2018-12-20 16:12:53 +0800695 int i;
696
developer1d3b1f62022-09-09 19:59:21 +0800697 if (!MTK_HAS_CAPS(priv->soc->caps, MTK_TRGMII_MT7621_CLK)) {
developer2f866c42022-05-20 11:23:42 +0800698 /* Select 250MHz clk for RGMII mode */
699 mtk_ethsys_rmw(priv, ETHSYS_CLKCFG0_REG,
700 ETHSYS_TRGMII_CLK_SEL362_5, 0);
701
702 txdrv = 8;
703 } else {
704 txdrv = 4;
705 }
developerc3ac93d2018-12-20 16:12:53 +0800706
developerc3ac93d2018-12-20 16:12:53 +0800707 /* Modify HWTRAP first to allow direct access to internal PHYs */
developerd5d73952020-02-18 16:49:37 +0800708 mt753x_reg_read(priv, HWTRAP_REG, &val);
developerc3ac93d2018-12-20 16:12:53 +0800709 val |= CHG_TRAP;
710 val &= ~C_MDIO_BPS;
developerd5d73952020-02-18 16:49:37 +0800711 mt753x_reg_write(priv, MHWTRAP_REG, val);
developerc3ac93d2018-12-20 16:12:53 +0800712
713 /* Calculate the phy base address */
714 val = ((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3;
developerd5d73952020-02-18 16:49:37 +0800715 priv->mt753x_phy_base = (val | 0x7) + 1;
developerc3ac93d2018-12-20 16:12:53 +0800716
717 /* Turn off PHYs */
developerd5d73952020-02-18 16:49:37 +0800718 for (i = 0; i < MT753X_NUM_PHYS; i++) {
719 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
developerc3ac93d2018-12-20 16:12:53 +0800720 phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
721 phy_val |= BMCR_PDOWN;
722 priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
723 }
724
725 /* Force MAC link down before reset */
developerd5d73952020-02-18 16:49:37 +0800726 mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);
727 mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE);
developerc3ac93d2018-12-20 16:12:53 +0800728
729 /* MT7530 reset */
developerd5d73952020-02-18 16:49:37 +0800730 mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
developerc3ac93d2018-12-20 16:12:53 +0800731 udelay(100);
732
developerd5d73952020-02-18 16:49:37 +0800733 val = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
developerc3ac93d2018-12-20 16:12:53 +0800734 MAC_MODE | FORCE_MODE |
735 MAC_TX_EN | MAC_RX_EN |
736 BKOFF_EN | BACKPR_EN |
737 (SPEED_1000M << FORCE_SPD_S) |
738 FORCE_DPX | FORCE_LINK;
739
740 /* MT7530 Port6: Forced 1000M/FD, FC disabled */
developer08849652023-07-19 17:16:54 +0800741 priv->mt753x_pmcr = val;
developerc3ac93d2018-12-20 16:12:53 +0800742
743 /* MT7530 Port5: Forced link down */
developerd5d73952020-02-18 16:49:37 +0800744 mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);
developerc3ac93d2018-12-20 16:12:53 +0800745
developer08849652023-07-19 17:16:54 +0800746 /* Keep MAC link down before starting eth */
747 mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE);
748
developerc3ac93d2018-12-20 16:12:53 +0800749 /* MT7530 Port6: Set to RGMII */
developerd5d73952020-02-18 16:49:37 +0800750 mt753x_reg_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_M, P6_INTF_MODE_RGMII);
developerc3ac93d2018-12-20 16:12:53 +0800751
752 /* Hardware Trap: Enable Port6, Disable Port5 */
developerd5d73952020-02-18 16:49:37 +0800753 mt753x_reg_read(priv, HWTRAP_REG, &val);
developerc3ac93d2018-12-20 16:12:53 +0800754 val |= CHG_TRAP | LOOPDET_DIS | P5_INTF_DIS |
755 (P5_INTF_SEL_GMAC5 << P5_INTF_SEL_S) |
756 (P5_INTF_MODE_RGMII << P5_INTF_MODE_S);
757 val &= ~(C_MDIO_BPS | P6_INTF_DIS);
developerd5d73952020-02-18 16:49:37 +0800758 mt753x_reg_write(priv, MHWTRAP_REG, val);
developerc3ac93d2018-12-20 16:12:53 +0800759
760 /* Setup switch core pll */
761 mt7530_pad_clk_setup(priv, priv->phy_interface);
762
763 /* Lower Tx Driving for TRGMII path */
764 for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
developerd5d73952020-02-18 16:49:37 +0800765 mt753x_reg_write(priv, MT7530_TRGMII_TD_ODT(i),
developer2f866c42022-05-20 11:23:42 +0800766 (txdrv << TD_DM_DRVP_S) |
767 (txdrv << TD_DM_DRVN_S));
developerc3ac93d2018-12-20 16:12:53 +0800768
769 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
developerd5d73952020-02-18 16:49:37 +0800770 mt753x_reg_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_M, 16);
developerc3ac93d2018-12-20 16:12:53 +0800771
772 /* Turn on PHYs */
developerd5d73952020-02-18 16:49:37 +0800773 for (i = 0; i < MT753X_NUM_PHYS; i++) {
774 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
developerc3ac93d2018-12-20 16:12:53 +0800775 phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
776 phy_val &= ~BMCR_PDOWN;
777 priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
778 }
779
developerd5d73952020-02-18 16:49:37 +0800780 return 0;
781}
782
783static void mt7531_core_pll_setup(struct mtk_eth_priv *priv, int mcm)
784{
 785 /* Step 1: Disable MT7531 COREPLL */
786 mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, 0);
787
788 /* Step 2: switch to XTAL output */
789 mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_CLKSW, SW_CLKSW);
790
791 mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, 0);
792
793 /* Step 3: disable PLLGP and enable program PLLGP */
794 mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_PLLGP, SW_PLLGP);
795
796 /* Step 4: program COREPLL output frequency to 500MHz */
797 mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_POSDIV_M,
798 2 << RG_COREPLL_POSDIV_S);
799 udelay(25);
800
 801 /* Currently, only a 25MHz XTAL is supported */
802 mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_M,
803 0x140000 << RG_COREPLL_SDM_PCW_S);
804
805 /* Set feedback divide ratio update signal to high */
806 mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG,
807 RG_COREPLL_SDM_PCW_CHG);
808
809 /* Wait for at least 16 XTAL clocks */
810 udelay(10);
811
812 /* Step 5: set feedback divide ratio update signal to low */
813 mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG, 0);
814
 815 /* Enable 325M clock for SGMII */
 816 mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);
 817
 818 /* Enable 250SSC clock for RGMII */
 819 mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);
 820
 821 /* Step 6: Enable MT7531 PLL */
822 mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, RG_COREPLL_EN);
823
824 mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, EN_COREPLL);
825
826 udelay(25);
827}
828
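/*
 * Bring up the switch-side SGMII PCS of port 5 or 6 in forced 2.5Gbps mode:
 * select the GEN2 (2.5G) speed, disable in-band autonegotiation, set the
 * forced-mode SGMII configuration and release the PHYA power-down state.
 */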
829static int mt7531_port_sgmii_init(struct mtk_eth_priv *priv,
830 u32 port)
831{
832 if (port != 5 && port != 6) {
833 printf("mt7531: port %d is not a SGMII port\n", port);
834 return -EINVAL;
835 }
836
 837 /* Set SGMII GEN2 speed (2.5G) */
developer0535efd2024-12-17 16:39:23 +0800838 mt753x_reg_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port), SGMSYS_SPEED_MASK,
839 FIELD_PREP(SGMSYS_SPEED_MASK, SGMSYS_SPEED_2500));
developerd5d73952020-02-18 16:49:37 +0800840
841 /* Disable SGMII AN */
842 mt753x_reg_rmw(priv, MT7531_PCS_CONTROL_1(port),
843 SGMII_AN_ENABLE, 0);
844
845 /* SGMII force mode setting */
846 mt753x_reg_write(priv, MT7531_SGMII_MODE(port), SGMII_FORCE_MODE);
847
848 /* Release PHYA power down state */
849 mt753x_reg_rmw(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
850 SGMII_PHYA_PWD, 0);
851
852 return 0;
853}
854
855static int mt7531_port_rgmii_init(struct mtk_eth_priv *priv, u32 port)
856{
857 u32 val;
858
859 if (port != 5) {
860 printf("error: RGMII mode is not available for port %d\n",
861 port);
862 return -EINVAL;
863 }
864
865 mt753x_reg_read(priv, MT7531_CLKGEN_CTRL, &val);
866 val |= GP_CLK_EN;
867 val &= ~GP_MODE_M;
868 val |= GP_MODE_RGMII << GP_MODE_S;
869 val |= TXCLK_NO_REVERSE;
870 val |= RXCLK_NO_DELAY;
871 val &= ~CLK_SKEW_IN_M;
872 val |= CLK_SKEW_IN_NO_CHANGE << CLK_SKEW_IN_S;
873 val &= ~CLK_SKEW_OUT_M;
874 val |= CLK_SKEW_OUT_NO_CHANGE << CLK_SKEW_OUT_S;
875 mt753x_reg_write(priv, MT7531_CLKGEN_CTRL, val);
876
877 return 0;
878}
879
880static void mt7531_phy_setting(struct mtk_eth_priv *priv)
881{
882 int i;
883 u32 val;
884
885 for (i = 0; i < MT753X_NUM_PHYS; i++) {
886 /* Enable HW auto downshift */
887 priv->mii_write(priv, i, 0x1f, 0x1);
888 val = priv->mii_read(priv, i, PHY_EXT_REG_14);
889 val |= PHY_EN_DOWN_SHFIT;
890 priv->mii_write(priv, i, PHY_EXT_REG_14, val);
891
892 /* PHY link down power saving enable */
893 val = priv->mii_read(priv, i, PHY_EXT_REG_17);
894 val |= PHY_LINKDOWN_POWER_SAVING_EN;
895 priv->mii_write(priv, i, PHY_EXT_REG_17, val);
896
897 val = priv->mmd_read(priv, i, 0x1e, PHY_DEV1E_REG_0C6);
898 val &= ~PHY_POWER_SAVING_M;
899 val |= PHY_POWER_SAVING_TX << PHY_POWER_SAVING_S;
900 priv->mmd_write(priv, i, 0x1e, PHY_DEV1E_REG_0C6, val);
901 }
902}
903
developer08849652023-07-19 17:16:54 +0800904static void mt7531_mac_control(struct mtk_eth_priv *priv, bool enable)
905{
906 u32 pmcr = FORCE_MODE_LNK;
907
908 if (enable)
909 pmcr = priv->mt753x_pmcr;
910
911 mt753x_reg_write(priv, PMCR_REG(5), pmcr);
912 mt753x_reg_write(priv, PMCR_REG(6), pmcr);
913}
914
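/*
 * MT7531 bring-up: power down the internal PHYs, force the CPU-port MACs
 * link-down, soft-reset the switch, set up the core PLL, configure port 5/6
 * for RGMII or SGMII according to the selected interface, then re-enable
 * the PHYs and apply the PHY tuning.
 */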
developerd5d73952020-02-18 16:49:37 +0800915static int mt7531_setup(struct mtk_eth_priv *priv)
916{
917 u16 phy_addr, phy_val;
918 u32 val;
919 u32 pmcr;
920 u32 port5_sgmii;
921 int i;
922
923 priv->mt753x_phy_base = (priv->mt753x_smi_addr + 1) &
924 MT753X_SMI_ADDR_MASK;
925
926 /* Turn off PHYs */
927 for (i = 0; i < MT753X_NUM_PHYS; i++) {
928 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
929 phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
930 phy_val |= BMCR_PDOWN;
931 priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
932 }
933
934 /* Force MAC link down before reset */
935 mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE_LNK);
936 mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);
937
938 /* Switch soft reset */
939 mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
940 udelay(100);
941
942 /* Enable MDC input Schmitt Trigger */
943 mt753x_reg_rmw(priv, MT7531_SMT0_IOLB, SMT_IOLB_5_SMI_MDC_EN,
944 SMT_IOLB_5_SMI_MDC_EN);
945
946 mt7531_core_pll_setup(priv, priv->mcm);
947
948 mt753x_reg_read(priv, MT7531_TOP_SIG_SR, &val);
949 port5_sgmii = !!(val & PAD_DUAL_SGMII_EN);
950
 951 /* Port 5 supports either RGMII or SGMII; port 6 supports SGMII only. */
952 switch (priv->phy_interface) {
953 case PHY_INTERFACE_MODE_RGMII:
954 if (!port5_sgmii)
955 mt7531_port_rgmii_init(priv, 5);
956 break;
developer4aafc992023-07-19 17:17:13 +0800957 case PHY_INTERFACE_MODE_2500BASEX:
developerd5d73952020-02-18 16:49:37 +0800958 mt7531_port_sgmii_init(priv, 6);
959 if (port5_sgmii)
960 mt7531_port_sgmii_init(priv, 5);
961 break;
962 default:
963 break;
964 }
965
966 pmcr = MT7531_FORCE_MODE |
967 (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
968 MAC_MODE | MAC_TX_EN | MAC_RX_EN |
969 BKOFF_EN | BACKPR_EN |
970 FORCE_RX_FC | FORCE_TX_FC |
971 (SPEED_1000M << FORCE_SPD_S) | FORCE_DPX |
972 FORCE_LINK;
973
developer08849652023-07-19 17:16:54 +0800974 priv->mt753x_pmcr = pmcr;
975
976 /* Keep MAC link down before starting eth */
977 mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE_LNK);
978 mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);
developerd5d73952020-02-18 16:49:37 +0800979
980 /* Turn on PHYs */
981 for (i = 0; i < MT753X_NUM_PHYS; i++) {
982 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
983 phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
984 phy_val &= ~BMCR_PDOWN;
985 priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
986 }
987
988 mt7531_phy_setting(priv);
989
990 /* Enable Internal PHYs */
991 val = mt753x_core_reg_read(priv, CORE_PLL_GROUP4);
992 val |= MT7531_BYPASS_MODE;
993 val &= ~MT7531_POWER_ON_OFF;
994 mt753x_core_reg_write(priv, CORE_PLL_GROUP4, val);
995
996 return 0;
997}
998
developer76e14722023-07-19 17:17:41 +0800999static void mt7988_phy_setting(struct mtk_eth_priv *priv)
1000{
1001 u16 val;
1002 u32 i;
1003
1004 for (i = 0; i < MT753X_NUM_PHYS; i++) {
1005 /* Enable HW auto downshift */
1006 priv->mii_write(priv, i, 0x1f, 0x1);
1007 val = priv->mii_read(priv, i, PHY_EXT_REG_14);
1008 val |= PHY_EN_DOWN_SHFIT;
1009 priv->mii_write(priv, i, PHY_EXT_REG_14, val);
1010
1011 /* PHY link down power saving enable */
1012 val = priv->mii_read(priv, i, PHY_EXT_REG_17);
1013 val |= PHY_LINKDOWN_POWER_SAVING_EN;
1014 priv->mii_write(priv, i, PHY_EXT_REG_17, val);
1015 }
1016}
1017
1018static void mt7988_mac_control(struct mtk_eth_priv *priv, bool enable)
1019{
1020 u32 pmcr = FORCE_MODE_LNK;
1021
1022 if (enable)
1023 pmcr = priv->mt753x_pmcr;
1024
1025 mt753x_reg_write(priv, PMCR_REG(6), pmcr);
1026}
1027
1028static int mt7988_setup(struct mtk_eth_priv *priv)
1029{
1030 u16 phy_addr, phy_val;
1031 u32 pmcr;
1032 int i;
1033
1034 priv->gsw_base = regmap_get_range(priv->ethsys_regmap, 0) + GSW_BASE;
1035
1036 priv->mt753x_phy_base = (priv->mt753x_smi_addr + 1) &
1037 MT753X_SMI_ADDR_MASK;
1038
1039 /* Turn off PHYs */
1040 for (i = 0; i < MT753X_NUM_PHYS; i++) {
1041 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
1042 phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
1043 phy_val |= BMCR_PDOWN;
1044 priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
1045 }
1046
1047 switch (priv->phy_interface) {
1048 case PHY_INTERFACE_MODE_USXGMII:
1049 /* Use CPU bridge instead of actual USXGMII path */
1050
1051 /* Set GDM1 no drop */
1052 mtk_fe_rmw(priv, PSE_NO_DROP_CFG_REG, 0, PSE_NO_DROP_GDM1);
1053
1054 /* Enable GDM1 to GSW CPU bridge */
1055 mtk_gmac_rmw(priv, GMAC_MAC_MISC_REG, 0, BIT(0));
1056
1057 /* XGMAC force link up */
1058 mtk_gmac_rmw(priv, GMAC_XGMAC_STS_REG, 0, P1_XGMAC_FORCE_LINK);
1059
1060 /* Setup GSW CPU bridge IPG */
1061 mtk_gmac_rmw(priv, GMAC_GSW_CFG_REG, GSWTX_IPG_M | GSWRX_IPG_M,
1062 (0xB << GSWTX_IPG_S) | (0xB << GSWRX_IPG_S));
1063 break;
1064 default:
1065 printf("Error: MT7988 GSW does not support %s interface\n",
1066 phy_string_for_interface(priv->phy_interface));
1067 break;
1068 }
1069
1070 pmcr = MT7988_FORCE_MODE |
1071 (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
1072 MAC_MODE | MAC_TX_EN | MAC_RX_EN |
1073 BKOFF_EN | BACKPR_EN |
1074 FORCE_RX_FC | FORCE_TX_FC |
1075 (SPEED_1000M << FORCE_SPD_S) | FORCE_DPX |
1076 FORCE_LINK;
1077
1078 priv->mt753x_pmcr = pmcr;
1079
1080 /* Keep MAC link down before starting eth */
1081 mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);
1082
1083 /* Turn on PHYs */
1084 for (i = 0; i < MT753X_NUM_PHYS; i++) {
1085 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
1086 phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
1087 phy_val &= ~BMCR_PDOWN;
1088 priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
1089 }
1090
1091 mt7988_phy_setting(priv);
1092
1093 return 0;
1094}
1095
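/*
 * Common mt753x initialization: hard-reset the switch via the MCM reset
 * control or the reset GPIO, run the model-specific setup, then program the
 * port matrix so that user ports 0-5 may only forward to the CPU port
 * (port 6) while port 6 may forward to all user ports.
 */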
developerdd6243f2023-07-19 17:17:07 +08001096static int mt753x_switch_init(struct mtk_eth_priv *priv)
developerd5d73952020-02-18 16:49:37 +08001097{
1098 int ret;
1099 int i;
1100
1101 /* Global reset switch */
1102 if (priv->mcm) {
1103 reset_assert(&priv->rst_mcm);
1104 udelay(1000);
1105 reset_deassert(&priv->rst_mcm);
developer3a46a672023-07-19 17:16:59 +08001106 mdelay(priv->mt753x_reset_wait_time);
developerd5d73952020-02-18 16:49:37 +08001107 } else if (dm_gpio_is_valid(&priv->rst_gpio)) {
1108 dm_gpio_set_value(&priv->rst_gpio, 0);
1109 udelay(1000);
1110 dm_gpio_set_value(&priv->rst_gpio, 1);
developer3a46a672023-07-19 17:16:59 +08001111 mdelay(priv->mt753x_reset_wait_time);
developerd5d73952020-02-18 16:49:37 +08001112 }
1113
1114 ret = priv->switch_init(priv);
1115 if (ret)
1116 return ret;
1117
developerc3ac93d2018-12-20 16:12:53 +08001118 /* Set port isolation */
developerd5d73952020-02-18 16:49:37 +08001119 for (i = 0; i < MT753X_NUM_PORTS; i++) {
developerc3ac93d2018-12-20 16:12:53 +08001120 /* Set port matrix mode */
1121 if (i != 6)
developerd5d73952020-02-18 16:49:37 +08001122 mt753x_reg_write(priv, PCR_REG(i),
developerc3ac93d2018-12-20 16:12:53 +08001123 (0x40 << PORT_MATRIX_S));
1124 else
developerd5d73952020-02-18 16:49:37 +08001125 mt753x_reg_write(priv, PCR_REG(i),
developerc3ac93d2018-12-20 16:12:53 +08001126 (0x3f << PORT_MATRIX_S));
1127
1128 /* Set port mode to user port */
developerd5d73952020-02-18 16:49:37 +08001129 mt753x_reg_write(priv, PVC_REG(i),
developerc3ac93d2018-12-20 16:12:53 +08001130 (0x8100 << STAG_VPID_S) |
1131 (VLAN_ATTR_USER << VLAN_ATTR_S));
1132 }
1133
1134 return 0;
1135}
1136
developer03ce27b2023-07-19 17:17:31 +08001137static void mtk_xphy_link_adjust(struct mtk_eth_priv *priv)
1138{
1139 u16 lcl_adv = 0, rmt_adv = 0;
1140 u8 flowctrl;
1141 u32 mcr;
1142
1143 mcr = mtk_gmac_read(priv, XGMAC_PORT_MCR(priv->gmac_id));
1144 mcr &= ~(XGMAC_FORCE_TX_FC | XGMAC_FORCE_RX_FC);
1145
1146 if (priv->phydev->duplex) {
1147 if (priv->phydev->pause)
1148 rmt_adv = LPA_PAUSE_CAP;
1149 if (priv->phydev->asym_pause)
1150 rmt_adv |= LPA_PAUSE_ASYM;
1151
1152 if (priv->phydev->advertising & ADVERTISED_Pause)
1153 lcl_adv |= ADVERTISE_PAUSE_CAP;
1154 if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
1155 lcl_adv |= ADVERTISE_PAUSE_ASYM;
1156
1157 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
1158
1159 if (flowctrl & FLOW_CTRL_TX)
1160 mcr |= XGMAC_FORCE_TX_FC;
1161 if (flowctrl & FLOW_CTRL_RX)
1162 mcr |= XGMAC_FORCE_RX_FC;
1163
1164 debug("rx pause %s, tx pause %s\n",
1165 flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
1166 flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
1167 }
1168
1169 mcr &= ~(XGMAC_TRX_DISABLE);
1170 mtk_gmac_write(priv, XGMAC_PORT_MCR(priv->gmac_id), mcr);
1171}
1172
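/*
 * Translate the link state resolved by the PHY (speed, duplex, link and
 * pause advertisement) into a forced GMAC_PORT_MCR configuration, with
 * Tx/Rx flow control resolved through mii_resolve_flowctrl_fdx().
 */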
developerc3ac93d2018-12-20 16:12:53 +08001173static void mtk_phy_link_adjust(struct mtk_eth_priv *priv)
1174{
1175 u16 lcl_adv = 0, rmt_adv = 0;
1176 u8 flowctrl;
1177 u32 mcr;
1178
developerd5d73952020-02-18 16:49:37 +08001179 mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
developerc3ac93d2018-12-20 16:12:53 +08001180 (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
1181 MAC_MODE | FORCE_MODE |
1182 MAC_TX_EN | MAC_RX_EN |
developer4aafc992023-07-19 17:17:13 +08001183 DEL_RXFIFO_CLR |
developerc3ac93d2018-12-20 16:12:53 +08001184 BKOFF_EN | BACKPR_EN;
1185
1186 switch (priv->phydev->speed) {
1187 case SPEED_10:
1188 mcr |= (SPEED_10M << FORCE_SPD_S);
1189 break;
1190 case SPEED_100:
1191 mcr |= (SPEED_100M << FORCE_SPD_S);
1192 break;
1193 case SPEED_1000:
developer4aafc992023-07-19 17:17:13 +08001194 case SPEED_2500:
developerc3ac93d2018-12-20 16:12:53 +08001195 mcr |= (SPEED_1000M << FORCE_SPD_S);
1196 break;
1197 };
1198
1199 if (priv->phydev->link)
1200 mcr |= FORCE_LINK;
1201
1202 if (priv->phydev->duplex) {
1203 mcr |= FORCE_DPX;
1204
1205 if (priv->phydev->pause)
1206 rmt_adv = LPA_PAUSE_CAP;
1207 if (priv->phydev->asym_pause)
1208 rmt_adv |= LPA_PAUSE_ASYM;
1209
1210 if (priv->phydev->advertising & ADVERTISED_Pause)
1211 lcl_adv |= ADVERTISE_PAUSE_CAP;
1212 if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
1213 lcl_adv |= ADVERTISE_PAUSE_ASYM;
1214
1215 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
1216
1217 if (flowctrl & FLOW_CTRL_TX)
1218 mcr |= FORCE_TX_FC;
1219 if (flowctrl & FLOW_CTRL_RX)
1220 mcr |= FORCE_RX_FC;
1221
1222 debug("rx pause %s, tx pause %s\n",
1223 flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
1224 flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
1225 }
1226
1227 mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
1228}
1229
1230static int mtk_phy_start(struct mtk_eth_priv *priv)
1231{
1232 struct phy_device *phydev = priv->phydev;
1233 int ret;
1234
1235 ret = phy_startup(phydev);
1236
1237 if (ret) {
1238 debug("Could not initialize PHY %s\n", phydev->dev->name);
1239 return ret;
1240 }
1241
1242 if (!phydev->link) {
1243 debug("%s: link down.\n", phydev->dev->name);
1244 return 0;
1245 }
1246
developer03ce27b2023-07-19 17:17:31 +08001247 if (!priv->force_mode) {
developeref7b6502024-01-22 10:08:16 +08001248 if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII ||
developer7fe0b3c2024-12-17 16:39:50 +08001249 priv->phy_interface == PHY_INTERFACE_MODE_10GBASER ||
developeref7b6502024-01-22 10:08:16 +08001250 priv->phy_interface == PHY_INTERFACE_MODE_XGMII)
developer03ce27b2023-07-19 17:17:31 +08001251 mtk_xphy_link_adjust(priv);
1252 else
1253 mtk_phy_link_adjust(priv);
1254 }
developerc3ac93d2018-12-20 16:12:53 +08001255
1256 debug("Speed: %d, %s duplex%s\n", phydev->speed,
1257 (phydev->duplex) ? "full" : "half",
1258 (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");
1259
1260 return 0;
1261}
1262
1263static int mtk_phy_probe(struct udevice *dev)
1264{
1265 struct mtk_eth_priv *priv = dev_get_priv(dev);
1266 struct phy_device *phydev;
1267
1268 phydev = phy_connect(priv->mdio_bus, priv->phy_addr, dev,
1269 priv->phy_interface);
1270 if (!phydev)
1271 return -ENODEV;
1272
1273 phydev->supported &= PHY_GBIT_FEATURES;
1274 phydev->advertising = phydev->supported;
1275
1276 priv->phydev = phydev;
1277 phy_config(phydev);
1278
1279 return 0;
1280}
1281
developer4aafc992023-07-19 17:17:13 +08001282static void mtk_sgmii_an_init(struct mtk_eth_priv *priv)
1283{
 1284 /* Set SGMII GEN1 speed (1G) */
developer0535efd2024-12-17 16:39:23 +08001285 clrbits_le32(priv->sgmii_base + priv->soc->ana_rgc3, SGMSYS_SPEED_MASK);
developer4aafc992023-07-19 17:17:13 +08001286
1287 /* Enable SGMII AN */
1288 setbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
1289 SGMII_AN_ENABLE);
1290
1291 /* SGMII AN mode setting */
1292 writel(SGMII_AN_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);
1293
1294 /* SGMII PN SWAP setting */
1295 if (priv->pn_swap) {
1296 setbits_le32(priv->sgmii_base + SGMSYS_QPHY_WRAP_CTRL,
1297 SGMII_PN_SWAP_TX_RX);
1298 }
1299
1300 /* Release PHYA power down state */
1301 clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
1302 SGMII_PHYA_PWD, 0);
1303}
1304
1305static void mtk_sgmii_force_init(struct mtk_eth_priv *priv)
developer9a12c242020-01-21 19:31:57 +08001306{
 1307 /* Set SGMII GEN2 speed (2.5G) */
developer0535efd2024-12-17 16:39:23 +08001308 clrsetbits_le32(priv->sgmii_base + priv->soc->ana_rgc3,
1309 SGMSYS_SPEED_MASK,
1310 FIELD_PREP(SGMSYS_SPEED_MASK, SGMSYS_SPEED_2500));
developer9a12c242020-01-21 19:31:57 +08001311
1312 /* Disable SGMII AN */
1313 clrsetbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
1314 SGMII_AN_ENABLE, 0);
1315
1316 /* SGMII force mode setting */
1317 writel(SGMII_FORCE_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);
1318
developer053929c2022-09-09 19:59:28 +08001319 /* SGMII PN SWAP setting */
1320 if (priv->pn_swap) {
1321 setbits_le32(priv->sgmii_base + SGMSYS_QPHY_WRAP_CTRL,
1322 SGMII_PN_SWAP_TX_RX);
1323 }
1324
developer9a12c242020-01-21 19:31:57 +08001325 /* Release PHYA power down state */
1326 clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
1327 SGMII_PHYA_PWD, 0);
1328}
1329
developer03ce27b2023-07-19 17:17:31 +08001330static void mtk_xfi_pll_enable(struct mtk_eth_priv *priv)
1331{
1332 u32 val = 0;
1333
1334 /* Add software workaround for USXGMII PLL TCL issue */
1335 regmap_write(priv->xfi_pll_regmap, XFI_PLL_ANA_GLB8,
1336 RG_XFI_PLL_ANA_SWWA);
1337
1338 regmap_read(priv->xfi_pll_regmap, XFI_PLL_DIG_GLB8, &val);
1339 val |= RG_XFI_PLL_EN;
1340 regmap_write(priv->xfi_pll_regmap, XFI_PLL_DIG_GLB8, val);
1341}
1342
1343static void mtk_usxgmii_reset(struct mtk_eth_priv *priv)
1344{
1345 switch (priv->gmac_id) {
1346 case 1:
1347 regmap_write(priv->toprgu_regmap, 0xFC, 0x0000A004);
1348 regmap_write(priv->toprgu_regmap, 0x18, 0x88F0A004);
1349 regmap_write(priv->toprgu_regmap, 0xFC, 0x00000000);
1350 regmap_write(priv->toprgu_regmap, 0x18, 0x88F00000);
1351 regmap_write(priv->toprgu_regmap, 0x18, 0x00F00000);
1352 break;
1353 case 2:
1354 regmap_write(priv->toprgu_regmap, 0xFC, 0x00005002);
1355 regmap_write(priv->toprgu_regmap, 0x18, 0x88F05002);
1356 regmap_write(priv->toprgu_regmap, 0xFC, 0x00000000);
1357 regmap_write(priv->toprgu_regmap, 0x18, 0x88F00000);
1358 regmap_write(priv->toprgu_regmap, 0x18, 0x00F00000);
1359 break;
1360 }
1361
1362 mdelay(10);
1363}
1364
1365static void mtk_usxgmii_setup_phya_an_10000(struct mtk_eth_priv *priv)
1366{
1367 regmap_write(priv->usxgmii_regmap, 0x810, 0x000FFE6D);
1368 regmap_write(priv->usxgmii_regmap, 0x818, 0x07B1EC7B);
1369 regmap_write(priv->usxgmii_regmap, 0x80C, 0x30000000);
1370 ndelay(1020);
1371 regmap_write(priv->usxgmii_regmap, 0x80C, 0x10000000);
1372 ndelay(1020);
1373 regmap_write(priv->usxgmii_regmap, 0x80C, 0x00000000);
1374
1375 regmap_write(priv->xfi_pextp_regmap, 0x9024, 0x00C9071C);
1376 regmap_write(priv->xfi_pextp_regmap, 0x2020, 0xAA8585AA);
1377 regmap_write(priv->xfi_pextp_regmap, 0x2030, 0x0C020707);
1378 regmap_write(priv->xfi_pextp_regmap, 0x2034, 0x0E050F0F);
1379 regmap_write(priv->xfi_pextp_regmap, 0x2040, 0x00140032);
1380 regmap_write(priv->xfi_pextp_regmap, 0x50F0, 0x00C014AA);
1381 regmap_write(priv->xfi_pextp_regmap, 0x50E0, 0x3777C12B);
1382 regmap_write(priv->xfi_pextp_regmap, 0x506C, 0x005F9CFF);
1383 regmap_write(priv->xfi_pextp_regmap, 0x5070, 0x9D9DFAFA);
1384 regmap_write(priv->xfi_pextp_regmap, 0x5074, 0x27273F3F);
1385 regmap_write(priv->xfi_pextp_regmap, 0x5078, 0xA7883C68);
1386 regmap_write(priv->xfi_pextp_regmap, 0x507C, 0x11661166);
1387 regmap_write(priv->xfi_pextp_regmap, 0x5080, 0x0E000AAF);
1388 regmap_write(priv->xfi_pextp_regmap, 0x5084, 0x08080D0D);
1389 regmap_write(priv->xfi_pextp_regmap, 0x5088, 0x02030909);
1390 regmap_write(priv->xfi_pextp_regmap, 0x50E4, 0x0C0C0000);
1391 regmap_write(priv->xfi_pextp_regmap, 0x50E8, 0x04040000);
1392 regmap_write(priv->xfi_pextp_regmap, 0x50EC, 0x0F0F0C06);
1393 regmap_write(priv->xfi_pextp_regmap, 0x50A8, 0x506E8C8C);
1394 regmap_write(priv->xfi_pextp_regmap, 0x6004, 0x18190000);
1395 regmap_write(priv->xfi_pextp_regmap, 0x00F8, 0x01423342);
1396 regmap_write(priv->xfi_pextp_regmap, 0x00F4, 0x80201F20);
1397 regmap_write(priv->xfi_pextp_regmap, 0x0030, 0x00050C00);
1398 regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x02002800);
1399 ndelay(1020);
1400 regmap_write(priv->xfi_pextp_regmap, 0x30B0, 0x00000020);
1401 regmap_write(priv->xfi_pextp_regmap, 0x3028, 0x00008A01);
1402 regmap_write(priv->xfi_pextp_regmap, 0x302C, 0x0000A884);
1403 regmap_write(priv->xfi_pextp_regmap, 0x3024, 0x00083002);
1404 regmap_write(priv->xfi_pextp_regmap, 0x3010, 0x00022220);
1405 regmap_write(priv->xfi_pextp_regmap, 0x5064, 0x0F020A01);
1406 regmap_write(priv->xfi_pextp_regmap, 0x50B4, 0x06100600);
1407 regmap_write(priv->xfi_pextp_regmap, 0x3048, 0x40704000);
1408 regmap_write(priv->xfi_pextp_regmap, 0x3050, 0xA8000000);
1409 regmap_write(priv->xfi_pextp_regmap, 0x3054, 0x000000AA);
1410 regmap_write(priv->xfi_pextp_regmap, 0x306C, 0x00000F00);
1411 regmap_write(priv->xfi_pextp_regmap, 0xA060, 0x00040000);
1412 regmap_write(priv->xfi_pextp_regmap, 0x90D0, 0x00000001);
1413 regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200E800);
1414 udelay(150);
1415 regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200C111);
1416 ndelay(1020);
1417 regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200C101);
1418 udelay(15);
1419 regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0202C111);
1420 ndelay(1020);
1421 regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0202C101);
1422 udelay(100);
1423 regmap_write(priv->xfi_pextp_regmap, 0x30B0, 0x00000030);
1424 regmap_write(priv->xfi_pextp_regmap, 0x00F4, 0x80201F00);
1425 regmap_write(priv->xfi_pextp_regmap, 0x3040, 0x30000000);
1426 udelay(400);
1427}
1428
developer7fe0b3c2024-12-17 16:39:50 +08001429static void mtk_usxgmii_setup_phya_force_10000(struct mtk_eth_priv *priv)
1430{
1431 regmap_write(priv->usxgmii_regmap, 0x810, 0x000FFE6C);
1432 regmap_write(priv->usxgmii_regmap, 0x818, 0x07B1EC7B);
1433 regmap_write(priv->usxgmii_regmap, 0x80C, 0xB0000000);
1434 ndelay(1020);
1435 regmap_write(priv->usxgmii_regmap, 0x80C, 0x90000000);
1436 ndelay(1020);
1437
1438 regmap_write(priv->xfi_pextp_regmap, 0x9024, 0x00C9071C);
1439 regmap_write(priv->xfi_pextp_regmap, 0x2020, 0xAA8585AA);
1440 regmap_write(priv->xfi_pextp_regmap, 0x2030, 0x0C020707);
1441 regmap_write(priv->xfi_pextp_regmap, 0x2034, 0x0E050F0F);
1442 regmap_write(priv->xfi_pextp_regmap, 0x2040, 0x00140032);
1443 regmap_write(priv->xfi_pextp_regmap, 0x50F0, 0x00C014AA);
1444 regmap_write(priv->xfi_pextp_regmap, 0x50E0, 0x3777C12B);
1445 regmap_write(priv->xfi_pextp_regmap, 0x506C, 0x005F9CFF);
1446 regmap_write(priv->xfi_pextp_regmap, 0x5070, 0x9D9DFAFA);
1447 regmap_write(priv->xfi_pextp_regmap, 0x5074, 0x27273F3F);
1448 regmap_write(priv->xfi_pextp_regmap, 0x5078, 0xA7883C68);
1449 regmap_write(priv->xfi_pextp_regmap, 0x507C, 0x11661166);
1450 regmap_write(priv->xfi_pextp_regmap, 0x5080, 0x0E000AAF);
1451 regmap_write(priv->xfi_pextp_regmap, 0x5084, 0x08080D0D);
1452 regmap_write(priv->xfi_pextp_regmap, 0x5088, 0x02030909);
1453 regmap_write(priv->xfi_pextp_regmap, 0x50E4, 0x0C0C0000);
1454 regmap_write(priv->xfi_pextp_regmap, 0x50E8, 0x04040000);
1455 regmap_write(priv->xfi_pextp_regmap, 0x50EC, 0x0F0F0C06);
1456 regmap_write(priv->xfi_pextp_regmap, 0x50A8, 0x506E8C8C);
1457 regmap_write(priv->xfi_pextp_regmap, 0x6004, 0x18190000);
1458 regmap_write(priv->xfi_pextp_regmap, 0x00F8, 0x01423342);
1459 regmap_write(priv->xfi_pextp_regmap, 0x00F4, 0x80201F20);
1460 regmap_write(priv->xfi_pextp_regmap, 0x0030, 0x00050C00);
1461 regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x02002800);
1462 ndelay(1020);
1463 regmap_write(priv->xfi_pextp_regmap, 0x30B0, 0x00000020);
1464 regmap_write(priv->xfi_pextp_regmap, 0x3028, 0x00008A01);
1465 regmap_write(priv->xfi_pextp_regmap, 0x302C, 0x0000A884);
1466 regmap_write(priv->xfi_pextp_regmap, 0x3024, 0x00083002);
1467 regmap_write(priv->xfi_pextp_regmap, 0x3010, 0x00022220);
1468 regmap_write(priv->xfi_pextp_regmap, 0x5064, 0x0F020A01);
1469 regmap_write(priv->xfi_pextp_regmap, 0x50B4, 0x06100600);
1470 regmap_write(priv->xfi_pextp_regmap, 0x3048, 0x47684100);
1471 regmap_write(priv->xfi_pextp_regmap, 0x3050, 0x00000000);
1472 regmap_write(priv->xfi_pextp_regmap, 0x3054, 0x00000000);
1473 regmap_write(priv->xfi_pextp_regmap, 0x306C, 0x00000F00);
1474 if (priv->gmac_id == 2)
1475 regmap_write(priv->xfi_pextp_regmap, 0xA008, 0x0007B400);
1476 regmap_write(priv->xfi_pextp_regmap, 0xA060, 0x00040000);
1477 regmap_write(priv->xfi_pextp_regmap, 0x90D0, 0x00000001);
1478 regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200E800);
1479 udelay(150);
1480 regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200C111);
1481 ndelay(1020);
1482 regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200C101);
1483 udelay(15);
1484 regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0202C111);
1485 ndelay(1020);
1486 regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0202C101);
1487 udelay(100);
1488 regmap_write(priv->xfi_pextp_regmap, 0x30B0, 0x00000030);
1489 regmap_write(priv->xfi_pextp_regmap, 0x00F4, 0x80201F00);
1490 regmap_write(priv->xfi_pextp_regmap, 0x3040, 0x30000000);
1491 udelay(400);
1492}
1493
developer03ce27b2023-07-19 17:17:31 +08001494static void mtk_usxgmii_an_init(struct mtk_eth_priv *priv)
1495{
1496 mtk_xfi_pll_enable(priv);
1497 mtk_usxgmii_reset(priv);
1498 mtk_usxgmii_setup_phya_an_10000(priv);
1499}
1500
developer7fe0b3c2024-12-17 16:39:50 +08001501static void mtk_10gbaser_init(struct mtk_eth_priv *priv)
1502{
1503 mtk_xfi_pll_enable(priv);
1504 mtk_usxgmii_reset(priv);
1505 mtk_usxgmii_setup_phya_force_10000(priv);
1506}
1507
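/*
 * Per-GMAC MAC and path setup: select the SGMII/QPHY paths where the SoC
 * requires it, program the GE mode (RGMII/MII/RMII) in ETHSYS_SYSCFG1, set
 * up a fixed-speed GMAC_PORT_MCR when force mode is used, and lower the
 * TRGMII Tx driving where applicable.
 */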
developera2a01412024-12-17 16:39:55 +08001508static int mtk_mac_init(struct mtk_eth_priv *priv)
developerc3ac93d2018-12-20 16:12:53 +08001509{
developer2da7d4a2024-12-17 16:39:41 +08001510 int i, sgmii_sel_mask = 0, ge_mode = 0;
developerc3ac93d2018-12-20 16:12:53 +08001511 u32 mcr;
1512
developere8f42692024-12-17 16:39:46 +08001513 if (MTK_HAS_CAPS(priv->soc->caps, MTK_ETH_PATH_MT7629_GMAC2)) {
1514 mtk_infra_rmw(priv, MT7629_INFRA_MISC2_REG,
1515 INFRA_MISC2_BONDING_OPTION, priv->gmac_id);
1516 }
1517
developerc3ac93d2018-12-20 16:12:53 +08001518 switch (priv->phy_interface) {
1519 case PHY_INTERFACE_MODE_RGMII_RXID:
1520 case PHY_INTERFACE_MODE_RGMII:
developer9a12c242020-01-21 19:31:57 +08001521 ge_mode = GE_MODE_RGMII;
1522 break;
developerc3ac93d2018-12-20 16:12:53 +08001523 case PHY_INTERFACE_MODE_SGMII:
developer4aafc992023-07-19 17:17:13 +08001524 case PHY_INTERFACE_MODE_2500BASEX:
developera2a01412024-12-17 16:39:55 +08001525 if (!IS_ENABLED(CONFIG_MTK_ETH_SGMII)) {
1526 printf("Error: SGMII is not supported on this platform\n");
1527 return -ENOTSUPP;
1528 }
1529
developera5d712a2023-07-19 17:17:22 +08001530 if (MTK_HAS_CAPS(priv->soc->caps, MTK_GMAC2_U3_QPHY)) {
1531 mtk_infra_rmw(priv, USB_PHY_SWITCH_REG, QPHY_SEL_MASK,
1532 SGMII_QPHY_SEL);
1533 }
1534
developer2da7d4a2024-12-17 16:39:41 +08001535 if (MTK_HAS_CAPS(priv->soc->caps, MTK_ETH_PATH_MT7622_SGMII))
1536 sgmii_sel_mask = SYSCFG1_SGMII_SEL_M;
1537
1538 mtk_ethsys_rmw(priv, ETHSYS_SYSCFG1_REG, sgmii_sel_mask,
developer0b584952024-12-17 16:39:27 +08001539 SYSCFG1_SGMII_SEL(priv->gmac_id));
developer2da7d4a2024-12-17 16:39:41 +08001540
developer4aafc992023-07-19 17:17:13 +08001541 if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
1542 mtk_sgmii_an_init(priv);
1543 else
1544 mtk_sgmii_force_init(priv);
developera2a01412024-12-17 16:39:55 +08001545
1546 ge_mode = GE_MODE_RGMII;
developerc3ac93d2018-12-20 16:12:53 +08001547 break;
1548 case PHY_INTERFACE_MODE_MII:
1549 case PHY_INTERFACE_MODE_GMII:
1550 ge_mode = GE_MODE_MII;
1551 break;
1552 case PHY_INTERFACE_MODE_RMII:
1553 ge_mode = GE_MODE_RMII;
1554 break;
1555 default:
1556 break;
1557 }
1558
1559 /* set the gmac to the right mode */
developer0b584952024-12-17 16:39:27 +08001560 mtk_ethsys_rmw(priv, ETHSYS_SYSCFG1_REG,
1561 SYSCFG1_GE_MODE_M << SYSCFG1_GE_MODE_S(priv->gmac_id),
1562 ge_mode << SYSCFG1_GE_MODE_S(priv->gmac_id));
developerc3ac93d2018-12-20 16:12:53 +08001563
1564 if (priv->force_mode) {
developerd5d73952020-02-18 16:49:37 +08001565 mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
developerc3ac93d2018-12-20 16:12:53 +08001566 (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
1567 MAC_MODE | FORCE_MODE |
1568 MAC_TX_EN | MAC_RX_EN |
1569 BKOFF_EN | BACKPR_EN |
1570 FORCE_LINK;
1571
1572 switch (priv->speed) {
1573 case SPEED_10:
1574 mcr |= SPEED_10M << FORCE_SPD_S;
1575 break;
1576 case SPEED_100:
1577 mcr |= SPEED_100M << FORCE_SPD_S;
1578 break;
1579 case SPEED_1000:
developer4aafc992023-07-19 17:17:13 +08001580 case SPEED_2500:
developerc3ac93d2018-12-20 16:12:53 +08001581 mcr |= SPEED_1000M << FORCE_SPD_S;
1582 break;
1583 }
1584
1585 if (priv->duplex)
1586 mcr |= FORCE_DPX;
1587
1588 mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
1589 }
1590
developer1d3b1f62022-09-09 19:59:21 +08001591 if (MTK_HAS_CAPS(priv->soc->caps, MTK_GMAC1_TRGMII) &&
1592 !MTK_HAS_CAPS(priv->soc->caps, MTK_TRGMII_MT7621_CLK)) {
developerc3ac93d2018-12-20 16:12:53 +08001593 /* Lower Tx Driving for TRGMII path */
1594 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
1595 mtk_gmac_write(priv, GMAC_TRGMII_TD_ODT(i),
1596 (8 << TD_DM_DRVP_S) |
1597 (8 << TD_DM_DRVN_S));
1598
1599 mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, 0,
1600 RX_RST | RXC_DQSISEL);
1601 mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, RX_RST, 0);
1602 }
developera2a01412024-12-17 16:39:55 +08001603
1604 return 0;
developer03ce27b2023-07-19 17:17:31 +08001605}
1606
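/*
 * XGMAC setup for USXGMII/10GBASE-R/XGMII interfaces: initialize the PCS,
 * clear the GE mode bits for this GMAC, select the USXGMII mux for GMAC2
 * where required, program the forced link state of the XGMAC and keep the
 * legacy GMAC forced link-down.
 */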
developera2a01412024-12-17 16:39:55 +08001607static int mtk_xmac_init(struct mtk_eth_priv *priv)
developer03ce27b2023-07-19 17:17:31 +08001608{
developeref7b6502024-01-22 10:08:16 +08001609 u32 force_link = 0;
developer03ce27b2023-07-19 17:17:31 +08001610
developera2a01412024-12-17 16:39:55 +08001611 if (!IS_ENABLED(CONFIG_MTK_ETH_XGMII)) {
1612 printf("Error: 10Gb interface is not supported on this platform\n");
1613 return -ENOTSUPP;
1614 }
1615
developer03ce27b2023-07-19 17:17:31 +08001616 switch (priv->phy_interface) {
1617 case PHY_INTERFACE_MODE_USXGMII:
1618 mtk_usxgmii_an_init(priv);
1619 break;
developer7fe0b3c2024-12-17 16:39:50 +08001620 case PHY_INTERFACE_MODE_10GBASER:
1621 mtk_10gbaser_init(priv);
1622 break;
developer03ce27b2023-07-19 17:17:31 +08001623 default:
1624 break;
1625 }
1626
1627 /* Set GMAC to the correct mode */
developer0b584952024-12-17 16:39:27 +08001628 mtk_ethsys_rmw(priv, ETHSYS_SYSCFG1_REG,
1629 SYSCFG1_GE_MODE_M << SYSCFG1_GE_MODE_S(priv->gmac_id),
developer03ce27b2023-07-19 17:17:31 +08001630 0);
1631
developer7fe0b3c2024-12-17 16:39:50 +08001632 if ((priv->phy_interface == PHY_INTERFACE_MODE_USXGMII ||
1633 priv->phy_interface == PHY_INTERFACE_MODE_10GBASER) &&
developeref7b6502024-01-22 10:08:16 +08001634 priv->gmac_id == 1) {
developer03ce27b2023-07-19 17:17:31 +08001635 mtk_infra_rmw(priv, TOPMISC_NETSYS_PCS_MUX,
1636 NETSYS_PCS_MUX_MASK, MUX_G2_USXGMII_SEL);
developer03ce27b2023-07-19 17:17:31 +08001637 }
1638
developeref7b6502024-01-22 10:08:16 +08001639 if (priv->phy_interface == PHY_INTERFACE_MODE_XGMII ||
1640 priv->gmac_id == 2)
1641 force_link = XGMAC_FORCE_LINK(priv->gmac_id);
1642
1643 mtk_gmac_rmw(priv, XGMAC_STS(priv->gmac_id),
1644 XGMAC_FORCE_LINK(priv->gmac_id), force_link);
1645
developer03ce27b2023-07-19 17:17:31 +08001646 /* Force GMAC link down */
1647 mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), FORCE_MODE);
developera2a01412024-12-17 16:39:55 +08001648
1649 return 0;
developerc3ac93d2018-12-20 16:12:53 +08001650}
1651
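/*
 * Initialize the PDMA TX/RX descriptor rings: give each descriptor its own
 * packet buffer, mark TX descriptors as done and preset the RX buffer
 * length, then program the ring base/size/index registers and reset the
 * DMA indexes.
 */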
1652static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
1653{
1654 char *pkt_base = priv->pkt_pool;
developera7cdebf2022-09-09 19:59:26 +08001655 struct mtk_tx_dma_v2 *txd;
1656 struct mtk_rx_dma_v2 *rxd;
developerc3ac93d2018-12-20 16:12:53 +08001657 int i;
1658
1659 mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
1660 udelay(500);
1661
developer65089f72022-09-09 19:59:24 +08001662 memset(priv->tx_ring_noc, 0, NUM_TX_DESC * priv->soc->txd_size);
1663 memset(priv->rx_ring_noc, 0, NUM_RX_DESC * priv->soc->rxd_size);
1664 memset(priv->pkt_pool, 0xff, TOTAL_PKT_BUF_SIZE);
developerc3ac93d2018-12-20 16:12:53 +08001665
Frank Wunderlich44350182020-01-31 10:23:29 +01001666 flush_dcache_range((ulong)pkt_base,
1667 (ulong)(pkt_base + TOTAL_PKT_BUF_SIZE));
developerc3ac93d2018-12-20 16:12:53 +08001668
1669 priv->rx_dma_owner_idx0 = 0;
1670 priv->tx_cpu_owner_idx0 = 0;
1671
1672 for (i = 0; i < NUM_TX_DESC; i++) {
developer65089f72022-09-09 19:59:24 +08001673 txd = priv->tx_ring_noc + i * priv->soc->txd_size;
developerc3ac93d2018-12-20 16:12:53 +08001674
developer65089f72022-09-09 19:59:24 +08001675 txd->txd1 = virt_to_phys(pkt_base);
1676 txd->txd2 = PDMA_TXD2_DDONE | PDMA_TXD2_LS0;
developera7cdebf2022-09-09 19:59:26 +08001677
developer78fed682023-07-19 17:17:37 +08001678 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1679 txd->txd5 = PDMA_V2_TXD5_FPORT_SET(priv->gmac_id == 2 ?
1680 15 : priv->gmac_id + 1);
1681 else if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
developera7cdebf2022-09-09 19:59:26 +08001682 txd->txd5 = PDMA_V2_TXD5_FPORT_SET(priv->gmac_id + 1);
1683 else
1684 txd->txd4 = PDMA_V1_TXD4_FPORT_SET(priv->gmac_id + 1);
developer65089f72022-09-09 19:59:24 +08001685
developerc3ac93d2018-12-20 16:12:53 +08001686 pkt_base += PKTSIZE_ALIGN;
1687 }
1688
1689 for (i = 0; i < NUM_RX_DESC; i++) {
developer65089f72022-09-09 19:59:24 +08001690 rxd = priv->rx_ring_noc + i * priv->soc->rxd_size;
1691
1692 rxd->rxd1 = virt_to_phys(pkt_base);
developera7cdebf2022-09-09 19:59:26 +08001693
developer78fed682023-07-19 17:17:37 +08001694 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1695 MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
developera7cdebf2022-09-09 19:59:26 +08001696 rxd->rxd2 = PDMA_V2_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1697 else
1698 rxd->rxd2 = PDMA_V1_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
developer65089f72022-09-09 19:59:24 +08001699
developerc3ac93d2018-12-20 16:12:53 +08001700 pkt_base += PKTSIZE_ALIGN;
1701 }
1702
1703 mtk_pdma_write(priv, TX_BASE_PTR_REG(0),
1704 virt_to_phys(priv->tx_ring_noc));
1705 mtk_pdma_write(priv, TX_MAX_CNT_REG(0), NUM_TX_DESC);
1706 mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);
1707
1708 mtk_pdma_write(priv, RX_BASE_PTR_REG(0),
1709 virt_to_phys(priv->rx_ring_noc));
1710 mtk_pdma_write(priv, RX_MAX_CNT_REG(0), NUM_RX_DESC);
1711 mtk_pdma_write(priv, RX_CRX_IDX_REG(0), NUM_RX_DESC - 1);
1712
1713 mtk_pdma_write(priv, PDMA_RST_IDX_REG, RST_DTX_IDX0 | RST_DRX_IDX0);
1714}
1715
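/*
 * Configure the MDC clock: enable turbo mode and derive the divider from
 * the requested frequency. A zero frequency means no "mdio" subnode was
 * found, so the hardware default is left untouched.
 */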
developer4843ad32024-01-22 10:08:11 +08001716static void mtk_eth_mdc_init(struct mtk_eth_priv *priv)
1717{
1718 u32 divider;
1719
1720 if (priv->mdc == 0)
1721 return;
1722
1723 divider = min_t(u32, DIV_ROUND_UP(MDC_MAX_FREQ, priv->mdc), MDC_MAX_DIVIDER);
1724
1725 /* Configure MDC turbo mode */
1726 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1727 mtk_gmac_rmw(priv, GMAC_MAC_MISC_REG, 0, MISC_MDC_TURBO);
1728 else
1729 mtk_gmac_rmw(priv, GMAC_PPSC_REG, 0, MISC_MDC_TURBO);
1730
1731 /* Configure MDC divider */
1732 mtk_gmac_rmw(priv, GMAC_PPSC_REG, PHY_MDC_CFG,
1733 FIELD_PREP(PHY_MDC_CFG, divider));
1734}
1735
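/*
 * Bring up the interface: reset the frame engine, forward ingress traffic
 * of this GMAC to the PDMA (and discard it on the other GDMAs), set up the
 * descriptor rings, enable the switch MAC or start the PHY, and finally
 * enable the PDMA TX/RX engines.
 */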
developerc3ac93d2018-12-20 16:12:53 +08001736static int mtk_eth_start(struct udevice *dev)
1737{
1738 struct mtk_eth_priv *priv = dev_get_priv(dev);
developer78fed682023-07-19 17:17:37 +08001739 int i, ret;
developerc3ac93d2018-12-20 16:12:53 +08001740
1741 /* Reset FE */
1742 reset_assert(&priv->rst_fe);
1743 udelay(1000);
1744 reset_deassert(&priv->rst_fe);
1745 mdelay(10);
1746
developer78fed682023-07-19 17:17:37 +08001747 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1748 MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
developera7cdebf2022-09-09 19:59:26 +08001749 setbits_le32(priv->fe_base + FE_GLO_MISC_REG, PDMA_VER_V2);
1750
developerc3ac93d2018-12-20 16:12:53 +08001751 /* Forward packets received on this GMAC to the PDMA */
1752 mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG, GDMA_FWD_TO_CPU);
1753
developer78fed682023-07-19 17:17:37 +08001754 for (i = 0; i < priv->soc->gdma_count; i++) {
1755 if (i == priv->gmac_id)
1756 continue;
1757
1758 mtk_gdma_write(priv, i, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);
1759 }
1760
1761 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3)) {
developer76e14722023-07-19 17:17:41 +08001762 if (priv->sw == SW_MT7988 && priv->gmac_id == 0) {
1763 mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG,
1764 GDMA_BRIDGE_TO_CPU);
1765 }
1766
developer78fed682023-07-19 17:17:37 +08001767 mtk_gdma_write(priv, priv->gmac_id, GDMA_EG_CTRL_REG,
1768 GDMA_CPU_BRIDGE_EN);
1769 }
developerc3ac93d2018-12-20 16:12:53 +08001770
1771 udelay(500);
1772
1773 mtk_eth_fifo_init(priv);
1774
developer08849652023-07-19 17:16:54 +08001775 if (priv->switch_mac_control)
1776 priv->switch_mac_control(priv, true);
1777
developerc3ac93d2018-12-20 16:12:53 +08001778 /* Start PHY */
1779 if (priv->sw == SW_NONE) {
1780 ret = mtk_phy_start(priv);
1781 if (ret)
1782 return ret;
1783 }
1784
1785 mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0,
1786 TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
1787 udelay(500);
1788
1789 return 0;
1790}
1791
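/*
 * Shut down the interface: disable the switch MAC, stop the PDMA TX/RX
 * engines and wait for the DMA to become idle.
 */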
1792static void mtk_eth_stop(struct udevice *dev)
1793{
1794 struct mtk_eth_priv *priv = dev_get_priv(dev);
1795
developer08849652023-07-19 17:16:54 +08001796 if (priv->switch_mac_control)
1797 priv->switch_mac_control(priv, false);
1798
developerc3ac93d2018-12-20 16:12:53 +08001799 mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG,
1800 TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN, 0);
1801 udelay(500);
1802
developera7cdebf2022-09-09 19:59:26 +08001803 wait_for_bit_le32(priv->fe_base + priv->soc->pdma_base + PDMA_GLO_CFG_REG,
developerc3ac93d2018-12-20 16:12:53 +08001804 RX_DMA_BUSY | TX_DMA_BUSY, 0, 5000, 0);
1805}
1806
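/* Program the station MAC address into the GDMA MSB/LSB registers */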
1807static int mtk_eth_write_hwaddr(struct udevice *dev)
1808{
Simon Glassfa20e932020-12-03 16:55:20 -07001809 struct eth_pdata *pdata = dev_get_plat(dev);
developerc3ac93d2018-12-20 16:12:53 +08001810 struct mtk_eth_priv *priv = dev_get_priv(dev);
1811 unsigned char *mac = pdata->enetaddr;
1812 u32 macaddr_lsb, macaddr_msb;
1813
1814 macaddr_msb = ((u32)mac[0] << 8) | (u32)mac[1];
1815 macaddr_lsb = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
1816 ((u32)mac[4] << 8) | (u32)mac[5];
1817
1818 mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_MSB_REG, macaddr_msb);
1819 mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_LSB_REG, macaddr_lsb);
1820
1821 return 0;
1822}
1823
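/*
 * Transmit one packet: copy it into the buffer of the current TX
 * descriptor, flush the cache, hand the descriptor back to the DMA with
 * the packet length set, and advance the CPU TX index.
 */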
1824static int mtk_eth_send(struct udevice *dev, void *packet, int length)
1825{
1826 struct mtk_eth_priv *priv = dev_get_priv(dev);
1827 u32 idx = priv->tx_cpu_owner_idx0;
developera7cdebf2022-09-09 19:59:26 +08001828 struct mtk_tx_dma_v2 *txd;
developerc3ac93d2018-12-20 16:12:53 +08001829 void *pkt_base;
1830
developer65089f72022-09-09 19:59:24 +08001831 txd = priv->tx_ring_noc + idx * priv->soc->txd_size;
1832
1833 if (!(txd->txd2 & PDMA_TXD2_DDONE)) {
developerc3ac93d2018-12-20 16:12:53 +08001834 debug("mtk-eth: TX DMA descriptor ring is full\n");
1835 return -EPERM;
1836 }
1837
developer65089f72022-09-09 19:59:24 +08001838 pkt_base = (void *)phys_to_virt(txd->txd1);
developerc3ac93d2018-12-20 16:12:53 +08001839 memcpy(pkt_base, packet, length);
Frank Wunderlich44350182020-01-31 10:23:29 +01001840 flush_dcache_range((ulong)pkt_base, (ulong)pkt_base +
developerc3ac93d2018-12-20 16:12:53 +08001841 roundup(length, ARCH_DMA_MINALIGN));
1842
developer78fed682023-07-19 17:17:37 +08001843 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1844 MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
developera7cdebf2022-09-09 19:59:26 +08001845 txd->txd2 = PDMA_TXD2_LS0 | PDMA_V2_TXD2_SDL0_SET(length);
1846 else
1847 txd->txd2 = PDMA_TXD2_LS0 | PDMA_V1_TXD2_SDL0_SET(length);
developerc3ac93d2018-12-20 16:12:53 +08001848
1849 priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
1850 mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);
1851
1852 return 0;
1853}
1854
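/*
 * Receive one packet: if the current RX descriptor has been completed by
 * the DMA, invalidate the cache over its buffer and return the packet
 * length. The descriptor is recycled later by free_pkt().
 */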
1855static int mtk_eth_recv(struct udevice *dev, int flags, uchar **packetp)
1856{
1857 struct mtk_eth_priv *priv = dev_get_priv(dev);
1858 u32 idx = priv->rx_dma_owner_idx0;
developera7cdebf2022-09-09 19:59:26 +08001859 struct mtk_rx_dma_v2 *rxd;
developerc3ac93d2018-12-20 16:12:53 +08001860 uchar *pkt_base;
1861 u32 length;
1862
developer65089f72022-09-09 19:59:24 +08001863 rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;
1864
1865 if (!(rxd->rxd2 & PDMA_RXD2_DDONE)) {
developerc3ac93d2018-12-20 16:12:53 +08001866 debug("mtk-eth: RX DMA descriptor ring is empty\n");
1867 return -EAGAIN;
1868 }
1869
developer78fed682023-07-19 17:17:37 +08001870 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1871 MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
developera7cdebf2022-09-09 19:59:26 +08001872 length = PDMA_V2_RXD2_PLEN0_GET(rxd->rxd2);
1873 else
1874 length = PDMA_V1_RXD2_PLEN0_GET(rxd->rxd2);
developer65089f72022-09-09 19:59:24 +08001875
1876 pkt_base = (void *)phys_to_virt(rxd->rxd1);
Frank Wunderlich44350182020-01-31 10:23:29 +01001877 invalidate_dcache_range((ulong)pkt_base, (ulong)pkt_base +
developerc3ac93d2018-12-20 16:12:53 +08001878 roundup(length, ARCH_DMA_MINALIGN));
1879
1880 if (packetp)
1881 *packetp = pkt_base;
1882
1883 return length;
1884}
1885
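/*
 * Recycle an RX buffer: restore the buffer length in the descriptor, hand
 * it back to the DMA by updating the CPU RX index, and move on to the next
 * descriptor.
 */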
1886static int mtk_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
1887{
1888 struct mtk_eth_priv *priv = dev_get_priv(dev);
1889 u32 idx = priv->rx_dma_owner_idx0;
developera7cdebf2022-09-09 19:59:26 +08001890 struct mtk_rx_dma_v2 *rxd;
developerc3ac93d2018-12-20 16:12:53 +08001891
developer65089f72022-09-09 19:59:24 +08001892 rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;
1893
developer78fed682023-07-19 17:17:37 +08001894 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1895 MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
developera7cdebf2022-09-09 19:59:26 +08001896 rxd->rxd2 = PDMA_V2_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1897 else
1898 rxd->rxd2 = PDMA_V1_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
developerc3ac93d2018-12-20 16:12:53 +08001899
1900 mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);
1901 priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;
1902
1903 return 0;
1904}
1905
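/*
 * Probe: map the frame engine and GMAC register ranges, register the MDIO
 * bus, allocate non-cached descriptor rings, configure the MDC clock and
 * the MAC, then probe either the attached PHY or the MT753x switch.
 */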
1906static int mtk_eth_probe(struct udevice *dev)
1907{
Simon Glassfa20e932020-12-03 16:55:20 -07001908 struct eth_pdata *pdata = dev_get_plat(dev);
developerc3ac93d2018-12-20 16:12:53 +08001909 struct mtk_eth_priv *priv = dev_get_priv(dev);
Frank Wunderlich44350182020-01-31 10:23:29 +01001910 ulong iobase = pdata->iobase;
developerc3ac93d2018-12-20 16:12:53 +08001911 int ret;
1912
1913 /* Frame Engine Register Base */
1914 priv->fe_base = (void *)iobase;
1915
1916 /* GMAC Register Base */
1917 priv->gmac_base = (void *)(iobase + GMAC_BASE);
1918
1919 /* Register the MDIO bus */
1920 ret = mtk_mdio_register(dev);
1921 if (ret)
1922 return ret;
1923
1924 /* Allocate non-cached memory for the TX/RX descriptor rings */
developer65089f72022-09-09 19:59:24 +08001925 priv->tx_ring_noc = (void *)
1926 noncached_alloc(priv->soc->txd_size * NUM_TX_DESC,
developerc3ac93d2018-12-20 16:12:53 +08001927 ARCH_DMA_MINALIGN);
developer65089f72022-09-09 19:59:24 +08001928 priv->rx_ring_noc = (void *)
1929 noncached_alloc(priv->soc->rxd_size * NUM_RX_DESC,
developerc3ac93d2018-12-20 16:12:53 +08001930 ARCH_DMA_MINALIGN);
1931
developer4843ad32024-01-22 10:08:11 +08001932 /* Configure the MDC clock */
1933 mtk_eth_mdc_init(priv);
1934
developerc3ac93d2018-12-20 16:12:53 +08001935 /* Set MAC mode */
developeref7b6502024-01-22 10:08:16 +08001936 if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII ||
developer7fe0b3c2024-12-17 16:39:50 +08001937 priv->phy_interface == PHY_INTERFACE_MODE_10GBASER ||
developeref7b6502024-01-22 10:08:16 +08001938 priv->phy_interface == PHY_INTERFACE_MODE_XGMII)
developera2a01412024-12-17 16:39:55 +08001939 ret = mtk_xmac_init(priv);
developer03ce27b2023-07-19 17:17:31 +08001940 else
developera2a01412024-12-17 16:39:55 +08001941 ret = mtk_mac_init(priv);
1942
1943 if (ret)
1944 return ret;
developerc3ac93d2018-12-20 16:12:53 +08001945
1946 /* Probe the PHY if no switch is specified */
1947 if (priv->sw == SW_NONE)
1948 return mtk_phy_probe(dev);
1949
1950 /* Initialize switch */
developerd5d73952020-02-18 16:49:37 +08001951 return mt753x_switch_init(priv);
developerc3ac93d2018-12-20 16:12:53 +08001952}
1953
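/* Tear down the device: release the MDIO bus and stop any running DMA */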
1954static int mtk_eth_remove(struct udevice *dev)
1955{
1956 struct mtk_eth_priv *priv = dev_get_priv(dev);
1957
1958 /* Unregister and free the MDIO bus */
1959 mdio_unregister(priv->mdio_bus);
1960 mdio_free(priv->mdio_bus);
1961
1962 /* Stop possibly started DMA */
1963 mtk_eth_stop(dev);
1964
1965 return 0;
1966}
1967
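/*
 * Parse the device tree: resolve the SoC match data and the required
 * syscon phandles, fetch reset controls, the GMAC id, the PHY interface
 * mode, the MDC frequency and any fixed-link settings, and detect whether
 * a PHY or an MT753x switch is attached.
 */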
Simon Glassaad29ae2020-12-03 16:55:21 -07001968static int mtk_eth_of_to_plat(struct udevice *dev)
developerc3ac93d2018-12-20 16:12:53 +08001969{
Simon Glassfa20e932020-12-03 16:55:20 -07001970 struct eth_pdata *pdata = dev_get_plat(dev);
developerc3ac93d2018-12-20 16:12:53 +08001971 struct mtk_eth_priv *priv = dev_get_priv(dev);
1972 struct ofnode_phandle_args args;
1973 struct regmap *regmap;
1974 const char *str;
1975 ofnode subnode;
1976 int ret;
1977
developer1d3b1f62022-09-09 19:59:21 +08001978 priv->soc = (const struct mtk_soc_data *)dev_get_driver_data(dev);
1979 if (!priv->soc) {
1980 dev_err(dev, "missing soc compatible data\n");
1981 return -EINVAL;
1982 }
developerc3ac93d2018-12-20 16:12:53 +08001983
developerafa74c22022-05-20 11:23:31 +08001984 pdata->iobase = (phys_addr_t)dev_remap_addr(dev);
developerc3ac93d2018-12-20 16:12:53 +08001985
1986 /* get corresponding ethsys phandle */
1987 ret = dev_read_phandle_with_args(dev, "mediatek,ethsys", NULL, 0, 0,
1988 &args);
1989 if (ret)
1990 return ret;
1991
developera182b7e2022-05-20 11:23:37 +08001992 priv->ethsys_regmap = syscon_node_to_regmap(args.node);
1993 if (IS_ERR(priv->ethsys_regmap))
1994 return PTR_ERR(priv->ethsys_regmap);
developerc3ac93d2018-12-20 16:12:53 +08001995
developera5d712a2023-07-19 17:17:22 +08001996 if (MTK_HAS_CAPS(priv->soc->caps, MTK_INFRA)) {
1997 /* get corresponding infracfg phandle */
1998 ret = dev_read_phandle_with_args(dev, "mediatek,infracfg",
1999 NULL, 0, 0, &args);
2000
2001 if (ret)
2002 return ret;
2003
2004 priv->infra_regmap = syscon_node_to_regmap(args.node);
2005 if (IS_ERR(priv->infra_regmap))
2006 return PTR_ERR(priv->infra_regmap);
2007 }
2008
developerc3ac93d2018-12-20 16:12:53 +08002009 /* Reset controllers */
2010 ret = reset_get_by_name(dev, "fe", &priv->rst_fe);
2011 if (ret) {
2012 printf("error: Unable to get reset ctrl for frame engine\n");
2013 return ret;
2014 }
2015
2016 priv->gmac_id = dev_read_u32_default(dev, "mediatek,gmac-id", 0);
2017
developer4843ad32024-01-22 10:08:11 +08002018 priv->mdc = 0;
2019 subnode = ofnode_find_subnode(dev_ofnode(dev), "mdio");
2020 if (ofnode_valid(subnode)) {
2021 priv->mdc = ofnode_read_u32_default(subnode, "clock-frequency", 2500000);
2022 if (priv->mdc > MDC_MAX_FREQ ||
2023 priv->mdc < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
2024 printf("error: MDIO clock frequency out of range\n");
2025 return -EINVAL;
2026 }
2027 }
2028
developerc3ac93d2018-12-20 16:12:53 +08002029 /* Interface mode is required */
Marek Behúnbc194772022-04-07 00:33:01 +02002030 pdata->phy_interface = dev_read_phy_mode(dev);
2031 priv->phy_interface = pdata->phy_interface;
Marek Behún48631e42022-04-07 00:33:03 +02002032 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA) {
developerc3ac93d2018-12-20 16:12:53 +08002033 printf("error: phy-mode is not set\n");
2034 return -EINVAL;
2035 }
2036
2037 /* Force mode or autoneg */
2038 subnode = ofnode_find_subnode(dev_ofnode(dev), "fixed-link");
2039 if (ofnode_valid(subnode)) {
2040 priv->force_mode = 1;
2041 priv->speed = ofnode_read_u32_default(subnode, "speed", 0);
2042 priv->duplex = ofnode_read_bool(subnode, "full-duplex");
2043
2044 if (priv->speed != SPEED_10 && priv->speed != SPEED_100 &&
developer4aafc992023-07-19 17:17:13 +08002045 priv->speed != SPEED_1000 && priv->speed != SPEED_2500 &&
2046 priv->speed != SPEED_10000) {
developerc3ac93d2018-12-20 16:12:53 +08002047 printf("error: no valid speed set in fixed-link\n");
2048 return -EINVAL;
2049 }
2050 }
2051
developera2a01412024-12-17 16:39:55 +08002052 if ((priv->phy_interface == PHY_INTERFACE_MODE_SGMII ||
2053 priv->phy_interface == PHY_INTERFACE_MODE_2500BASEX) &&
2054 IS_ENABLED(CONFIG_MTK_ETH_SGMII)) {
developer9a12c242020-01-21 19:31:57 +08002055 /* get corresponding sgmii phandle */
2056 ret = dev_read_phandle_with_args(dev, "mediatek,sgmiisys",
2057 NULL, 0, 0, &args);
2058 if (ret)
2059 return ret;
2060
2061 regmap = syscon_node_to_regmap(args.node);
2062
2063 if (IS_ERR(regmap))
2064 return PTR_ERR(regmap);
2065
2066 priv->sgmii_base = regmap_get_range(regmap, 0);
2067
2068 if (!priv->sgmii_base) {
2069 dev_err(dev, "Unable to find sgmii\n");
2070 return -ENODEV;
2071 }
developer053929c2022-09-09 19:59:28 +08002072
Christian Marangi50ba7a52024-06-24 23:03:30 +02002073 /* Upstream Linux uses mediatek,pnswap instead of pn_swap */
2074 priv->pn_swap = ofnode_read_bool(args.node, "pn_swap") ||
2075 ofnode_read_bool(args.node, "mediatek,pnswap");
developera2a01412024-12-17 16:39:55 +08002076 } else if ((priv->phy_interface == PHY_INTERFACE_MODE_USXGMII ||
2077 priv->phy_interface == PHY_INTERFACE_MODE_10GBASER) &&
2078 IS_ENABLED(CONFIG_MTK_ETH_XGMII)) {
developer03ce27b2023-07-19 17:17:31 +08002079 /* get corresponding usxgmii phandle */
2080 ret = dev_read_phandle_with_args(dev, "mediatek,usxgmiisys",
2081 NULL, 0, 0, &args);
2082 if (ret)
2083 return ret;
2084
2085 priv->usxgmii_regmap = syscon_node_to_regmap(args.node);
2086 if (IS_ERR(priv->usxgmii_regmap))
2087 return PTR_ERR(priv->usxgmii_regmap);
2088
2089 /* get corresponding xfi_pextp phandle */
2090 ret = dev_read_phandle_with_args(dev, "mediatek,xfi_pextp",
2091 NULL, 0, 0, &args);
2092 if (ret)
2093 return ret;
2094
2095 priv->xfi_pextp_regmap = syscon_node_to_regmap(args.node);
2096 if (IS_ERR(priv->xfi_pextp_regmap))
2097 return PTR_ERR(priv->xfi_pextp_regmap);
2098
2099 /* get corresponding xfi_pll phandle */
2100 ret = dev_read_phandle_with_args(dev, "mediatek,xfi_pll",
2101 NULL, 0, 0, &args);
2102 if (ret)
2103 return ret;
2104
2105 priv->xfi_pll_regmap = syscon_node_to_regmap(args.node);
2106 if (IS_ERR(priv->xfi_pll_regmap))
2107 return PTR_ERR(priv->xfi_pll_regmap);
2108
2109 /* get corresponding toprgu phandle */
2110 ret = dev_read_phandle_with_args(dev, "mediatek,toprgu",
2111 NULL, 0, 0, &args);
2112 if (ret)
2113 return ret;
2114
2115 priv->toprgu_regmap = syscon_node_to_regmap(args.node);
2116 if (IS_ERR(priv->toprgu_regmap))
2117 return PTR_ERR(priv->toprgu_regmap);
developer9a12c242020-01-21 19:31:57 +08002118 }
2119
developerc3ac93d2018-12-20 16:12:53 +08002120 /* Check for a switch first; otherwise a PHY will be used */
2121 priv->sw = SW_NONE;
2122 priv->switch_init = NULL;
developer08849652023-07-19 17:16:54 +08002123 priv->switch_mac_control = NULL;
developerc3ac93d2018-12-20 16:12:53 +08002124 str = dev_read_string(dev, "mediatek,switch");
2125
2126 if (str) {
2127 if (!strcmp(str, "mt7530")) {
2128 priv->sw = SW_MT7530;
2129 priv->switch_init = mt7530_setup;
developer08849652023-07-19 17:16:54 +08002130 priv->switch_mac_control = mt7530_mac_control;
developerd5d73952020-02-18 16:49:37 +08002131 priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
developer3a46a672023-07-19 17:16:59 +08002132 priv->mt753x_reset_wait_time = 1000;
developerd5d73952020-02-18 16:49:37 +08002133 } else if (!strcmp(str, "mt7531")) {
2134 priv->sw = SW_MT7531;
2135 priv->switch_init = mt7531_setup;
developer08849652023-07-19 17:16:54 +08002136 priv->switch_mac_control = mt7531_mac_control;
developerd5d73952020-02-18 16:49:37 +08002137 priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
developer3a46a672023-07-19 17:16:59 +08002138 priv->mt753x_reset_wait_time = 200;
developer76e14722023-07-19 17:17:41 +08002139 } else if (!strcmp(str, "mt7988")) {
2140 priv->sw = SW_MT7988;
2141 priv->switch_init = mt7988_setup;
2142 priv->switch_mac_control = mt7988_mac_control;
2143 priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
2144 priv->mt753x_reset_wait_time = 50;
developerc3ac93d2018-12-20 16:12:53 +08002145 } else {
2146 printf("error: unsupported switch\n");
2147 return -EINVAL;
2148 }
2149
2150 priv->mcm = dev_read_bool(dev, "mediatek,mcm");
2151 if (priv->mcm) {
2152 ret = reset_get_by_name(dev, "mcm", &priv->rst_mcm);
2153 if (ret) {
2154 printf("error: no reset ctrl for mcm\n");
2155 return ret;
2156 }
2157 } else {
2158 gpio_request_by_name(dev, "reset-gpios", 0,
2159 &priv->rst_gpio, GPIOD_IS_OUT);
2160 }
2161 } else {
developera19b69d2019-04-28 15:08:57 +08002162 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0,
2163 0, &args);
2164 if (ret) {
developerc3ac93d2018-12-20 16:12:53 +08002165 printf("error: phy-handle is not specified\n");
2166 return ret;
2167 }
2168
developera19b69d2019-04-28 15:08:57 +08002169 priv->phy_addr = ofnode_read_s32_default(args.node, "reg", -1);
developerc3ac93d2018-12-20 16:12:53 +08002170 if (priv->phy_addr < 0) {
2171 printf("error: phy address is not specified\n");
2172 return -EINVAL;
2173 }
2174 }
2175
2176 return 0;
2177}
2178
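/* Per-SoC match data: capabilities, GDMA count, PDMA base and descriptor sizes */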
developer76e14722023-07-19 17:17:41 +08002179static const struct mtk_soc_data mt7988_data = {
2180 .caps = MT7988_CAPS,
2181 .ana_rgc3 = 0x128,
2182 .gdma_count = 3,
2183 .pdma_base = PDMA_V3_BASE,
2184 .txd_size = sizeof(struct mtk_tx_dma_v2),
2185 .rxd_size = sizeof(struct mtk_rx_dma_v2),
2186};
2187
developer053929c2022-09-09 19:59:28 +08002188static const struct mtk_soc_data mt7986_data = {
2189 .caps = MT7986_CAPS,
2190 .ana_rgc3 = 0x128,
developer78fed682023-07-19 17:17:37 +08002191 .gdma_count = 2,
developer053929c2022-09-09 19:59:28 +08002192 .pdma_base = PDMA_V2_BASE,
2193 .txd_size = sizeof(struct mtk_tx_dma_v2),
2194 .rxd_size = sizeof(struct mtk_rx_dma_v2),
2195};
2196
2197static const struct mtk_soc_data mt7981_data = {
developera5d712a2023-07-19 17:17:22 +08002198 .caps = MT7981_CAPS,
developer053929c2022-09-09 19:59:28 +08002199 .ana_rgc3 = 0x128,
developer78fed682023-07-19 17:17:37 +08002200 .gdma_count = 2,
developer053929c2022-09-09 19:59:28 +08002201 .pdma_base = PDMA_V2_BASE,
2202 .txd_size = sizeof(struct mtk_tx_dma_v2),
2203 .rxd_size = sizeof(struct mtk_rx_dma_v2),
2204};
2205
developer1d3b1f62022-09-09 19:59:21 +08002206static const struct mtk_soc_data mt7629_data = {
developere8f42692024-12-17 16:39:46 +08002207 .caps = MT7629_CAPS,
developer1d3b1f62022-09-09 19:59:21 +08002208 .ana_rgc3 = 0x128,
developer78fed682023-07-19 17:17:37 +08002209 .gdma_count = 2,
developera7cdebf2022-09-09 19:59:26 +08002210 .pdma_base = PDMA_V1_BASE,
developer65089f72022-09-09 19:59:24 +08002211 .txd_size = sizeof(struct mtk_tx_dma),
2212 .rxd_size = sizeof(struct mtk_rx_dma),
developer1d3b1f62022-09-09 19:59:21 +08002213};
2214
2215static const struct mtk_soc_data mt7623_data = {
2216 .caps = MT7623_CAPS,
developer78fed682023-07-19 17:17:37 +08002217 .gdma_count = 2,
developera7cdebf2022-09-09 19:59:26 +08002218 .pdma_base = PDMA_V1_BASE,
developer65089f72022-09-09 19:59:24 +08002219 .txd_size = sizeof(struct mtk_tx_dma),
2220 .rxd_size = sizeof(struct mtk_rx_dma),
developer1d3b1f62022-09-09 19:59:21 +08002221};
2222
2223static const struct mtk_soc_data mt7622_data = {
developer2da7d4a2024-12-17 16:39:41 +08002224 .caps = MT7622_CAPS,
developer1d3b1f62022-09-09 19:59:21 +08002225 .ana_rgc3 = 0x2028,
developer78fed682023-07-19 17:17:37 +08002226 .gdma_count = 2,
developera7cdebf2022-09-09 19:59:26 +08002227 .pdma_base = PDMA_V1_BASE,
developer65089f72022-09-09 19:59:24 +08002228 .txd_size = sizeof(struct mtk_tx_dma),
2229 .rxd_size = sizeof(struct mtk_rx_dma),
developer1d3b1f62022-09-09 19:59:21 +08002230};
2231
2232static const struct mtk_soc_data mt7621_data = {
2233 .caps = MT7621_CAPS,
developer78fed682023-07-19 17:17:37 +08002234 .gdma_count = 2,
developera7cdebf2022-09-09 19:59:26 +08002235 .pdma_base = PDMA_V1_BASE,
developer65089f72022-09-09 19:59:24 +08002236 .txd_size = sizeof(struct mtk_tx_dma),
2237 .rxd_size = sizeof(struct mtk_rx_dma),
developer1d3b1f62022-09-09 19:59:21 +08002238};
2239
developerc3ac93d2018-12-20 16:12:53 +08002240static const struct udevice_id mtk_eth_ids[] = {
developer76e14722023-07-19 17:17:41 +08002241 { .compatible = "mediatek,mt7988-eth", .data = (ulong)&mt7988_data },
developer053929c2022-09-09 19:59:28 +08002242 { .compatible = "mediatek,mt7986-eth", .data = (ulong)&mt7986_data },
2243 { .compatible = "mediatek,mt7981-eth", .data = (ulong)&mt7981_data },
developer1d3b1f62022-09-09 19:59:21 +08002244 { .compatible = "mediatek,mt7629-eth", .data = (ulong)&mt7629_data },
2245 { .compatible = "mediatek,mt7623-eth", .data = (ulong)&mt7623_data },
2246 { .compatible = "mediatek,mt7622-eth", .data = (ulong)&mt7622_data },
2247 { .compatible = "mediatek,mt7621-eth", .data = (ulong)&mt7621_data },
developerc3ac93d2018-12-20 16:12:53 +08002248 {}
2249};
2250
2251static const struct eth_ops mtk_eth_ops = {
2252 .start = mtk_eth_start,
2253 .stop = mtk_eth_stop,
2254 .send = mtk_eth_send,
2255 .recv = mtk_eth_recv,
2256 .free_pkt = mtk_eth_free_pkt,
2257 .write_hwaddr = mtk_eth_write_hwaddr,
2258};
2259
2260U_BOOT_DRIVER(mtk_eth) = {
2261 .name = "mtk-eth",
2262 .id = UCLASS_ETH,
2263 .of_match = mtk_eth_ids,
Simon Glassaad29ae2020-12-03 16:55:21 -07002264 .of_to_plat = mtk_eth_of_to_plat,
Simon Glass71fa5b42020-12-03 16:55:18 -07002265 .plat_auto = sizeof(struct eth_pdata),
developerc3ac93d2018-12-20 16:12:53 +08002266 .probe = mtk_eth_probe,
2267 .remove = mtk_eth_remove,
2268 .ops = &mtk_eth_ops,
Simon Glass8a2b47f2020-12-03 16:55:17 -07002269 .priv_auto = sizeof(struct mtk_eth_priv),
developerc3ac93d2018-12-20 16:12:53 +08002270 .flags = DM_FLAG_ALLOC_PRIV_DMA,
2271};