// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>

#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
#include "mtk_eth_reset.h"

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
#include "mtk_hnat/nf_hnat_mtk.h"
#endif

static int mtk_msg_level = -1;
atomic_t reset_lock = ATOMIC_INIT(0);
atomic_t force = ATOMIC_INIT(0);

module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
DECLARE_COMPLETION(wait_ser_done);

#define MTK_ETHTOOL_STAT(x) { #x, \
			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

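/* Per-SoC register maps: the DMA engines (PDMA/QDMA), MIB counters, PPE and
 * WDMA blocks sit at different offsets on each NETSYS generation, so every
 * supported SoC provides its own mtk_reg_map instead of scattering offset
 * conditionals through the register accessors.
 */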
static const struct mtk_reg_map mtk_reg_map = {
	.tx_irq_mask		= 0x1a1c,
	.tx_irq_status		= 0x1a18,
	.pdma = {
		.tx_ptr		= 0x0800,
		.tx_cnt_cfg	= 0x0804,
		.pctx_ptr	= 0x0808,
		.pdtx_ptr	= 0x080c,
		.rx_ptr		= 0x0900,
		.rx_cnt_cfg	= 0x0904,
		.pcrx_ptr	= 0x0908,
		.glo_cfg	= 0x0a04,
		.rst_idx	= 0x0a08,
		.delay_irq	= 0x0a0c,
		.irq_status	= 0x0a20,
		.irq_mask	= 0x0a28,
		.int_grp	= 0x0a50,
		.int_grp2	= 0x0a54,
	},
	.qdma = {
		.qtx_cfg	= 0x1800,
		.qtx_sch	= 0x1804,
		.rx_ptr		= 0x1900,
		.rx_cnt_cfg	= 0x1904,
		.qcrx_ptr	= 0x1908,
		.glo_cfg	= 0x1a04,
		.rst_idx	= 0x1a08,
		.delay_irq	= 0x1a0c,
		.fc_th		= 0x1a10,
		.tx_sch_rate	= 0x1a14,
		.int_grp	= 0x1a20,
		.int_grp2	= 0x1a24,
		.hred2		= 0x1a44,
		.ctx_ptr	= 0x1b00,
		.dtx_ptr	= 0x1b04,
		.crx_ptr	= 0x1b10,
		.drx_ptr	= 0x1b14,
		.fq_head	= 0x1b20,
		.fq_tail	= 0x1b24,
		.fq_count	= 0x1b28,
		.fq_blen	= 0x1b2c,
	},
	.gdm1_cnt		= 0x2400,
	.gdma_to_ppe0		= 0x4444,
	.ppe_base = {
		[0]		= 0x0c00,
	},
	.wdma_base = {
		[0]		= 0x2800,
		[1]		= 0x2c00,
	},
};

static const struct mtk_reg_map mt7628_reg_map = {
	.tx_irq_mask		= 0x0a28,
	.tx_irq_status		= 0x0a20,
	.pdma = {
		.tx_ptr		= 0x0800,
		.tx_cnt_cfg	= 0x0804,
		.pctx_ptr	= 0x0808,
		.pdtx_ptr	= 0x080c,
		.rx_ptr		= 0x0900,
		.rx_cnt_cfg	= 0x0904,
		.pcrx_ptr	= 0x0908,
		.glo_cfg	= 0x0a04,
		.rst_idx	= 0x0a08,
		.delay_irq	= 0x0a0c,
		.irq_status	= 0x0a20,
		.irq_mask	= 0x0a28,
		.int_grp	= 0x0a50,
		.int_grp2	= 0x0a54,
	},
};

static const struct mtk_reg_map mt7986_reg_map = {
	.tx_irq_mask		= 0x461c,
	.tx_irq_status		= 0x4618,
	.pdma = {
		.tx_ptr		= 0x4000,
		.tx_cnt_cfg	= 0x4004,
		.pctx_ptr	= 0x4008,
		.pdtx_ptr	= 0x400c,
		.rx_ptr		= 0x4100,
		.rx_cnt_cfg	= 0x4104,
		.pcrx_ptr	= 0x4108,
		.glo_cfg	= 0x4204,
		.rst_idx	= 0x4208,
		.delay_irq	= 0x420c,
		.irq_status	= 0x4220,
		.irq_mask	= 0x4228,
		.int_grp	= 0x4250,
		.int_grp2	= 0x4254,
	},
	.qdma = {
		.qtx_cfg	= 0x4400,
		.qtx_sch	= 0x4404,
		.rx_ptr		= 0x4500,
		.rx_cnt_cfg	= 0x4504,
		.qcrx_ptr	= 0x4508,
		.glo_cfg	= 0x4604,
		.rst_idx	= 0x4608,
		.delay_irq	= 0x460c,
		.fc_th		= 0x4610,
		.int_grp	= 0x4620,
		.int_grp2	= 0x4624,
		.hred2		= 0x4644,
		.ctx_ptr	= 0x4700,
		.dtx_ptr	= 0x4704,
		.crx_ptr	= 0x4710,
		.drx_ptr	= 0x4714,
		.fq_head	= 0x4720,
		.fq_tail	= 0x4724,
		.fq_count	= 0x4728,
		.fq_blen	= 0x472c,
		.tx_sch_rate	= 0x4798,
	},
	.gdm1_cnt		= 0x1c00,
	.gdma_to_ppe0		= 0x3333,
	.ppe_base = {
		[0]		= 0x2000,
		[1]		= 0x2400,
	},
	.wdma_base = {
		[0]		= 0x4800,
		[1]		= 0x4c00,
	},
};

static const struct mtk_reg_map mt7988_reg_map = {
	.tx_irq_mask		= 0x461c,
	.tx_irq_status		= 0x4618,
	.pdma = {
		.tx_ptr		= 0x6800,
		.tx_cnt_cfg	= 0x6804,
		.pctx_ptr	= 0x6808,
		.pdtx_ptr	= 0x680c,
		.rx_ptr		= 0x6900,
		.rx_cnt_cfg	= 0x6904,
		.pcrx_ptr	= 0x6908,
		.glo_cfg	= 0x6a04,
		.rst_idx	= 0x6a08,
		.delay_irq	= 0x6a0c,
		.irq_status	= 0x6a20,
		.irq_mask	= 0x6a28,
		.int_grp	= 0x6a50,
		.int_grp2	= 0x6a54,
	},
	.qdma = {
		.qtx_cfg	= 0x4400,
		.qtx_sch	= 0x4404,
		.rx_ptr		= 0x4500,
		.rx_cnt_cfg	= 0x4504,
		.qcrx_ptr	= 0x4508,
		.glo_cfg	= 0x4604,
		.rst_idx	= 0x4608,
		.delay_irq	= 0x460c,
		.fc_th		= 0x4610,
		.int_grp	= 0x4620,
		.int_grp2	= 0x4624,
		.hred2		= 0x4644,
		.ctx_ptr	= 0x4700,
		.dtx_ptr	= 0x4704,
		.crx_ptr	= 0x4710,
		.drx_ptr	= 0x4714,
		.fq_head	= 0x4720,
		.fq_tail	= 0x4724,
		.fq_count	= 0x4728,
		.fq_blen	= 0x472c,
		.tx_sch_rate	= 0x4798,
	},
	.gdm1_cnt		= 0x1c00,
	.gdma_to_ppe0		= 0x3333,
	.ppe_base = {
		[0]		= 0x2000,
		[1]		= 0x2400,
		[2]		= 0x2c00,
	},
	.wdma_base = {
		[0]		= 0x4800,
		[1]		= 0x4c00,
		[2]		= 0x5000,
	},
};

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "gp3",
	"xgp1", "xgp2", "xgp3", "crypto", "fe", "trgpll",
	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
	"sgmii_ck", "eth2pll", "wocpu0", "wocpu1",
	"ethwarp_wocpu2", "ethwarp_wocpu1", "ethwarp_wocpu0",
	"top_usxgmii0_sel", "top_usxgmii1_sel", "top_sgm0_sel", "top_sgm1_sel",
	"top_xfi_phy0_xtal_sel", "top_xfi_phy1_xtal_sel", "top_eth_gmii_sel",
	"top_eth_refck_50m_sel", "top_eth_sys_200m_sel", "top_eth_sys_sel",
	"top_eth_xgmii_sel", "top_eth_mii_sel", "top_netsys_sel",
	"top_netsys_500m_sel", "top_netsys_pao_2x_sel",
	"top_netsys_sync_250m_sel", "top_netsys_ppefb_250m_sel",
	"top_netsys_warp_sel", "top_macsec_sel",
};

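/* Raw register accessors: all MAC/DMA registers live in a single MMIO
 * window at eth->base, so reads, writes and read-modify-write cycles go
 * through these thin __raw_* wrappers.
 */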
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
	u32 val;

	val = mtk_r32(eth, reg);
	val &= ~mask;
	val |= set;
	mtk_w32(eth, val, reg);
	return reg;
}

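/* Poll the PHY indirect-access controller until the previous MDIO
 * transaction has completed, giving up after PHY_IAC_TIMEOUT.
 */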
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		cond_resched();
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

u32 _mtk_mdio_write(struct mtk_eth *eth, int phy_addr,
		    int phy_reg, u16 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return -1;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_WRITE |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return 0xffff;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_READ_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

static int mtk_mdio_reset(struct mii_bus *bus)
{
	/* mdiobus_register() already triggers a reset pulse when bus reset
	 * is enabled; we just need to wait until the devices are ready.
	 */
	mdelay(20);

	return 0;
}

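/* GMAC0 RGMII/TRGMII clocking: MT7621 only needs the DDR PLL mux in
 * ETHSYS_CLKCFG0 switched, while MT7623 must also retune the TRGMII PLL
 * and the RX/TX clock controls to match the negotiated link speed.
 */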
static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
				     phy_interface_t interface)
{
	u32 val = 0;

	/* Check DDR memory type.
	 * Currently TRGMII mode with DDR2 memory is not supported.
	 */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
	if (interface == PHY_INTERFACE_MODE_TRGMII &&
	    val & SYSCFG_DRAM_TYPE_DDR2) {
		dev_err(eth->dev,
			"TRGMII mode with DDR2 memory is not supported!\n");
		return -EOPNOTSUPP;
	}

	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_MT7621_MASK, val);

	return 0;
}

static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
				   phy_interface_t interface, int speed)
{
	u32 val;
	int ret;

	if (interface == PHY_INTERFACE_MODE_TRGMII) {
		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
		val = 500000000;
		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
		if (ret)
			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
		return;
	}

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

static void mtk_setup_bridge_switch(struct mtk_eth *eth)
{
	int val;

	/* Force Port1 XGMAC Link Up */
	val = mtk_r32(eth, MTK_XGMAC_STS(MTK_GMAC1_ID));
	mtk_w32(eth, val | MTK_XGMAC_FORCE_MODE(MTK_GMAC1_ID),
		MTK_XGMAC_STS(MTK_GMAC1_ID));

	/* Adjust GSW bridge IPG to 11 */
	val = mtk_r32(eth, MTK_GSW_CFG);
	val &= ~(GSWTX_IPG_MASK | GSWRX_IPG_MASK);
	val |= (GSW_IPG_11 << GSWTX_IPG_SHIFT) |
	       (GSW_IPG_11 << GSWRX_IPG_SHIFT);
	mtk_w32(eth, val, MTK_GSW_CFG);
}

static bool mtk_check_gmac23_idle(struct mtk_mac *mac)
{
	u32 mac_fsm, gdm_fsm;

	mac_fsm = mtk_r32(mac->hw, MTK_MAC_FSM(mac->id));

	switch (mac->id) {
	case MTK_GMAC2_ID:
		gdm_fsm = mtk_r32(mac->hw, MTK_FE_GDM2_FSM);
		break;
	case MTK_GMAC3_ID:
		gdm_fsm = mtk_r32(mac->hw, MTK_FE_GDM3_FSM);
		break;
	default:
		return true;
	}

	if ((mac_fsm & 0xFFFF0000) == 0x01010000 &&
	    (gdm_fsm & 0xFFFF0000) == 0x00000000)
		return true;

	return false;
}

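/* Program the per-MAC EEE block: when enabling, set the wake-up times and
 * the LPI TX idle threshold, then force the EEE mode that matches the
 * current link speed; when disabling, park the EEE register and clear the
 * force bits in MAC_MCR.
 */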
static void mtk_setup_eee(struct mtk_mac *mac, bool enable)
{
	struct mtk_eth *eth = mac->hw;
	u32 mcr, mcr_cur;
	u32 val;

	mcr = mcr_cur = mtk_r32(eth, MTK_MAC_MCR(mac->id));
	mcr &= ~(MAC_MCR_FORCE_EEE100 | MAC_MCR_FORCE_EEE1000);

	if (enable) {
		mac->tx_lpi_enabled = 1;

		val = FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 19) |
		      FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 33) |
		      FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD,
				 mac->tx_lpi_timer) |
		      FIELD_PREP(MAC_EEE_RESV0, 14);
		mtk_w32(eth, val, MTK_MAC_EEE(mac->id));

		switch (mac->speed) {
		case SPEED_1000:
			mcr |= MAC_MCR_FORCE_EEE1000;
			break;
		case SPEED_100:
			mcr |= MAC_MCR_FORCE_EEE100;
			break;
		}
	} else {
		mac->tx_lpi_enabled = 0;

		mtk_w32(eth, 0x00000002, MTK_MAC_EEE(mac->id));
	}

	/* Only update control register when needed! */
	if (mcr != mcr_cur)
		mtk_w32(eth, mcr, MTK_MAC_MCR(mac->id));
}

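/* Read the NETSYS hardware version from the "mediatek,hwver" syscon so
 * version-specific quirks can be keyed off eth->hwver; default to
 * MTK_HWID_V1 when the phandle is absent.
 */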
static int mtk_get_hwver(struct mtk_eth *eth)
{
	struct device_node *np;
	struct regmap *hwver;
	u32 info = 0;

	eth->hwver = MTK_HWID_V1;

	np = of_parse_phandle(eth->dev->of_node, "mediatek,hwver", 0);
	if (!np)
		return -EINVAL;

	hwver = syscon_node_to_regmap(np);
	if (IS_ERR(hwver))
		return PTR_ERR(hwver);

	regmap_read(hwver, 0x8, &info);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
		eth->hwver = FIELD_GET(HWVER_BIT_NETSYS_3, info);
	else
		eth->hwver = FIELD_GET(HWVER_BIT_NETSYS_1_2, info);

	of_node_put(np);

	return 0;
}

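/* phylink callback: hand back the SGMII PCS for SGMII/802.3z interfaces and
 * the USXGMII PCS for USXGMII/10GBase-KR/5GBase-R, mapping the MAC ID to
 * the matching (X)GMII instance first.
 */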
static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
					      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	unsigned int sid;

	if (interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(interface)) {
		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
		       0 : mtk_mac2xgmii_id(eth, mac->id);

		return mtk_sgmii_select_pcs(eth->sgmii, sid);
	} else if (interface == PHY_INTERFACE_MODE_USXGMII ||
		   interface == PHY_INTERFACE_MODE_10GKR ||
		   interface == PHY_INTERFACE_MODE_5GBASER) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) &&
		    mac->id != MTK_GMAC1_ID) {
			sid = mtk_mac2xgmii_id(eth, mac->id);

			return mtk_usxgmii_select_pcs(eth->usxgmii, sid);
		}
	}

	return NULL;
}

static int mtk_mac_prepare(struct phylink_config *config, unsigned int mode,
			   phy_interface_t iface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 val;

	if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
		val = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
		val &= 0xfffffff0;
		val |= XMAC_MCR_TRX_DISABLE;
		mtk_w32(mac->hw, val, MTK_XMAC_MCR(mac->id));

		val = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));
		val |= MTK_XGMAC_FORCE_MODE(mac->id);
		val &= ~MTK_XGMAC_FORCE_LINK(mac->id);
		mtk_w32(mac->hw, val, MTK_XGMAC_STS(mac->id));
	}

	return 0;
}

static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	u32 i;
	int val = 0, ge_mode, err = 0;
	unsigned int mac_type = mac->type;

	/* MT76x8 has no per-MAC hardware settings */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
	    mac->interface != state->interface) {
		/* Setup soc pin functions */
		switch (state->interface) {
		case PHY_INTERFACE_MODE_TRGMII:
			if (mac->id)
				goto err_phy;
			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
					  MTK_GMAC1_TRGMII))
				goto err_phy;
			/* fall through */
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_REVMII:
		case PHY_INTERFACE_MODE_RMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
		case PHY_INTERFACE_MODE_2500BASEX:
		case PHY_INTERFACE_MODE_SGMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_GMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
				err = mtk_gmac_gephy_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_XGMII:
			mac->type = MTK_XGDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_XGMII)) {
				err = mtk_gmac_xgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_USXGMII:
		case PHY_INTERFACE_MODE_10GKR:
		case PHY_INTERFACE_MODE_5GBASER:
			mac->type = MTK_XGDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
				err = mtk_gmac_usxgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		default:
			goto err_phy;
		}

		/* Setup clock for 1st gmac */
		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
		    !phy_interface_mode_is_8023z(state->interface) &&
		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
			if (MTK_HAS_CAPS(mac->hw->soc->caps,
					 MTK_TRGMII_MT7621_CLK)) {
				if (mt7621_gmac0_rgmii_adjust(mac->hw,
							      state->interface))
					goto err_phy;
			} else {
				mtk_gmac0_rgmii_adjust(mac->hw,
						       state->interface,
						       state->speed);

				/* mt7623_pad_clk_setup */
				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
					mtk_w32(mac->hw,
						TD_DM_DRVP(8) | TD_DM_DRVN(8),
						TRGMII_TD_ODT(i));

				/* Assert/release MT7623 RXC reset */
				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
					TRGMII_RCK_CTRL);
				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
			}
		}

		ge_mode = 0;
		switch (state->interface) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_GMII:
			ge_mode = 1;
			break;
		case PHY_INTERFACE_MODE_REVMII:
			ge_mode = 2;
			break;
		case PHY_INTERFACE_MODE_RMII:
			if (mac->id)
				goto err_phy;
			ge_mode = 3;
			break;
		default:
			break;
		}

		/* put the gmac into the right mode */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
		spin_unlock(&eth->syscfg0_lock);

		mac->interface = state->interface;
	}

	/* SGMII */
	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path from GMAC to SGMII will be enabled once the
		 * SGMIISYS setup is done.
		 */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK,
				   ~(u32)SYSCFG0_SGMII_MASK);

		/* Save the syscfg0 value for mac_finish */
		mac->syscfg0 = val;
		spin_unlock(&eth->syscfg0_lock);
	} else if (state->interface == PHY_INTERFACE_MODE_USXGMII ||
		   state->interface == PHY_INTERFACE_MODE_10GKR ||
		   state->interface == PHY_INTERFACE_MODE_5GBASER) {
		/* Nothing to do */
	} else if (phylink_autoneg_inband(mode)) {
		dev_err(eth->dev,
			"In-band mode not supported in non SGMII mode!\n");
		return;
	}

	/* Setup gmac */
	if (mac->type == MTK_XGDM_TYPE) {
		mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
		mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			if (mac->id == MTK_GMAC1_ID)
				mtk_setup_bridge_switch(eth);
		}
	} else if (mac->type == MTK_GDM_TYPE) {
		val = mtk_r32(eth, MTK_GDMA_EG_CTRL(mac->id));
		mtk_w32(eth, val & ~MTK_GDMA_XGDM_SEL,
			MTK_GDMA_EG_CTRL(mac->id));

		/* FIXME: In the current hardware design, we have to reset FE
		 * when switching XGDM to GDM. Therefore, trigger an SER here
		 * to let GDM go back to the initial state.
		 */
		if (mac->type != mac_type && !mtk_check_gmac23_idle(mac)) {
			if (!test_bit(MTK_RESETTING, &mac->hw->state)) {
				atomic_inc(&force);
				schedule_work(&eth->pending_work);
			}
		}
	}

	return;

err_phy:
	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
		mac->id, phy_modes(state->interface));
	return;

init_err:
	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
		mac->id, phy_modes(state->interface), err);
}

static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
			  phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;

	/* Enable SGMII */
	if (interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(interface))
		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, mac->syscfg0);

	return 0;
}

static int mtk_mac_pcs_get_state(struct phylink_config *config,
				 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	if (mac->type == MTK_XGDM_TYPE) {
		u32 sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));

		if (mac->id == MTK_GMAC2_ID)
			sts = sts >> 16;

		state->duplex = DUPLEX_FULL;

		switch (FIELD_GET(MTK_USXGMII_PCS_MODE, sts)) {
		case 0:
			state->speed = SPEED_10000;
			break;
		case 1:
			state->speed = SPEED_5000;
			break;
		case 2:
			state->speed = SPEED_2500;
			break;
		case 3:
			state->speed = SPEED_1000;
			break;
		}

		state->interface = mac->interface;
		state->link = FIELD_GET(MTK_USXGMII_PCS_LINK, sts);
	} else if (mac->type == MTK_GDM_TYPE) {
		struct mtk_eth *eth = mac->hw;
		struct mtk_sgmii *ss = eth->sgmii;
		u32 id = mtk_mac2xgmii_id(eth, mac->id);
		u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
		u32 bm, adv, rgc3, sgm_mode;

		state->interface = mac->interface;

		regmap_read(ss->pcs[id].regmap, SGMSYS_PCS_CONTROL_1, &bm);
		if (bm & SGMII_AN_ENABLE) {
			regmap_read(ss->pcs[id].regmap,
				    SGMSYS_PCS_ADVERTISE, &adv);

			phylink_mii_c22_pcs_decode_state(
				state,
				FIELD_GET(SGMII_BMSR, bm),
				FIELD_GET(SGMII_LPA, adv));
		} else {
			state->link = !!(bm & SGMII_LINK_STATYS);

			regmap_read(ss->pcs[id].regmap,
				    SGMSYS_SGMII_MODE, &sgm_mode);

			switch (sgm_mode & SGMII_SPEED_MASK) {
			case SGMII_SPEED_10:
				state->speed = SPEED_10;
				break;
			case SGMII_SPEED_100:
				state->speed = SPEED_100;
				break;
			case SGMII_SPEED_1000:
				regmap_read(ss->pcs[id].regmap,
					    ss->pcs[id].ana_rgc3, &rgc3);
				rgc3 = FIELD_GET(RG_PHY_SPEED_3_125G, rgc3);
				state->speed = rgc3 ? SPEED_2500 : SPEED_1000;
				break;
			}

			if (sgm_mode & SGMII_DUPLEX_HALF)
				state->duplex = DUPLEX_HALF;
			else
				state->duplex = DUPLEX_FULL;
		}

		state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
		if (pmsr & MAC_MSR_RX_FC)
			state->pause |= MLO_PAUSE_RX;
		if (pmsr & MAC_MSR_TX_FC)
			state->pause |= MLO_PAUSE_TX;
	}

	return 1;
}

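/* GDM/XGDM FSM helpers: a GDM is idle when its FSM register reads zero, an
 * XGDM when it reads 0x10000000. mtk_gdm_fsm_poll() retries a few times
 * before giving up so the link-down path does not stop the port while
 * frames are still in flight.
 */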
static int mtk_gdm_fsm_get(struct mtk_mac *mac, u32 gdm)
{
	u32 fsm = mtk_r32(mac->hw, gdm);
	u32 ret = 0, val = 0;

	switch (mac->type) {
	case MTK_GDM_TYPE:
		ret = fsm == 0;
		break;
	case MTK_XGDM_TYPE:
		ret = fsm == 0x10000000;
		break;
	default:
		break;
	}

	if ((mac->type == MTK_XGDM_TYPE) && (mac->id != MTK_GMAC1_ID)) {
		val = mtk_r32(mac->hw, MTK_MAC_FSM(mac->id));
		if ((val == 0x02010100) || (val == 0x01010100)) {
			ret = (mac->interface == PHY_INTERFACE_MODE_XGMII) ?
			      ((fsm & 0x0fffffff) == 0) : ((fsm & 0x00ffffff) == 0);
		} else
			ret = 0;
	}

	return ret;
}

static void mtk_gdm_fsm_poll(struct mtk_mac *mac)
{
	u32 gdm = 0, i = 0;

	switch (mac->id) {
	case MTK_GMAC1_ID:
		gdm = MTK_FE_GDM1_FSM;
		break;
	case MTK_GMAC2_ID:
		gdm = MTK_FE_GDM2_FSM;
		break;
	case MTK_GMAC3_ID:
		gdm = MTK_FE_GDM3_FSM;
		break;
	default:
		pr_info("%s mac id invalid", __func__);
		break;
	}

	while (i < 3) {
		if (mtk_gdm_fsm_get(mac, gdm))
			break;
		msleep(500);
		i++;
	}

	if (i == 3)
		pr_info("%s fsm invalid", __func__);
}

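/* Mark a PSE port up or down in FE_GLO_CFG; on link-down, poll the GDM FSM
 * afterwards so the frame engine has drained before the MAC is stopped.
 */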
static void mtk_pse_port_link_set(struct mtk_mac *mac, bool up)
{
	u32 fe_glo_cfg, val = 0;

	fe_glo_cfg = mtk_r32(mac->hw, MTK_FE_GLO_CFG(mac->id));
	switch (mac->id) {
	case MTK_GMAC1_ID:
		val = MTK_FE_LINK_DOWN_P1;
		break;
	case MTK_GMAC2_ID:
		val = MTK_FE_LINK_DOWN_P2;
		break;
	case MTK_GMAC3_ID:
		val = MTK_FE_LINK_DOWN_P15;
		break;
	}

	if (!up)
		fe_glo_cfg |= val;
	else
		fe_glo_cfg &= ~val;

	mtk_w32(mac->hw, fe_glo_cfg, MTK_FE_GLO_CFG(mac->id));
	mtk_gdm_fsm_poll(mac);
}

static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	unsigned int id;
	u32 mcr, sts;

	mtk_pse_port_link_set(mac, false);
	if (mac->type == MTK_GDM_TYPE) {
		mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
		mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
		mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
	} else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
		struct mtk_usxgmii_pcs *mpcs;

		mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
		mcr &= 0xfffffff0;
		mcr |= XMAC_MCR_TRX_DISABLE;
		mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));

		sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));
		sts &= ~MTK_XGMAC_FORCE_LINK(mac->id);
		mtk_w32(mac->hw, sts, MTK_XGMAC_STS(mac->id));

		id = mtk_mac2xgmii_id(eth, mac->id);
		mpcs = &eth->usxgmii->pcs[id];
	}
}

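/* Per-queue shaper: QTX_SCH encodes a rate as mantissa * 10^exponent, so
 * e.g. MAN=1/EXP=4 is 10 Mbps and MAN=25/EXP=5 is 2.5 Gbps; MT7621 uses
 * its own table of constants below.
 */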
static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
				int speed)
{
	const struct mtk_soc_data *soc = eth->soc;
	u32 val;

	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
		return;

	val = MTK_QTX_SCH_MIN_RATE_EN |
	      /* minimum: 10 Mbps */
	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
	      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1))
		val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;

	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
		switch (speed) {
		case SPEED_10:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
			break;
		case SPEED_100:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
			break;
		case SPEED_1000:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
			break;
		default:
			break;
		}
	} else {
		switch (speed) {
		case SPEED_10:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
			break;
		case SPEED_100:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
			break;
		case SPEED_1000:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
			break;
		case SPEED_2500:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 25) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
			break;
		case SPEED_10000:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 100) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
			break;
		default:
			break;
		}
	}

	mtk_w32(eth, (idx / MTK_QTX_PER_PAGE) & MTK_QTX_CFG_PAGE, MTK_QDMA_PAGE);
	mtk_w32(eth, val, MTK_QTX_SCH(idx));
}

static void mtk_mac_link_up(struct phylink_config *config,
			    struct phy_device *phy,
			    unsigned int mode, phy_interface_t interface,
			    int speed, int duplex, bool tx_pause, bool rx_pause)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr, mcr_cur, sts;

	mac->speed = speed;

	if (mac->type == MTK_GDM_TYPE) {
		mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
		mcr = mcr_cur;
		mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
			 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
			 MAC_MCR_FORCE_RX_FC);
		mcr |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
		       MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;

		/* Configure speed */
		switch (speed) {
		case SPEED_2500:
		case SPEED_1000:
			mcr |= MAC_MCR_SPEED_1000;
			break;
		case SPEED_100:
			mcr |= MAC_MCR_SPEED_100;
			break;
		}

		/* Configure duplex */
		if (duplex == DUPLEX_FULL ||
		    interface == PHY_INTERFACE_MODE_SGMII)
			mcr |= MAC_MCR_FORCE_DPX;
		else if (interface == PHY_INTERFACE_MODE_GMII)
			mcr |= MAC_MCR_PRMBL_LMT_EN;

		/* Configure pause modes -
		 * phylink will avoid these for half duplex
		 */
		if (tx_pause)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (rx_pause)
			mcr |= MAC_MCR_FORCE_RX_FC;

		mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;

		/* Only update control register when needed! */
		if (mcr != mcr_cur)
			mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

		if (mode == MLO_AN_PHY && phy)
			mtk_setup_eee(mac, phy_init_eee(phy, false) >= 0);
	} else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
		if (mode == MLO_AN_INBAND)
			mdelay(1000);

		/* Eliminate the interference (before link-up) caused by PHY noise */
		mtk_m32(mac->hw, XMAC_LOGIC_RST, 0x0, MTK_XMAC_LOGIC_RST(mac->id));
		mdelay(20);
		mtk_m32(mac->hw, XMAC_GLB_CNTCLR, 0x1, MTK_XMAC_CNT_CTRL(mac->id));

		sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));
		sts |= MTK_XGMAC_FORCE_LINK(mac->id);
		mtk_w32(mac->hw, sts, MTK_XGMAC_STS(mac->id));

		mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));

		mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC);
		/* Configure pause modes -
		 * phylink will avoid these for half duplex
		 */
		if (tx_pause)
			mcr |= XMAC_MCR_FORCE_TX_FC;
		if (rx_pause)
			mcr |= XMAC_MCR_FORCE_RX_FC;

		mcr &= ~(XMAC_MCR_TRX_DISABLE);
		mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
	}
	mtk_pse_port_link_set(mac, true);
}

static void mtk_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
	      phy_interface_mode_is_rgmii(state->interface)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
	       phy_interface_mode_is_8023z(state->interface))) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_XGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_XGMII)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_USXGMII)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_10GKR))) {
		linkmode_zero(supported);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_USXGMII:
	case PHY_INTERFACE_MODE_10GKR:
		phylink_set(mask, 10000baseKR_Full);
		phylink_set(mask, 10000baseT_Full);
		phylink_set(mask, 10000baseCR_Full);
		phylink_set(mask, 10000baseSR_Full);
		phylink_set(mask, 10000baseLR_Full);
		phylink_set(mask, 10000baseLRM_Full);
		phylink_set(mask, 10000baseER_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		phylink_set(mask, 1000baseT_Half);
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		phylink_set(mask, 5000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_TRGMII:
		phylink_set(mask, 1000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_XGMII:
		/* fall through */
	case PHY_INTERFACE_MODE_1000BASEX:
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 2500baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseT_Half);
		/* fall through */
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_REVMII:
	case PHY_INTERFACE_MODE_NA:
	default:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		break;
	}

	if (state->interface == PHY_INTERFACE_MODE_NA) {
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII)) {
			phylink_set(mask, 10000baseKR_Full);
			phylink_set(mask, 10000baseT_Full);
			phylink_set(mask, 10000baseCR_Full);
			phylink_set(mask, 10000baseSR_Full);
			phylink_set(mask, 10000baseLR_Full);
			phylink_set(mask, 10000baseLRM_Full);
			phylink_set(mask, 10000baseER_Full);
			phylink_set(mask, 1000baseKX_Full);
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
			phylink_set(mask, 2500baseT_Full);
			phylink_set(mask, 5000baseT_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
			phylink_set(mask, 1000baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
		}
	}

	if (mac->type == MTK_XGDM_TYPE) {
		phylink_clear(mask, 10baseT_Half);
		phylink_clear(mask, 100baseT_Half);
		phylink_clear(mask, 1000baseT_Half);
	}

	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}

static const struct phylink_mac_ops mtk_phylink_ops = {
	.validate = mtk_validate,
	.mac_select_pcs = mtk_mac_select_pcs,
	.mac_link_state = mtk_mac_pcs_get_state,
	.mac_prepare = mtk_mac_prepare,
	.mac_config = mtk_mac_config,
	.mac_finish = mtk_mac_finish,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
};

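/* Derive the MDC divider from the optional "clock-frequency" property of
 * the mdio-bus child node (clamped to MDC_MAX_FREQ/MDC_MAX_DIVIDER ..
 * MDC_MAX_FREQ) and switch the controller into MDC turbo mode.
 */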
static int mtk_mdc_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int max_clk = 2500000, divider;
	int ret = 0;
	u32 val;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
		if (val > MDC_MAX_FREQ ||
		    val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
			dev_err(eth->dev, "MDIO clock frequency out of range");
			ret = -EINVAL;
			goto err_put_node;
		}
		max_clk = val;
	}

	divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);

	/* Configure MDC Turbo Mode */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		val = mtk_r32(eth, MTK_MAC_MISC);
		val |= MISC_MDC_TURBO;
		mtk_w32(eth, val, MTK_MAC_MISC);
	} else {
		val = mtk_r32(eth, MTK_PPSC);
		val |= PPSC_MDC_TURBO;
		mtk_w32(eth, val, MTK_PPSC);
	}

	/* Configure MDC Divider */
	val = mtk_r32(eth, MTK_PPSC);
	val &= ~PPSC_MDC_CFG;
	val |= FIELD_PREP(PPSC_MDC_CFG, divider);
	mtk_w32(eth, val, MTK_PPSC);

	dev_info(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->reset = mtk_mdio_reset;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	if (snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np) < 0) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

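/* TX/RX interrupt mask helpers: QDMA-capable SoCs keep TX and RX masks in
 * separate registers with their own locks, while PDMA-only parts share a
 * single mask register guarded by txrx_irq_lock.
 */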
static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		spin_lock_irqsave(&eth->tx_irq_lock, flags);
		val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
		mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
		spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
	} else {
		spin_lock_irqsave(&eth->txrx_irq_lock, flags);
		val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
		mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
		spin_unlock_irqrestore(&eth->txrx_irq_lock, flags);
	}
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		spin_lock_irqsave(&eth->tx_irq_lock, flags);
		val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
		mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
		spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
	} else {
		spin_lock_irqsave(&eth->txrx_irq_lock, flags);
		val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
		mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
		spin_unlock_irqrestore(&eth->txrx_irq_lock, flags);
	}
}

static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;
	spinlock_t *irq_lock;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		irq_lock = &eth->rx_irq_lock;
	else
		irq_lock = &eth->txrx_irq_lock;

	spin_lock_irqsave(irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
	spin_unlock_irqrestore(irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;
	spinlock_t *irq_lock;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		irq_lock = &eth->rx_irq_lock;
	else
		irq_lock = &eth->txrx_irq_lock;

	spin_lock_irqsave(irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
	spin_unlock_irqrestore(irq_lock, flags);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MT7628_SDM_MAC_ADRH);
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MT7628_SDM_MAC_ADRL);
	} else {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MTK_GDMA_MAC_ADRH(mac->id));
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MTK_GDMA_MAC_ADRL(mac->id));
	}
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

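/* Fold the hardware MIB counters into the 64-bit software stats. The byte
 * counters have a separate high word that is only added in when non-zero;
 * NETSYS_V3 moved the TX counters to new offsets, hence the two layouts
 * below.
 */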
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_eth *eth = mac->hw;
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int offs = hw_stats->reg_offset;
	u64 stats;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
	stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x08 + offs);
	hw_stats->rx_overflow +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
	hw_stats->rx_fcs_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
	hw_stats->rx_short_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
	hw_stats->rx_long_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
	hw_stats->rx_checksum_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
	hw_stats->rx_flow_control_packets +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		hw_stats->tx_skip +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
		hw_stats->tx_collisions +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
		hw_stats->tx_bytes +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
	} else {
		hw_stats->tx_skip +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
		hw_stats->tx_collisions +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
		hw_stats->tx_bytes +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
	}

	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

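/* Layout of one receive fragment of mtk_max_frag_size() bytes, as
 * consumed by build_skb() in the RX path (a rough sketch; the exact
 * padding depends on the architecture):
 *
 *	NET_SKB_PAD | NET_IP_ALIGN | frame data (<= buf_size) | skb_shared_info
 *
 * mtk_max_buf_size() is therefore the inverse of mtk_max_frag_size():
 * it recovers the usable frame space from a given fragment size.
 */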
static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
			    struct mtk_rx_dma_v2 *dma_rxd)
{
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	if (!(rxd->rxd2 & RX_DMA_DONE))
		return false;

	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
		rxd->rxd7 = READ_ONCE(dma_rxd->rxd7);
	}

	return true;
}

static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
{
	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
	unsigned long data;

	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
				get_order(size));

	return (void *)data;
}

/* the QDMA core needs scratch memory to be set up */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	u64 addr64 = 0;
	int i;

	if (!eth->soc->has_sram) {
		eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
						       cnt * soc->txrx.txd_size,
						       &eth->phy_scratch_ring,
						       GFP_KERNEL);
	} else {
		eth->scratch_ring = eth->sram_base;
	}

	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dma_dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
		return -ENOMEM;

	phy_ring_tail = eth->phy_scratch_ring +
			(dma_addr_t)soc->txrx.txd_size * (cnt - 1);

	for (i = 0; i < cnt; i++) {
		struct mtk_tx_dma_v2 *txd;

		txd = eth->scratch_ring + i * soc->txrx.txd_size;
		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
		if (i < cnt - 1)
			txd->txd2 = eth->phy_scratch_ring +
				    (i + 1) * soc->txrx.txd_size;

		addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
			 TX_DMA_SDP1(dma_addr + i * MTK_QDMA_PAGE_SIZE) : 0;

		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE) | addr64;
		txd->txd4 = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			txd->txd5 = 0;
			txd->txd6 = 0;
			txd->txd7 = 0;
			txd->txd8 = 0;
		}
	}

	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);

	return 0;
}

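/* Resulting scratch ("free queue") layout, chained through txd2 and
 * advertised to the QDMA core via fq_head/fq_tail above:
 *
 *	txd[0] -> txd[1] -> ... -> txd[cnt - 1]
 *	  |          |                 |
 *	  page 0     page 1            page cnt-1  (MTK_QDMA_PAGE_SIZE each)
 *
 * The hardware pulls descriptors from this list whenever it needs
 * spare buffers of its own.
 */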
static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	return ring->dma + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    void *txd, u32 txd_size)
{
	int idx = (txd - ring->dma) / txd_size;

	return &ring->buf[idx];
}

static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
				       void *dma)
{
	return ring->dma_pdma - ring->dma + dma;
}

static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
{
	return (dma - ring->dma) / txd_size;
}

static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 bool napi)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
			dma_unmap_single(eth->dma_dev,
					 dma_unmap_addr(tx_buf, dma_addr0),
					 dma_unmap_len(tx_buf, dma_len0),
					 DMA_TO_DEVICE);
		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}
	} else {
		if (dma_unmap_len(tx_buf, dma_len0)) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}

		if (dma_unmap_len(tx_buf, dma_len1)) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr1),
				       dma_unmap_len(tx_buf, dma_len1),
				       DMA_TO_DEVICE);
		}
	}

	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
		if (napi)
			napi_consume_skb(tx_buf->skb, napi);
		else
			dev_kfree_skb_any(tx_buf->skb);
	}
	tx_buf->skb = NULL;
}

static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
			 size_t size, int idx)
{
	u64 addr64 = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
		dma_unmap_len_set(tx_buf, dma_len0, size);
	} else {
		addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
			 TX_DMA_SDP1(mapped_addr) : 0;

		if (idx & 1) {
			txd->txd3 = mapped_addr;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
				txd->txd4 = TX_DMA_PLEN1(size) | addr64;
			else
				txd->txd2 |= TX_DMA_PLEN1(size);
			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len1, size);
		} else {
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			txd->txd1 = mapped_addr;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
				txd->txd2 = TX_DMA_PLEN0(size) | addr64;
			else
				txd->txd2 = TX_DMA_PLEN0(size);
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, size);
		}
	}
}

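/* On PDMA a legacy descriptor can carry two buffers: even buffer
 * indices go into txd1/PLEN0 and odd indices into txd3/PLEN1 of the
 * same descriptor, which is why setup_tx_buf() keys off (idx & 1)
 * and why two unmap slots (dma_addr0/dma_addr1) exist per tx_buf.
 */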
static void mtk_tx_set_dma_desc_v1(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *desc = txd;
	u32 data;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_SWC | QID_LOW_BITS(info->qid) | TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data);

	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
	data |= QID_HIGH_BITS(info->qid);
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM;
		/* vlan header offload */
		if (info->vlan)
			data |= TX_DMA_INS_VLAN | info->vlan_tci;
	}

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0x7 << TX_DMA_FPORT_SHIFT);
		data |= 0x4 << TX_DMA_FPORT_SHIFT;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);
}

static void mtk_tx_set_dma_desc_v2(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma_v2 *desc = txd;
	u32 data = 0;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data);

	data = ((mac->id == MTK_GMAC3_ID) ?
		PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
		data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);

	data = 0;
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO_V2;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM_V2;
	}
	WRITE_ONCE(desc->txd5, data);

	data = 0;
	if (info->first && info->vlan)
		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
	WRITE_ONCE(desc->txd6, data);

	WRITE_ONCE(desc->txd7, 0);
	WRITE_ONCE(desc->txd8, 0);
}

static void mtk_tx_set_dma_desc_v3(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma_v2 *desc = txd;
	u64 addr64 = 0;
	u32 data = 0;

	addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
		 TX_DMA_SDP1(info->addr) : 0;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data | addr64);

	data = ((mac->id == MTK_GMAC3_ID) ?
		PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
		data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);

	data = 0;
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO_V2;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM_V2;

		if (netdev_uses_dsa(dev))
			data |= TX_DMA_SPTAG_V3;
	}
	WRITE_ONCE(desc->txd5, data);

	data = 0;
	if (info->first && info->vlan)
		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
	WRITE_ONCE(desc->txd6, data);

	WRITE_ONCE(desc->txd7, 0);
	WRITE_ONCE(desc->txd8, 0);
}

static void mtk_tx_set_pdma_desc(struct sk_buff *skb, struct net_device *dev, void *txd,
				 struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_tx_dma_v2 *desc = txd;
	u32 data = 0;

	if (info->first) {
		data = ((mac->id == MTK_GMAC3_ID) ?
			PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_PDMA;
		if (info->gso)
			data |= TX_DMA_TSO_V2;
		if (info->csum)
			data |= TX_DMA_CHKSUM_V2;
		if (netdev_uses_dsa(dev))
			data |= TX_DMA_SPTAG_V3;
		WRITE_ONCE(desc->txd5, data);

		if (info->vlan) {
			WRITE_ONCE(desc->txd6, TX_DMA_INS_VLAN_V2);
			WRITE_ONCE(desc->txd7, info->vlan_tci);
		}

		WRITE_ONCE(desc->txd8, 0);
	}
}

static void mtk_tx_set_dma_desc(struct sk_buff *skb, struct net_device *dev, void *txd,
				struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
			mtk_tx_set_dma_desc_v3(skb, dev, txd, info);
		else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
			mtk_tx_set_dma_desc_v2(skb, dev, txd, info);
		else
			mtk_tx_set_dma_desc_v1(skb, dev, txd, info);
	} else {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
			mtk_tx_set_pdma_desc(skb, dev, txd, info);
	}
}

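/* Descriptor writer selected above, by DMA engine and NETSYS revision:
 *
 *	QDMA + NETSYS_V3	mtk_tx_set_dma_desc_v3()
 *	QDMA + NETSYS_V2	mtk_tx_set_dma_desc_v2()
 *	QDMA (v1)		mtk_tx_set_dma_desc_v1()
 *	PDMA + NETSYS_V3	mtk_tx_set_pdma_desc()
 *
 * PDMA on pre-V3 SoCs is not handled here; those descriptors are
 * filled directly by setup_tx_buf() in the transmit path.
 */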
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_tx_dma_desc_info txd_info = {
		.size = skb_headlen(skb),
		.qid = skb_get_queue_mapping(skb),
		.gso = gso,
		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
		.vlan = skb_vlan_tag_present(skb),
		.vlan_tci = skb_vlan_tag_get(skb),
		.first = true,
		.last = !skb_is_nonlinear(skb),
	};
	struct netdev_queue *txq;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
	struct mtk_tx_buf *itx_buf, *tx_buf;
	int i, n_desc = 1;
	int queue = skb_get_queue_mapping(skb);
	int k = 0;

	if (skb->len < 32) {
		if (skb_put_padto(skb, MTK_MIN_TX_LENGTH))
			return -ENOMEM;

		txd_info.size = skb_headlen(skb);
	}

	txq = netdev_get_tx_queue(dev, txd_info.qid);
	itxd = ring->next_free;
	itxd_pdma = qdma_to_pdma(ring, itxd);
	if (itxd == ring->last_free)
		return -ENOMEM;

	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
	memset(itx_buf, 0, sizeof(*itx_buf));

	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
				       DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
		return -ENOMEM;

	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
		mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);
	else
		mtk_tx_set_dma_desc(skb, dev, itxd_pdma, &txd_info);

	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	itx_buf->flags |= (mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
			  (mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
						      MTK_TX_FLAGS_FPORT2;
	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
		     k++);

	/* TX SG offload */
	txd = itxd;
	txd_pdma = qdma_to_pdma(ring, txd);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool new_desc = true;

			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
			    (i & 0x1)) {
				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
				txd_pdma = qdma_to_pdma(ring, txd);
				if (txd == ring->last_free)
					goto err_dma;

				n_desc++;
			} else {
				new_desc = false;
			}

			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
			txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			txd_info.qid = queue;
			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
					!(frag_size - txd_info.size);
			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
							 offset, txd_info.size,
							 DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dma_dev,
						       txd_info.addr)))
				goto err_dma;

			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
				mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);
			else
				mtk_tx_set_dma_desc(skb, dev, txd_pdma, &txd_info);

			tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
			if (new_desc)
				memset(tx_buf, 0, sizeof(*tx_buf));
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			tx_buf->flags |=
				(mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
				(mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
							    MTK_TX_FLAGS_FPORT2;

			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
				     txd_info.size, k++);

			frag_size -= txd_info.size;
			offset += txd_info.size;
		}
	}

	/* store skb to cleanup */
	itx_buf->skb = skb;

	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			if (k & 0x1)
				txd_pdma->txd2 |= TX_DMA_LS0;
			else
				txd_pdma->txd4 |= TX_DMA_LS1_V2;
		} else {
			if (k & 0x1)
				txd_pdma->txd2 |= TX_DMA_LS0;
			else
				txd_pdma->txd2 |= TX_DMA_LS1;
		}
	}

	netdev_tx_sent_queue(txq, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		if (netif_xmit_stopped(txq) || !netdev_xmit_more())
			mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
	} else {
		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
					     ring->dma_size);
		mtk_w32(eth, next_idx, soc->reg_map->pdma.pctx_ptr);
	}

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf, false);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;

		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
		itxd_pdma = qdma_to_pdma(ring, itxd);
	} while (itxd != txd);

	return -ENOMEM;
}

static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	skb_frag_t *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
					       MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

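/* Worst-case descriptor demand, counted before any mapping is done:
 * one descriptor for the linear head plus one per fragment slice.
 * E.g. a GSO skb with a single 32 KiB fragment needs
 * 1 + DIV_ROUND_UP(32768, MTK_TX_DMA_BUF_LEN) = 4 descriptors,
 * assuming the common 16383-byte (0x3fff) per-buffer limit.
 */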
static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_tx_wake_all_queues(eth->netdev[i]);
	}
}

static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		netif_tx_stop_all_queues(dev);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		netif_tx_stop_all_queues(dev);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

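/* Note on the TSO path in mtk_start_xmit(): stashing the MSS in
 * tcp_hdr(skb)->check follows the controller's TSO convention as
 * already hinted at by the "fill MSS info" comment above; the
 * hardware takes the segment size from that field and generates the
 * real per-segment checksums itself, so the software checksum value
 * is not needed at this point.
 */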
static void mtk_update_rx_cpu_idx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
}

static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
	struct mtk_rx_ring *ring = rx_napi->rx_ring;
	int idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma_v2 *rxd, trxd;
	int done = 0;

	if (unlikely(!ring))
		goto rx_done;

	while (done < budget) {
		unsigned int pktlen, *rxdcsum;
		struct net_device *netdev = NULL;
		dma_addr_t dma_addr = DMA_MAPPING_ERROR;
		u64 addr64 = 0;
		int mac = 0;

		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
		data = ring->data[idx];

		if (!mtk_rx_get_desc(eth, &trxd, rxd))
			break;

		/* find out which mac the packet comes from. values start at 1 */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
			mac = 0;
		} else {
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
				switch (RX_DMA_GET_SPORT_V2(trxd.rxd5)) {
				case PSE_GDM1_PORT:
				case PSE_GDM2_PORT:
					mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
					break;
				case PSE_GDM3_PORT:
					mac = MTK_GMAC3_ID;
					break;
				}
			} else
				mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ?
				      0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
		}

		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
			     !eth->netdev[mac]))
			goto release_desc;

		netdev = eth->netdev[mac];

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

		/* alloc new buffer */
		if (ring->frag_size <= PAGE_SIZE)
			new_data = napi_alloc_frag(ring->frag_size);
		else
			new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dma_dev,
					  new_data + NET_SKB_PAD +
					  eth->ip_align,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
			 ((u64)(trxd.rxd2 & 0xf)) << 32 : 0;

		dma_unmap_single(eth->dma_dev,
				 ((u64)(trxd.rxd1) | addr64),
				 ring->buf_size, DMA_FROM_DEVICE);

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(data);
			netdev->stats.rx_dropped++;
			goto skip_rx;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
			rxdcsum = &trxd.rxd3;
		else
			rxdcsum = &trxd.rxd4;

		if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
				if (trxd.rxd3 & RX_DMA_VTAG_V2)
					__vlan_hwaccel_put_tag(skb,
							       htons(RX_DMA_VPID_V2(trxd.rxd4)),
							       RX_DMA_VID_V2(trxd.rxd4));
			} else {
				if (trxd.rxd2 & RX_DMA_VTAG)
					__vlan_hwaccel_put_tag(skb,
							       htons(RX_DMA_VPID(trxd.rxd3)),
							       RX_DMA_VID(trxd.rxd3));
			}

			/* If the netdev is attached to a dsa switch, the
			 * special tag inserted into the VLAN field by the
			 * switch hardware can be offloaded by RX HW VLAN
			 * offload. Clear the VLAN information from @skb to
			 * avoid an unexpected 802.1d handler before the
			 * packet enters the dsa framework.
			 */
			if (netdev_uses_dsa(netdev))
				__vlan_hwaccel_clear_tag(skb);
		}

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
			*(u32 *)(skb->head) = trxd.rxd5;
		else
			*(u32 *)(skb->head) = trxd.rxd4;

		skb_hnat_alg(skb) = 0;
		skb_hnat_filled(skb) = 0;
		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;

		if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
			trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
				     __func__, skb_hnat_reason(skb));
			skb->pkt_type = PACKET_HOST;
		}

		trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x\n",
			     __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
			     skb_hnat_reason(skb), skb_hnat_alg(skb));
#endif
		if (mtk_hwlro_stats_ebl &&
		    IS_HW_LRO_RING(ring->ring_no) && eth->hwlro) {
			hw_lro_stats_update(ring->ring_no, &trxd);
			hw_lro_flush_stats_update(ring->ring_no, &trxd);
		}

		skb_record_rx_queue(skb, 0);
		napi_gro_receive(napi, skb);

skip_rx:
		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) {
			if (unlikely(dma_addr == DMA_MAPPING_ERROR))
				addr64 = RX_DMA_GET_SDP1(rxd->rxd2);
			else
				addr64 = RX_DMA_SDP1(dma_addr);
		}

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			rxd->rxd2 = RX_DMA_LSO;
		else
			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size) | addr64;

		ring->calc_idx = idx;

		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth, ring);
	}

	return done;
}

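/* Refill protocol used above: a descriptor is only consumed once the
 * hardware has set RX_DMA_DONE in rxd2. The old buffer is handed to
 * the stack, a freshly mapped one is written back through rxd1/rxd2,
 * and rewriting rxd2 via RX_DMA_PLEN0() (which leaves the DONE bit
 * clear) returns ownership to the hardware before the CPU index
 * register is advanced.
 */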
struct mtk_poll_state {
	struct netdev_queue *txq;
	unsigned int total;
	unsigned int done;
	unsigned int bytes;
};

static void
mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
		 struct sk_buff *skb)
{
	struct netdev_queue *txq;
	struct net_device *dev;
	unsigned int bytes = skb->len;

	state->total++;

	dev = eth->netdev[mac];
	if (!dev)
		return;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	if (state->txq == txq) {
		state->done++;
		state->bytes += bytes;
		return;
	}

	if (state->txq)
		netdev_tx_completed_queue(state->txq, state->done, state->bytes);

	state->txq = txq;
	state->done = 1;
	state->bytes = bytes;
}

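/* Completions are batched per netdev queue for byte queue limits:
 * consecutive skbs from the same txq are accumulated in the state
 * and flushed with a single netdev_tx_completed_queue() call, either
 * when the txq changes or at the end of mtk_poll_tx().
 */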
static void mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
			     struct mtk_poll_state *state)
{
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = ring->last_free_ptr;
	dma = mtk_r32(eth, reg_map->qdma.drx_ptr);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac = 0;

		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);

		tx_buf = mtk_desc_to_tx_buf(ring, desc, soc->txrx.txd_size);
		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
			mac = MTK_GMAC2_ID;
		else if (tx_buf->flags & MTK_TX_FLAGS_FPORT2)
			mac = MTK_GMAC3_ID;

		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			mtk_poll_tx_done(eth, state, mac, skb);
			budget--;
		}
		mtk_tx_unmap(eth, tx_buf, true);

		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	ring->last_free_ptr = cpu;
	mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
}

static void mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
			     struct mtk_poll_state *state)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = ring->cpu_idx;
	dma = mtk_r32(eth, soc->reg_map->pdma.pdtx_ptr);

	while ((cpu != dma) && budget) {
		int mac = 0;

		desc = ring->dma_pdma + cpu * eth->soc->txrx.txd_size;
		if ((desc->txd2 & TX_DMA_OWNER_CPU) == 0)
			break;

		tx_buf = &ring->buf[cpu];
		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
			mac = MTK_GMAC2_ID;
		else if (tx_buf->flags & MTK_TX_FLAGS_FPORT2)
			mac = MTK_GMAC3_ID;

		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			mtk_poll_tx_done(eth, state, mac, skb);
			budget--;
		}

		mtk_tx_unmap(eth, tx_buf, true);

		desc = ring->dma + cpu * eth->soc->txrx.txd_size;
		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
	}

	ring->cpu_idx = cpu;
}

static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_poll_state state = {};

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_poll_tx_qdma(eth, budget, &state);
	else
		mtk_poll_tx_pdma(eth, budget, &state);

	if (state.txq)
		netdev_tx_completed_queue(state.txq, state.done, state.bytes);

	if (mtk_queue_stopped(eth) &&
	    (atomic_read(&ring->free_count) > ring->thresh))
		mtk_wake_queue(eth);

	return state.total;
}

static void mtk_handle_status_irq(struct mtk_eth *eth)
{
	u32 status2 = mtk_r32(eth, MTK_FE_INT_STATUS);

	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
		mtk_stats_update(eth);
		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
			MTK_FE_INT_STATUS);
	}
}

static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	u32 status, mask;
	int tx_done = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_handle_status_irq(eth);
		mtk_w32(eth, MTK_TX_DONE_INT(0), reg_map->tx_irq_status);
	} else {
		mtk_w32(eth, MTK_TX_DONE_INT(0), reg_map->pdma.irq_status);
	}
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
			status = mtk_r32(eth, reg_map->tx_irq_status);
			mask = mtk_r32(eth, reg_map->tx_irq_mask);
		} else {
			status = mtk_r32(eth, reg_map->pdma.irq_status);
			mask = mtk_r32(eth, reg_map->pdma.irq_mask);
		}
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n",
			 tx_done, status, mask);
	}

	if (tx_done == budget)
		return budget;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		status = mtk_r32(eth, reg_map->tx_irq_status);
	else
		status = mtk_r32(eth, reg_map->pdma.irq_status);
	if (status & MTK_TX_DONE_INT(0))
		return budget;

	if (napi_complete(napi))
		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT(0));

	return tx_done;
}

static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
	struct mtk_eth *eth = rx_napi->eth;
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	struct mtk_rx_ring *ring = rx_napi->rx_ring;
	u32 status, mask;
	int rx_done = 0;
	int remain_budget = budget;

	mtk_handle_status_irq(eth);

poll_again:
	mtk_w32(eth, MTK_RX_DONE_INT(ring->ring_no), reg_map->pdma.irq_status);
	rx_done = mtk_poll_rx(napi, remain_budget, eth);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, reg_map->pdma.irq_status);
		mask = mtk_r32(eth, reg_map->pdma.irq_mask);
		dev_info(eth->dev,
			 "done rx %d, intr 0x%08x/0x%x\n",
			 rx_done, status, mask);
	}
	if (rx_done == remain_budget)
		return budget;

	status = mtk_r32(eth, reg_map->pdma.irq_status);
	if (status & MTK_RX_DONE_INT(ring->ring_no)) {
		remain_budget -= rx_done;
		goto poll_again;
	}

	if (napi_complete(napi))
		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(ring->ring_no));

	return rx_done + budget - remain_budget;
}

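/* NAPI contract as applied above: the RX interrupt is acked before
 * polling, and if new work arrived meanwhile (irq_status still set)
 * the handler loops back to poll_again instead of completing, so the
 * interrupt is only re-enabled once the ring is verifiably empty and
 * budget remains.
 */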
static int mtk_tx_alloc(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = soc->txrx.txd_size;
	struct mtk_tx_dma_v2 *txd, *pdma_txd;

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	if (!eth->soc->has_sram)
		ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
					       &ring->phys, GFP_KERNEL);
	else {
		ring->dma = eth->sram_base + MTK_DMA_SIZE * sz;
		ring->phys = eth->phy_scratch_ring +
			     MTK_DMA_SIZE * (dma_addr_t)sz;
	}

	if (!ring->dma)
		goto no_tx_mem;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		txd = ring->dma + i * sz;
		txd->txd2 = next_ptr;
		txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		txd->txd4 = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			txd->txd5 = 0;
			txd->txd6 = 0;
			txd->txd7 = 0;
			txd->txd8 = 0;
		}
	}

	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
	 * only as the framework. The real HW descriptors are the PDMA
	 * descriptors in ring->dma_pdma.
	 */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev,
						    MTK_DMA_SIZE * sz,
						    &ring->phys_pdma, GFP_KERNEL);
		if (!ring->dma_pdma)
			goto no_tx_mem;

		for (i = 0; i < MTK_DMA_SIZE; i++) {
			pdma_txd = ring->dma_pdma + i * sz;

			pdma_txd->txd2 = TX_DMA_DESP2_DEF;
			pdma_txd->txd4 = 0;

			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
				pdma_txd->txd5 = 0;
				pdma_txd->txd6 = 0;
				pdma_txd->txd7 = 0;
				pdma_txd->txd8 = 0;
			}
		}
	}

	ring->dma_size = MTK_DMA_SIZE;
	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = ring->dma;
	ring->last_free = (void *)txd;
	ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
	ring->thresh = MAX_SKB_FRAGS;
	ring->cpu_idx = 0;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
		mtk_w32(eth,
			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
			soc->reg_map->qdma.crx_ptr);
		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
			soc->reg_map->qdma.qtx_cfg);
	} else {
		mtk_w32(eth, ring->phys_pdma, soc->reg_map->pdma.tx_ptr);
		mtk_w32(eth, MTK_DMA_SIZE, soc->reg_map->pdma.tx_cnt_cfg);
		mtk_w32(eth, ring->cpu_idx, soc->reg_map->pdma.pctx_ptr);
		mtk_w32(eth, MTK_PST_DTX_IDX_CFG(0), soc->reg_map->pdma.rst_idx);
	}

	return 0;

no_tx_mem:
	return -ENOMEM;
}

static void mtk_tx_clean(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < MTK_DMA_SIZE; i++)
			mtk_tx_unmap(eth, &ring->buf[i], false);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (!eth->soc->has_sram && ring->dma) {
		dma_free_coherent(eth->dma_dev,
				  MTK_DMA_SIZE * soc->txrx.txd_size,
				  ring->dma, ring->phys);
		ring->dma = NULL;
	}

	if (ring->dma_pdma) {
		dma_free_coherent(eth->dma_dev,
				  MTK_DMA_SIZE * soc->txrx.txd_size,
				  ring->dma_pdma, ring->phys_pdma);
		ring->dma_pdma = NULL;
	}
}

static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	struct mtk_rx_ring *ring;
	int rx_data_len, rx_dma_size;
	int i;
	u64 addr64 = 0;

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		if (ring_no)
			return -EINVAL;
		ring = &eth->rx_ring_qdma;
	} else {
		ring = &eth->rx_ring[ring_no];
	}

	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
	} else {
		rx_data_len = ETH_DATA_LEN;
		rx_dma_size = MTK_DMA_SIZE;
	}

	ring->frag_size = mtk_max_frag_size(rx_data_len);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		if (ring->frag_size <= PAGE_SIZE)
			ring->data[i] = napi_alloc_frag(ring->frag_size);
		else
			ring->data[i] = mtk_max_lro_buf_alloc(GFP_ATOMIC);
		if (!ring->data[i])
			return -ENOMEM;
	}

	if (!eth->soc->has_sram ||
	    (eth->soc->has_sram && (rx_flag != MTK_RX_FLAGS_NORMAL))) {
		ring->dma = dma_alloc_coherent(eth->dma_dev,
					       rx_dma_size * eth->soc->txrx.rxd_size,
					       &ring->phys, GFP_KERNEL);
	} else {
		struct mtk_tx_ring *tx_ring = &eth->tx_ring;

		ring->dma = tx_ring->dma + MTK_DMA_SIZE *
			    eth->soc->txrx.txd_size * (ring_no + 1);
		ring->phys = tx_ring->phys + MTK_DMA_SIZE *
			     eth->soc->txrx.txd_size * (ring_no + 1);
	}

	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		struct mtk_rx_dma_v2 *rxd;

		dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
				ring->data[i] + NET_SKB_PAD + eth->ip_align,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
			return -ENOMEM;

		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
		rxd->rxd1 = (unsigned int)dma_addr;

		addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
			 RX_DMA_SDP1(dma_addr) : 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			rxd->rxd2 = RX_DMA_LSO;
		else
			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size) | addr64;

		rxd->rxd3 = 0;
		rxd->rxd4 = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
			rxd->rxd5 = 0;
			rxd->rxd6 = 0;
			rxd->rxd7 = 0;
			rxd->rxd8 = 0;
		}
	}
	ring->dma_size = rx_dma_size;
	ring->calc_idx_update = false;
	ring->calc_idx = rx_dma_size - 1;
	ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ?
			    MTK_QRX_CRX_IDX_CFG(ring_no) :
			    MTK_PRX_CRX_IDX_CFG(ring_no);
	ring->ring_no = ring_no;
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		mtk_w32(eth, ring->phys,
			reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
		mtk_w32(eth, rx_dma_size,
			reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
		mtk_w32(eth, ring->calc_idx,
			ring->crx_idx_reg);
		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
			reg_map->qdma.rst_idx);
	} else {
		mtk_w32(eth, ring->phys,
			reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
		mtk_w32(eth, rx_dma_size,
			reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
		mtk_w32(eth, ring->calc_idx,
			ring->crx_idx_reg);
		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
			reg_map->pdma.rst_idx);
	}

	return 0;
}
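
/* Editor's sketch (illustrative, not driver code): how a >32-bit DMA
 * address is split across an RX descriptor when MTK_8GB_ADDRESSING is
 * set. rxd1 carries the low 32 bits; RX_DMA_SDP1() is assumed to place
 * bits [35:32] into rxd2[3:0], matching how mtk_rx_clean() below
 * recovers them.
 */
static inline void example_split_rx_addr(u64 dma_addr, u32 *rxd1, u32 *rxd2)
{
	*rxd1 = (u32)dma_addr;			/* low 32 bits */
	*rxd2 |= (u32)((dma_addr >> 32) & 0xf);	/* high nibble, OR'd with PLEN0 */
}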

static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram)
{
	int i;
	u64 addr64 = 0;

	if (ring->data && ring->dma) {
		for (i = 0; i < ring->dma_size; i++) {
			struct mtk_rx_dma *rxd;

			if (!ring->data[i])
				continue;

			rxd = ring->dma + i * eth->soc->txrx.rxd_size;
			if (!rxd->rxd1)
				continue;

			addr64 = (MTK_HAS_CAPS(eth->soc->caps,
					       MTK_8GB_ADDRESSING)) ?
				 ((u64)(rxd->rxd2 & 0xf)) << 32 : 0;

			dma_unmap_single(eth->dma_dev,
					 ((u64)(rxd->rxd1) | addr64),
					 ring->buf_size,
					 DMA_FROM_DEVICE);
			skb_free_frag(ring->data[i]);
		}
		kfree(ring->data);
		ring->data = NULL;
	}

	if (in_sram)
		return;

	if (ring->dma) {
		dma_free_coherent(eth->dma_dev,
				  ring->dma_size * eth->soc->txrx.rxd_size,
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}

static int mtk_hwlro_rx_init(struct mtk_eth *eth)
{
	int i;
	u32 val;
	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;

	/* set LRO rings to auto-learn mode */
	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;

	/* validate LRO ring */
	ring_ctrl_dw2 |= MTK_RING_VLD;

	/* set AGE timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;

	/* set max AGG timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;

	/* set max LRO AGG count */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;

	for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
	}

	/* enable IPv4 checksum update */
	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;

	/* switch priority comparison to packet count mode */
	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;

	/* bandwidth threshold setting */
	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);

	/* auto-learn score delta setting */
	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_LRO_ALT_SCORE_DELTA);

	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
		MTK_PDMA_LRO_ALT_REFRESH_TIMER);

	/* the minimum remaining room of SDL0 in the RXD for LRO aggregation */
	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
		val = mtk_r32(eth, MTK_PDMA_RX_CFG);
		mtk_w32(eth, val | (MTK_PDMA_LRO_SDL << MTK_RX_CFG_SDL_OFFSET),
			MTK_PDMA_RX_CFG);

		lro_ctrl_dw0 |= MTK_PDMA_LRO_SDL << MTK_CTRL_DW0_SDL_OFFSET;
	} else {
		/* set HW LRO mode & the max aggregation count for rx packets */
		lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
	}

	/* enable HW LRO */
	lro_ctrl_dw0 |= MTK_LRO_EN;

	/* enable the CPU reason blacklist */
	lro_ctrl_dw0 |= MTK_LRO_CRSN_BNW;

	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);

	/* do not use the PPE CPU reason */
	mtk_w32(eth, 0xffffffff, MTK_PDMA_LRO_CTRL_DW1);

	/* set per-LRO-ring group interrupts */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2) ||
	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		mtk_m32(eth, MTK_RX_DONE_INT(MTK_HW_LRO_RING(1)),
			MTK_RX_DONE_INT(MTK_HW_LRO_RING(1)), MTK_PDMA_INT_GRP1);
		mtk_m32(eth, MTK_RX_DONE_INT(MTK_HW_LRO_RING(2)),
			MTK_RX_DONE_INT(MTK_HW_LRO_RING(2)), MTK_PDMA_INT_GRP2);
		mtk_m32(eth, MTK_RX_DONE_INT(MTK_HW_LRO_RING(3)),
			MTK_RX_DONE_INT(MTK_HW_LRO_RING(3)), MTK_PDMA_INT_GRP3);
	}

	return 0;
}

static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
{
	int i;
	u32 val;

	/* relinquish lro rings, flush aggregated packets */
	mtk_w32(eth, MTK_LRO_RING_RELINGUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);

	/* wait for the relinquishments to finish */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
		if (val & MTK_LRO_RING_RELINGUISH_DONE) {
			mdelay(20);
			continue;
		}
		break;
	}

	/* invalidate lro rings */
	for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));

	/* disable HW LRO */
	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
}

static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
{
	u32 reg_val;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
		idx += 1;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));

	/* validate the IP setting */
	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
}

static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
{
	u32 reg_val;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
		idx += 1;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
}

static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
{
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i])
			cnt++;
	}

	return cnt;
}

static int mtk_hwlro_add_ipaddr_idx(struct net_device *dev, u32 ip4dst)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 reg_val;
	int i;

	/* check for a duplicate IP address in the current DIP list */
	for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
		reg_val = mtk_r32(eth, MTK_LRO_DIP_DW0_CFG(i));
		if (reg_val == ip4dst)
			break;
	}

	if (i <= MTK_HW_LRO_RING_NUM) {
		netdev_warn(dev, "Duplicate IP address at DIP(%d)!\n", i);
		return -EEXIST;
	}

	/* find an available DIP index */
	for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
		reg_val = mtk_r32(eth, MTK_LRO_DIP_DW0_CFG(i));
		if (reg_val == 0UL)
			break;
	}

	if (i > MTK_HW_LRO_RING_NUM) {
		netdev_warn(dev, "No available DIP index!\n");
		return -EBUSY;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
		i -= 1;

	return i;
}

static int mtk_hwlro_get_ipaddr_idx(struct net_device *dev, u32 ip4dst)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 reg_val;
	int i;

	/* find the DIP index that matches the given IP address */
	for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
		reg_val = mtk_r32(eth, MTK_LRO_DIP_DW0_CFG(i));
		if (reg_val == ip4dst)
			break;
	}

	if (i > MTK_HW_LRO_RING_NUM) {
		netdev_warn(dev, "DIP address does not exist!\n");
		return -ENOENT;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
		i -= 1;

	return i;
}

static int mtk_hwlro_add_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;
	u32 ip4dst;

	if ((fsp->flow_type != TCP_V4_FLOW) ||
	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
	    (fsp->location > 1))
		return -EINVAL;

	ip4dst = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
	hwlro_idx = mtk_hwlro_add_ipaddr_idx(dev, ip4dst);
	if (hwlro_idx < 0)
		return hwlro_idx;

	mac->hwlro_ip[fsp->location] = ip4dst;
	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);

	return 0;
}
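
/* Editor's sketch: the ethtool_rxnfc payload mtk_hwlro_add_ipaddr()
 * accepts - a TCP/IPv4 rule with a non-zero destination IP in slot 0 or 1.
 * Userspace would normally reach this path through the ethtool ntuple
 * interface; the address below is only an illustration.
 */
static inline void example_build_hwlro_rule(struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;

	fsp->flow_type = TCP_V4_FLOW;		   /* the only accepted type */
	fsp->h_u.tcp_ip4_spec.ip4dst = 0xc0a80101; /* 192.168.1.1; the handler applies htonl() */
	fsp->location = 0;			   /* only slots 0 and 1 exist */
}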

static int mtk_hwlro_del_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;
	u32 ip4dst;

	if (fsp->location > 1)
		return -EINVAL;

	ip4dst = mac->hwlro_ip[fsp->location];
	hwlro_idx = mtk_hwlro_get_ipaddr_idx(dev, ip4dst);
	if (hwlro_idx < 0)
		return hwlro_idx;

	mac->hwlro_ip[fsp->location] = 0;
	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);

	return 0;
}

static void mtk_hwlro_netdev_enable(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int i, hwlro_idx;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i] == 0)
			continue;

		hwlro_idx = mtk_hwlro_get_ipaddr_idx(dev, mac->hwlro_ip[i]);
		if (hwlro_idx < 0)
			continue;

		mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[i]);
	}
}

static void mtk_hwlro_netdev_disable(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int i, hwlro_idx;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i] == 0)
			continue;

		hwlro_idx = mtk_hwlro_get_ipaddr_idx(dev, mac->hwlro_ip[i]);
		if (hwlro_idx < 0)
			continue;

		mac->hwlro_ip[i] = 0;

		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
	}

	mac->hwlro_ip_cnt = 0;
}

static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
				    struct ethtool_rxnfc *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;

	/* only the TCP destination IPv4 address is meaningful; all other
	 * fields are masked out
	 */
	fsp->flow_type = TCP_V4_FLOW;
	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
	fsp->m_u.tcp_ip4_spec.ip4dst = 0;

	fsp->h_u.tcp_ip4_spec.ip4src = 0;
	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
	fsp->h_u.tcp_ip4_spec.psrc = 0;
	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
	fsp->h_u.tcp_ip4_spec.pdst = 0;
	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
	fsp->h_u.tcp_ip4_spec.tos = 0;
	fsp->m_u.tcp_ip4_spec.tos = 0xff;

	return 0;
}

static int mtk_hwlro_get_fdir_all(struct net_device *dev,
				  struct ethtool_rxnfc *cmd,
				  u32 *rule_locs)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i]) {
			rule_locs[cnt] = i;
			cnt++;
		}
	}

	cmd->rule_cnt = cnt;

	return 0;
}

u32 mtk_rss_indr_table(struct mtk_rss_params *rss_params, int index)
{
	u32 val = 0;
	int i;

	for (i = 16 * index; i < 16 * index + 16; i++)
		val |= (rss_params->indirection_table[i] << (2 * (i % 16)));

	return val;
}
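
/* Editor's sketch: each RSS indirection register packs 16 table entries
 * as 2-bit ring indices, entry k of the group in bits [2k+1:2k]. This is
 * the inverse of the packing done by mtk_rss_indr_table() above.
 */
static inline u32 example_rss_indr_entry(u32 reg_val, int k)
{
	return (reg_val >> (2 * k)) & 0x3;	/* ring index for entry k */
}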

static int mtk_rss_init(struct mtk_eth *eth)
{
	struct mtk_rss_params *rss_params = &eth->rss_params;
	static u8 hash_key[MTK_RSS_HASH_KEYSIZE] = {
		0xfa, 0x01, 0xac, 0xbe, 0x3b, 0xb7, 0x42, 0x6a,
		0x0c, 0xf2, 0x30, 0x80, 0xa3, 0x2d, 0xcb, 0x77,
		0xb4, 0x30, 0x7b, 0xae, 0xcb, 0x2b, 0xca, 0xd0,
		0xb0, 0x8f, 0xa3, 0x43, 0x3d, 0x25, 0x67, 0x41,
		0xc2, 0x0e, 0x5b, 0x25, 0xda, 0x56, 0x5a, 0x6d};
	u32 val;
	int i;

	memcpy(rss_params->hash_key, hash_key, MTK_RSS_HASH_KEYSIZE);

	for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE; i++)
		rss_params->indirection_table[i] = i % eth->soc->rss_num;

	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
		/* Set RSS rings to PSE mode */
		val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(1));
		val |= MTK_RING_PSE_MODE;
		mtk_w32(eth, val, MTK_LRO_CTRL_DW2_CFG(1));

		/* Enable non-LRO multiple RX */
		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
		val |= MTK_NON_LRO_MULTI_EN;
		mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);

		/* Enable RSS delay interrupt support */
		val |= MTK_LRO_DLY_INT_EN;
		mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
	}

	/* Hash Type */
	val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
	val |= MTK_RSS_IPV4_STATIC_HASH;
	val |= MTK_RSS_IPV6_STATIC_HASH;
	mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);

	/* Hash Key */
	for (i = 0; i < MTK_RSS_HASH_KEYSIZE / sizeof(u32); i++)
		mtk_w32(eth, rss_params->hash_key[i], MTK_RSS_HASH_KEY_DW(i));

	/* Program the indirection table */
	for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE / 16; i++)
		mtk_w32(eth, mtk_rss_indr_table(rss_params, i),
			MTK_RSS_INDR_TABLE_DW(i));

	/* Pause */
	val |= MTK_RSS_CFG_REQ;
	mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);

	/* Enable RSS */
	val |= MTK_RSS_EN;
	mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);

	/* Release pause */
	val &= ~(MTK_RSS_CFG_REQ);
	mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);

	/* Set per-RSS-ring group interrupts */
	mtk_m32(eth, MTK_RX_DONE_INT(MTK_RSS_RING(0)),
		MTK_RX_DONE_INT(MTK_RSS_RING(0)), MTK_PDMA_INT_GRP1);
	mtk_m32(eth, MTK_RX_DONE_INT(MTK_RSS_RING(1)),
		MTK_RX_DONE_INT(MTK_RSS_RING(1)), MTK_PDMA_INT_GRP2);
	mtk_m32(eth, MTK_RX_DONE_INT(MTK_RSS_RING(2)),
		MTK_RX_DONE_INT(MTK_RSS_RING(2)), MTK_PDMA_INT_GRP3);

	/* Set GRP INT */
	mtk_w32(eth, 0x210FFFF2, MTK_FE_INT_GRP);

	/* Enable RSS delay interrupt */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
		mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX1_DLY_INT);
		mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX2_DLY_INT);
		mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX3_DLY_INT);
	} else {
		mtk_w32(eth, MTK_MAX_DELAY_INT_V2, MTK_PDMA_RSS_DELAY_INT);
	}

	return 0;
}

static void mtk_rss_uninit(struct mtk_eth *eth)
{
	u32 val;

	/* Pause */
	val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
	val |= MTK_RSS_CFG_REQ;
	mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);

	/* Disable RSS */
	val &= ~(MTK_RSS_EN);
	mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);

	/* Release pause */
	val &= ~(MTK_RSS_CFG_REQ);
	mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
}

static netdev_features_t mtk_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	if (!(features & NETIF_F_LRO)) {
		struct mtk_mac *mac = netdev_priv(dev);
		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);

		if (ip_cnt) {
			netdev_info(dev, "RX flow rules are programmed, LRO must stay enabled\n");

			features |= NETIF_F_LRO;
		}
	}

	if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
		netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached\n");

		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
	}

	return features;
}

static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	netdev_features_t lro;
	int err = 0;

	if (!((dev->features ^ features) & MTK_SET_FEATURES))
		return 0;

	lro = dev->features & NETIF_F_LRO;
	if (!(features & NETIF_F_LRO) && lro)
		mtk_hwlro_netdev_disable(dev);
	else if ((features & NETIF_F_LRO) && !lro)
		mtk_hwlro_netdev_enable(dev);

	if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
		mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
	else
		mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

	return err;
}

/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
			if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
			      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
				return 0;
		} else {
			if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
			      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
				return 0;
		}

		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
			break;
	}

	dev_err(eth->dev, "DMA init timeout\n");
	return -1;
}

static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;
	u32 i;

	if (mtk_dma_busy_wait(eth))
		return -EBUSY;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* QDMA needs scratch memory for internal reordering of the
		 * descriptors
		 */
		err = mtk_init_fq_dma(eth);
		if (err)
			return err;
	}

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
		if (err)
			return err;
	}

	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
	if (err)
		return err;

	if (eth->hwlro) {
		for (i = 0; i < MTK_HW_LRO_RING_NUM; i++) {
			err = mtk_rx_alloc(eth, MTK_HW_LRO_RING(i), MTK_RX_FLAGS_HWLRO);
			if (err)
				return err;
		}
		err = mtk_hwlro_rx_init(eth);
		if (err)
			return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
		for (i = 0; i < MTK_RX_RSS_NUM; i++) {
			err = mtk_rx_alloc(eth, MTK_RSS_RING(i), MTK_RX_FLAGS_NORMAL);
			if (err)
				return err;
		}
		err = mtk_rss_init(eth);
		if (err)
			return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* Enable random early drop and set drop threshold
		 * automatically
		 */
		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
			FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
		mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred2);
	}

	return 0;
}

static void mtk_dma_free(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
	if (!eth->soc->has_sram && eth->scratch_ring) {
		dma_free_coherent(eth->dma_dev,
				  MTK_DMA_SIZE * soc->txrx.txd_size,
				  eth->scratch_ring, eth->phy_scratch_ring);
		eth->scratch_ring = NULL;
		eth->phy_scratch_ring = 0;
	}
	mtk_tx_clean(eth);
	mtk_rx_clean(eth, &eth->rx_ring[0], eth->soc->has_sram);
	mtk_rx_clean(eth, &eth->rx_ring_qdma, 0);

	if (eth->hwlro) {
		mtk_hwlro_rx_uninit(eth);

		for (i = 0; i < MTK_HW_LRO_RING_NUM; i++)
			mtk_rx_clean(eth, &eth->rx_ring[MTK_HW_LRO_RING(i)], 0);
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
		mtk_rss_uninit(eth);

		for (i = 0; i < MTK_RX_RSS_NUM; i++)
			mtk_rx_clean(eth, &eth->rx_ring[MTK_RSS_RING(i)], 1);
	}

	if (eth->scratch_head) {
		kfree(eth->scratch_head);
		eth->scratch_head = NULL;
	}
}

static void mtk_tx_timeout(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");

	if (atomic_read(&reset_lock) == 0)
		schedule_work(&eth->pending_work);
}

static irqreturn_t mtk_handle_irq_rx(int irq, void *priv)
{
	struct mtk_napi *rx_napi = priv;
	struct mtk_eth *eth = rx_napi->eth;
	struct mtk_rx_ring *ring = rx_napi->rx_ring;

	if (unlikely(!(mtk_r32(eth, eth->soc->reg_map->pdma.irq_status) &
		       mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask) &
		       MTK_RX_DONE_INT(ring->ring_no))))
		return IRQ_NONE;

	if (likely(napi_schedule_prep(&rx_napi->napi))) {
		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(ring->ring_no));
		__napi_schedule(&rx_napi->napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->tx_napi))) {
		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT(0));
		__napi_schedule(&eth->tx_napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;

	if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT(0)) {
		if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT(0))
			mtk_handle_irq_rx(irq, &eth->rx_napi[0]);
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT(0)) {
			if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT(0))
				mtk_handle_irq_tx(irq, _eth);
		}
	} else {
		if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_TX_DONE_INT(0)) {
			if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_TX_DONE_INT(0))
				mtk_handle_irq_tx(irq, _eth);
		}
	}

	return IRQ_HANDLED;
}
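
/* Editor's sketch (the real wiring lives in the probe path): on SoCs where
 * TX and RX share one interrupt line, the combined handler above is
 * registered once and demuxes by reading the DMA status/mask registers.
 * The irq argument here is an assumption for illustration.
 */
static inline int example_wire_shared_irq(struct mtk_eth *eth, int irq)
{
	return devm_request_irq(eth->dev, irq, mtk_handle_irq, 0,
				dev_name(eth->dev), eth);
}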

static irqreturn_t mtk_handle_irq_fixed_link(int irq, void *_mac)
{
	struct mtk_mac *mac = _mac;
	struct mtk_eth *eth = mac->hw;
	struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
	struct net_device *dev = phylink_priv->dev;
	int link_old, link_new;

	/* clear the interrupt status for the GPY211 */
	_mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);

	link_old = phylink_priv->link;
	link_new = _mtk_mdio_read(eth, phylink_priv->phyaddr, MII_BMSR) & BMSR_LSTATUS;

	if (link_old != link_new) {
		phylink_priv->link = link_new;
		if (link_new) {
			printk("phylink.%d %s: Link is Up\n", phylink_priv->id, dev->name);
			if (dev)
				netif_carrier_on(dev);
		} else {
			printk("phylink.%d %s: Link is Down\n", phylink_priv->id, dev->name);
			if (dev)
				netif_carrier_off(dev);
		}
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT(0));
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
	mtk_handle_irq_rx(eth->irq_fe[2], &eth->rx_napi[0]);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT(0));
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
}
#endif

static int mtk_start_dma(struct mtk_eth *eth)
{
	u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	int val, err;

	err = mtk_dma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		val = mtk_r32(eth, reg_map->qdma.glo_cfg);
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			val &= ~MTK_RESV_BUF_MASK;
			mtk_w32(eth,
				val | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
				MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
				MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
				MTK_RESV_BUF | MTK_WCOMP_EN |
				MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
				MTK_RX_2B_OFFSET, reg_map->qdma.glo_cfg);
		} else {
			mtk_w32(eth,
				val | MTK_TX_DMA_EN |
				MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
				MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
				MTK_RX_BT_32DWORDS,
				reg_map->qdma.glo_cfg);
		}

		val = mtk_r32(eth, reg_map->pdma.glo_cfg);
		mtk_w32(eth,
			val | MTK_RX_DMA_EN | rx_2b_offset |
			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
			reg_map->pdma.glo_cfg);
	} else {
		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
			reg_map->pdma.glo_cfg);
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2) && eth->hwlro) {
		val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
		mtk_w32(eth, val | MTK_RX_DMA_LRO_EN, MTK_PDMA_GLO_CFG);
	}

	return 0;
}

void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
{
	u32 val;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		return;

	val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));

	/* by default, set up the forward port to send frames to the PDMA */
	val &= ~0xffff;

	/* Enable RX checksum */
	val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

	val |= config;

	if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
		val |= MTK_GDMA_SPECIAL_TAG;

	mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
}

void mtk_set_pse_drop(u32 config)
{
	struct mtk_eth *eth = g_eth;

	if (eth)
		mtk_w32(eth, config, PSE_PPE0_DROP);
}
EXPORT_SYMBOL(mtk_set_pse_drop);

static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
{
	struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
	struct mtk_eth *eth = mac->hw;
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_link_ksettings s;
	struct net_device *ldev;
	struct list_head *iter;
	struct dsa_port *dp;
	unsigned int queue = 0;

	if (!eth->pppq_toggle)
		return NOTIFY_DONE;

	if (event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	switch (mac->id) {
	case MTK_GMAC1_ID:
		netdev_for_each_lower_dev(dev, ldev, iter) {
			if (netdev_priv(ldev) == mac)
				goto dsa_set_speed;
		}
		break;
	case MTK_GMAC2_ID:
		if (strcmp(netdev_name(dev), "eth1"))
			break;

		queue = MTK_QDMA_GMAC2_QID;
		goto set_speed;
	case MTK_GMAC3_ID:
		if (strcmp(netdev_name(dev), "eth2"))
			break;

		queue = MTK_QDMA_GMAC3_QID;
		goto set_speed;
	default:
		pr_info("%s: invalid mac id\n", __func__);
		break;
	}

	return NOTIFY_DONE;

set_speed:
	if (__ethtool_get_link_ksettings(dev, &s))
		return NOTIFY_DONE;

	if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
		return NOTIFY_DONE;

	if (queue >= MTK_QDMA_TX_NUM)
		return NOTIFY_DONE;

	if (mac->speed > 0 && mac->speed < s.base.speed)
		s.base.speed = 0;

	mtk_set_queue_speed(eth, queue, s.base.speed);

	return NOTIFY_DONE;

dsa_set_speed:
	if (!dsa_slave_dev_check(dev))
		return NOTIFY_DONE;

	if (__ethtool_get_link_ksettings(dev, &s))
		return NOTIFY_DONE;

	if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
		return NOTIFY_DONE;

	dp = dsa_port_from_netdev(dev);
	if (dp->index >= MTK_QDMA_TX_NUM)
		return NOTIFY_DONE;

	if (mac->speed > 0 && mac->speed <= s.base.speed)
		s.base.speed = 0;

	mtk_set_queue_speed(eth, dp->index, s.base.speed);

	return NOTIFY_DONE;
}
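
/* Editor's sketch (assumption; the actual registration happens elsewhere
 * in the driver): each MAC hooks mtk_device_event() into the netdevice
 * notifier chain so NETDEV_CHANGE events can retune per-port queue speed.
 */
static inline int example_register_speed_notifier(struct mtk_mac *mac)
{
	mac->device_notifier.notifier_call = mtk_device_event;
	return register_netdevice_notifier(&mac->device_notifier);
}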

static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
	u32 id = mtk_mac2xgmii_id(eth, mac->id);
	int err, i;
	struct device_node *phy_node;

	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
	if (err) {
		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
			   err);
		return err;
	}

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!refcount_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err)
			return err;

		/* Indicate to the CDM to parse the MTK special tag from the CPU */
		if (netdev_uses_dsa(dev)) {
			u32 val;

			val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
			mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
			val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
			mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
		}

		napi_enable(&eth->tx_napi);
		napi_enable(&eth->rx_napi[0].napi);
		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT(0));
		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
			for (i = 0; i < MTK_RX_RSS_NUM; i++) {
				napi_enable(&eth->rx_napi[MTK_RSS_RING(i)].napi);
				mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(MTK_RSS_RING(i)));
			}
		}

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO)) {
			for (i = 0; i < MTK_HW_LRO_RING_NUM; i++) {
				napi_enable(&eth->rx_napi[MTK_HW_LRO_RING(i)].napi);
				mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(MTK_HW_LRO_RING(i)));
			}
		}

		refcount_set(&eth->dma_refcnt, 1);
	} else {
		refcount_inc(&eth->dma_refcnt);
	}

	if (phylink_priv->desc) {
		/* Note: this programming sequence is only for the GPY211
		 * single-PHY chip. If the single-PHY chip is not a GPY211,
		 * contact your PHY vendor for the details of:
		 * - how to enable the link status change interrupt
		 * - how to clear the interrupt source
		 */

		/* clear the interrupt source for the GPY211 */
		_mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);

		/* enable the link status change interrupt for the GPY211 */
		_mtk_mdio_write(eth, phylink_priv->phyaddr, 0x19, 0x0001);

		phylink_priv->dev = dev;

		/* override the dev pointer for single-PHY chip 0 */
		if (phylink_priv->id == 0) {
			struct net_device *tmp;

			tmp = __dev_get_by_name(&init_net, phylink_priv->label);
			if (tmp)
				phylink_priv->dev = tmp;
			else
				phylink_priv->dev = NULL;
		}
	}

	phylink_start(mac->phylink);
	netif_tx_start_all_queues(dev);
	phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (!phy_node && eth->sgmii->pcs[id].regmap)
		regmap_write(eth->sgmii->pcs[id].regmap,
			     SGMSYS_QPHY_PWR_STATE_CTRL, 0);

	mtk_gdm_config(eth, mac->id, MTK_GDMA_TO_PDMA);

	return 0;
}

static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_bh(&eth->page_lock);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_bh(&eth->page_lock);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			mdelay(20);
			continue;
		}
		break;
	}
}

static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int i;
	u32 id = mtk_mac2xgmii_id(eth, mac->id);
	u32 val = 0;
	struct device_node *phy_node;

	mtk_gdm_config(eth, mac->id, MTK_GDMA_DROP_ALL);
	netif_tx_disable(dev);

	phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (!phy_node && eth->sgmii->pcs[id].regmap) {
		regmap_read(eth->sgmii->pcs[id].regmap,
			    SGMSYS_QPHY_PWR_STATE_CTRL, &val);
		val |= SGMII_PHYA_PWD;
		regmap_write(eth->sgmii->pcs[id].regmap,
			     SGMSYS_QPHY_PWR_STATE_CTRL, val);
	}

	/* disable GMAC RX */
	val = mtk_r32(eth, MTK_MAC_MCR(mac->id));
	mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(mac->id));

	phylink_stop(mac->phylink);

	phylink_disconnect_phy(mac->phylink);

	/* only shutdown DMA if this is the last user */
	if (!refcount_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT(0));
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
	napi_disable(&eth->tx_napi);
	napi_disable(&eth->rx_napi[0].napi);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
		for (i = 0; i < MTK_RX_RSS_NUM; i++) {
			mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(MTK_RSS_RING(i)));
			napi_disable(&eth->rx_napi[MTK_RSS_RING(i)].napi);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO)) {
		for (i = 0; i < MTK_HW_LRO_RING_NUM; i++) {
			mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(MTK_HW_LRO_RING(i)));
			napi_disable(&eth->rx_napi[MTK_HW_LRO_RING(i)].napi);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
	mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);

	mtk_dma_free(eth);

	return 0;
}

void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
	u32 val = 0, i = 0;

	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits, reset_bits);

	while (i++ < 5000) {
		mdelay(1);
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);

		if ((val & reset_bits) == reset_bits) {
			mtk_reset_event_update(eth, MTK_EVENT_COLD_CNT);
			regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
					   reset_bits, ~reset_bits);
			break;
		}
	}

	mdelay(10);
}
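
/* Editor's sketch: a caller asserts one or more ETHSYS reset lines with a
 * single call; the helper above polls until the bits latch, then releases
 * them. RSTCTRL_FE is assumed to be the frame-engine reset bit from the
 * driver headers; check mtk_eth_reset.h for the exact name.
 */
static inline void example_fe_cold_reset(struct mtk_eth *eth)
{
	ethsys_reset(eth, RSTCTRL_FE);	/* assert, poll, deassert */
}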

static void mtk_clk_disable(struct mtk_eth *eth)
{
	int clk;

	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
		clk_disable_unprepare(eth->clks[clk]);
}

static int mtk_clk_enable(struct mtk_eth *eth)
{
	int clk, ret;

	for (clk = 0; clk < MTK_CLK_MAX; clk++) {
		ret = clk_prepare_enable(eth->clks[clk]);
		if (ret)
			goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	while (--clk >= 0)
		clk_disable_unprepare(eth->clks[clk]);

	return ret;
}

static int mtk_napi_init(struct mtk_eth *eth)
{
	struct mtk_napi *rx_napi = &eth->rx_napi[0];
	int i;

	rx_napi->eth = eth;
	rx_napi->rx_ring = &eth->rx_ring[0];
	rx_napi->irq_grp_no = 2;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
		for (i = 0; i < MTK_RX_RSS_NUM; i++) {
			rx_napi = &eth->rx_napi[MTK_RSS_RING(i)];
			rx_napi->eth = eth;
			rx_napi->rx_ring = &eth->rx_ring[MTK_RSS_RING(i)];
			rx_napi->irq_grp_no = 2 + i;
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO)) {
		for (i = 0; i < MTK_HW_LRO_RING_NUM; i++) {
			rx_napi = &eth->rx_napi[MTK_HW_LRO_RING(i)];
			rx_napi->eth = eth;
			rx_napi->rx_ring = &eth->rx_ring[MTK_HW_LRO_RING(i)];
			rx_napi->irq_grp_no = 2;
		}
	}

	return 0;
}

static int mtk_hw_init(struct mtk_eth *eth, u32 type)
{
	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
		       ETHSYS_DMA_AG_MAP_PPE;
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	int i, ret = 0;
	u32 val;

	pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
		atomic_read(&reset_lock), atomic_read(&force));

	if (atomic_read(&reset_lock) == 0) {
		if (test_and_set_bit(MTK_HW_INIT, &eth->state))
			return 0;

		pm_runtime_enable(eth->dev);
		pm_runtime_get_sync(eth->dev);

		ret = mtk_clk_enable(eth);
		if (ret)
			goto err_disable_pm;
	}

	if (eth->ethsys)
		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
				   of_dma_is_coherent(eth->dma_dev->of_node) *
				   dma_mask);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		ret = device_reset(eth->dev);
		if (ret) {
			dev_err(eth->dev, "MAC reset failed!\n");
			goto err_disable_pm;
		}

		/* enable interrupt delay for RX */
		mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);

		/* disable delay and normal interrupt */
		mtk_tx_irq_disable(eth, ~0);
		mtk_rx_irq_disable(eth, ~0);

		return 0;
	}

	pr_info("[%s] execute fe %s reset\n", __func__,
		(type == MTK_TYPE_WARM_RESET) ? "warm" : "cold");

	if (type == MTK_TYPE_WARM_RESET)
		mtk_eth_warm_reset(eth);
	else
		mtk_eth_cold_reset(eth);

	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		mtk_mdc_init(eth);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
		/* Set FE to PDMAv2 if necessary */
		mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
	}

	if (eth->pctl) {
		/* Set GE2 driving and slew rate */
		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

		/* set GE2 TDSEL */
		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

		/* set GE2 TUNE */
		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
	}

	/* Set linkdown as the default for each GMAC. Each MCR is then set
	 * up with a more appropriate value when mtk_mac_config() is
	 * invoked.
	 */
	for (i = 0; i < MTK_MAC_COUNT; i++)
		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));

	/* Enable RX VLAN offloading */
	if (eth->soc->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
	else
		mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);

	/* enable interrupt delay for RX/TX */
	mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
	mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_TX_DELAY_INT0);
		mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_TX_DELAY_INT1);
	}

	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);

	/* FE int grouping */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_w32(eth, MTK_TX_DONE_INT(0), reg_map->qdma.int_grp);
	else
		mtk_w32(eth, MTK_TX_DONE_INT(0), reg_map->pdma.int_grp);
	mtk_w32(eth, MTK_RX_DONE_INT(0), reg_map->qdma.int_grp2);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_PDMA_INT)) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
			mtk_w32(eth, 0x210FFFF2, MTK_FE_INT_GRP);
		else
			mtk_w32(eth, 0xFFFF1FF2, MTK_FE_INT_GRP);
	} else {
		mtk_w32(eth, MTK_RX_DONE_INT(0), reg_map->pdma.int_grp);
		mtk_w32(eth, 0x210F2FF3, MTK_FE_INT_GRP);
	}
	mtk_w32(eth, MTK_FE_INT_TSO_FAIL |
		MTK_FE_INT_TSO_ILLEGAL | MTK_FE_INT_TSO_ALIGN |
		MTK_FE_INT_RFIFO_OV | MTK_FE_INT_RFIFO_UF, MTK_FE_INT_ENABLE);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		/* PSE dummy page mechanism */
		if (eth->soc->caps != MT7988_CAPS || eth->hwver != MTK_HWID_V1)
			mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) |
				PSE_DUMMY_WORK_GDM(2) | PSE_DUMMY_WORK_GDM(3) |
				DUMMY_PAGE_THR, PSE_DUMY_REQ);

		/* PSE should not drop port1, port8 and port9 packets */
		mtk_w32(eth, 0x00000302, PSE_NO_DROP_CFG);

		/* PSE should drop p8 and p9 packets when the WDMA RX ring is full */
		mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);

		/* PSE free buffer drop threshold */
		mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));

		/* GDM and CDM Threshold */
		mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
		mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);

		/* Disable GDM1 RX CRC stripping */
		val = mtk_r32(eth, MTK_GDMA_FWD_CFG(0));
		val &= ~MTK_GDMA_STRP_CRC;
		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(0));

		/* The PSE GDM3 MIB counters have incorrect hw default values,
		 * so the driver ought to read-clear them beforehand in case
		 * ethtool retrieves wrong MIB values.
		 */
		for (i = 0; i < MTK_STAT_OFFSET; i += 0x4)
			mtk_r32(eth,
				MTK_GDM1_TX_GBCNT + MTK_STAT_OFFSET * 2 + i);
	} else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		/* PSE Free Queue Flow Control */
		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);

		/* PSE should not drop port8 and port9 packets from WDMA Tx */
		mtk_w32(eth, 0x00000300, PSE_NO_DROP_CFG);

		/* PSE should drop p8 and p9 packets when the WDMA RX ring is full */
		mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);

		/* PSE config input queue threshold */
		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
		mtk_w32(eth, 0x002a000e, PSE_IQ_REV(8));

		/* PSE config output queue threshold */
		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));

		/* GDM and CDM Threshold */
		mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
	}

	return 0;

err_disable_pm:
	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return ret;
}
4450
4451static int mtk_hw_deinit(struct mtk_eth *eth)
4452{
4453 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
4454 return 0;
4455
4456 mtk_clk_disable(eth);
4457
4458 pm_runtime_put_sync(eth->dev);
4459 pm_runtime_disable(eth->dev);
4460
4461 return 0;
4462}
4463
4464static int __init mtk_init(struct net_device *dev)
4465{
4466 struct mtk_mac *mac = netdev_priv(dev);
4467 struct mtk_eth *eth = mac->hw;
4468 const char *mac_addr;
4469
4470 mac_addr = of_get_mac_address(mac->of_node);
4471 if (!IS_ERR(mac_addr))
4472 ether_addr_copy(dev->dev_addr, mac_addr);
4473
4474 /* If the mac address is invalid, use random mac address */
4475 if (!is_valid_ether_addr(dev->dev_addr)) {
4476 eth_hw_addr_random(dev);
4477 dev_err(eth->dev, "generated random MAC address %pM\n",
4478 dev->dev_addr);
4479 }
4480
4481 return 0;
4482}
4483
4484static void mtk_uninit(struct net_device *dev)
4485{
4486 struct mtk_mac *mac = netdev_priv(dev);
4487 struct mtk_eth *eth = mac->hw;
4488
4489 phylink_disconnect_phy(mac->phylink);
4490 mtk_tx_irq_disable(eth, ~0);
4491 mtk_rx_irq_disable(eth, ~0);
4492}
4493
4494static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4495{
4496 struct mtk_mac *mac = netdev_priv(dev);
4497
4498 switch (cmd) {
4499 case SIOCGMIIPHY:
4500 case SIOCGMIIREG:
4501 case SIOCSMIIREG:
4502 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
4503 default:
4504		/* by default, invoke the mtk_eth_dbg handler */
4505		return mtk_do_priv_ioctl(dev, ifr, cmd);
4507 }
4510}
4511
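/* Power every PHY on the "mdio-bus" child node up (enable != 0) or down
 * (enable == 0) by toggling the BMCR_PDOWN bit in Clause-45 device 0x1e,
 * register 0. Called around a frame-engine reset to quiesce the PHYs.
 */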
developer37482a42022-12-26 13:31:13 +08004512int mtk_phy_config(struct mtk_eth *eth, int enable)
4513{
4514 struct device_node *mii_np = NULL;
4515 struct device_node *child = NULL;
4516 int addr = 0;
4517 u32 val = 0;
4518
4519 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
4520 if (!mii_np) {
4521		dev_err(eth->dev, "no %s child node found\n", "mdio-bus");
4522 return -ENODEV;
4523 }
4524
4525	if (!of_device_is_available(mii_np)) {
4526		dev_err(eth->dev, "device is not available\n");
		of_node_put(mii_np);
4527		return -ENODEV;
4528	}
4529
4530 for_each_available_child_of_node(mii_np, child) {
4531 addr = of_mdio_parse_addr(&eth->mii_bus->dev, child);
4532 if (addr < 0)
4533 continue;
4534 pr_info("%s %d addr:%d name:%s\n",
4535 __func__, __LINE__, addr, child->name);
4536 val = _mtk_mdio_read(eth, addr, mdiobus_c45_addr(0x1e, 0));
4537 if (enable)
4538 val &= ~BMCR_PDOWN;
4539 else
4540 val |= BMCR_PDOWN;
4541 _mtk_mdio_write(eth, addr, mdiobus_c45_addr(0x1e, 0), val);
4542 }
	of_node_put(mii_np);
4543
4544	return 0;
4545}
4546
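/* Deferred frame-engine (FE) reset handler. Rough sequence: verify that a
 * reset-worthy event is pending, send MTK_FE_START_RESET (or
 * MTK_FE_STOP_TRAFFIC) netdevice notifier events and wait on
 * wait_ser_done for the peers' SER handshake, stop all running netdevs,
 * warm-reset the hardware via mtk_hw_init(MTK_TYPE_WARM_RESET), reopen
 * the devices and finally broadcast the matching *_DONE events. The
 * MTK_RESETTING state bit and the reset_lock counter fence off the data
 * path while this runs.
 */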
developerfd40db22021-04-29 10:08:25 +08004547static void mtk_pending_work(struct work_struct *work)
4548{
4549 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
developer8051e042022-04-08 13:26:36 +08004550 int err, i = 0;
developerfd40db22021-04-29 10:08:25 +08004551 unsigned long restart = 0;
developer8051e042022-04-08 13:26:36 +08004552 u32 val = 0;
4553
4554 atomic_inc(&reset_lock);
4555 val = mtk_r32(eth, MTK_FE_INT_STATUS);
4556 if (!mtk_check_reset_event(eth, val)) {
4557 atomic_dec(&reset_lock);
4558		pr_info("[%s] No need to do FE reset!\n", __func__);
4559 return;
4560 }
developerfd40db22021-04-29 10:08:25 +08004561
4562 rtnl_lock();
4563
developer37482a42022-12-26 13:31:13 +08004564 while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
4565 cpu_relax();
4566
4567 mtk_phy_config(eth, 0);
developer8051e042022-04-08 13:26:36 +08004568
4569 /* Adjust PPE configurations to prepare for reset */
4570 mtk_prepare_reset_ppe(eth, 0);
4571 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4572 mtk_prepare_reset_ppe(eth, 1);
4573
4574 /* Adjust FE configurations to prepare for reset */
4575 mtk_prepare_reset_fe(eth);
4576
4577 /* Trigger Wifi SER reset */
developer6bb3f3a2022-11-22 09:59:14 +08004578 for (i = 0; i < MTK_MAC_COUNT; i++) {
4579 if (!eth->netdev[i])
4580 continue;
developer37482a42022-12-26 13:31:13 +08004581 if (mtk_reset_flag == MTK_FE_STOP_TRAFFIC) {
4582 pr_info("send MTK_FE_STOP_TRAFFIC event\n");
4583 call_netdevice_notifiers(MTK_FE_STOP_TRAFFIC,
4584 eth->netdev[i]);
4585 } else {
4586 pr_info("send MTK_FE_START_RESET event\n");
4587 call_netdevice_notifiers(MTK_FE_START_RESET,
4588 eth->netdev[i]);
4589 }
developer6bb3f3a2022-11-22 09:59:14 +08004590 rtnl_unlock();
developer7979ddb2023-04-24 17:19:21 +08004591		if (!wait_for_completion_timeout(&wait_ser_done, msecs_to_jiffies(3000))) {
4592 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) &&
4593 (mtk_stop_fail)) {
4594 pr_info("send MTK_FE_START_RESET stop\n");
4595 rtnl_lock();
4596 call_netdevice_notifiers(MTK_FE_START_RESET,
4597 eth->netdev[i]);
4598 rtnl_unlock();
4599				if (!wait_for_completion_timeout(&wait_ser_done,
							msecs_to_jiffies(3000)))
4601 pr_warn("wait for MTK_FE_START_RESET\n");
4602 }
developer0baa6962023-01-31 14:25:23 +08004603 pr_warn("wait for MTK_FE_START_RESET\n");
developer7979ddb2023-04-24 17:19:21 +08004604 }
developer6bb3f3a2022-11-22 09:59:14 +08004605 rtnl_lock();
4606 break;
4607 }
developerfd40db22021-04-29 10:08:25 +08004608
developer8051e042022-04-08 13:26:36 +08004609 del_timer_sync(&eth->mtk_dma_monitor_timer);
4610	pr_info("[%s] mtk_stop starts\n", __func__);
developerfd40db22021-04-29 10:08:25 +08004611 /* stop all devices to make sure that dma is properly shut down */
4612 for (i = 0; i < MTK_MAC_COUNT; i++) {
developer59305712023-12-01 09:08:07 +08004613 if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
developerfd40db22021-04-29 10:08:25 +08004614 continue;
4615 mtk_stop(eth->netdev[i]);
4616 __set_bit(i, &restart);
4617 }
developer8051e042022-04-08 13:26:36 +08004618	pr_info("[%s] mtk_stop ends\n", __func__);
4619 mdelay(15);
developerfd40db22021-04-29 10:08:25 +08004620
4621 if (eth->dev->pins)
4622 pinctrl_select_state(eth->dev->pins->p,
4623 eth->dev->pins->default_state);
developer8051e042022-04-08 13:26:36 +08004624
4625	pr_info("[%s] mtk_hw_init starts\n", __func__);
4626	mtk_hw_init(eth, MTK_TYPE_WARM_RESET);
4627	pr_info("[%s] mtk_hw_init ends\n", __func__);
developerfd40db22021-04-29 10:08:25 +08004628
4629 /* restart DMA and enable IRQs */
4630 for (i = 0; i < MTK_MAC_COUNT; i++) {
developer6bb3f3a2022-11-22 09:59:14 +08004631 if (!test_bit(i, &restart) || !eth->netdev[i])
developerfd40db22021-04-29 10:08:25 +08004632 continue;
4633 err = mtk_open(eth->netdev[i]);
4634 if (err) {
4635 netif_alert(eth, ifup, eth->netdev[i],
4636 "Driver up/down cycle failed, closing device.\n");
4637 dev_close(eth->netdev[i]);
4638 }
4639 }
4640
developer8051e042022-04-08 13:26:36 +08004641 for (i = 0; i < MTK_MAC_COUNT; i++) {
developer6bb3f3a2022-11-22 09:59:14 +08004642 if (!eth->netdev[i])
4643 continue;
developer37482a42022-12-26 13:31:13 +08004644 if (mtk_reset_flag == MTK_FE_STOP_TRAFFIC) {
4645 pr_info("send MTK_FE_START_TRAFFIC event\n");
4646 call_netdevice_notifiers(MTK_FE_START_TRAFFIC,
4647 eth->netdev[i]);
4648 } else {
4649 pr_info("send MTK_FE_RESET_DONE event\n");
4650 call_netdevice_notifiers(MTK_FE_RESET_DONE,
4651 eth->netdev[i]);
developer8051e042022-04-08 13:26:36 +08004652 }
developer37482a42022-12-26 13:31:13 +08004653 call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE,
4654 eth->netdev[i]);
developer6bb3f3a2022-11-22 09:59:14 +08004655 break;
4656 }
developer8051e042022-04-08 13:26:36 +08004657
4658 atomic_dec(&reset_lock);
developer8051e042022-04-08 13:26:36 +08004659
4660 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
4661 eth->mtk_dma_monitor_timer.expires = jiffies;
4662 add_timer(&eth->mtk_dma_monitor_timer);
developer37482a42022-12-26 13:31:13 +08004663
4664 mtk_phy_config(eth, 1);
4665 mtk_reset_flag = 0;
developerfd40db22021-04-29 10:08:25 +08004666 clear_bit_unlock(MTK_RESETTING, &eth->state);
4667
4668 rtnl_unlock();
4669}
4670
4671static int mtk_free_dev(struct mtk_eth *eth)
4672{
4673 int i;
4674
4675 for (i = 0; i < MTK_MAC_COUNT; i++) {
4676 if (!eth->netdev[i])
4677 continue;
4678 free_netdev(eth->netdev[i]);
4679 }
4680
4681 return 0;
4682}
4683
4684static int mtk_unreg_dev(struct mtk_eth *eth)
4685{
4686 int i;
4687
4688 for (i = 0; i < MTK_MAC_COUNT; i++) {
developer90270572024-01-30 09:26:15 +08004689 struct mtk_mac *mac;
developerfd40db22021-04-29 10:08:25 +08004690 if (!eth->netdev[i])
4691 continue;
developer90270572024-01-30 09:26:15 +08004692 mac = netdev_priv(eth->netdev[i]);
4693 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4694 unregister_netdevice_notifier(&mac->device_notifier);
developerfd40db22021-04-29 10:08:25 +08004695 unregister_netdev(eth->netdev[i]);
4696 }
4697
4698 return 0;
4699}
4700
4701static int mtk_cleanup(struct mtk_eth *eth)
4702{
4703 mtk_unreg_dev(eth);
4704 mtk_free_dev(eth);
4705 cancel_work_sync(&eth->pending_work);
4706
4707 return 0;
4708}
4709
4710static int mtk_get_link_ksettings(struct net_device *ndev,
4711 struct ethtool_link_ksettings *cmd)
4712{
4713 struct mtk_mac *mac = netdev_priv(ndev);
4714
4715 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4716 return -EBUSY;
4717
4718 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4719}
4720
4721static int mtk_set_link_ksettings(struct net_device *ndev,
4722 const struct ethtool_link_ksettings *cmd)
4723{
4724 struct mtk_mac *mac = netdev_priv(ndev);
4725
4726 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4727 return -EBUSY;
4728
4729 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4730}
4731
4732static void mtk_get_drvinfo(struct net_device *dev,
4733 struct ethtool_drvinfo *info)
4734{
4735 struct mtk_mac *mac = netdev_priv(dev);
4736
4737 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4738 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4739 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4740}
4741
4742static u32 mtk_get_msglevel(struct net_device *dev)
4743{
4744 struct mtk_mac *mac = netdev_priv(dev);
4745
4746 return mac->hw->msg_enable;
4747}
4748
4749static void mtk_set_msglevel(struct net_device *dev, u32 value)
4750{
4751 struct mtk_mac *mac = netdev_priv(dev);
4752
4753 mac->hw->msg_enable = value;
4754}
4755
4756static int mtk_nway_reset(struct net_device *dev)
4757{
4758 struct mtk_mac *mac = netdev_priv(dev);
4759
4760 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4761 return -EBUSY;
4762
4763 if (!mac->phylink)
4764 return -ENOTSUPP;
4765
4766 return phylink_ethtool_nway_reset(mac->phylink);
4767}
4768
4769static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4770{
4771 int i;
4772
4773 switch (stringset) {
4774 case ETH_SS_STATS:
4775 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
4776 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
4777 data += ETH_GSTRING_LEN;
4778 }
4779 break;
4780 }
4781}
4782
4783static int mtk_get_sset_count(struct net_device *dev, int sset)
4784{
4785 switch (sset) {
4786 case ETH_SS_STATS:
4787 return ARRAY_SIZE(mtk_ethtool_stats);
4788 default:
4789 return -EOPNOTSUPP;
4790 }
4791}
4792
4793static void mtk_get_ethtool_stats(struct net_device *dev,
4794 struct ethtool_stats *stats, u64 *data)
4795{
4796 struct mtk_mac *mac = netdev_priv(dev);
4797 struct mtk_hw_stats *hwstats = mac->hw_stats;
4798 u64 *data_src, *data_dst;
4799 unsigned int start;
4800 int i;
4801
4802 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4803 return;
4804
4805 if (netif_running(dev) && netif_device_present(dev)) {
4806 if (spin_trylock_bh(&hwstats->stats_lock)) {
4807 mtk_stats_update_mac(mac);
4808 spin_unlock_bh(&hwstats->stats_lock);
4809 }
4810 }
4811
4812 data_src = (u64 *)hwstats;
4813
4814 do {
4815 data_dst = data;
4816 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
4817
4818 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4819 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4820 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
4821}
4822
4823static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4824 u32 *rule_locs)
4825{
developerea49c302023-06-27 16:06:41 +08004826 struct mtk_mac *mac = netdev_priv(dev);
4827 struct mtk_eth *eth = mac->hw;
developerfd40db22021-04-29 10:08:25 +08004828 int ret = -EOPNOTSUPP;
4829
4830 switch (cmd->cmd) {
4831 case ETHTOOL_GRXRINGS:
4832 if (dev->hw_features & NETIF_F_LRO) {
4833 cmd->data = MTK_MAX_RX_RING_NUM;
4834 ret = 0;
developerea49c302023-06-27 16:06:41 +08004835 } else if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
4836 cmd->data = eth->soc->rss_num;
4837 ret = 0;
developerfd40db22021-04-29 10:08:25 +08004838 }
4839 break;
4840 case ETHTOOL_GRXCLSRLCNT:
4841 if (dev->hw_features & NETIF_F_LRO) {
developerfd40db22021-04-29 10:08:25 +08004842 cmd->rule_cnt = mac->hwlro_ip_cnt;
4843 ret = 0;
4844 }
4845 break;
4846 case ETHTOOL_GRXCLSRULE:
4847 if (dev->hw_features & NETIF_F_LRO)
4848 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4849 break;
4850 case ETHTOOL_GRXCLSRLALL:
4851 if (dev->hw_features & NETIF_F_LRO)
4852 ret = mtk_hwlro_get_fdir_all(dev, cmd,
4853 rule_locs);
4854 break;
4855 default:
4856 break;
4857 }
4858
4859 return ret;
4860}
4861
4862static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4863{
4864 int ret = -EOPNOTSUPP;
4865
4866 switch (cmd->cmd) {
4867 case ETHTOOL_SRXCLSRLINS:
4868 if (dev->hw_features & NETIF_F_LRO)
4869 ret = mtk_hwlro_add_ipaddr(dev, cmd);
4870 break;
4871 case ETHTOOL_SRXCLSRLDEL:
4872 if (dev->hw_features & NETIF_F_LRO)
4873 ret = mtk_hwlro_del_ipaddr(dev, cmd);
4874 break;
4875 default:
4876 break;
4877 }
4878
4879 return ret;
4880}
4881
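/* ethtool RSS hooks. The hash function is fixed to Toeplitz; the key and
 * the indirection table are cached in eth->rss_params and written through
 * to the MTK_RSS_HASH_KEY_DW() / MTK_RSS_INDR_TABLE_DW() registers on
 * updates.
 */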
developerea49c302023-06-27 16:06:41 +08004882static u32 mtk_get_rxfh_key_size(struct net_device *dev)
4883{
4884 return MTK_RSS_HASH_KEYSIZE;
4885}
4886
4887static u32 mtk_get_rxfh_indir_size(struct net_device *dev)
4888{
4889 return MTK_RSS_MAX_INDIRECTION_TABLE;
4890}
4891
4892static int mtk_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
4893 u8 *hfunc)
4894{
4895 struct mtk_mac *mac = netdev_priv(dev);
4896 struct mtk_eth *eth = mac->hw;
4897 struct mtk_rss_params *rss_params = &eth->rss_params;
4898 int i;
4899
4900 if (hfunc)
4901 *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
4902
4903 if (key) {
4904 memcpy(key, rss_params->hash_key,
4905 sizeof(rss_params->hash_key));
4906 }
4907
4908 if (indir) {
4909 for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE; i++)
4910 indir[i] = rss_params->indirection_table[i];
4911 }
4912
4913 return 0;
4914}
4915
4916static int mtk_set_rxfh(struct net_device *dev, const u32 *indir,
4917 const u8 *key, const u8 hfunc)
4918{
4919 struct mtk_mac *mac = netdev_priv(dev);
4920 struct mtk_eth *eth = mac->hw;
4921 struct mtk_rss_params *rss_params = &eth->rss_params;
4922 int i;
4923
4924 if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
4925 hfunc != ETH_RSS_HASH_TOP)
4926 return -EOPNOTSUPP;
4927
4928 if (key) {
4929 memcpy(rss_params->hash_key, key,
4930 sizeof(rss_params->hash_key));
4931
4932 for (i = 0; i < MTK_RSS_HASH_KEYSIZE / sizeof(u32); i++)
4933 mtk_w32(eth, rss_params->hash_key[i],
4934 MTK_RSS_HASH_KEY_DW(i));
4935 }
4936
4937 if (indir) {
4938 for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE; i++)
4939 rss_params->indirection_table[i] = indir[i];
4940
4941 for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE / 16; i++)
4942 mtk_w32(eth, mtk_rss_indr_table(rss_params, i),
4943 MTK_RSS_INDR_TABLE_DW(i));
4944 }
4945
4946 return 0;
4947}
4948
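/* Report the forced flow-control bits straight from the MAC registers:
 * MAC_MCR for GDM-type MACs, XMAC_MCR for XGDM-type MACs. Pause
 * autonegotiation state is not reflected here.
 */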
developer6c5cbb52022-08-12 11:37:45 +08004949static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4950{
4951 struct mtk_mac *mac = netdev_priv(dev);
developerf2823bb2022-12-29 18:20:14 +08004952 struct mtk_eth *eth = mac->hw;
4953 u32 val;
4954
4955 pause->autoneg = 0;
4956
4957 if (mac->type == MTK_GDM_TYPE) {
4958 val = mtk_r32(eth, MTK_MAC_MCR(mac->id));
4959
4960 pause->rx_pause = !!(val & MAC_MCR_FORCE_RX_FC);
4961 pause->tx_pause = !!(val & MAC_MCR_FORCE_TX_FC);
4962 } else if (mac->type == MTK_XGDM_TYPE) {
4963 val = mtk_r32(eth, MTK_XMAC_MCR(mac->id));
developer6c5cbb52022-08-12 11:37:45 +08004964
developerf2823bb2022-12-29 18:20:14 +08004965 pause->rx_pause = !!(val & XMAC_MCR_FORCE_RX_FC);
4966 pause->tx_pause = !!(val & XMAC_MCR_FORCE_TX_FC);
4967 }
developer6c5cbb52022-08-12 11:37:45 +08004968}
4969
4970static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4971{
4972 struct mtk_mac *mac = netdev_priv(dev);
4973
4974 return phylink_ethtool_set_pauseparam(mac->phylink, pause);
4975}
4976
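/* EEE hooks. On GDM-type MACs the LPI TX idle threshold is kept in the
 * MTK_MAC_EEE register; mtk_set_eee() rejects timer values above 255
 * before handing the setting to mtk_setup_eee().
 */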
developer9b725932022-11-24 16:25:56 +08004977static int mtk_get_eee(struct net_device *dev, struct ethtool_eee *eee)
4978{
4979 struct mtk_mac *mac = netdev_priv(dev);
4980 struct mtk_eth *eth = mac->hw;
4981 u32 val;
4982
4983 if (mac->type == MTK_GDM_TYPE) {
4984 val = mtk_r32(eth, MTK_MAC_EEE(mac->id));
4985
4986 eee->tx_lpi_enabled = mac->tx_lpi_enabled;
4987 eee->tx_lpi_timer = FIELD_GET(MAC_EEE_LPI_TXIDLE_THD, val);
4988 }
4989
4990 return phylink_ethtool_get_eee(mac->phylink, eee);
4991}
4992
4993static int mtk_set_eee(struct net_device *dev, struct ethtool_eee *eee)
4994{
4995 struct mtk_mac *mac = netdev_priv(dev);
developer9b725932022-11-24 16:25:56 +08004996
4997 if (mac->type == MTK_GDM_TYPE) {
4998 if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
4999 return -EINVAL;
5000
5001 mac->tx_lpi_timer = eee->tx_lpi_timer;
5002
5003 mtk_setup_eee(mac, eee->eee_enabled && eee->tx_lpi_timer);
5004 }
5005
5006 return phylink_ethtool_set_eee(mac->phylink, eee);
5007}
5008
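/* Pick the QDMA TX queue for an egress skb. A non-zero skb->mark below
 * MTK_QDMA_TX_NUM selects that queue directly. With eth->pppq_toggle set,
 * GMAC1 keeps the queue chosen by the stack while GMAC2/GMAC3 use their
 * dedicated QIDs; otherwise GMAC2 maps to MTK_QDMA_GMAC2_QID and
 * everything else to queue 0.
 */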
developer90270572024-01-30 09:26:15 +08005009static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
5010 struct net_device *sb_dev)
5011{
5012 struct mtk_mac *mac = netdev_priv(dev);
5013 struct mtk_eth *eth = mac->hw;
5014 unsigned int queue = 0;
5015
5016 if (skb->mark > 0 && skb->mark < MTK_QDMA_TX_NUM)
5017 return skb->mark;
5018
5019 if (eth->pppq_toggle) {
5020 switch (mac->id) {
5021 case MTK_GMAC1_ID:
5022 queue = skb_get_queue_mapping(skb);
5023 break;
5024 case MTK_GMAC2_ID:
5025 queue = MTK_QDMA_GMAC2_QID;
5026 break;
5027 case MTK_GMAC3_ID:
5028 queue = MTK_QDMA_GMAC3_QID;
5029 break;
5030 default:
5031			pr_info("%s: invalid mac id\n", __func__);
5032 break;
5033 }
5034	} else {
5035		queue = mac->id ? MTK_QDMA_GMAC2_QID : 0;
	}
5036
5037 if (queue >= MTK_QDMA_TX_NUM)
5038 queue = 0;
5039
5040 return queue;
5041}
5042
developerfd40db22021-04-29 10:08:25 +08005043static const struct ethtool_ops mtk_ethtool_ops = {
5044 .get_link_ksettings = mtk_get_link_ksettings,
5045 .set_link_ksettings = mtk_set_link_ksettings,
5046 .get_drvinfo = mtk_get_drvinfo,
5047 .get_msglevel = mtk_get_msglevel,
5048 .set_msglevel = mtk_set_msglevel,
5049 .nway_reset = mtk_nway_reset,
5050 .get_link = ethtool_op_get_link,
5051 .get_strings = mtk_get_strings,
5052 .get_sset_count = mtk_get_sset_count,
5053 .get_ethtool_stats = mtk_get_ethtool_stats,
5054 .get_rxnfc = mtk_get_rxnfc,
5055 .set_rxnfc = mtk_set_rxnfc,
developerea49c302023-06-27 16:06:41 +08005056 .get_rxfh_key_size = mtk_get_rxfh_key_size,
5057 .get_rxfh_indir_size = mtk_get_rxfh_indir_size,
5058 .get_rxfh = mtk_get_rxfh,
5059 .set_rxfh = mtk_set_rxfh,
developer6c5cbb52022-08-12 11:37:45 +08005060 .get_pauseparam = mtk_get_pauseparam,
5061 .set_pauseparam = mtk_set_pauseparam,
developer9b725932022-11-24 16:25:56 +08005062 .get_eee = mtk_get_eee,
5063 .set_eee = mtk_set_eee,
developerfd40db22021-04-29 10:08:25 +08005064};
5065
5066static const struct net_device_ops mtk_netdev_ops = {
5067 .ndo_init = mtk_init,
5068 .ndo_uninit = mtk_uninit,
5069 .ndo_open = mtk_open,
5070 .ndo_stop = mtk_stop,
5071 .ndo_start_xmit = mtk_start_xmit,
developer90270572024-01-30 09:26:15 +08005072 .ndo_select_queue = mtk_select_queue,
developerfd40db22021-04-29 10:08:25 +08005073 .ndo_set_mac_address = mtk_set_mac_address,
5074 .ndo_validate_addr = eth_validate_addr,
5075 .ndo_do_ioctl = mtk_do_ioctl,
5076 .ndo_tx_timeout = mtk_tx_timeout,
5077 .ndo_get_stats64 = mtk_get_stats64,
5078 .ndo_fix_features = mtk_fix_features,
5079 .ndo_set_features = mtk_set_features,
5080#ifdef CONFIG_NET_POLL_CONTROLLER
5081 .ndo_poll_controller = mtk_poll_controller,
5082#endif
5083};
5084
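/* Delayed work that polls an ethernet mux: gpio[0] ("mod-def0") senses
 * the active channel, gpio[1] ("chan-sel") steers it. On a channel change
 * the netdev is stopped, the per-channel of_node/phylink pair is swapped
 * in and the device is reopened. The work rearms itself every 100 ms.
 */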
developerb6c36bf2023-09-07 12:05:01 +08005085static void mux_poll(struct work_struct *work)
5086{
5087 struct mtk_mux *mux = container_of(work, struct mtk_mux, poll.work);
5088 struct mtk_mac *mac = mux->mac;
5089 struct mtk_eth *eth = mac->hw;
5090 struct net_device *dev = eth->netdev[mac->id];
5091 unsigned int channel;
5092
5093 if (IS_ERR(mux->gpio[0]) || IS_ERR(mux->gpio[1]))
5094 goto exit;
5095
5096 channel = gpiod_get_value_cansleep(mux->gpio[0]);
5097 if (mux->channel == channel || !netif_running(dev))
5098 goto exit;
5099
5100 rtnl_lock();
5101
5102 mtk_stop(dev);
5103
5104 if (channel == 0 || channel == 1) {
5105 mac->of_node = mux->data[channel]->of_node;
5106 mac->phylink = mux->data[channel]->phylink;
5107	}
5108
5109	dev_info(eth->dev, "ethernet mux: switch to channel %d\n", channel);
5110
5111 gpiod_set_value_cansleep(mux->gpio[1], channel);
5112
5113 mtk_open(dev);
5114
5115 rtnl_unlock();
5116
5117 mux->channel = channel;
5118
5119exit:
5120 mod_delayed_work(system_wq, &mux->poll, msecs_to_jiffies(100));
5121}
5122
5123static int mtk_add_mux_channel(struct mtk_mux *mux, struct device_node *np)
5124{
5125 const __be32 *_id = of_get_property(np, "reg", NULL);
5126 struct mtk_mac *mac = mux->mac;
5127 struct mtk_eth *eth = mac->hw;
5128 struct mtk_mux_data *data;
5129 struct phylink *phylink;
5130 int phy_mode, id;
5131
5132 if (!_id) {
5133 dev_err(eth->dev, "missing mux channel id\n");
5134 return -EINVAL;
5135 }
5136
5137 id = be32_to_cpup(_id);
5138 if (id < 0 || id > 1) {
5139 dev_err(eth->dev, "%d is not a valid mux channel id\n", id);
5140 return -EINVAL;
5141 }
5142
5143 data = kmalloc(sizeof(*data), GFP_KERNEL);
5144 if (unlikely(!data)) {
5145 dev_err(eth->dev, "failed to create mux data structure\n");
5146 return -ENOMEM;
5147 }
5148
5149 mux->data[id] = data;
5150
5151 /* phylink create */
5152 phy_mode = of_get_phy_mode(np);
5153 if (phy_mode < 0) {
5154 dev_err(eth->dev, "incorrect phy-mode\n");
5155 return -EINVAL;
5156 }
5157
5158 phylink = phylink_create(&mux->mac->phylink_config,
5159 of_fwnode_handle(np),
5160 phy_mode, &mtk_phylink_ops);
5161 if (IS_ERR(phylink)) {
5162 dev_err(eth->dev, "failed to create phylink structure\n");
5163 return PTR_ERR(phylink);
5164 }
5165
5166 data->of_node = np;
5167 data->phylink = phylink;
5168
5169 return 0;
5170}
5171
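/* Set up one "mediatek,eth-mux" node: allocate the mtk_mux for the MAC
 * named by its "reg" property, claim the mod-def0/chan-sel GPIOs, create
 * a phylink instance per child channel and schedule the first mux_poll()
 * run.
 */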
5172static int mtk_add_mux(struct mtk_eth *eth, struct device_node *np)
5173{
5174 const __be32 *_id = of_get_property(np, "reg", NULL);
5175 struct device_node *child;
5176 struct mtk_mux *mux;
developere48f1362023-09-27 15:03:04 +08005177 int id, err;
developerb6c36bf2023-09-07 12:05:01 +08005178
5179 if (!_id) {
5180 dev_err(eth->dev, "missing attach mac id\n");
5181 return -EINVAL;
5182 }
5183
5184 id = be32_to_cpup(_id);
5185 if (id < 0 || id >= MTK_MAX_DEVS) {
5186 dev_err(eth->dev, "%d is not a valid attach mac id\n", id);
5187 return -EINVAL;
5188 }
5189
	/* zero-init so unpopulated channel slots in mux->data[] stay NULL */
5190	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
5191 if (unlikely(!mux)) {
5192 dev_err(eth->dev, "failed to create mux structure\n");
5193 return -ENOMEM;
5194 }
5195
5196 eth->mux[id] = mux;
5197
5198 mux->mac = eth->mac[id];
5199 mux->channel = 0;
5200
5201 mux->gpio[0] = fwnode_get_named_gpiod(of_fwnode_handle(np),
5202 "mod-def0-gpios", 0,
5203 GPIOD_IN, "?");
5204 if (IS_ERR(mux->gpio[0]))
5205		dev_err(eth->dev, "failed to request gpio for mod-def0-gpios\n");
5206
5207 mux->gpio[1] = fwnode_get_named_gpiod(of_fwnode_handle(np),
5208 "chan-sel-gpios", 0,
5209 GPIOD_OUT_LOW, "?");
5210 if (IS_ERR(mux->gpio[1]))
5211		dev_err(eth->dev, "failed to request gpio for chan-sel-gpios\n");
5212
5213 for_each_child_of_node(np, child) {
5214 err = mtk_add_mux_channel(mux, child);
5215 if (err) {
5216 dev_err(eth->dev, "failed to add mtk_mux\n");
5217 of_node_put(child);
5218 return -ECHILD;
5219 }
5220 of_node_put(child);
5221 }
5222
5223 INIT_DELAYED_WORK(&mux->poll, mux_poll);
5224 mod_delayed_work(system_wq, &mux->poll, msecs_to_jiffies(3000));
5225
5226 return 0;
5227}
5228
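/* Instantiate one "mediatek,eth-mac" DT node: allocate the net_device
 * (multi-queue when the SoC has QDMA), hardware counters and phylink
 * instance, parse the optional mac-type and fixed-link/link-gpio
 * properties and wire up the netdev/ethtool ops. Registration happens
 * later in mtk_probe().
 */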
developerfd40db22021-04-29 10:08:25 +08005229static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
5230{
5231 const __be32 *_id = of_get_property(np, "reg", NULL);
developer30e13e72022-11-03 10:21:24 +08005232 const char *label;
developerfd40db22021-04-29 10:08:25 +08005233 struct phylink *phylink;
developer30e13e72022-11-03 10:21:24 +08005234 int mac_type, phy_mode, id, err;
developerfd40db22021-04-29 10:08:25 +08005235 struct mtk_mac *mac;
developera2613e62022-07-01 18:29:37 +08005236 struct mtk_phylink_priv *phylink_priv;
5237 struct fwnode_handle *fixed_node;
5238 struct gpio_desc *desc;
developer90270572024-01-30 09:26:15 +08005239 int txqs = 1;
developerfd40db22021-04-29 10:08:25 +08005240
5241 if (!_id) {
5242 dev_err(eth->dev, "missing mac id\n");
5243 return -EINVAL;
5244 }
5245
5246 id = be32_to_cpup(_id);
developerfb556ca2021-10-13 10:52:09 +08005247 if (id < 0 || id >= MTK_MAC_COUNT) {
developerfd40db22021-04-29 10:08:25 +08005248 dev_err(eth->dev, "%d is not a valid mac id\n", id);
5249 return -EINVAL;
5250 }
5251
5252 if (eth->netdev[id]) {
5253 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
5254 return -EINVAL;
5255 }
5256
developer90270572024-01-30 09:26:15 +08005257 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
5258 txqs = MTK_QDMA_TX_NUM;
5259
5260 eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
developerfd40db22021-04-29 10:08:25 +08005261 if (!eth->netdev[id]) {
5262 dev_err(eth->dev, "alloc_etherdev failed\n");
5263 return -ENOMEM;
5264 }
5265 mac = netdev_priv(eth->netdev[id]);
5266 eth->mac[id] = mac;
5267 mac->id = id;
5268 mac->hw = eth;
5269 mac->of_node = np;
5270
5271 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
5272 mac->hwlro_ip_cnt = 0;
5273
5274 mac->hw_stats = devm_kzalloc(eth->dev,
5275 sizeof(*mac->hw_stats),
5276 GFP_KERNEL);
5277 if (!mac->hw_stats) {
5278 dev_err(eth->dev, "failed to allocate counter memory\n");
5279 err = -ENOMEM;
5280 goto free_netdev;
5281 }
5282 spin_lock_init(&mac->hw_stats->stats_lock);
5283 u64_stats_init(&mac->hw_stats->syncp);
5284 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
5285
5286 /* phylink create */
5287 phy_mode = of_get_phy_mode(np);
5288 if (phy_mode < 0) {
5289 dev_err(eth->dev, "incorrect phy-mode\n");
5290 err = -EINVAL;
5291 goto free_netdev;
5292 }
5293
5294 /* mac config is not set */
5295 mac->interface = PHY_INTERFACE_MODE_NA;
5296 mac->mode = MLO_AN_PHY;
5297 mac->speed = SPEED_UNKNOWN;
5298
developer9b725932022-11-24 16:25:56 +08005299 mac->tx_lpi_timer = 1;
5300
developerfd40db22021-04-29 10:08:25 +08005301 mac->phylink_config.dev = &eth->netdev[id]->dev;
5302 mac->phylink_config.type = PHYLINK_NETDEV;
5303
developer30e13e72022-11-03 10:21:24 +08005304 mac->type = 0;
5305 if (!of_property_read_string(np, "mac-type", &label)) {
5306 for (mac_type = 0; mac_type < MTK_GDM_TYPE_MAX; mac_type++) {
5307 if (!strcasecmp(label, gdm_type(mac_type)))
5308 break;
5309 }
5310
5311 switch (mac_type) {
5312 case 0:
5313 mac->type = MTK_GDM_TYPE;
5314 break;
5315 case 1:
5316 mac->type = MTK_XGDM_TYPE;
5317 break;
5318 default:
5319 dev_warn(eth->dev, "incorrect mac-type\n");
5320 break;
5321		}
5322 }
developer089e8852022-09-28 14:43:46 +08005323
developerfd40db22021-04-29 10:08:25 +08005324 phylink = phylink_create(&mac->phylink_config,
5325 of_fwnode_handle(mac->of_node),
5326 phy_mode, &mtk_phylink_ops);
5327 if (IS_ERR(phylink)) {
5328 err = PTR_ERR(phylink);
5329 goto free_netdev;
5330 }
5331
5332 mac->phylink = phylink;
5333
developera2613e62022-07-01 18:29:37 +08005334 fixed_node = fwnode_get_named_child_node(of_fwnode_handle(mac->of_node),
5335 "fixed-link");
5336 if (fixed_node) {
5337 desc = fwnode_get_named_gpiod(fixed_node, "link-gpio",
5338 0, GPIOD_IN, "?");
5339 if (!IS_ERR(desc)) {
5340 struct device_node *phy_np;
5341 const char *label;
5342 int irq, phyaddr;
5343
5344 phylink_priv = &mac->phylink_priv;
5345
5346 phylink_priv->desc = desc;
5347 phylink_priv->id = id;
5348 phylink_priv->link = -1;
5349
5350 irq = gpiod_to_irq(desc);
5351 if (irq > 0) {
5352 devm_request_irq(eth->dev, irq, mtk_handle_irq_fixed_link,
5353 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
5354 "ethernet:fixed link", mac);
5355 }
5356
developer8b6f2402022-11-28 13:42:34 +08005357 if (!of_property_read_string(to_of_node(fixed_node),
5358 "label", &label)) {
				/* strscpy(), unlike strncpy(), guarantees the
				 * copied label is NUL-terminated
				 */
				if (strlen(label) < 16)
					strscpy(phylink_priv->label, label, 16);
				else
					dev_err(eth->dev, "insufficient space for label!\n");
			}
developera2613e62022-07-01 18:29:37 +08005365
5366 phy_np = of_parse_phandle(to_of_node(fixed_node), "phy-handle", 0);
5367 if (phy_np) {
5368 if (!of_property_read_u32(phy_np, "reg", &phyaddr))
5369 phylink_priv->phyaddr = phyaddr;
5370 }
5371 }
5372 fwnode_handle_put(fixed_node);
5373 }
5374
developerfd40db22021-04-29 10:08:25 +08005375 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
5376 eth->netdev[id]->watchdog_timeo = 5 * HZ;
5377 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
5378 eth->netdev[id]->base_addr = (unsigned long)eth->base;
5379
5380 eth->netdev[id]->hw_features = eth->soc->hw_features;
5381 if (eth->hwlro)
5382 eth->netdev[id]->hw_features |= NETIF_F_LRO;
5383
5384 eth->netdev[id]->vlan_features = eth->soc->hw_features &
5385 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
5386 eth->netdev[id]->features |= eth->soc->hw_features;
5387 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
5388
developer94806ec2023-05-19 14:16:44 +08005389 eth->netdev[id]->irq = eth->irq_fe[0];
developerfd40db22021-04-29 10:08:25 +08005390 eth->netdev[id]->dev.of_node = np;
5391
developer90270572024-01-30 09:26:15 +08005392 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
5393 mac->device_notifier.notifier_call = mtk_device_event;
5394 register_netdevice_notifier(&mac->device_notifier);
5395 }
5396
developerfd40db22021-04-29 10:08:25 +08005397 return 0;
5398
5399free_netdev:
5400 free_netdev(eth->netdev[id]);
5401 return err;
5402}
5403
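/* Swap the struct device used for DMA mappings. Every running netdev is
 * closed first so its rings are unmapped from the old device, then
 * reopened against the new one; the rtnl lock serialises the transition.
 */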
developer3f28d382023-03-07 16:06:30 +08005404void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
5405{
5406 struct net_device *dev, *tmp;
5407 LIST_HEAD(dev_list);
5408 int i;
5409
5410 rtnl_lock();
5411
5412 for (i = 0; i < MTK_MAC_COUNT; i++) {
5413 dev = eth->netdev[i];
5414
5415 if (!dev || !(dev->flags & IFF_UP))
5416 continue;
5417
5418 list_add_tail(&dev->close_list, &dev_list);
5419 }
5420
5421 dev_close_many(&dev_list, false);
5422
5423 eth->dma_dev = dma_dev;
5424
5425 list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
5426 list_del_init(&dev->close_list);
5427 dev_open(dev, NULL);
5428 }
5429
5430 rtnl_unlock();
5431}
5432
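/* Platform probe: map the register (and, on some SoCs, SRAM) resources,
 * look up the syscon regmaps, clocks and IRQ lines, cold-reset the
 * hardware, create one netdev per "mediatek,eth-mac" child plus any
 * "mux-bus" channels, request the FE/PDMA interrupts and register the
 * NAPI contexts and debugfs entries.
 */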
developerfd40db22021-04-29 10:08:25 +08005433static int mtk_probe(struct platform_device *pdev)
5434{
developerb6c36bf2023-09-07 12:05:01 +08005435 struct device_node *mac_np, *mux_np;
developerfd40db22021-04-29 10:08:25 +08005436 struct mtk_eth *eth;
5437 int err, i;
5438
5439 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
5440 if (!eth)
5441 return -ENOMEM;
5442
5443 eth->soc = of_device_get_match_data(&pdev->dev);
5444
5445 eth->dev = &pdev->dev;
developer3f28d382023-03-07 16:06:30 +08005446 eth->dma_dev = &pdev->dev;
developerfd40db22021-04-29 10:08:25 +08005447 eth->base = devm_platform_ioremap_resource(pdev, 0);
5448 if (IS_ERR(eth->base))
5449 return PTR_ERR(eth->base);
5450
developer089e8852022-09-28 14:43:46 +08005451 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developera7fbeec2024-02-02 13:56:07 +08005452 eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
developer089e8852022-09-28 14:43:46 +08005453 if (IS_ERR(eth->sram_base))
5454 return PTR_ERR(eth->sram_base);
developera7fbeec2024-02-02 13:56:07 +08005455 } else {
5456 eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
developer089e8852022-09-28 14:43:46 +08005457 }
5458
developerfd40db22021-04-29 10:08:25 +08005459 if(eth->soc->has_sram) {
5460 struct resource *res;
5461 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
developer4c32b7a2021-11-13 16:46:43 +08005462 if (unlikely(!res))
5463 return -EINVAL;
developerfd40db22021-04-29 10:08:25 +08005464 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
5465 }
5466
developer0fef5222023-04-26 14:48:31 +08005467 mtk_get_hwver(eth);
5468
developer68ce74f2023-01-03 16:11:57 +08005469 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
developerfd40db22021-04-29 10:08:25 +08005470 eth->ip_align = NET_IP_ALIGN;
developerfd40db22021-04-29 10:08:25 +08005471
developer089e8852022-09-28 14:43:46 +08005472 if (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) {
5473 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
5474 if (!err) {
5475 err = dma_set_coherent_mask(&pdev->dev,
developerf459e682023-10-24 23:07:17 +08005476 DMA_BIT_MASK(32));
developer089e8852022-09-28 14:43:46 +08005477 if (err) {
5478 dev_err(&pdev->dev, "Wrong DMA config\n");
5479 return -EINVAL;
5480 }
5481 }
5482 }
5483
developerfd40db22021-04-29 10:08:25 +08005484 spin_lock_init(&eth->page_lock);
5485 spin_lock_init(&eth->tx_irq_lock);
5486 spin_lock_init(&eth->rx_irq_lock);
developera7fbeec2024-02-02 13:56:07 +08005487 spin_lock_init(&eth->txrx_irq_lock);
developerd82e8372022-02-09 15:00:09 +08005488 spin_lock_init(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +08005489
5490 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
5491 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5492 "mediatek,ethsys");
5493 if (IS_ERR(eth->ethsys)) {
5494 dev_err(&pdev->dev, "no ethsys regmap found\n");
5495 return PTR_ERR(eth->ethsys);
5496 }
5497 }
5498
5499 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
5500 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5501 "mediatek,infracfg");
5502 if (IS_ERR(eth->infra)) {
5503 dev_err(&pdev->dev, "no infracfg regmap found\n");
5504 return PTR_ERR(eth->infra);
5505 }
5506 }
5507
developer3f28d382023-03-07 16:06:30 +08005508 if (of_dma_is_coherent(pdev->dev.of_node)) {
5509 struct regmap *cci;
5510
5511 cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5512 "cci-control-port");
5513 /* enable CPU/bus coherency */
5514 if (!IS_ERR(cci))
5515 regmap_write(cci, 0, 3);
5516 }
5517
developerfd40db22021-04-29 10:08:25 +08005518 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
developer4e8a3fd2023-04-10 18:05:44 +08005519 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
developerfd40db22021-04-29 10:08:25 +08005520 GFP_KERNEL);
developer4e8a3fd2023-04-10 18:05:44 +08005521 if (!eth->sgmii)
developerfd40db22021-04-29 10:08:25 +08005522 return -ENOMEM;
5523
developer4e8a3fd2023-04-10 18:05:44 +08005524 err = mtk_sgmii_init(eth, pdev->dev.of_node,
developerfd40db22021-04-29 10:08:25 +08005525 eth->soc->ana_rgc3);
developer089e8852022-09-28 14:43:46 +08005526 if (err)
5527 return err;
5528 }
5529
5530 if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
developer4e8a3fd2023-04-10 18:05:44 +08005531 eth->usxgmii = devm_kzalloc(eth->dev, sizeof(*eth->usxgmii),
5532 GFP_KERNEL);
5533 if (!eth->usxgmii)
5534 return -ENOMEM;
developer089e8852022-09-28 14:43:46 +08005535
developer4e8a3fd2023-04-10 18:05:44 +08005536 err = mtk_usxgmii_init(eth, pdev->dev.of_node);
developer089e8852022-09-28 14:43:46 +08005537 if (err)
5538 return err;
5539
5540 err = mtk_toprgu_init(eth, pdev->dev.of_node);
developerfd40db22021-04-29 10:08:25 +08005541 if (err)
5542 return err;
5543 }
5544
5545 if (eth->soc->required_pctl) {
5546 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5547 "mediatek,pctl");
5548 if (IS_ERR(eth->pctl)) {
5549 dev_err(&pdev->dev, "no pctl regmap found\n");
5550 return PTR_ERR(eth->pctl);
5551 }
5552 }
5553
developer46ed9492023-10-31 15:19:05 +08005554 if (MTK_HAS_CAPS(eth->soc->caps, MTK_PDMA_INT)) {
5555 for (i = 0; i < MTK_PDMA_IRQ_NUM; i++)
5556 eth->irq_pdma[i] = platform_get_irq(pdev, i);
5557 }
developer94806ec2023-05-19 14:16:44 +08005558
5559 for (i = 0; i < MTK_FE_IRQ_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08005560 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
developer94806ec2023-05-19 14:16:44 +08005561 eth->irq_fe[i] = eth->irq_fe[0];
developer46ed9492023-10-31 15:19:05 +08005562 else if (MTK_HAS_CAPS(eth->soc->caps, MTK_PDMA_INT))
developer94806ec2023-05-19 14:16:44 +08005563 eth->irq_fe[i] =
5564 platform_get_irq(pdev, i + MTK_PDMA_IRQ_NUM);
developer46ed9492023-10-31 15:19:05 +08005565 else
5566 eth->irq_fe[i] = platform_get_irq(pdev, i);
developer94806ec2023-05-19 14:16:44 +08005567
5568 if (eth->irq_fe[i] < 0) {
developerfd40db22021-04-29 10:08:25 +08005569 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
5570 return -ENXIO;
5571 }
5572 }
5573
5574 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
5575 eth->clks[i] = devm_clk_get(eth->dev,
5576 mtk_clks_source_name[i]);
5577 if (IS_ERR(eth->clks[i])) {
5578 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
5579 return -EPROBE_DEFER;
5580 if (eth->soc->required_clks & BIT(i)) {
5581 dev_err(&pdev->dev, "clock %s not found\n",
5582 mtk_clks_source_name[i]);
5583 return -EINVAL;
5584 }
5585 eth->clks[i] = NULL;
5586 }
5587 }
5588
5589 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
5590 INIT_WORK(&eth->pending_work, mtk_pending_work);
5591
developer8051e042022-04-08 13:26:36 +08005592 err = mtk_hw_init(eth, MTK_TYPE_COLD_RESET);
developerfd40db22021-04-29 10:08:25 +08005593 if (err)
5594 return err;
5595
5596 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
5597
5598 for_each_child_of_node(pdev->dev.of_node, mac_np) {
5599 if (!of_device_is_compatible(mac_np,
5600 "mediatek,eth-mac"))
5601 continue;
5602
5603 if (!of_device_is_available(mac_np))
5604 continue;
5605
5606 err = mtk_add_mac(eth, mac_np);
5607 if (err) {
5608 of_node_put(mac_np);
5609 goto err_deinit_hw;
5610 }
5611 }
5612
developerb6c36bf2023-09-07 12:05:01 +08005613 mux_np = of_get_child_by_name(eth->dev->of_node, "mux-bus");
5614 if (mux_np) {
5615 struct device_node *child;
5616
5617 for_each_available_child_of_node(mux_np, child) {
5618 if (!of_device_is_compatible(child,
5619 "mediatek,eth-mux"))
5620 continue;
5621
5622 if (!of_device_is_available(child))
5623 continue;
5624
5625 err = mtk_add_mux(eth, child);
5626 if (err)
5627 dev_err(&pdev->dev, "failed to add mux\n");
		}

		of_node_put(mux_np);
	}
5632
developer18f46a82021-07-20 21:08:21 +08005633 err = mtk_napi_init(eth);
5634 if (err)
5635 goto err_free_dev;
5636
developerfd40db22021-04-29 10:08:25 +08005637 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
developer94806ec2023-05-19 14:16:44 +08005638 err = devm_request_irq(eth->dev, eth->irq_fe[0],
developerfd40db22021-04-29 10:08:25 +08005639 mtk_handle_irq, 0,
5640 dev_name(eth->dev), eth);
5641 } else {
developer94806ec2023-05-19 14:16:44 +08005642 err = devm_request_irq(eth->dev, eth->irq_fe[1],
developerfd40db22021-04-29 10:08:25 +08005643 mtk_handle_irq_tx, 0,
5644 dev_name(eth->dev), eth);
5645 if (err)
5646 goto err_free_dev;
5647
developer46ed9492023-10-31 15:19:05 +08005648 if (MTK_HAS_CAPS(eth->soc->caps, MTK_PDMA_INT)) {
5649 err = devm_request_irq(eth->dev, eth->irq_fe[2],
5650 mtk_handle_fe_irq, 0,
5651 dev_name(eth->dev), eth);
5652 if (err)
5653 goto err_free_dev;
developer94806ec2023-05-19 14:16:44 +08005654
developer46ed9492023-10-31 15:19:05 +08005655 err = devm_request_irq(eth->dev, eth->irq_pdma[0],
5656 mtk_handle_irq_rx, IRQF_SHARED,
5657 dev_name(eth->dev), &eth->rx_napi[0]);
5658 if (err)
5659 goto err_free_dev;
developer18f46a82021-07-20 21:08:21 +08005660
developer46ed9492023-10-31 15:19:05 +08005661 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
5662 for (i = 0; i < MTK_RX_RSS_NUM; i++) {
5663 err = devm_request_irq(eth->dev,
5664 eth->irq_pdma[MTK_RSS_RING(i)],
5665 mtk_handle_irq_rx, IRQF_SHARED,
5666 dev_name(eth->dev),
5667 &eth->rx_napi[MTK_RSS_RING(i)]);
5668 if (err)
5669 goto err_free_dev;
5670 }
developerddc36672023-10-16 15:13:58 +08005671 }
developer46ed9492023-10-31 15:19:05 +08005672
5673 if (MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO)) {
5674 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2) ||
5675 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) ? 0 : 1;
5676 for (; i < MTK_HW_LRO_RING_NUM; i++) {
5677 err = devm_request_irq(eth->dev,
5678 eth->irq_pdma[i],
5679 mtk_handle_irq_rx, IRQF_SHARED,
5680 dev_name(eth->dev),
5681 &eth->rx_napi[MTK_HW_LRO_RING(i)]);
5682 if (err)
5683 goto err_free_dev;
5684 }
5685 }
5686 } else {
5687 err = devm_request_irq(eth->dev, eth->irq_fe[2],
5688 mtk_handle_irq_rx, 0,
5689 dev_name(eth->dev), &eth->rx_napi[0]);
5690 if (err)
5691 goto err_free_dev;
developerddc36672023-10-16 15:13:58 +08005692
developer46ed9492023-10-31 15:19:05 +08005693 if (MTK_FE_IRQ_NUM > 3) {
5694 err = devm_request_irq(eth->dev, eth->irq_fe[3],
5695 mtk_handle_fe_irq, 0,
5696 dev_name(eth->dev), eth);
developer18f46a82021-07-20 21:08:21 +08005697 if (err)
5698 goto err_free_dev;
5699 }
5700 }
developerfd40db22021-04-29 10:08:25 +08005701 }
developer8051e042022-04-08 13:26:36 +08005702
developerfd40db22021-04-29 10:08:25 +08005703 if (err)
5704 goto err_free_dev;
5705
5706 /* No MT7628/88 support yet */
5707 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
5708 err = mtk_mdio_init(eth);
5709 if (err)
5710 goto err_free_dev;
5711 }
5712
5713 for (i = 0; i < MTK_MAX_DEVS; i++) {
5714 if (!eth->netdev[i])
5715 continue;
5716
5717 err = register_netdev(eth->netdev[i]);
5718 if (err) {
5719 dev_err(eth->dev, "error bringing up device\n");
5720 goto err_deinit_mdio;
5721 } else
5722 netif_info(eth, probe, eth->netdev[i],
5723 "mediatek frame engine at 0x%08lx, irq %d\n",
developer94806ec2023-05-19 14:16:44 +08005724 eth->netdev[i]->base_addr, eth->irq_fe[0]);
developerfd40db22021-04-29 10:08:25 +08005725 }
5726
5727 /* we run 2 devices on the same DMA ring so we need a dummy device
5728 * for NAPI to work
5729 */
5730 init_dummy_netdev(&eth->dummy_dev);
5731 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
5732 MTK_NAPI_WEIGHT);
developer18f46a82021-07-20 21:08:21 +08005733 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[0].napi, mtk_napi_rx,
developerfd40db22021-04-29 10:08:25 +08005734 MTK_NAPI_WEIGHT);
5735
developer18f46a82021-07-20 21:08:21 +08005736 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
developerddc36672023-10-16 15:13:58 +08005737 for (i = 0; i < MTK_RX_RSS_NUM; i++)
5738 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[MTK_RSS_RING(i)].napi,
5739 mtk_napi_rx, MTK_NAPI_WEIGHT);
5740 }
5741
5742 if (MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO)) {
5743 for (i = 0; i < MTK_HW_LRO_RING_NUM; i++) {
5744 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[MTK_HW_LRO_RING(i)].napi,
developer18f46a82021-07-20 21:08:21 +08005745 mtk_napi_rx, MTK_NAPI_WEIGHT);
developerddc36672023-10-16 15:13:58 +08005746 }
developer18f46a82021-07-20 21:08:21 +08005747 }
5748
developerfd40db22021-04-29 10:08:25 +08005749 mtketh_debugfs_init(eth);
5750 debug_proc_init(eth);
5751
5752 platform_set_drvdata(pdev, eth);
5753
developer8051e042022-04-08 13:26:36 +08005754 register_netdevice_notifier(&mtk_eth_netdevice_nb);
developer37482a42022-12-26 13:31:13 +08005755#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developer8051e042022-04-08 13:26:36 +08005756 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
5757 eth->mtk_dma_monitor_timer.expires = jiffies;
5758 add_timer(&eth->mtk_dma_monitor_timer);
developer793f7b42022-05-20 13:54:51 +08005759#endif
developer8051e042022-04-08 13:26:36 +08005760
developerfd40db22021-04-29 10:08:25 +08005761 return 0;
5762
5763err_deinit_mdio:
5764 mtk_mdio_cleanup(eth);
5765err_free_dev:
5766 mtk_free_dev(eth);
5767err_deinit_hw:
5768 mtk_hw_deinit(eth);
5769
5770 return err;
5771}
5772
5773static int mtk_remove(struct platform_device *pdev)
5774{
5775 struct mtk_eth *eth = platform_get_drvdata(pdev);
5776 struct mtk_mac *mac;
5777 int i;
5778
5779 /* stop all devices to make sure that dma is properly shut down */
5780 for (i = 0; i < MTK_MAC_COUNT; i++) {
5781 if (!eth->netdev[i])
5782 continue;
5783 mtk_stop(eth->netdev[i]);
5784 mac = netdev_priv(eth->netdev[i]);
5785 phylink_disconnect_phy(mac->phylink);
5786 }
5787
5788 mtk_hw_deinit(eth);
5789
5790 netif_napi_del(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08005791 netif_napi_del(&eth->rx_napi[0].napi);
5792
5793 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
5794 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
5795 netif_napi_del(&eth->rx_napi[i].napi);
5796 }
5797
developerfd40db22021-04-29 10:08:25 +08005798 mtk_cleanup(eth);
5799 mtk_mdio_cleanup(eth);
developer8051e042022-04-08 13:26:36 +08005800 unregister_netdevice_notifier(&mtk_eth_netdevice_nb);
5801 del_timer_sync(&eth->mtk_dma_monitor_timer);
developerfd40db22021-04-29 10:08:25 +08005802
5803 return 0;
5804}
5805
5806static const struct mtk_soc_data mt2701_data = {
developer68ce74f2023-01-03 16:11:57 +08005807 .reg_map = &mtk_reg_map,
developerfd40db22021-04-29 10:08:25 +08005808 .caps = MT7623_CAPS | MTK_HWLRO,
5809 .hw_features = MTK_HW_FEATURES,
5810 .required_clks = MT7623_CLKS_BITMAP,
5811 .required_pctl = true,
5812 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005813 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005814 .txrx = {
5815 .txd_size = sizeof(struct mtk_tx_dma),
5816 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005817 .rx_dma_l4_valid = RX_DMA_L4_VALID,
developere9356982022-07-04 09:03:20 +08005818 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5819 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5820 },
developerfd40db22021-04-29 10:08:25 +08005821};
5822
5823static const struct mtk_soc_data mt7621_data = {
developer68ce74f2023-01-03 16:11:57 +08005824 .reg_map = &mtk_reg_map,
developerfd40db22021-04-29 10:08:25 +08005825 .caps = MT7621_CAPS,
5826 .hw_features = MTK_HW_FEATURES,
5827 .required_clks = MT7621_CLKS_BITMAP,
5828 .required_pctl = false,
5829 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005830 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005831 .txrx = {
5832 .txd_size = sizeof(struct mtk_tx_dma),
developer68ce74f2023-01-03 16:11:57 +08005833 .rx_dma_l4_valid = RX_DMA_L4_VALID,
developere9356982022-07-04 09:03:20 +08005834 .rxd_size = sizeof(struct mtk_rx_dma),
5835 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5836 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5837 },
developerfd40db22021-04-29 10:08:25 +08005838};
5839
5840static const struct mtk_soc_data mt7622_data = {
developer68ce74f2023-01-03 16:11:57 +08005841 .reg_map = &mtk_reg_map,
developerfd40db22021-04-29 10:08:25 +08005842 .ana_rgc3 = 0x2028,
5843 .caps = MT7622_CAPS | MTK_HWLRO,
5844 .hw_features = MTK_HW_FEATURES,
5845 .required_clks = MT7622_CLKS_BITMAP,
5846 .required_pctl = false,
5847 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005848 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005849 .txrx = {
5850 .txd_size = sizeof(struct mtk_tx_dma),
5851 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005852 .rx_dma_l4_valid = RX_DMA_L4_VALID,
developere9356982022-07-04 09:03:20 +08005853 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5854 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5855 },
developerfd40db22021-04-29 10:08:25 +08005856};
5857
5858static const struct mtk_soc_data mt7623_data = {
developer68ce74f2023-01-03 16:11:57 +08005859 .reg_map = &mtk_reg_map,
developerfd40db22021-04-29 10:08:25 +08005860 .caps = MT7623_CAPS | MTK_HWLRO,
5861 .hw_features = MTK_HW_FEATURES,
5862 .required_clks = MT7623_CLKS_BITMAP,
5863 .required_pctl = true,
5864 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005865 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005866 .txrx = {
5867 .txd_size = sizeof(struct mtk_tx_dma),
5868 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005869 .rx_dma_l4_valid = RX_DMA_L4_VALID,
developere9356982022-07-04 09:03:20 +08005870 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5871 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5872 },
developerfd40db22021-04-29 10:08:25 +08005873};
5874
5875static const struct mtk_soc_data mt7629_data = {
developer68ce74f2023-01-03 16:11:57 +08005876 .reg_map = &mtk_reg_map,
developerfd40db22021-04-29 10:08:25 +08005877 .ana_rgc3 = 0x128,
5878 .caps = MT7629_CAPS | MTK_HWLRO,
5879 .hw_features = MTK_HW_FEATURES,
5880 .required_clks = MT7629_CLKS_BITMAP,
5881 .required_pctl = false,
5882 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005883 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005884 .txrx = {
5885 .txd_size = sizeof(struct mtk_tx_dma),
5886 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005887 .rx_dma_l4_valid = RX_DMA_L4_VALID,
developere9356982022-07-04 09:03:20 +08005888 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5889 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5890 },
developerfd40db22021-04-29 10:08:25 +08005891};
5892
5893static const struct mtk_soc_data mt7986_data = {
developer68ce74f2023-01-03 16:11:57 +08005894 .reg_map = &mt7986_reg_map,
developerfd40db22021-04-29 10:08:25 +08005895 .ana_rgc3 = 0x128,
5896 .caps = MT7986_CAPS,
developercba5f4e2021-05-06 14:01:53 +08005897 .hw_features = MTK_HW_FEATURES,
developerfd40db22021-04-29 10:08:25 +08005898 .required_clks = MT7986_CLKS_BITMAP,
5899 .required_pctl = false,
developerc42fa982023-08-22 15:37:30 +08005900 .has_sram = false,
developer933f09b2023-09-12 11:13:01 +08005901 .rss_num = 4,
developere9356982022-07-04 09:03:20 +08005902 .txrx = {
5903 .txd_size = sizeof(struct mtk_tx_dma_v2),
developer8ecd51b2023-03-13 11:28:28 +08005904 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005905 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
developere9356982022-07-04 09:03:20 +08005906 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5907 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
5908 },
developerfd40db22021-04-29 10:08:25 +08005909};
5910
developer255bba22021-07-27 15:16:33 +08005911static const struct mtk_soc_data mt7981_data = {
developer68ce74f2023-01-03 16:11:57 +08005912 .reg_map = &mt7986_reg_map,
developer255bba22021-07-27 15:16:33 +08005913 .ana_rgc3 = 0x128,
5914 .caps = MT7981_CAPS,
developer7377b0b2021-11-18 14:54:47 +08005915 .hw_features = MTK_HW_FEATURES,
developer255bba22021-07-27 15:16:33 +08005916 .required_clks = MT7981_CLKS_BITMAP,
5917 .required_pctl = false,
developerc42fa982023-08-22 15:37:30 +08005918 .has_sram = false,
developer933f09b2023-09-12 11:13:01 +08005919 .rss_num = 4,
developere9356982022-07-04 09:03:20 +08005920 .txrx = {
5921 .txd_size = sizeof(struct mtk_tx_dma_v2),
developer8ecd51b2023-03-13 11:28:28 +08005922 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005923 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
developere9356982022-07-04 09:03:20 +08005924 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5925 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
5926 },
developer255bba22021-07-27 15:16:33 +08005927};
5928
developer089e8852022-09-28 14:43:46 +08005929static const struct mtk_soc_data mt7988_data = {
developer68ce74f2023-01-03 16:11:57 +08005930 .reg_map = &mt7988_reg_map,
developer089e8852022-09-28 14:43:46 +08005931 .ana_rgc3 = 0x128,
developerddc36672023-10-16 15:13:58 +08005932 .caps = MT7988_CAPS | MTK_HWLRO,
developer089e8852022-09-28 14:43:46 +08005933 .hw_features = MTK_HW_FEATURES,
5934 .required_clks = MT7988_CLKS_BITMAP,
5935 .required_pctl = false,
5936 .has_sram = true,
developere3d0de22023-05-30 17:45:00 +08005937 .rss_num = 4,
developer089e8852022-09-28 14:43:46 +08005938 .txrx = {
5939 .txd_size = sizeof(struct mtk_tx_dma_v2),
5940 .rxd_size = sizeof(struct mtk_rx_dma_v2),
developer68ce74f2023-01-03 16:11:57 +08005941 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
developer089e8852022-09-28 14:43:46 +08005942 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5943 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
5944 },
5945};
5946
developerfd40db22021-04-29 10:08:25 +08005947static const struct mtk_soc_data rt5350_data = {
developer68ce74f2023-01-03 16:11:57 +08005948 .reg_map = &mt7628_reg_map,
developerfd40db22021-04-29 10:08:25 +08005949 .caps = MT7628_CAPS,
5950 .hw_features = MTK_HW_FEATURES_MT7628,
5951 .required_clks = MT7628_CLKS_BITMAP,
5952 .required_pctl = false,
5953 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005954 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005955 .txrx = {
5956 .txd_size = sizeof(struct mtk_tx_dma),
5957 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005958 .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
developere9356982022-07-04 09:03:20 +08005959 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5960 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5961 },
developerfd40db22021-04-29 10:08:25 +08005962};
5963
5964const struct of_device_id of_mtk_match[] = {
5965 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
5966 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
5967 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
5968 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
5969 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
5970 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
developer255bba22021-07-27 15:16:33 +08005971 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
developer089e8852022-09-28 14:43:46 +08005972 { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data},
developerfd40db22021-04-29 10:08:25 +08005973 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
5974 {},
5975};
5976MODULE_DEVICE_TABLE(of, of_mtk_match);
5977
5978static struct platform_driver mtk_driver = {
5979 .probe = mtk_probe,
5980 .remove = mtk_remove,
5981 .driver = {
5982 .name = "mtk_soc_eth",
5983 .of_match_table = of_mtk_match,
5984 },
5985};
5986
5987module_platform_driver(mtk_driver);
5988
5989MODULE_LICENSE("GPL");
5990MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
5991MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");