// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>

#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
#include "mtk_eth_reset.h"

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
#include "mtk_hnat/nf_hnat_mtk.h"
#endif

static int mtk_msg_level = -1;
atomic_t reset_lock = ATOMIC_INIT(0);
atomic_t force = ATOMIC_INIT(0);

module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
DECLARE_COMPLETION(wait_ser_done);

#define MTK_ETHTOOL_STAT(x) { #x, \
			     offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

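/* Per-SoC register maps. The DMA and IRQ blocks live at different offsets
 * on each generation (e.g. PDMA at 0x0900 on MT762x, 0x4100 on MT7986,
 * 0x6900 on MT7988), so the driver indirects every access through
 * eth->soc->reg_map instead of hard-coding offsets.
 */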
static const struct mtk_reg_map mtk_reg_map = {
	.tx_irq_mask = 0x1a1c,
	.tx_irq_status = 0x1a18,
	.pdma = {
		.rx_ptr = 0x0900,
		.rx_cnt_cfg = 0x0904,
		.pcrx_ptr = 0x0908,
		.glo_cfg = 0x0a04,
		.rst_idx = 0x0a08,
		.delay_irq = 0x0a0c,
		.irq_status = 0x0a20,
		.irq_mask = 0x0a28,
		.int_grp = 0x0a50,
		.int_grp2 = 0x0a54,
	},
	.qdma = {
		.qtx_cfg = 0x1800,
		.qtx_sch = 0x1804,
		.rx_ptr = 0x1900,
		.rx_cnt_cfg = 0x1904,
		.qcrx_ptr = 0x1908,
		.glo_cfg = 0x1a04,
		.rst_idx = 0x1a08,
		.delay_irq = 0x1a0c,
		.fc_th = 0x1a10,
		.tx_sch_rate = 0x1a14,
		.int_grp = 0x1a20,
		.int_grp2 = 0x1a24,
		.hred2 = 0x1a44,
		.ctx_ptr = 0x1b00,
		.dtx_ptr = 0x1b04,
		.crx_ptr = 0x1b10,
		.drx_ptr = 0x1b14,
		.fq_head = 0x1b20,
		.fq_tail = 0x1b24,
		.fq_count = 0x1b28,
		.fq_blen = 0x1b2c,
	},
	.gdm1_cnt = 0x2400,
	.gdma_to_ppe0 = 0x4444,
	.ppe_base = {
		[0] = 0x0c00,
	},
	.wdma_base = {
		[0] = 0x2800,
		[1] = 0x2c00,
	},
};

static const struct mtk_reg_map mt7628_reg_map = {
	.tx_irq_mask = 0x0a28,
	.tx_irq_status = 0x0a20,
	.pdma = {
		.rx_ptr = 0x0900,
		.rx_cnt_cfg = 0x0904,
		.pcrx_ptr = 0x0908,
		.glo_cfg = 0x0a04,
		.rst_idx = 0x0a08,
		.delay_irq = 0x0a0c,
		.irq_status = 0x0a20,
		.irq_mask = 0x0a28,
		.int_grp = 0x0a50,
		.int_grp2 = 0x0a54,
	},
};

static const struct mtk_reg_map mt7986_reg_map = {
	.tx_irq_mask = 0x461c,
	.tx_irq_status = 0x4618,
	.pdma = {
		.rx_ptr = 0x4100,
		.rx_cnt_cfg = 0x4104,
		.pcrx_ptr = 0x4108,
		.glo_cfg = 0x4204,
		.rst_idx = 0x4208,
		.delay_irq = 0x420c,
		.irq_status = 0x4220,
		.irq_mask = 0x4228,
		.int_grp = 0x4250,
		.int_grp2 = 0x4254,
	},
	.qdma = {
		.qtx_cfg = 0x4400,
		.qtx_sch = 0x4404,
		.rx_ptr = 0x4500,
		.rx_cnt_cfg = 0x4504,
		.qcrx_ptr = 0x4508,
		.glo_cfg = 0x4604,
		.rst_idx = 0x4608,
		.delay_irq = 0x460c,
		.fc_th = 0x4610,
		.int_grp = 0x4620,
		.int_grp2 = 0x4624,
		.hred2 = 0x4644,
		.ctx_ptr = 0x4700,
		.dtx_ptr = 0x4704,
		.crx_ptr = 0x4710,
		.drx_ptr = 0x4714,
		.fq_head = 0x4720,
		.fq_tail = 0x4724,
		.fq_count = 0x4728,
		.fq_blen = 0x472c,
		.tx_sch_rate = 0x4798,
	},
	.gdm1_cnt = 0x1c00,
	.gdma_to_ppe0 = 0x3333,
	.ppe_base = {
		[0] = 0x2000,
		[1] = 0x2400,
	},
	.wdma_base = {
		[0] = 0x4800,
		[1] = 0x4c00,
	},
};

static const struct mtk_reg_map mt7988_reg_map = {
	.tx_irq_mask = 0x461c,
	.tx_irq_status = 0x4618,
	.pdma = {
		.rx_ptr = 0x6900,
		.rx_cnt_cfg = 0x6904,
		.pcrx_ptr = 0x6908,
		.glo_cfg = 0x6a04,
		.rst_idx = 0x6a08,
		.delay_irq = 0x6a0c,
		.irq_status = 0x6a20,
		.irq_mask = 0x6a28,
		.int_grp = 0x6a50,
		.int_grp2 = 0x6a54,
	},
	.qdma = {
		.qtx_cfg = 0x4400,
		.qtx_sch = 0x4404,
		.rx_ptr = 0x4500,
		.rx_cnt_cfg = 0x4504,
		.qcrx_ptr = 0x4508,
		.glo_cfg = 0x4604,
		.rst_idx = 0x4608,
		.delay_irq = 0x460c,
		.fc_th = 0x4610,
		.int_grp = 0x4620,
		.int_grp2 = 0x4624,
		.hred2 = 0x4644,
		.ctx_ptr = 0x4700,
		.dtx_ptr = 0x4704,
		.crx_ptr = 0x4710,
		.drx_ptr = 0x4714,
		.fq_head = 0x4720,
		.fq_tail = 0x4724,
		.fq_count = 0x4728,
		.fq_blen = 0x472c,
		.tx_sch_rate = 0x4798,
	},
	.gdm1_cnt = 0x1c00,
	.gdma_to_ppe0 = 0x3333,
	.ppe_base = {
		[0] = 0x2000,
		[1] = 0x2400,
		[2] = 0x2c00,
	},
	.wdma_base = {
		[0] = 0x4800,
		[1] = 0x4c00,
		[2] = 0x5000,
	},
};

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "gp3",
	"xgp1", "xgp2", "xgp3", "crypto", "fe", "trgpll",
	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
	"sgmii_ck", "eth2pll", "wocpu0", "wocpu1",
	"ethwarp_wocpu2", "ethwarp_wocpu1", "ethwarp_wocpu0",
	"top_usxgmii0_sel", "top_usxgmii1_sel", "top_sgm0_sel", "top_sgm1_sel",
	"top_xfi_phy0_xtal_sel", "top_xfi_phy1_xtal_sel", "top_eth_gmii_sel",
	"top_eth_refck_50m_sel", "top_eth_sys_200m_sel", "top_eth_sys_sel",
	"top_eth_xgmii_sel", "top_eth_mii_sel", "top_netsys_sel",
	"top_netsys_500m_sel", "top_netsys_pao_2x_sel",
	"top_netsys_sync_250m_sel", "top_netsys_ppefb_250m_sel",
	"top_netsys_warp_sel",
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

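/* Read-modify-write helper: clears the bits in @mask, sets the bits in
 * @set, and writes the result back. Callers pass mask = 0 to only set
 * bits, e.g. mtk_m32(eth, 0, RXC_RST | RXC_DQSISEL, TRGMII_RCK_CTRL)
 * below. Note it returns the register offset, not the written value.
 */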
u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
	u32 val;

	val = mtk_r32(eth, reg);
	val &= ~mask;
	val |= set;
	mtk_w32(eth, val, reg);
	return reg;
}

static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		cond_resched();
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

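/* Low-level MDIO accessors. A Clause 45 access is flagged by MII_ADDR_C45
 * in @phy_reg and needs two IAC transactions (an address phase, then a
 * data phase), while a Clause 22 access completes in one. Every
 * transaction must wait for the PHY_IAC_ACCESS busy bit to clear first.
 */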
u32 _mtk_mdio_write(struct mtk_eth *eth, int phy_addr,
		    int phy_reg, u16 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return -1;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_WRITE |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return 0xffff;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_READ_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

static int mtk_mdio_reset(struct mii_bus *bus)
{
	/* mdiobus_register() will trigger a reset pulse when enabling bus
	 * reset; we just need to wait until the device is ready.
	 */
	mdelay(20);

	return 0;
}

static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
				     phy_interface_t interface)
{
	u32 val = 0;

	/* Check DDR memory type.
	 * Currently TRGMII mode with DDR2 memory is not supported.
	 */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
	if (interface == PHY_INTERFACE_MODE_TRGMII &&
	    val & SYSCFG_DRAM_TYPE_DDR2) {
		dev_err(eth->dev,
			"TRGMII mode with DDR2 memory is not supported!\n");
		return -EOPNOTSUPP;
	}

	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_MT7621_MASK, val);

	return 0;
}

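/* GMAC0 RGMII/TRGMII clock setup. In TRGMII mode the TRGPLL runs at a
 * fixed 500 MHz; in plain RGMII mode the interface mode, the 362.5 MHz
 * clock selector, the PLL rate (250 MHz at 1000 Mb/s, 500 MHz at
 * 10/100 Mb/s) and the RX/TX clock controls are reprogrammed per speed.
 */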
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
				   phy_interface_t interface, int speed)
{
	u32 val;
	int ret;

	if (interface == PHY_INTERFACE_MODE_TRGMII) {
		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
		val = 500000000;
		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
		if (ret)
			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
		return;
	}

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

static void mtk_setup_bridge_switch(struct mtk_eth *eth)
{
	int val;

	/* Force Port1 XGMAC Link Up */
	val = mtk_r32(eth, MTK_XGMAC_STS(MTK_GMAC1_ID));
	mtk_w32(eth, val | MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
		MTK_XGMAC_STS(MTK_GMAC1_ID));

	/* Adjust GSW bridge IPG to 11 */
	val = mtk_r32(eth, MTK_GSW_CFG);
	val &= ~(GSWTX_IPG_MASK | GSWRX_IPG_MASK);
	val |= (GSW_IPG_11 << GSWTX_IPG_SHIFT) |
	       (GSW_IPG_11 << GSWRX_IPG_SHIFT);
	mtk_w32(eth, val, MTK_GSW_CFG);
}

static bool mtk_check_gmac23_idle(struct mtk_mac *mac)
{
	u32 mac_fsm, gdm_fsm;

	mac_fsm = mtk_r32(mac->hw, MTK_MAC_FSM(mac->id));

	switch (mac->id) {
	case MTK_GMAC2_ID:
		gdm_fsm = mtk_r32(mac->hw, MTK_FE_GDM2_FSM);
		break;
	case MTK_GMAC3_ID:
		gdm_fsm = mtk_r32(mac->hw, MTK_FE_GDM3_FSM);
		break;
	default:
		return true;
	}

	if ((mac_fsm & 0xFFFF0000) == 0x01010000 &&
	    (gdm_fsm & 0xFFFF0000) == 0x00000000)
		return true;

	return false;
}

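/* Program the MAC for 802.3az Energy Efficient Ethernet. When enabling,
 * the wake-up times and the LPI TX idle threshold are written to the
 * per-MAC EEE register and LPI signalling is forced for the current
 * speed; when disabling, the EEE register is written back to 0x00000002.
 */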
static void mtk_setup_eee(struct mtk_mac *mac, bool enable)
{
	struct mtk_eth *eth = mac->hw;
	u32 mcr, mcr_cur;
	u32 val;

	mcr = mcr_cur = mtk_r32(eth, MTK_MAC_MCR(mac->id));
	mcr &= ~(MAC_MCR_FORCE_EEE100 | MAC_MCR_FORCE_EEE1000);

	if (enable) {
		mac->tx_lpi_enabled = 1;

		val = FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 19) |
		      FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 33) |
		      FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD,
				 mac->tx_lpi_timer) |
		      FIELD_PREP(MAC_EEE_RESV0, 14);
		mtk_w32(eth, val, MTK_MAC_EEE(mac->id));

		switch (mac->speed) {
		case SPEED_1000:
			mcr |= MAC_MCR_FORCE_EEE1000;
			break;
		case SPEED_100:
			mcr |= MAC_MCR_FORCE_EEE100;
			break;
		}
	} else {
		mac->tx_lpi_enabled = 0;

		mtk_w32(eth, 0x00000002, MTK_MAC_EEE(mac->id));
	}

	/* Only update control register when needed! */
	if (mcr != mcr_cur)
		mtk_w32(eth, mcr, MTK_MAC_MCR(mac->id));
}

static int mtk_get_hwver(struct mtk_eth *eth)
{
	struct device_node *np;
	struct regmap *hwver;
	u32 info = 0;

	eth->hwver = MTK_HWID_V1;

	np = of_parse_phandle(eth->dev->of_node, "mediatek,hwver", 0);
	if (!np)
		return -EINVAL;

	hwver = syscon_node_to_regmap(np);
	if (IS_ERR(hwver))
		return PTR_ERR(hwver);

	regmap_read(hwver, 0x8, &info);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
		eth->hwver = FIELD_GET(HWVER_BIT_NETSYS_3, info);
	else
		eth->hwver = FIELD_GET(HWVER_BIT_NETSYS_1_2, info);

	of_node_put(np);

	return 0;
}

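/* phylink .mac_select_pcs hook: SGMII and 802.3z interfaces map to one of
 * the SGMII PCS instances (a single shared instance on MTK_SHARED_SGMII
 * SoCs), while USXGMII/10GBASE-KR/5GBASE-R map to a USXGMII PCS on
 * NETSYS v3 parts. GMAC1 and all other combinations have no PCS.
 */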
static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
					      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	unsigned int sid;

	if (interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(interface)) {
		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
		       0 : mtk_mac2xgmii_id(eth, mac->id);

		return mtk_sgmii_select_pcs(eth->sgmii, sid);
	} else if (interface == PHY_INTERFACE_MODE_USXGMII ||
		   interface == PHY_INTERFACE_MODE_10GKR ||
		   interface == PHY_INTERFACE_MODE_5GBASER) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) &&
		    mac->id != MTK_GMAC1_ID) {
			sid = mtk_mac2xgmii_id(eth, mac->id);

			return mtk_usxgmii_select_pcs(eth->usxgmii, sid);
		}
	}

	return NULL;
}

static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	u32 sid, i;
	int val = 0, ge_mode, err = 0;
	unsigned int mac_type = mac->type;

	/* MT76x8 has no hardware settings for the MAC */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
	    mac->interface != state->interface) {
		/* Setup soc pin functions */
		switch (state->interface) {
		case PHY_INTERFACE_MODE_TRGMII:
			if (mac->id)
				goto err_phy;
			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
					  MTK_GMAC1_TRGMII))
				goto err_phy;
			/* fall through */
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_REVMII:
		case PHY_INTERFACE_MODE_RMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
		case PHY_INTERFACE_MODE_2500BASEX:
		case PHY_INTERFACE_MODE_SGMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_GMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
				err = mtk_gmac_gephy_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_XGMII:
			mac->type = MTK_XGDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_XGMII)) {
				err = mtk_gmac_xgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_USXGMII:
		case PHY_INTERFACE_MODE_10GKR:
		case PHY_INTERFACE_MODE_5GBASER:
			mac->type = MTK_XGDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
				err = mtk_gmac_usxgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		default:
			goto err_phy;
		}

		/* Setup clock for 1st gmac */
		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
		    !phy_interface_mode_is_8023z(state->interface) &&
		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
			if (MTK_HAS_CAPS(mac->hw->soc->caps,
					 MTK_TRGMII_MT7621_CLK)) {
				if (mt7621_gmac0_rgmii_adjust(mac->hw,
							      state->interface))
					goto err_phy;
			} else {
				mtk_gmac0_rgmii_adjust(mac->hw,
						       state->interface,
						       state->speed);

				/* mt7623_pad_clk_setup */
				for (i = 0; i < NUM_TRGMII_CTRL; i++)
					mtk_w32(mac->hw,
						TD_DM_DRVP(8) | TD_DM_DRVN(8),
						TRGMII_TD_ODT(i));

				/* Assert/release MT7623 RXC reset */
				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
					TRGMII_RCK_CTRL);
				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
			}
		}

		ge_mode = 0;
		switch (state->interface) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_GMII:
			ge_mode = 1;
			break;
		case PHY_INTERFACE_MODE_REVMII:
			ge_mode = 2;
			break;
		case PHY_INTERFACE_MODE_RMII:
			if (mac->id)
				goto err_phy;
			ge_mode = 3;
			break;
		default:
			break;
		}

		/* put the gmac into the right mode */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
		spin_unlock(&eth->syscfg0_lock);

		mac->interface = state->interface;
	}

	/* SGMII */
	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path from GMAC to SGMII will be enabled once the
		 * SGMIISYS has been set up.
		 */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK,
				   ~(u32)SYSCFG0_SGMII_MASK);

		/* Decide how GMAC and SGMIISYS are mapped */
		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
		       0 : mac->id;

		/* Save the syscfg0 value for mac_finish */
		mac->syscfg0 = val;
		spin_unlock(&eth->syscfg0_lock);
	} else if (state->interface == PHY_INTERFACE_MODE_USXGMII ||
		   state->interface == PHY_INTERFACE_MODE_10GKR ||
		   state->interface == PHY_INTERFACE_MODE_5GBASER) {
		/* Nothing to do */
	} else if (phylink_autoneg_inband(mode)) {
		dev_err(eth->dev,
			"In-band mode not supported in non SGMII mode!\n");
		return;
	}

	/* Setup gmac */
	if (mac->type == MTK_XGDM_TYPE) {
		mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
		mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			if (mac->id == MTK_GMAC1_ID)
				mtk_setup_bridge_switch(eth);
		}
	} else if (mac->type == MTK_GDM_TYPE) {
		val = mtk_r32(eth, MTK_GDMA_EG_CTRL(mac->id));
		mtk_w32(eth, val & ~MTK_GDMA_XGDM_SEL,
			MTK_GDMA_EG_CTRL(mac->id));

		/* FIXME: In the current hardware design, we have to reset
		 * the FE when switching from XGDM to GDM. Therefore, trigger
		 * an SER here to let the GDM go back to its initial state.
		 */
		if (mac->type != mac_type && !mtk_check_gmac23_idle(mac)) {
			if (!test_bit(MTK_RESETTING, &mac->hw->state)) {
				atomic_inc(&force);
				schedule_work(&eth->pending_work);
			}
		}
	}

	return;

err_phy:
	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
		mac->id, phy_modes(state->interface));
	return;

init_err:
	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
		mac->id, phy_modes(state->interface), err);
}

static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
			  phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;

	/* Enable SGMII */
	if (interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(interface))
		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, mac->syscfg0);

	return 0;
}

static int mtk_mac_pcs_get_state(struct phylink_config *config,
				 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	if (mac->type == MTK_XGDM_TYPE) {
		u32 sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));

		if (mac->id == MTK_GMAC2_ID)
			sts = sts >> 16;

		state->duplex = DUPLEX_FULL;

		switch (FIELD_GET(MTK_USXGMII_PCS_MODE, sts)) {
		case 0:
			state->speed = SPEED_10000;
			break;
		case 1:
			state->speed = SPEED_5000;
			break;
		case 2:
			state->speed = SPEED_2500;
			break;
		case 3:
			state->speed = SPEED_1000;
			break;
		}

		state->interface = mac->interface;
		state->link = FIELD_GET(MTK_USXGMII_PCS_LINK, sts);
	} else if (mac->type == MTK_GDM_TYPE) {
		struct mtk_eth *eth = mac->hw;
		struct mtk_sgmii *ss = eth->sgmii;
		u32 id = mtk_mac2xgmii_id(eth, mac->id);
		u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
		u32 bm, adv, rgc3, sgm_mode;

		state->interface = mac->interface;

		regmap_read(ss->pcs[id].regmap, SGMSYS_PCS_CONTROL_1, &bm);
		if (bm & SGMII_AN_ENABLE) {
			regmap_read(ss->pcs[id].regmap,
				    SGMSYS_PCS_ADVERTISE, &adv);

			phylink_mii_c22_pcs_decode_state(
				state,
				FIELD_GET(SGMII_BMSR, bm),
				FIELD_GET(SGMII_LPA, adv));
		} else {
			state->link = !!(bm & SGMII_LINK_STATYS);

			regmap_read(ss->pcs[id].regmap,
				    SGMSYS_SGMII_MODE, &sgm_mode);

			switch (sgm_mode & SGMII_SPEED_MASK) {
			case SGMII_SPEED_10:
				state->speed = SPEED_10;
				break;
			case SGMII_SPEED_100:
				state->speed = SPEED_100;
				break;
			case SGMII_SPEED_1000:
				regmap_read(ss->pcs[id].regmap,
					    ss->pcs[id].ana_rgc3, &rgc3);
				rgc3 = FIELD_GET(RG_PHY_SPEED_3_125G, rgc3);
				state->speed = rgc3 ? SPEED_2500 : SPEED_1000;
				break;
			}

			if (sgm_mode & SGMII_DUPLEX_HALF)
				state->duplex = DUPLEX_HALF;
			else
				state->duplex = DUPLEX_FULL;
		}

		state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
		if (pmsr & MAC_MSR_RX_FC)
			state->pause |= MLO_PAUSE_RX;
		if (pmsr & MAC_MSR_TX_FC)
			state->pause |= MLO_PAUSE_TX;
	}

	return 1;
}

static int mtk_gdm_fsm_get(struct mtk_mac *mac, u32 gdm)
{
	u32 fsm = mtk_r32(mac->hw, gdm);
	u32 ret = 0, val = 0;

	switch (mac->type) {
	case MTK_GDM_TYPE:
		ret = fsm == 0;
		break;
	case MTK_XGDM_TYPE:
		ret = fsm == 0x10000000;
		break;
	default:
		break;
	}

	if ((mac->type == MTK_XGDM_TYPE) && (mac->id != MTK_GMAC1_ID)) {
		val = mtk_r32(mac->hw, MTK_MAC_FSM(mac->id));
		if ((val == 0x02010100) || (val == 0x01010100)) {
			ret = (mac->interface == PHY_INTERFACE_MODE_XGMII) ?
			      ((fsm & 0x0fffffff) == 0) : ((fsm & 0x00ffffff) == 0);
		} else {
			ret = 0;
		}
	}

	return ret;
}

static void mtk_gdm_fsm_poll(struct mtk_mac *mac)
{
	u32 gdm = 0, i = 0;

	switch (mac->id) {
	case MTK_GMAC1_ID:
		gdm = MTK_FE_GDM1_FSM;
		break;
	case MTK_GMAC2_ID:
		gdm = MTK_FE_GDM2_FSM;
		break;
	case MTK_GMAC3_ID:
		gdm = MTK_FE_GDM3_FSM;
		break;
	default:
		pr_info("%s mac id invalid", __func__);
		return;
	}

	while (i < 3) {
		if (mtk_gdm_fsm_get(mac, gdm))
			break;
		msleep(500);
		i++;
	}

	if (i == 3)
		pr_info("%s fsm invalid", __func__);
}

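/* Force the PSE-side link state of a GMAC port down (or release it) via
 * the per-port FE_LINK_DOWN bits in FE_GLO_CFG, then poll the GDM FSM
 * until the port goes idle, giving in-flight traffic time to drain
 * before the MAC is reconfigured.
 */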
static void mtk_pse_port_link_set(struct mtk_mac *mac, bool up)
{
	u32 fe_glo_cfg, val = 0;

	fe_glo_cfg = mtk_r32(mac->hw, MTK_FE_GLO_CFG(mac->id));
	switch (mac->id) {
	case MTK_GMAC1_ID:
		val = MTK_FE_LINK_DOWN_P1;
		break;
	case MTK_GMAC2_ID:
		val = MTK_FE_LINK_DOWN_P2;
		break;
	case MTK_GMAC3_ID:
		val = MTK_FE_LINK_DOWN_P15;
		break;
	}

	if (!up)
		fe_glo_cfg |= val;
	else
		fe_glo_cfg &= ~val;

	mtk_w32(mac->hw, fe_glo_cfg, MTK_FE_GLO_CFG(mac->id));
	mtk_gdm_fsm_poll(mac);
}

static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	unsigned int id;
	u32 mcr, sts;

	mtk_pse_port_link_set(mac, false);
	if (mac->type == MTK_GDM_TYPE) {
		mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
		mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
		mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
	} else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
		struct mtk_usxgmii_pcs *mpcs;

		mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
		mcr &= 0xfffffff0;
		mcr |= XMAC_MCR_TRX_DISABLE;
		mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));

		sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));
		sts &= ~MTK_XGMAC_FORCE_LINK(mac->id);
		mtk_w32(mac->hw, sts, MTK_XGMAC_STS(mac->id));

		id = mtk_mac2xgmii_id(eth, mac->id);
		mpcs = &eth->usxgmii->pcs[id];
		cancel_delayed_work_sync(&mpcs->link_poll);
	}
}

static void mtk_mac_link_up(struct phylink_config *config,
			    struct phy_device *phy,
			    unsigned int mode, phy_interface_t interface,
			    int speed, int duplex, bool tx_pause, bool rx_pause)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr, mcr_cur, sts, force_link;

	mac->speed = speed;

	if (mac->type == MTK_GDM_TYPE) {
		mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
		mcr = mcr_cur;
		mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
			 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
			 MAC_MCR_FORCE_RX_FC);
		mcr |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
		       MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;

		/* Configure speed */
		switch (speed) {
		case SPEED_2500:
		case SPEED_1000:
			mcr |= MAC_MCR_SPEED_1000;
			break;
		case SPEED_100:
			mcr |= MAC_MCR_SPEED_100;
			break;
		}

		/* Configure duplex */
		if (duplex == DUPLEX_FULL)
			mcr |= MAC_MCR_FORCE_DPX;

		/* Configure pause modes -
		 * phylink will avoid these for half duplex
		 */
		if (tx_pause)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (rx_pause)
			mcr |= MAC_MCR_FORCE_RX_FC;

		mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;

		/* Only update control register when needed! */
		if (mcr != mcr_cur)
			mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

		if (mode == MLO_AN_PHY && phy)
			mtk_setup_eee(mac, phy_init_eee(phy, false) >= 0);
	} else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
		/* Eliminate the interference (before link-up) caused by PHY noise */
		mtk_m32(mac->hw, XMAC_LOGIC_RST, 0x0, MTK_XMAC_LOGIC_RST(mac->id));
		mdelay(20);
		mtk_m32(mac->hw, XMAC_GLB_CNTCLR, 0x1, MTK_XMAC_CNT_CTRL(mac->id));

		switch (mac->id) {
		case MTK_GMAC2_ID:
			force_link = (mac->interface ==
				      PHY_INTERFACE_MODE_XGMII) ?
				     MTK_XGMAC_FORCE_LINK(mac->id) : 0;
			sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));
			mtk_w32(mac->hw, sts | force_link,
				MTK_XGMAC_STS(mac->id));
			break;
		case MTK_GMAC3_ID:
			sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));
			mtk_w32(mac->hw,
				sts | MTK_XGMAC_FORCE_LINK(mac->id),
				MTK_XGMAC_STS(mac->id));
			break;
		}

		mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));

		mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC);
		/* Configure pause modes -
		 * phylink will avoid these for half duplex
		 */
		if (tx_pause)
			mcr |= XMAC_MCR_FORCE_TX_FC;
		if (rx_pause)
			mcr |= XMAC_MCR_FORCE_RX_FC;

		mcr &= ~(XMAC_MCR_TRX_DISABLE);
		mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
	}
	mtk_pse_port_link_set(mac, true);
}

static void mtk_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
	      phy_interface_mode_is_rgmii(state->interface)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
	       phy_interface_mode_is_8023z(state->interface))) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_XGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_XGMII)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_USXGMII)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_10GKR))) {
		linkmode_zero(supported);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_USXGMII:
	case PHY_INTERFACE_MODE_10GKR:
		phylink_set(mask, 10000baseKR_Full);
		phylink_set(mask, 10000baseT_Full);
		phylink_set(mask, 10000baseCR_Full);
		phylink_set(mask, 10000baseSR_Full);
		phylink_set(mask, 10000baseLR_Full);
		phylink_set(mask, 10000baseLRM_Full);
		phylink_set(mask, 10000baseER_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		phylink_set(mask, 1000baseT_Half);
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		phylink_set(mask, 5000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_TRGMII:
		phylink_set(mask, 1000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_XGMII:
		/* fall through */
	case PHY_INTERFACE_MODE_1000BASEX:
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 2500baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseT_Half);
		/* fall through */
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_REVMII:
	case PHY_INTERFACE_MODE_NA:
	default:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		break;
	}

	if (state->interface == PHY_INTERFACE_MODE_NA) {
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII)) {
			phylink_set(mask, 10000baseKR_Full);
			phylink_set(mask, 10000baseT_Full);
			phylink_set(mask, 10000baseCR_Full);
			phylink_set(mask, 10000baseSR_Full);
			phylink_set(mask, 10000baseLR_Full);
			phylink_set(mask, 10000baseLRM_Full);
			phylink_set(mask, 10000baseER_Full);
			phylink_set(mask, 1000baseKX_Full);
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
			phylink_set(mask, 2500baseT_Full);
			phylink_set(mask, 5000baseT_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
			phylink_set(mask, 1000baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
		}
	}

	if (mac->type == MTK_XGDM_TYPE) {
		phylink_clear(mask, 10baseT_Half);
		phylink_clear(mask, 100baseT_Half);
		phylink_clear(mask, 1000baseT_Half);
	}

	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}

static const struct phylink_mac_ops mtk_phylink_ops = {
	.validate = mtk_validate,
	.mac_select_pcs = mtk_mac_select_pcs,
	.mac_link_state = mtk_mac_pcs_get_state,
	.mac_config = mtk_mac_config,
	.mac_finish = mtk_mac_finish,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
};

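/* Derive the MDC divider from the optional "clock-frequency" DT property.
 * The divider is DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), capped at 63; e.g.
 * with MDC_MAX_FREQ = 25 MHz (an assumption here, see mtk_eth_soc.h for
 * the actual value) and a requested 2.5 MHz bus clock the divider is 10.
 */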
static int mtk_mdc_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int max_clk = 2500000, divider;
	int ret = 0;
	u32 val;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
		if (val > MDC_MAX_FREQ ||
		    val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
			dev_err(eth->dev, "MDIO clock frequency out of range");
			ret = -EINVAL;
			goto err_put_node;
		}
		max_clk = val;
	}

	divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);

	/* Configure MDC Turbo Mode */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		val = mtk_r32(eth, MTK_MAC_MISC);
		val |= MISC_MDC_TURBO;
		mtk_w32(eth, val, MTK_MAC_MISC);
	} else {
		val = mtk_r32(eth, MTK_PPSC);
		val |= PPSC_MDC_TURBO;
		mtk_w32(eth, val, MTK_PPSC);
	}

	/* Configure MDC Divider */
	val = mtk_r32(eth, MTK_PPSC);
	val &= ~PPSC_MDC_CFG;
	val |= FIELD_PREP(PPSC_MDC_CFG, divider);
	mtk_w32(eth, val, MTK_PPSC);

	dev_info(eth->dev, "MDC is running at %d Hz\n", MDC_MAX_FREQ / divider);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->reset = mtk_mdio_reset;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	if (snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np) < 0) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

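/* TX/RX interrupt mask helpers. TX (QDMA) and RX (PDMA) each have their
 * own mask register and their own spinlock; the lock keeps the
 * read-modify-write of the mask register atomic against the IRQ and
 * NAPI paths.
 */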
static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

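/* The unicast filter address is split across two registers: the top two
 * octets go into the ADRH register and the remaining four into ADRL
 * (MT7628 uses its own SDM register pair, all other SoCs a per-GDMA
 * pair).
 */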
static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MT7628_SDM_MAC_ADRH);
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MT7628_SDM_MAC_ADRL);
	} else {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MTK_GDMA_MAC_ADRH(mac->id));
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MTK_GDMA_MAC_ADRL(mac->id));
	}
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

1408void mtk_stats_update_mac(struct mtk_mac *mac)
1409{
developer089e8852022-09-28 14:43:46 +08001410 struct mtk_eth *eth = mac->hw;
developer68ce74f2023-01-03 16:11:57 +08001411 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developerfd40db22021-04-29 10:08:25 +08001412 struct mtk_hw_stats *hw_stats = mac->hw_stats;
developer68ce74f2023-01-03 16:11:57 +08001413 unsigned int offs = hw_stats->reg_offset;
developerfd40db22021-04-29 10:08:25 +08001414 u64 stats;
1415
developerfd40db22021-04-29 10:08:25 +08001416 u64_stats_update_begin(&hw_stats->syncp);
1417
developer68ce74f2023-01-03 16:11:57 +08001418 hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
1419 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
developerfd40db22021-04-29 10:08:25 +08001420 if (stats)
1421 hw_stats->rx_bytes += (stats << 32);
developer68ce74f2023-01-03 16:11:57 +08001422 hw_stats->rx_packets +=
1423 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x08 + offs);
1424 hw_stats->rx_overflow +=
1425 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
1426 hw_stats->rx_fcs_errors +=
1427 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
1428 hw_stats->rx_short_errors +=
1429 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
1430 hw_stats->rx_long_errors +=
1431 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
1432 hw_stats->rx_checksum_errors +=
1433 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
developerfd40db22021-04-29 10:08:25 +08001434 hw_stats->rx_flow_control_packets +=
developer68ce74f2023-01-03 16:11:57 +08001435 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
developer089e8852022-09-28 14:43:46 +08001436
1437 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer68ce74f2023-01-03 16:11:57 +08001438 hw_stats->tx_skip +=
1439 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
1440 hw_stats->tx_collisions +=
1441 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
1442 hw_stats->tx_bytes +=
1443 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
1444 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
developer089e8852022-09-28 14:43:46 +08001445 if (stats)
1446 hw_stats->tx_bytes += (stats << 32);
developer68ce74f2023-01-03 16:11:57 +08001447 hw_stats->tx_packets +=
1448 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
developer089e8852022-09-28 14:43:46 +08001449 } else {
developer68ce74f2023-01-03 16:11:57 +08001450 hw_stats->tx_skip +=
1451 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
1452 hw_stats->tx_collisions +=
1453 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
1454 hw_stats->tx_bytes +=
1455 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
1456 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
developer089e8852022-09-28 14:43:46 +08001457 if (stats)
1458 hw_stats->tx_bytes += (stats << 32);
developer68ce74f2023-01-03 16:11:57 +08001459 hw_stats->tx_packets +=
1460 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
developer089e8852022-09-28 14:43:46 +08001461 }
developer68ce74f2023-01-03 16:11:57 +08001462
1463 u64_stats_update_end(&hw_stats->syncp);
developerfd40db22021-04-29 10:08:25 +08001464}
1465
static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

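/* Snapshot a hardware RX descriptor into @rxd. rxd2 carries the DMA done
 * bit and is read first; if the descriptor is still owned by hardware,
 * the snapshot is abandoned. The v2 words (rxd5-rxd7) only exist on
 * NETSYS_RX_V2-capable SoCs.
 */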
static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
			    struct mtk_rx_dma_v2 *dma_rxd)
{
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	if (!(rxd->rxd2 & RX_DMA_DONE))
		return false;

	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
		rxd->rxd7 = READ_ONCE(dma_rxd->rxd7);
	}

	return true;
}

/* the QDMA core needs scratch memory to be set up */
1555static int mtk_init_fq_dma(struct mtk_eth *eth)
1556{
developere9356982022-07-04 09:03:20 +08001557 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001558 dma_addr_t phy_ring_tail;
1559 int cnt = MTK_DMA_SIZE;
1560 dma_addr_t dma_addr;
1561 int i;
1562
1563 if (!eth->soc->has_sram) {
developer3f28d382023-03-07 16:06:30 +08001564 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
developere9356982022-07-04 09:03:20 +08001565 cnt * soc->txrx.txd_size,
developerfd40db22021-04-29 10:08:25 +08001566 &eth->phy_scratch_ring,
developere9356982022-07-04 09:03:20 +08001567 GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08001568 } else {
developer089e8852022-09-28 14:43:46 +08001569 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
1570 eth->scratch_ring = eth->sram_base;
1571 else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1572 eth->scratch_ring = eth->base + MTK_ETH_SRAM_OFFSET;
developerfd40db22021-04-29 10:08:25 +08001573 }
1574
1575 if (unlikely(!eth->scratch_ring))
1576 return -ENOMEM;
1577
developere9356982022-07-04 09:03:20 +08001578 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08001579 if (unlikely(!eth->scratch_head))
1580 return -ENOMEM;
1581
developer3f28d382023-03-07 16:06:30 +08001582 dma_addr = dma_map_single(eth->dma_dev,
developerfd40db22021-04-29 10:08:25 +08001583 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
1584 DMA_FROM_DEVICE);
developer3f28d382023-03-07 16:06:30 +08001585 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
developerfd40db22021-04-29 10:08:25 +08001586 return -ENOMEM;
1587
developer8b6f2402022-11-28 13:42:34 +08001588 phy_ring_tail = eth->phy_scratch_ring +
1589 (dma_addr_t)soc->txrx.txd_size * (cnt - 1);
developerfd40db22021-04-29 10:08:25 +08001590
1591 for (i = 0; i < cnt; i++) {
developere9356982022-07-04 09:03:20 +08001592 struct mtk_tx_dma_v2 *txd;
1593
1594 txd = eth->scratch_ring + i * soc->txrx.txd_size;
1595 txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
developerfd40db22021-04-29 10:08:25 +08001596 if (i < cnt - 1)
developere9356982022-07-04 09:03:20 +08001597 txd->txd2 = eth->phy_scratch_ring +
1598 (i + 1) * soc->txrx.txd_size;
developerfd40db22021-04-29 10:08:25 +08001599
developere9356982022-07-04 09:03:20 +08001600 txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1601 txd->txd4 = 0;
1602
developer089e8852022-09-28 14:43:46 +08001603 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1604 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developere9356982022-07-04 09:03:20 +08001605 txd->txd5 = 0;
1606 txd->txd6 = 0;
1607 txd->txd7 = 0;
1608 txd->txd8 = 0;
developerfd40db22021-04-29 10:08:25 +08001609 }
developerfd40db22021-04-29 10:08:25 +08001610 }
1611
developer68ce74f2023-01-03 16:11:57 +08001612 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1613 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1614 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1615 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
developerfd40db22021-04-29 10:08:25 +08001616
1617 return 0;
1618}
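/* A sketch of the scratch (free queue) layout built above: each
 * descriptor's txd2 points at the DMA address of the next one, and the
 * head/tail/count/blen registers hand the whole chain to the QDMA core:
 *
 *   fq_head -> txd[0] -> txd[1] -> ... -> txd[cnt - 1] <- fq_tail
 *   txd[i].txd1 -> i-th MTK_QDMA_PAGE_SIZE slice of scratch_head
 */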
1619
1620static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1621{
developere9356982022-07-04 09:03:20 +08001622 return ring->dma + (desc - ring->phys);
developerfd40db22021-04-29 10:08:25 +08001623}
1624
1625static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
developere9356982022-07-04 09:03:20 +08001626 void *txd, u32 txd_size)
developerfd40db22021-04-29 10:08:25 +08001627{
developere9356982022-07-04 09:03:20 +08001628 int idx = (txd - ring->dma) / txd_size;
developerfd40db22021-04-29 10:08:25 +08001629
1630 return &ring->buf[idx];
1631}
1632
1633static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
developere9356982022-07-04 09:03:20 +08001634 void *dma)
developerfd40db22021-04-29 10:08:25 +08001635{
1636 return ring->dma_pdma - ring->dma + dma;
1637}
1638
developere9356982022-07-04 09:03:20 +08001639static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
developerfd40db22021-04-29 10:08:25 +08001640{
developere9356982022-07-04 09:03:20 +08001641 return (dma - ring->dma) / txd_size;
developerfd40db22021-04-29 10:08:25 +08001642}
1643
developerc4671b22021-05-28 13:16:42 +08001644static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1645 bool napi)
developerfd40db22021-04-29 10:08:25 +08001646{
1647 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1648 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
developer3f28d382023-03-07 16:06:30 +08001649 dma_unmap_single(eth->dma_dev,
developerfd40db22021-04-29 10:08:25 +08001650 dma_unmap_addr(tx_buf, dma_addr0),
1651 dma_unmap_len(tx_buf, dma_len0),
1652 DMA_TO_DEVICE);
1653 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
developer3f28d382023-03-07 16:06:30 +08001654 dma_unmap_page(eth->dma_dev,
developerfd40db22021-04-29 10:08:25 +08001655 dma_unmap_addr(tx_buf, dma_addr0),
1656 dma_unmap_len(tx_buf, dma_len0),
1657 DMA_TO_DEVICE);
1658 }
1659 } else {
1660 if (dma_unmap_len(tx_buf, dma_len0)) {
developer3f28d382023-03-07 16:06:30 +08001661 dma_unmap_page(eth->dma_dev,
developerfd40db22021-04-29 10:08:25 +08001662 dma_unmap_addr(tx_buf, dma_addr0),
1663 dma_unmap_len(tx_buf, dma_len0),
1664 DMA_TO_DEVICE);
1665 }
1666
1667 if (dma_unmap_len(tx_buf, dma_len1)) {
developer3f28d382023-03-07 16:06:30 +08001668 dma_unmap_page(eth->dma_dev,
developerfd40db22021-04-29 10:08:25 +08001669 dma_unmap_addr(tx_buf, dma_addr1),
1670 dma_unmap_len(tx_buf, dma_len1),
1671 DMA_TO_DEVICE);
1672 }
1673 }
1674
1675 tx_buf->flags = 0;
1676 if (tx_buf->skb &&
developerc4671b22021-05-28 13:16:42 +08001677 (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
1678 if (napi)
1679 napi_consume_skb(tx_buf->skb, napi);
1680 else
1681 dev_kfree_skb_any(tx_buf->skb);
1682 }
developerfd40db22021-04-29 10:08:25 +08001683 tx_buf->skb = NULL;
1684}
1685
1686static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1687 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1688 size_t size, int idx)
1689{
1690 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1691 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1692 dma_unmap_len_set(tx_buf, dma_len0, size);
1693 } else {
1694 if (idx & 1) {
1695 txd->txd3 = mapped_addr;
1696 txd->txd2 |= TX_DMA_PLEN1(size);
1697 dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1698 dma_unmap_len_set(tx_buf, dma_len1, size);
1699 } else {
1700 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
1701 txd->txd1 = mapped_addr;
1702 txd->txd2 = TX_DMA_PLEN0(size);
1703 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1704 dma_unmap_len_set(tx_buf, dma_len0, size);
1705 }
1706 }
1707}
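/* PDMA packs two buffers into each descriptor: an even idx fills the
 * txd1/PLEN0 slot (and owns the tx_buf bookkeeping via the dummy skb),
 * while an odd idx fills the txd3/PLEN1 slot of the same descriptor.
 * QDMA always uses one buffer per descriptor, so only dma_addr0 is tracked.
 */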
1708
developere9356982022-07-04 09:03:20 +08001709static void mtk_tx_set_dma_desc_v1(struct sk_buff *skb, struct net_device *dev, void *txd,
1710 struct mtk_tx_dma_desc_info *info)
1711{
1712 struct mtk_mac *mac = netdev_priv(dev);
1713 struct mtk_eth *eth = mac->hw;
1714 struct mtk_tx_dma *desc = txd;
1715 u32 data;
1716
1717 WRITE_ONCE(desc->txd1, info->addr);
1718
1719 data = TX_DMA_SWC | QID_LOW_BITS(info->qid) | TX_DMA_PLEN0(info->size);
1720 if (info->last)
1721 data |= TX_DMA_LS0;
1722 WRITE_ONCE(desc->txd3, data);
1723
1724 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1725 data |= QID_HIGH_BITS(info->qid);
1726 if (info->first) {
1727 if (info->gso)
1728 data |= TX_DMA_TSO;
1729 /* tx checksum offload */
1730 if (info->csum)
1731 data |= TX_DMA_CHKSUM;
1732 /* vlan header offload */
1733 if (info->vlan)
1734 data |= TX_DMA_INS_VLAN | info->vlan_tci;
1735 }
1736
1737#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
1738 if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
1739 data &= ~(0x7 << TX_DMA_FPORT_SHIFT);
1740 data |= 0x4 << TX_DMA_FPORT_SHIFT;
1741 }
1742
1743 trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
1744 __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
1745#endif
1746 WRITE_ONCE(desc->txd4, data);
1747}
1748
1749static void mtk_tx_set_dma_desc_v2(struct sk_buff *skb, struct net_device *dev, void *txd,
1750 struct mtk_tx_dma_desc_info *info)
1751{
1752 struct mtk_mac *mac = netdev_priv(dev);
1753 struct mtk_eth *eth = mac->hw;
1754 struct mtk_tx_dma_v2 *desc = txd;
developerce08bca2022-10-06 16:21:13 +08001755 u32 data = 0;
1756
1757 if (!info->qid && mac->id)
1758 info->qid = MTK_QDMA_GMAC2_QID;
1759
1760 WRITE_ONCE(desc->txd1, info->addr);
1761
1762 data = TX_DMA_PLEN0(info->size);
1763 if (info->last)
1764 data |= TX_DMA_LS0;
1765 WRITE_ONCE(desc->txd3, data);
1766
1767 data = ((mac->id == MTK_GMAC3_ID) ?
1768 PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
1769 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1770#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
1771 if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
1772 data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
1773 data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
1774 }
1775
1776 trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
1777 __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
1778#endif
1779 WRITE_ONCE(desc->txd4, data);
1780
1781 data = 0;
1782 if (info->first) {
1783 if (info->gso)
1784 data |= TX_DMA_TSO_V2;
1785 /* tx checksum offload */
1786 if (info->csum)
1787 data |= TX_DMA_CHKSUM_V2;
1788 }
1789 WRITE_ONCE(desc->txd5, data);
1790
1791 data = 0;
1792 if (info->first && info->vlan)
1793 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1794 WRITE_ONCE(desc->txd6, data);
1795
1796 WRITE_ONCE(desc->txd7, 0);
1797 WRITE_ONCE(desc->txd8, 0);
1798}
1799
1800static void mtk_tx_set_dma_desc_v3(struct sk_buff *skb, struct net_device *dev, void *txd,
1801 struct mtk_tx_dma_desc_info *info)
1802{
1803 struct mtk_mac *mac = netdev_priv(dev);
1804 struct mtk_eth *eth = mac->hw;
1805 struct mtk_tx_dma_v2 *desc = txd;
developer089e8852022-09-28 14:43:46 +08001806 u64 addr64 = 0;
developere9356982022-07-04 09:03:20 +08001807 u32 data = 0;
developere9356982022-07-04 09:03:20 +08001808
developerce08bca2022-10-06 16:21:13 +08001809 if (!info->qid && mac->id)
developerb9463012022-09-14 10:28:45 +08001810 info->qid = MTK_QDMA_GMAC2_QID;
developere9356982022-07-04 09:03:20 +08001811
developer089e8852022-09-28 14:43:46 +08001812 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
1813 TX_DMA_SDP1(info->addr) : 0;
1814
developere9356982022-07-04 09:03:20 +08001815 WRITE_ONCE(desc->txd1, info->addr);
1816
1817 data = TX_DMA_PLEN0(info->size);
1818 if (info->last)
1819 data |= TX_DMA_LS0;
developer089e8852022-09-28 14:43:46 +08001820 WRITE_ONCE(desc->txd3, data | addr64);
developere9356982022-07-04 09:03:20 +08001821
developer089e8852022-09-28 14:43:46 +08001822 data = ((mac->id == MTK_GMAC3_ID) ?
1823 PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
developerb9463012022-09-14 10:28:45 +08001824 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
developere9356982022-07-04 09:03:20 +08001825#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
1826 if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
1827 data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
1828 data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
1829 }
1830
1831 trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
1832 __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
1833#endif
1834 WRITE_ONCE(desc->txd4, data);
1835
1836 data = 0;
1837 if (info->first) {
1838 if (info->gso)
1839 data |= TX_DMA_TSO_V2;
1840 /* tx checksum offload */
1841 if (info->csum)
1842 data |= TX_DMA_CHKSUM_V2;
developerce08bca2022-10-06 16:21:13 +08001843
1844 if (netdev_uses_dsa(dev))
1845 data |= TX_DMA_SPTAG_V3;
developere9356982022-07-04 09:03:20 +08001846 }
1847 WRITE_ONCE(desc->txd5, data);
1848
1849 data = 0;
1850 if (info->first && info->vlan)
1851 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1852 WRITE_ONCE(desc->txd6, data);
1853
1854 WRITE_ONCE(desc->txd7, 0);
1855 WRITE_ONCE(desc->txd8, 0);
1856}
1857
1858static void mtk_tx_set_dma_desc(struct sk_buff *skb, struct net_device *dev, void *txd,
1859 struct mtk_tx_dma_desc_info *info)
1860{
1861 struct mtk_mac *mac = netdev_priv(dev);
1862 struct mtk_eth *eth = mac->hw;
1863
developerce08bca2022-10-06 16:21:13 +08001864 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
1865 mtk_tx_set_dma_desc_v3(skb, dev, txd, info);
1866 else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developere9356982022-07-04 09:03:20 +08001867 mtk_tx_set_dma_desc_v2(skb, dev, txd, info);
1868 else
1869 mtk_tx_set_dma_desc_v1(skb, dev, txd, info);
1870}
1871
developerfd40db22021-04-29 10:08:25 +08001872static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1873 int tx_num, struct mtk_tx_ring *ring, bool gso)
1874{
developere9356982022-07-04 09:03:20 +08001875 struct mtk_tx_dma_desc_info txd_info = {
1876 .size = skb_headlen(skb),
1877 .qid = skb->mark & MTK_QDMA_TX_MASK,
1878 .gso = gso,
1879 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
1880 .vlan = skb_vlan_tag_present(skb),
1881 .vlan_tci = skb_vlan_tag_get(skb),
1882 .first = true,
1883 .last = !skb_is_nonlinear(skb),
1884 };
developerfd40db22021-04-29 10:08:25 +08001885 struct mtk_mac *mac = netdev_priv(dev);
1886 struct mtk_eth *eth = mac->hw;
developere9356982022-07-04 09:03:20 +08001887 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001888 struct mtk_tx_dma *itxd, *txd;
1889 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1890 struct mtk_tx_buf *itx_buf, *tx_buf;
developerfd40db22021-04-29 10:08:25 +08001891 int i, n_desc = 1;
developerfd40db22021-04-29 10:08:25 +08001892 int k = 0;
1893
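	/* the MAC requires a minimum frame length; pad short skbs up to
	 * MTK_MIN_TX_LENGTH and refresh txd_info.size afterwards
	 */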
developerb3a9e7b2023-02-08 15:18:10 +08001894 if (skb->len < 32) {
1895 if (skb_put_padto(skb, MTK_MIN_TX_LENGTH))
1896 return -ENOMEM;
1897
1898 txd_info.size = skb_headlen(skb);
1899 }
1900
developerfd40db22021-04-29 10:08:25 +08001901 itxd = ring->next_free;
1902 itxd_pdma = qdma_to_pdma(ring, itxd);
1903 if (itxd == ring->last_free)
1904 return -ENOMEM;
1905
developere9356982022-07-04 09:03:20 +08001906 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001907 memset(itx_buf, 0, sizeof(*itx_buf));
1908
developer3f28d382023-03-07 16:06:30 +08001909 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
developere9356982022-07-04 09:03:20 +08001910 DMA_TO_DEVICE);
developer3f28d382023-03-07 16:06:30 +08001911 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
developerfd40db22021-04-29 10:08:25 +08001912 return -ENOMEM;
1913
developere9356982022-07-04 09:03:20 +08001914 mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);
1915
developerfd40db22021-04-29 10:08:25 +08001916 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
developer089e8852022-09-28 14:43:46 +08001917 itx_buf->flags |= (mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
1918 (mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
1919 MTK_TX_FLAGS_FPORT2;
developere9356982022-07-04 09:03:20 +08001920 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
developerfd40db22021-04-29 10:08:25 +08001921 k++);
1922
developerfd40db22021-04-29 10:08:25 +08001923 /* TX SG offload */
1924 txd = itxd;
1925 txd_pdma = qdma_to_pdma(ring, txd);
1926
developere9356982022-07-04 09:03:20 +08001927 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
developerfd40db22021-04-29 10:08:25 +08001928 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1929 unsigned int offset = 0;
1930 int frag_size = skb_frag_size(frag);
1931
1932 while (frag_size) {
developerfd40db22021-04-29 10:08:25 +08001933 bool new_desc = true;
1934
developere9356982022-07-04 09:03:20 +08001935 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
developerfd40db22021-04-29 10:08:25 +08001936 (i & 0x1)) {
1937 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1938 txd_pdma = qdma_to_pdma(ring, txd);
1939 if (txd == ring->last_free)
1940 goto err_dma;
1941
1942 n_desc++;
1943 } else {
1944 new_desc = false;
1945 }
1946
developere9356982022-07-04 09:03:20 +08001947 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1948 txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
1949 txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
1950 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1951 !(frag_size - txd_info.size);
developer3f28d382023-03-07 16:06:30 +08001952 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
developere9356982022-07-04 09:03:20 +08001953 offset, txd_info.size,
1954 DMA_TO_DEVICE);
developer3f28d382023-03-07 16:06:30 +08001955 if (unlikely(dma_mapping_error(eth->dma_dev,
1956 txd_info.addr)))
developere9356982022-07-04 09:03:20 +08001957 goto err_dma;
developerfd40db22021-04-29 10:08:25 +08001958
developere9356982022-07-04 09:03:20 +08001959 mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);
developerfd40db22021-04-29 10:08:25 +08001960
developere9356982022-07-04 09:03:20 +08001961 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001962 if (new_desc)
1963 memset(tx_buf, 0, sizeof(*tx_buf));
1964 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
1965 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
developer089e8852022-09-28 14:43:46 +08001966 tx_buf->flags |=
1967 (mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
1968 (mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
1969 MTK_TX_FLAGS_FPORT2;
developerfd40db22021-04-29 10:08:25 +08001970
developere9356982022-07-04 09:03:20 +08001971 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1972 txd_info.size, k++);
developerfd40db22021-04-29 10:08:25 +08001973
developere9356982022-07-04 09:03:20 +08001974 frag_size -= txd_info.size;
1975 offset += txd_info.size;
developerfd40db22021-04-29 10:08:25 +08001976 }
1977 }
1978
1979	/* store the skb for later cleanup */
1980 itx_buf->skb = skb;
1981
developere9356982022-07-04 09:03:20 +08001982 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
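		/* k buffers were queued: if k is odd the last buffer landed in
		 * the PLEN0 slot (mark LS0), otherwise in the PLEN1 slot (LS1)
		 */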
developerfd40db22021-04-29 10:08:25 +08001983 if (k & 0x1)
1984 txd_pdma->txd2 |= TX_DMA_LS0;
1985 else
1986 txd_pdma->txd2 |= TX_DMA_LS1;
1987 }
1988
1989 netdev_sent_queue(dev, skb->len);
1990 skb_tx_timestamp(skb);
1991
1992 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1993 atomic_sub(n_desc, &ring->free_count);
1994
1995 /* make sure that all changes to the dma ring are flushed before we
1996 * continue
1997 */
1998 wmb();
1999
developere9356982022-07-04 09:03:20 +08002000 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
developerfd40db22021-04-29 10:08:25 +08002001 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
2002 !netdev_xmit_more())
developer68ce74f2023-01-03 16:11:57 +08002003 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
developerfd40db22021-04-29 10:08:25 +08002004 } else {
developere9356982022-07-04 09:03:20 +08002005 int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
developerfd40db22021-04-29 10:08:25 +08002006 ring->dma_size);
2007 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
2008 }
2009
2010 return 0;
2011
2012err_dma:
2013 do {
developere9356982022-07-04 09:03:20 +08002014 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08002015
2016 /* unmap dma */
developerc4671b22021-05-28 13:16:42 +08002017 mtk_tx_unmap(eth, tx_buf, false);
developerfd40db22021-04-29 10:08:25 +08002018
2019 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
developere9356982022-07-04 09:03:20 +08002020 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
developerfd40db22021-04-29 10:08:25 +08002021 itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
2022
2023 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
2024 itxd_pdma = qdma_to_pdma(ring, itxd);
2025 } while (itxd != txd);
2026
2027 return -ENOMEM;
2028}
2029
2030static inline int mtk_cal_txd_req(struct sk_buff *skb)
2031{
2032 int i, nfrags;
2033 skb_frag_t *frag;
2034
2035 nfrags = 1;
2036 if (skb_is_gso(skb)) {
2037 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2038 frag = &skb_shinfo(skb)->frags[i];
2039 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
2040 MTK_TX_DMA_BUF_LEN);
2041 }
2042 } else {
2043 nfrags += skb_shinfo(skb)->nr_frags;
2044 }
2045
2046 return nfrags;
2047}
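/* Illustrative example (buffer-limit value assumed): a TSO skb whose single
 * frag carries 64KB needs DIV_ROUND_UP(65536, MTK_TX_DMA_BUF_LEN)
 * descriptors for that frag plus one for the linear head, so
 * mtk_start_xmit() must see at least that many free ring entries before
 * mapping may proceed.
 */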
2048
2049static int mtk_queue_stopped(struct mtk_eth *eth)
2050{
2051 int i;
2052
2053 for (i = 0; i < MTK_MAC_COUNT; i++) {
2054 if (!eth->netdev[i])
2055 continue;
2056 if (netif_queue_stopped(eth->netdev[i]))
2057 return 1;
2058 }
2059
2060 return 0;
2061}
2062
2063static void mtk_wake_queue(struct mtk_eth *eth)
2064{
2065 int i;
2066
2067 for (i = 0; i < MTK_MAC_COUNT; i++) {
2068 if (!eth->netdev[i])
2069 continue;
2070 netif_wake_queue(eth->netdev[i]);
2071 }
2072}
2073
2074static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
2075{
2076 struct mtk_mac *mac = netdev_priv(dev);
2077 struct mtk_eth *eth = mac->hw;
2078 struct mtk_tx_ring *ring = &eth->tx_ring;
2079 struct net_device_stats *stats = &dev->stats;
2080 bool gso = false;
2081 int tx_num;
2082
2083	/* normally we can rely on the stack to serialize calls to this
2084	 * function; however, two queues run on the same ring, so the ring
2085	 * access needs explicit locking
2086	 */
2087 spin_lock(&eth->page_lock);
2088
2089 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
2090 goto drop;
2091
2092 tx_num = mtk_cal_txd_req(skb);
2093 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
2094 netif_stop_queue(dev);
2095 netif_err(eth, tx_queued, dev,
2096 "Tx Ring full when queue awake!\n");
2097 spin_unlock(&eth->page_lock);
2098 return NETDEV_TX_BUSY;
2099 }
2100
2101	/* TSO: the HW fetches the MSS from the TCP checksum field, so stash gso_size there */
2102 if (skb_is_gso(skb)) {
2103 if (skb_cow_head(skb, 0)) {
2104 netif_warn(eth, tx_err, dev,
2105 "GSO expand head fail.\n");
2106 goto drop;
2107 }
2108
2109 if (skb_shinfo(skb)->gso_type &
2110 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
2111 gso = true;
2112 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
2113 }
2114 }
2115
2116 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
2117 goto drop;
2118
2119 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
2120 netif_stop_queue(dev);
2121
2122 spin_unlock(&eth->page_lock);
2123
2124 return NETDEV_TX_OK;
2125
2126drop:
2127 spin_unlock(&eth->page_lock);
2128 stats->tx_dropped++;
2129 dev_kfree_skb_any(skb);
2130 return NETDEV_TX_OK;
2131}
2132
2133static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
2134{
2135 int i;
2136 struct mtk_rx_ring *ring;
2137 int idx;
2138
developerfd40db22021-04-29 10:08:25 +08002139 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
developere9356982022-07-04 09:03:20 +08002140 struct mtk_rx_dma *rxd;
2141
developer77d03a72021-06-06 00:06:00 +08002142 if (!IS_NORMAL_RING(i) && !IS_HW_LRO_RING(i))
2143 continue;
2144
developerfd40db22021-04-29 10:08:25 +08002145 ring = &eth->rx_ring[i];
2146 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
developere9356982022-07-04 09:03:20 +08002147 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
2148 if (rxd->rxd2 & RX_DMA_DONE) {
developerfd40db22021-04-29 10:08:25 +08002149 ring->calc_idx_update = true;
2150 return ring;
2151 }
2152 }
2153
2154 return NULL;
2155}
2156
developer18f46a82021-07-20 21:08:21 +08002157static void mtk_update_rx_cpu_idx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
developerfd40db22021-04-29 10:08:25 +08002158{
developerfd40db22021-04-29 10:08:25 +08002159 int i;
2160
developerfb556ca2021-10-13 10:52:09 +08002161 if (!eth->hwlro)
developerfd40db22021-04-29 10:08:25 +08002162 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
developerfb556ca2021-10-13 10:52:09 +08002163 else {
developerfd40db22021-04-29 10:08:25 +08002164 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
2165 ring = &eth->rx_ring[i];
2166 if (ring->calc_idx_update) {
2167 ring->calc_idx_update = false;
2168 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2169 }
2170 }
2171 }
2172}
2173
2174static int mtk_poll_rx(struct napi_struct *napi, int budget,
2175 struct mtk_eth *eth)
2176{
developer18f46a82021-07-20 21:08:21 +08002177 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
2178 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08002179 int idx;
2180 struct sk_buff *skb;
developer089e8852022-09-28 14:43:46 +08002181 u64 addr64 = 0;
developerfd40db22021-04-29 10:08:25 +08002182 u8 *data, *new_data;
developere9356982022-07-04 09:03:20 +08002183 struct mtk_rx_dma_v2 *rxd, trxd;
developerfd40db22021-04-29 10:08:25 +08002184 int done = 0;
2185
developer18f46a82021-07-20 21:08:21 +08002186 if (unlikely(!ring))
2187 goto rx_done;
2188
developerfd40db22021-04-29 10:08:25 +08002189 while (done < budget) {
developer68ce74f2023-01-03 16:11:57 +08002190 unsigned int pktlen, *rxdcsum;
developer006325c2022-10-06 16:39:50 +08002191 struct net_device *netdev = NULL;
developer8b6f2402022-11-28 13:42:34 +08002192 dma_addr_t dma_addr = 0;
developere9356982022-07-04 09:03:20 +08002193 int mac = 0;
developerfd40db22021-04-29 10:08:25 +08002194
developer18f46a82021-07-20 21:08:21 +08002195 if (eth->hwlro)
2196 ring = mtk_get_rx_ring(eth);
2197
developerfd40db22021-04-29 10:08:25 +08002198 if (unlikely(!ring))
2199 goto rx_done;
2200
2201 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
developere9356982022-07-04 09:03:20 +08002202 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
developerfd40db22021-04-29 10:08:25 +08002203 data = ring->data[idx];
2204
developere9356982022-07-04 09:03:20 +08002205 if (!mtk_rx_get_desc(eth, &trxd, rxd))
developerfd40db22021-04-29 10:08:25 +08002206 break;
2207
2208		/* find out which mac the packet comes from; values start at 1 */
2209 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2210 mac = 0;
2211 } else {
developer8ecd51b2023-03-13 11:28:28 +08002212 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
developer089e8852022-09-28 14:43:46 +08002213 switch (RX_DMA_GET_SPORT_V2(trxd.rxd5)) {
2214 case PSE_GDM1_PORT:
2215 case PSE_GDM2_PORT:
2216 mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
2217 break;
2218 case PSE_GDM3_PORT:
2219 mac = MTK_GMAC3_ID;
2220 break;
2221 }
2222 } else
developerfd40db22021-04-29 10:08:25 +08002223 mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ?
2224 0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
2225 }
2226
2227 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
2228 !eth->netdev[mac]))
2229 goto release_desc;
2230
2231 netdev = eth->netdev[mac];
2232
2233 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
2234 goto release_desc;
2235
2236 /* alloc new buffer */
2237 new_data = napi_alloc_frag(ring->frag_size);
2238 if (unlikely(!new_data)) {
2239 netdev->stats.rx_dropped++;
2240 goto release_desc;
2241 }
developer3f28d382023-03-07 16:06:30 +08002242 dma_addr = dma_map_single(eth->dma_dev,
developerfd40db22021-04-29 10:08:25 +08002243 new_data + NET_SKB_PAD +
2244 eth->ip_align,
2245 ring->buf_size,
2246 DMA_FROM_DEVICE);
developer3f28d382023-03-07 16:06:30 +08002247 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
developerfd40db22021-04-29 10:08:25 +08002248 skb_free_frag(new_data);
2249 netdev->stats.rx_dropped++;
2250 goto release_desc;
2251 }
2252
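		/* with 36-bit (8GB) addressing the high bits of the buffer
		 * address are carried in rxd2[3:0]; recombine them with rxd1
		 * before unmapping
		 */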
developer089e8852022-09-28 14:43:46 +08002253 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
2254 ((u64)(trxd.rxd2 & 0xf)) << 32 : 0;
2255
developer3f28d382023-03-07 16:06:30 +08002256 dma_unmap_single(eth->dma_dev,
developer089e8852022-09-28 14:43:46 +08002257 (u64)(trxd.rxd1 | addr64),
developerc4671b22021-05-28 13:16:42 +08002258 ring->buf_size, DMA_FROM_DEVICE);
2259
developerfd40db22021-04-29 10:08:25 +08002260 /* receive data */
2261 skb = build_skb(data, ring->frag_size);
2262 if (unlikely(!skb)) {
developerc4671b22021-05-28 13:16:42 +08002263 skb_free_frag(data);
developerfd40db22021-04-29 10:08:25 +08002264 netdev->stats.rx_dropped++;
developerc4671b22021-05-28 13:16:42 +08002265 goto skip_rx;
developerfd40db22021-04-29 10:08:25 +08002266 }
2267 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2268
developerfd40db22021-04-29 10:08:25 +08002269 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
2270 skb->dev = netdev;
2271 skb_put(skb, pktlen);
2272
developer8ecd51b2023-03-13 11:28:28 +08002273 if ((MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)))
developer68ce74f2023-01-03 16:11:57 +08002274 rxdcsum = &trxd.rxd3;
2275 else
2276 rxdcsum = &trxd.rxd4;
2277
2278 if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
developerfd40db22021-04-29 10:08:25 +08002279 skb->ip_summed = CHECKSUM_UNNECESSARY;
2280 else
2281 skb_checksum_none_assert(skb);
2282 skb->protocol = eth_type_trans(skb, netdev);
2283
2284 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
developer8ecd51b2023-03-13 11:28:28 +08002285 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
developer255bba22021-07-27 15:16:33 +08002286 if (trxd.rxd3 & RX_DMA_VTAG_V2)
developerfd40db22021-04-29 10:08:25 +08002287 __vlan_hwaccel_put_tag(skb,
developer255bba22021-07-27 15:16:33 +08002288 htons(RX_DMA_VPID_V2(trxd.rxd4)),
developerfd40db22021-04-29 10:08:25 +08002289 RX_DMA_VID_V2(trxd.rxd4));
2290 } else {
2291 if (trxd.rxd2 & RX_DMA_VTAG)
2292 __vlan_hwaccel_put_tag(skb,
2293 htons(RX_DMA_VPID(trxd.rxd3)),
2294 RX_DMA_VID(trxd.rxd3));
2295 }
2296
2297			/* If the netdev is attached to a DSA switch, the special
2298			 * tag inserted into the VLAN field by the switch hardware
2299			 * can be offloaded by RX HW VLAN offload. Clear the VLAN
2300			 * information from @skb to avoid an unexpected 802.1d
2301			 * handler before the packet enters the DSA framework.
2302			 */
2303 if (netdev_uses_dsa(netdev))
2304 __vlan_hwaccel_clear_tag(skb);
2305 }
2306
2307#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
developer8ecd51b2023-03-13 11:28:28 +08002308 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
developerfd40db22021-04-29 10:08:25 +08002309 *(u32 *)(skb->head) = trxd.rxd5;
2310 else
developerfd40db22021-04-29 10:08:25 +08002311 *(u32 *)(skb->head) = trxd.rxd4;
2312
2313 skb_hnat_alg(skb) = 0;
developerfdfe1572021-09-13 16:56:33 +08002314 skb_hnat_filled(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08002315 skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
2316
2317 if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
2318 trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
2319 __func__, skb_hnat_reason(skb));
2320 skb->pkt_type = PACKET_HOST;
2321 }
2322
2323 trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x\n",
2324 __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
2325 skb_hnat_reason(skb), skb_hnat_alg(skb));
2326#endif
developer77d03a72021-06-06 00:06:00 +08002327 if (mtk_hwlro_stats_ebl &&
2328 IS_HW_LRO_RING(ring->ring_no) && eth->hwlro) {
2329 hw_lro_stats_update(ring->ring_no, &trxd);
2330 hw_lro_flush_stats_update(ring->ring_no, &trxd);
2331 }
developerfd40db22021-04-29 10:08:25 +08002332
2333 skb_record_rx_queue(skb, 0);
2334 napi_gro_receive(napi, skb);
2335
developerc4671b22021-05-28 13:16:42 +08002336skip_rx:
developerfd40db22021-04-29 10:08:25 +08002337 ring->data[idx] = new_data;
2338 rxd->rxd1 = (unsigned int)dma_addr;
2339
2340release_desc:
developer089e8852022-09-28 14:43:46 +08002341 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
2342 RX_DMA_SDP1(dma_addr) : 0;
2343
developerfd40db22021-04-29 10:08:25 +08002344 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2345 rxd->rxd2 = RX_DMA_LSO;
2346 else
developer089e8852022-09-28 14:43:46 +08002347 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size) | addr64;
developerfd40db22021-04-29 10:08:25 +08002348
2349 ring->calc_idx = idx;
2350
2351 done++;
2352 }
2353
2354rx_done:
2355 if (done) {
2356 /* make sure that all changes to the dma ring are flushed before
2357 * we continue
2358 */
2359 wmb();
developer18f46a82021-07-20 21:08:21 +08002360 mtk_update_rx_cpu_idx(eth, ring);
developerfd40db22021-04-29 10:08:25 +08002361 }
2362
2363 return done;
2364}
2365
developerfb556ca2021-10-13 10:52:09 +08002366static void mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08002367 unsigned int *done, unsigned int *bytes)
2368{
developer68ce74f2023-01-03 16:11:57 +08002369 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developere9356982022-07-04 09:03:20 +08002370 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002371 struct mtk_tx_ring *ring = &eth->tx_ring;
2372 struct mtk_tx_dma *desc;
2373 struct sk_buff *skb;
2374 struct mtk_tx_buf *tx_buf;
2375 u32 cpu, dma;
2376
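	/* cpu is the release pointer (descriptors software has reclaimed so
	 * far), dma is the hardware's current transmit pointer; everything
	 * in between has completed and can be unmapped below
	 */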
developerc4671b22021-05-28 13:16:42 +08002377 cpu = ring->last_free_ptr;
developer68ce74f2023-01-03 16:11:57 +08002378 dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
developerfd40db22021-04-29 10:08:25 +08002379
2380 desc = mtk_qdma_phys_to_virt(ring, cpu);
2381
2382 while ((cpu != dma) && budget) {
2383 u32 next_cpu = desc->txd2;
2384 int mac = 0;
2385
2386 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2387 break;
2388
2389 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2390
developere9356982022-07-04 09:03:20 +08002391 tx_buf = mtk_desc_to_tx_buf(ring, desc, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08002392 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
developer089e8852022-09-28 14:43:46 +08002393 mac = MTK_GMAC2_ID;
2394 else if (tx_buf->flags & MTK_TX_FLAGS_FPORT2)
2395 mac = MTK_GMAC3_ID;
developerfd40db22021-04-29 10:08:25 +08002396
2397 skb = tx_buf->skb;
2398 if (!skb)
2399 break;
2400
2401 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
2402 bytes[mac] += skb->len;
2403 done[mac]++;
2404 budget--;
2405 }
developerc4671b22021-05-28 13:16:42 +08002406 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08002407
2408 ring->last_free = desc;
2409 atomic_inc(&ring->free_count);
2410
2411 cpu = next_cpu;
2412 }
2413
developerc4671b22021-05-28 13:16:42 +08002414 ring->last_free_ptr = cpu;
developer68ce74f2023-01-03 16:11:57 +08002415 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
developerfd40db22021-04-29 10:08:25 +08002416}
2417
developerfb556ca2021-10-13 10:52:09 +08002418static void mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08002419 unsigned int *done, unsigned int *bytes)
2420{
2421 struct mtk_tx_ring *ring = &eth->tx_ring;
2422 struct mtk_tx_dma *desc;
2423 struct sk_buff *skb;
2424 struct mtk_tx_buf *tx_buf;
2425 u32 cpu, dma;
2426
2427 cpu = ring->cpu_idx;
2428 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2429
2430 while ((cpu != dma) && budget) {
2431 tx_buf = &ring->buf[cpu];
2432 skb = tx_buf->skb;
2433 if (!skb)
2434 break;
2435
2436 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
2437 bytes[0] += skb->len;
2438 done[0]++;
2439 budget--;
2440 }
2441
developerc4671b22021-05-28 13:16:42 +08002442 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08002443
developere9356982022-07-04 09:03:20 +08002444 desc = ring->dma + cpu * eth->soc->txrx.txd_size;
developerfd40db22021-04-29 10:08:25 +08002445 ring->last_free = desc;
2446 atomic_inc(&ring->free_count);
2447
2448 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2449 }
2450
2451 ring->cpu_idx = cpu;
developerfd40db22021-04-29 10:08:25 +08002452}
2453
2454static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2455{
2456 struct mtk_tx_ring *ring = &eth->tx_ring;
2457 unsigned int done[MTK_MAX_DEVS];
2458 unsigned int bytes[MTK_MAX_DEVS];
2459 int total = 0, i;
2460
2461 memset(done, 0, sizeof(done));
2462 memset(bytes, 0, sizeof(bytes));
2463
2464 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
developerfb556ca2021-10-13 10:52:09 +08002465 mtk_poll_tx_qdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08002466 else
developerfb556ca2021-10-13 10:52:09 +08002467 mtk_poll_tx_pdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08002468
2469 for (i = 0; i < MTK_MAC_COUNT; i++) {
2470 if (!eth->netdev[i] || !done[i])
2471 continue;
2472 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
2473 total += done[i];
2474 }
2475
2476 if (mtk_queue_stopped(eth) &&
2477 (atomic_read(&ring->free_count) > ring->thresh))
2478 mtk_wake_queue(eth);
2479
2480 return total;
2481}
2482
2483static void mtk_handle_status_irq(struct mtk_eth *eth)
2484{
developer8051e042022-04-08 13:26:36 +08002485 u32 status2 = mtk_r32(eth, MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08002486
2487 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2488 mtk_stats_update(eth);
2489 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
developer8051e042022-04-08 13:26:36 +08002490 MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08002491 }
2492}
2493
2494static int mtk_napi_tx(struct napi_struct *napi, int budget)
2495{
2496 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
developer68ce74f2023-01-03 16:11:57 +08002497 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developerfd40db22021-04-29 10:08:25 +08002498 u32 status, mask;
2499 int tx_done = 0;
2500
2501 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2502 mtk_handle_status_irq(eth);
developer68ce74f2023-01-03 16:11:57 +08002503 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
developerfd40db22021-04-29 10:08:25 +08002504 tx_done = mtk_poll_tx(eth, budget);
2505
2506 if (unlikely(netif_msg_intr(eth))) {
developer68ce74f2023-01-03 16:11:57 +08002507 status = mtk_r32(eth, reg_map->tx_irq_status);
2508 mask = mtk_r32(eth, reg_map->tx_irq_mask);
developerfd40db22021-04-29 10:08:25 +08002509 dev_info(eth->dev,
2510 "done tx %d, intr 0x%08x/0x%x\n",
2511 tx_done, status, mask);
2512 }
2513
2514 if (tx_done == budget)
2515 return budget;
2516
developer68ce74f2023-01-03 16:11:57 +08002517 status = mtk_r32(eth, reg_map->tx_irq_status);
developerfd40db22021-04-29 10:08:25 +08002518 if (status & MTK_TX_DONE_INT)
2519 return budget;
2520
developerc4671b22021-05-28 13:16:42 +08002521 if (napi_complete(napi))
2522 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developerfd40db22021-04-29 10:08:25 +08002523
2524 return tx_done;
2525}
2526
2527static int mtk_napi_rx(struct napi_struct *napi, int budget)
2528{
developer18f46a82021-07-20 21:08:21 +08002529 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
2530 struct mtk_eth *eth = rx_napi->eth;
developer68ce74f2023-01-03 16:11:57 +08002531 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developer18f46a82021-07-20 21:08:21 +08002532 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08002533 u32 status, mask;
2534 int rx_done = 0;
2535 int remain_budget = budget;
2536
2537 mtk_handle_status_irq(eth);
2538
2539poll_again:
developer68ce74f2023-01-03 16:11:57 +08002540 mtk_w32(eth, MTK_RX_DONE_INT(ring->ring_no), reg_map->pdma.irq_status);
developerfd40db22021-04-29 10:08:25 +08002541 rx_done = mtk_poll_rx(napi, remain_budget, eth);
2542
2543 if (unlikely(netif_msg_intr(eth))) {
developer68ce74f2023-01-03 16:11:57 +08002544 status = mtk_r32(eth, reg_map->pdma.irq_status);
2545 mask = mtk_r32(eth, reg_map->pdma.irq_mask);
developerfd40db22021-04-29 10:08:25 +08002546 dev_info(eth->dev,
2547 "done rx %d, intr 0x%08x/0x%x\n",
2548 rx_done, status, mask);
2549 }
2550 if (rx_done == remain_budget)
2551 return budget;
2552
developer68ce74f2023-01-03 16:11:57 +08002553 status = mtk_r32(eth, reg_map->pdma.irq_status);
developer18f46a82021-07-20 21:08:21 +08002554 if (status & MTK_RX_DONE_INT(ring->ring_no)) {
developerfd40db22021-04-29 10:08:25 +08002555 remain_budget -= rx_done;
2556 goto poll_again;
2557 }
developerc4671b22021-05-28 13:16:42 +08002558
2559 if (napi_complete(napi))
developer18f46a82021-07-20 21:08:21 +08002560 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(ring->ring_no));
developerfd40db22021-04-29 10:08:25 +08002561
2562 return rx_done + budget - remain_budget;
2563}
2564
2565static int mtk_tx_alloc(struct mtk_eth *eth)
2566{
developere9356982022-07-04 09:03:20 +08002567 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002568 struct mtk_tx_ring *ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08002569 int i, sz = soc->txrx.txd_size;
2570 struct mtk_tx_dma_v2 *txd, *pdma_txd;
developerfd40db22021-04-29 10:08:25 +08002571
2572 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
2573 GFP_KERNEL);
2574 if (!ring->buf)
2575 goto no_tx_mem;
2576
2577 if (!eth->soc->has_sram)
developer3f28d382023-03-07 16:06:30 +08002578 ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08002579 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08002580 else {
developere9356982022-07-04 09:03:20 +08002581 ring->dma = eth->scratch_ring + MTK_DMA_SIZE * sz;
developer8b6f2402022-11-28 13:42:34 +08002582 ring->phys = eth->phy_scratch_ring +
2583 MTK_DMA_SIZE * (dma_addr_t)sz;
developerfd40db22021-04-29 10:08:25 +08002584 }
2585
2586 if (!ring->dma)
2587 goto no_tx_mem;
2588
2589 for (i = 0; i < MTK_DMA_SIZE; i++) {
2590 int next = (i + 1) % MTK_DMA_SIZE;
2591 u32 next_ptr = ring->phys + next * sz;
2592
developere9356982022-07-04 09:03:20 +08002593 txd = ring->dma + i * sz;
2594 txd->txd2 = next_ptr;
2595 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2596 txd->txd4 = 0;
2597
developer089e8852022-09-28 14:43:46 +08002598 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2599 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developere9356982022-07-04 09:03:20 +08002600 txd->txd5 = 0;
2601 txd->txd6 = 0;
2602 txd->txd7 = 0;
2603 txd->txd8 = 0;
2604 }
developerfd40db22021-04-29 10:08:25 +08002605 }
2606
2607 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
2608 * only as the framework. The real HW descriptors are the PDMA
2609 * descriptors in ring->dma_pdma.
2610 */
2611 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
developer3f28d382023-03-07 16:06:30 +08002612 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev,
2613 MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08002614 &ring->phys_pdma, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08002615 if (!ring->dma_pdma)
2616 goto no_tx_mem;
2617
2618 for (i = 0; i < MTK_DMA_SIZE; i++) {
developere9356982022-07-04 09:03:20 +08002619			pdma_txd = ring->dma_pdma + i * sz;
2620
2621 pdma_txd->txd2 = TX_DMA_DESP2_DEF;
2622 pdma_txd->txd4 = 0;
developerfd40db22021-04-29 10:08:25 +08002623 }
2624 }
2625
2626 ring->dma_size = MTK_DMA_SIZE;
2627 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
developere9356982022-07-04 09:03:20 +08002628 ring->next_free = ring->dma;
2629 ring->last_free = (void *)txd;
developerc4671b22021-05-28 13:16:42 +08002630 ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
developerfd40db22021-04-29 10:08:25 +08002631 ring->thresh = MAX_SKB_FRAGS;
2632
2633 /* make sure that all changes to the dma ring are flushed before we
2634 * continue
2635 */
2636 wmb();
2637
2638 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
developer68ce74f2023-01-03 16:11:57 +08002639 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2640 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
developerfd40db22021-04-29 10:08:25 +08002641 mtk_w32(eth,
2642 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
developer68ce74f2023-01-03 16:11:57 +08002643 soc->reg_map->qdma.crx_ptr);
2644 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
developerfd40db22021-04-29 10:08:25 +08002645 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
developer68ce74f2023-01-03 16:11:57 +08002646 soc->reg_map->qdma.qtx_cfg);
developerfd40db22021-04-29 10:08:25 +08002647 } else {
2648 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2649 mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
2650 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
developer68ce74f2023-01-03 16:11:57 +08002651 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
developerfd40db22021-04-29 10:08:25 +08002652 }
2653
2654 return 0;
2655
2656no_tx_mem:
2657 return -ENOMEM;
2658}
2659
2660static void mtk_tx_clean(struct mtk_eth *eth)
2661{
developere9356982022-07-04 09:03:20 +08002662 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002663 struct mtk_tx_ring *ring = &eth->tx_ring;
2664 int i;
2665
2666 if (ring->buf) {
2667 for (i = 0; i < MTK_DMA_SIZE; i++)
developerc4671b22021-05-28 13:16:42 +08002668 mtk_tx_unmap(eth, &ring->buf[i], false);
developerfd40db22021-04-29 10:08:25 +08002669 kfree(ring->buf);
2670 ring->buf = NULL;
2671 }
2672
2673 if (!eth->soc->has_sram && ring->dma) {
developer3f28d382023-03-07 16:06:30 +08002674 dma_free_coherent(eth->dma_dev,
developere9356982022-07-04 09:03:20 +08002675 MTK_DMA_SIZE * soc->txrx.txd_size,
2676 ring->dma, ring->phys);
developerfd40db22021-04-29 10:08:25 +08002677 ring->dma = NULL;
2678 }
2679
2680 if (ring->dma_pdma) {
developer3f28d382023-03-07 16:06:30 +08002681 dma_free_coherent(eth->dma_dev,
developere9356982022-07-04 09:03:20 +08002682 MTK_DMA_SIZE * soc->txrx.txd_size,
2683 ring->dma_pdma, ring->phys_pdma);
developerfd40db22021-04-29 10:08:25 +08002684 ring->dma_pdma = NULL;
2685 }
2686}
2687
2688static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2689{
developer68ce74f2023-01-03 16:11:57 +08002690 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developerfd40db22021-04-29 10:08:25 +08002691 struct mtk_rx_ring *ring;
2692 int rx_data_len, rx_dma_size;
2693 int i;
developer089e8852022-09-28 14:43:46 +08002694 u64 addr64 = 0;
developerfd40db22021-04-29 10:08:25 +08002695
2696 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2697 if (ring_no)
2698 return -EINVAL;
2699 ring = &eth->rx_ring_qdma;
2700 } else {
2701 ring = &eth->rx_ring[ring_no];
2702 }
2703
2704 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2705 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2706 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2707 } else {
2708 rx_data_len = ETH_DATA_LEN;
2709 rx_dma_size = MTK_DMA_SIZE;
2710 }
2711
2712 ring->frag_size = mtk_max_frag_size(rx_data_len);
2713 ring->buf_size = mtk_max_buf_size(ring->frag_size);
2714 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2715 GFP_KERNEL);
2716 if (!ring->data)
2717 return -ENOMEM;
2718
2719 for (i = 0; i < rx_dma_size; i++) {
2720 ring->data[i] = netdev_alloc_frag(ring->frag_size);
2721 if (!ring->data[i])
2722 return -ENOMEM;
2723 }
2724
2725	if (!eth->soc->has_sram ||
2726	    rx_flag != MTK_RX_FLAGS_NORMAL)
developer3f28d382023-03-07 16:06:30 +08002727 ring->dma = dma_alloc_coherent(eth->dma_dev,
developere9356982022-07-04 09:03:20 +08002728 rx_dma_size * eth->soc->txrx.rxd_size,
2729 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08002730 else {
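		/* SRAM layout (assumption inferred from the offsets below):
		 * the TX ring occupies the first MTK_DMA_SIZE descriptors and
		 * each RX ring is carved out of the region directly behind
		 * it, hence the (ring_no + 1) offset from the TX ring base
		 */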
2731 struct mtk_tx_ring *tx_ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08002732 ring->dma = tx_ring->dma + MTK_DMA_SIZE *
developer8ecd51b2023-03-13 11:28:28 +08002733 eth->soc->txrx.txd_size * (ring_no + 1);
developer18f46a82021-07-20 21:08:21 +08002734 ring->phys = tx_ring->phys + MTK_DMA_SIZE *
developer8ecd51b2023-03-13 11:28:28 +08002735 eth->soc->txrx.txd_size * (ring_no + 1);
developerfd40db22021-04-29 10:08:25 +08002736 }
2737
2738 if (!ring->dma)
2739 return -ENOMEM;
2740
2741 for (i = 0; i < rx_dma_size; i++) {
developere9356982022-07-04 09:03:20 +08002742 struct mtk_rx_dma_v2 *rxd;
2743
developer3f28d382023-03-07 16:06:30 +08002744 dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
developerfd40db22021-04-29 10:08:25 +08002745 ring->data[i] + NET_SKB_PAD + eth->ip_align,
2746 ring->buf_size,
2747 DMA_FROM_DEVICE);
developer3f28d382023-03-07 16:06:30 +08002748 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
developerfd40db22021-04-29 10:08:25 +08002749 return -ENOMEM;
developere9356982022-07-04 09:03:20 +08002750
2751 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2752 rxd->rxd1 = (unsigned int)dma_addr;
developerfd40db22021-04-29 10:08:25 +08002753
developer089e8852022-09-28 14:43:46 +08002754 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
2755 RX_DMA_SDP1(dma_addr) : 0;
2756
developerfd40db22021-04-29 10:08:25 +08002757 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
developere9356982022-07-04 09:03:20 +08002758 rxd->rxd2 = RX_DMA_LSO;
developerfd40db22021-04-29 10:08:25 +08002759 else
developer089e8852022-09-28 14:43:46 +08002760 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size) | addr64;
developerfd40db22021-04-29 10:08:25 +08002761
developere9356982022-07-04 09:03:20 +08002762 rxd->rxd3 = 0;
2763 rxd->rxd4 = 0;
2764
developer8ecd51b2023-03-13 11:28:28 +08002765 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
developere9356982022-07-04 09:03:20 +08002766 rxd->rxd5 = 0;
2767 rxd->rxd6 = 0;
2768 rxd->rxd7 = 0;
2769 rxd->rxd8 = 0;
developerfd40db22021-04-29 10:08:25 +08002770 }
developerfd40db22021-04-29 10:08:25 +08002771 }
2772 ring->dma_size = rx_dma_size;
2773 ring->calc_idx_update = false;
2774 ring->calc_idx = rx_dma_size - 1;
2775 ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ?
2776 MTK_QRX_CRX_IDX_CFG(ring_no) :
2777 MTK_PRX_CRX_IDX_CFG(ring_no);
developer77d03a72021-06-06 00:06:00 +08002778 ring->ring_no = ring_no;
developerfd40db22021-04-29 10:08:25 +08002779 /* make sure that all changes to the dma ring are flushed before we
2780 * continue
2781 */
2782 wmb();
2783
2784 if (rx_flag == MTK_RX_FLAGS_QDMA) {
developer68ce74f2023-01-03 16:11:57 +08002785 mtk_w32(eth, ring->phys,
2786 reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2787 mtk_w32(eth, rx_dma_size,
2788 reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2789 mtk_w32(eth, ring->calc_idx,
2790 ring->crx_idx_reg);
2791 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2792 reg_map->qdma.rst_idx);
developerfd40db22021-04-29 10:08:25 +08002793 } else {
developer68ce74f2023-01-03 16:11:57 +08002794 mtk_w32(eth, ring->phys,
2795 reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2796 mtk_w32(eth, rx_dma_size,
2797 reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2798 mtk_w32(eth, ring->calc_idx,
2799 ring->crx_idx_reg);
2800 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2801 reg_map->pdma.rst_idx);
developerfd40db22021-04-29 10:08:25 +08002802 }
2803
2804 return 0;
2805}
2806
2807static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram)
2808{
2809 int i;
developer089e8852022-09-28 14:43:46 +08002810 u64 addr64 = 0;
developerfd40db22021-04-29 10:08:25 +08002811
2812 if (ring->data && ring->dma) {
2813 for (i = 0; i < ring->dma_size; i++) {
developere9356982022-07-04 09:03:20 +08002814 struct mtk_rx_dma *rxd;
2815
developerfd40db22021-04-29 10:08:25 +08002816 if (!ring->data[i])
2817 continue;
developere9356982022-07-04 09:03:20 +08002818
2819 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2820 if (!rxd->rxd1)
developerfd40db22021-04-29 10:08:25 +08002821 continue;
developere9356982022-07-04 09:03:20 +08002822
developer089e8852022-09-28 14:43:46 +08002823 addr64 = (MTK_HAS_CAPS(eth->soc->caps,
2824 MTK_8GB_ADDRESSING)) ?
2825 ((u64)(rxd->rxd2 & 0xf)) << 32 : 0;
2826
developer3f28d382023-03-07 16:06:30 +08002827 dma_unmap_single(eth->dma_dev,
developer089e8852022-09-28 14:43:46 +08002828 (u64)(rxd->rxd1 | addr64),
developerfd40db22021-04-29 10:08:25 +08002829 ring->buf_size,
2830 DMA_FROM_DEVICE);
2831 skb_free_frag(ring->data[i]);
2832 }
2833 kfree(ring->data);
2834 ring->data = NULL;
2835 }
2836
2837	if (in_sram)
2838 return;
2839
2840 if (ring->dma) {
developer3f28d382023-03-07 16:06:30 +08002841 dma_free_coherent(eth->dma_dev,
developere9356982022-07-04 09:03:20 +08002842 ring->dma_size * eth->soc->txrx.rxd_size,
developerfd40db22021-04-29 10:08:25 +08002843 ring->dma,
2844 ring->phys);
2845 ring->dma = NULL;
2846 }
2847}
2848
2849static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2850{
2851 int i;
developer77d03a72021-06-06 00:06:00 +08002852 u32 val;
developerfd40db22021-04-29 10:08:25 +08002853 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2854 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2855
2856	/* set LRO rings to auto-learn mode */
2857 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2858
2859 /* validate LRO ring */
2860 ring_ctrl_dw2 |= MTK_RING_VLD;
2861
2862 /* set AGE timer (unit: 20us) */
2863 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2864 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2865
2866 /* set max AGG timer (unit: 20us) */
2867 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2868
2869 /* set max LRO AGG count */
2870 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2871 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2872
developer77d03a72021-06-06 00:06:00 +08002873 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002874 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2875 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2876 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2877 }
2878
2879 /* IPv4 checksum update enable */
2880 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2881
2882 /* switch priority comparison to packet count mode */
2883 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2884
2885 /* bandwidth threshold setting */
2886 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2887
2888 /* auto-learn score delta setting */
developer77d03a72021-06-06 00:06:00 +08002889 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_LRO_ALT_SCORE_DELTA);
developerfd40db22021-04-29 10:08:25 +08002890
2891 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2892 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2893 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2894
developerfd40db22021-04-29 10:08:25 +08002895	/* the minimum remaining room of SDL0 in the RXD for LRO aggregation */
2896 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2897
developer8ecd51b2023-03-13 11:28:28 +08002898 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
developer77d03a72021-06-06 00:06:00 +08002899 val = mtk_r32(eth, MTK_PDMA_RX_CFG);
2900 mtk_w32(eth, val | (MTK_PDMA_LRO_SDL << MTK_RX_CFG_SDL_OFFSET),
2901 MTK_PDMA_RX_CFG);
2902
2903 lro_ctrl_dw0 |= MTK_PDMA_LRO_SDL << MTK_CTRL_DW0_SDL_OFFSET;
2904 } else {
2905 /* set HW LRO mode & the max aggregation count for rx packets */
2906 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2907 }
2908
developerfd40db22021-04-29 10:08:25 +08002909 /* enable HW LRO */
2910 lro_ctrl_dw0 |= MTK_LRO_EN;
2911
developer77d03a72021-06-06 00:06:00 +08002912	/* enable the cpu reason blacklist */
2913 lro_ctrl_dw0 |= MTK_LRO_CRSN_BNW;
2914
developerfd40db22021-04-29 10:08:25 +08002915 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2916 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2917
developer77d03a72021-06-06 00:06:00 +08002918	/* do not use the PPE cpu reason (blacklist all reasons) */
2919 mtk_w32(eth, 0xffffffff, MTK_PDMA_LRO_CTRL_DW1);
2920
developerfd40db22021-04-29 10:08:25 +08002921 return 0;
2922}
2923
2924static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2925{
2926 int i;
2927 u32 val;
2928
2929 /* relinquish lro rings, flush aggregated packets */
developer77d03a72021-06-06 00:06:00 +08002930 mtk_w32(eth, MTK_LRO_RING_RELINGUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
developerfd40db22021-04-29 10:08:25 +08002931
2932	/* wait for the relinquishment to complete */
2933 for (i = 0; i < 10; i++) {
2934 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
developer77d03a72021-06-06 00:06:00 +08002935 if (val & MTK_LRO_RING_RELINGUISH_DONE) {
developer8051e042022-04-08 13:26:36 +08002936 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08002937 continue;
2938 }
2939 break;
2940 }
2941
2942 /* invalidate lro rings */
developer77d03a72021-06-06 00:06:00 +08002943 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
developerfd40db22021-04-29 10:08:25 +08002944 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2945
2946 /* disable HW LRO */
2947 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2948}
2949
2950static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2951{
2952 u32 reg_val;
2953
developer8ecd51b2023-03-13 11:28:28 +08002954 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
developer77d03a72021-06-06 00:06:00 +08002955 idx += 1;
2956
developerfd40db22021-04-29 10:08:25 +08002957 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2958
2959 /* invalidate the IP setting */
2960 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2961
2962 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2963
2964 /* validate the IP setting */
2965 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2966}
2967
2968static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2969{
2970 u32 reg_val;
2971
developer8ecd51b2023-03-13 11:28:28 +08002972 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
developer77d03a72021-06-06 00:06:00 +08002973 idx += 1;
2974
developerfd40db22021-04-29 10:08:25 +08002975 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2976
2977 /* invalidate the IP setting */
2978 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2979
2980 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2981}
2982
2983static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2984{
2985 int cnt = 0;
2986 int i;
2987
2988 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2989 if (mac->hwlro_ip[i])
2990 cnt++;
2991 }
2992
2993 return cnt;
2994}
2995
2996static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2997 struct ethtool_rxnfc *cmd)
2998{
2999 struct ethtool_rx_flow_spec *fsp =
3000 (struct ethtool_rx_flow_spec *)&cmd->fs;
3001 struct mtk_mac *mac = netdev_priv(dev);
3002 struct mtk_eth *eth = mac->hw;
3003 int hwlro_idx;
3004
3005 if ((fsp->flow_type != TCP_V4_FLOW) ||
3006 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
3007 (fsp->location > 1))
3008 return -EINVAL;
3009
3010 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
3011 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
3012
3013 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3014
3015 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
3016
3017 return 0;
3018}
3019
3020static int mtk_hwlro_del_ipaddr(struct net_device *dev,
3021 struct ethtool_rxnfc *cmd)
3022{
3023 struct ethtool_rx_flow_spec *fsp =
3024 (struct ethtool_rx_flow_spec *)&cmd->fs;
3025 struct mtk_mac *mac = netdev_priv(dev);
3026 struct mtk_eth *eth = mac->hw;
3027 int hwlro_idx;
3028
3029 if (fsp->location > 1)
3030 return -EINVAL;
3031
3032 mac->hwlro_ip[fsp->location] = 0;
3033 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
3034
3035 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3036
3037 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
3038
3039 return 0;
3040}
3041
3042static void mtk_hwlro_netdev_disable(struct net_device *dev)
3043{
3044 struct mtk_mac *mac = netdev_priv(dev);
3045 struct mtk_eth *eth = mac->hw;
3046 int i, hwlro_idx;
3047
3048 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3049 mac->hwlro_ip[i] = 0;
3050 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
3051
3052 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
3053 }
3054
3055 mac->hwlro_ip_cnt = 0;
3056}
3057
3058static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
3059 struct ethtool_rxnfc *cmd)
3060{
3061 struct mtk_mac *mac = netdev_priv(dev);
3062 struct ethtool_rx_flow_spec *fsp =
3063 (struct ethtool_rx_flow_spec *)&cmd->fs;
3064
	if (fsp->location >= MTK_MAX_LRO_IP_CNT)
		return -EINVAL;

 /* only the TCP destination IPv4 address is meaningful; all other
  * fields are ignored
  */
3066 fsp->flow_type = TCP_V4_FLOW;
3067 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
3068 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
3069
3070 fsp->h_u.tcp_ip4_spec.ip4src = 0;
3071 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
3072 fsp->h_u.tcp_ip4_spec.psrc = 0;
3073 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
3074 fsp->h_u.tcp_ip4_spec.pdst = 0;
3075 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
3076 fsp->h_u.tcp_ip4_spec.tos = 0;
3077 fsp->m_u.tcp_ip4_spec.tos = 0xff;
3078
3079 return 0;
3080}
3081
3082static int mtk_hwlro_get_fdir_all(struct net_device *dev,
3083 struct ethtool_rxnfc *cmd,
3084 u32 *rule_locs)
3085{
3086 struct mtk_mac *mac = netdev_priv(dev);
3087 int cnt = 0;
3088 int i;
3089
3090 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3091 if (mac->hwlro_ip[i]) {
3092 rule_locs[cnt] = i;
3093 cnt++;
3094 }
3095 }
3096
3097 cmd->rule_cnt = cnt;
3098
3099 return 0;
3100}
3101
developerea49c302023-06-27 16:06:41 +08003102u32 mtk_rss_indr_table(struct mtk_rss_params *rss_params, int index)
developere3d0de22023-05-30 17:45:00 +08003103{
developerea49c302023-06-27 16:06:41 +08003104 u32 val = 0;
3105 int i;
developere3d0de22023-05-30 17:45:00 +08003106
developerea49c302023-06-27 16:06:41 +08003107 for (i = 16 * index; i < 16 * index + 16; i++)
3108 val |= (rss_params->indirection_table[i] << (2 * (i % 16)));
developere3d0de22023-05-30 17:45:00 +08003109
developerea49c302023-06-27 16:06:41 +08003110 return val;
developere3d0de22023-05-30 17:45:00 +08003111}
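
/* Worked example (illustrative): with rss_num == 4 the table entries
 * repeat 0,1,2,3,... and each register word packs sixteen 2-bit entries,
 * so mtk_rss_indr_table(rss_params, 0) returns 0xe4e4e4e4
 * (each byte is 0b11100100: entries 3,2,1,0 from MSB to LSB).
 */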
3112
developer18f46a82021-07-20 21:08:21 +08003113static int mtk_rss_init(struct mtk_eth *eth)
3114{
developerea49c302023-06-27 16:06:41 +08003115 struct mtk_rss_params *rss_params = &eth->rss_params;
3116 static u8 hash_key[MTK_RSS_HASH_KEYSIZE] = {
3117 0xfa, 0x01, 0xac, 0xbe, 0x3b, 0xb7, 0x42, 0x6a,
3118 0x0c, 0xf2, 0x30, 0x80, 0xa3, 0x2d, 0xcb, 0x77,
3119 0xb4, 0x30, 0x7b, 0xae, 0xcb, 0x2b, 0xca, 0xd0,
3120 0xb0, 0x8f, 0xa3, 0x43, 0x3d, 0x25, 0x67, 0x41,
3121 0xc2, 0x0e, 0x5b, 0x25, 0xda, 0x56, 0x5a, 0x6d};
developer18f46a82021-07-20 21:08:21 +08003122 u32 val;
developerea49c302023-06-27 16:06:41 +08003123 int i;
3124
3125 memcpy(rss_params->hash_key, hash_key, MTK_RSS_HASH_KEYSIZE);
3126
3127 for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE; i++)
3128 rss_params->indirection_table[i] = i % eth->soc->rss_num;
developer18f46a82021-07-20 21:08:21 +08003129
developer8ecd51b2023-03-13 11:28:28 +08003130 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
developer18f46a82021-07-20 21:08:21 +08003131 /* Set RSS rings to PSE modes */
3132 val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(1));
3133 val |= MTK_RING_PSE_MODE;
3134 mtk_w32(eth, val, MTK_LRO_CTRL_DW2_CFG(1));
3135
3136 /* Enable non-lro multiple rx */
3137 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
3138 val |= MTK_NON_LRO_MULTI_EN;
3139 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
3140
3141 /* Enable RSS delay interrupt support */
3142 val |= MTK_LRO_DLY_INT_EN;
3143 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
3144
3145 /* Set RSS delay interrupt configuration for ring1 */
3146 mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX1_DLY_INT);
3147 }
3148
3149 /* Hash Type */
3150 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
3151 val |= MTK_RSS_IPV4_STATIC_HASH;
3152 val |= MTK_RSS_IPV6_STATIC_HASH;
3153 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3154
developerea49c302023-06-27 16:06:41 +08003155 /* Hash Key */
3156 for (i = 0; i < MTK_RSS_HASH_KEYSIZE / sizeof(u32); i++)
3157 mtk_w32(eth, rss_params->hash_key[i], MTK_RSS_HASH_KEY_DW(i));
3158
developer18f46a82021-07-20 21:08:21 +08003159 /* Program the RSS indirection table */
developerea49c302023-06-27 16:06:41 +08003160 for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE / 16; i++)
3161 mtk_w32(eth, mtk_rss_indr_table(rss_params, i),
3162 MTK_RSS_INDR_TABLE_DW(i));
developer18f46a82021-07-20 21:08:21 +08003163
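	/* CFG_REQ is held while RSS_EN is toggled and released afterwards;
	 * mtk_rss_uninit() below follows the same pause/release handshake.
	 */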
3164 /* Pause */
3165 val |= MTK_RSS_CFG_REQ;
3166 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3167
3168 /* Enable RSS */
3169 val |= MTK_RSS_EN;
3170 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3171
3172 /* Release pause */
3173 val &= ~(MTK_RSS_CFG_REQ);
3174 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3175
3176 /* Assign each RSS ring to its own interrupt group */
developer94806ec2023-05-19 14:16:44 +08003177 mtk_w32(eth, MTK_RX_DONE_INT(MTK_RSS_RING(1)), MTK_PDMA_INT_GRP1);
3178 mtk_w32(eth, MTK_RX_DONE_INT(MTK_RSS_RING(2)), MTK_PDMA_INT_GRP2);
3179 mtk_w32(eth, MTK_RX_DONE_INT(MTK_RSS_RING(3)), MTK_PDMA_INT_GRP3);
developer18f46a82021-07-20 21:08:21 +08003180
3181 /* Set GRP INT */
developer94806ec2023-05-19 14:16:44 +08003182 mtk_w32(eth, 0x210FFFF2, MTK_FE_INT_GRP);
developer18f46a82021-07-20 21:08:21 +08003183
developer089e8852022-09-28 14:43:46 +08003184 /* Enable RSS delay interrupt */
3185 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_RSS_DELAY_INT);
3186
developer18f46a82021-07-20 21:08:21 +08003187 return 0;
3188}
3189
3190static void mtk_rss_uninit(struct mtk_eth *eth)
3191{
3192 u32 val;
3193
3194 /* Pause */
3195 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
3196 val |= MTK_RSS_CFG_REQ;
3197 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3198
3199 /* Disable RSS */
3200 val &= ~(MTK_RSS_EN);
3201 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3202
3203 /* Release pause */
3204 val &= ~(MTK_RSS_CFG_REQ);
3205 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3206}
3207
developerfd40db22021-04-29 10:08:25 +08003208static netdev_features_t mtk_fix_features(struct net_device *dev,
3209 netdev_features_t features)
3210{
3211 if (!(features & NETIF_F_LRO)) {
3212 struct mtk_mac *mac = netdev_priv(dev);
3213 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3214
3215 if (ip_cnt) {
3216 netdev_info(dev, "RX flows are programmed, LRO must stay enabled\n");
3217
3218 features |= NETIF_F_LRO;
3219 }
3220 }
3221
3222 if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
3223 netdev_info(dev, "TX VLAN offload cannot be enabled when DSA is attached\n");
3224
3225 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
3226 }
3227
3228 return features;
3229}
3230
3231static int mtk_set_features(struct net_device *dev, netdev_features_t features)
3232{
3233 struct mtk_mac *mac = netdev_priv(dev);
3234 struct mtk_eth *eth = mac->hw;
3235 int err = 0;
3236
3237 if (!((dev->features ^ features) & MTK_SET_FEATURES))
3238 return 0;
3239
3240 if (!(features & NETIF_F_LRO))
3241 mtk_hwlro_netdev_disable(dev);
3242
3243 if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
3244 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
3245 else
3246 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3247
3248 return err;
3249}
3250
3251/* wait for DMA to finish whatever it is doing before we start using it again */
3252static int mtk_dma_busy_wait(struct mtk_eth *eth)
3253{
3254 unsigned long t_start = jiffies;
3255
3256 while (1) {
3257 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3258 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
3259 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
3260 return 0;
3261 } else {
3262 if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
3263 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
3264 return 0;
3265 }
3266
3267 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
3268 break;
3269 }
3270
3271 dev_err(eth->dev, "DMA init timeout\n");
3272 return -1;
3273}
3274
3275static int mtk_dma_init(struct mtk_eth *eth)
3276{
3277 int err;
3278 u32 i;
3279
3280 if (mtk_dma_busy_wait(eth))
3281 return -EBUSY;
3282
3283 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3284 /* QDMA needs scratch memory for internal reordering of the
3285 * descriptors
3286 */
3287 err = mtk_init_fq_dma(eth);
3288 if (err)
3289 return err;
3290 }
3291
3292 err = mtk_tx_alloc(eth);
3293 if (err)
3294 return err;
3295
3296 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3297 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
3298 if (err)
3299 return err;
3300 }
3301
3302 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
3303 if (err)
3304 return err;
3305
3306 if (eth->hwlro) {
developer8ecd51b2023-03-13 11:28:28 +08003307 i = (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) ? 1 : 4;
developer77d03a72021-06-06 00:06:00 +08003308 for (; i < MTK_MAX_RX_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08003309 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
3310 if (err)
3311 return err;
3312 }
3313 err = mtk_hwlro_rx_init(eth);
3314 if (err)
3315 return err;
3316 }
3317
developer18f46a82021-07-20 21:08:21 +08003318 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3319 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3320 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_NORMAL);
3321 if (err)
3322 return err;
3323 }
3324 err = mtk_rss_init(eth);
3325 if (err)
3326 return err;
3327 }
3328
developerfd40db22021-04-29 10:08:25 +08003329 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3330 /* Enable random early drop and set drop threshold
3331 * automatically
3332 */
3333 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
developer68ce74f2023-01-03 16:11:57 +08003334 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
3335 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred2);
developerfd40db22021-04-29 10:08:25 +08003336 }
3337
3338 return 0;
3339}
3340
3341static void mtk_dma_free(struct mtk_eth *eth)
3342{
developere9356982022-07-04 09:03:20 +08003343 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08003344 int i;
3345
3346 for (i = 0; i < MTK_MAC_COUNT; i++)
3347 if (eth->netdev[i])
3348 netdev_reset_queue(eth->netdev[i]);
3349 if (!eth->soc->has_sram && eth->scratch_ring) {
developer3f28d382023-03-07 16:06:30 +08003350 dma_free_coherent(eth->dma_dev,
developere9356982022-07-04 09:03:20 +08003351 MTK_DMA_SIZE * soc->txrx.txd_size,
3352 eth->scratch_ring, eth->phy_scratch_ring);
developerfd40db22021-04-29 10:08:25 +08003353 eth->scratch_ring = NULL;
3354 eth->phy_scratch_ring = 0;
3355 }
3356 mtk_tx_clean(eth);
developerb3ce86f2022-06-30 13:31:47 +08003357 mtk_rx_clean(eth, &eth->rx_ring[0], eth->soc->has_sram);
developerfd40db22021-04-29 10:08:25 +08003358 mtk_rx_clean(eth, &eth->rx_ring_qdma, 0);
3359
3360 if (eth->hwlro) {
3361 mtk_hwlro_rx_uninit(eth);
developer77d03a72021-06-06 00:06:00 +08003362
developer04ba53d2023-08-25 14:48:33 +08003363 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) ? 4 : 1;
developer77d03a72021-06-06 00:06:00 +08003364 for (; i < MTK_MAX_RX_RING_NUM; i++)
3365 mtk_rx_clean(eth, &eth->rx_ring[i], 0);
developerfd40db22021-04-29 10:08:25 +08003366 }
3367
developer18f46a82021-07-20 21:08:21 +08003368 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3369 mtk_rss_uninit(eth);
3370
3371 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
3372 mtk_rx_clean(eth, &eth->rx_ring[i], 1);
3373 }
3374
developer94008d92021-09-23 09:47:41 +08003375 if (eth->scratch_head) {
3376 kfree(eth->scratch_head);
3377 eth->scratch_head = NULL;
3378 }
developerfd40db22021-04-29 10:08:25 +08003379}
3380
3381static void mtk_tx_timeout(struct net_device *dev)
3382{
3383 struct mtk_mac *mac = netdev_priv(dev);
3384 struct mtk_eth *eth = mac->hw;
3385
3386 eth->netdev[mac->id]->stats.tx_errors++;
3387 netif_err(eth, tx_err, dev,
3388 "transmit timed out\n");
developer8051e042022-04-08 13:26:36 +08003389
3390 if (atomic_read(&reset_lock) == 0)
3391 schedule_work(&eth->pending_work);
developerfd40db22021-04-29 10:08:25 +08003392}
3393
developer18f46a82021-07-20 21:08:21 +08003394static irqreturn_t mtk_handle_irq_rx(int irq, void *priv)
developerfd40db22021-04-29 10:08:25 +08003395{
developer18f46a82021-07-20 21:08:21 +08003396 struct mtk_napi *rx_napi = priv;
3397 struct mtk_eth *eth = rx_napi->eth;
3398 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08003399
developer18f46a82021-07-20 21:08:21 +08003400 if (likely(napi_schedule_prep(&rx_napi->napi))) {
developer18f46a82021-07-20 21:08:21 +08003401 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(ring->ring_no));
developer6bbe70d2021-08-06 09:34:55 +08003402 __napi_schedule(&rx_napi->napi);
developerfd40db22021-04-29 10:08:25 +08003403 }
3404
3405 return IRQ_HANDLED;
3406}
3407
3408static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3409{
3410 struct mtk_eth *eth = _eth;
3411
3412 if (likely(napi_schedule_prep(&eth->tx_napi))) {
developerfd40db22021-04-29 10:08:25 +08003413 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer6bbe70d2021-08-06 09:34:55 +08003414 __napi_schedule(&eth->tx_napi);
developerfd40db22021-04-29 10:08:25 +08003415 }
3416
3417 return IRQ_HANDLED;
3418}
3419
3420static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3421{
3422 struct mtk_eth *eth = _eth;
developer68ce74f2023-01-03 16:11:57 +08003423 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developerfd40db22021-04-29 10:08:25 +08003424
developer68ce74f2023-01-03 16:11:57 +08003425 if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT(0)) {
3426 if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT(0))
developer18f46a82021-07-20 21:08:21 +08003427 mtk_handle_irq_rx(irq, &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08003428 }
developer68ce74f2023-01-03 16:11:57 +08003429 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3430 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
developerfd40db22021-04-29 10:08:25 +08003431 mtk_handle_irq_tx(irq, _eth);
3432 }
3433
3434 return IRQ_HANDLED;
3435}
3436
developera2613e62022-07-01 18:29:37 +08003437static irqreturn_t mtk_handle_irq_fixed_link(int irq, void *_mac)
3438{
3439 struct mtk_mac *mac = _mac;
3440 struct mtk_eth *eth = mac->hw;
3441 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
3442 struct net_device *dev = phylink_priv->dev;
3443 int link_old, link_new;
3444
3445 /* clear interrupt status for GPY211 */
3446 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
3447
3448 link_old = phylink_priv->link;
3449 link_new = _mtk_mdio_read(eth, phylink_priv->phyaddr, MII_BMSR) & BMSR_LSTATUS;
3450
3451 if (link_old != link_new) {
3452 phylink_priv->link = link_new;
3453 if (link_new) {
3454 pr_info("phylink.%d %s: Link is Up\n", phylink_priv->id, dev ? dev->name : "?");
3455 if (dev)
3456 netif_carrier_on(dev);
3457 } else {
3458 pr_info("phylink.%d %s: Link is Down\n", phylink_priv->id, dev ? dev->name : "?");
3459 if (dev)
3460 netif_carrier_off(dev);
3461 }
3462 }
3463
3464 return IRQ_HANDLED;
3465}
3466
developerfd40db22021-04-29 10:08:25 +08003467#ifdef CONFIG_NET_POLL_CONTROLLER
3468static void mtk_poll_controller(struct net_device *dev)
3469{
3470 struct mtk_mac *mac = netdev_priv(dev);
3471 struct mtk_eth *eth = mac->hw;
3472
3473 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08003474 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
developer94806ec2023-05-19 14:16:44 +08003475 mtk_handle_irq_rx(eth->irq_fe[2], &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08003476 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08003477 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08003478}
3479#endif
3480
3481static int mtk_start_dma(struct mtk_eth *eth)
3482{
3483 u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
developer68ce74f2023-01-03 16:11:57 +08003484 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developer77d03a72021-06-06 00:06:00 +08003485 int val, err;
developerfd40db22021-04-29 10:08:25 +08003486
3487 err = mtk_dma_init(eth);
3488 if (err) {
3489 mtk_dma_free(eth);
3490 return err;
3491 }
3492
3493 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
developer68ce74f2023-01-03 16:11:57 +08003494 val = mtk_r32(eth, reg_map->qdma.glo_cfg);
developer089e8852022-09-28 14:43:46 +08003495 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
3496 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer19d84562022-04-21 17:01:06 +08003497 val &= ~MTK_RESV_BUF_MASK;
developerfd40db22021-04-29 10:08:25 +08003498 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08003499 val | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08003500 MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
3501 MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
3502 MTK_RESV_BUF | MTK_WCOMP_EN |
3503 MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
developer68ce74f2023-01-03 16:11:57 +08003504 MTK_RX_2B_OFFSET, reg_map->qdma.glo_cfg);
developer19d84562022-04-21 17:01:06 +08003505 }
developerfd40db22021-04-29 10:08:25 +08003506 else
3507 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08003508 val | MTK_TX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08003509 MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
3510 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
3511 MTK_RX_BT_32DWORDS,
developer68ce74f2023-01-03 16:11:57 +08003512 reg_map->qdma.glo_cfg);
developerfd40db22021-04-29 10:08:25 +08003513
developer68ce74f2023-01-03 16:11:57 +08003514 val = mtk_r32(eth, reg_map->pdma.glo_cfg);
developerfd40db22021-04-29 10:08:25 +08003515 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08003516 val | MTK_RX_DMA_EN | rx_2b_offset |
developerfd40db22021-04-29 10:08:25 +08003517 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
developer68ce74f2023-01-03 16:11:57 +08003518 reg_map->pdma.glo_cfg);
developerfd40db22021-04-29 10:08:25 +08003519 } else {
3520 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3521 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
developer68ce74f2023-01-03 16:11:57 +08003522 reg_map->pdma.glo_cfg);
developerfd40db22021-04-29 10:08:25 +08003523 }
3524
developer8ecd51b2023-03-13 11:28:28 +08003525 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2) && eth->hwlro) {
developer77d03a72021-06-06 00:06:00 +08003526 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
3527 mtk_w32(eth, val | MTK_RX_DMA_LRO_EN, MTK_PDMA_GLO_CFG);
3528 }
3529
developerfd40db22021-04-29 10:08:25 +08003530 return 0;
3531}
3532
developerdca0fde2022-12-14 11:40:35 +08003533void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
developerfd40db22021-04-29 10:08:25 +08003534{
developerdca0fde2022-12-14 11:40:35 +08003535 u32 val;
developerfd40db22021-04-29 10:08:25 +08003536
3537 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3538 return;
3539
developerdca0fde2022-12-14 11:40:35 +08003540 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));
developerfd40db22021-04-29 10:08:25 +08003541
developerdca0fde2022-12-14 11:40:35 +08003542 /* default setup the forward port to send frame to PDMA */
3543 val &= ~0xffff;
developerfd40db22021-04-29 10:08:25 +08003544
developerdca0fde2022-12-14 11:40:35 +08003545 /* Enable RX checksum */
3546 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
developerfd40db22021-04-29 10:08:25 +08003547
developerdca0fde2022-12-14 11:40:35 +08003548 val |= config;
developerfd40db22021-04-29 10:08:25 +08003549
developerdca0fde2022-12-14 11:40:35 +08003550 if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
3551 val |= MTK_GDMA_SPECIAL_TAG;
developerfd40db22021-04-29 10:08:25 +08003552
developerdca0fde2022-12-14 11:40:35 +08003553 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
developerfd40db22021-04-29 10:08:25 +08003554}
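
/* Illustrative use: mtk_open() below calls
 *
 *	mtk_gdm_config(eth, mac->id, MTK_GDMA_TO_PDMA);
 *
 * to forward ingress frames to the PDMA; HNAT offload would instead pass
 * a to-PPE forwarding value (cf. gdma_to_ppe0 in the reg map above),
 * assuming such a define exists in this tree.
 */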
3555
developer7cd7e5e2022-11-17 13:57:32 +08003556void mtk_set_pse_drop(u32 config)
3557{
3558 struct mtk_eth *eth = g_eth;
3559
3560 if (eth)
3561 mtk_w32(eth, config, PSE_PPE0_DROP);
3562}
3563EXPORT_SYMBOL(mtk_set_pse_drop);
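
/* Sketch of typical use by a cooperating module (values illustrative;
 * 0x300 matches the p8/p9 drop setting used elsewhere in this file):
 *
 *	mtk_set_pse_drop(0x300);	drop p8/p9 while the WDMA peer resets
 *	... peer reset ...
 *	mtk_set_pse_drop(0x0);		resume forwarding
 */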
3564
developerfd40db22021-04-29 10:08:25 +08003565static int mtk_open(struct net_device *dev)
3566{
3567 struct mtk_mac *mac = netdev_priv(dev);
3568 struct mtk_eth *eth = mac->hw;
developera2613e62022-07-01 18:29:37 +08003569 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
developer4e8a3fd2023-04-10 18:05:44 +08003570 u32 id = mtk_mac2xgmii_id(eth, mac->id);
developer18f46a82021-07-20 21:08:21 +08003571 int err, i;
developer3a5969e2022-02-09 15:36:36 +08003572 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08003573
3574 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3575 if (err) {
3576 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3577 err);
3578 return err;
3579 }
3580
3581 /* we run 2 netdevs on the same dma ring so we only bring it up once */
3582 if (!refcount_read(&eth->dma_refcnt)) {
3583 int err = mtk_start_dma(eth);
3584
3585 if (err)
3586 return err;
3587
developerfd40db22021-04-29 10:08:25 +08003588
3589 /* Indicates CDM to parse the MTK special tag from CPU */
3590 if (netdev_uses_dsa(dev)) {
3591 u32 val;
3592 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3593 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3594 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3595 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
3596 }
3597
3598 napi_enable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08003599 napi_enable(&eth->rx_napi[0].napi);
developerfd40db22021-04-29 10:08:25 +08003600 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08003601 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
3602
3603 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3604 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3605 napi_enable(&eth->rx_napi[i].napi);
3606 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(i));
3607 }
3608 }
3609
developerfd40db22021-04-29 10:08:25 +08003610 refcount_set(&eth->dma_refcnt, 1);
3611 }
3612 else
3613 refcount_inc(&eth->dma_refcnt);
3614
developera2613e62022-07-01 18:29:37 +08003615 if (phylink_priv->desc) {
3616 /* Notice: this programming sequence applies only to the GPY211
3617 * single PHY chip. For any other single PHY chip, you should:
3618 * 1. contact the PHY vendor for details on
3619 * - how to enable the link status change interrupt
3620 * - how to clear the interrupt source
3621 */
3622
3623 /* clear interrupt source for GPY211 */
3624 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
3625
3626 /* enable link status change interrupt for GPY211 */
3627 _mtk_mdio_write(eth, phylink_priv->phyaddr, 0x19, 0x0001);
3628
3629 phylink_priv->dev = dev;
3630
3631 /* override dev pointer for single PHY chip 0 */
3632 if (phylink_priv->id == 0) {
3633 struct net_device *tmp;
3634
3635 tmp = __dev_get_by_name(&init_net, phylink_priv->label);
3636 if (tmp)
3637 phylink_priv->dev = tmp;
3638 else
3639 phylink_priv->dev = NULL;
3640 }
3641 }
3642
developerfd40db22021-04-29 10:08:25 +08003643 phylink_start(mac->phylink);
3644 netif_start_queue(dev);
developer3a5969e2022-02-09 15:36:36 +08003645 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer4e8a3fd2023-04-10 18:05:44 +08003646 if (!phy_node && eth->sgmii->pcs[id].regmap)
3647 regmap_write(eth->sgmii->pcs[id].regmap,
3648 SGMSYS_QPHY_PWR_STATE_CTRL, 0);
developer089e8852022-09-28 14:43:46 +08003649
developerdca0fde2022-12-14 11:40:35 +08003650 mtk_gdm_config(eth, mac->id, MTK_GDMA_TO_PDMA);
3651
developerfd40db22021-04-29 10:08:25 +08003652 return 0;
3653}
3654
3655static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3656{
3657 u32 val;
3658 int i;
3659
3660 /* stop the dma engine */
3661 spin_lock_bh(&eth->page_lock);
3662 val = mtk_r32(eth, glo_cfg);
3663 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3664 glo_cfg);
3665 spin_unlock_bh(&eth->page_lock);
3666
3667 /* wait for dma stop */
3668 for (i = 0; i < 10; i++) {
3669 val = mtk_r32(eth, glo_cfg);
3670 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
developer8051e042022-04-08 13:26:36 +08003671 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08003672 continue;
3673 }
3674 break;
3675 }
3676}
3677
3678static int mtk_stop(struct net_device *dev)
3679{
3680 struct mtk_mac *mac = netdev_priv(dev);
3681 struct mtk_eth *eth = mac->hw;
developer18f46a82021-07-20 21:08:21 +08003682 int i;
developer4e8a3fd2023-04-10 18:05:44 +08003683 u32 id = mtk_mac2xgmii_id(eth, mac->id);
developer3a5969e2022-02-09 15:36:36 +08003684 u32 val = 0;
3685 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08003686
developerdca0fde2022-12-14 11:40:35 +08003687 mtk_gdm_config(eth, mac->id, MTK_GDMA_DROP_ALL);
developerfd40db22021-04-29 10:08:25 +08003688 netif_tx_disable(dev);
3689
developer3a5969e2022-02-09 15:36:36 +08003690 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer4e8a3fd2023-04-10 18:05:44 +08003691 if (!phy_node && eth->sgmii->pcs[id].regmap) {
3692 regmap_read(eth->sgmii->pcs[id].regmap,
3693 SGMSYS_QPHY_PWR_STATE_CTRL, &val);
developer3a5969e2022-02-09 15:36:36 +08003694 val |= SGMII_PHYA_PWD;
developer4e8a3fd2023-04-10 18:05:44 +08003695 regmap_write(eth->sgmii->pcs[id].regmap,
3696 SGMSYS_QPHY_PWR_STATE_CTRL, val);
developer3a5969e2022-02-09 15:36:36 +08003697 }
3698
3699 /* disable GMAC RX */
3700 val = mtk_r32(eth, MTK_MAC_MCR(mac->id));
3701 mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(mac->id));
3702
3703 phylink_stop(mac->phylink);
3704
developerfd40db22021-04-29 10:08:25 +08003705 phylink_disconnect_phy(mac->phylink);
3706
3707 /* only shutdown DMA if this is the last user */
3708 if (!refcount_dec_and_test(&eth->dma_refcnt))
3709 return 0;
3710
developerfd40db22021-04-29 10:08:25 +08003711
3712 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08003713 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08003714 napi_disable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08003715 napi_disable(&eth->rx_napi[0].napi);
3716
3717 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3718 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3719 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(i));
3720 napi_disable(&eth->rx_napi[i].napi);
3721 }
3722 }
developerfd40db22021-04-29 10:08:25 +08003723
3724 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
developer68ce74f2023-01-03 16:11:57 +08003725 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3726 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
developerfd40db22021-04-29 10:08:25 +08003727
3728 mtk_dma_free(eth);
3729
3730 return 0;
3731}
3732
developer8051e042022-04-08 13:26:36 +08003733void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
developerfd40db22021-04-29 10:08:25 +08003734{
developer8051e042022-04-08 13:26:36 +08003735 u32 val = 0, i = 0;
developerfd40db22021-04-29 10:08:25 +08003736
developerfd40db22021-04-29 10:08:25 +08003737 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
developer8051e042022-04-08 13:26:36 +08003738 reset_bits, reset_bits);
3739
3740 while (i++ < 5000) {
3741 mdelay(1);
3742 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3743
3744 if ((val & reset_bits) == reset_bits) {
3745 mtk_reset_event_update(eth, MTK_EVENT_COLD_CNT);
3746 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3747 reset_bits, ~reset_bits);
3748 break;
3749 }
3750 }
3751
developerfd40db22021-04-29 10:08:25 +08003752 mdelay(10);
3753}
3754
3755static void mtk_clk_disable(struct mtk_eth *eth)
3756{
3757 int clk;
3758
3759 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3760 clk_disable_unprepare(eth->clks[clk]);
3761}
3762
3763static int mtk_clk_enable(struct mtk_eth *eth)
3764{
3765 int clk, ret;
3766
3767 for (clk = 0; clk < MTK_CLK_MAX; clk++) {
3768 ret = clk_prepare_enable(eth->clks[clk]);
3769 if (ret)
3770 goto err_disable_clks;
3771 }
3772
3773 return 0;
3774
3775err_disable_clks:
3776 while (--clk >= 0)
3777 clk_disable_unprepare(eth->clks[clk]);
3778
3779 return ret;
3780}
3781
developer18f46a82021-07-20 21:08:21 +08003782static int mtk_napi_init(struct mtk_eth *eth)
3783{
3784 struct mtk_napi *rx_napi = &eth->rx_napi[0];
3785 int i;
3786
3787 rx_napi->eth = eth;
3788 rx_napi->rx_ring = &eth->rx_ring[0];
3789 rx_napi->irq_grp_no = 2;
3790
3791 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3792 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3793 rx_napi = &eth->rx_napi[i];
3794 rx_napi->eth = eth;
3795 rx_napi->rx_ring = &eth->rx_ring[i];
3796 rx_napi->irq_grp_no = 2 + i;
3797 }
3798 }
3799
3800 return 0;
3801}
3802
developer8051e042022-04-08 13:26:36 +08003803static int mtk_hw_init(struct mtk_eth *eth, u32 type)
developerfd40db22021-04-29 10:08:25 +08003804{
developer3f28d382023-03-07 16:06:30 +08003805 u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3806 ETHSYS_DMA_AG_MAP_PPE;
developer68ce74f2023-01-03 16:11:57 +08003807 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developer8051e042022-04-08 13:26:36 +08003808 int i, ret = 0;
developerdca0fde2022-12-14 11:40:35 +08003809 u32 val;
developerfd40db22021-04-29 10:08:25 +08003810
developer8051e042022-04-08 13:26:36 +08003811 pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
3812 atomic_read(&reset_lock), atomic_read(&force));
developerfd40db22021-04-29 10:08:25 +08003813
developer8051e042022-04-08 13:26:36 +08003814 if (atomic_read(&reset_lock) == 0) {
3815 if (test_and_set_bit(MTK_HW_INIT, &eth->state))
3816 return 0;
developerfd40db22021-04-29 10:08:25 +08003817
developer8051e042022-04-08 13:26:36 +08003818 pm_runtime_enable(eth->dev);
3819 pm_runtime_get_sync(eth->dev);
3820
3821 ret = mtk_clk_enable(eth);
3822 if (ret)
3823 goto err_disable_pm;
3824 }
developerfd40db22021-04-29 10:08:25 +08003825
developer3f28d382023-03-07 16:06:30 +08003826 if (eth->ethsys)
3827 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3828 of_dma_is_coherent(eth->dma_dev->of_node) *
3829 dma_mask);
3830
developerfd40db22021-04-29 10:08:25 +08003831 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3832 ret = device_reset(eth->dev);
3833 if (ret) {
3834 dev_err(eth->dev, "MAC reset failed!\n");
3835 goto err_disable_pm;
3836 }
3837
3838 /* enable interrupt delay for RX */
3839 mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
3840
3841 /* disable delay and normal interrupt */
3842 mtk_tx_irq_disable(eth, ~0);
3843 mtk_rx_irq_disable(eth, ~0);
3844
3845 return 0;
3846 }
3847
developer8051e042022-04-08 13:26:36 +08003848 pr_info("[%s] execute fe %s reset\n", __func__,
3849 (type == MTK_TYPE_WARM_RESET) ? "warm" : "cold");
developer545abf02021-07-15 17:47:01 +08003850
developer8051e042022-04-08 13:26:36 +08003851 if (type == MTK_TYPE_WARM_RESET)
3852 mtk_eth_warm_reset(eth);
developer545abf02021-07-15 17:47:01 +08003853 else
developer8051e042022-04-08 13:26:36 +08003854 mtk_eth_cold_reset(eth);
developer545abf02021-07-15 17:47:01 +08003855
developerc4d8da72023-03-16 14:37:28 +08003856 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3857 mtk_mdc_init(eth);
3858
developer8ecd51b2023-03-13 11:28:28 +08003859 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
developer545abf02021-07-15 17:47:01 +08003860 /* Set FE to PDMAv2 if necessary */
developerfd40db22021-04-29 10:08:25 +08003861 mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
developer545abf02021-07-15 17:47:01 +08003862 }
developerfd40db22021-04-29 10:08:25 +08003863
3864 if (eth->pctl) {
3865 /* Set GE2 driving and slew rate */
3866 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3867
3868 /* set GE2 TDSEL */
3869 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3870
3871 /* set GE2 TUNE */
3872 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3873 }
3874
3875 /* Set linkdown as the default for each GMAC. Its own MCR would be set
3876 * up with the more appropriate value when mtk_mac_config call is being
3877 * invoked.
3878 */
3879 for (i = 0; i < MTK_MAC_COUNT; i++)
3880 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3881
3882 /* Enable RX VLan Offloading */
developer41294e32021-05-07 16:11:23 +08003883 if (eth->soc->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
3884 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3885 else
3886 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
developerfd40db22021-04-29 10:08:25 +08003887
3888 /* enable interrupt delay for RX/TX */
3889 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
3890 mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);
3891
3892 mtk_tx_irq_disable(eth, ~0);
3893 mtk_rx_irq_disable(eth, ~0);
3894
3895 /* FE int grouping */
developer68ce74f2023-01-03 16:11:57 +08003896 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3897 mtk_w32(eth, MTK_RX_DONE_INT(0), reg_map->qdma.int_grp2);
developer94806ec2023-05-19 14:16:44 +08003898 mtk_w32(eth, 0x210FFFF2, MTK_FE_INT_GRP);
developerbe971722022-05-23 13:51:05 +08003899 mtk_w32(eth, MTK_FE_INT_TSO_FAIL |
developer8051e042022-04-08 13:26:36 +08003900 MTK_FE_INT_TSO_ILLEGAL | MTK_FE_INT_TSO_ALIGN |
3901 MTK_FE_INT_RFIFO_OV | MTK_FE_INT_RFIFO_UF, MTK_FE_INT_ENABLE);
developerfd40db22021-04-29 10:08:25 +08003902
developer089e8852022-09-28 14:43:46 +08003903 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer0fef5222023-04-26 14:48:31 +08003904 /* PSE dummy page mechanism */
3905 if (eth->soc->caps != MT7988_CAPS || eth->hwver != MTK_HWID_V1)
3906 mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) |
3907 PSE_DUMMY_WORK_GDM(2) | PSE_DUMMY_WORK_GDM(3) |
3908 DUMMY_PAGE_THR, PSE_DUMY_REQ);
3909
developer089e8852022-09-28 14:43:46 +08003910 /* PSE should not drop port1, port8 and port9 packets */
3911 mtk_w32(eth, 0x00000302, PSE_NO_DROP_CFG);
3912
developer15f760a2022-10-12 15:57:21 +08003913 /* PSE should drop p8 and p9 packets when WDMA Rx ring full*/
3914 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
3915
developer84d1e832022-11-24 11:25:05 +08003916 /* PSE free buffer drop threshold */
3917 mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));
3918
developer089e8852022-09-28 14:43:46 +08003919 /* GDM and CDM Threshold */
3920 mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
3921 mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
3922
developerdca0fde2022-12-14 11:40:35 +08003923 /* Disable GDM1 RX CRC stripping */
3924 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(0));
3925 val &= ~MTK_GDMA_STRP_CRC;
3926 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(0));
3927
developer089e8852022-09-28 14:43:46 +08003928 /* PSE GDM3 MIB counter has incorrect hw default values,
3929 * so the driver ought to read clear the values beforehand
3930 * in case ethtool retrieve wrong mib values.
3931 */
3932 for (i = 0; i < MTK_STAT_OFFSET; i += 0x4)
3933 mtk_r32(eth,
3934 MTK_GDM1_TX_GBCNT + MTK_STAT_OFFSET * 2 + i);
3935 } else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfef9efd2021-06-16 18:28:09 +08003936 /* PSE Free Queue Flow Control */
3937 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3938
developer459b78e2022-07-01 17:25:10 +08003939 /* PSE should not drop port8 and port9 packets from WDMA Tx */
3940 mtk_w32(eth, 0x00000300, PSE_NO_DROP_CFG);
3941
3942 /* PSE should drop p8 and p9 packets when WDMA Rx ring full*/
3943 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
developer81bcad32021-07-15 14:14:38 +08003944
developerfef9efd2021-06-16 18:28:09 +08003945 /* PSE config input queue threshold */
developerfd40db22021-04-29 10:08:25 +08003946 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3947 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3948 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3949 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3950 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3951 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3952 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
developerfd5f9152022-01-05 16:29:42 +08003953 mtk_w32(eth, 0x002a000e, PSE_IQ_REV(8));
developerfd40db22021-04-29 10:08:25 +08003954
developerfef9efd2021-06-16 18:28:09 +08003955 /* PSE config output queue threshold */
developerfd40db22021-04-29 10:08:25 +08003956 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3957 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3958 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3959 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3960 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3961 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3962 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3963 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
developerfef9efd2021-06-16 18:28:09 +08003964
3965 /* GDM and CDM Threshold */
3966 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3967 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3968 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3969 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3970 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3971 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
developerfd40db22021-04-29 10:08:25 +08003972 }
3973
3974 return 0;
3975
3976err_disable_pm:
3977 pm_runtime_put_sync(eth->dev);
3978 pm_runtime_disable(eth->dev);
3979
3980 return ret;
3981}
3982
3983static int mtk_hw_deinit(struct mtk_eth *eth)
3984{
3985 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
3986 return 0;
3987
3988 mtk_clk_disable(eth);
3989
3990 pm_runtime_put_sync(eth->dev);
3991 pm_runtime_disable(eth->dev);
3992
3993 return 0;
3994}
3995
3996static int __init mtk_init(struct net_device *dev)
3997{
3998 struct mtk_mac *mac = netdev_priv(dev);
3999 struct mtk_eth *eth = mac->hw;
4000 const char *mac_addr;
4001
4002 mac_addr = of_get_mac_address(mac->of_node);
4003 if (!IS_ERR(mac_addr))
4004 ether_addr_copy(dev->dev_addr, mac_addr);
4005
4006 /* If the mac address is invalid, use random mac address */
4007 if (!is_valid_ether_addr(dev->dev_addr)) {
4008 eth_hw_addr_random(dev);
4009 dev_err(eth->dev, "generated random MAC address %pM\n",
4010 dev->dev_addr);
4011 }
4012
4013 return 0;
4014}
4015
4016static void mtk_uninit(struct net_device *dev)
4017{
4018 struct mtk_mac *mac = netdev_priv(dev);
4019 struct mtk_eth *eth = mac->hw;
4020
4021 phylink_disconnect_phy(mac->phylink);
4022 mtk_tx_irq_disable(eth, ~0);
4023 mtk_rx_irq_disable(eth, ~0);
4024}
4025
4026static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4027{
4028 struct mtk_mac *mac = netdev_priv(dev);
4029
4030 switch (cmd) {
4031 case SIOCGMIIPHY:
4032 case SIOCGMIIREG:
4033 case SIOCSMIIREG:
4034 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
4035 default:
4036 /* fall back to the mtk_eth_dbg handler by default */
4037 return mtk_do_priv_ioctl(dev, ifr, cmd);
4039 }
4040
4041 return -EOPNOTSUPP;
4042}
4043
developer37482a42022-12-26 13:31:13 +08004044int mtk_phy_config(struct mtk_eth *eth, int enable)
4045{
4046 struct device_node *mii_np = NULL;
4047 struct device_node *child = NULL;
4048 int addr = 0;
4049 u32 val = 0;
4050
4051 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
4052 if (!mii_np) {
4053 dev_err(eth->dev, "no %s child node found\n", "mdio-bus");
4054 return -ENODEV;
4055 }
4056
4057 if (!of_device_is_available(mii_np)) {
4058 dev_err(eth->dev, "device is not available\n");
4059 return -ENODEV;
4060 }
4061
4062 for_each_available_child_of_node(mii_np, child) {
4063 addr = of_mdio_parse_addr(&eth->mii_bus->dev, child);
4064 if (addr < 0)
4065 continue;
4066 pr_info("%s %d addr:%d name:%s\n",
4067 __func__, __LINE__, addr, child->name);
4068 val = _mtk_mdio_read(eth, addr, mdiobus_c45_addr(0x1e, 0));
4069 if (enable)
4070 val &= ~BMCR_PDOWN;
4071 else
4072 val |= BMCR_PDOWN;
4073 _mtk_mdio_write(eth, addr, mdiobus_c45_addr(0x1e, 0), val);
4074 }
4075
4076 return 0;
4077}
4078
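/* FE system-error recovery (SER) flow: take the reset lock, notify peer
 * modules (MTK_FE_START_RESET or MTK_FE_STOP_TRAFFIC) and wait for their
 * SER handling, stop all MACs, warm-reset the frame engine via
 * mtk_hw_init(eth, MTK_TYPE_WARM_RESET), reopen the MACs, then broadcast
 * MTK_FE_RESET_DONE / MTK_FE_RESET_NAT_DONE so dependent modules can
 * reprogram themselves.
 */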
developerfd40db22021-04-29 10:08:25 +08004079static void mtk_pending_work(struct work_struct *work)
4080{
4081 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
developer8051e042022-04-08 13:26:36 +08004082 int err, i = 0;
developerfd40db22021-04-29 10:08:25 +08004083 unsigned long restart = 0;
developer8051e042022-04-08 13:26:36 +08004084 u32 val = 0;
4085
4086 atomic_inc(&reset_lock);
4087 val = mtk_r32(eth, MTK_FE_INT_STATUS);
4088 if (!mtk_check_reset_event(eth, val)) {
4089 atomic_dec(&reset_lock);
4090 pr_info("[%s] No need to do FE reset !\n", __func__);
4091 return;
4092 }
developerfd40db22021-04-29 10:08:25 +08004093
4094 rtnl_lock();
4095
developer37482a42022-12-26 13:31:13 +08004096 while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
4097 cpu_relax();
4098
4099 mtk_phy_config(eth, 0);
developer8051e042022-04-08 13:26:36 +08004100
4101 /* Adjust PPE configurations to prepare for reset */
4102 mtk_prepare_reset_ppe(eth, 0);
4103 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4104 mtk_prepare_reset_ppe(eth, 1);
4105
4106 /* Adjust FE configurations to prepare for reset */
4107 mtk_prepare_reset_fe(eth);
4108
4109 /* Trigger Wifi SER reset */
developer6bb3f3a2022-11-22 09:59:14 +08004110 for (i = 0; i < MTK_MAC_COUNT; i++) {
4111 if (!eth->netdev[i])
4112 continue;
developer37482a42022-12-26 13:31:13 +08004113 if (mtk_reset_flag == MTK_FE_STOP_TRAFFIC) {
4114 pr_info("send MTK_FE_STOP_TRAFFIC event\n");
4115 call_netdevice_notifiers(MTK_FE_STOP_TRAFFIC,
4116 eth->netdev[i]);
4117 } else {
4118 pr_info("send MTK_FE_START_RESET event\n");
4119 call_netdevice_notifiers(MTK_FE_START_RESET,
4120 eth->netdev[i]);
4121 }
developer6bb3f3a2022-11-22 09:59:14 +08004122 rtnl_unlock();
developer7979ddb2023-04-24 17:19:21 +08004123 if (!wait_for_completion_timeout(&wait_ser_done, 3000)) {
4124 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) &&
4125 (mtk_stop_fail)) {
4126 pr_info("send MTK_FE_START_RESET stop\n");
4127 rtnl_lock();
4128 call_netdevice_notifiers(MTK_FE_START_RESET,
4129 eth->netdev[i]);
4130 rtnl_unlock();
4131 if (!wait_for_completion_timeout(&wait_ser_done,
4132 3000))
4133 pr_warn("wait for MTK_FE_START_RESET\n");
4134 }
developer0baa6962023-01-31 14:25:23 +08004135 pr_warn("wait for MTK_FE_START_RESET\n");
developer7979ddb2023-04-24 17:19:21 +08004136 }
developer6bb3f3a2022-11-22 09:59:14 +08004137 rtnl_lock();
4138 break;
4139 }
developerfd40db22021-04-29 10:08:25 +08004140
developer8051e042022-04-08 13:26:36 +08004141 del_timer_sync(&eth->mtk_dma_monitor_timer);
4142 pr_info("[%s] mtk_stop starts!\n", __func__);
developerfd40db22021-04-29 10:08:25 +08004143 /* stop all devices to make sure that dma is properly shut down */
4144 for (i = 0; i < MTK_MAC_COUNT; i++) {
4145 if (!eth->netdev[i])
4146 continue;
4147 mtk_stop(eth->netdev[i]);
4148 __set_bit(i, &restart);
4149 }
developer8051e042022-04-08 13:26:36 +08004150 pr_info("[%s] mtk_stop ends !\n", __func__);
4151 mdelay(15);
developerfd40db22021-04-29 10:08:25 +08004152
4153 if (eth->dev->pins)
4154 pinctrl_select_state(eth->dev->pins->p,
4155 eth->dev->pins->default_state);
developer8051e042022-04-08 13:26:36 +08004156
4157 pr_info("[%s] mtk_hw_init starts!\n", __func__);
developer8051e042022-04-08 13:26:36 +08004158 mtk_hw_init(eth, MTK_TYPE_WARM_RESET);
4159 pr_info("[%s] mtk_hw_init ends!\n", __func__);
developerfd40db22021-04-29 10:08:25 +08004160
4161 /* restart DMA and enable IRQs */
4162 for (i = 0; i < MTK_MAC_COUNT; i++) {
developer6bb3f3a2022-11-22 09:59:14 +08004163 if (!test_bit(i, &restart) || !eth->netdev[i])
developerfd40db22021-04-29 10:08:25 +08004164 continue;
4165 err = mtk_open(eth->netdev[i]);
4166 if (err) {
4167 netif_alert(eth, ifup, eth->netdev[i],
4168 "Driver up/down cycle failed, closing device.\n");
4169 dev_close(eth->netdev[i]);
4170 }
4171 }
4172
developer8051e042022-04-08 13:26:36 +08004173 for (i = 0; i < MTK_MAC_COUNT; i++) {
developer6bb3f3a2022-11-22 09:59:14 +08004174 if (!eth->netdev[i])
4175 continue;
developer37482a42022-12-26 13:31:13 +08004176 if (mtk_reset_flag == MTK_FE_STOP_TRAFFIC) {
4177 pr_info("send MTK_FE_START_TRAFFIC event\n");
4178 call_netdevice_notifiers(MTK_FE_START_TRAFFIC,
4179 eth->netdev[i]);
4180 } else {
4181 pr_info("send MTK_FE_RESET_DONE event\n");
4182 call_netdevice_notifiers(MTK_FE_RESET_DONE,
4183 eth->netdev[i]);
developer8051e042022-04-08 13:26:36 +08004184 }
developer37482a42022-12-26 13:31:13 +08004185 call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE,
4186 eth->netdev[i]);
developer6bb3f3a2022-11-22 09:59:14 +08004187 break;
4188 }
developer8051e042022-04-08 13:26:36 +08004189
4190 atomic_dec(&reset_lock);
developer8051e042022-04-08 13:26:36 +08004191
4192 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
4193 eth->mtk_dma_monitor_timer.expires = jiffies;
4194 add_timer(&eth->mtk_dma_monitor_timer);
developer37482a42022-12-26 13:31:13 +08004195
4196 mtk_phy_config(eth, 1);
4197 mtk_reset_flag = 0;
developerfd40db22021-04-29 10:08:25 +08004198 clear_bit_unlock(MTK_RESETTING, &eth->state);
4199
4200 rtnl_unlock();
4201}
4202
4203static int mtk_free_dev(struct mtk_eth *eth)
4204{
4205 int i;
4206
4207 for (i = 0; i < MTK_MAC_COUNT; i++) {
4208 if (!eth->netdev[i])
4209 continue;
4210 free_netdev(eth->netdev[i]);
4211 }
4212
4213 return 0;
4214}
4215
4216static int mtk_unreg_dev(struct mtk_eth *eth)
4217{
4218 int i;
4219
4220 for (i = 0; i < MTK_MAC_COUNT; i++) {
4221 if (!eth->netdev[i])
4222 continue;
4223 unregister_netdev(eth->netdev[i]);
4224 }
4225
4226 return 0;
4227}
4228
4229static int mtk_cleanup(struct mtk_eth *eth)
4230{
4231 mtk_unreg_dev(eth);
4232 mtk_free_dev(eth);
4233 cancel_work_sync(&eth->pending_work);
4234
4235 return 0;
4236}
4237
4238static int mtk_get_link_ksettings(struct net_device *ndev,
4239 struct ethtool_link_ksettings *cmd)
4240{
4241 struct mtk_mac *mac = netdev_priv(ndev);
4242
4243 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4244 return -EBUSY;
4245
4246 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4247}
4248
4249static int mtk_set_link_ksettings(struct net_device *ndev,
4250 const struct ethtool_link_ksettings *cmd)
4251{
4252 struct mtk_mac *mac = netdev_priv(ndev);
4253
4254 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4255 return -EBUSY;
4256
4257 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4258}
4259
4260static void mtk_get_drvinfo(struct net_device *dev,
4261 struct ethtool_drvinfo *info)
4262{
4263 struct mtk_mac *mac = netdev_priv(dev);
4264
4265 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4266 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4267 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4268}
4269
4270static u32 mtk_get_msglevel(struct net_device *dev)
4271{
4272 struct mtk_mac *mac = netdev_priv(dev);
4273
4274 return mac->hw->msg_enable;
4275}
4276
4277static void mtk_set_msglevel(struct net_device *dev, u32 value)
4278{
4279 struct mtk_mac *mac = netdev_priv(dev);
4280
4281 mac->hw->msg_enable = value;
4282}
4283
4284static int mtk_nway_reset(struct net_device *dev)
4285{
4286 struct mtk_mac *mac = netdev_priv(dev);
4287
4288 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4289 return -EBUSY;
4290
4291 if (!mac->phylink)
4292 return -ENOTSUPP;
4293
4294 return phylink_ethtool_nway_reset(mac->phylink);
4295}
4296
4297static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4298{
4299 int i;
4300
4301 switch (stringset) {
4302 case ETH_SS_STATS:
4303 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
4304 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
4305 data += ETH_GSTRING_LEN;
4306 }
4307 break;
4308 }
4309}
4310
4311static int mtk_get_sset_count(struct net_device *dev, int sset)
4312{
4313 switch (sset) {
4314 case ETH_SS_STATS:
4315 return ARRAY_SIZE(mtk_ethtool_stats);
4316 default:
4317 return -EOPNOTSUPP;
4318 }
4319}
4320
4321static void mtk_get_ethtool_stats(struct net_device *dev,
4322 struct ethtool_stats *stats, u64 *data)
4323{
4324 struct mtk_mac *mac = netdev_priv(dev);
4325 struct mtk_hw_stats *hwstats = mac->hw_stats;
4326 u64 *data_src, *data_dst;
4327 unsigned int start;
4328 int i;
4329
4330 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4331 return;
4332
4333 if (netif_running(dev) && netif_device_present(dev)) {
4334 if (spin_trylock_bh(&hwstats->stats_lock)) {
4335 mtk_stats_update_mac(mac);
4336 spin_unlock_bh(&hwstats->stats_lock);
4337 }
4338 }
4339
4340 data_src = (u64 *)hwstats;
4341
4342 do {
4343 data_dst = data;
4344 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
4345
4346 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4347 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4348 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
4349}
4350
4351static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4352 u32 *rule_locs)
4353{
developerea49c302023-06-27 16:06:41 +08004354 struct mtk_mac *mac = netdev_priv(dev);
4355 struct mtk_eth *eth = mac->hw;
developerfd40db22021-04-29 10:08:25 +08004356 int ret = -EOPNOTSUPP;
4357
4358 switch (cmd->cmd) {
4359 case ETHTOOL_GRXRINGS:
4360 if (dev->hw_features & NETIF_F_LRO) {
4361 cmd->data = MTK_MAX_RX_RING_NUM;
4362 ret = 0;
developerea49c302023-06-27 16:06:41 +08004363 } else if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
4364 cmd->data = eth->soc->rss_num;
4365 ret = 0;
developerfd40db22021-04-29 10:08:25 +08004366 }
4367 break;
4368 case ETHTOOL_GRXCLSRLCNT:
4369 if (dev->hw_features & NETIF_F_LRO) {
developerfd40db22021-04-29 10:08:25 +08004370 cmd->rule_cnt = mac->hwlro_ip_cnt;
4371 ret = 0;
4372 }
4373 break;
4374 case ETHTOOL_GRXCLSRULE:
4375 if (dev->hw_features & NETIF_F_LRO)
4376 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4377 break;
4378 case ETHTOOL_GRXCLSRLALL:
4379 if (dev->hw_features & NETIF_F_LRO)
4380 ret = mtk_hwlro_get_fdir_all(dev, cmd,
4381 rule_locs);
4382 break;
4383 default:
4384 break;
4385 }
4386
4387 return ret;
4388}
4389
4390static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4391{
4392 int ret = -EOPNOTSUPP;
4393
4394 switch (cmd->cmd) {
4395 case ETHTOOL_SRXCLSRLINS:
4396 if (dev->hw_features & NETIF_F_LRO)
4397 ret = mtk_hwlro_add_ipaddr(dev, cmd);
4398 break;
4399 case ETHTOOL_SRXCLSRLDEL:
4400 if (dev->hw_features & NETIF_F_LRO)
4401 ret = mtk_hwlro_del_ipaddr(dev, cmd);
4402 break;
4403 default:
4404 break;
4405 }
4406
4407 return ret;
4408}
4409
developerea49c302023-06-27 16:06:41 +08004410static u32 mtk_get_rxfh_key_size(struct net_device *dev)
4411{
4412 return MTK_RSS_HASH_KEYSIZE;
4413}
4414
4415static u32 mtk_get_rxfh_indir_size(struct net_device *dev)
4416{
4417 return MTK_RSS_MAX_INDIRECTION_TABLE;
4418}
4419
4420static int mtk_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
4421 u8 *hfunc)
4422{
4423 struct mtk_mac *mac = netdev_priv(dev);
4424 struct mtk_eth *eth = mac->hw;
4425 struct mtk_rss_params *rss_params = &eth->rss_params;
4426 int i;
4427
4428 if (hfunc)
4429 *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
4430
4431 if (key) {
4432 memcpy(key, rss_params->hash_key,
4433 sizeof(rss_params->hash_key));
4434 }
4435
4436 if (indir) {
4437 for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE; i++)
4438 indir[i] = rss_params->indirection_table[i];
4439 }
4440
4441 return 0;
4442}
4443
4444static int mtk_set_rxfh(struct net_device *dev, const u32 *indir,
4445 const u8 *key, const u8 hfunc)
4446{
4447 struct mtk_mac *mac = netdev_priv(dev);
4448 struct mtk_eth *eth = mac->hw;
4449 struct mtk_rss_params *rss_params = &eth->rss_params;
4450 int i;
4451
4452 if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
4453 hfunc != ETH_RSS_HASH_TOP)
4454 return -EOPNOTSUPP;
4455
4456 if (key) {
4457 memcpy(rss_params->hash_key, key,
4458 sizeof(rss_params->hash_key));
4459
4460 for (i = 0; i < MTK_RSS_HASH_KEYSIZE / sizeof(u32); i++)
4461 mtk_w32(eth, rss_params->hash_key[i],
4462 MTK_RSS_HASH_KEY_DW(i));
4463 }
4464
4465 if (indir) {
4466 for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE; i++)
4467 rss_params->indirection_table[i] = indir[i];
4468
4469 for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE / 16; i++)
4470 mtk_w32(eth, mtk_rss_indr_table(rss_params, i),
4471 MTK_RSS_INDR_TABLE_DW(i));
4472 }
4473
4474 return 0;
4475}
4476
developer6c5cbb52022-08-12 11:37:45 +08004477static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4478{
4479 struct mtk_mac *mac = netdev_priv(dev);
developerf2823bb2022-12-29 18:20:14 +08004480 struct mtk_eth *eth = mac->hw;
4481 u32 val;
4482
4483 pause->autoneg = 0;
4484
4485 if (mac->type == MTK_GDM_TYPE) {
4486 val = mtk_r32(eth, MTK_MAC_MCR(mac->id));
4487
4488 pause->rx_pause = !!(val & MAC_MCR_FORCE_RX_FC);
4489 pause->tx_pause = !!(val & MAC_MCR_FORCE_TX_FC);
4490 } else if (mac->type == MTK_XGDM_TYPE) {
4491 val = mtk_r32(eth, MTK_XMAC_MCR(mac->id));
developer6c5cbb52022-08-12 11:37:45 +08004492
developerf2823bb2022-12-29 18:20:14 +08004493 pause->rx_pause = !!(val & XMAC_MCR_FORCE_RX_FC);
4494 pause->tx_pause = !!(val & XMAC_MCR_FORCE_TX_FC);
4495 }
developer6c5cbb52022-08-12 11:37:45 +08004496}
4497
4498static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4499{
4500 struct mtk_mac *mac = netdev_priv(dev);
4501
4502 return phylink_ethtool_set_pauseparam(mac->phylink, pause);
4503}
4504
developer9b725932022-11-24 16:25:56 +08004505static int mtk_get_eee(struct net_device *dev, struct ethtool_eee *eee)
4506{
4507 struct mtk_mac *mac = netdev_priv(dev);
4508 struct mtk_eth *eth = mac->hw;
4509 u32 val;
4510
4511 if (mac->type == MTK_GDM_TYPE) {
4512 val = mtk_r32(eth, MTK_MAC_EEE(mac->id));
4513
4514 eee->tx_lpi_enabled = mac->tx_lpi_enabled;
4515 eee->tx_lpi_timer = FIELD_GET(MAC_EEE_LPI_TXIDLE_THD, val);
4516 }
4517
4518 return phylink_ethtool_get_eee(mac->phylink, eee);
4519}
4520
4521static int mtk_set_eee(struct net_device *dev, struct ethtool_eee *eee)
4522{
4523 struct mtk_mac *mac = netdev_priv(dev);
developer9b725932022-11-24 16:25:56 +08004524
4525 if (mac->type == MTK_GDM_TYPE) {
4526 if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
4527 return -EINVAL;
4528
4529 mac->tx_lpi_timer = eee->tx_lpi_timer;
4530
4531 mtk_setup_eee(mac, eee->eee_enabled && eee->tx_lpi_timer);
4532 }
4533
4534 return phylink_ethtool_set_eee(mac->phylink, eee);
4535}
4536
developerfd40db22021-04-29 10:08:25 +08004537static const struct ethtool_ops mtk_ethtool_ops = {
4538 .get_link_ksettings = mtk_get_link_ksettings,
4539 .set_link_ksettings = mtk_set_link_ksettings,
4540 .get_drvinfo = mtk_get_drvinfo,
4541 .get_msglevel = mtk_get_msglevel,
4542 .set_msglevel = mtk_set_msglevel,
4543 .nway_reset = mtk_nway_reset,
4544 .get_link = ethtool_op_get_link,
4545 .get_strings = mtk_get_strings,
4546 .get_sset_count = mtk_get_sset_count,
4547 .get_ethtool_stats = mtk_get_ethtool_stats,
4548 .get_rxnfc = mtk_get_rxnfc,
4549 .set_rxnfc = mtk_set_rxnfc,
developerea49c302023-06-27 16:06:41 +08004550 .get_rxfh_key_size = mtk_get_rxfh_key_size,
4551 .get_rxfh_indir_size = mtk_get_rxfh_indir_size,
4552 .get_rxfh = mtk_get_rxfh,
4553 .set_rxfh = mtk_set_rxfh,
developer6c5cbb52022-08-12 11:37:45 +08004554 .get_pauseparam = mtk_get_pauseparam,
4555 .set_pauseparam = mtk_set_pauseparam,
developer9b725932022-11-24 16:25:56 +08004556 .get_eee = mtk_get_eee,
4557 .set_eee = mtk_set_eee,
developerfd40db22021-04-29 10:08:25 +08004558};
4559
4560static const struct net_device_ops mtk_netdev_ops = {
4561 .ndo_init = mtk_init,
4562 .ndo_uninit = mtk_uninit,
4563 .ndo_open = mtk_open,
4564 .ndo_stop = mtk_stop,
4565 .ndo_start_xmit = mtk_start_xmit,
4566 .ndo_set_mac_address = mtk_set_mac_address,
4567 .ndo_validate_addr = eth_validate_addr,
4568 .ndo_do_ioctl = mtk_do_ioctl,
4569 .ndo_tx_timeout = mtk_tx_timeout,
4570 .ndo_get_stats64 = mtk_get_stats64,
4571 .ndo_fix_features = mtk_fix_features,
4572 .ndo_set_features = mtk_set_features,
4573#ifdef CONFIG_NET_POLL_CONTROLLER
4574 .ndo_poll_controller = mtk_poll_controller,
4575#endif
4576};
4577
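/* Ethernet mux support: some boards share one MAC between two PHY/SFP
 * channels selected over GPIO. mux_poll() samples the mod-def0 input
 * every 100 ms and, on a change, stops the netdev, swaps in the selected
 * channel's of_node and phylink instance, drives the chan-sel GPIO and
 * reopens the device.
 */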
developerb6c36bf2023-09-07 12:05:01 +08004578static void mux_poll(struct work_struct *work)
4579{
4580 struct mtk_mux *mux = container_of(work, struct mtk_mux, poll.work);
4581 struct mtk_mac *mac = mux->mac;
4582 struct mtk_eth *eth = mac->hw;
4583 struct net_device *dev = eth->netdev[mac->id];
4584 unsigned int channel;
4585
4586 if (IS_ERR(mux->gpio[0]) || IS_ERR(mux->gpio[1]))
4587 goto exit;
4588
4589 channel = gpiod_get_value_cansleep(mux->gpio[0]);
4590 if (mux->channel == channel || !netif_running(dev))
4591 goto exit;
4592
4593 rtnl_lock();
4594
4595 mtk_stop(dev);
4596
4597 if (channel == 0 || channel == 1) {
4598 mac->of_node = mux->data[channel]->of_node;
4599 mac->phylink = mux->data[channel]->phylink;
4600 }
4601
4602 dev_info(eth->dev, "ethernet mux: switch to channel%d\n", channel);
4603
4604 gpiod_set_value_cansleep(mux->gpio[1], channel);
4605
4606 mtk_open(dev);
4607
4608 rtnl_unlock();
4609
4610 mux->channel = channel;
4611
4612exit:
4613 mod_delayed_work(system_wq, &mux->poll, msecs_to_jiffies(100));
4614}
4615
4616static int mtk_add_mux_channel(struct mtk_mux *mux, struct device_node *np)
4617{
4618 const __be32 *_id = of_get_property(np, "reg", NULL);
4619 struct mtk_mac *mac = mux->mac;
4620 struct mtk_eth *eth = mac->hw;
4621 struct mtk_mux_data *data;
4622 struct phylink *phylink;
4623 int phy_mode, id;
4624
4625 if (!_id) {
4626 dev_err(eth->dev, "missing mux channel id\n");
4627 return -EINVAL;
4628 }
4629
4630 id = be32_to_cpup(_id);
4631 if (id < 0 || id > 1) {
4632 dev_err(eth->dev, "%d is not a valid mux channel id\n", id);
4633 return -EINVAL;
4634 }
4635
4636 data = kmalloc(sizeof(*data), GFP_KERNEL);
4637 if (unlikely(!data)) {
4638 dev_err(eth->dev, "failed to create mux data structure\n");
4639 return -ENOMEM;
4640 }
4641
4642 mux->data[id] = data;
4643
4644 /* phylink create */
4645 phy_mode = of_get_phy_mode(np);
4646 if (phy_mode < 0) {
4647 dev_err(eth->dev, "incorrect phy-mode\n");
4648 return -EINVAL;
4649 }
4650
4651 phylink = phylink_create(&mux->mac->phylink_config,
4652 of_fwnode_handle(np),
4653 phy_mode, &mtk_phylink_ops);
4654 if (IS_ERR(phylink)) {
4655 dev_err(eth->dev, "failed to create phylink structure\n");
4656 return PTR_ERR(phylink);
4657 }
4658
4659 data->of_node = np;
4660 data->phylink = phylink;
4661
4662 return 0;
4663}
4664
4665static int mtk_add_mux(struct mtk_eth *eth, struct device_node *np)
4666{
4667 const __be32 *_id = of_get_property(np, "reg", NULL);
4668 struct device_node *child;
4669 struct mtk_mux *mux;
4670 int id;
4671 int err;
4672
4673 if (!_id) {
4674 dev_err(eth->dev, "missing attach mac id\n");
4675 return -EINVAL;
4676 }
4677
4678 id = be32_to_cpup(_id);
4679 if (id < 0 || id >= MTK_MAX_DEVS) {
4680 dev_err(eth->dev, "%d is not a valid mac id for mux\n", id);
4681 return -EINVAL;
4682 }
4683
4684 mux = kmalloc(sizeof(*mux), GFP_KERNEL);
4685 if (unlikely(!mux)) {
4686 dev_err(eth->dev, "failed to create mux structure\n");
4687 return -ENOMEM;
4688 }
4689
4690 eth->mux[id] = mux;
4691
4692 mux->mac = eth->mac[id];
4693 mux->channel = 0;
4694
4695 mux->gpio[0] = fwnode_get_named_gpiod(of_fwnode_handle(np),
4696 "mod-def0-gpios", 0,
4697 GPIOD_IN, "?");
4698 if (IS_ERR(mux->gpio[0]))
4699 dev_err(eth->dev, "failed to requset gpio for mod-def0-gpios\n");
4700
4701 mux->gpio[1] = fwnode_get_named_gpiod(of_fwnode_handle(np),
4702 "chan-sel-gpios", 0,
4703 GPIOD_OUT_LOW, "?");
4704 if (IS_ERR(mux->gpio[1]))
4705 dev_err(eth->dev, "failed to requset gpio for chan-sel-gpios\n");
4706
4707 for_each_child_of_node(np, child) {
4708 err = mtk_add_mux_channel(mux, child);
4709 if (err) {
4710 dev_err(eth->dev, "failed to add mtk_mux\n");
4711 of_node_put(child);
4712 return -ECHILD;
4713 }
4715 }
4716
4717 INIT_DELAYED_WORK(&mux->poll, mux_poll);
4718 mod_delayed_work(system_wq, &mux->poll, msecs_to_jiffies(3000));
4719
4720 return 0;
4721}
4722
developerfd40db22021-04-29 10:08:25 +08004723static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4724{
4725 const __be32 *_id = of_get_property(np, "reg", NULL);
developer30e13e72022-11-03 10:21:24 +08004726 const char *label;
developerfd40db22021-04-29 10:08:25 +08004727 struct phylink *phylink;
developer30e13e72022-11-03 10:21:24 +08004728 int mac_type, phy_mode, id, err;
developerfd40db22021-04-29 10:08:25 +08004729 struct mtk_mac *mac;
developera2613e62022-07-01 18:29:37 +08004730 struct mtk_phylink_priv *phylink_priv;
4731 struct fwnode_handle *fixed_node;
4732 struct gpio_desc *desc;
developerfd40db22021-04-29 10:08:25 +08004733
4734 if (!_id) {
4735 dev_err(eth->dev, "missing mac id\n");
4736 return -EINVAL;
4737 }
4738
4739 id = be32_to_cpup(_id);
developerfb556ca2021-10-13 10:52:09 +08004740 if (id < 0 || id >= MTK_MAC_COUNT) {
developerfd40db22021-04-29 10:08:25 +08004741 dev_err(eth->dev, "%d is not a valid mac id\n", id);
4742 return -EINVAL;
4743 }
4744
4745 if (eth->netdev[id]) {
4746 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4747 return -EINVAL;
4748 }
4749
4750 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
4751 if (!eth->netdev[id]) {
4752 dev_err(eth->dev, "alloc_etherdev failed\n");
4753 return -ENOMEM;
4754 }
4755 mac = netdev_priv(eth->netdev[id]);
4756 eth->mac[id] = mac;
4757 mac->id = id;
4758 mac->hw = eth;
4759 mac->of_node = np;
4760
4761 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4762 mac->hwlro_ip_cnt = 0;
4763
4764 mac->hw_stats = devm_kzalloc(eth->dev,
4765 sizeof(*mac->hw_stats),
4766 GFP_KERNEL);
4767 if (!mac->hw_stats) {
4768 dev_err(eth->dev, "failed to allocate counter memory\n");
4769 err = -ENOMEM;
4770 goto free_netdev;
4771 }
4772 spin_lock_init(&mac->hw_stats->stats_lock);
4773 u64_stats_init(&mac->hw_stats->syncp);
4774 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
4775
4776 /* phylink create */
4777 phy_mode = of_get_phy_mode(np);
4778 if (phy_mode < 0) {
4779 dev_err(eth->dev, "incorrect phy-mode\n");
4780 err = -EINVAL;
4781 goto free_netdev;
4782 }
4783
4784 /* mac config is not set */
4785 mac->interface = PHY_INTERFACE_MODE_NA;
4786 mac->mode = MLO_AN_PHY;
4787 mac->speed = SPEED_UNKNOWN;
4788
developer9b725932022-11-24 16:25:56 +08004789 mac->tx_lpi_timer = 1;
4790
developerfd40db22021-04-29 10:08:25 +08004791 mac->phylink_config.dev = &eth->netdev[id]->dev;
4792 mac->phylink_config.type = PHYLINK_NETDEV;
4793
developer30e13e72022-11-03 10:21:24 +08004794 mac->type = 0;
4795 if (!of_property_read_string(np, "mac-type", &label)) {
4796 for (mac_type = 0; mac_type < MTK_GDM_TYPE_MAX; mac_type++) {
4797 if (!strcasecmp(label, gdm_type(mac_type)))
4798 break;
4799 }
4800
4801 switch (mac_type) {
4802 case 0:
4803 mac->type = MTK_GDM_TYPE;
4804 break;
4805 case 1:
4806 mac->type = MTK_XGDM_TYPE;
4807 break;
4808 default:
4809 dev_warn(eth->dev, "incorrect mac-type\n");
4810 break;
4811 }
4812 }
developer089e8852022-09-28 14:43:46 +08004813
developerfd40db22021-04-29 10:08:25 +08004814 phylink = phylink_create(&mac->phylink_config,
4815 of_fwnode_handle(mac->of_node),
4816 phy_mode, &mtk_phylink_ops);
4817 if (IS_ERR(phylink)) {
4818 err = PTR_ERR(phylink);
4819 goto free_netdev;
4820 }
4821
4822 mac->phylink = phylink;
4823
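/* Optional GPIO-based link monitoring for a fixed-link: an interrupt on
 * both edges of "link-gpio" drives mtk_handle_irq_fixed_link, and the
 * fixed-link node's "label" and "phy-handle" address are recorded in
 * phylink_priv.
 */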
developera2613e62022-07-01 18:29:37 +08004824 fixed_node = fwnode_get_named_child_node(of_fwnode_handle(mac->of_node),
4825 "fixed-link");
4826 if (fixed_node) {
4827 desc = fwnode_get_named_gpiod(fixed_node, "link-gpio",
4828 0, GPIOD_IN, "?");
4829 if (!IS_ERR(desc)) {
4830 struct device_node *phy_np;
4831 const char *label;
4832 int irq, phyaddr;
4833
4834 phylink_priv = &mac->phylink_priv;
4835
4836 phylink_priv->desc = desc;
4837 phylink_priv->id = id;
4838 phylink_priv->link = -1;
4839
4840 irq = gpiod_to_irq(desc);
4841 if (irq > 0) {
4842 devm_request_irq(eth->dev, irq, mtk_handle_irq_fixed_link,
4843 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
4844 "ethernet:fixed link", mac);
4845 }
4846
developer8b6f2402022-11-28 13:42:34 +08004847 if (!of_property_read_string(to_of_node(fixed_node),
4848 "label", &label)) {
developer659fdeb2022-12-01 23:03:07 +08004849 if (strlen(label) < sizeof(phylink_priv->label))
4850 strscpy(phylink_priv->label, label,
4851 sizeof(phylink_priv->label));
4852 else
developer8b6f2402022-11-28 13:42:34 +08004853 dev_err(eth->dev, "insufficient space for label!\n");
4854 }
developera2613e62022-07-01 18:29:37 +08004855
4856 phy_np = of_parse_phandle(to_of_node(fixed_node), "phy-handle", 0);
4857 if (phy_np) {
4858 if (!of_property_read_u32(phy_np, "reg", &phyaddr))
4859 phylink_priv->phyaddr = phyaddr;
4860 }
4861 }
4862 fwnode_handle_put(fixed_node);
4863 }
4864
developerfd40db22021-04-29 10:08:25 +08004865 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4866 eth->netdev[id]->watchdog_timeo = 5 * HZ;
4867 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4868 eth->netdev[id]->base_addr = (unsigned long)eth->base;
4869
4870 eth->netdev[id]->hw_features = eth->soc->hw_features;
4871 if (eth->hwlro)
4872 eth->netdev[id]->hw_features |= NETIF_F_LRO;
4873
4874 eth->netdev[id]->vlan_features = eth->soc->hw_features &
4875 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
4876 eth->netdev[id]->features |= eth->soc->hw_features;
4877 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4878
developer94806ec2023-05-19 14:16:44 +08004879 eth->netdev[id]->irq = eth->irq_fe[0];
developerfd40db22021-04-29 10:08:25 +08004880 eth->netdev[id]->dev.of_node = np;
4881
4882 return 0;
4883
4884free_netdev:
4885 free_netdev(eth->netdev[id]);
4886 return err;
4887}
4888
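/* Switch the device used for DMA mapping. All running netdevs are closed
 * first so no DMA is in flight, the new dma_dev is installed, and the
 * devices are reopened against it, all under rtnl.
 */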
developer3f28d382023-03-07 16:06:30 +08004889void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4890{
4891 struct net_device *dev, *tmp;
4892 LIST_HEAD(dev_list);
4893 int i;
4894
4895 rtnl_lock();
4896
4897 for (i = 0; i < MTK_MAC_COUNT; i++) {
4898 dev = eth->netdev[i];
4899
4900 if (!dev || !(dev->flags & IFF_UP))
4901 continue;
4902
4903 list_add_tail(&dev->close_list, &dev_list);
4904 }
4905
4906 dev_close_many(&dev_list, false);
4907
4908 eth->dma_dev = dma_dev;
4909
4910 list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4911 list_del_init(&dev->close_list);
4912 dev_open(dev, NULL);
4913 }
4914
4915 rtnl_unlock();
4916}
4917
developerfd40db22021-04-29 10:08:25 +08004918static int mtk_probe(struct platform_device *pdev)
4919{
developerb6c36bf2023-09-07 12:05:01 +08004920 struct device_node *mac_np, *mux_np;
developerfd40db22021-04-29 10:08:25 +08004921 struct mtk_eth *eth;
4922 int err, i;
4923
4924 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4925 if (!eth)
4926 return -ENOMEM;
4927
4928 eth->soc = of_device_get_match_data(&pdev->dev);
4929
4930 eth->dev = &pdev->dev;
developer3f28d382023-03-07 16:06:30 +08004931 eth->dma_dev = &pdev->dev;
developerfd40db22021-04-29 10:08:25 +08004932 eth->base = devm_platform_ioremap_resource(pdev, 0);
4933 if (IS_ERR(eth->base))
4934 return PTR_ERR(eth->base);
4935
developer089e8852022-09-28 14:43:46 +08004936 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
4937 eth->sram_base = devm_platform_ioremap_resource(pdev, 1);
4938 if (IS_ERR(eth->sram_base))
4939 return PTR_ERR(eth->sram_base);
4940 }
4941
developerfd40db22021-04-29 10:08:25 +08004942 if (eth->soc->has_sram) {
4943 struct resource *res;
4944 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
developer4c32b7a2021-11-13 16:46:43 +08004945 if (unlikely(!res))
4946 return -EINVAL;
developerfd40db22021-04-29 10:08:25 +08004947 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
4948 }
4949
developer0fef5222023-04-26 14:48:31 +08004950 mtk_get_hwver(eth);
4951
developer68ce74f2023-01-03 16:11:57 +08004952 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
developerfd40db22021-04-29 10:08:25 +08004953 eth->ip_align = NET_IP_ALIGN;
developerfd40db22021-04-29 10:08:25 +08004954
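/* SoCs that can address more than 4GB of DRAM use 36-bit DMA masks. If
 * the streaming mask is rejected the 32-bit default is kept; a mismatch
 * between streaming and coherent masks is fatal.
 */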
developer089e8852022-09-28 14:43:46 +08004955 if (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) {
4956 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
4957 if (!err) {
4958 err = dma_set_coherent_mask(&pdev->dev,
4959 DMA_BIT_MASK(36));
4960 if (err) {
4961 dev_err(&pdev->dev, "Wrong DMA config\n");
4962 return -EINVAL;
4963 }
4964 }
4965 }
4966
developerfd40db22021-04-29 10:08:25 +08004967 spin_lock_init(&eth->page_lock);
4968 spin_lock_init(&eth->tx_irq_lock);
4969 spin_lock_init(&eth->rx_irq_lock);
developerd82e8372022-02-09 15:00:09 +08004970 spin_lock_init(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +08004971
4972 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4973 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4974 "mediatek,ethsys");
4975 if (IS_ERR(eth->ethsys)) {
4976 dev_err(&pdev->dev, "no ethsys regmap found\n");
4977 return PTR_ERR(eth->ethsys);
4978 }
4979 }
4980
4981 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4982 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4983 "mediatek,infracfg");
4984 if (IS_ERR(eth->infra)) {
4985 dev_err(&pdev->dev, "no infracfg regmap found\n");
4986 return PTR_ERR(eth->infra);
4987 }
4988 }
4989
developer3f28d382023-03-07 16:06:30 +08004990 if (of_dma_is_coherent(pdev->dev.of_node)) {
4991 struct regmap *cci;
4992
4993 cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4994 "cci-control-port");
4995 /* enable CPU/bus coherency */
4996 if (!IS_ERR(cci))
4997 regmap_write(cci, 0, 3);
4998 }
4999
developerfd40db22021-04-29 10:08:25 +08005000 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
developer4e8a3fd2023-04-10 18:05:44 +08005001 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
developerfd40db22021-04-29 10:08:25 +08005002 GFP_KERNEL);
developer4e8a3fd2023-04-10 18:05:44 +08005003 if (!eth->sgmii)
developerfd40db22021-04-29 10:08:25 +08005004 return -ENOMEM;
5005
developer4e8a3fd2023-04-10 18:05:44 +08005006 err = mtk_sgmii_init(eth, pdev->dev.of_node,
developerfd40db22021-04-29 10:08:25 +08005007 eth->soc->ana_rgc3);
developer089e8852022-09-28 14:43:46 +08005008 if (err)
5009 return err;
5010 }
5011
5012 if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
developer4e8a3fd2023-04-10 18:05:44 +08005013 eth->usxgmii = devm_kzalloc(eth->dev, sizeof(*eth->usxgmii),
5014 GFP_KERNEL);
5015 if (!eth->usxgmii)
5016 return -ENOMEM;
developer089e8852022-09-28 14:43:46 +08005017
developer4e8a3fd2023-04-10 18:05:44 +08005018 err = mtk_usxgmii_init(eth, pdev->dev.of_node);
developer089e8852022-09-28 14:43:46 +08005019 if (err)
5020 return err;
5021
5022 err = mtk_toprgu_init(eth, pdev->dev.of_node);
developerfd40db22021-04-29 10:08:25 +08005023 if (err)
5024 return err;
5025 }
5026
5027 if (eth->soc->required_pctl) {
5028 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5029 "mediatek,pctl");
5030 if (IS_ERR(eth->pctl)) {
5031 dev_err(&pdev->dev, "no pctl regmap found\n");
5032 return PTR_ERR(eth->pctl);
5033 }
5034 }
5035
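/* Interrupt layout: the first MTK_PDMA_IRQ_NUM platform IRQs are the
 * PDMA (RX) interrupts, followed by MTK_FE_IRQ_NUM frame-engine IRQs.
 * SoCs with a single shared line reuse irq_fe[0] for every FE slot.
 */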
developer94806ec2023-05-19 14:16:44 +08005036 for (i = 0; i < MTK_PDMA_IRQ_NUM; i++)
5037 eth->irq_pdma[i] = platform_get_irq(pdev, i);
5038
5039 for (i = 0; i < MTK_FE_IRQ_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08005040 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
developer94806ec2023-05-19 14:16:44 +08005041 eth->irq_fe[i] = eth->irq_fe[0];
developerfd40db22021-04-29 10:08:25 +08005042 else
developer94806ec2023-05-19 14:16:44 +08005043 eth->irq_fe[i] =
5044 platform_get_irq(pdev, i + MTK_PDMA_IRQ_NUM);
5045
5046 if (eth->irq_fe[i] < 0) {
developerfd40db22021-04-29 10:08:25 +08005047 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
5048 return -ENXIO;
5049 }
5050 }
5051
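/* Look up all known clocks: probe deferral is propagated, a missing
 * clock is fatal only if the SoC marks it required, and optional clocks
 * are left NULL.
 */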
5052 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
5053 eth->clks[i] = devm_clk_get(eth->dev,
5054 mtk_clks_source_name[i]);
5055 if (IS_ERR(eth->clks[i])) {
5056 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
5057 return -EPROBE_DEFER;
5058 if (eth->soc->required_clks & BIT(i)) {
5059 dev_err(&pdev->dev, "clock %s not found\n",
5060 mtk_clks_source_name[i]);
5061 return -EINVAL;
5062 }
5063 eth->clks[i] = NULL;
5064 }
5065 }
5066
5067 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
5068 INIT_WORK(&eth->pending_work, mtk_pending_work);
5069
developer8051e042022-04-08 13:26:36 +08005070 err = mtk_hw_init(eth, MTK_TYPE_COLD_RESET);
developerfd40db22021-04-29 10:08:25 +08005071 if (err)
5072 return err;
5073
5074 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
5075
5076 for_each_child_of_node(pdev->dev.of_node, mac_np) {
5077 if (!of_device_is_compatible(mac_np,
5078 "mediatek,eth-mac"))
5079 continue;
5080
5081 if (!of_device_is_available(mac_np))
5082 continue;
5083
5084 err = mtk_add_mac(eth, mac_np);
5085 if (err) {
5086 of_node_put(mac_np);
5087 goto err_deinit_hw;
5088 }
5089 }
5090
developerb6c36bf2023-09-07 12:05:01 +08005091 mux_np = of_get_child_by_name(eth->dev->of_node, "mux-bus");
5092 if (mux_np) {
5093 struct device_node *child;
5094
5095 for_each_available_child_of_node(mux_np, child) {
5096 if (!of_device_is_compatible(child,
5097 "mediatek,eth-mux"))
5098 continue;
5099
5103 err = mtk_add_mux(eth, child);
5104 if (err)
5105 dev_err(&pdev->dev, "failed to add mux\n");
5106 }
5107
5108 of_node_put(mux_np);
5109 }
5110
developer18f46a82021-07-20 21:08:21 +08005111 err = mtk_napi_init(eth);
5112 if (err)
5113 goto err_free_dev;
5114
developerfd40db22021-04-29 10:08:25 +08005115 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
developer94806ec2023-05-19 14:16:44 +08005116 err = devm_request_irq(eth->dev, eth->irq_fe[0],
developerfd40db22021-04-29 10:08:25 +08005117 mtk_handle_irq, 0,
5118 dev_name(eth->dev), eth);
5119 } else {
developer94806ec2023-05-19 14:16:44 +08005120 err = devm_request_irq(eth->dev, eth->irq_fe[1],
developerfd40db22021-04-29 10:08:25 +08005121 mtk_handle_irq_tx, 0,
5122 dev_name(eth->dev), eth);
5123 if (err)
5124 goto err_free_dev;
5125
developer94806ec2023-05-19 14:16:44 +08005126 err = devm_request_irq(eth->dev, eth->irq_fe[2],
5127 mtk_handle_fe_irq, 0,
5128 dev_name(eth->dev), eth);
5129 if (err)
5130 goto err_free_dev;
5131
5132 err = devm_request_irq(eth->dev, eth->irq_pdma[0],
developerfd40db22021-04-29 10:08:25 +08005133 mtk_handle_irq_rx, 0,
developer18f46a82021-07-20 21:08:21 +08005134 dev_name(eth->dev), &eth->rx_napi[0]);
5135 if (err)
5136 goto err_free_dev;
5137
developer94806ec2023-05-19 14:16:44 +08005138 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
5139 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
5140 err = devm_request_irq(eth->dev,
5141 eth->irq_pdma[i],
5142 mtk_handle_irq_rx, 0,
5143 dev_name(eth->dev),
5144 &eth->rx_napi[i]);
developer18f46a82021-07-20 21:08:21 +08005145 if (err)
5146 goto err_free_dev;
5147 }
5148 }
developerfd40db22021-04-29 10:08:25 +08005149 }
developer8051e042022-04-08 13:26:36 +08005150
developerfd40db22021-04-29 10:08:25 +08005151 if (err)
5152 goto err_free_dev;
5153
5154 /* No MT7628/88 support yet */
5155 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
5156 err = mtk_mdio_init(eth);
5157 if (err)
5158 goto err_free_dev;
5159 }
5160
5161 for (i = 0; i < MTK_MAX_DEVS; i++) {
5162 if (!eth->netdev[i])
5163 continue;
5164
5165 err = register_netdev(eth->netdev[i]);
5166 if (err) {
5167 dev_err(eth->dev, "error bringing up device\n");
5168 goto err_deinit_mdio;
5169 }
5170 netif_info(eth, probe, eth->netdev[i],
5171 "mediatek frame engine at 0x%08lx, irq %d\n",
developer94806ec2023-05-19 14:16:44 +08005172 eth->netdev[i]->base_addr, eth->irq_fe[0]);
developerfd40db22021-04-29 10:08:25 +08005173 }
5174
5175 /* we run 2 devices on the same DMA ring so we need a dummy device
5176 * for NAPI to work
5177 */
5178 init_dummy_netdev(&eth->dummy_dev);
5179 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
5180 MTK_NAPI_WEIGHT);
developer18f46a82021-07-20 21:08:21 +08005181 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[0].napi, mtk_napi_rx,
developerfd40db22021-04-29 10:08:25 +08005182 MTK_NAPI_WEIGHT);
5183
developer18f46a82021-07-20 21:08:21 +08005184 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
5185 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
5186 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[i].napi,
5187 mtk_napi_rx, MTK_NAPI_WEIGHT);
5188 }
5189
developerfd40db22021-04-29 10:08:25 +08005190 mtketh_debugfs_init(eth);
5191 debug_proc_init(eth);
5192
5193 platform_set_drvdata(pdev, eth);
5194
developer8051e042022-04-08 13:26:36 +08005195 register_netdevice_notifier(&mtk_eth_netdevice_nb);
developer37482a42022-12-26 13:31:13 +08005196#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developer8051e042022-04-08 13:26:36 +08005197 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
5198 eth->mtk_dma_monitor_timer.expires = jiffies;
5199 add_timer(&eth->mtk_dma_monitor_timer);
developer793f7b42022-05-20 13:54:51 +08005200#endif
developer8051e042022-04-08 13:26:36 +08005201
developerfd40db22021-04-29 10:08:25 +08005202 return 0;
5203
5204err_deinit_mdio:
5205 mtk_mdio_cleanup(eth);
5206err_free_dev:
5207 mtk_free_dev(eth);
5208err_deinit_hw:
5209 mtk_hw_deinit(eth);
5210
5211 return err;
5212}
5213
5214static int mtk_remove(struct platform_device *pdev)
5215{
5216 struct mtk_eth *eth = platform_get_drvdata(pdev);
5217 struct mtk_mac *mac;
5218 int i;
5219
5220 /* stop all devices to make sure that dma is properly shut down */
5221 for (i = 0; i < MTK_MAC_COUNT; i++) {
5222 if (!eth->netdev[i])
5223 continue;
5224 mtk_stop(eth->netdev[i]);
5225 mac = netdev_priv(eth->netdev[i]);
5226 phylink_disconnect_phy(mac->phylink);
5227 }
5228
5229 mtk_hw_deinit(eth);
5230
5231 netif_napi_del(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08005232 netif_napi_del(&eth->rx_napi[0].napi);
5233
5234 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
5235 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
5236 netif_napi_del(&eth->rx_napi[i].napi);
5237 }
5238
developerfd40db22021-04-29 10:08:25 +08005239 mtk_cleanup(eth);
5240 mtk_mdio_cleanup(eth);
developer8051e042022-04-08 13:26:36 +08005241 unregister_netdevice_notifier(&mtk_eth_netdevice_nb);
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
5242 del_timer_sync(&eth->mtk_dma_monitor_timer);
#endif
developerfd40db22021-04-29 10:08:25 +08005243
5244 return 0;
5245}
5246
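/* Per-SoC configuration: register map, capability flags, required clocks
 * and the TX/RX DMA descriptor geometry used by the datapath.
 */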
5247static const struct mtk_soc_data mt2701_data = {
developer68ce74f2023-01-03 16:11:57 +08005248 .reg_map = &mtk_reg_map,
developerfd40db22021-04-29 10:08:25 +08005249 .caps = MT7623_CAPS | MTK_HWLRO,
5250 .hw_features = MTK_HW_FEATURES,
5251 .required_clks = MT7623_CLKS_BITMAP,
5252 .required_pctl = true,
5253 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005254 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005255 .txrx = {
5256 .txd_size = sizeof(struct mtk_tx_dma),
5257 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005258 .rx_dma_l4_valid = RX_DMA_L4_VALID,
developere9356982022-07-04 09:03:20 +08005259 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5260 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5261 },
developerfd40db22021-04-29 10:08:25 +08005262};
5263
5264static const struct mtk_soc_data mt7621_data = {
developer68ce74f2023-01-03 16:11:57 +08005265 .reg_map = &mtk_reg_map,
developerfd40db22021-04-29 10:08:25 +08005266 .caps = MT7621_CAPS,
5267 .hw_features = MTK_HW_FEATURES,
5268 .required_clks = MT7621_CLKS_BITMAP,
5269 .required_pctl = false,
5270 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005271 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005272 .txrx = {
5273 .txd_size = sizeof(struct mtk_tx_dma),
developere9356982022-07-04 09:03:20 +08005274 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005275 .rx_dma_l4_valid = RX_DMA_L4_VALID,
5276 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5277 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5278 },
developerfd40db22021-04-29 10:08:25 +08005279};
5280
5281static const struct mtk_soc_data mt7622_data = {
developer68ce74f2023-01-03 16:11:57 +08005282 .reg_map = &mtk_reg_map,
developerfd40db22021-04-29 10:08:25 +08005283 .ana_rgc3 = 0x2028,
5284 .caps = MT7622_CAPS | MTK_HWLRO,
5285 .hw_features = MTK_HW_FEATURES,
5286 .required_clks = MT7622_CLKS_BITMAP,
5287 .required_pctl = false,
5288 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005289 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005290 .txrx = {
5291 .txd_size = sizeof(struct mtk_tx_dma),
5292 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005293 .rx_dma_l4_valid = RX_DMA_L4_VALID,
developere9356982022-07-04 09:03:20 +08005294 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5295 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5296 },
developerfd40db22021-04-29 10:08:25 +08005297};
5298
5299static const struct mtk_soc_data mt7623_data = {
developer68ce74f2023-01-03 16:11:57 +08005300 .reg_map = &mtk_reg_map,
developerfd40db22021-04-29 10:08:25 +08005301 .caps = MT7623_CAPS | MTK_HWLRO,
5302 .hw_features = MTK_HW_FEATURES,
5303 .required_clks = MT7623_CLKS_BITMAP,
5304 .required_pctl = true,
5305 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005306 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005307 .txrx = {
5308 .txd_size = sizeof(struct mtk_tx_dma),
5309 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005310 .rx_dma_l4_valid = RX_DMA_L4_VALID,
developere9356982022-07-04 09:03:20 +08005311 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5312 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5313 },
developerfd40db22021-04-29 10:08:25 +08005314};
5315
5316static const struct mtk_soc_data mt7629_data = {
developer68ce74f2023-01-03 16:11:57 +08005317 .reg_map = &mtk_reg_map,
developerfd40db22021-04-29 10:08:25 +08005318 .ana_rgc3 = 0x128,
5319 .caps = MT7629_CAPS | MTK_HWLRO,
5320 .hw_features = MTK_HW_FEATURES,
5321 .required_clks = MT7629_CLKS_BITMAP,
5322 .required_pctl = false,
5323 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005324 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005325 .txrx = {
5326 .txd_size = sizeof(struct mtk_tx_dma),
5327 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005328 .rx_dma_l4_valid = RX_DMA_L4_VALID,
developere9356982022-07-04 09:03:20 +08005329 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5330 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5331 },
developerfd40db22021-04-29 10:08:25 +08005332};
5333
5334static const struct mtk_soc_data mt7986_data = {
developer68ce74f2023-01-03 16:11:57 +08005335 .reg_map = &mt7986_reg_map,
developerfd40db22021-04-29 10:08:25 +08005336 .ana_rgc3 = 0x128,
5337 .caps = MT7986_CAPS,
developercba5f4e2021-05-06 14:01:53 +08005338 .hw_features = MTK_HW_FEATURES,
developerfd40db22021-04-29 10:08:25 +08005339 .required_clks = MT7986_CLKS_BITMAP,
5340 .required_pctl = false,
developerc42fa982023-08-22 15:37:30 +08005341 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005342 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005343 .txrx = {
5344 .txd_size = sizeof(struct mtk_tx_dma_v2),
developer8ecd51b2023-03-13 11:28:28 +08005345 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005346 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
developere9356982022-07-04 09:03:20 +08005347 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5348 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
5349 },
developerfd40db22021-04-29 10:08:25 +08005350};
5351
developer255bba22021-07-27 15:16:33 +08005352static const struct mtk_soc_data mt7981_data = {
developer68ce74f2023-01-03 16:11:57 +08005353 .reg_map = &mt7986_reg_map,
developer255bba22021-07-27 15:16:33 +08005354 .ana_rgc3 = 0x128,
5355 .caps = MT7981_CAPS,
developer7377b0b2021-11-18 14:54:47 +08005356 .hw_features = MTK_HW_FEATURES,
developer255bba22021-07-27 15:16:33 +08005357 .required_clks = MT7981_CLKS_BITMAP,
5358 .required_pctl = false,
developerc42fa982023-08-22 15:37:30 +08005359 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005360 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005361 .txrx = {
5362 .txd_size = sizeof(struct mtk_tx_dma_v2),
developer8ecd51b2023-03-13 11:28:28 +08005363 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005364 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
developere9356982022-07-04 09:03:20 +08005365 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5366 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
5367 },
developer255bba22021-07-27 15:16:33 +08005368};
5369
developer089e8852022-09-28 14:43:46 +08005370static const struct mtk_soc_data mt7988_data = {
developer68ce74f2023-01-03 16:11:57 +08005371 .reg_map = &mt7988_reg_map,
developer089e8852022-09-28 14:43:46 +08005372 .ana_rgc3 = 0x128,
5373 .caps = MT7988_CAPS,
5374 .hw_features = MTK_HW_FEATURES,
5375 .required_clks = MT7988_CLKS_BITMAP,
5376 .required_pctl = false,
5377 .has_sram = true,
developere3d0de22023-05-30 17:45:00 +08005378 .rss_num = 4,
developer089e8852022-09-28 14:43:46 +08005379 .txrx = {
5380 .txd_size = sizeof(struct mtk_tx_dma_v2),
5381 .rxd_size = sizeof(struct mtk_rx_dma_v2),
developer68ce74f2023-01-03 16:11:57 +08005382 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
developer089e8852022-09-28 14:43:46 +08005383 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5384 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
5385 },
5386};
5387
developerfd40db22021-04-29 10:08:25 +08005388static const struct mtk_soc_data rt5350_data = {
developer68ce74f2023-01-03 16:11:57 +08005389 .reg_map = &mt7628_reg_map,
developerfd40db22021-04-29 10:08:25 +08005390 .caps = MT7628_CAPS,
5391 .hw_features = MTK_HW_FEATURES_MT7628,
5392 .required_clks = MT7628_CLKS_BITMAP,
5393 .required_pctl = false,
5394 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005395 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005396 .txrx = {
5397 .txd_size = sizeof(struct mtk_tx_dma),
5398 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005399 .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
developere9356982022-07-04 09:03:20 +08005400 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5401 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5402 },
developerfd40db22021-04-29 10:08:25 +08005403};
5404
5405const struct of_device_id of_mtk_match[] = {
5406 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
5407 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
5408 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
5409 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
5410 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
5411 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
developer255bba22021-07-27 15:16:33 +08005412 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
developer089e8852022-09-28 14:43:46 +08005413 { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data},
developerfd40db22021-04-29 10:08:25 +08005414 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
5415 {},
5416};
5417MODULE_DEVICE_TABLE(of, of_mtk_match);
5418
5419static struct platform_driver mtk_driver = {
5420 .probe = mtk_probe,
5421 .remove = mtk_remove,
5422 .driver = {
5423 .name = "mtk_soc_eth",
5424 .of_match_table = of_mtk_match,
5425 },
5426};
5427
5428module_platform_driver(mtk_driver);
5429
5430MODULE_LICENSE("GPL");
5431MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
5432MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");