// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>

#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
#include "mtk_eth_reset.h"

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
#include "mtk_hnat/nf_hnat_mtk.h"
#endif

static int mtk_msg_level = -1;
atomic_t reset_lock = ATOMIC_INIT(0);
atomic_t force = ATOMIC_INIT(0);

module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
DECLARE_COMPLETION(wait_ser_done);

#define MTK_ETHTOOL_STAT(x) { #x, \
			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

static const struct mtk_reg_map mtk_reg_map = {
	.tx_irq_mask = 0x1a1c,
	.tx_irq_status = 0x1a18,
	.pdma = {
		.rx_ptr = 0x0900,
		.rx_cnt_cfg = 0x0904,
		.pcrx_ptr = 0x0908,
		.glo_cfg = 0x0a04,
		.rst_idx = 0x0a08,
		.delay_irq = 0x0a0c,
		.irq_status = 0x0a20,
		.irq_mask = 0x0a28,
		.int_grp = 0x0a50,
		.int_grp2 = 0x0a54,
	},
	.qdma = {
		.qtx_cfg = 0x1800,
		.qtx_sch = 0x1804,
		.rx_ptr = 0x1900,
		.rx_cnt_cfg = 0x1904,
		.qcrx_ptr = 0x1908,
		.glo_cfg = 0x1a04,
		.rst_idx = 0x1a08,
		.delay_irq = 0x1a0c,
		.fc_th = 0x1a10,
		.tx_sch_rate = 0x1a14,
		.int_grp = 0x1a20,
		.int_grp2 = 0x1a24,
		.hred2 = 0x1a44,
		.ctx_ptr = 0x1b00,
		.dtx_ptr = 0x1b04,
		.crx_ptr = 0x1b10,
		.drx_ptr = 0x1b14,
		.fq_head = 0x1b20,
		.fq_tail = 0x1b24,
		.fq_count = 0x1b28,
		.fq_blen = 0x1b2c,
	},
	.gdm1_cnt = 0x2400,
	.gdma_to_ppe0 = 0x4444,
	.ppe_base = {
		[0] = 0x0c00,
	},
	.wdma_base = {
		[0] = 0x2800,
		[1] = 0x2c00,
	},
};

static const struct mtk_reg_map mt7628_reg_map = {
	.tx_irq_mask = 0x0a28,
	.tx_irq_status = 0x0a20,
	.pdma = {
		.rx_ptr = 0x0900,
		.rx_cnt_cfg = 0x0904,
		.pcrx_ptr = 0x0908,
		.glo_cfg = 0x0a04,
		.rst_idx = 0x0a08,
		.delay_irq = 0x0a0c,
		.irq_status = 0x0a20,
		.irq_mask = 0x0a28,
		.int_grp = 0x0a50,
		.int_grp2 = 0x0a54,
	},
};

static const struct mtk_reg_map mt7986_reg_map = {
	.tx_irq_mask = 0x461c,
	.tx_irq_status = 0x4618,
	.pdma = {
		.rx_ptr = 0x4100,
		.rx_cnt_cfg = 0x4104,
		.pcrx_ptr = 0x4108,
		.glo_cfg = 0x4204,
		.rst_idx = 0x4208,
		.delay_irq = 0x420c,
		.irq_status = 0x4220,
		.irq_mask = 0x4228,
		.int_grp = 0x4250,
		.int_grp2 = 0x4254,
	},
	.qdma = {
		.qtx_cfg = 0x4400,
		.qtx_sch = 0x4404,
		.rx_ptr = 0x4500,
		.rx_cnt_cfg = 0x4504,
		.qcrx_ptr = 0x4508,
		.glo_cfg = 0x4604,
		.rst_idx = 0x4608,
		.delay_irq = 0x460c,
		.fc_th = 0x4610,
		.int_grp = 0x4620,
		.int_grp2 = 0x4624,
		.hred2 = 0x4644,
		.ctx_ptr = 0x4700,
		.dtx_ptr = 0x4704,
		.crx_ptr = 0x4710,
		.drx_ptr = 0x4714,
		.fq_head = 0x4720,
		.fq_tail = 0x4724,
		.fq_count = 0x4728,
		.fq_blen = 0x472c,
		.tx_sch_rate = 0x4798,
	},
	.gdm1_cnt = 0x1c00,
	.gdma_to_ppe0 = 0x3333,
	.ppe_base = {
		[0] = 0x2000,
		[1] = 0x2400,
	},
	.wdma_base = {
		[0] = 0x4800,
		[1] = 0x4c00,
	},
};

static const struct mtk_reg_map mt7988_reg_map = {
	.tx_irq_mask = 0x461c,
	.tx_irq_status = 0x4618,
	.pdma = {
		.rx_ptr = 0x6900,
		.rx_cnt_cfg = 0x6904,
		.pcrx_ptr = 0x6908,
		.glo_cfg = 0x6a04,
		.rst_idx = 0x6a08,
		.delay_irq = 0x6a0c,
		.irq_status = 0x6a20,
		.irq_mask = 0x6a28,
		.int_grp = 0x6a50,
		.int_grp2 = 0x6a54,
	},
	.qdma = {
		.qtx_cfg = 0x4400,
		.qtx_sch = 0x4404,
		.rx_ptr = 0x4500,
		.rx_cnt_cfg = 0x4504,
		.qcrx_ptr = 0x4508,
		.glo_cfg = 0x4604,
		.rst_idx = 0x4608,
		.delay_irq = 0x460c,
		.fc_th = 0x4610,
		.int_grp = 0x4620,
		.int_grp2 = 0x4624,
		.hred2 = 0x4644,
		.ctx_ptr = 0x4700,
		.dtx_ptr = 0x4704,
		.crx_ptr = 0x4710,
		.drx_ptr = 0x4714,
		.fq_head = 0x4720,
		.fq_tail = 0x4724,
		.fq_count = 0x4728,
		.fq_blen = 0x472c,
		.tx_sch_rate = 0x4798,
	},
	.gdm1_cnt = 0x1c00,
	.gdma_to_ppe0 = 0x3333,
	.ppe_base = {
		[0] = 0x2000,
		[1] = 0x2400,
		[2] = 0x2c00,
	},
	.wdma_base = {
		[0] = 0x4800,
		[1] = 0x4c00,
		[2] = 0x5000,
	},
};

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "gp3",
	"xgp1", "xgp2", "xgp3", "crypto", "fe", "trgpll",
	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
	"sgmii_ck", "eth2pll", "wocpu0", "wocpu1",
	"ethwarp_wocpu2", "ethwarp_wocpu1", "ethwarp_wocpu0",
	"top_usxgmii0_sel", "top_usxgmii1_sel", "top_sgm0_sel", "top_sgm1_sel",
	"top_xfi_phy0_xtal_sel", "top_xfi_phy1_xtal_sel", "top_eth_gmii_sel",
	"top_eth_refck_50m_sel", "top_eth_sys_200m_sel", "top_eth_sys_sel",
	"top_eth_xgmii_sel", "top_eth_mii_sel", "top_netsys_sel",
	"top_netsys_500m_sel", "top_netsys_pao_2x_sel",
	"top_netsys_sync_250m_sel", "top_netsys_ppefb_250m_sel",
	"top_netsys_warp_sel",
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

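/* Read-modify-write helper: clears @mask and sets @set in @reg. Note that it
 * returns the register offset rather than the value written back, so callers
 * must not rely on the return value for the updated register contents.
 */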
u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
	u32 val;

	val = mtk_r32(eth, reg);
	val &= ~mask;
	val |= set;
	mtk_w32(eth, val, reg);
	return reg;
}

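/* Poll the PHY indirect-access control register until the hardware clears
 * the PHY_IAC_ACCESS busy bit, giving up after PHY_IAC_TIMEOUT jiffies.
 */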
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		cond_resched();
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

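/* Clause 22 accesses need a single IAC command; Clause 45 registers (flagged
 * with MII_ADDR_C45) take two: an address cycle carrying the device and
 * register pair, then the data cycle. Each command waits for the busy bit.
 */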
u32 _mtk_mdio_write(struct mtk_eth *eth, int phy_addr,
		    int phy_reg, u16 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return -1;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_WRITE |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return 0xffff;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_READ_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

static int mtk_mdio_reset(struct mii_bus *bus)
{
	/* mdiobus_register() will trigger a reset pulse when enabling bus
	 * reset; we just need to wait until the device is ready.
	 */
	mdelay(20);

	return 0;
}

static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
				     phy_interface_t interface)
{
	u32 val = 0;

	/* Check DDR memory type.
	 * Currently TRGMII mode with DDR2 memory is not supported.
	 */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
	if (interface == PHY_INTERFACE_MODE_TRGMII &&
	    val & SYSCFG_DRAM_TYPE_DDR2) {
		dev_err(eth->dev,
			"TRGMII mode with DDR2 memory is not supported!\n");
		return -EOPNOTSUPP;
	}

	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_MT7621_MASK, val);

	return 0;
}

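/* Program the GMAC0 interface mode and retune the TRGMII PLL: TRGMII always
 * runs the PLL at 500 MHz, while RGMII uses 250 MHz for 1000 Mbit/s links
 * and 500 MHz otherwise, with matching RX/TX clock control settings.
 */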
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
				   phy_interface_t interface, int speed)
{
	u32 val;
	int ret;

	if (interface == PHY_INTERFACE_MODE_TRGMII) {
		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
		val = 500000000;
		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
		if (ret)
			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
		return;
	}

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

static void mtk_setup_bridge_switch(struct mtk_eth *eth)
{
	int val;

	/* Force Port1 XGMAC Link Up */
	val = mtk_r32(eth, MTK_XGMAC_STS(MTK_GMAC1_ID));
	mtk_w32(eth, val | MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
		MTK_XGMAC_STS(MTK_GMAC1_ID));

	/* Adjust GSW bridge IPG to 11 */
	val = mtk_r32(eth, MTK_GSW_CFG);
	val &= ~(GSWTX_IPG_MASK | GSWRX_IPG_MASK);
	val |= (GSW_IPG_11 << GSWTX_IPG_SHIFT) |
	       (GSW_IPG_11 << GSWRX_IPG_SHIFT);
	mtk_w32(eth, val, MTK_GSW_CFG);
}

static bool mtk_check_gmac23_idle(struct mtk_mac *mac)
{
	u32 mac_fsm, gdm_fsm;

	mac_fsm = mtk_r32(mac->hw, MTK_MAC_FSM(mac->id));

	switch (mac->id) {
	case MTK_GMAC2_ID:
		gdm_fsm = mtk_r32(mac->hw, MTK_FE_GDM2_FSM);
		break;
	case MTK_GMAC3_ID:
		gdm_fsm = mtk_r32(mac->hw, MTK_FE_GDM3_FSM);
		break;
	default:
		return true;
	}

	if ((mac_fsm & 0xFFFF0000) == 0x01010000 &&
	    (gdm_fsm & 0xFFFF0000) == 0x00000000)
		return true;

	return false;
}

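/* Enable or disable 802.3az EEE on one MAC. When enabling, the wake-up and
 * LPI idle-threshold timings below appear to be vendor-tuned values for this
 * IP; force-EEE bits are then set in MCR according to the current speed.
 */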
static void mtk_setup_eee(struct mtk_mac *mac, bool enable)
{
	struct mtk_eth *eth = mac->hw;
	u32 mcr, mcr_cur;
	u32 val;

	mcr = mcr_cur = mtk_r32(eth, MTK_MAC_MCR(mac->id));
	mcr &= ~(MAC_MCR_FORCE_EEE100 | MAC_MCR_FORCE_EEE1000);

	if (enable) {
		mac->tx_lpi_enabled = 1;

		val = FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 19) |
		      FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 33) |
		      FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD,
				 mac->tx_lpi_timer) |
		      FIELD_PREP(MAC_EEE_RESV0, 14);
		mtk_w32(eth, val, MTK_MAC_EEE(mac->id));

		switch (mac->speed) {
		case SPEED_1000:
			mcr |= MAC_MCR_FORCE_EEE1000;
			break;
		case SPEED_100:
			mcr |= MAC_MCR_FORCE_EEE100;
			break;
		}
	} else {
		mac->tx_lpi_enabled = 0;

		mtk_w32(eth, 0x00000002, MTK_MAC_EEE(mac->id));
	}

	/* Only update control register when needed! */
	if (mcr != mcr_cur)
		mtk_w32(eth, mcr, MTK_MAC_MCR(mac->id));
}

static int mtk_get_hwver(struct mtk_eth *eth)
{
	struct device_node *np;
	struct regmap *hwver;
	u32 info = 0;

	eth->hwver = MTK_HWID_V1;

	np = of_parse_phandle(eth->dev->of_node, "mediatek,hwver", 0);
	if (!np)
		return -EINVAL;

	hwver = syscon_node_to_regmap(np);
	if (IS_ERR(hwver))
		return PTR_ERR(hwver);

	regmap_read(hwver, 0x8, &info);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
		eth->hwver = FIELD_GET(HWVER_BIT_NETSYS_3, info);
	else
		eth->hwver = FIELD_GET(HWVER_BIT_NETSYS_1_2, info);

	of_node_put(np);

	return 0;
}

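/* phylink .mac_select_pcs callback: SGMII/802.3z links map to one of the
 * (possibly shared) SGMII PCS instances, while USXGMII/10GBASE-KR/5GBASE-R
 * links on NETSYS v3 (except GMAC1) use the USXGMII PCS of the matching
 * XGMII block.
 */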
static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
					      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	unsigned int sid;

	if (interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(interface)) {
		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
		       0 : mtk_mac2xgmii_id(eth, mac->id);

		return mtk_sgmii_select_pcs(eth->sgmii, sid);
	} else if (interface == PHY_INTERFACE_MODE_USXGMII ||
		   interface == PHY_INTERFACE_MODE_10GKR ||
		   interface == PHY_INTERFACE_MODE_5GBASER) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) &&
		    mac->id != MTK_GMAC1_ID) {
			sid = mtk_mac2xgmii_id(eth, mac->id);

			return mtk_usxgmii_select_pcs(eth->usxgmii, sid);
		}
	}

	return NULL;
}

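/* phylink .mac_config callback. On an interface change this selects the SoC
 * pin/path muxing for the new mode, reprograms the GE_MODE field in
 * ETHSYS_SYSCFG0, and records the syscfg0 value that mtk_mac_finish() later
 * uses to re-enable the SGMII path. Finally the GDM/XGDM egress selection is
 * updated according to the MAC type.
 */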
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	u32 sid, i;
	int val = 0, ge_mode, err = 0;
	unsigned int mac_type = mac->type;

	/* MT76x8 has no hardware settings for the MAC */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
	    mac->interface != state->interface) {
		/* Setup soc pin functions */
		switch (state->interface) {
		case PHY_INTERFACE_MODE_TRGMII:
			if (mac->id)
				goto err_phy;
			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
					  MTK_GMAC1_TRGMII))
				goto err_phy;
			/* fall through */
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_REVMII:
		case PHY_INTERFACE_MODE_RMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
		case PHY_INTERFACE_MODE_2500BASEX:
		case PHY_INTERFACE_MODE_SGMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_GMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
				err = mtk_gmac_gephy_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_XGMII:
			mac->type = MTK_XGDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_XGMII)) {
				err = mtk_gmac_xgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_USXGMII:
		case PHY_INTERFACE_MODE_10GKR:
		case PHY_INTERFACE_MODE_5GBASER:
			mac->type = MTK_XGDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
				err = mtk_gmac_usxgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		default:
			goto err_phy;
		}

		/* Setup clock for 1st gmac */
		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
		    !phy_interface_mode_is_8023z(state->interface) &&
		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
			if (MTK_HAS_CAPS(mac->hw->soc->caps,
					 MTK_TRGMII_MT7621_CLK)) {
				if (mt7621_gmac0_rgmii_adjust(mac->hw,
							      state->interface))
					goto err_phy;
			} else {
				mtk_gmac0_rgmii_adjust(mac->hw,
						       state->interface,
						       state->speed);

				/* mt7623_pad_clk_setup */
				for (i = 0; i < NUM_TRGMII_CTRL; i++)
					mtk_w32(mac->hw,
						TD_DM_DRVP(8) | TD_DM_DRVN(8),
						TRGMII_TD_ODT(i));

				/* Assert/release MT7623 RXC reset */
				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
					TRGMII_RCK_CTRL);
				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
			}
		}

		ge_mode = 0;
		switch (state->interface) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_GMII:
			ge_mode = 1;
			break;
		case PHY_INTERFACE_MODE_REVMII:
			ge_mode = 2;
			break;
		case PHY_INTERFACE_MODE_RMII:
			if (mac->id)
				goto err_phy;
			ge_mode = 3;
			break;
		default:
			break;
		}

		/* put the gmac into the right mode */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
		spin_unlock(&eth->syscfg0_lock);

		mac->interface = state->interface;
	}

	/* SGMII */
	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path GMAC to SGMII will be enabled once the SGMIISYS
		 * setup is done.
		 */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK,
				   ~(u32)SYSCFG0_SGMII_MASK);

		/* Decide how GMAC and SGMIISYS are mapped */
		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
		       0 : mac->id;

		/* Save the syscfg0 value for mac_finish */
		mac->syscfg0 = val;
		spin_unlock(&eth->syscfg0_lock);
	} else if (state->interface == PHY_INTERFACE_MODE_USXGMII ||
		   state->interface == PHY_INTERFACE_MODE_10GKR ||
		   state->interface == PHY_INTERFACE_MODE_5GBASER) {
		/* Nothing to do */
	} else if (phylink_autoneg_inband(mode)) {
		dev_err(eth->dev,
			"In-band mode not supported in non SGMII mode!\n");
		return;
	}

	/* Setup gmac */
	if (mac->type == MTK_XGDM_TYPE) {
		mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
		mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			if (mac->id == MTK_GMAC1_ID)
				mtk_setup_bridge_switch(eth);
		}
	} else if (mac->type == MTK_GDM_TYPE) {
		val = mtk_r32(eth, MTK_GDMA_EG_CTRL(mac->id));
		mtk_w32(eth, val & ~MTK_GDMA_XGDM_SEL,
			MTK_GDMA_EG_CTRL(mac->id));

		/* FIXME: In the current hardware design, we have to reset FE
		 * when switching XGDM to GDM. Therefore, here trigger an SER
		 * to let GDM go back to the initial state.
		 */
		if (mac->type != mac_type && !mtk_check_gmac23_idle(mac)) {
			if (!test_bit(MTK_RESETTING, &mac->hw->state)) {
				atomic_inc(&force);
				schedule_work(&eth->pending_work);
			}
		}
	}

	return;

err_phy:
	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
		mac->id, phy_modes(state->interface));
	return;

init_err:
	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
		mac->id, phy_modes(state->interface), err);
}

static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
			  phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;

	/* Enable SGMII */
	if (interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(interface))
		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, mac->syscfg0);

	return 0;
}

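/* Report the current link state to phylink. XGDM MACs decode speed and link
 * from the XGMAC status word; GDM MACs read it back from the SGMII PCS,
 * either via the Clause 22 autoneg result or, with autoneg off, from the
 * forced SGMII mode and the RG_PHY_SPEED_3_125G (2.5G rate) setting.
 */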
static int mtk_mac_pcs_get_state(struct phylink_config *config,
				 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	if (mac->type == MTK_XGDM_TYPE) {
		u32 sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));

		if (mac->id == MTK_GMAC2_ID)
			sts = sts >> 16;

		state->duplex = DUPLEX_FULL;

		switch (FIELD_GET(MTK_USXGMII_PCS_MODE, sts)) {
		case 0:
			state->speed = SPEED_10000;
			break;
		case 1:
			state->speed = SPEED_5000;
			break;
		case 2:
			state->speed = SPEED_2500;
			break;
		case 3:
			state->speed = SPEED_1000;
			break;
		}

		state->interface = mac->interface;
		state->link = FIELD_GET(MTK_USXGMII_PCS_LINK, sts);
	} else if (mac->type == MTK_GDM_TYPE) {
		struct mtk_eth *eth = mac->hw;
		struct mtk_sgmii *ss = eth->sgmii;
		u32 id = mtk_mac2xgmii_id(eth, mac->id);
		u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
		u32 bm, adv, rgc3, sgm_mode;

		state->interface = mac->interface;

		regmap_read(ss->pcs[id].regmap, SGMSYS_PCS_CONTROL_1, &bm);
		if (bm & SGMII_AN_ENABLE) {
			regmap_read(ss->pcs[id].regmap,
				    SGMSYS_PCS_ADVERTISE, &adv);

			phylink_mii_c22_pcs_decode_state(
				state,
				FIELD_GET(SGMII_BMSR, bm),
				FIELD_GET(SGMII_LPA, adv));
		} else {
			state->link = !!(bm & SGMII_LINK_STATYS);

			regmap_read(ss->pcs[id].regmap,
				    SGMSYS_SGMII_MODE, &sgm_mode);

			switch (sgm_mode & SGMII_SPEED_MASK) {
			case SGMII_SPEED_10:
				state->speed = SPEED_10;
				break;
			case SGMII_SPEED_100:
				state->speed = SPEED_100;
				break;
			case SGMII_SPEED_1000:
				regmap_read(ss->pcs[id].regmap,
					    ss->pcs[id].ana_rgc3, &rgc3);
				rgc3 = FIELD_GET(RG_PHY_SPEED_3_125G, rgc3);
				state->speed = rgc3 ? SPEED_2500 : SPEED_1000;
				break;
			}

			if (sgm_mode & SGMII_DUPLEX_HALF)
				state->duplex = DUPLEX_HALF;
			else
				state->duplex = DUPLEX_FULL;
		}

		state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
		if (pmsr & MAC_MSR_RX_FC)
			state->pause |= MLO_PAUSE_RX;
		if (pmsr & MAC_MSR_TX_FC)
			state->pause |= MLO_PAUSE_TX;
	}

	return 1;
}

static int mtk_gdm_fsm_get(struct mtk_mac *mac, u32 gdm)
{
	u32 fsm = mtk_r32(mac->hw, gdm);
	u32 ret = 0, val = 0;

	switch (mac->type) {
	case MTK_GDM_TYPE:
		ret = fsm == 0;
		break;
	case MTK_XGDM_TYPE:
		ret = fsm == 0x10000000;
		break;
	default:
		break;
	}

	if ((mac->type == MTK_XGDM_TYPE) && (mac->id != MTK_GMAC1_ID)) {
		val = mtk_r32(mac->hw, MTK_MAC_FSM(mac->id));
		if ((val == 0x02010100) || (val == 0x01010100)) {
			ret = (mac->interface == PHY_INTERFACE_MODE_XGMII) ?
			      ((fsm & 0x0fffffff) == 0) : ((fsm & 0x00ffffff) == 0);
		} else {
			ret = 0;
		}
	}

	return ret;
}

static void mtk_gdm_fsm_poll(struct mtk_mac *mac)
{
	u32 gdm = 0, i = 0;

	switch (mac->id) {
	case MTK_GMAC1_ID:
		gdm = MTK_FE_GDM1_FSM;
		break;
	case MTK_GMAC2_ID:
		gdm = MTK_FE_GDM2_FSM;
		break;
	case MTK_GMAC3_ID:
		gdm = MTK_FE_GDM3_FSM;
		break;
	default:
		pr_info("%s mac id invalid\n", __func__);
		break;
	}

	while (i < 3) {
		if (mtk_gdm_fsm_get(mac, gdm))
			break;
		msleep(500);
		i++;
	}

	if (i == 3)
		pr_info("%s fsm invalid\n", __func__);
}

static void mtk_pse_port_link_set(struct mtk_mac *mac, bool up)
{
	u32 fe_glo_cfg, val = 0;

	fe_glo_cfg = mtk_r32(mac->hw, MTK_FE_GLO_CFG(mac->id));
	switch (mac->id) {
	case MTK_GMAC1_ID:
		val = MTK_FE_LINK_DOWN_P1;
		break;
	case MTK_GMAC2_ID:
		val = MTK_FE_LINK_DOWN_P2;
		break;
	case MTK_GMAC3_ID:
		val = MTK_FE_LINK_DOWN_P15;
		break;
	}

	if (!up)
		fe_glo_cfg |= val;
	else
		fe_glo_cfg &= ~val;

	mtk_w32(mac->hw, fe_glo_cfg, MTK_FE_GLO_CFG(mac->id));
	mtk_gdm_fsm_poll(mac);
}

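/* phylink .mac_link_down callback: force the PSE port link down (and wait
 * for the GDM FSM to drain) before disabling MAC TX/RX, so no frames stay
 * stuck in the frame engine. For USXGMII MACs this also stops the PCS link
 * poller.
 */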
static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	unsigned int id;
	u32 mcr, sts;

	mtk_pse_port_link_set(mac, false);
	if (mac->type == MTK_GDM_TYPE) {
		mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
		mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
		mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
	} else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
		struct mtk_usxgmii_pcs *mpcs;

		mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
		mcr &= 0xfffffff0;
		mcr |= XMAC_MCR_TRX_DISABLE;
		mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));

		sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));
		sts &= ~MTK_XGMAC_FORCE_LINK(mac->id);
		mtk_w32(mac->hw, sts, MTK_XGMAC_STS(mac->id));

		id = mtk_mac2xgmii_id(eth, mac->id);
		mpcs = &eth->usxgmii->pcs[id];
		cancel_delayed_work_sync(&mpcs->link_poll);
	}
}

static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
			    phy_interface_t interface,
			    struct phy_device *phy,
			    int speed, int duplex, bool tx_pause, bool rx_pause)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr, mcr_cur, sts, force_link;

	mac->speed = speed;

	if (mac->type == MTK_GDM_TYPE) {
		mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
		mcr = mcr_cur;
		mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
			 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
			 MAC_MCR_FORCE_RX_FC);
		mcr |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
		       MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;

		/* Configure speed */
		switch (speed) {
		case SPEED_2500:
		case SPEED_1000:
			mcr |= MAC_MCR_SPEED_1000;
			break;
		case SPEED_100:
			mcr |= MAC_MCR_SPEED_100;
			break;
		}

		/* Configure duplex */
		if (duplex == DUPLEX_FULL)
			mcr |= MAC_MCR_FORCE_DPX;

		/* Configure pause modes -
		 * phylink will avoid these for half duplex
		 */
		if (tx_pause)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (rx_pause)
			mcr |= MAC_MCR_FORCE_RX_FC;

		mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;

		/* Only update control register when needed! */
		if (mcr != mcr_cur)
			mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

		if (mode == MLO_AN_PHY && phy)
			mtk_setup_eee(mac, phy_init_eee(phy, false) >= 0);
	} else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
		/* Eliminate the interference (before link-up) caused by PHY noise */
		mtk_m32(mac->hw, XMAC_LOGIC_RST, 0x0, MTK_XMAC_LOGIC_RST(mac->id));
		mdelay(20);
		mtk_m32(mac->hw, XMAC_GLB_CNTCLR, 0x1, MTK_XMAC_CNT_CTRL(mac->id));

		switch (mac->id) {
		case MTK_GMAC2_ID:
			force_link = (mac->interface ==
				      PHY_INTERFACE_MODE_XGMII) ?
				     MTK_XGMAC_FORCE_LINK(mac->id) : 0;
			sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));
			mtk_w32(mac->hw, sts | force_link,
				MTK_XGMAC_STS(mac->id));
			break;
		case MTK_GMAC3_ID:
			sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));
			mtk_w32(mac->hw,
				sts | MTK_XGMAC_FORCE_LINK(mac->id),
				MTK_XGMAC_STS(mac->id));
			break;
		}

		mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));

		mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC);
		/* Configure pause modes -
		 * phylink will avoid these for half duplex
		 */
		if (tx_pause)
			mcr |= XMAC_MCR_FORCE_TX_FC;
		if (rx_pause)
			mcr |= XMAC_MCR_FORCE_RX_FC;

		mcr &= ~(XMAC_MCR_TRX_DISABLE);
		mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
	}
	mtk_pse_port_link_set(mac, true);
}

static void mtk_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
	      phy_interface_mode_is_rgmii(state->interface)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
	       phy_interface_mode_is_8023z(state->interface))) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_XGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_XGMII)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_USXGMII)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_10GKR))) {
		linkmode_zero(supported);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_USXGMII:
	case PHY_INTERFACE_MODE_10GKR:
		phylink_set(mask, 10000baseKR_Full);
		phylink_set(mask, 10000baseT_Full);
		phylink_set(mask, 10000baseCR_Full);
		phylink_set(mask, 10000baseSR_Full);
		phylink_set(mask, 10000baseLR_Full);
		phylink_set(mask, 10000baseLRM_Full);
		phylink_set(mask, 10000baseER_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		phylink_set(mask, 1000baseT_Half);
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		phylink_set(mask, 5000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_TRGMII:
		phylink_set(mask, 1000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_XGMII:
		/* fall through */
	case PHY_INTERFACE_MODE_1000BASEX:
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 2500baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseT_Half);
		/* fall through */
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_REVMII:
	case PHY_INTERFACE_MODE_NA:
	default:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		break;
	}

	if (state->interface == PHY_INTERFACE_MODE_NA) {
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII)) {
			phylink_set(mask, 10000baseKR_Full);
			phylink_set(mask, 10000baseT_Full);
			phylink_set(mask, 10000baseCR_Full);
			phylink_set(mask, 10000baseSR_Full);
			phylink_set(mask, 10000baseLR_Full);
			phylink_set(mask, 10000baseLRM_Full);
			phylink_set(mask, 10000baseER_Full);
			phylink_set(mask, 1000baseKX_Full);
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
			phylink_set(mask, 2500baseT_Full);
			phylink_set(mask, 5000baseT_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
			phylink_set(mask, 1000baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
		}
	}

	if (mac->type == MTK_XGDM_TYPE) {
		phylink_clear(mask, 10baseT_Half);
		phylink_clear(mask, 100baseT_Half);
		phylink_clear(mask, 1000baseT_Half);
	}

	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}

static const struct phylink_mac_ops mtk_phylink_ops = {
	.validate = mtk_validate,
	.mac_select_pcs = mtk_mac_select_pcs,
	.mac_link_state = mtk_mac_pcs_get_state,
	.mac_config = mtk_mac_config,
	.mac_finish = mtk_mac_finish,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
};

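/* Derive the MDC divider from the optional "clock-frequency" DT property:
 * divider = DIV_ROUND_UP(MDC_MAX_FREQ, requested), capped at 63. For
 * example, assuming MDC_MAX_FREQ is 25 MHz (defined in mtk_eth_soc.h), the
 * default 2.5 MHz request yields a divider of 10. Turbo mode is enabled via
 * MAC_MISC on NETSYS v3 and via PPSC on earlier generations.
 */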
static int mtk_mdc_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int max_clk = 2500000, divider;
	int ret = 0;
	u32 val;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
		if (val > MDC_MAX_FREQ ||
		    val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
			dev_err(eth->dev, "MDIO clock frequency out of range");
			ret = -EINVAL;
			goto err_put_node;
		}
		max_clk = val;
	}

	divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);

	/* Configure MDC Turbo Mode */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		val = mtk_r32(eth, MTK_MAC_MISC);
		val |= MISC_MDC_TURBO;
		mtk_w32(eth, val, MTK_MAC_MISC);
	} else {
		val = mtk_r32(eth, MTK_PPSC);
		val |= PPSC_MDC_TURBO;
		mtk_w32(eth, val, MTK_PPSC);
	}

	/* Configure MDC Divider */
	val = mtk_r32(eth, MTK_PPSC);
	val &= ~PPSC_MDC_CFG;
	val |= FIELD_PREP(PPSC_MDC_CFG, divider);
	mtk_w32(eth, val, MTK_PPSC);

	dev_info(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->reset = mtk_mdio_reset;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	if (snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np) < 0) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

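/* TX interrupts are masked through the per-SoC tx_irq_mask register and RX
 * interrupts through pdma.irq_mask; each read-modify-write is serialized by
 * its own spinlock because the masks are shared with the IRQ handlers.
 */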
static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MT7628_SDM_MAC_ADRH);
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MT7628_SDM_MAC_ADRL);
	} else {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MTK_GDMA_MAC_ADRH(mac->id));
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MTK_GDMA_MAC_ADRL(mac->id));
	}
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

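/* Accumulate the hardware MIB counters for one GDM into mac->hw_stats. Byte
 * counters are 64-bit in hardware, split across a low and a high word; the
 * high word is folded in with "stats << 32". NETSYS v3 uses a different TX
 * counter layout, hence the two offset sets. Readers are synchronized with
 * the u64_stats syncp sequence counter.
 */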
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_eth *eth = mac->hw;
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int offs = hw_stats->reg_offset;
	u64 stats;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
	stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x08 + offs);
	hw_stats->rx_overflow +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
	hw_stats->rx_fcs_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
	hw_stats->rx_short_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
	hw_stats->rx_long_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
	hw_stats->rx_checksum_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
	hw_stats->rx_flow_control_packets +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		hw_stats->tx_skip +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
		hw_stats->tx_collisions +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
		hw_stats->tx_bytes +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
	} else {
		hw_stats->tx_skip +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
		hw_stats->tx_collisions +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
		hw_stats->tx_bytes +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
	}

	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

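/* RX buffers are allocated as page fragments: the usable DMA area must hold
 * at least MTK_MAX_RX_LENGTH, and the fragment additionally carries the
 * NET_SKB_PAD/NET_IP_ALIGN headroom plus the skb_shared_info tail. The two
 * helpers below convert between the fragment size and the DMA buffer size.
 */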
static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
			    struct mtk_rx_dma_v2 *dma_rxd)
{
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	if (!(rxd->rxd2 & RX_DMA_DONE))
		return false;

	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
		rxd->rxd7 = READ_ONCE(dma_rxd->rxd7);
	}

	return true;
}

/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	if (!eth->soc->has_sram) {
		eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
						       cnt * soc->txrx.txd_size,
						       &eth->phy_scratch_ring,
						       GFP_KERNEL);
	} else {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
			eth->scratch_ring = eth->sram_base;
		else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
			eth->scratch_ring = eth->base + MTK_ETH_SRAM_OFFSET;
	}

	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dma_dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
		return -ENOMEM;

	phy_ring_tail = eth->phy_scratch_ring +
			(dma_addr_t)soc->txrx.txd_size * (cnt - 1);

	for (i = 0; i < cnt; i++) {
		struct mtk_tx_dma_v2 *txd;

		txd = eth->scratch_ring + i * soc->txrx.txd_size;
		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
		if (i < cnt - 1)
			txd->txd2 = eth->phy_scratch_ring +
				    (i + 1) * soc->txrx.txd_size;

		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
		txd->txd4 = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			txd->txd5 = 0;
			txd->txd6 = 0;
			txd->txd7 = 0;
			txd->txd8 = 0;
		}
	}

	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);

	return 0;
}

1620static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1621{
developere9356982022-07-04 09:03:20 +08001622 return ring->dma + (desc - ring->phys);
developerfd40db22021-04-29 10:08:25 +08001623}
1624
1625static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
developere9356982022-07-04 09:03:20 +08001626 void *txd, u32 txd_size)
developerfd40db22021-04-29 10:08:25 +08001627{
developere9356982022-07-04 09:03:20 +08001628 int idx = (txd - ring->dma) / txd_size;
developerfd40db22021-04-29 10:08:25 +08001629
1630 return &ring->buf[idx];
1631}
1632
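/* The PDMA shadow ring is allocated with the same descriptor size and count
 * as the QDMA ring (see mtk_tx_alloc()), so a QDMA descriptor's offset
 * inside ring->dma is also its mirror's offset inside ring->dma_pdma. That
 * is why the pointer arithmetic below - the dma_pdma base plus the QDMA
 * offset - is enough to hop between the two views of the same slot.
 */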
static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
				       void *dma)
{
	return ring->dma_pdma - ring->dma + dma;
}

static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
{
	return (dma - ring->dma) / txd_size;
}

static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 bool napi)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
			dma_unmap_single(eth->dma_dev,
					 dma_unmap_addr(tx_buf, dma_addr0),
					 dma_unmap_len(tx_buf, dma_len0),
					 DMA_TO_DEVICE);
		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}
	} else {
		if (dma_unmap_len(tx_buf, dma_len0)) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}

		if (dma_unmap_len(tx_buf, dma_len1)) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr1),
				       dma_unmap_len(tx_buf, dma_len1),
				       DMA_TO_DEVICE);
		}
	}

	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
		if (napi)
			napi_consume_skb(tx_buf->skb, napi);
		else
			dev_kfree_skb_any(tx_buf->skb);
	}
	tx_buf->skb = NULL;
}

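/* On the PDMA path one hardware descriptor carries up to two buffers:
 * even-numbered buffers (idx & 1 == 0) land in txd1 with their length in
 * the PLEN0 field of txd2, odd-numbered ones land in txd3 with PLEN1.
 * The matching unmap state is spread over dma_addr0/dma_len0 and
 * dma_addr1/dma_len1 of the same mtk_tx_buf, which is what mtk_tx_unmap()
 * above walks when the descriptor is reclaimed.
 */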
static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
			 size_t size, int idx)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
		dma_unmap_len_set(tx_buf, dma_len0, size);
	} else {
		if (idx & 1) {
			txd->txd3 = mapped_addr;
			txd->txd2 |= TX_DMA_PLEN1(size);
			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len1, size);
		} else {
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			txd->txd1 = mapped_addr;
			txd->txd2 = TX_DMA_PLEN0(size);
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, size);
		}
	}
}

static void mtk_tx_set_dma_desc_v1(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *desc = txd;
	u32 data;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_SWC | QID_LOW_BITS(info->qid) | TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data);

	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
	data |= QID_HIGH_BITS(info->qid);
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM;
		/* vlan header offload */
		if (info->vlan)
			data |= TX_DMA_INS_VLAN | info->vlan_tci;
	}

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0x7 << TX_DMA_FPORT_SHIFT);
		data |= 0x4 << TX_DMA_FPORT_SHIFT;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);
}

static void mtk_tx_set_dma_desc_v2(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma_v2 *desc = txd;
	u32 data = 0;

	if (!info->qid && mac->id)
		info->qid = MTK_QDMA_GMAC2_QID;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data);

	data = ((mac->id == MTK_GMAC3_ID) ?
		PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
		data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);

	data = 0;
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO_V2;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM_V2;
	}
	WRITE_ONCE(desc->txd5, data);

	data = 0;
	if (info->first && info->vlan)
		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
	WRITE_ONCE(desc->txd6, data);

	WRITE_ONCE(desc->txd7, 0);
	WRITE_ONCE(desc->txd8, 0);
}

static void mtk_tx_set_dma_desc_v3(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma_v2 *desc = txd;
	u64 addr64 = 0;
	u32 data = 0;

	if (!info->qid && mac->id)
		info->qid = MTK_QDMA_GMAC2_QID;

	addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
		 TX_DMA_SDP1(info->addr) : 0;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data | addr64);

	data = ((mac->id == MTK_GMAC3_ID) ?
		PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
		data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);

	data = 0;
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO_V2;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM_V2;

		if (netdev_uses_dsa(dev))
			data |= TX_DMA_SPTAG_V3;
	}
	WRITE_ONCE(desc->txd5, data);

	data = 0;
	if (info->first && info->vlan)
		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
	WRITE_ONCE(desc->txd6, data);

	WRITE_ONCE(desc->txd7, 0);
	WRITE_ONCE(desc->txd8, 0);
}

static void mtk_tx_set_dma_desc(struct sk_buff *skb, struct net_device *dev, void *txd,
				struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
		mtk_tx_set_dma_desc_v3(skb, dev, txd, info);
	else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		mtk_tx_set_dma_desc_v2(skb, dev, txd, info);
	else
		mtk_tx_set_dma_desc_v1(skb, dev, txd, info);
}

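/* Shape of a mapped skb on the QDMA path, as built by mtk_tx_map() below
 * (an illustration of the existing logic, not extra state): the linear
 * head goes into the first descriptor (itxd), every page fragment is
 * chopped into at most MTK_TX_DMA_BUF_LEN sized pieces, and each piece
 * takes the next free descriptor via the txd2 links. Only the first
 * descriptor's mtk_tx_buf keeps the skb pointer for completion; the
 * others carry the MTK_DMA_DUMMY_DESC placeholder so mtk_poll_tx() frees
 * the skb exactly once.
 */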
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_tx_dma_desc_info txd_info = {
		.size = skb_headlen(skb),
		.qid = skb->mark & MTK_QDMA_TX_MASK,
		.gso = gso,
		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
		.vlan = skb_vlan_tag_present(skb),
		.vlan_tci = skb_vlan_tag_get(skb),
		.first = true,
		.last = !skb_is_nonlinear(skb),
	};
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
	struct mtk_tx_buf *itx_buf, *tx_buf;
	int i, n_desc = 1;
	int k = 0;

	if (skb->len < 32) {
		if (skb_put_padto(skb, MTK_MIN_TX_LENGTH))
			return -ENOMEM;

		txd_info.size = skb_headlen(skb);
	}

	itxd = ring->next_free;
	itxd_pdma = qdma_to_pdma(ring, itxd);
	if (itxd == ring->last_free)
		return -ENOMEM;

	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
	memset(itx_buf, 0, sizeof(*itx_buf));

	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
				       DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
		return -ENOMEM;

	mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);

	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	itx_buf->flags |= (mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
			  (mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
						      MTK_TX_FLAGS_FPORT2;
	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
		     k++);

	/* TX SG offload */
	txd = itxd;
	txd_pdma = qdma_to_pdma(ring, txd);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool new_desc = true;

			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
			    (i & 0x1)) {
				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
				txd_pdma = qdma_to_pdma(ring, txd);
				if (txd == ring->last_free)
					goto err_dma;

				n_desc++;
			} else {
				new_desc = false;
			}

			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
			txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
					!(frag_size - txd_info.size);
			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
							 offset, txd_info.size,
							 DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dma_dev,
						       txd_info.addr)))
				goto err_dma;

			mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);

			tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
			if (new_desc)
				memset(tx_buf, 0, sizeof(*tx_buf));
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			tx_buf->flags |=
				(mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
				(mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
							    MTK_TX_FLAGS_FPORT2;

			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
				     txd_info.size, k++);

			frag_size -= txd_info.size;
			offset += txd_info.size;
		}
	}

	/* store skb to cleanup */
	itx_buf->skb = skb;

	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		if (k & 0x1)
			txd_pdma->txd2 |= TX_DMA_LS0;
		else
			txd_pdma->txd2 |= TX_DMA_LS1;
	}

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
		    !netdev_xmit_more())
			mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
	} else {
		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
					     ring->dma_size);
		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
	}

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf, false);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;

		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
		itxd_pdma = qdma_to_pdma(ring, itxd);
	} while (itxd != txd);

	return -ENOMEM;
}

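/* Worked example for mtk_cal_txd_req() below, with an assumed
 * MTK_TX_DMA_BUF_LEN of 0x3fff (16383) bytes: a GSO skb whose two frags
 * are 64KB and 2KB needs 1 (linear head) + DIV_ROUND_UP(65536, 16383) +
 * DIV_ROUND_UP(2048, 16383) = 1 + 5 + 1 = 7 descriptors. A non-GSO skb is
 * simply charged one descriptor per frag plus one for the head. The cap
 * value is an assumption for the arithmetic, not restated from this file.
 */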
static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	skb_frag_t *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
					       MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

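/* Note on the TSO path in mtk_start_xmit() below: the MSS is handed to the
 * hardware by stashing it in the TCP checksum field of the (copied-on-write)
 * header, i.e. tcp_hdr(skb)->check = htons(gso_size). The LSO engine is
 * expected to consume that field while segmenting the frame and to emit
 * real per-segment checksums, so no valid checksum is lost by overwriting
 * it here - an inference from this code path, not a restated datasheet fact.
 */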
static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		netif_stop_queue(dev);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		netif_stop_queue(dev);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
{
	int i;
	struct mtk_rx_ring *ring;
	int idx;

	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
		struct mtk_rx_dma *rxd;

		if (!IS_NORMAL_RING(i) && !IS_HW_LRO_RING(i))
			continue;

		ring = &eth->rx_ring[i];
		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
		if (rxd->rxd2 & RX_DMA_DONE) {
			ring->calc_idx_update = true;
			return ring;
		}
	}

	return NULL;
}

static void mtk_update_rx_cpu_idx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	int i;

	if (!eth->hwlro) {
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	} else {
		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
			ring = &eth->rx_ring[i];
			if (ring->calc_idx_update) {
				ring->calc_idx_update = false;
				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
			}
		}
	}
}

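/* Refill strategy in mtk_poll_rx() below: a replacement fragment is
 * allocated and DMA-mapped *before* the received buffer is handed to the
 * stack. If either the allocation or the mapping fails, the descriptor is
 * simply recycled with its old buffer ("release_desc") and the frame is
 * counted as dropped, so the ring never ends up with an empty slot the
 * hardware could write into.
 */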
static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
	struct mtk_rx_ring *ring = rx_napi->rx_ring;
	int idx;
	struct sk_buff *skb;
	u64 addr64 = 0;
	u8 *data, *new_data;
	struct mtk_rx_dma_v2 *rxd, trxd;
	int done = 0;

	if (unlikely(!ring))
		goto rx_done;

	while (done < budget) {
		unsigned int pktlen, *rxdcsum;
		struct net_device *netdev = NULL;
		dma_addr_t dma_addr = 0;
		int mac = 0;

		if (eth->hwlro)
			ring = mtk_get_rx_ring(eth);

		if (unlikely(!ring))
			goto rx_done;

		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
		data = ring->data[idx];

		if (!mtk_rx_get_desc(eth, &trxd, rxd))
			break;

		/* find out which mac the packet comes from. values start at 1 */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
			mac = 0;
		} else {
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
				switch (RX_DMA_GET_SPORT_V2(trxd.rxd5)) {
				case PSE_GDM1_PORT:
				case PSE_GDM2_PORT:
					mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
					break;
				case PSE_GDM3_PORT:
					mac = MTK_GMAC3_ID;
					break;
				}
			} else {
				mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ?
				      0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
			}
		}

		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
			     !eth->netdev[mac]))
			goto release_desc;

		netdev = eth->netdev[mac];

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

		/* alloc new buffer */
		new_data = napi_alloc_frag(ring->frag_size);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dma_dev,
					  new_data + NET_SKB_PAD +
					  eth->ip_align,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
			 ((u64)(trxd.rxd2 & 0xf)) << 32 : 0;

		dma_unmap_single(eth->dma_dev,
				 (u64)(trxd.rxd1 | addr64),
				 ring->buf_size, DMA_FROM_DEVICE);

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(data);
			netdev->stats.rx_dropped++;
			goto skip_rx;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
			rxdcsum = &trxd.rxd3;
		else
			rxdcsum = &trxd.rxd4;

		if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
				if (trxd.rxd3 & RX_DMA_VTAG_V2)
					__vlan_hwaccel_put_tag(skb,
						htons(RX_DMA_VPID_V2(trxd.rxd4)),
						RX_DMA_VID_V2(trxd.rxd4));
			} else {
				if (trxd.rxd2 & RX_DMA_VTAG)
					__vlan_hwaccel_put_tag(skb,
						htons(RX_DMA_VPID(trxd.rxd3)),
						RX_DMA_VID(trxd.rxd3));
			}

			/* If the netdev is attached to a DSA switch, the
			 * special tag the switch hardware inserted in the
			 * VLAN field can be offloaded by the RX HW VLAN
			 * engine. Clear the VLAN information from @skb to
			 * avoid an unexpected 8021d handler before the
			 * packet enters the DSA framework.
			 */
			if (netdev_uses_dsa(netdev))
				__vlan_hwaccel_clear_tag(skb);
		}

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
			*(u32 *)(skb->head) = trxd.rxd5;
		else
			*(u32 *)(skb->head) = trxd.rxd4;

		skb_hnat_alg(skb) = 0;
		skb_hnat_filled(skb) = 0;
		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;

		if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
			trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
				     __func__, skb_hnat_reason(skb));
			skb->pkt_type = PACKET_HOST;
		}

		trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x)\n",
			     __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
			     skb_hnat_reason(skb), skb_hnat_alg(skb));
#endif
		if (mtk_hwlro_stats_ebl &&
		    IS_HW_LRO_RING(ring->ring_no) && eth->hwlro) {
			hw_lro_stats_update(ring->ring_no, &trxd);
			hw_lro_flush_stats_update(ring->ring_no, &trxd);
		}

		skb_record_rx_queue(skb, 0);
		napi_gro_receive(napi, skb);

skip_rx:
		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
			 RX_DMA_SDP1(dma_addr) : 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			rxd->rxd2 = RX_DMA_LSO;
		else
			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size) | addr64;

		ring->calc_idx = idx;

		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth, ring);
	}

	return done;
}

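/* Completion walk on the QDMA path, as implemented below: the software
 * cursor (ring->last_free_ptr) chases the hardware's DMA release pointer
 * (qdma.drx_ptr). Every descriptor in between whose TX_DMA_OWNER_CPU bit
 * is set has been transmitted, so its buffer is unmapped, its skb (if it
 * is not the MTK_DMA_DUMMY_DESC placeholder) is counted and freed, and the
 * slot is returned to free_count before the cursor is written back.
 */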
static void mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
			     unsigned int *done, unsigned int *bytes)
{
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = ring->last_free_ptr;
	dma = mtk_r32(eth, reg_map->qdma.drx_ptr);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac = 0;

		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);

		tx_buf = mtk_desc_to_tx_buf(ring, desc, soc->txrx.txd_size);
		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
			mac = MTK_GMAC2_ID;
		else if (tx_buf->flags & MTK_TX_FLAGS_FPORT2)
			mac = MTK_GMAC3_ID;

		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_tx_unmap(eth, tx_buf, true);

		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	ring->last_free_ptr = cpu;
	mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
}

static void mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
			     unsigned int *done, unsigned int *bytes)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = ring->cpu_idx;
	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);

	while ((cpu != dma) && budget) {
		tx_buf = &ring->buf[cpu];
		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[0] += skb->len;
			done[0]++;
			budget--;
		}

		mtk_tx_unmap(eth, tx_buf, true);

		desc = ring->dma + cpu * eth->soc->txrx.txd_size;
		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
	}

	ring->cpu_idx = cpu;
}

static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	unsigned int done[MTK_MAX_DEVS];
	unsigned int bytes[MTK_MAX_DEVS];
	int total = 0, i;

	memset(done, 0, sizeof(done));
	memset(bytes, 0, sizeof(bytes));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_poll_tx_qdma(eth, budget, done, bytes);
	else
		mtk_poll_tx_pdma(eth, budget, done, bytes);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i] || !done[i])
			continue;
		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
		total += done[i];
	}

	if (mtk_queue_stopped(eth) &&
	    (atomic_read(&ring->free_count) > ring->thresh))
		mtk_wake_queue(eth);

	return total;
}

static void mtk_handle_status_irq(struct mtk_eth *eth)
{
	u32 status2 = mtk_r32(eth, MTK_FE_INT_STATUS);

	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
		mtk_stats_update(eth);
		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
			MTK_FE_INT_STATUS);
	}
}

static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	u32 status, mask;
	int tx_done = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_handle_status_irq(eth);
	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, reg_map->tx_irq_status);
		mask = mtk_r32(eth, reg_map->tx_irq_mask);
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n",
			 tx_done, status, mask);
	}

	if (tx_done == budget)
		return budget;

	status = mtk_r32(eth, reg_map->tx_irq_status);
	if (status & MTK_TX_DONE_INT)
		return budget;

	if (napi_complete(napi))
		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);

	return tx_done;
}

static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
	struct mtk_eth *eth = rx_napi->eth;
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	struct mtk_rx_ring *ring = rx_napi->rx_ring;
	u32 status, mask;
	int rx_done = 0;
	int remain_budget = budget;

	mtk_handle_status_irq(eth);

poll_again:
	mtk_w32(eth, MTK_RX_DONE_INT(ring->ring_no), reg_map->pdma.irq_status);
	rx_done = mtk_poll_rx(napi, remain_budget, eth);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, reg_map->pdma.irq_status);
		mask = mtk_r32(eth, reg_map->pdma.irq_mask);
		dev_info(eth->dev,
			 "done rx %d, intr 0x%08x/0x%x\n",
			 rx_done, status, mask);
	}
	if (rx_done == remain_budget)
		return budget;

	status = mtk_r32(eth, reg_map->pdma.irq_status);
	if (status & MTK_RX_DONE_INT(ring->ring_no)) {
		remain_budget -= rx_done;
		goto poll_again;
	}

	if (napi_complete(napi))
		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(ring->ring_no));

	return rx_done + budget - remain_budget;
}

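/* Ring placement in mtk_tx_alloc() below: on SoCs without dedicated SRAM
 * the descriptor ring lives in coherent DRAM; with has_sram it is carved
 * out of the scratch region right behind the MTK_DMA_SIZE free-queue
 * descriptors set up in mtk_init_fq_dma(). The QDMA descriptors are linked
 * into a circle through txd2 up front, so at runtime only ownership bits
 * and buffer pointers need to be touched per packet.
 */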
static int mtk_tx_alloc(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = soc->txrx.txd_size;
	struct mtk_tx_dma_v2 *txd, *pdma_txd;

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	if (!eth->soc->has_sram) {
		ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
					       &ring->phys, GFP_KERNEL);
	} else {
		ring->dma = eth->scratch_ring + MTK_DMA_SIZE * sz;
		ring->phys = eth->phy_scratch_ring +
			     MTK_DMA_SIZE * (dma_addr_t)sz;
	}

	if (!ring->dma)
		goto no_tx_mem;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		txd = ring->dma + i * sz;
		txd->txd2 = next_ptr;
		txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		txd->txd4 = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			txd->txd5 = 0;
			txd->txd6 = 0;
			txd->txd7 = 0;
			txd->txd8 = 0;
		}
	}

	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
	 * only as the framework. The real HW descriptors are the PDMA
	 * descriptors in ring->dma_pdma.
	 */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev,
						    MTK_DMA_SIZE * sz,
						    &ring->phys_pdma, GFP_KERNEL);
		if (!ring->dma_pdma)
			goto no_tx_mem;

		for (i = 0; i < MTK_DMA_SIZE; i++) {
			pdma_txd = ring->dma_pdma + i * sz;

			pdma_txd->txd2 = TX_DMA_DESP2_DEF;
			pdma_txd->txd4 = 0;
		}
	}

	ring->dma_size = MTK_DMA_SIZE;
	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = ring->dma;
	ring->last_free = (void *)txd;
	ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
	ring->thresh = MAX_SKB_FRAGS;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
		mtk_w32(eth,
			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
			soc->reg_map->qdma.crx_ptr);
		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
			soc->reg_map->qdma.qtx_cfg);
	} else {
		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
		mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
	}

	return 0;

no_tx_mem:
	return -ENOMEM;
}

static void mtk_tx_clean(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < MTK_DMA_SIZE; i++)
			mtk_tx_unmap(eth, &ring->buf[i], false);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (!eth->soc->has_sram && ring->dma) {
		dma_free_coherent(eth->dma_dev,
				  MTK_DMA_SIZE * soc->txrx.txd_size,
				  ring->dma, ring->phys);
		ring->dma = NULL;
	}

	if (ring->dma_pdma) {
		dma_free_coherent(eth->dma_dev,
				  MTK_DMA_SIZE * soc->txrx.txd_size,
				  ring->dma_pdma, ring->phys_pdma);
		ring->dma_pdma = NULL;
	}
}

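/* RX ring placement in mtk_rx_alloc() below mirrors the TX side: QDMA and
 * HW LRO rings always come from coherent DRAM, while the normal PDMA rings
 * on has_sram SoCs are stacked in the scratch SRAM behind the TX ring at
 * offset MTK_DMA_SIZE * txd_size * (ring_no + 1). Each slot is preloaded
 * with a mapped fragment and a PLEN0-sized rxd2, so the hardware can start
 * receiving as soon as the ring registers are programmed.
 */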
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	struct mtk_rx_ring *ring;
	int rx_data_len, rx_dma_size;
	int i;
	u64 addr64 = 0;

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		if (ring_no)
			return -EINVAL;
		ring = &eth->rx_ring_qdma;
	} else {
		ring = &eth->rx_ring[ring_no];
	}

	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
	} else {
		rx_data_len = ETH_DATA_LEN;
		rx_dma_size = MTK_DMA_SIZE;
	}

	ring->frag_size = mtk_max_frag_size(rx_data_len);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		ring->data[i] = netdev_alloc_frag(ring->frag_size);
		if (!ring->data[i])
			return -ENOMEM;
	}

	if (!eth->soc->has_sram || rx_flag != MTK_RX_FLAGS_NORMAL) {
		ring->dma = dma_alloc_coherent(eth->dma_dev,
					       rx_dma_size * eth->soc->txrx.rxd_size,
					       &ring->phys, GFP_KERNEL);
	} else {
		struct mtk_tx_ring *tx_ring = &eth->tx_ring;

		ring->dma = tx_ring->dma + MTK_DMA_SIZE *
			    eth->soc->txrx.txd_size * (ring_no + 1);
		ring->phys = tx_ring->phys + MTK_DMA_SIZE *
			     eth->soc->txrx.txd_size * (ring_no + 1);
	}

	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		struct mtk_rx_dma_v2 *rxd;

		dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
				ring->data[i] + NET_SKB_PAD + eth->ip_align,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
			return -ENOMEM;

		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
		rxd->rxd1 = (unsigned int)dma_addr;

		addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
			 RX_DMA_SDP1(dma_addr) : 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			rxd->rxd2 = RX_DMA_LSO;
		else
			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size) | addr64;

		rxd->rxd3 = 0;
		rxd->rxd4 = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
			rxd->rxd5 = 0;
			rxd->rxd6 = 0;
			rxd->rxd7 = 0;
			rxd->rxd8 = 0;
		}
	}
	ring->dma_size = rx_dma_size;
	ring->calc_idx_update = false;
	ring->calc_idx = rx_dma_size - 1;
	ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ?
			    MTK_QRX_CRX_IDX_CFG(ring_no) :
			    MTK_PRX_CRX_IDX_CFG(ring_no);
	ring->ring_no = ring_no;
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		mtk_w32(eth, ring->phys,
			reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
		mtk_w32(eth, rx_dma_size,
			reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
		mtk_w32(eth, ring->calc_idx,
			ring->crx_idx_reg);
		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
			reg_map->qdma.rst_idx);
	} else {
		mtk_w32(eth, ring->phys,
			reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
		mtk_w32(eth, rx_dma_size,
			reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
		mtk_w32(eth, ring->calc_idx,
			ring->crx_idx_reg);
		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
			reg_map->pdma.rst_idx);
	}

	return 0;
}

static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram)
{
	int i;
	u64 addr64 = 0;

	if (ring->data && ring->dma) {
		for (i = 0; i < ring->dma_size; i++) {
			struct mtk_rx_dma *rxd;

			if (!ring->data[i])
				continue;

			rxd = ring->dma + i * eth->soc->txrx.rxd_size;
			if (!rxd->rxd1)
				continue;

			addr64 = (MTK_HAS_CAPS(eth->soc->caps,
					       MTK_8GB_ADDRESSING)) ?
				 ((u64)(rxd->rxd2 & 0xf)) << 32 : 0;

			dma_unmap_single(eth->dma_dev,
					 (u64)(rxd->rxd1 | addr64),
					 ring->buf_size,
					 DMA_FROM_DEVICE);
			skb_free_frag(ring->data[i]);
		}
		kfree(ring->data);
		ring->data = NULL;
	}

	if (in_sram)
		return;

	if (ring->dma) {
		dma_free_coherent(eth->dma_dev,
				  ring->dma_size * eth->soc->txrx.rxd_size,
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}

static int mtk_hwlro_rx_init(struct mtk_eth *eth)
{
	int i;
	u32 val;
	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;

	/* set LRO rings to auto-learn modes */
	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;

	/* validate LRO ring */
	ring_ctrl_dw2 |= MTK_RING_VLD;

	/* set AGE timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;

	/* set max AGG timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;

	/* set max LRO AGG count */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;

	for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
	}

	/* enable IPv4 checksum update */
	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;

	/* switch priority comparison to packet count mode */
	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;

	/* bandwidth threshold setting */
	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);

	/* auto-learn score delta setting */
	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_LRO_ALT_SCORE_DELTA);

	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
		MTK_PDMA_LRO_ALT_REFRESH_TIMER);

	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
		val = mtk_r32(eth, MTK_PDMA_RX_CFG);
		mtk_w32(eth, val | (MTK_PDMA_LRO_SDL << MTK_RX_CFG_SDL_OFFSET),
			MTK_PDMA_RX_CFG);

		lro_ctrl_dw0 |= MTK_PDMA_LRO_SDL << MTK_CTRL_DW0_SDL_OFFSET;
	} else {
		/* set HW LRO mode & the max aggregation count for rx packets */
		lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
	}

	/* enable HW LRO */
	lro_ctrl_dw0 |= MTK_LRO_EN;

	/* enable the CPU reason black list */
	lro_ctrl_dw0 |= MTK_LRO_CRSN_BNW;

	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);

	/* do not use any PPE CPU reason */
	mtk_w32(eth, 0xffffffff, MTK_PDMA_LRO_CTRL_DW1);

	return 0;
}

static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
{
	int i;
	u32 val;

	/* relinquish lro rings, flush aggregated packets */
	mtk_w32(eth, MTK_LRO_RING_RELINGUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);

	/* wait for the relinquish requests to be done */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
		if (val & MTK_LRO_RING_RELINGUISH_DONE) {
			mdelay(20);
			continue;
		}
		break;
	}

	/* invalidate lro rings */
	for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));

	/* disable HW LRO */
	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
}

static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
{
	u32 reg_val;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
		idx += 1;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));

	/* validate the IP setting */
	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
}

static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
{
	u32 reg_val;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
		idx += 1;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
}

static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
{
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i])
			cnt++;
	}

	return cnt;
}

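/* ethtool plumbing for the per-MAC LRO IP slots used below: userspace adds
 * a rule with something like "ethtool -N <dev> flow-type tcp4 dst-ip
 * <addr> loc <0|1>" (command shape shown for illustration); the location
 * (0 or 1) selects a hwlro_ip[] slot of that MAC, and the global LRO ring
 * index is mac->id * MTK_MAX_LRO_IP_CNT + location. Only the TCPv4
 * destination address of the flow spec is honoured; other fields are
 * rejected on add and reported as don't-care on read-back.
 */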
2996static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2997 struct ethtool_rxnfc *cmd)
2998{
2999 struct ethtool_rx_flow_spec *fsp =
3000 (struct ethtool_rx_flow_spec *)&cmd->fs;
3001 struct mtk_mac *mac = netdev_priv(dev);
3002 struct mtk_eth *eth = mac->hw;
3003 int hwlro_idx;
3004
3005 if ((fsp->flow_type != TCP_V4_FLOW) ||
3006 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
3007 (fsp->location > 1))
3008 return -EINVAL;
3009
3010 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
3011 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
3012
3013 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3014
3015 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
3016
3017 return 0;
3018}
3019
3020static int mtk_hwlro_del_ipaddr(struct net_device *dev,
3021 struct ethtool_rxnfc *cmd)
3022{
3023 struct ethtool_rx_flow_spec *fsp =
3024 (struct ethtool_rx_flow_spec *)&cmd->fs;
3025 struct mtk_mac *mac = netdev_priv(dev);
3026 struct mtk_eth *eth = mac->hw;
3027 int hwlro_idx;
3028
3029 if (fsp->location > 1)
3030 return -EINVAL;
3031
3032 mac->hwlro_ip[fsp->location] = 0;
3033 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
3034
3035 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3036
3037 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
3038
3039 return 0;
3040}
3041
3042static void mtk_hwlro_netdev_disable(struct net_device *dev)
3043{
3044 struct mtk_mac *mac = netdev_priv(dev);
3045 struct mtk_eth *eth = mac->hw;
3046 int i, hwlro_idx;
3047
3048 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3049 mac->hwlro_ip[i] = 0;
3050 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
3051
3052 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
3053 }
3054
3055 mac->hwlro_ip_cnt = 0;
3056}
3057
3058static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
3059 struct ethtool_rxnfc *cmd)
3060{
3061 struct mtk_mac *mac = netdev_priv(dev);
3062 struct ethtool_rx_flow_spec *fsp =
3063 (struct ethtool_rx_flow_spec *)&cmd->fs;
3064
3065 /* only tcp dst ipv4 is meaningful, others are meaningless */
3066 fsp->flow_type = TCP_V4_FLOW;
3067 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
3068 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
3069
3070 fsp->h_u.tcp_ip4_spec.ip4src = 0;
3071 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
3072 fsp->h_u.tcp_ip4_spec.psrc = 0;
3073 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
3074 fsp->h_u.tcp_ip4_spec.pdst = 0;
3075 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
3076 fsp->h_u.tcp_ip4_spec.tos = 0;
3077 fsp->m_u.tcp_ip4_spec.tos = 0xff;
3078
3079 return 0;
3080}
3081
3082static int mtk_hwlro_get_fdir_all(struct net_device *dev,
3083 struct ethtool_rxnfc *cmd,
3084 u32 *rule_locs)
3085{
3086 struct mtk_mac *mac = netdev_priv(dev);
3087 int cnt = 0;
3088 int i;
3089
3090 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3091 if (mac->hwlro_ip[i]) {
3092 rule_locs[cnt] = i;
3093 cnt++;
3094 }
3095 }
3096
3097 cmd->rule_cnt = cnt;
3098
3099 return 0;
3100}
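
/* Usage sketch (not part of the driver): the HW LRO rxnfc helpers above
 * are reached through the standard ethtool flow-rule interface. Only
 * TCPv4 dst-ip rules at location 0 or 1 are accepted; the interface
 * name and address below are illustrative assumptions:
 *
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.100 loc 0   # add
 *   ethtool -N eth0 delete 0                                    # delete
 *   ethtool -n eth0                                             # dump rules
 */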
3101
developerea49c302023-06-27 16:06:41 +08003102u32 mtk_rss_indr_table(struct mtk_rss_params *rss_params, int index)
developere3d0de22023-05-30 17:45:00 +08003103{
developerea49c302023-06-27 16:06:41 +08003104 u32 val = 0;
3105 int i;
developere3d0de22023-05-30 17:45:00 +08003106
developerea49c302023-06-27 16:06:41 +08003107 for (i = 16 * index; i < 16 * index + 16; i++)
3108 val |= (rss_params->indirection_table[i] << (2 * (i % 16)));
developere3d0de22023-05-30 17:45:00 +08003109
developerea49c302023-06-27 16:06:41 +08003110 return val;
developere3d0de22023-05-30 17:45:00 +08003111}
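
/* Worked example (a sketch, assuming eth->soc->rss_num == 4): with the
 * default indirection_table[i] = i % 4, each run of 16 entries is
 * 0,1,2,3,0,1,... and every entry occupies two bits at offset
 * 2 * (i % 16), so each packed 32-bit word becomes
 *
 *   (3 << 6 | 2 << 4 | 1 << 2 | 0) repeated in every byte = 0xe4e4e4e4
 */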
3112
developer18f46a82021-07-20 21:08:21 +08003113static int mtk_rss_init(struct mtk_eth *eth)
3114{
developerea49c302023-06-27 16:06:41 +08003115 struct mtk_rss_params *rss_params = &eth->rss_params;
3116 static u8 hash_key[MTK_RSS_HASH_KEYSIZE] = {
3117 0xfa, 0x01, 0xac, 0xbe, 0x3b, 0xb7, 0x42, 0x6a,
3118 0x0c, 0xf2, 0x30, 0x80, 0xa3, 0x2d, 0xcb, 0x77,
3119 0xb4, 0x30, 0x7b, 0xae, 0xcb, 0x2b, 0xca, 0xd0,
3120 0xb0, 0x8f, 0xa3, 0x43, 0x3d, 0x25, 0x67, 0x41,
3121 0xc2, 0x0e, 0x5b, 0x25, 0xda, 0x56, 0x5a, 0x6d};
developer18f46a82021-07-20 21:08:21 +08003122 u32 val;
developerea49c302023-06-27 16:06:41 +08003123 int i;
3124
3125 memcpy(rss_params->hash_key, hash_key, MTK_RSS_HASH_KEYSIZE);
3126
3127 for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE; i++)
3128 rss_params->indirection_table[i] = i % eth->soc->rss_num;
developer18f46a82021-07-20 21:08:21 +08003129
developer8ecd51b2023-03-13 11:28:28 +08003130 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
developer18f46a82021-07-20 21:08:21 +08003131 /* Set RSS rings to PSE modes */
3132 val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(1));
3133 val |= MTK_RING_PSE_MODE;
3134 mtk_w32(eth, val, MTK_LRO_CTRL_DW2_CFG(1));
3135
3136 /* Enable non-lro multiple rx */
3137 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
3138 val |= MTK_NON_LRO_MULTI_EN;
3139 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
3140
3141 /* Enable RSS delay interrupt support */
3142 val |= MTK_LRO_DLY_INT_EN;
3143 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
developer18f46a82021-07-20 21:08:21 +08003144 }
3145
3146 /* Hash Type */
3147 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
3148 val |= MTK_RSS_IPV4_STATIC_HASH;
3149 val |= MTK_RSS_IPV6_STATIC_HASH;
3150 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3151
developerea49c302023-06-27 16:06:41 +08003152 /* Hash Key */
3153 for (i = 0; i < MTK_RSS_HASH_KEYSIZE / sizeof(u32); i++)
3154 mtk_w32(eth, rss_params->hash_key[i], MTK_RSS_HASH_KEY_DW(i));
3155
developer18f46a82021-07-20 21:08:21 +08003156 /* Select the size of indirection table */
developerea49c302023-06-27 16:06:41 +08003157 for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE / 16; i++)
3158 mtk_w32(eth, mtk_rss_indr_table(rss_params, i),
3159 MTK_RSS_INDR_TABLE_DW(i));
developer18f46a82021-07-20 21:08:21 +08003160
3161 /* Pause */
3162 val |= MTK_RSS_CFG_REQ;
3163 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3164
3165 /* Enable RSS */
3166 val |= MTK_RSS_EN;
3167 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3168
3169 /* Release pause */
3170 val &= ~(MTK_RSS_CFG_REQ);
3171 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3172
3173 /* Set per-RSS-ring interrupt grouping */
developer94806ec2023-05-19 14:16:44 +08003174 mtk_w32(eth, MTK_RX_DONE_INT(MTK_RSS_RING(1)), MTK_PDMA_INT_GRP1);
3175 mtk_w32(eth, MTK_RX_DONE_INT(MTK_RSS_RING(2)), MTK_PDMA_INT_GRP2);
3176 mtk_w32(eth, MTK_RX_DONE_INT(MTK_RSS_RING(3)), MTK_PDMA_INT_GRP3);
developer18f46a82021-07-20 21:08:21 +08003177
3178 /* Set GRP INT */
developer94806ec2023-05-19 14:16:44 +08003179 mtk_w32(eth, 0x210FFFF2, MTK_FE_INT_GRP);
developer18f46a82021-07-20 21:08:21 +08003180
developer089e8852022-09-28 14:43:46 +08003181 /* Enable RSS delay interrupt */
developer933f09b2023-09-12 11:13:01 +08003182 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
3183 mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX1_DLY_INT);
3184 mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX2_DLY_INT);
3185 mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX3_DLY_INT);
3186 } else
3187 mtk_w32(eth, MTK_MAX_DELAY_INT_V2, MTK_PDMA_RSS_DELAY_INT);
developer089e8852022-09-28 14:43:46 +08003188
developer18f46a82021-07-20 21:08:21 +08003189 return 0;
3190}
3191
3192static void mtk_rss_uninit(struct mtk_eth *eth)
3193{
3194 u32 val;
3195
3196 /* Pause */
3197 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
3198 val |= MTK_RSS_CFG_REQ;
3199 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3200
3201 /* Disable RSS */
3202 val &= ~(MTK_RSS_EN);
3203 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3204
3205 /* Release pause */
3206 val &= ~(MTK_RSS_CFG_REQ);
3207 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3208}
3209
developerfd40db22021-04-29 10:08:25 +08003210static netdev_features_t mtk_fix_features(struct net_device *dev,
3211 netdev_features_t features)
3212{
3213 if (!(features & NETIF_F_LRO)) {
3214 struct mtk_mac *mac = netdev_priv(dev);
3215 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3216
3217 if (ip_cnt) {
3218 netdev_info(dev, "RX flow is programmed, LRO must stay enabled\n");
3219
3220 features |= NETIF_F_LRO;
3221 }
3222 }
3223
3224 if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
3225 netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n");
3226
3227 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
3228 }
3229
3230 return features;
3231}
3232
3233static int mtk_set_features(struct net_device *dev, netdev_features_t features)
3234{
3235 struct mtk_mac *mac = netdev_priv(dev);
3236 struct mtk_eth *eth = mac->hw;
3237 int err = 0;
3238
3239 if (!((dev->features ^ features) & MTK_SET_FEATURES))
3240 return 0;
3241
3242 if (!(features & NETIF_F_LRO))
3243 mtk_hwlro_netdev_disable(dev);
3244
3245 if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
3246 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
3247 else
3248 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3249
3250 return err;
3251}
3252
3253/* wait for DMA to finish whatever it is doing before we start using it again */
3254static int mtk_dma_busy_wait(struct mtk_eth *eth)
3255{
3256 unsigned long t_start = jiffies;
3257
3258 while (1) {
3259 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3260 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
3261 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
3262 return 0;
3263 } else {
3264 if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
3265 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
3266 return 0;
3267 }
3268
3269 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
3270 break;
3271 }
3272
3273 dev_err(eth->dev, "DMA init timeout\n");
3274 return -1;
3275}
3276
3277static int mtk_dma_init(struct mtk_eth *eth)
3278{
3279 int err;
3280 u32 i;
3281
3282 if (mtk_dma_busy_wait(eth))
3283 return -EBUSY;
3284
3285 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3286 /* QDMA needs scratch memory for internal reordering of the
3287 * descriptors
3288 */
3289 err = mtk_init_fq_dma(eth);
3290 if (err)
3291 return err;
3292 }
3293
3294 err = mtk_tx_alloc(eth);
3295 if (err)
3296 return err;
3297
3298 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3299 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
3300 if (err)
3301 return err;
3302 }
3303
3304 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
3305 if (err)
3306 return err;
3307
3308 if (eth->hwlro) {
developer8ecd51b2023-03-13 11:28:28 +08003309 i = (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) ? 1 : 4;
developer77d03a72021-06-06 00:06:00 +08003310 for (; i < MTK_MAX_RX_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08003311 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
3312 if (err)
3313 return err;
3314 }
3315 err = mtk_hwlro_rx_init(eth);
3316 if (err)
3317 return err;
3318 }
3319
developer18f46a82021-07-20 21:08:21 +08003320 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3321 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3322 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_NORMAL);
3323 if (err)
3324 return err;
3325 }
3326 err = mtk_rss_init(eth);
3327 if (err)
3328 return err;
3329 }
3330
developerfd40db22021-04-29 10:08:25 +08003331 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3332 /* Enable random early drop and set drop threshold
3333 * automatically
3334 */
3335 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
developer68ce74f2023-01-03 16:11:57 +08003336 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
3337 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred2);
developerfd40db22021-04-29 10:08:25 +08003338 }
3339
3340 return 0;
3341}
3342
3343static void mtk_dma_free(struct mtk_eth *eth)
3344{
developere9356982022-07-04 09:03:20 +08003345 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08003346 int i;
3347
3348 for (i = 0; i < MTK_MAC_COUNT; i++)
3349 if (eth->netdev[i])
3350 netdev_reset_queue(eth->netdev[i]);
3351 if (!eth->soc->has_sram && eth->scratch_ring) {
developer3f28d382023-03-07 16:06:30 +08003352 dma_free_coherent(eth->dma_dev,
developere9356982022-07-04 09:03:20 +08003353 MTK_DMA_SIZE * soc->txrx.txd_size,
3354 eth->scratch_ring, eth->phy_scratch_ring);
developerfd40db22021-04-29 10:08:25 +08003355 eth->scratch_ring = NULL;
3356 eth->phy_scratch_ring = 0;
3357 }
3358 mtk_tx_clean(eth);
developerb3ce86f2022-06-30 13:31:47 +08003359 mtk_rx_clean(eth, &eth->rx_ring[0], eth->soc->has_sram);
developerfd40db22021-04-29 10:08:25 +08003360 mtk_rx_clean(eth, &eth->rx_ring_qdma, 0);
3361
3362 if (eth->hwlro) {
3363 mtk_hwlro_rx_uninit(eth);
developer77d03a72021-06-06 00:06:00 +08003364
developer04ba53d2023-08-25 14:48:33 +08003365 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) ? 4 : 1;
developer77d03a72021-06-06 00:06:00 +08003366 for (; i < MTK_MAX_RX_RING_NUM; i++)
3367 mtk_rx_clean(eth, &eth->rx_ring[i], 0);
developerfd40db22021-04-29 10:08:25 +08003368 }
3369
developer18f46a82021-07-20 21:08:21 +08003370 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3371 mtk_rss_uninit(eth);
3372
3373 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
3374 mtk_rx_clean(eth, &eth->rx_ring[i], 1);
3375 }
3376
developer94008d92021-09-23 09:47:41 +08003377 if (eth->scratch_head) {
3378 kfree(eth->scratch_head);
3379 eth->scratch_head = NULL;
3380 }
developerfd40db22021-04-29 10:08:25 +08003381}
3382
3383static void mtk_tx_timeout(struct net_device *dev)
3384{
3385 struct mtk_mac *mac = netdev_priv(dev);
3386 struct mtk_eth *eth = mac->hw;
3387
3388 eth->netdev[mac->id]->stats.tx_errors++;
3389 netif_err(eth, tx_err, dev,
3390 "transmit timed out\n");
developer8051e042022-04-08 13:26:36 +08003391
3392 if (atomic_read(&reset_lock) == 0)
3393 schedule_work(&eth->pending_work);
developerfd40db22021-04-29 10:08:25 +08003394}
3395
developer18f46a82021-07-20 21:08:21 +08003396static irqreturn_t mtk_handle_irq_rx(int irq, void *priv)
developerfd40db22021-04-29 10:08:25 +08003397{
developer18f46a82021-07-20 21:08:21 +08003398 struct mtk_napi *rx_napi = priv;
3399 struct mtk_eth *eth = rx_napi->eth;
3400 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08003401
developer18f46a82021-07-20 21:08:21 +08003402 if (likely(napi_schedule_prep(&rx_napi->napi))) {
developer18f46a82021-07-20 21:08:21 +08003403 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(ring->ring_no));
developer6bbe70d2021-08-06 09:34:55 +08003404 __napi_schedule(&rx_napi->napi);
developerfd40db22021-04-29 10:08:25 +08003405 }
3406
3407 return IRQ_HANDLED;
3408}
3409
3410static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3411{
3412 struct mtk_eth *eth = _eth;
3413
3414 if (likely(napi_schedule_prep(&eth->tx_napi))) {
developerfd40db22021-04-29 10:08:25 +08003415 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer6bbe70d2021-08-06 09:34:55 +08003416 __napi_schedule(&eth->tx_napi);
developerfd40db22021-04-29 10:08:25 +08003417 }
3418
3419 return IRQ_HANDLED;
3420}
3421
3422static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3423{
3424 struct mtk_eth *eth = _eth;
developer68ce74f2023-01-03 16:11:57 +08003425 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developerfd40db22021-04-29 10:08:25 +08003426
developer68ce74f2023-01-03 16:11:57 +08003427 if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT(0)) {
3428 if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT(0))
developer18f46a82021-07-20 21:08:21 +08003429 mtk_handle_irq_rx(irq, &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08003430 }
developer68ce74f2023-01-03 16:11:57 +08003431 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3432 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
developerfd40db22021-04-29 10:08:25 +08003433 mtk_handle_irq_tx(irq, _eth);
3434 }
3435
3436 return IRQ_HANDLED;
3437}
3438
developera2613e62022-07-01 18:29:37 +08003439static irqreturn_t mtk_handle_irq_fixed_link(int irq, void *_mac)
3440{
3441 struct mtk_mac *mac = _mac;
3442 struct mtk_eth *eth = mac->hw;
3443 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
3444 struct net_device *dev = phylink_priv->dev;
3445 int link_old, link_new;
3446
3447 // clear interrupt status for gpy211
3448 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
3449
3450 link_old = phylink_priv->link;
3451 link_new = _mtk_mdio_read(eth, phylink_priv->phyaddr, MII_BMSR) & BMSR_LSTATUS;
3452
3453 if (link_old != link_new) {
3454 phylink_priv->link = link_new;
3455 if (link_new) {
3456 if (dev) { /* dev may be NULL (see mtk_open), check before use */
3457 printk("phylink.%d %s: Link is Up\n", phylink_priv->id, dev->name);
3458 netif_carrier_on(dev);
3459 }
3460 } else if (dev) {
3461 printk("phylink.%d %s: Link is Down\n", phylink_priv->id, dev->name);
3462 netif_carrier_off(dev);
3463 }
3464 }
3465
3466 return IRQ_HANDLED;
3467}
3468
developerfd40db22021-04-29 10:08:25 +08003469#ifdef CONFIG_NET_POLL_CONTROLLER
3470static void mtk_poll_controller(struct net_device *dev)
3471{
3472 struct mtk_mac *mac = netdev_priv(dev);
3473 struct mtk_eth *eth = mac->hw;
3474
3475 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08003476 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
developer94806ec2023-05-19 14:16:44 +08003477 mtk_handle_irq_rx(eth->irq_fe[2], &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08003478 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08003479 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08003480}
3481#endif
3482
3483static int mtk_start_dma(struct mtk_eth *eth)
3484{
3485 u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
developer68ce74f2023-01-03 16:11:57 +08003486 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developer77d03a72021-06-06 00:06:00 +08003487 int val, err;
developerfd40db22021-04-29 10:08:25 +08003488
3489 err = mtk_dma_init(eth);
3490 if (err) {
3491 mtk_dma_free(eth);
3492 return err;
3493 }
3494
3495 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
developer68ce74f2023-01-03 16:11:57 +08003496 val = mtk_r32(eth, reg_map->qdma.glo_cfg);
developer089e8852022-09-28 14:43:46 +08003497 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
3498 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer19d84562022-04-21 17:01:06 +08003499 val &= ~MTK_RESV_BUF_MASK;
developerfd40db22021-04-29 10:08:25 +08003500 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08003501 val | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08003502 MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
3503 MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
3504 MTK_RESV_BUF | MTK_WCOMP_EN |
3505 MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
developer68ce74f2023-01-03 16:11:57 +08003506 MTK_RX_2B_OFFSET, reg_map->qdma.glo_cfg);
developer19d84562022-04-21 17:01:06 +08003507 }
developerfd40db22021-04-29 10:08:25 +08003508 else
3509 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08003510 val | MTK_TX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08003511 MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
3512 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
3513 MTK_RX_BT_32DWORDS,
developer68ce74f2023-01-03 16:11:57 +08003514 reg_map->qdma.glo_cfg);
developerfd40db22021-04-29 10:08:25 +08003515
developer68ce74f2023-01-03 16:11:57 +08003516 val = mtk_r32(eth, reg_map->pdma.glo_cfg);
developerfd40db22021-04-29 10:08:25 +08003517 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08003518 val | MTK_RX_DMA_EN | rx_2b_offset |
developerfd40db22021-04-29 10:08:25 +08003519 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
developer68ce74f2023-01-03 16:11:57 +08003520 reg_map->pdma.glo_cfg);
developerfd40db22021-04-29 10:08:25 +08003521 } else {
3522 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3523 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
developer68ce74f2023-01-03 16:11:57 +08003524 reg_map->pdma.glo_cfg);
developerfd40db22021-04-29 10:08:25 +08003525 }
3526
developer8ecd51b2023-03-13 11:28:28 +08003527 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2) && eth->hwlro) {
developer77d03a72021-06-06 00:06:00 +08003528 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
3529 mtk_w32(eth, val | MTK_RX_DMA_LRO_EN, MTK_PDMA_GLO_CFG);
3530 }
3531
developerfd40db22021-04-29 10:08:25 +08003532 return 0;
3533}
3534
developerdca0fde2022-12-14 11:40:35 +08003535void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
developerfd40db22021-04-29 10:08:25 +08003536{
developerdca0fde2022-12-14 11:40:35 +08003537 u32 val;
developerfd40db22021-04-29 10:08:25 +08003538
3539 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3540 return;
3541
developerdca0fde2022-12-14 11:40:35 +08003542 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));
developerfd40db22021-04-29 10:08:25 +08003543
developerdca0fde2022-12-14 11:40:35 +08003544 /* by default, set up the forward port to send frames to the PDMA */
3545 val &= ~0xffff;
developerfd40db22021-04-29 10:08:25 +08003546
developerdca0fde2022-12-14 11:40:35 +08003547 /* Enable RX checksum */
3548 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
developerfd40db22021-04-29 10:08:25 +08003549
developerdca0fde2022-12-14 11:40:35 +08003550 val |= config;
developerfd40db22021-04-29 10:08:25 +08003551
developerdca0fde2022-12-14 11:40:35 +08003552 if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
3553 val |= MTK_GDMA_SPECIAL_TAG;
developerfd40db22021-04-29 10:08:25 +08003554
developerdca0fde2022-12-14 11:40:35 +08003555 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
developerfd40db22021-04-29 10:08:25 +08003556}
3557
developer7cd7e5e2022-11-17 13:57:32 +08003558void mtk_set_pse_drop(u32 config)
3559{
3560 struct mtk_eth *eth = g_eth;
3561
3562 if (eth)
3563 mtk_w32(eth, config, PSE_PPE0_DROP);
3564}
3565EXPORT_SYMBOL(mtk_set_pse_drop);
3566
developerfd40db22021-04-29 10:08:25 +08003567static int mtk_open(struct net_device *dev)
3568{
3569 struct mtk_mac *mac = netdev_priv(dev);
3570 struct mtk_eth *eth = mac->hw;
developera2613e62022-07-01 18:29:37 +08003571 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
developer4e8a3fd2023-04-10 18:05:44 +08003572 u32 id = mtk_mac2xgmii_id(eth, mac->id);
developer18f46a82021-07-20 21:08:21 +08003573 int err, i;
developer3a5969e2022-02-09 15:36:36 +08003574 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08003575
3576 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3577 if (err) {
3578 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3579 err);
3580 return err;
3581 }
3582
3583 /* we run 2 netdevs on the same dma ring so we only bring it up once */
3584 if (!refcount_read(&eth->dma_refcnt)) {
3585 int err = mtk_start_dma(eth);
3586
3587 if (err)
3588 return err;
3589
developerfd40db22021-04-29 10:08:25 +08003590
3591 /* Tell the CDM to parse the MTK special tag on frames from the CPU */
3592 if (netdev_uses_dsa(dev)) {
3593 u32 val;
3594 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3595 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3596 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3597 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
3598 }
3599
3600 napi_enable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08003601 napi_enable(&eth->rx_napi[0].napi);
developerfd40db22021-04-29 10:08:25 +08003602 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08003603 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
3604
3605 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3606 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3607 napi_enable(&eth->rx_napi[i].napi);
3608 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(i));
3609 }
3610 }
3611
developerfd40db22021-04-29 10:08:25 +08003612 refcount_set(&eth->dma_refcnt, 1);
3613 }
3614 else
3615 refcount_inc(&eth->dma_refcnt);
3616
developera2613e62022-07-01 18:29:37 +08003617 if (phylink_priv->desc) {
3618 /* Notice: this programming sequence applies only to the GPY211 single
3619 PHY chip. If the single PHY chip is not a GPY211, you should instead:
3620 1. Contact your single PHY chip vendor and get the details of
3621 - how to enable the link status change interrupt
3622 - how to clear the interrupt source
3623 */
3624
3625 // clear interrupt source for gpy211
3626 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
3627
3628 // enable link status change interrupt for gpy211
3629 _mtk_mdio_write(eth, phylink_priv->phyaddr, 0x19, 0x0001);
3630
3631 phylink_priv->dev = dev;
3632
3633 // override dev pointer for single PHY chip 0
3634 if (phylink_priv->id == 0) {
3635 struct net_device *tmp;
3636
3637 tmp = __dev_get_by_name(&init_net, phylink_priv->label);
3638 if (tmp)
3639 phylink_priv->dev = tmp;
3640 else
3641 phylink_priv->dev = NULL;
3642 }
3643 }
3644
developerfd40db22021-04-29 10:08:25 +08003645 phylink_start(mac->phylink);
3646 netif_start_queue(dev);
developer3a5969e2022-02-09 15:36:36 +08003647 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer4e8a3fd2023-04-10 18:05:44 +08003648 if (!phy_node && eth->sgmii->pcs[id].regmap)
3649 regmap_write(eth->sgmii->pcs[id].regmap,
3650 SGMSYS_QPHY_PWR_STATE_CTRL, 0);
developer089e8852022-09-28 14:43:46 +08003651
developerdca0fde2022-12-14 11:40:35 +08003652 mtk_gdm_config(eth, mac->id, MTK_GDMA_TO_PDMA);
3653
developerfd40db22021-04-29 10:08:25 +08003654 return 0;
3655}
3656
3657static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3658{
3659 u32 val;
3660 int i;
3661
3662 /* stop the dma engine */
3663 spin_lock_bh(&eth->page_lock);
3664 val = mtk_r32(eth, glo_cfg);
3665 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3666 glo_cfg);
3667 spin_unlock_bh(&eth->page_lock);
3668
3669 /* wait for dma stop */
3670 for (i = 0; i < 10; i++) {
3671 val = mtk_r32(eth, glo_cfg);
3672 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
developer8051e042022-04-08 13:26:36 +08003673 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08003674 continue;
3675 }
3676 break;
3677 }
3678}
3679
3680static int mtk_stop(struct net_device *dev)
3681{
3682 struct mtk_mac *mac = netdev_priv(dev);
3683 struct mtk_eth *eth = mac->hw;
developer18f46a82021-07-20 21:08:21 +08003684 int i;
developer4e8a3fd2023-04-10 18:05:44 +08003685 u32 id = mtk_mac2xgmii_id(eth, mac->id);
developer3a5969e2022-02-09 15:36:36 +08003686 u32 val = 0;
3687 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08003688
developerdca0fde2022-12-14 11:40:35 +08003689 mtk_gdm_config(eth, mac->id, MTK_GDMA_DROP_ALL);
developerfd40db22021-04-29 10:08:25 +08003690 netif_tx_disable(dev);
3691
developer3a5969e2022-02-09 15:36:36 +08003692 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer4e8a3fd2023-04-10 18:05:44 +08003693 if (!phy_node && eth->sgmii->pcs[id].regmap) {
3694 regmap_read(eth->sgmii->pcs[id].regmap,
3695 SGMSYS_QPHY_PWR_STATE_CTRL, &val);
developer3a5969e2022-02-09 15:36:36 +08003696 val |= SGMII_PHYA_PWD;
developer4e8a3fd2023-04-10 18:05:44 +08003697 regmap_write(eth->sgmii->pcs[id].regmap,
3698 SGMSYS_QPHY_PWR_STATE_CTRL, val);
developer3a5969e2022-02-09 15:36:36 +08003699 }
3700
3701 //GMAC RX disable
3702 val = mtk_r32(eth, MTK_MAC_MCR(mac->id));
3703 mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(mac->id));
3704
3705 phylink_stop(mac->phylink);
3706
developerfd40db22021-04-29 10:08:25 +08003707 phylink_disconnect_phy(mac->phylink);
3708
3709 /* only shutdown DMA if this is the last user */
3710 if (!refcount_dec_and_test(&eth->dma_refcnt))
3711 return 0;
3712
developerfd40db22021-04-29 10:08:25 +08003713
3714 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08003715 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08003716 napi_disable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08003717 napi_disable(&eth->rx_napi[0].napi);
3718
3719 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3720 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3721 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(i));
3722 napi_disable(&eth->rx_napi[i].napi);
3723 }
3724 }
developerfd40db22021-04-29 10:08:25 +08003725
3726 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
developer68ce74f2023-01-03 16:11:57 +08003727 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3728 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
developerfd40db22021-04-29 10:08:25 +08003729
3730 mtk_dma_free(eth);
3731
3732 return 0;
3733}
3734
developer8051e042022-04-08 13:26:36 +08003735void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
developerfd40db22021-04-29 10:08:25 +08003736{
developer8051e042022-04-08 13:26:36 +08003737 u32 val = 0, i = 0;
developerfd40db22021-04-29 10:08:25 +08003738
developerfd40db22021-04-29 10:08:25 +08003739 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
developer8051e042022-04-08 13:26:36 +08003740 reset_bits, reset_bits);
3741
3742 while (i++ < 5000) {
3743 mdelay(1);
3744 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3745
3746 if ((val & reset_bits) == reset_bits) {
3747 mtk_reset_event_update(eth, MTK_EVENT_COLD_CNT);
3748 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3749 reset_bits, ~reset_bits);
3750 break;
3751 }
3752 }
3753
developerfd40db22021-04-29 10:08:25 +08003754 mdelay(10);
3755}
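
/* Illustrative call (macro names assumed from mtk_eth_reset.h): a cold
 * FE reset asserts and then releases the frame engine and PPE blocks in
 * a single shot, e.g.
 *
 *   ethsys_reset(eth, RSTCTRL_FE | RSTCTRL_PPE);
 */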
3756
3757static void mtk_clk_disable(struct mtk_eth *eth)
3758{
3759 int clk;
3760
3761 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3762 clk_disable_unprepare(eth->clks[clk]);
3763}
3764
3765static int mtk_clk_enable(struct mtk_eth *eth)
3766{
3767 int clk, ret;
3768
3769 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3770 ret = clk_prepare_enable(eth->clks[clk]);
3771 if (ret)
3772 goto err_disable_clks;
3773 }
3774
3775 return 0;
3776
3777err_disable_clks:
3778 while (--clk >= 0)
3779 clk_disable_unprepare(eth->clks[clk]);
3780
3781 return ret;
3782}
3783
developer18f46a82021-07-20 21:08:21 +08003784static int mtk_napi_init(struct mtk_eth *eth)
3785{
3786 struct mtk_napi *rx_napi = &eth->rx_napi[0];
3787 int i;
3788
3789 rx_napi->eth = eth;
3790 rx_napi->rx_ring = &eth->rx_ring[0];
3791 rx_napi->irq_grp_no = 2;
3792
3793 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3794 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3795 rx_napi = &eth->rx_napi[i];
3796 rx_napi->eth = eth;
3797 rx_napi->rx_ring = &eth->rx_ring[i];
3798 rx_napi->irq_grp_no = 2 + i;
3799 }
3800 }
3801
3802 return 0;
3803}
3804
developer8051e042022-04-08 13:26:36 +08003805static int mtk_hw_init(struct mtk_eth *eth, u32 type)
developerfd40db22021-04-29 10:08:25 +08003806{
developer3f28d382023-03-07 16:06:30 +08003807 u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3808 ETHSYS_DMA_AG_MAP_PPE;
developer68ce74f2023-01-03 16:11:57 +08003809 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developer8051e042022-04-08 13:26:36 +08003810 int i, ret = 0;
developerdca0fde2022-12-14 11:40:35 +08003811 u32 val;
developerfd40db22021-04-29 10:08:25 +08003812
developer8051e042022-04-08 13:26:36 +08003813 pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
3814 atomic_read(&reset_lock), atomic_read(&force));
developerfd40db22021-04-29 10:08:25 +08003815
developer8051e042022-04-08 13:26:36 +08003816 if (atomic_read(&reset_lock) == 0) {
3817 if (test_and_set_bit(MTK_HW_INIT, &eth->state))
3818 return 0;
developerfd40db22021-04-29 10:08:25 +08003819
developer8051e042022-04-08 13:26:36 +08003820 pm_runtime_enable(eth->dev);
3821 pm_runtime_get_sync(eth->dev);
3822
3823 ret = mtk_clk_enable(eth);
3824 if (ret)
3825 goto err_disable_pm;
3826 }
developerfd40db22021-04-29 10:08:25 +08003827
developer3f28d382023-03-07 16:06:30 +08003828 if (eth->ethsys)
3829 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3830 of_dma_is_coherent(eth->dma_dev->of_node) *
3831 dma_mask);
3832
developerfd40db22021-04-29 10:08:25 +08003833 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3834 ret = device_reset(eth->dev);
3835 if (ret) {
3836 dev_err(eth->dev, "MAC reset failed!\n");
3837 goto err_disable_pm;
3838 }
3839
3840 /* enable interrupt delay for RX */
3841 mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
3842
3843 /* disable delay and normal interrupt */
3844 mtk_tx_irq_disable(eth, ~0);
3845 mtk_rx_irq_disable(eth, ~0);
3846
3847 return 0;
3848 }
3849
developer8051e042022-04-08 13:26:36 +08003850 pr_info("[%s] execute fe %s reset\n", __func__,
3851 (type == MTK_TYPE_WARM_RESET) ? "warm" : "cold");
developer545abf02021-07-15 17:47:01 +08003852
developer8051e042022-04-08 13:26:36 +08003853 if (type == MTK_TYPE_WARM_RESET)
3854 mtk_eth_warm_reset(eth);
developer545abf02021-07-15 17:47:01 +08003855 else
developer8051e042022-04-08 13:26:36 +08003856 mtk_eth_cold_reset(eth);
developer545abf02021-07-15 17:47:01 +08003857
developerc4d8da72023-03-16 14:37:28 +08003858 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3859 mtk_mdc_init(eth);
3860
developer8ecd51b2023-03-13 11:28:28 +08003861 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
developer545abf02021-07-15 17:47:01 +08003862 /* Set FE to PDMAv2 if necessary */
developerfd40db22021-04-29 10:08:25 +08003863 mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
developer545abf02021-07-15 17:47:01 +08003864 }
developerfd40db22021-04-29 10:08:25 +08003865
3866 if (eth->pctl) {
3867 /* Set GE2 driving and slew rate */
3868 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3869
3870 /* set GE2 TDSEL */
3871 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3872
3873 /* set GE2 TUNE */
3874 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3875 }
3876
3877 /* Set linkdown as the default for each GMAC. Its own MCR is set
3878 * up with a more appropriate value when the mtk_mac_config call is
3879 * invoked.
3880 */
3881 for (i = 0; i < MTK_MAC_COUNT; i++)
3882 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3883
3884 /* Enable RX VLan Offloading */
developer41294e32021-05-07 16:11:23 +08003885 if (eth->soc->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
3886 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3887 else
3888 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
developerfd40db22021-04-29 10:08:25 +08003889
3890 /* enable interrupt delay for RX/TX */
3891 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
3892 mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);
3893
3894 mtk_tx_irq_disable(eth, ~0);
3895 mtk_rx_irq_disable(eth, ~0);
3896
3897 /* FE int grouping */
developer68ce74f2023-01-03 16:11:57 +08003898 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3899 mtk_w32(eth, MTK_RX_DONE_INT(0), reg_map->qdma.int_grp2);
developer94806ec2023-05-19 14:16:44 +08003900 mtk_w32(eth, 0x210FFFF2, MTK_FE_INT_GRP);
developerbe971722022-05-23 13:51:05 +08003901 mtk_w32(eth, MTK_FE_INT_TSO_FAIL |
developer8051e042022-04-08 13:26:36 +08003902 MTK_FE_INT_TSO_ILLEGAL | MTK_FE_INT_TSO_ALIGN |
3903 MTK_FE_INT_RFIFO_OV | MTK_FE_INT_RFIFO_UF, MTK_FE_INT_ENABLE);
developerfd40db22021-04-29 10:08:25 +08003904
developer089e8852022-09-28 14:43:46 +08003905 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer0fef5222023-04-26 14:48:31 +08003906 /* PSE dummy page mechanism */
3907 if (eth->soc->caps != MT7988_CAPS || eth->hwver != MTK_HWID_V1)
3908 mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) |
3909 PSE_DUMMY_WORK_GDM(2) | PSE_DUMMY_WORK_GDM(3) |
3910 DUMMY_PAGE_THR, PSE_DUMY_REQ);
3911
developer089e8852022-09-28 14:43:46 +08003912 /* PSE should not drop port1, port8 and port9 packets */
3913 mtk_w32(eth, 0x00000302, PSE_NO_DROP_CFG);
3914
developer15f760a2022-10-12 15:57:21 +08003915 /* PSE should drop p8 and p9 packets when the WDMA Rx ring is full */
3916 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
3917
developer84d1e832022-11-24 11:25:05 +08003918 /* PSE free buffer drop threshold */
3919 mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));
3920
developer089e8852022-09-28 14:43:46 +08003921 /* GDM and CDM Threshold */
3922 mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
3923 mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
3924
developerdca0fde2022-12-14 11:40:35 +08003925 /* Disable GDM1 RX CRC stripping */
3926 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(0));
3927 val &= ~MTK_GDMA_STRP_CRC;
3928 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(0));
3929
developer089e8852022-09-28 14:43:46 +08003930 /* The PSE GDM3 MIB counters have incorrect hw default values,
3931 * so the driver ought to read-clear them beforehand
3932 * in case ethtool retrieves wrong MIB values.
3933 */
3934 for (i = 0; i < MTK_STAT_OFFSET; i += 0x4)
3935 mtk_r32(eth,
3936 MTK_GDM1_TX_GBCNT + MTK_STAT_OFFSET * 2 + i);
3937 } else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfef9efd2021-06-16 18:28:09 +08003938 /* PSE Free Queue Flow Control */
3939 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3940
developer459b78e2022-07-01 17:25:10 +08003941 /* PSE should not drop port8 and port9 packets from WDMA Tx */
3942 mtk_w32(eth, 0x00000300, PSE_NO_DROP_CFG);
3943
3944 /* PSE should drop p8 and p9 packets when the WDMA Rx ring is full */
3945 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
developer81bcad32021-07-15 14:14:38 +08003946
developerfef9efd2021-06-16 18:28:09 +08003947 /* PSE config input queue threshold */
developerfd40db22021-04-29 10:08:25 +08003948 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3949 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3950 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3951 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3952 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3953 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3954 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
developerfd5f9152022-01-05 16:29:42 +08003955 mtk_w32(eth, 0x002a000e, PSE_IQ_REV(8));
developerfd40db22021-04-29 10:08:25 +08003956
developerfef9efd2021-06-16 18:28:09 +08003957 /* PSE config output queue threshold */
developerfd40db22021-04-29 10:08:25 +08003958 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3959 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3960 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3961 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3962 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3963 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3964 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3965 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
developerfef9efd2021-06-16 18:28:09 +08003966
3967 /* GDM and CDM Threshold */
3968 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3969 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3970 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3971 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3972 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3973 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
developerfd40db22021-04-29 10:08:25 +08003974 }
3975
3976 return 0;
3977
3978err_disable_pm:
3979 pm_runtime_put_sync(eth->dev);
3980 pm_runtime_disable(eth->dev);
3981
3982 return ret;
3983}
3984
3985static int mtk_hw_deinit(struct mtk_eth *eth)
3986{
3987 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
3988 return 0;
3989
3990 mtk_clk_disable(eth);
3991
3992 pm_runtime_put_sync(eth->dev);
3993 pm_runtime_disable(eth->dev);
3994
3995 return 0;
3996}
3997
3998static int __init mtk_init(struct net_device *dev)
3999{
4000 struct mtk_mac *mac = netdev_priv(dev);
4001 struct mtk_eth *eth = mac->hw;
4002 const char *mac_addr;
4003
4004 mac_addr = of_get_mac_address(mac->of_node);
4005 if (!IS_ERR(mac_addr))
4006 ether_addr_copy(dev->dev_addr, mac_addr);
4007
4008 /* If the mac address is invalid, use random mac address */
4009 if (!is_valid_ether_addr(dev->dev_addr)) {
4010 eth_hw_addr_random(dev);
4011 dev_err(eth->dev, "generated random MAC address %pM\n",
4012 dev->dev_addr);
4013 }
4014
4015 return 0;
4016}
4017
4018static void mtk_uninit(struct net_device *dev)
4019{
4020 struct mtk_mac *mac = netdev_priv(dev);
4021 struct mtk_eth *eth = mac->hw;
4022
4023 phylink_disconnect_phy(mac->phylink);
4024 mtk_tx_irq_disable(eth, ~0);
4025 mtk_rx_irq_disable(eth, ~0);
4026}
4027
4028static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4029{
4030 struct mtk_mac *mac = netdev_priv(dev);
4031
4032 switch (cmd) {
4033 case SIOCGMIIPHY:
4034 case SIOCGMIIREG:
4035 case SIOCSMIIREG:
4036 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
4037 default:
4038 /* default invoke the mtk_eth_dbg handler */
4039 return mtk_do_priv_ioctl(dev, ifr, cmd);
4040 break;
4041 }
4042
4043 return -EOPNOTSUPP;
4044}
4045
developer37482a42022-12-26 13:31:13 +08004046int mtk_phy_config(struct mtk_eth *eth, int enable)
4047{
4048 struct device_node *mii_np = NULL;
4049 struct device_node *child = NULL;
4050 int addr = 0;
4051 u32 val = 0;
4052
4053 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
4054 if (!mii_np) {
4055 dev_err(eth->dev, "no %s child node found", "mdio-bus");
4056 return -ENODEV;
4057 }
4058
4059 if (!of_device_is_available(mii_np)) {
4060 dev_err(eth->dev, "device is not available\n");
4061 return -ENODEV;
4062 }
4063
4064 for_each_available_child_of_node(mii_np, child) {
4065 addr = of_mdio_parse_addr(&eth->mii_bus->dev, child);
4066 if (addr < 0)
4067 continue;
4068 pr_info("%s %d addr:%d name:%s\n",
4069 __func__, __LINE__, addr, child->name);
4070 val = _mtk_mdio_read(eth, addr, mdiobus_c45_addr(0x1e, 0));
4071 if (enable)
4072 val &= ~BMCR_PDOWN;
4073 else
4074 val |= BMCR_PDOWN;
4075 _mtk_mdio_write(eth, addr, mdiobus_c45_addr(0x1e, 0), val);
4076 }
4077
4078 return 0;
4079}
4080
developerfd40db22021-04-29 10:08:25 +08004081static void mtk_pending_work(struct work_struct *work)
4082{
4083 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
developer8051e042022-04-08 13:26:36 +08004084 int err, i = 0;
developerfd40db22021-04-29 10:08:25 +08004085 unsigned long restart = 0;
developer8051e042022-04-08 13:26:36 +08004086 u32 val = 0;
4087
4088 atomic_inc(&reset_lock);
4089 val = mtk_r32(eth, MTK_FE_INT_STATUS);
4090 if (!mtk_check_reset_event(eth, val)) {
4091 atomic_dec(&reset_lock);
4092 pr_info("[%s] No need to do FE reset !\n", __func__);
4093 return;
4094 }
developerfd40db22021-04-29 10:08:25 +08004095
4096 rtnl_lock();
4097
developer37482a42022-12-26 13:31:13 +08004098 while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
4099 cpu_relax();
4100
4101 mtk_phy_config(eth, 0);
developer8051e042022-04-08 13:26:36 +08004102
4103 /* Adjust PPE configurations to prepare for reset */
4104 mtk_prepare_reset_ppe(eth, 0);
4105 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4106 mtk_prepare_reset_ppe(eth, 1);
4107
4108 /* Adjust FE configurations to prepare for reset */
4109 mtk_prepare_reset_fe(eth);
4110
4111 /* Trigger Wifi SER reset */
developer6bb3f3a2022-11-22 09:59:14 +08004112 for (i = 0; i < MTK_MAC_COUNT; i++) {
4113 if (!eth->netdev[i])
4114 continue;
developer37482a42022-12-26 13:31:13 +08004115 if (mtk_reset_flag == MTK_FE_STOP_TRAFFIC) {
4116 pr_info("send MTK_FE_STOP_TRAFFIC event\n");
4117 call_netdevice_notifiers(MTK_FE_STOP_TRAFFIC,
4118 eth->netdev[i]);
4119 } else {
4120 pr_info("send MTK_FE_START_RESET event\n");
4121 call_netdevice_notifiers(MTK_FE_START_RESET,
4122 eth->netdev[i]);
4123 }
developer6bb3f3a2022-11-22 09:59:14 +08004124 rtnl_unlock();
developer7979ddb2023-04-24 17:19:21 +08004125 if (!wait_for_completion_timeout(&wait_ser_done, 3000)) {
4126 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) &&
4127 (mtk_stop_fail)) {
4128 pr_info("send MTK_FE_START_RESET stop\n");
4129 rtnl_lock();
4130 call_netdevice_notifiers(MTK_FE_START_RESET,
4131 eth->netdev[i]);
4132 rtnl_unlock();
4133 if (!wait_for_completion_timeout(&wait_ser_done,
4134 3000))
4135 pr_warn("wait for MTK_FE_START_RESET\n");
4136 }
developer0baa6962023-01-31 14:25:23 +08004137 pr_warn("wait for MTK_FE_START_RESET\n");
developer7979ddb2023-04-24 17:19:21 +08004138 }
developer6bb3f3a2022-11-22 09:59:14 +08004139 rtnl_lock();
4140 break;
4141 }
developerfd40db22021-04-29 10:08:25 +08004142
developer8051e042022-04-08 13:26:36 +08004143 del_timer_sync(&eth->mtk_dma_monitor_timer);
4144 pr_info("[%s] mtk_stop starts !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08004145 /* stop all devices to make sure that dma is properly shut down */
4146 for (i = 0; i < MTK_MAC_COUNT; i++) {
4147 if (!eth->netdev[i])
4148 continue;
4149 mtk_stop(eth->netdev[i]);
4150 __set_bit(i, &restart);
4151 }
developer8051e042022-04-08 13:26:36 +08004152 pr_info("[%s] mtk_stop ends !\n", __func__);
4153 mdelay(15);
developerfd40db22021-04-29 10:08:25 +08004154
4155 if (eth->dev->pins)
4156 pinctrl_select_state(eth->dev->pins->p,
4157 eth->dev->pins->default_state);
developer8051e042022-04-08 13:26:36 +08004158
4159 pr_info("[%s] mtk_hw_init starts !\n", __func__);
4160 mtk_hw_init(eth, MTK_TYPE_WARM_RESET);
4161 pr_info("[%s] mtk_hw_init ends !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08004162
4163 /* restart DMA and enable IRQs */
4164 for (i = 0; i < MTK_MAC_COUNT; i++) {
developer6bb3f3a2022-11-22 09:59:14 +08004165 if (!test_bit(i, &restart) || !eth->netdev[i])
developerfd40db22021-04-29 10:08:25 +08004166 continue;
4167 err = mtk_open(eth->netdev[i]);
4168 if (err) {
4169 netif_alert(eth, ifup, eth->netdev[i],
4170 "Driver up/down cycle failed, closing device.\n");
4171 dev_close(eth->netdev[i]);
4172 }
4173 }
4174
developer8051e042022-04-08 13:26:36 +08004175 for (i = 0; i < MTK_MAC_COUNT; i++) {
developer6bb3f3a2022-11-22 09:59:14 +08004176 if (!eth->netdev[i])
4177 continue;
developer37482a42022-12-26 13:31:13 +08004178 if (mtk_reset_flag == MTK_FE_STOP_TRAFFIC) {
4179 pr_info("send MTK_FE_START_TRAFFIC event\n");
4180 call_netdevice_notifiers(MTK_FE_START_TRAFFIC,
4181 eth->netdev[i]);
4182 } else {
4183 pr_info("send MTK_FE_RESET_DONE event\n");
4184 call_netdevice_notifiers(MTK_FE_RESET_DONE,
4185 eth->netdev[i]);
developer8051e042022-04-08 13:26:36 +08004186 }
developer37482a42022-12-26 13:31:13 +08004187 call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE,
4188 eth->netdev[i]);
developer6bb3f3a2022-11-22 09:59:14 +08004189 break;
4190 }
developer8051e042022-04-08 13:26:36 +08004191
4192 atomic_dec(&reset_lock);
developer8051e042022-04-08 13:26:36 +08004193
4194 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
4195 eth->mtk_dma_monitor_timer.expires = jiffies;
4196 add_timer(&eth->mtk_dma_monitor_timer);
developer37482a42022-12-26 13:31:13 +08004197
4198 mtk_phy_config(eth, 1);
4199 mtk_reset_flag = 0;
developerfd40db22021-04-29 10:08:25 +08004200 clear_bit_unlock(MTK_RESETTING, &eth->state);
4201
4202 rtnl_unlock();
4203}
4204
4205static int mtk_free_dev(struct mtk_eth *eth)
4206{
4207 int i;
4208
4209 for (i = 0; i < MTK_MAC_COUNT; i++) {
4210 if (!eth->netdev[i])
4211 continue;
4212 free_netdev(eth->netdev[i]);
4213 }
4214
4215 return 0;
4216}
4217
4218static int mtk_unreg_dev(struct mtk_eth *eth)
4219{
4220 int i;
4221
4222 for (i = 0; i < MTK_MAC_COUNT; i++) {
4223 if (!eth->netdev[i])
4224 continue;
4225 unregister_netdev(eth->netdev[i]);
4226 }
4227
4228 return 0;
4229}
4230
4231static int mtk_cleanup(struct mtk_eth *eth)
4232{
4233 mtk_unreg_dev(eth);
4234 mtk_free_dev(eth);
4235 cancel_work_sync(&eth->pending_work);
4236
4237 return 0;
4238}
4239
4240static int mtk_get_link_ksettings(struct net_device *ndev,
4241 struct ethtool_link_ksettings *cmd)
4242{
4243 struct mtk_mac *mac = netdev_priv(ndev);
4244
4245 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4246 return -EBUSY;
4247
4248 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4249}
4250
4251static int mtk_set_link_ksettings(struct net_device *ndev,
4252 const struct ethtool_link_ksettings *cmd)
4253{
4254 struct mtk_mac *mac = netdev_priv(ndev);
4255
4256 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4257 return -EBUSY;
4258
4259 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4260}
4261
4262static void mtk_get_drvinfo(struct net_device *dev,
4263 struct ethtool_drvinfo *info)
4264{
4265 struct mtk_mac *mac = netdev_priv(dev);
4266
4267 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4268 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4269 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4270}
4271
4272static u32 mtk_get_msglevel(struct net_device *dev)
4273{
4274 struct mtk_mac *mac = netdev_priv(dev);
4275
4276 return mac->hw->msg_enable;
4277}
4278
4279static void mtk_set_msglevel(struct net_device *dev, u32 value)
4280{
4281 struct mtk_mac *mac = netdev_priv(dev);
4282
4283 mac->hw->msg_enable = value;
4284}
4285
4286static int mtk_nway_reset(struct net_device *dev)
4287{
4288 struct mtk_mac *mac = netdev_priv(dev);
4289
4290 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4291 return -EBUSY;
4292
4293 if (!mac->phylink)
4294 return -ENOTSUPP;
4295
4296 return phylink_ethtool_nway_reset(mac->phylink);
4297}
4298
4299static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4300{
4301 int i;
4302
4303 switch (stringset) {
4304 case ETH_SS_STATS:
4305 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
4306 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
4307 data += ETH_GSTRING_LEN;
4308 }
4309 break;
4310 }
4311}
4312
4313static int mtk_get_sset_count(struct net_device *dev, int sset)
4314{
4315 switch (sset) {
4316 case ETH_SS_STATS:
4317 return ARRAY_SIZE(mtk_ethtool_stats);
4318 default:
4319 return -EOPNOTSUPP;
4320 }
4321}
4322
4323static void mtk_get_ethtool_stats(struct net_device *dev,
4324 struct ethtool_stats *stats, u64 *data)
4325{
4326 struct mtk_mac *mac = netdev_priv(dev);
4327 struct mtk_hw_stats *hwstats = mac->hw_stats;
4328 u64 *data_src, *data_dst;
4329 unsigned int start;
4330 int i;
4331
4332 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4333 return;
4334
4335 if (netif_running(dev) && netif_device_present(dev)) {
4336 if (spin_trylock_bh(&hwstats->stats_lock)) {
4337 mtk_stats_update_mac(mac);
4338 spin_unlock_bh(&hwstats->stats_lock);
4339 }
4340 }
4341
4342 data_src = (u64 *)hwstats;
4343
4344 do {
4345 data_dst = data;
4346 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
4347
4348 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4349 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4350 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
4351}
4352
4353static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4354 u32 *rule_locs)
4355{
developerea49c302023-06-27 16:06:41 +08004356 struct mtk_mac *mac = netdev_priv(dev);
4357 struct mtk_eth *eth = mac->hw;
developerfd40db22021-04-29 10:08:25 +08004358 int ret = -EOPNOTSUPP;
4359
4360 switch (cmd->cmd) {
4361 case ETHTOOL_GRXRINGS:
4362 if (dev->hw_features & NETIF_F_LRO) {
4363 cmd->data = MTK_MAX_RX_RING_NUM;
4364 ret = 0;
developerea49c302023-06-27 16:06:41 +08004365 } else if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
4366 cmd->data = eth->soc->rss_num;
4367 ret = 0;
developerfd40db22021-04-29 10:08:25 +08004368 }
4369 break;
4370 case ETHTOOL_GRXCLSRLCNT:
4371 if (dev->hw_features & NETIF_F_LRO) {
developerfd40db22021-04-29 10:08:25 +08004372 cmd->rule_cnt = mac->hwlro_ip_cnt;
4373 ret = 0;
4374 }
4375 break;
4376 case ETHTOOL_GRXCLSRULE:
4377 if (dev->hw_features & NETIF_F_LRO)
4378 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4379 break;
4380 case ETHTOOL_GRXCLSRLALL:
4381 if (dev->hw_features & NETIF_F_LRO)
4382 ret = mtk_hwlro_get_fdir_all(dev, cmd,
4383 rule_locs);
4384 break;
4385 default:
4386 break;
4387 }
4388
4389 return ret;
4390}
4391
4392static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4393{
4394 int ret = -EOPNOTSUPP;
4395
4396 switch (cmd->cmd) {
4397 case ETHTOOL_SRXCLSRLINS:
4398 if (dev->hw_features & NETIF_F_LRO)
4399 ret = mtk_hwlro_add_ipaddr(dev, cmd);
4400 break;
4401 case ETHTOOL_SRXCLSRLDEL:
4402 if (dev->hw_features & NETIF_F_LRO)
4403 ret = mtk_hwlro_del_ipaddr(dev, cmd);
4404 break;
4405 default:
4406 break;
4407 }
4408
4409 return ret;
4410}
4411
developerea49c302023-06-27 16:06:41 +08004412static u32 mtk_get_rxfh_key_size(struct net_device *dev)
4413{
4414 return MTK_RSS_HASH_KEYSIZE;
4415}
4416
4417static u32 mtk_get_rxfh_indir_size(struct net_device *dev)
4418{
4419 return MTK_RSS_MAX_INDIRECTION_TABLE;
4420}
4421
4422static int mtk_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
4423 u8 *hfunc)
4424{
4425 struct mtk_mac *mac = netdev_priv(dev);
4426 struct mtk_eth *eth = mac->hw;
4427 struct mtk_rss_params *rss_params = &eth->rss_params;
4428 int i;
4429
4430 if (hfunc)
4431 *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
4432
4433 if (key) {
4434 memcpy(key, rss_params->hash_key,
4435 sizeof(rss_params->hash_key));
4436 }
4437
4438 if (indir) {
4439 for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE; i++)
4440 indir[i] = rss_params->indirection_table[i];
4441 }
4442
4443 return 0;
4444}
4445
4446static int mtk_set_rxfh(struct net_device *dev, const u32 *indir,
4447 const u8 *key, const u8 hfunc)
4448{
4449 struct mtk_mac *mac = netdev_priv(dev);
4450 struct mtk_eth *eth = mac->hw;
4451 struct mtk_rss_params *rss_params = &eth->rss_params;
4452 int i;
4453
4454 if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
4455 hfunc != ETH_RSS_HASH_TOP)
4456 return -EOPNOTSUPP;
4457
4458 if (key) {
4459 memcpy(rss_params->hash_key, key,
4460 sizeof(rss_params->hash_key));
4461
4462 for (i = 0; i < MTK_RSS_HASH_KEYSIZE / sizeof(u32); i++)
4463 mtk_w32(eth, rss_params->hash_key[i],
4464 MTK_RSS_HASH_KEY_DW(i));
4465 }
4466
4467 if (indir) {
4468 for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE; i++)
4469 rss_params->indirection_table[i] = indir[i];
4470
4471 for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE / 16; i++)
4472 mtk_w32(eth, mtk_rss_indr_table(rss_params, i),
4473 MTK_RSS_INDR_TABLE_DW(i));
4474 }
4475
4476 return 0;
4477}
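
/* Usage sketch (interface name is an assumption): the rxfh hooks above
 * map to the standard ethtool RSS commands, e.g.
 *
 *   ethtool -x eth0          # show the hash key and indirection table
 *   ethtool -X eth0 equal 4  # spread flows evenly over four rings
 *   ethtool -X eth0 hkey <40 colon-separated bytes>   # new Toeplitz key
 */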
4478
developer6c5cbb52022-08-12 11:37:45 +08004479static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4480{
4481 struct mtk_mac *mac = netdev_priv(dev);
developerf2823bb2022-12-29 18:20:14 +08004482 struct mtk_eth *eth = mac->hw;
4483 u32 val;
4484
4485 pause->autoneg = 0;
4486
4487 if (mac->type == MTK_GDM_TYPE) {
4488 val = mtk_r32(eth, MTK_MAC_MCR(mac->id));
4489
4490 pause->rx_pause = !!(val & MAC_MCR_FORCE_RX_FC);
4491 pause->tx_pause = !!(val & MAC_MCR_FORCE_TX_FC);
4492 } else if (mac->type == MTK_XGDM_TYPE) {
4493 val = mtk_r32(eth, MTK_XMAC_MCR(mac->id));
developer6c5cbb52022-08-12 11:37:45 +08004494
developerf2823bb2022-12-29 18:20:14 +08004495 pause->rx_pause = !!(val & XMAC_MCR_FORCE_RX_FC);
4496 pause->tx_pause = !!(val & XMAC_MCR_FORCE_TX_FC);
4497 }
developer6c5cbb52022-08-12 11:37:45 +08004498}
4499
4500static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4501{
4502 struct mtk_mac *mac = netdev_priv(dev);
4503
4504 return phylink_ethtool_set_pauseparam(mac->phylink, pause);
4505}
4506
developer9b725932022-11-24 16:25:56 +08004507static int mtk_get_eee(struct net_device *dev, struct ethtool_eee *eee)
4508{
4509 struct mtk_mac *mac = netdev_priv(dev);
4510 struct mtk_eth *eth = mac->hw;
4511 u32 val;
4512
4513 if (mac->type == MTK_GDM_TYPE) {
4514 val = mtk_r32(eth, MTK_MAC_EEE(mac->id));
4515
4516 eee->tx_lpi_enabled = mac->tx_lpi_enabled;
4517 eee->tx_lpi_timer = FIELD_GET(MAC_EEE_LPI_TXIDLE_THD, val);
4518 }
4519
4520 return phylink_ethtool_get_eee(mac->phylink, eee);
4521}
4522
4523static int mtk_set_eee(struct net_device *dev, struct ethtool_eee *eee)
4524{
4525 struct mtk_mac *mac = netdev_priv(dev);
developer9b725932022-11-24 16:25:56 +08004526
4527 if (mac->type == MTK_GDM_TYPE) {
4528 if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
4529 return -EINVAL;
4530
4531 mac->tx_lpi_timer = eee->tx_lpi_timer;
4532
4533 mtk_setup_eee(mac, eee->eee_enabled && eee->tx_lpi_timer);
4534 }
4535
4536 return phylink_ethtool_set_eee(mac->phylink, eee);
4537}
4538
developerfd40db22021-04-29 10:08:25 +08004539static const struct ethtool_ops mtk_ethtool_ops = {
4540 .get_link_ksettings = mtk_get_link_ksettings,
4541 .set_link_ksettings = mtk_set_link_ksettings,
4542 .get_drvinfo = mtk_get_drvinfo,
4543 .get_msglevel = mtk_get_msglevel,
4544 .set_msglevel = mtk_set_msglevel,
4545 .nway_reset = mtk_nway_reset,
4546 .get_link = ethtool_op_get_link,
4547 .get_strings = mtk_get_strings,
4548 .get_sset_count = mtk_get_sset_count,
4549 .get_ethtool_stats = mtk_get_ethtool_stats,
4550 .get_rxnfc = mtk_get_rxnfc,
4551 .set_rxnfc = mtk_set_rxnfc,
developerea49c302023-06-27 16:06:41 +08004552 .get_rxfh_key_size = mtk_get_rxfh_key_size,
4553 .get_rxfh_indir_size = mtk_get_rxfh_indir_size,
4554 .get_rxfh = mtk_get_rxfh,
4555 .set_rxfh = mtk_set_rxfh,
developer6c5cbb52022-08-12 11:37:45 +08004556 .get_pauseparam = mtk_get_pauseparam,
4557 .set_pauseparam = mtk_set_pauseparam,
developer9b725932022-11-24 16:25:56 +08004558 .get_eee = mtk_get_eee,
4559 .set_eee = mtk_set_eee,
developerfd40db22021-04-29 10:08:25 +08004560};
4561
4562static const struct net_device_ops mtk_netdev_ops = {
4563 .ndo_init = mtk_init,
4564 .ndo_uninit = mtk_uninit,
4565 .ndo_open = mtk_open,
4566 .ndo_stop = mtk_stop,
4567 .ndo_start_xmit = mtk_start_xmit,
4568 .ndo_set_mac_address = mtk_set_mac_address,
4569 .ndo_validate_addr = eth_validate_addr,
4570 .ndo_do_ioctl = mtk_do_ioctl,
4571 .ndo_tx_timeout = mtk_tx_timeout,
4572 .ndo_get_stats64 = mtk_get_stats64,
4573 .ndo_fix_features = mtk_fix_features,
4574 .ndo_set_features = mtk_set_features,
4575#ifdef CONFIG_NET_POLL_CONTROLLER
4576 .ndo_poll_controller = mtk_poll_controller,
4577#endif
4578};
4579
developerb6c36bf2023-09-07 12:05:01 +08004580static void mux_poll(struct work_struct *work)
4581{
4582 struct mtk_mux *mux = container_of(work, struct mtk_mux, poll.work);
4583 struct mtk_mac *mac = mux->mac;
4584 struct mtk_eth *eth = mac->hw;
4585 struct net_device *dev = eth->netdev[mac->id];
4586 unsigned int channel;
4587
4588 if (IS_ERR(mux->gpio[0]) || IS_ERR(mux->gpio[1]))
4589 goto exit;
4590
4591 channel = gpiod_get_value_cansleep(mux->gpio[0]);
4592 if (mux->channel == channel || !netif_running(dev))
4593 goto exit;
4594
4595 rtnl_lock();
4596
4597 mtk_stop(dev);
4598
4599 if (channel == 0 || channel == 1) {
4600 mac->of_node = mux->data[channel]->of_node;
4601 mac->phylink = mux->data[channel]->phylink;
4602 }
4603
4604 dev_info(eth->dev, "ethernet mux: switch to channel%d\n", channel);
4605
4606 gpiod_set_value_cansleep(mux->gpio[1], channel);
4607
4608 mtk_open(dev);
4609
4610 rtnl_unlock();
4611
4612 mux->channel = channel;
4613
4614exit:
4615 mod_delayed_work(system_wq, &mux->poll, msecs_to_jiffies(100));
4616}
4617
4618static int mtk_add_mux_channel(struct mtk_mux *mux, struct device_node *np)
4619{
4620 const __be32 *_id = of_get_property(np, "reg", NULL);
4621 struct mtk_mac *mac = mux->mac;
4622 struct mtk_eth *eth = mac->hw;
4623 struct mtk_mux_data *data;
4624 struct phylink *phylink;
4625 int phy_mode, id;
4626
4627 if (!_id) {
4628 dev_err(eth->dev, "missing mux channel id\n");
4629 return -EINVAL;
4630 }
4631
4632 id = be32_to_cpup(_id);
4633 if (id < 0 || id > 1) {
4634 dev_err(eth->dev, "%d is not a valid mux channel id\n", id);
4635 return -EINVAL;
4636 }
4637
4638 data = kmalloc(sizeof(*data), GFP_KERNEL);
4639 if (unlikely(!data)) {
4640 dev_err(eth->dev, "failed to create mux data structure\n");
4641 return -ENOMEM;
4642 }
4643
4644 mux->data[id] = data;
4645
4646 /* phylink create */
4647 phy_mode = of_get_phy_mode(np);
4648 if (phy_mode < 0) {
4649 dev_err(eth->dev, "incorrect phy-mode\n");
4650 return -EINVAL;
4651 }
4652
4653 phylink = phylink_create(&mux->mac->phylink_config,
4654 of_fwnode_handle(np),
4655 phy_mode, &mtk_phylink_ops);
4656 if (IS_ERR(phylink)) {
4657 dev_err(eth->dev, "failed to create phylink structure\n");
4658 return PTR_ERR(phylink);
4659 }
4660
4661 data->of_node = np;
4662 data->phylink = phylink;
4663
4664 return 0;
4665}
4666
4667static int mtk_add_mux(struct mtk_eth *eth, struct device_node *np)
4668{
4669 const __be32 *_id = of_get_property(np, "reg", NULL);
4670 struct device_node *child;
4671 struct mtk_mux *mux;
developere48f1362023-09-27 15:03:04 +08004672 int id, err;
developerb6c36bf2023-09-07 12:05:01 +08004673
4674 if (!_id) {
4675 		dev_err(eth->dev, "missing mac id to attach the mux to\n");
4676 return -EINVAL;
4677 }
4678
4679 id = be32_to_cpup(_id);
4680 if (id < 0 || id >= MTK_MAX_DEVS) {
4681 dev_err(eth->dev, "%d is not a valid attach mac id\n", id);
4682 return -EINVAL;
4683 }
4684
4685 	mux = kmalloc(sizeof(*mux), GFP_KERNEL);
4686 if (unlikely(!mux)) {
4687 dev_err(eth->dev, "failed to create mux structure\n");
4688 return -ENOMEM;
4689 }
4690
4691 eth->mux[id] = mux;
4692
4693 mux->mac = eth->mac[id];
4694 mux->channel = 0;
4695
4696 mux->gpio[0] = fwnode_get_named_gpiod(of_fwnode_handle(np),
4697 "mod-def0-gpios", 0,
4698 GPIOD_IN, "?");
4699 if (IS_ERR(mux->gpio[0]))
4700 		dev_err(eth->dev, "failed to request gpio for mod-def0-gpios\n");
4701
4702 mux->gpio[1] = fwnode_get_named_gpiod(of_fwnode_handle(np),
4703 "chan-sel-gpios", 0,
4704 GPIOD_OUT_LOW, "?");
4705 if (IS_ERR(mux->gpio[1]))
4706 		dev_err(eth->dev, "failed to request gpio for chan-sel-gpios\n");
4707
4708 for_each_child_of_node(np, child) {
4709 err = mtk_add_mux_channel(mux, child);
4710 if (err) {
4711 dev_err(eth->dev, "failed to add mtk_mux\n");
4712 of_node_put(child);
4713 			return err;
4714 }
4715 		/* no put here: for_each_child_of_node() drops the ref itself */
4716 }
4717
4718 INIT_DELAYED_WORK(&mux->poll, mux_poll);
4719 mod_delayed_work(system_wq, &mux->poll, msecs_to_jiffies(3000));
4720
4721 return 0;
4722}
4723
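/* Instantiate one MAC from a "mediatek,eth-mac" node: allocate the netdev
 * and per-MAC counters, create its phylink instance, handle the optional
 * fixed-link link-status GPIO and hook up the netdev/ethtool operations.
 */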
developerfd40db22021-04-29 10:08:25 +08004724static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4725{
4726 const __be32 *_id = of_get_property(np, "reg", NULL);
developer30e13e72022-11-03 10:21:24 +08004727 const char *label;
developerfd40db22021-04-29 10:08:25 +08004728 struct phylink *phylink;
developer30e13e72022-11-03 10:21:24 +08004729 int mac_type, phy_mode, id, err;
developerfd40db22021-04-29 10:08:25 +08004730 struct mtk_mac *mac;
developera2613e62022-07-01 18:29:37 +08004731 struct mtk_phylink_priv *phylink_priv;
4732 struct fwnode_handle *fixed_node;
4733 struct gpio_desc *desc;
developerfd40db22021-04-29 10:08:25 +08004734
4735 if (!_id) {
4736 dev_err(eth->dev, "missing mac id\n");
4737 return -EINVAL;
4738 }
4739
4740 id = be32_to_cpup(_id);
developerfb556ca2021-10-13 10:52:09 +08004741 if (id < 0 || id >= MTK_MAC_COUNT) {
developerfd40db22021-04-29 10:08:25 +08004742 dev_err(eth->dev, "%d is not a valid mac id\n", id);
4743 return -EINVAL;
4744 }
4745
4746 if (eth->netdev[id]) {
4747 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4748 return -EINVAL;
4749 }
4750
4751 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
4752 if (!eth->netdev[id]) {
4753 dev_err(eth->dev, "alloc_etherdev failed\n");
4754 return -ENOMEM;
4755 }
4756 mac = netdev_priv(eth->netdev[id]);
4757 eth->mac[id] = mac;
4758 mac->id = id;
4759 mac->hw = eth;
4760 mac->of_node = np;
4761
4762 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4763 mac->hwlro_ip_cnt = 0;
4764
4765 mac->hw_stats = devm_kzalloc(eth->dev,
4766 sizeof(*mac->hw_stats),
4767 GFP_KERNEL);
4768 if (!mac->hw_stats) {
4769 dev_err(eth->dev, "failed to allocate counter memory\n");
4770 err = -ENOMEM;
4771 goto free_netdev;
4772 }
4773 spin_lock_init(&mac->hw_stats->stats_lock);
4774 u64_stats_init(&mac->hw_stats->syncp);
4775 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
4776
4777 /* phylink create */
4778 phy_mode = of_get_phy_mode(np);
4779 if (phy_mode < 0) {
4780 dev_err(eth->dev, "incorrect phy-mode\n");
4781 err = -EINVAL;
4782 goto free_netdev;
4783 }
4784
4785 /* mac config is not set */
4786 mac->interface = PHY_INTERFACE_MODE_NA;
4787 mac->mode = MLO_AN_PHY;
4788 mac->speed = SPEED_UNKNOWN;
4789
developer9b725932022-11-24 16:25:56 +08004790 mac->tx_lpi_timer = 1;
4791
developerfd40db22021-04-29 10:08:25 +08004792 mac->phylink_config.dev = &eth->netdev[id]->dev;
4793 mac->phylink_config.type = PHYLINK_NETDEV;
4794
developer30e13e72022-11-03 10:21:24 +08004795 mac->type = 0;
4796 if (!of_property_read_string(np, "mac-type", &label)) {
4797 for (mac_type = 0; mac_type < MTK_GDM_TYPE_MAX; mac_type++) {
4798 if (!strcasecmp(label, gdm_type(mac_type)))
4799 break;
4800 }
4801
4802 switch (mac_type) {
4803 case 0:
4804 mac->type = MTK_GDM_TYPE;
4805 break;
4806 case 1:
4807 mac->type = MTK_XGDM_TYPE;
4808 break;
4809 default:
4810 dev_warn(eth->dev, "incorrect mac-type\n");
4811 break;
4812 		}
4813 }
developer089e8852022-09-28 14:43:46 +08004814
developerfd40db22021-04-29 10:08:25 +08004815 phylink = phylink_create(&mac->phylink_config,
4816 of_fwnode_handle(mac->of_node),
4817 phy_mode, &mtk_phylink_ops);
4818 if (IS_ERR(phylink)) {
4819 err = PTR_ERR(phylink);
4820 goto free_netdev;
4821 }
4822
4823 mac->phylink = phylink;
4824
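	/* Optional fixed-link with a link-status GPIO: request the descriptor,
	 * wire its edge interrupts to mtk_handle_irq_fixed_link and record the
	 * PHY address from the "phy-handle" node.
	 */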
developera2613e62022-07-01 18:29:37 +08004825 fixed_node = fwnode_get_named_child_node(of_fwnode_handle(mac->of_node),
4826 "fixed-link");
4827 if (fixed_node) {
4828 desc = fwnode_get_named_gpiod(fixed_node, "link-gpio",
4829 0, GPIOD_IN, "?");
4830 if (!IS_ERR(desc)) {
4831 struct device_node *phy_np;
4832 const char *label;
4833 int irq, phyaddr;
4834
4835 phylink_priv = &mac->phylink_priv;
4836
4837 phylink_priv->desc = desc;
4838 phylink_priv->id = id;
4839 phylink_priv->link = -1;
4840
4841 irq = gpiod_to_irq(desc);
4842 if (irq > 0) {
4843 devm_request_irq(eth->dev, irq, mtk_handle_irq_fixed_link,
4844 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
4845 "ethernet:fixed link", mac);
4846 }
4847
developer8b6f2402022-11-28 13:42:34 +08004848 if (!of_property_read_string(to_of_node(fixed_node),
4849 "label", &label)) {
developer659fdeb2022-12-01 23:03:07 +08004850 			/* strscpy() guarantees NUL-termination */
4851 			if (strlen(label) < 16)
4852 				strscpy(phylink_priv->label, label, 16);
4853 			else
developer8b6f2402022-11-28 13:42:34 +08004854 				dev_err(eth->dev, "insufficient space for label!\n");
4855 }
developera2613e62022-07-01 18:29:37 +08004856
4857 phy_np = of_parse_phandle(to_of_node(fixed_node), "phy-handle", 0);
4858 if (phy_np) {
4859 if (!of_property_read_u32(phy_np, "reg", &phyaddr))
4860 phylink_priv->phyaddr = phyaddr;
4861 }
4862 }
4863 fwnode_handle_put(fixed_node);
4864 }
4865
developerfd40db22021-04-29 10:08:25 +08004866 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4867 eth->netdev[id]->watchdog_timeo = 5 * HZ;
4868 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4869 eth->netdev[id]->base_addr = (unsigned long)eth->base;
4870
4871 eth->netdev[id]->hw_features = eth->soc->hw_features;
4872 if (eth->hwlro)
4873 eth->netdev[id]->hw_features |= NETIF_F_LRO;
4874
4875 eth->netdev[id]->vlan_features = eth->soc->hw_features &
4876 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
4877 eth->netdev[id]->features |= eth->soc->hw_features;
4878 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4879
developer94806ec2023-05-19 14:16:44 +08004880 eth->netdev[id]->irq = eth->irq_fe[0];
developerfd40db22021-04-29 10:08:25 +08004881 eth->netdev[id]->dev.of_node = np;
4882
4883 return 0;
4884
4885free_netdev:
4886 free_netdev(eth->netdev[id]);
4887 return err;
4888}
4889
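/* Swap the DMA device under RTNL: close every running netdev, update
 * eth->dma_dev, then reopen the interfaces so their DMA allocations are
 * redone against the new device.
 */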
developer3f28d382023-03-07 16:06:30 +08004890void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4891{
4892 struct net_device *dev, *tmp;
4893 LIST_HEAD(dev_list);
4894 int i;
4895
4896 rtnl_lock();
4897
4898 for (i = 0; i < MTK_MAC_COUNT; i++) {
4899 dev = eth->netdev[i];
4900
4901 if (!dev || !(dev->flags & IFF_UP))
4902 continue;
4903
4904 list_add_tail(&dev->close_list, &dev_list);
4905 }
4906
4907 dev_close_many(&dev_list, false);
4908
4909 eth->dma_dev = dma_dev;
4910
4911 list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4912 list_del_init(&dev->close_list);
4913 dev_open(dev, NULL);
4914 }
4915
4916 rtnl_unlock();
4917}
4918
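/* Probe: map the register (and, where present, SRAM) resources, look up the
 * syscon regmaps, configure the DMA mask, initialize the SGMII/USXGMII
 * blocks, acquire IRQs and clocks, bring up the hardware and register one
 * netdev per enabled "mediatek,eth-mac" node.
 */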
developerfd40db22021-04-29 10:08:25 +08004919static int mtk_probe(struct platform_device *pdev)
4920{
developerb6c36bf2023-09-07 12:05:01 +08004921 struct device_node *mac_np, *mux_np;
developerfd40db22021-04-29 10:08:25 +08004922 struct mtk_eth *eth;
4923 int err, i;
4924
4925 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4926 if (!eth)
4927 return -ENOMEM;
4928
4929 eth->soc = of_device_get_match_data(&pdev->dev);
4930
4931 eth->dev = &pdev->dev;
developer3f28d382023-03-07 16:06:30 +08004932 eth->dma_dev = &pdev->dev;
developerfd40db22021-04-29 10:08:25 +08004933 eth->base = devm_platform_ioremap_resource(pdev, 0);
4934 if (IS_ERR(eth->base))
4935 return PTR_ERR(eth->base);
4936
developer089e8852022-09-28 14:43:46 +08004937 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
4938 eth->sram_base = devm_platform_ioremap_resource(pdev, 1);
4939 if (IS_ERR(eth->sram_base))
4940 return PTR_ERR(eth->sram_base);
4941 }
4942
developerfd40db22021-04-29 10:08:25 +08004943 	if (eth->soc->has_sram) {
4944 struct resource *res;
4945 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
developer4c32b7a2021-11-13 16:46:43 +08004946 if (unlikely(!res))
4947 return -EINVAL;
developerfd40db22021-04-29 10:08:25 +08004948 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
4949 }
4950
developer0fef5222023-04-26 14:48:31 +08004951 mtk_get_hwver(eth);
4952
developer68ce74f2023-01-03 16:11:57 +08004953 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
developerfd40db22021-04-29 10:08:25 +08004954 eth->ip_align = NET_IP_ALIGN;
developerfd40db22021-04-29 10:08:25 +08004955
developer089e8852022-09-28 14:43:46 +08004956 if (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) {
4957 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
4958 if (!err) {
4959 err = dma_set_coherent_mask(&pdev->dev,
4960 DMA_BIT_MASK(36));
4961 if (err) {
4962 dev_err(&pdev->dev, "Wrong DMA config\n");
4963 return -EINVAL;
4964 }
4965 }
4966 }
4967
developerfd40db22021-04-29 10:08:25 +08004968 spin_lock_init(&eth->page_lock);
4969 spin_lock_init(&eth->tx_irq_lock);
4970 spin_lock_init(&eth->rx_irq_lock);
developerd82e8372022-02-09 15:00:09 +08004971 spin_lock_init(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +08004972
4973 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4974 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4975 "mediatek,ethsys");
4976 if (IS_ERR(eth->ethsys)) {
4977 dev_err(&pdev->dev, "no ethsys regmap found\n");
4978 return PTR_ERR(eth->ethsys);
4979 }
4980 }
4981
4982 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4983 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4984 "mediatek,infracfg");
4985 if (IS_ERR(eth->infra)) {
4986 dev_err(&pdev->dev, "no infracfg regmap found\n");
4987 return PTR_ERR(eth->infra);
4988 }
4989 }
4990
developer3f28d382023-03-07 16:06:30 +08004991 if (of_dma_is_coherent(pdev->dev.of_node)) {
4992 struct regmap *cci;
4993
4994 cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4995 "cci-control-port");
4996 /* enable CPU/bus coherency */
4997 if (!IS_ERR(cci))
4998 regmap_write(cci, 0, 3);
4999 }
5000
developerfd40db22021-04-29 10:08:25 +08005001 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
developer4e8a3fd2023-04-10 18:05:44 +08005002 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
developerfd40db22021-04-29 10:08:25 +08005003 GFP_KERNEL);
developer4e8a3fd2023-04-10 18:05:44 +08005004 if (!eth->sgmii)
developerfd40db22021-04-29 10:08:25 +08005005 return -ENOMEM;
5006
developer4e8a3fd2023-04-10 18:05:44 +08005007 err = mtk_sgmii_init(eth, pdev->dev.of_node,
developerfd40db22021-04-29 10:08:25 +08005008 eth->soc->ana_rgc3);
developer089e8852022-09-28 14:43:46 +08005009 if (err)
5010 return err;
5011 }
5012
5013 if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
developer4e8a3fd2023-04-10 18:05:44 +08005014 eth->usxgmii = devm_kzalloc(eth->dev, sizeof(*eth->usxgmii),
5015 GFP_KERNEL);
5016 if (!eth->usxgmii)
5017 return -ENOMEM;
developer089e8852022-09-28 14:43:46 +08005018
developer4e8a3fd2023-04-10 18:05:44 +08005019 err = mtk_usxgmii_init(eth, pdev->dev.of_node);
developer089e8852022-09-28 14:43:46 +08005020 if (err)
5021 return err;
5022
5023 err = mtk_toprgu_init(eth, pdev->dev.of_node);
developerfd40db22021-04-29 10:08:25 +08005024 if (err)
5025 return err;
5026 }
5027
5028 if (eth->soc->required_pctl) {
5029 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5030 "mediatek,pctl");
5031 if (IS_ERR(eth->pctl)) {
5032 dev_err(&pdev->dev, "no pctl regmap found\n");
5033 return PTR_ERR(eth->pctl);
5034 }
5035 }
5036
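	/* Interrupt layout: the PDMA IRQs occupy the first MTK_PDMA_IRQ_NUM
	 * entries of the DT interrupt list, followed by the FE IRQs; with
	 * MTK_SHARED_INT all FE lines share irq_fe[0].
	 */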
developer94806ec2023-05-19 14:16:44 +08005037 for (i = 0; i < MTK_PDMA_IRQ_NUM; i++)
5038 eth->irq_pdma[i] = platform_get_irq(pdev, i);
5039
5040 for (i = 0; i < MTK_FE_IRQ_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08005041 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
developer94806ec2023-05-19 14:16:44 +08005042 eth->irq_fe[i] = eth->irq_fe[0];
developerfd40db22021-04-29 10:08:25 +08005043 else
developer94806ec2023-05-19 14:16:44 +08005044 eth->irq_fe[i] =
5045 platform_get_irq(pdev, i + MTK_PDMA_IRQ_NUM);
5046
5047 if (eth->irq_fe[i] < 0) {
developerfd40db22021-04-29 10:08:25 +08005048 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
5049 return -ENXIO;
5050 }
5051 }
5052
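	/* Acquire all known clocks; only those flagged in required_clks are
	 * mandatory, missing optional clocks are left as NULL.
	 */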
5053 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
5054 eth->clks[i] = devm_clk_get(eth->dev,
5055 mtk_clks_source_name[i]);
5056 if (IS_ERR(eth->clks[i])) {
5057 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
5058 return -EPROBE_DEFER;
5059 if (eth->soc->required_clks & BIT(i)) {
5060 dev_err(&pdev->dev, "clock %s not found\n",
5061 mtk_clks_source_name[i]);
5062 return -EINVAL;
5063 }
5064 eth->clks[i] = NULL;
5065 }
5066 }
5067
5068 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
5069 INIT_WORK(&eth->pending_work, mtk_pending_work);
5070
developer8051e042022-04-08 13:26:36 +08005071 err = mtk_hw_init(eth, MTK_TYPE_COLD_RESET);
developerfd40db22021-04-29 10:08:25 +08005072 if (err)
5073 return err;
5074
5075 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
5076
5077 for_each_child_of_node(pdev->dev.of_node, mac_np) {
5078 if (!of_device_is_compatible(mac_np,
5079 "mediatek,eth-mac"))
5080 continue;
5081
5082 if (!of_device_is_available(mac_np))
5083 continue;
5084
5085 err = mtk_add_mac(eth, mac_np);
5086 if (err) {
5087 of_node_put(mac_np);
5088 goto err_deinit_hw;
5089 }
5090 }
5091
developerb6c36bf2023-09-07 12:05:01 +08005092 mux_np = of_get_child_by_name(eth->dev->of_node, "mux-bus");
5093 if (mux_np) {
5094 struct device_node *child;
5095
5096 for_each_available_child_of_node(mux_np, child) {
5097 if (!of_device_is_compatible(child,
5098 "mediatek,eth-mux"))
5099 continue;
5100
5101 if (!of_device_is_available(child))
5102 continue;
5103
5104 err = mtk_add_mux(eth, child);
5105 if (err)
5106 dev_err(&pdev->dev, "failed to add mux\n");
5107
5108 		}
5109 		of_node_put(mux_np);
5110 	}
5111
developer18f46a82021-07-20 21:08:21 +08005112 err = mtk_napi_init(eth);
5113 if (err)
5114 goto err_free_dev;
5115
developerfd40db22021-04-29 10:08:25 +08005116 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
developer94806ec2023-05-19 14:16:44 +08005117 err = devm_request_irq(eth->dev, eth->irq_fe[0],
developerfd40db22021-04-29 10:08:25 +08005118 mtk_handle_irq, 0,
5119 dev_name(eth->dev), eth);
5120 } else {
developer94806ec2023-05-19 14:16:44 +08005121 err = devm_request_irq(eth->dev, eth->irq_fe[1],
developerfd40db22021-04-29 10:08:25 +08005122 mtk_handle_irq_tx, 0,
5123 dev_name(eth->dev), eth);
5124 if (err)
5125 goto err_free_dev;
5126
developer94806ec2023-05-19 14:16:44 +08005127 err = devm_request_irq(eth->dev, eth->irq_fe[2],
5128 mtk_handle_fe_irq, 0,
5129 dev_name(eth->dev), eth);
5130 if (err)
5131 goto err_free_dev;
5132
5133 err = devm_request_irq(eth->dev, eth->irq_pdma[0],
developerfd40db22021-04-29 10:08:25 +08005134 mtk_handle_irq_rx, 0,
developer18f46a82021-07-20 21:08:21 +08005135 dev_name(eth->dev), &eth->rx_napi[0]);
5136 if (err)
5137 goto err_free_dev;
5138
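		/* With RSS, each additional RX ring gets its own PDMA IRQ and
		 * NAPI context.
		 */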
developer94806ec2023-05-19 14:16:44 +08005139 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
5140 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
5141 err = devm_request_irq(eth->dev,
5142 eth->irq_pdma[i],
5143 mtk_handle_irq_rx, 0,
5144 dev_name(eth->dev),
5145 &eth->rx_napi[i]);
developer18f46a82021-07-20 21:08:21 +08005146 if (err)
5147 goto err_free_dev;
5148 }
5149 }
developerfd40db22021-04-29 10:08:25 +08005150 }
developer8051e042022-04-08 13:26:36 +08005151
developerfd40db22021-04-29 10:08:25 +08005152 if (err)
5153 goto err_free_dev;
5154
5155 /* No MT7628/88 support yet */
5156 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
5157 err = mtk_mdio_init(eth);
5158 if (err)
5159 goto err_free_dev;
5160 }
5161
5162 for (i = 0; i < MTK_MAX_DEVS; i++) {
5163 if (!eth->netdev[i])
5164 continue;
5165
5166 err = register_netdev(eth->netdev[i]);
5167 		if (err) {
5168 			dev_err(eth->dev, "error bringing up device\n");
5169 			goto err_deinit_mdio;
5170 		}
5171 		netif_info(eth, probe, eth->netdev[i],
5172 			   "mediatek frame engine at 0x%08lx, irq %d\n",
developer94806ec2023-05-19 14:16:44 +08005173 			   eth->netdev[i]->base_addr, eth->irq_fe[0]);
developerfd40db22021-04-29 10:08:25 +08005174 }
5175
5176 /* we run 2 devices on the same DMA ring so we need a dummy device
5177 * for NAPI to work
5178 */
5179 init_dummy_netdev(&eth->dummy_dev);
5180 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
5181 MTK_NAPI_WEIGHT);
developer18f46a82021-07-20 21:08:21 +08005182 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[0].napi, mtk_napi_rx,
developerfd40db22021-04-29 10:08:25 +08005183 MTK_NAPI_WEIGHT);
5184
developer18f46a82021-07-20 21:08:21 +08005185 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
5186 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
5187 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[i].napi,
5188 mtk_napi_rx, MTK_NAPI_WEIGHT);
5189 }
5190
developerfd40db22021-04-29 10:08:25 +08005191 mtketh_debugfs_init(eth);
5192 debug_proc_init(eth);
5193
5194 platform_set_drvdata(pdev, eth);
5195
developer8051e042022-04-08 13:26:36 +08005196 register_netdevice_notifier(&mtk_eth_netdevice_nb);
developer37482a42022-12-26 13:31:13 +08005197#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developer8051e042022-04-08 13:26:36 +08005198 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
5199 eth->mtk_dma_monitor_timer.expires = jiffies;
5200 add_timer(&eth->mtk_dma_monitor_timer);
developer793f7b42022-05-20 13:54:51 +08005201#endif
developer8051e042022-04-08 13:26:36 +08005202
developerfd40db22021-04-29 10:08:25 +08005203 return 0;
5204
5205err_deinit_mdio:
5206 mtk_mdio_cleanup(eth);
5207err_free_dev:
5208 mtk_free_dev(eth);
5209err_deinit_hw:
5210 mtk_hw_deinit(eth);
5211
5212 return err;
5213}
5214
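/* Teardown: stop every interface so DMA is quiesced, then release the
 * hardware, NAPI contexts, MDIO bus and the DMA monitor timer.
 */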
5215static int mtk_remove(struct platform_device *pdev)
5216{
5217 struct mtk_eth *eth = platform_get_drvdata(pdev);
5218 struct mtk_mac *mac;
5219 int i;
5220
5221 /* stop all devices to make sure that dma is properly shut down */
5222 for (i = 0; i < MTK_MAC_COUNT; i++) {
5223 if (!eth->netdev[i])
5224 continue;
5225 mtk_stop(eth->netdev[i]);
5226 mac = netdev_priv(eth->netdev[i]);
5227 phylink_disconnect_phy(mac->phylink);
5228 }
5229
5230 mtk_hw_deinit(eth);
5231
5232 netif_napi_del(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08005233 netif_napi_del(&eth->rx_napi[0].napi);
5234
5235 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
5236 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
5237 netif_napi_del(&eth->rx_napi[i].napi);
5238 }
5239
developerfd40db22021-04-29 10:08:25 +08005240 mtk_cleanup(eth);
5241 mtk_mdio_cleanup(eth);
developer8051e042022-04-08 13:26:36 +08005242 unregister_netdevice_notifier(&mtk_eth_netdevice_nb);
	/* the DMA monitor timer is only set up on NETSYS v2/v3 (see mtk_probe) */
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
5243 	del_timer_sync(&eth->mtk_dma_monitor_timer);
#endif
developerfd40db22021-04-29 10:08:25 +08005244
5245 return 0;
5246}
5247
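/* Per-SoC parameterization: register map, capability bits, required clocks,
 * descriptor sizes and the DMA length encoding differ between generations.
 */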
5248static const struct mtk_soc_data mt2701_data = {
developer68ce74f2023-01-03 16:11:57 +08005249 .reg_map = &mtk_reg_map,
developerfd40db22021-04-29 10:08:25 +08005250 .caps = MT7623_CAPS | MTK_HWLRO,
5251 .hw_features = MTK_HW_FEATURES,
5252 .required_clks = MT7623_CLKS_BITMAP,
5253 .required_pctl = true,
5254 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005255 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005256 .txrx = {
5257 .txd_size = sizeof(struct mtk_tx_dma),
5258 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005259 .rx_dma_l4_valid = RX_DMA_L4_VALID,
developere9356982022-07-04 09:03:20 +08005260 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5261 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5262 },
developerfd40db22021-04-29 10:08:25 +08005263};
5264
5265static const struct mtk_soc_data mt7621_data = {
developer68ce74f2023-01-03 16:11:57 +08005266 .reg_map = &mtk_reg_map,
developerfd40db22021-04-29 10:08:25 +08005267 .caps = MT7621_CAPS,
5268 .hw_features = MTK_HW_FEATURES,
5269 .required_clks = MT7621_CLKS_BITMAP,
5270 .required_pctl = false,
5271 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005272 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005273 .txrx = {
5274 .txd_size = sizeof(struct mtk_tx_dma),
developer68ce74f2023-01-03 16:11:57 +08005275 .rx_dma_l4_valid = RX_DMA_L4_VALID,
developere9356982022-07-04 09:03:20 +08005276 .rxd_size = sizeof(struct mtk_rx_dma),
5277 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5278 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5279 },
developerfd40db22021-04-29 10:08:25 +08005280};
5281
5282static const struct mtk_soc_data mt7622_data = {
developer68ce74f2023-01-03 16:11:57 +08005283 .reg_map = &mtk_reg_map,
developerfd40db22021-04-29 10:08:25 +08005284 .ana_rgc3 = 0x2028,
5285 .caps = MT7622_CAPS | MTK_HWLRO,
5286 .hw_features = MTK_HW_FEATURES,
5287 .required_clks = MT7622_CLKS_BITMAP,
5288 .required_pctl = false,
5289 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005290 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005291 .txrx = {
5292 .txd_size = sizeof(struct mtk_tx_dma),
5293 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005294 .rx_dma_l4_valid = RX_DMA_L4_VALID,
developere9356982022-07-04 09:03:20 +08005295 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5296 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5297 },
developerfd40db22021-04-29 10:08:25 +08005298};
5299
5300static const struct mtk_soc_data mt7623_data = {
developer68ce74f2023-01-03 16:11:57 +08005301 .reg_map = &mtk_reg_map,
developerfd40db22021-04-29 10:08:25 +08005302 .caps = MT7623_CAPS | MTK_HWLRO,
5303 .hw_features = MTK_HW_FEATURES,
5304 .required_clks = MT7623_CLKS_BITMAP,
5305 .required_pctl = true,
5306 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005307 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005308 .txrx = {
5309 .txd_size = sizeof(struct mtk_tx_dma),
5310 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005311 .rx_dma_l4_valid = RX_DMA_L4_VALID,
developere9356982022-07-04 09:03:20 +08005312 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5313 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5314 },
developerfd40db22021-04-29 10:08:25 +08005315};
5316
5317static const struct mtk_soc_data mt7629_data = {
developer68ce74f2023-01-03 16:11:57 +08005318 .reg_map = &mtk_reg_map,
developerfd40db22021-04-29 10:08:25 +08005319 .ana_rgc3 = 0x128,
5320 .caps = MT7629_CAPS | MTK_HWLRO,
5321 .hw_features = MTK_HW_FEATURES,
5322 .required_clks = MT7629_CLKS_BITMAP,
5323 .required_pctl = false,
5324 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005325 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005326 .txrx = {
5327 .txd_size = sizeof(struct mtk_tx_dma),
5328 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005329 .rx_dma_l4_valid = RX_DMA_L4_VALID,
developere9356982022-07-04 09:03:20 +08005330 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5331 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5332 },
developerfd40db22021-04-29 10:08:25 +08005333};
5334
5335static const struct mtk_soc_data mt7986_data = {
developer68ce74f2023-01-03 16:11:57 +08005336 .reg_map = &mt7986_reg_map,
developerfd40db22021-04-29 10:08:25 +08005337 .ana_rgc3 = 0x128,
5338 .caps = MT7986_CAPS,
developercba5f4e2021-05-06 14:01:53 +08005339 .hw_features = MTK_HW_FEATURES,
developerfd40db22021-04-29 10:08:25 +08005340 .required_clks = MT7986_CLKS_BITMAP,
5341 .required_pctl = false,
developerc42fa982023-08-22 15:37:30 +08005342 .has_sram = false,
developer933f09b2023-09-12 11:13:01 +08005343 .rss_num = 4,
developere9356982022-07-04 09:03:20 +08005344 .txrx = {
5345 .txd_size = sizeof(struct mtk_tx_dma_v2),
developer8ecd51b2023-03-13 11:28:28 +08005346 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005347 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
developere9356982022-07-04 09:03:20 +08005348 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5349 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
5350 },
developerfd40db22021-04-29 10:08:25 +08005351};
5352
developer255bba22021-07-27 15:16:33 +08005353static const struct mtk_soc_data mt7981_data = {
developer68ce74f2023-01-03 16:11:57 +08005354 .reg_map = &mt7986_reg_map,
developer255bba22021-07-27 15:16:33 +08005355 .ana_rgc3 = 0x128,
5356 .caps = MT7981_CAPS,
developer7377b0b2021-11-18 14:54:47 +08005357 .hw_features = MTK_HW_FEATURES,
developer255bba22021-07-27 15:16:33 +08005358 .required_clks = MT7981_CLKS_BITMAP,
5359 .required_pctl = false,
developerc42fa982023-08-22 15:37:30 +08005360 .has_sram = false,
developer933f09b2023-09-12 11:13:01 +08005361 .rss_num = 4,
developere9356982022-07-04 09:03:20 +08005362 .txrx = {
5363 .txd_size = sizeof(struct mtk_tx_dma_v2),
developer8ecd51b2023-03-13 11:28:28 +08005364 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005365 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
developere9356982022-07-04 09:03:20 +08005366 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5367 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
5368 },
developer255bba22021-07-27 15:16:33 +08005369};
5370
developer089e8852022-09-28 14:43:46 +08005371static const struct mtk_soc_data mt7988_data = {
developer68ce74f2023-01-03 16:11:57 +08005372 .reg_map = &mt7988_reg_map,
developer089e8852022-09-28 14:43:46 +08005373 .ana_rgc3 = 0x128,
5374 .caps = MT7988_CAPS,
5375 .hw_features = MTK_HW_FEATURES,
5376 .required_clks = MT7988_CLKS_BITMAP,
5377 .required_pctl = false,
5378 .has_sram = true,
developere3d0de22023-05-30 17:45:00 +08005379 .rss_num = 4,
developer089e8852022-09-28 14:43:46 +08005380 .txrx = {
5381 .txd_size = sizeof(struct mtk_tx_dma_v2),
5382 .rxd_size = sizeof(struct mtk_rx_dma_v2),
developer68ce74f2023-01-03 16:11:57 +08005383 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
developer089e8852022-09-28 14:43:46 +08005384 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5385 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
5386 },
5387};
5388
developerfd40db22021-04-29 10:08:25 +08005389static const struct mtk_soc_data rt5350_data = {
developer68ce74f2023-01-03 16:11:57 +08005390 .reg_map = &mt7628_reg_map,
developerfd40db22021-04-29 10:08:25 +08005391 .caps = MT7628_CAPS,
5392 .hw_features = MTK_HW_FEATURES_MT7628,
5393 .required_clks = MT7628_CLKS_BITMAP,
5394 .required_pctl = false,
5395 .has_sram = false,
developere3d0de22023-05-30 17:45:00 +08005396 .rss_num = 0,
developere9356982022-07-04 09:03:20 +08005397 .txrx = {
5398 .txd_size = sizeof(struct mtk_tx_dma),
5399 .rxd_size = sizeof(struct mtk_rx_dma),
developer68ce74f2023-01-03 16:11:57 +08005400 .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
developere9356982022-07-04 09:03:20 +08005401 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5402 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
5403 },
developerfd40db22021-04-29 10:08:25 +08005404};
5405
5406const struct of_device_id of_mtk_match[] = {
5407 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
5408 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
5409 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
5410 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
5411 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
5412 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
developer255bba22021-07-27 15:16:33 +08005413 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
developer089e8852022-09-28 14:43:46 +08005414 { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data},
developerfd40db22021-04-29 10:08:25 +08005415 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
5416 {},
5417};
5418MODULE_DEVICE_TABLE(of, of_mtk_match);
5419
5420static struct platform_driver mtk_driver = {
5421 .probe = mtk_probe,
5422 .remove = mtk_remove,
5423 .driver = {
5424 .name = "mtk_soc_eth",
5425 .of_match_table = of_mtk_match,
5426 },
5427};
5428
5429module_platform_driver(mtk_driver);
5430
5431MODULE_LICENSE("GPL");
5432MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
5433MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");