// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>

#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
#include "mtk_eth_reset.h"

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
#include "mtk_hnat/nf_hnat_mtk.h"
#endif

static int mtk_msg_level = -1;
atomic_t reset_lock = ATOMIC_INIT(0);
atomic_t force = ATOMIC_INIT(0);

module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
DECLARE_COMPLETION(wait_ser_done);

41#define MTK_ETHTOOL_STAT(x) { #x, \
42 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
43
44/* strings used by ethtool */
45static const struct mtk_ethtool_stats {
46 char str[ETH_GSTRING_LEN];
47 u32 offset;
48} mtk_ethtool_stats[] = {
49 MTK_ETHTOOL_STAT(tx_bytes),
50 MTK_ETHTOOL_STAT(tx_packets),
51 MTK_ETHTOOL_STAT(tx_skip),
52 MTK_ETHTOOL_STAT(tx_collisions),
53 MTK_ETHTOOL_STAT(rx_bytes),
54 MTK_ETHTOOL_STAT(rx_packets),
55 MTK_ETHTOOL_STAT(rx_overflow),
56 MTK_ETHTOOL_STAT(rx_fcs_errors),
57 MTK_ETHTOOL_STAT(rx_short_errors),
58 MTK_ETHTOOL_STAT(rx_long_errors),
59 MTK_ETHTOOL_STAT(rx_checksum_errors),
60 MTK_ETHTOOL_STAT(rx_flow_control_packets),
61};
62
63static const char * const mtk_clks_source_name[] = {
64 "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
65 "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
66 "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
67 "sgmii_ck", "eth2pll", "wocpu0","wocpu1",
68};
69
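/* Register accessors for the frame engine: mtk_w32()/mtk_r32() are raw
 * 32-bit MMIO helpers relative to eth->base, and mtk_m32() below performs a
 * masked read-modify-write on top of them.
 */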
70void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
71{
72 __raw_writel(val, eth->base + reg);
73}
74
75u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
76{
77 return __raw_readl(eth->base + reg);
78}
79
80u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
81{
82 u32 val;
83
84 val = mtk_r32(eth, reg);
85 val &= ~mask;
86 val |= set;
87 mtk_w32(eth, val, reg);
88 return reg;
89}
90
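/* Poll the PHY indirect-access control register until the previous MDIO
 * transaction has completed, giving up after PHY_IAC_TIMEOUT jiffies.
 */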
91static int mtk_mdio_busy_wait(struct mtk_eth *eth)
92{
93 unsigned long t_start = jiffies;
94
95 while (1) {
96 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
97 return 0;
98 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
99 break;
		cond_resched();
	}
102
103 dev_err(eth->dev, "mdio: MDIO timeout\n");
104 return -1;
105}
106
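/* Low-level MDIO write helper. Clause-22 registers are written with a single
 * IAC command, while MII_ADDR_C45 encoded registers need an address cycle
 * (device and register number) followed by a separate write cycle.
 */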
u32 _mtk_mdio_write(struct mtk_eth *eth, int phy_addr,
		    int phy_reg, u16 write_data)
{
110 if (mtk_mdio_busy_wait(eth))
111 return -1;
112
113 write_data &= 0xffff;
114
	if (phy_reg & MII_ADDR_C45) {
116 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
117 ((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
118 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
119 MTK_PHY_IAC);
120
121 if (mtk_mdio_busy_wait(eth))
122 return -1;
123
124 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_WRITE |
125 ((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
126 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
127 MTK_PHY_IAC);
128 } else {
129 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
130 ((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
131 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
132 MTK_PHY_IAC);
133 }
developerfd40db22021-04-29 10:08:25 +0800134
135 if (mtk_mdio_busy_wait(eth))
136 return -1;
137
138 return 0;
139}
140
u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
143 u32 d;
144
145 if (mtk_mdio_busy_wait(eth))
146 return 0xffff;
147
	if (phy_reg & MII_ADDR_C45) {
149 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
150 ((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
151 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
152 MTK_PHY_IAC);
153
154 if (mtk_mdio_busy_wait(eth))
155 return 0xffff;
156
157 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_READ_C45 |
158 ((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
159 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
160 MTK_PHY_IAC);
161 } else {
162 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
163 ((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
164 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
165 MTK_PHY_IAC);
166 }
developerfd40db22021-04-29 10:08:25 +0800167
168 if (mtk_mdio_busy_wait(eth))
169 return 0xffff;
170
171 d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
172
173 return d;
174}
175
176static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
177 int phy_reg, u16 val)
178{
179 struct mtk_eth *eth = bus->priv;
180
181 return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
182}
183
184static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
185{
186 struct mtk_eth *eth = bus->priv;
187
188 return _mtk_mdio_read(eth, phy_addr, phy_reg);
189}
190
static int mtk_mdio_reset(struct mii_bus *bus)
{
	/* mdiobus_register() will trigger a reset pulse when the bus reset
	 * is enabled; we just need to wait until the device is ready.
	 */
	mdelay(20);
197
198 return 0;
199}
200
static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
202 phy_interface_t interface)
203{
204 u32 val;
205
206 /* Check DDR memory type.
207 * Currently TRGMII mode with DDR2 memory is not supported.
208 */
209 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
210 if (interface == PHY_INTERFACE_MODE_TRGMII &&
211 val & SYSCFG_DRAM_TYPE_DDR2) {
212 dev_err(eth->dev,
213 "TRGMII mode with DDR2 memory is not supported!\n");
214 return -EOPNOTSUPP;
215 }
216
217 val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
218 ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
219
220 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
221 ETHSYS_TRGMII_MT7621_MASK, val);
222
223 return 0;
224}
225
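/* GMAC0 interface clocking: in TRGMII mode the TRGPLL is forced to 500 MHz;
 * in RGMII mode the interface mode register and the PLL rate (250 MHz for
 * 1000 Mbps, 500 MHz otherwise) follow the negotiated speed, as programmed
 * below.
 */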
226static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
227 phy_interface_t interface, int speed)
228{
229 u32 val;
230 int ret;
231
232 if (interface == PHY_INTERFACE_MODE_TRGMII) {
233 mtk_w32(eth, TRGMII_MODE, INTF_MODE);
234 val = 500000000;
235 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
236 if (ret)
237 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
238 return;
239 }
240
241 val = (speed == SPEED_1000) ?
242 INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
243 mtk_w32(eth, val, INTF_MODE);
244
245 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
246 ETHSYS_TRGMII_CLK_SEL362_5,
247 ETHSYS_TRGMII_CLK_SEL362_5);
248
249 val = (speed == SPEED_1000) ? 250000000 : 500000000;
250 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
251 if (ret)
252 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
253
254 val = (speed == SPEED_1000) ?
255 RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
256 mtk_w32(eth, val, TRGMII_RCK_CTRL);
257
258 val = (speed == SPEED_1000) ?
259 TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
260 mtk_w32(eth, val, TRGMII_TCK_CTRL);
261}
262
static void mtk_setup_bridge_switch(struct mtk_eth *eth)
264{
265 int val;
266
267 /* Force Port1 XGMAC Link Up */
268 val = mtk_r32(eth, MTK_XGMAC_STS(MTK_GMAC1_ID));
269 mtk_w32(eth, val | MTK_XGMAC_FORCE_LINK,
270 MTK_XGMAC_STS(MTK_GMAC1_ID));
271
272 /* Adjust GSW bridge IPG to 11*/
273 val = mtk_r32(eth, MTK_GSW_CFG);
274 val &= ~(GSWTX_IPG_MASK | GSWRX_IPG_MASK);
275 val |= (GSW_IPG_11 << GSWTX_IPG_SHIFT) |
276 (GSW_IPG_11 << GSWRX_IPG_SHIFT);
277 mtk_w32(eth, val, MTK_GSW_CFG);
278
279 /* Disable GDM1 RX CRC stripping */
280 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(0));
281 val &= ~MTK_GDMA_STRP_CRC;
282 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(0));
283}
284
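/* phylink .mac_config callback: selects the SoC path (RGMII/SGMII/GMII/
 * USXGMII) for this GMAC, programs the ge_mode field in ETHSYS_SYSCFG0 and,
 * where needed, hands off to the SGMII/USXGMII helpers for PCS setup.
 */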
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
286 const struct phylink_link_state *state)
287{
288 struct mtk_mac *mac = container_of(config, struct mtk_mac,
289 phylink_config);
290 struct mtk_eth *eth = mac->hw;
	u32 sid, i;
	int val, ge_mode, err = 0;

	/* MT76x8 has no hardware settings for the MAC */
295 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
296 mac->interface != state->interface) {
297 /* Setup soc pin functions */
298 switch (state->interface) {
299 case PHY_INTERFACE_MODE_TRGMII:
300 if (mac->id)
301 goto err_phy;
302 if (!MTK_HAS_CAPS(mac->hw->soc->caps,
303 MTK_GMAC1_TRGMII))
304 goto err_phy;
305 /* fall through */
306 case PHY_INTERFACE_MODE_RGMII_TXID:
307 case PHY_INTERFACE_MODE_RGMII_RXID:
308 case PHY_INTERFACE_MODE_RGMII_ID:
309 case PHY_INTERFACE_MODE_RGMII:
310 case PHY_INTERFACE_MODE_MII:
311 case PHY_INTERFACE_MODE_REVMII:
312 case PHY_INTERFACE_MODE_RMII:
313 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
314 err = mtk_gmac_rgmii_path_setup(eth, mac->id);
315 if (err)
316 goto init_err;
317 }
318 break;
319 case PHY_INTERFACE_MODE_1000BASEX:
320 case PHY_INTERFACE_MODE_2500BASEX:
321 case PHY_INTERFACE_MODE_SGMII:
322 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
323 err = mtk_gmac_sgmii_path_setup(eth, mac->id);
324 if (err)
325 goto init_err;
326 }
327 break;
328 case PHY_INTERFACE_MODE_GMII:
329 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
330 err = mtk_gmac_gephy_path_setup(eth, mac->id);
331 if (err)
332 goto init_err;
333 }
334 break;
		case PHY_INTERFACE_MODE_USXGMII:
336 case PHY_INTERFACE_MODE_10GKR:
337 if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
338 err = mtk_gmac_usxgmii_path_setup(eth, mac->id);
339 if (err)
340 goto init_err;
341 }
342 break;
developerfd40db22021-04-29 10:08:25 +0800343 default:
344 goto err_phy;
345 }
346
347 /* Setup clock for 1st gmac */
348 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
349 !phy_interface_mode_is_8023z(state->interface) &&
350 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
351 if (MTK_HAS_CAPS(mac->hw->soc->caps,
352 MTK_TRGMII_MT7621_CLK)) {
353 if (mt7621_gmac0_rgmii_adjust(mac->hw,
354 state->interface))
355 goto err_phy;
356 } else {
357 mtk_gmac0_rgmii_adjust(mac->hw,
358 state->interface,
359 state->speed);
360
361 /* mt7623_pad_clk_setup */
362 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
363 mtk_w32(mac->hw,
364 TD_DM_DRVP(8) | TD_DM_DRVN(8),
365 TRGMII_TD_ODT(i));
366
367 /* Assert/release MT7623 RXC reset */
368 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
369 TRGMII_RCK_CTRL);
370 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
371 }
372 }
373
374 ge_mode = 0;
375 switch (state->interface) {
376 case PHY_INTERFACE_MODE_MII:
377 case PHY_INTERFACE_MODE_GMII:
378 ge_mode = 1;
379 break;
380 case PHY_INTERFACE_MODE_REVMII:
381 ge_mode = 2;
382 break;
383 case PHY_INTERFACE_MODE_RMII:
384 if (mac->id)
385 goto err_phy;
386 ge_mode = 3;
387 break;
388 default:
389 break;
390 }
391
392 /* put the gmac into the right mode */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
		spin_unlock(&eth->syscfg0_lock);

400 mac->interface = state->interface;
401 }
402
403 /* SGMII */
404 if (state->interface == PHY_INTERFACE_MODE_SGMII ||
405 phy_interface_mode_is_8023z(state->interface)) {
		/* The path from GMAC to SGMII will be enabled once the
		 * SGMIISYS setup is done.
		 */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
411
412 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
413 SYSCFG0_SGMII_MASK,
414 ~(u32)SYSCFG0_SGMII_MASK);
415
416 /* Decide how GMAC and SGMIISYS be mapped */
417 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
418 0 : mac->id;
419
420 /* Setup SGMIISYS with the determined property */
421 if (state->interface != PHY_INTERFACE_MODE_SGMII)
			err = mtk_sgmii_setup_mode_force(eth->xgmii, sid,
							 state);
		else
			err = mtk_sgmii_setup_mode_an(eth->xgmii, sid);

		if (err) {
			spin_unlock(&eth->syscfg0_lock);
			goto init_err;
		}

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, val);
		spin_unlock(&eth->syscfg0_lock);
	} else if (state->interface == PHY_INTERFACE_MODE_USXGMII ||
436 state->interface == PHY_INTERFACE_MODE_10GKR) {
437 sid = mac->id;
438
439 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) &&
440 sid != MTK_GMAC1_ID) {
441 if (phylink_autoneg_inband(mode))
442 err = mtk_usxgmii_setup_mode_force(eth->xgmii, sid,
443 SPEED_10000);
444 else
445 err = mtk_usxgmii_setup_mode_an(eth->xgmii, sid,
446 SPEED_10000);
447
448 if (err)
449 goto init_err;
450 }
	} else if (phylink_autoneg_inband(mode)) {
452 dev_err(eth->dev,
453 "In-band mode not supported in non SGMII mode!\n");
454 return;
455 }
456
457 /* Setup gmac */
	if (state->interface == PHY_INTERFACE_MODE_USXGMII ||
	    state->interface == PHY_INTERFACE_MODE_10GKR) {
		mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
		mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
464 switch (mac->id) {
465 case MTK_GMAC1_ID:
466 mtk_setup_bridge_switch(eth);
467 break;
468 case MTK_GMAC3_ID:
469 val = mtk_r32(eth, MTK_XGMAC_STS(mac->id));
470 mtk_w32(eth, val | MTK_XGMAC_FORCE_LINK,
471 MTK_XGMAC_STS(mac->id));
472 break;
473 }
474 }
developerfd40db22021-04-29 10:08:25 +0800475 }
476
developerfd40db22021-04-29 10:08:25 +0800477 return;
478
479err_phy:
480 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
481 mac->id, phy_modes(state->interface));
482 return;
483
484init_err:
485 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
486 mac->id, phy_modes(state->interface), err);
487}
488
static int mtk_mac_pcs_get_state(struct phylink_config *config,
				 struct phylink_link_state *state)
{
492 struct mtk_mac *mac = container_of(config, struct mtk_mac,
493 phylink_config);

	if (mac->type == MTK_XGDM_TYPE) {
		u32 sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));

		if (mac->id == MTK_GMAC2_ID)
			sts = sts >> 16;

		state->duplex = 1;
502
503 switch (FIELD_GET(MTK_USXGMII_PCS_MODE, sts)) {
504 case 0:
505 state->speed = SPEED_10000;
506 break;
507 case 1:
508 state->speed = SPEED_5000;
509 break;
510 case 2:
511 state->speed = SPEED_2500;
512 break;
513 case 3:
514 state->speed = SPEED_1000;
515 break;
516 }
517
518 state->link = FIELD_GET(MTK_USXGMII_PCS_LINK, sts);
519 } else if (mac->type == MTK_GDM_TYPE) {
520 struct mtk_eth *eth = mac->hw;
521 struct mtk_xgmii *ss = eth->xgmii;
522 u32 id = mtk_mac2xgmii_id(eth, mac->id);
523 u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
524 u32 val;
525
526 regmap_read(ss->regmap_sgmii[id], SGMSYS_PCS_CONTROL_1, &val);
527
528 state->link = FIELD_GET(SGMII_LINK_STATYS, val);
529
530 if (FIELD_GET(SGMII_AN_ENABLE, val)) {
531 regmap_read(ss->regmap_sgmii[id], SGMII_PCS_SPEED_ABILITY, &val);
532
533 val = val >> 16;
534
535 state->duplex = FIELD_GET(SGMII_PCS_SPEED_DUPLEX, val);
536
537 switch (FIELD_GET(SGMII_PCS_SPEED_MASK, val)) {
538 case 0:
539 state->speed = SPEED_10;
540 break;
541 case 1:
542 state->speed = SPEED_100;
543 break;
544 case 2:
545 state->speed = SPEED_1000;
546 break;
547 }
548 } else {
549 regmap_read(ss->regmap_sgmii[id], SGMSYS_SGMII_MODE, &val);
550
551 state->duplex = !FIELD_GET(SGMII_DUPLEX_FULL, val);
552
553 switch (FIELD_GET(SGMII_SPEED_MASK, val)) {
554 case 0:
555 state->speed = SPEED_10;
556 break;
557 case 1:
558 state->speed = SPEED_100;
559 break;
560 case 2:
561 regmap_read(ss->regmap_sgmii[id], ss->ana_rgc3, &val);
562 state->speed = (FIELD_GET(RG_PHY_SPEED_3_125G, val)) ? SPEED_2500 : SPEED_1000;
563 break;
564 }
565 }
566
567 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
568 if (pmsr & MAC_MSR_RX_FC)
569 state->pause |= MLO_PAUSE_RX;
570 if (pmsr & MAC_MSR_TX_FC)
571 state->pause |= MLO_PAUSE_TX;
572 }
developerfd40db22021-04-29 10:08:25 +0800573
574 return 1;
575}
576
577static void mtk_mac_an_restart(struct phylink_config *config)
578{
579 struct mtk_mac *mac = container_of(config, struct mtk_mac,
580 phylink_config);
581
	if (mac->type != MTK_XGDM_TYPE)
		mtk_sgmii_restart_an(mac->hw, mac->id);
}
585
586static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
587 phy_interface_t interface)
588{
589 struct mtk_mac *mac = container_of(config, struct mtk_mac,
590 phylink_config);
	u32 mcr;
592
593 if (mac->type == MTK_GDM_TYPE) {
594 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
595 mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
596 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
597 } else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
598 mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));

		mcr &= 0xfffffff0;
		mcr |= XMAC_MCR_TRX_DISABLE;
		mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
	}
}
605
static void mtk_mac_link_up(struct phylink_config *config,
			    struct phy_device *phy,
			    unsigned int mode, phy_interface_t interface,
			    int speed, int duplex, bool tx_pause, bool rx_pause)
609{
610 struct mtk_mac *mac = container_of(config, struct mtk_mac,
611 phylink_config);
	u32 mcr, mcr_cur;
613
614 if (mac->type == MTK_GDM_TYPE) {
615 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
616 mcr = mcr_cur;
617 mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
618 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
619 MAC_MCR_FORCE_RX_FC);
620 mcr |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
621 MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
622
623 /* Configure speed */
624 switch (speed) {
625 case SPEED_2500:
626 case SPEED_1000:
627 mcr |= MAC_MCR_SPEED_1000;
628 break;
629 case SPEED_100:
630 mcr |= MAC_MCR_SPEED_100;
631 break;
632 }
633
634 /* Configure duplex */
635 if (duplex == DUPLEX_FULL)
636 mcr |= MAC_MCR_FORCE_DPX;
637
638 /* Configure pause modes -
639 * phylink will avoid these for half duplex
640 */
641 if (tx_pause)
642 mcr |= MAC_MCR_FORCE_TX_FC;
643 if (rx_pause)
644 mcr |= MAC_MCR_FORCE_RX_FC;
645
646 mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
647
648 /* Only update control register when needed! */
649 if (mcr != mcr_cur)
650 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
651 } else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
652 mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
653
654 mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC);
655 /* Configure pause modes -
656 * phylink will avoid these for half duplex
657 */
658 if (tx_pause)
659 mcr |= XMAC_MCR_FORCE_TX_FC;
660 if (rx_pause)
661 mcr |= XMAC_MCR_FORCE_RX_FC;

		mcr &= ~(XMAC_MCR_TRX_DISABLE);
		mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
	}
}
667
668static void mtk_validate(struct phylink_config *config,
669 unsigned long *supported,
670 struct phylink_link_state *state)
671{
672 struct mtk_mac *mac = container_of(config, struct mtk_mac,
673 phylink_config);
674 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
675
676 if (state->interface != PHY_INTERFACE_MODE_NA &&
677 state->interface != PHY_INTERFACE_MODE_MII &&
678 state->interface != PHY_INTERFACE_MODE_GMII &&
679 !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
680 phy_interface_mode_is_rgmii(state->interface)) &&
681 !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
682 !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
683 !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
684 (state->interface == PHY_INTERFACE_MODE_SGMII ||
	       phy_interface_mode_is_8023z(state->interface))) &&
686 !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
687 (state->interface == PHY_INTERFACE_MODE_USXGMII)) &&
688 !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
689 (state->interface == PHY_INTERFACE_MODE_10GKR))) {
developerfd40db22021-04-29 10:08:25 +0800690 linkmode_zero(supported);
691 return;
692 }
693
694 phylink_set_port_modes(mask);
695 phylink_set(mask, Autoneg);
696
697 switch (state->interface) {
	case PHY_INTERFACE_MODE_USXGMII:
699 case PHY_INTERFACE_MODE_10GKR:
700 phylink_set(mask, 10000baseKR_Full);
701 phylink_set(mask, 10000baseT_Full);
702 phylink_set(mask, 10000baseCR_Full);
703 phylink_set(mask, 10000baseSR_Full);
704 phylink_set(mask, 10000baseLR_Full);
705 phylink_set(mask, 10000baseLRM_Full);
706 phylink_set(mask, 10000baseER_Full);
707 phylink_set(mask, 100baseT_Half);
708 phylink_set(mask, 100baseT_Full);
709 phylink_set(mask, 1000baseT_Half);
710 phylink_set(mask, 1000baseT_Full);
711 phylink_set(mask, 1000baseX_Full);
712 break;
	case PHY_INTERFACE_MODE_TRGMII:
		phylink_set(mask, 1000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_1000BASEX:
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 2500baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_GMII:
724 case PHY_INTERFACE_MODE_RGMII:
725 case PHY_INTERFACE_MODE_RGMII_ID:
726 case PHY_INTERFACE_MODE_RGMII_RXID:
727 case PHY_INTERFACE_MODE_RGMII_TXID:
728 phylink_set(mask, 1000baseT_Half);
729 /* fall through */
730 case PHY_INTERFACE_MODE_SGMII:
731 phylink_set(mask, 1000baseT_Full);
732 phylink_set(mask, 1000baseX_Full);
733 /* fall through */
734 case PHY_INTERFACE_MODE_MII:
735 case PHY_INTERFACE_MODE_RMII:
736 case PHY_INTERFACE_MODE_REVMII:
737 case PHY_INTERFACE_MODE_NA:
738 default:
739 phylink_set(mask, 10baseT_Half);
740 phylink_set(mask, 10baseT_Full);
741 phylink_set(mask, 100baseT_Half);
742 phylink_set(mask, 100baseT_Full);
743 break;
744 }
745
746 if (state->interface == PHY_INTERFACE_MODE_NA) {
developer089e8852022-09-28 14:43:46 +0800747
748 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII)) {
749 phylink_set(mask, 10000baseKR_Full);
750 phylink_set(mask, 10000baseSR_Full);
751 phylink_set(mask, 10000baseLR_Full);
752 phylink_set(mask, 10000baseLRM_Full);
753 phylink_set(mask, 10000baseER_Full);
754 phylink_set(mask, 1000baseKX_Full);
755 phylink_set(mask, 1000baseT_Full);
756 phylink_set(mask, 1000baseX_Full);
757 phylink_set(mask, 2500baseX_Full);
758 }
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
760 phylink_set(mask, 1000baseT_Full);
761 phylink_set(mask, 1000baseX_Full);
762 phylink_set(mask, 2500baseX_Full);
763 }
764 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
765 phylink_set(mask, 1000baseT_Full);
766 phylink_set(mask, 1000baseT_Half);
767 phylink_set(mask, 1000baseX_Full);
768 }
769 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
770 phylink_set(mask, 1000baseT_Full);
771 phylink_set(mask, 1000baseT_Half);
772 }
773 }
774
775 phylink_set(mask, Pause);
776 phylink_set(mask, Asym_Pause);
777
778 linkmode_and(supported, supported, mask);
779 linkmode_and(state->advertising, state->advertising, mask);
780
781 /* We can only operate at 2500BaseX or 1000BaseX. If requested
782 * to advertise both, only report advertising at 2500BaseX.
783 */
784 phylink_helper_basex_speed(state);
785}
786
787static const struct phylink_mac_ops mtk_phylink_ops = {
788 .validate = mtk_validate,
	.mac_link_state = mtk_mac_pcs_get_state,
	.mac_an_restart = mtk_mac_an_restart,
791 .mac_config = mtk_mac_config,
792 .mac_link_down = mtk_mac_link_down,
793 .mac_link_up = mtk_mac_link_up,
794};
795
796static int mtk_mdio_init(struct mtk_eth *eth)
797{
798 struct device_node *mii_np;
799 int ret;
800
801 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
802 if (!mii_np) {
803 dev_err(eth->dev, "no %s child node found", "mdio-bus");
804 return -ENODEV;
805 }
806
807 if (!of_device_is_available(mii_np)) {
808 ret = -ENODEV;
809 goto err_put_node;
810 }
811
812 eth->mii_bus = devm_mdiobus_alloc(eth->dev);
813 if (!eth->mii_bus) {
814 ret = -ENOMEM;
815 goto err_put_node;
816 }
817
818 eth->mii_bus->name = "mdio";
819 eth->mii_bus->read = mtk_mdio_read;
820 eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->reset = mtk_mdio_reset;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	if (snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np) < 0) {
		ret = -ENOMEM;
		goto err_put_node;
	}
	ret = of_mdiobus_register(eth->mii_bus, mii_np);
830
831err_put_node:
832 of_node_put(mii_np);
833 return ret;
834}
835
836static void mtk_mdio_cleanup(struct mtk_eth *eth)
837{
838 if (!eth->mii_bus)
839 return;
840
841 mdiobus_unregister(eth->mii_bus);
842}
843
844static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
845{
846 unsigned long flags;
847 u32 val;
848
849 spin_lock_irqsave(&eth->tx_irq_lock, flags);
850 val = mtk_r32(eth, eth->tx_int_mask_reg);
851 mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
852 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
853}
854
855static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
856{
857 unsigned long flags;
858 u32 val;
859
860 spin_lock_irqsave(&eth->tx_irq_lock, flags);
861 val = mtk_r32(eth, eth->tx_int_mask_reg);
862 mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
863 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
864}
865
866static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
867{
868 unsigned long flags;
869 u32 val;
870
871 spin_lock_irqsave(&eth->rx_irq_lock, flags);
872 val = mtk_r32(eth, MTK_PDMA_INT_MASK);
873 mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
874 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
875}
876
877static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
878{
879 unsigned long flags;
880 u32 val;
881
882 spin_lock_irqsave(&eth->rx_irq_lock, flags);
883 val = mtk_r32(eth, MTK_PDMA_INT_MASK);
884 mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
885 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
886}
887
888static int mtk_set_mac_address(struct net_device *dev, void *p)
889{
890 int ret = eth_mac_addr(dev, p);
891 struct mtk_mac *mac = netdev_priv(dev);
892 struct mtk_eth *eth = mac->hw;
893 const char *macaddr = dev->dev_addr;
894
895 if (ret)
896 return ret;
897
898 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
899 return -EBUSY;
900
901 spin_lock_bh(&mac->hw->page_lock);
902 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
903 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
904 MT7628_SDM_MAC_ADRH);
905 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
906 (macaddr[4] << 8) | macaddr[5],
907 MT7628_SDM_MAC_ADRL);
908 } else {
909 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
910 MTK_GDMA_MAC_ADRH(mac->id));
911 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
912 (macaddr[4] << 8) | macaddr[5],
913 MTK_GDMA_MAC_ADRL(mac->id));
914 }
915 spin_unlock_bh(&mac->hw->page_lock);
916
917 return 0;
918}
919
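/* Read the per-GDM hardware MIB counters and accumulate them into the mac's
 * software mtk_hw_stats. NETSYS v3 SoCs use a different offset layout for the
 * TX counters, hence the two branches below.
 */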
920void mtk_stats_update_mac(struct mtk_mac *mac)
921{
	struct mtk_eth *eth = mac->hw;
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
924 unsigned int base = MTK_GDM1_TX_GBCNT;
925 u64 stats;
926
927 base += hw_stats->reg_offset;
928
929 u64_stats_update_begin(&hw_stats->syncp);
930
931 hw_stats->rx_bytes += mtk_r32(mac->hw, base);
932 stats = mtk_r32(mac->hw, base + 0x04);
933 if (stats)
934 hw_stats->rx_bytes += (stats << 32);
935 hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
936 hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
937 hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
938 hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
939 hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
940 hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
941 hw_stats->rx_flow_control_packets +=
942 mtk_r32(mac->hw, base + 0x24);
developer089e8852022-09-28 14:43:46 +0800943
944 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
945 hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x50);
946 hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x54);
947 hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x40);
948 stats = mtk_r32(mac->hw, base + 0x44);
949 if (stats)
950 hw_stats->tx_bytes += (stats << 32);
951 hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x48);
952 u64_stats_update_end(&hw_stats->syncp);
953 } else {
954 hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
955 hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
956 hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
957 stats = mtk_r32(mac->hw, base + 0x34);
958 if (stats)
959 hw_stats->tx_bytes += (stats << 32);
960 hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
961 u64_stats_update_end(&hw_stats->syncp);
962 }
developerfd40db22021-04-29 10:08:25 +0800963}
964
965static void mtk_stats_update(struct mtk_eth *eth)
966{
967 int i;
968
969 for (i = 0; i < MTK_MAC_COUNT; i++) {
970 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
971 continue;
972 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
973 mtk_stats_update_mac(eth->mac[i]);
974 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
975 }
976 }
977}
978
979static void mtk_get_stats64(struct net_device *dev,
980 struct rtnl_link_stats64 *storage)
981{
982 struct mtk_mac *mac = netdev_priv(dev);
983 struct mtk_hw_stats *hw_stats = mac->hw_stats;
984 unsigned int start;
985
986 if (netif_running(dev) && netif_device_present(dev)) {
987 if (spin_trylock_bh(&hw_stats->stats_lock)) {
988 mtk_stats_update_mac(mac);
989 spin_unlock_bh(&hw_stats->stats_lock);
990 }
991 }
992
993 do {
994 start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
995 storage->rx_packets = hw_stats->rx_packets;
996 storage->tx_packets = hw_stats->tx_packets;
997 storage->rx_bytes = hw_stats->rx_bytes;
998 storage->tx_bytes = hw_stats->tx_bytes;
999 storage->collisions = hw_stats->tx_collisions;
1000 storage->rx_length_errors = hw_stats->rx_short_errors +
1001 hw_stats->rx_long_errors;
1002 storage->rx_over_errors = hw_stats->rx_overflow;
1003 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
1004 storage->rx_errors = hw_stats->rx_checksum_errors;
1005 storage->tx_aborted_errors = hw_stats->tx_skip;
1006 } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
1007
1008 storage->tx_errors = dev->stats.tx_errors;
1009 storage->rx_dropped = dev->stats.rx_dropped;
1010 storage->tx_dropped = dev->stats.tx_dropped;
1011}
1012
1013static inline int mtk_max_frag_size(int mtu)
1014{
1015 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
1016 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
1017 mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
1018
1019 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1020 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1021}
1022
1023static inline int mtk_max_buf_size(int frag_size)
1024{
1025 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1026 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1027
1028 WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
1029
1030 return buf_size;
1031}
1032
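/* Snapshot one RX descriptor once the DMA done bit is set; rxd5/rxd6 only
 * exist on the v2/v3 descriptor layout.
 */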
static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
			    struct mtk_rx_dma_v2 *dma_rxd)
{
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	if (!(rxd->rxd2 & RX_DMA_DONE))
1038 return false;
1039
1040 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
developerfd40db22021-04-29 10:08:25 +08001041 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1042 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1047 rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1048 }
1049
	return true;
}
1052
1053/* the qdma core needs scratch memory to be setup */
1054static int mtk_init_fq_dma(struct mtk_eth *eth)
1055{
	const struct mtk_soc_data *soc = eth->soc;
	dma_addr_t phy_ring_tail;
1058 int cnt = MTK_DMA_SIZE;
1059 dma_addr_t dma_addr;
1060 int i;
1061
1062 if (!eth->soc->has_sram) {
		eth->scratch_ring = dma_alloc_coherent(eth->dev,
						       cnt * soc->txrx.txd_size,
						       &eth->phy_scratch_ring,
						       GFP_KERNEL);
	} else {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
			eth->scratch_ring = eth->sram_base;
		else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
			eth->scratch_ring = eth->base + MTK_ETH_SRAM_OFFSET;
	}
1073
1074 if (unlikely(!eth->scratch_ring))
1075 return -ENOMEM;
1076
	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
1079 return -ENOMEM;
1080
1081 dma_addr = dma_map_single(eth->dev,
1082 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
1083 DMA_FROM_DEVICE);
1084 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1085 return -ENOMEM;
1086
	phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);

	for (i = 0; i < cnt; i++) {
		struct mtk_tx_dma_v2 *txd;

		txd = eth->scratch_ring + i * soc->txrx.txd_size;
		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
		if (i < cnt - 1)
			txd->txd2 = eth->phy_scratch_ring +
				    (i + 1) * soc->txrx.txd_size;

		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
		txd->txd4 = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			txd->txd5 = 0;
			txd->txd6 = 0;
			txd->txd7 = 0;
			txd->txd8 = 0;
		}
	}
1109
1110 mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
1111 mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
1112 mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
1113 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
1114
1115 return 0;
1116}
1117
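/* Helpers translating between QDMA physical descriptor addresses, their
 * virtual mapping in ring->dma, ring indices, and the shadow PDMA ring.
 */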
static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	return ring->dma + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    void *txd, u32 txd_size)
{
	int idx = (txd - ring->dma) / txd_size;

	return &ring->buf[idx];
}

static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
				       void *dma)
{
	return ring->dma_pdma - ring->dma + dma;
}

static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
{
	return (dma - ring->dma) / txd_size;
}
1141
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 bool napi)
{
1145 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1146 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1147 dma_unmap_single(eth->dev,
1148 dma_unmap_addr(tx_buf, dma_addr0),
1149 dma_unmap_len(tx_buf, dma_len0),
1150 DMA_TO_DEVICE);
1151 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1152 dma_unmap_page(eth->dev,
1153 dma_unmap_addr(tx_buf, dma_addr0),
1154 dma_unmap_len(tx_buf, dma_len0),
1155 DMA_TO_DEVICE);
1156 }
1157 } else {
1158 if (dma_unmap_len(tx_buf, dma_len0)) {
1159 dma_unmap_page(eth->dev,
1160 dma_unmap_addr(tx_buf, dma_addr0),
1161 dma_unmap_len(tx_buf, dma_len0),
1162 DMA_TO_DEVICE);
1163 }
1164
1165 if (dma_unmap_len(tx_buf, dma_len1)) {
1166 dma_unmap_page(eth->dev,
1167 dma_unmap_addr(tx_buf, dma_addr1),
1168 dma_unmap_len(tx_buf, dma_len1),
1169 DMA_TO_DEVICE);
1170 }
1171 }
1172
1173 tx_buf->flags = 0;
1174 if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
		if (napi)
			napi_consume_skb(tx_buf->skb, napi);
		else
			dev_kfree_skb_any(tx_buf->skb);
	}
	tx_buf->skb = NULL;
1182}
1183
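/* Record the DMA mapping in the tx_buf for later unmapping. On PDMA each
 * descriptor carries up to two buffers (txd1/PLEN0 for even indices,
 * txd3/PLEN1 for odd ones), while QDMA only tracks the single mapping.
 */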
1184static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1185 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1186 size_t size, int idx)
1187{
1188 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1189 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1190 dma_unmap_len_set(tx_buf, dma_len0, size);
1191 } else {
1192 if (idx & 1) {
1193 txd->txd3 = mapped_addr;
1194 txd->txd2 |= TX_DMA_PLEN1(size);
1195 dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1196 dma_unmap_len_set(tx_buf, dma_len1, size);
1197 } else {
1198 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
1199 txd->txd1 = mapped_addr;
1200 txd->txd2 = TX_DMA_PLEN0(size);
1201 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1202 dma_unmap_len_set(tx_buf, dma_len0, size);
1203 }
1204 }
1205}
1206
static void mtk_tx_set_dma_desc_v1(struct sk_buff *skb, struct net_device *dev, void *txd,
1208 struct mtk_tx_dma_desc_info *info)
1209{
1210 struct mtk_mac *mac = netdev_priv(dev);
1211 struct mtk_eth *eth = mac->hw;
1212 struct mtk_tx_dma *desc = txd;
1213 u32 data;
1214
1215 WRITE_ONCE(desc->txd1, info->addr);
1216
1217 data = TX_DMA_SWC | QID_LOW_BITS(info->qid) | TX_DMA_PLEN0(info->size);
1218 if (info->last)
1219 data |= TX_DMA_LS0;
1220 WRITE_ONCE(desc->txd3, data);
1221
1222 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1223 data |= QID_HIGH_BITS(info->qid);
1224 if (info->first) {
1225 if (info->gso)
1226 data |= TX_DMA_TSO;
1227 /* tx checksum offload */
1228 if (info->csum)
1229 data |= TX_DMA_CHKSUM;
1230 /* vlan header offload */
1231 if (info->vlan)
1232 data |= TX_DMA_INS_VLAN | info->vlan_tci;
1233 }
1234
1235#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
1236 if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
1237 data &= ~(0x7 << TX_DMA_FPORT_SHIFT);
1238 data |= 0x4 << TX_DMA_FPORT_SHIFT;
1239 }
1240
1241 trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
1242 __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
1243#endif
1244 WRITE_ONCE(desc->txd4, data);
1245}
1246
1247static void mtk_tx_set_dma_desc_v2(struct sk_buff *skb, struct net_device *dev, void *txd,
1248 struct mtk_tx_dma_desc_info *info)
1249{
1250 struct mtk_mac *mac = netdev_priv(dev);
1251 struct mtk_eth *eth = mac->hw;
1252 struct mtk_tx_dma_v2 *desc = txd;
	u64 addr64 = 0;
	u32 data = 0;

	if (!info->qid && mac->id)
		info->qid = MTK_QDMA_GMAC2_QID;

	addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
		 TX_DMA_SDP1(info->addr) : 0;
1261
	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data | addr64);

	data = ((mac->id == MTK_GMAC3_ID) ?
		PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
1273 if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
1274 data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
1275 data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
1276 }
1277
1278 trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
1279 __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
1280#endif
1281 WRITE_ONCE(desc->txd4, data);
1282
1283 data = 0;
1284 if (info->first) {
1285 if (info->gso)
1286 data |= TX_DMA_TSO_V2;
1287 /* tx checksum offload */
1288 if (info->csum)
1289 data |= TX_DMA_CHKSUM_V2;
1290 }
1291 WRITE_ONCE(desc->txd5, data);
1292
1293 data = 0;
1294 if (info->first && info->vlan)
1295 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1296 WRITE_ONCE(desc->txd6, data);
1297
1298 WRITE_ONCE(desc->txd7, 0);
1299 WRITE_ONCE(desc->txd8, 0);
1300}
1301
1302static void mtk_tx_set_dma_desc(struct sk_buff *skb, struct net_device *dev, void *txd,
1303 struct mtk_tx_dma_desc_info *info)
1304{
1305 struct mtk_mac *mac = netdev_priv(dev);
1306 struct mtk_eth *eth = mac->hw;
1307
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
		mtk_tx_set_dma_desc_v2(skb, dev, txd, info);
1311 else
1312 mtk_tx_set_dma_desc_v1(skb, dev, txd, info);
1313}
1314
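/* Map the linear part and all fragments of the skb, fill the TX descriptors
 * via mtk_tx_set_dma_desc() and finally kick the hardware by updating the
 * QDMA CTX pointer (or the PDMA CTX index on non-QDMA SoCs).
 */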
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_tx_dma_desc_info txd_info = {
		.size = skb_headlen(skb),
		.qid = skb->mark & MTK_QDMA_TX_MASK,
		.gso = gso,
		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
		.vlan = skb_vlan_tag_present(skb),
		.vlan_tci = skb_vlan_tag_get(skb),
		.first = true,
		.last = !skb_is_nonlinear(skb),
	};
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
	struct mtk_tx_buf *itx_buf, *tx_buf;
	int i, n_desc = 1;
	int k = 0;
1336
1337 itxd = ring->next_free;
1338 itxd_pdma = qdma_to_pdma(ring, itxd);
1339 if (itxd == ring->last_free)
1340 return -ENOMEM;
1341
	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
	memset(itx_buf, 0, sizeof(*itx_buf));

	txd_info.addr = dma_map_single(eth->dev, skb->data, txd_info.size,
				       DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
		return -ENOMEM;

	mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);

	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	itx_buf->flags |= (mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
			  (mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
						      MTK_TX_FLAGS_FPORT2;
	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
		     k++);

	/* TX SG offload */
	txd = itxd;
1361 txd_pdma = qdma_to_pdma(ring, txd);
1362
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1365 unsigned int offset = 0;
1366 int frag_size = skb_frag_size(frag);
1367
1368 while (frag_size) {
			bool new_desc = true;

			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
			    (i & 0x1)) {
1373 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1374 txd_pdma = qdma_to_pdma(ring, txd);
1375 if (txd == ring->last_free)
1376 goto err_dma;
1377
1378 n_desc++;
1379 } else {
1380 new_desc = false;
1381 }
1382
			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1384 txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
1385 txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
1386 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1387 !(frag_size - txd_info.size);
1388 txd_info.addr = skb_frag_dma_map(eth->dev, frag,
1389 offset, txd_info.size,
1390 DMA_TO_DEVICE);
1391 if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
1392 goto err_dma;

			mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);

			tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
			if (new_desc)
1398 memset(tx_buf, 0, sizeof(*tx_buf));
1399 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
1400 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			tx_buf->flags |=
				(mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
				(mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
							    MTK_TX_FLAGS_FPORT2;

			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
				     txd_info.size, k++);

			frag_size -= txd_info.size;
			offset += txd_info.size;
		}
	}
1413
1414 /* store skb to cleanup */
1415 itx_buf->skb = skb;
1416
	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		if (k & 0x1)
1419 txd_pdma->txd2 |= TX_DMA_LS0;
1420 else
1421 txd_pdma->txd2 |= TX_DMA_LS1;
1422 }
1423
1424 netdev_sent_queue(dev, skb->len);
1425 skb_tx_timestamp(skb);
1426
1427 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1428 atomic_sub(n_desc, &ring->free_count);
1429
1430 /* make sure that all changes to the dma ring are flushed before we
1431 * continue
1432 */
1433 wmb();
1434
	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
1437 !netdev_xmit_more())
1438 mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
1439 } else {
		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
					     ring->dma_size);
1442 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1443 }
1444
1445 return 0;
1446
1447err_dma:
1448 do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf, false);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1457
1458 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1459 itxd_pdma = qdma_to_pdma(ring, itxd);
1460 } while (itxd != txd);
1461
1462 return -ENOMEM;
1463}
1464
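/* Worst-case number of TX descriptors needed for this skb: for GSO packets
 * each fragment may have to be split into MTK_TX_DMA_BUF_LEN sized chunks.
 */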
1465static inline int mtk_cal_txd_req(struct sk_buff *skb)
1466{
1467 int i, nfrags;
1468 skb_frag_t *frag;
1469
1470 nfrags = 1;
1471 if (skb_is_gso(skb)) {
1472 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1473 frag = &skb_shinfo(skb)->frags[i];
1474 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1475 MTK_TX_DMA_BUF_LEN);
1476 }
1477 } else {
1478 nfrags += skb_shinfo(skb)->nr_frags;
1479 }
1480
1481 return nfrags;
1482}
1483
1484static int mtk_queue_stopped(struct mtk_eth *eth)
1485{
1486 int i;
1487
1488 for (i = 0; i < MTK_MAC_COUNT; i++) {
1489 if (!eth->netdev[i])
1490 continue;
1491 if (netif_queue_stopped(eth->netdev[i]))
1492 return 1;
1493 }
1494
1495 return 0;
1496}
1497
1498static void mtk_wake_queue(struct mtk_eth *eth)
1499{
1500 int i;
1501
1502 for (i = 0; i < MTK_MAC_COUNT; i++) {
1503 if (!eth->netdev[i])
1504 continue;
1505 netif_wake_queue(eth->netdev[i]);
1506 }
1507}
1508
1509static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1510{
1511 struct mtk_mac *mac = netdev_priv(dev);
1512 struct mtk_eth *eth = mac->hw;
1513 struct mtk_tx_ring *ring = &eth->tx_ring;
1514 struct net_device_stats *stats = &dev->stats;
1515 bool gso = false;
1516 int tx_num;
1517
1518 /* normally we can rely on the stack not calling this more than once,
1519 * however we have 2 queues running on the same ring so we need to lock
1520 * the ring access
1521 */
1522 spin_lock(&eth->page_lock);
1523
1524 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1525 goto drop;
1526
1527 tx_num = mtk_cal_txd_req(skb);
1528 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1529 netif_stop_queue(dev);
1530 netif_err(eth, tx_queued, dev,
1531 "Tx Ring full when queue awake!\n");
1532 spin_unlock(&eth->page_lock);
1533 return NETDEV_TX_BUSY;
1534 }
1535
1536 /* TSO: fill MSS info in tcp checksum field */
1537 if (skb_is_gso(skb)) {
1538 if (skb_cow_head(skb, 0)) {
1539 netif_warn(eth, tx_err, dev,
1540 "GSO expand head fail.\n");
1541 goto drop;
1542 }
1543
1544 if (skb_shinfo(skb)->gso_type &
1545 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1546 gso = true;
1547 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1548 }
1549 }
1550
1551 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1552 goto drop;
1553
1554 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1555 netif_stop_queue(dev);
1556
1557 spin_unlock(&eth->page_lock);
1558
1559 return NETDEV_TX_OK;
1560
1561drop:
1562 spin_unlock(&eth->page_lock);
1563 stats->tx_dropped++;
1564 dev_kfree_skb_any(skb);
1565 return NETDEV_TX_OK;
1566}
1567
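/* With HW LRO enabled several RX rings are in use; return the first ring
 * that has a completed (DMA done) descriptor pending.
 */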
1568static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1569{
1570 int i;
1571 struct mtk_rx_ring *ring;
1572 int idx;
1573
	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
		struct mtk_rx_dma *rxd;

		if (!IS_NORMAL_RING(i) && !IS_HW_LRO_RING(i))
			continue;

		ring = &eth->rx_ring[i];
		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
		if (rxd->rxd2 & RX_DMA_DONE) {
			ring->calc_idx_update = true;
1585 return ring;
1586 }
1587 }
1588
1589 return NULL;
1590}
1591
static void mtk_update_rx_cpu_idx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	int i;

	if (!eth->hwlro)
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	else {
		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1600 ring = &eth->rx_ring[i];
1601 if (ring->calc_idx_update) {
1602 ring->calc_idx_update = false;
1603 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1604 }
1605 }
1606 }
1607}
1608
1609static int mtk_poll_rx(struct napi_struct *napi, int budget,
1610 struct mtk_eth *eth)
1611{
	struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
	struct mtk_rx_ring *ring = rx_napi->rx_ring;
	int idx;
	struct sk_buff *skb;
	u64 addr64 = 0;
	u8 *data, *new_data;
	struct mtk_rx_dma_v2 *rxd, trxd;
	int done = 0;
1620
	if (unlikely(!ring))
		goto rx_done;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac = 0;

		if (eth->hwlro)
			ring = mtk_get_rx_ring(eth);

		if (unlikely(!ring))
1634 goto rx_done;
1635
1636 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
developere9356982022-07-04 09:03:20 +08001637 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
developerfd40db22021-04-29 10:08:25 +08001638 data = ring->data[idx];
1639
developere9356982022-07-04 09:03:20 +08001640 if (!mtk_rx_get_desc(eth, &trxd, rxd))
developerfd40db22021-04-29 10:08:25 +08001641 break;
1642
1643 		/* find out which mac the packet comes from; port values start at 1 */
1644 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1645 mac = 0;
1646 } else {
developer089e8852022-09-28 14:43:46 +08001647 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1648 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
1649 switch (RX_DMA_GET_SPORT_V2(trxd.rxd5)) {
1650 case PSE_GDM1_PORT:
1651 case PSE_GDM2_PORT:
1652 mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
1653 break;
1654 case PSE_GDM3_PORT:
1655 mac = MTK_GMAC3_ID;
1656 break;
1657 }
1658 } else
developerfd40db22021-04-29 10:08:25 +08001659 mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ?
1660 0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
1661 }
1662
1663 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1664 !eth->netdev[mac]))
1665 goto release_desc;
1666
1667 netdev = eth->netdev[mac];
1668
1669 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1670 goto release_desc;
1671
1672 /* alloc new buffer */
1673 new_data = napi_alloc_frag(ring->frag_size);
1674 if (unlikely(!new_data)) {
1675 netdev->stats.rx_dropped++;
1676 goto release_desc;
1677 }
1678 dma_addr = dma_map_single(eth->dev,
1679 new_data + NET_SKB_PAD +
1680 eth->ip_align,
1681 ring->buf_size,
1682 DMA_FROM_DEVICE);
1683 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
1684 skb_free_frag(new_data);
1685 netdev->stats.rx_dropped++;
1686 goto release_desc;
1687 }
1688
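		/* On SoCs with more than 4GB of addressable DMA memory, the upper
		 * address bits of the buffer are carried in rxd2[3:0]; recover them
		 * before unmapping the old buffer.
		 */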
developer089e8852022-09-28 14:43:46 +08001689 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
1690 ((u64)(trxd.rxd2 & 0xf)) << 32 : 0;
1691
1692 dma_unmap_single(eth->dev,
1693 (u64)(trxd.rxd1 | addr64),
developerc4671b22021-05-28 13:16:42 +08001694 ring->buf_size, DMA_FROM_DEVICE);
1695
developerfd40db22021-04-29 10:08:25 +08001696 /* receive data */
1697 skb = build_skb(data, ring->frag_size);
1698 if (unlikely(!skb)) {
developerc4671b22021-05-28 13:16:42 +08001699 skb_free_frag(data);
developerfd40db22021-04-29 10:08:25 +08001700 netdev->stats.rx_dropped++;
developerc4671b22021-05-28 13:16:42 +08001701 goto skip_rx;
developerfd40db22021-04-29 10:08:25 +08001702 }
1703 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1704
developerfd40db22021-04-29 10:08:25 +08001705 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1706 skb->dev = netdev;
1707 skb_put(skb, pktlen);
1708
developer089e8852022-09-28 14:43:46 +08001709 if ((MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1) &&
developerfd40db22021-04-29 10:08:25 +08001710 (trxd.rxd4 & eth->rx_dma_l4_valid)) ||
developer089e8852022-09-28 14:43:46 +08001711 (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1) &&
developerfd40db22021-04-29 10:08:25 +08001712 (trxd.rxd3 & eth->rx_dma_l4_valid)))
1713 skb->ip_summed = CHECKSUM_UNNECESSARY;
1714 else
1715 skb_checksum_none_assert(skb);
1716 skb->protocol = eth_type_trans(skb, netdev);
1717
1718 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
developer089e8852022-09-28 14:43:46 +08001719 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1720 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer255bba22021-07-27 15:16:33 +08001721 if (trxd.rxd3 & RX_DMA_VTAG_V2)
developerfd40db22021-04-29 10:08:25 +08001722 __vlan_hwaccel_put_tag(skb,
developer255bba22021-07-27 15:16:33 +08001723 htons(RX_DMA_VPID_V2(trxd.rxd4)),
developerfd40db22021-04-29 10:08:25 +08001724 RX_DMA_VID_V2(trxd.rxd4));
1725 } else {
1726 if (trxd.rxd2 & RX_DMA_VTAG)
1727 __vlan_hwaccel_put_tag(skb,
1728 htons(RX_DMA_VPID(trxd.rxd3)),
1729 RX_DMA_VID(trxd.rxd3));
1730 }
1731
1732 			/* If the netdev is attached to a DSA switch, the special
1733 			 * tag inserted into the VLAN field by the switch hardware can
1734 			 * be offloaded by RX HW VLAN offload. Clear the VLAN
1735 			 * information from @skb to avoid an unexpected 802.1d
1736 			 * handler before the packet enters the DSA framework.
1737 			 */
1738 if (netdev_uses_dsa(netdev))
1739 __vlan_hwaccel_clear_tag(skb);
1740 }
1741
1742#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
developer089e8852022-09-28 14:43:46 +08001743 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1744 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developerfd40db22021-04-29 10:08:25 +08001745 *(u32 *)(skb->head) = trxd.rxd5;
1746 else
developerfd40db22021-04-29 10:08:25 +08001747 *(u32 *)(skb->head) = trxd.rxd4;
1748
1749 skb_hnat_alg(skb) = 0;
developerfdfe1572021-09-13 16:56:33 +08001750 skb_hnat_filled(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001751 skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
1752
1753 if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
1754 trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
1755 __func__, skb_hnat_reason(skb));
1756 skb->pkt_type = PACKET_HOST;
1757 }
1758
1759 		trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x)\n",
1760 __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
1761 skb_hnat_reason(skb), skb_hnat_alg(skb));
1762#endif
developer77d03a72021-06-06 00:06:00 +08001763 if (mtk_hwlro_stats_ebl &&
1764 IS_HW_LRO_RING(ring->ring_no) && eth->hwlro) {
1765 hw_lro_stats_update(ring->ring_no, &trxd);
1766 hw_lro_flush_stats_update(ring->ring_no, &trxd);
1767 }
developerfd40db22021-04-29 10:08:25 +08001768
1769 skb_record_rx_queue(skb, 0);
1770 napi_gro_receive(napi, skb);
1771
developerc4671b22021-05-28 13:16:42 +08001772skip_rx:
developerfd40db22021-04-29 10:08:25 +08001773 ring->data[idx] = new_data;
1774 rxd->rxd1 = (unsigned int)dma_addr;
1775
1776release_desc:
developer089e8852022-09-28 14:43:46 +08001777 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
1778 RX_DMA_SDP1(dma_addr) : 0;
1779
developerfd40db22021-04-29 10:08:25 +08001780 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1781 rxd->rxd2 = RX_DMA_LSO;
1782 else
developer089e8852022-09-28 14:43:46 +08001783 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size) | addr64;
developerfd40db22021-04-29 10:08:25 +08001784
1785 ring->calc_idx = idx;
1786
1787 done++;
1788 }
1789
1790rx_done:
1791 if (done) {
1792 /* make sure that all changes to the dma ring are flushed before
1793 * we continue
1794 */
1795 wmb();
developer18f46a82021-07-20 21:08:21 +08001796 mtk_update_rx_cpu_idx(eth, ring);
developerfd40db22021-04-29 10:08:25 +08001797 }
1798
1799 return done;
1800}
1801
developerfb556ca2021-10-13 10:52:09 +08001802static void mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08001803 unsigned int *done, unsigned int *bytes)
1804{
developere9356982022-07-04 09:03:20 +08001805 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001806 struct mtk_tx_ring *ring = &eth->tx_ring;
1807 struct mtk_tx_dma *desc;
1808 struct sk_buff *skb;
1809 struct mtk_tx_buf *tx_buf;
1810 u32 cpu, dma;
1811
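	/* Reclaim completed TX descriptors by walking the QDMA ring from the
	 * last position released to the CPU up to the current DMA read pointer.
	 */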
developerc4671b22021-05-28 13:16:42 +08001812 cpu = ring->last_free_ptr;
developerfd40db22021-04-29 10:08:25 +08001813 dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1814
1815 desc = mtk_qdma_phys_to_virt(ring, cpu);
1816
1817 while ((cpu != dma) && budget) {
1818 u32 next_cpu = desc->txd2;
1819 int mac = 0;
1820
1821 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1822 break;
1823
1824 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1825
developere9356982022-07-04 09:03:20 +08001826 tx_buf = mtk_desc_to_tx_buf(ring, desc, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001827 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
developer089e8852022-09-28 14:43:46 +08001828 mac = MTK_GMAC2_ID;
1829 else if (tx_buf->flags & MTK_TX_FLAGS_FPORT2)
1830 mac = MTK_GMAC3_ID;
developerfd40db22021-04-29 10:08:25 +08001831
1832 skb = tx_buf->skb;
1833 if (!skb)
1834 break;
1835
1836 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1837 bytes[mac] += skb->len;
1838 done[mac]++;
1839 budget--;
1840 }
developerc4671b22021-05-28 13:16:42 +08001841 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08001842
1843 ring->last_free = desc;
1844 atomic_inc(&ring->free_count);
1845
1846 cpu = next_cpu;
1847 }
1848
developerc4671b22021-05-28 13:16:42 +08001849 ring->last_free_ptr = cpu;
developerfd40db22021-04-29 10:08:25 +08001850 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
developerfd40db22021-04-29 10:08:25 +08001851}
1852
developerfb556ca2021-10-13 10:52:09 +08001853static void mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08001854 unsigned int *done, unsigned int *bytes)
1855{
1856 struct mtk_tx_ring *ring = &eth->tx_ring;
1857 struct mtk_tx_dma *desc;
1858 struct sk_buff *skb;
1859 struct mtk_tx_buf *tx_buf;
1860 u32 cpu, dma;
1861
1862 cpu = ring->cpu_idx;
1863 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
1864
1865 while ((cpu != dma) && budget) {
1866 tx_buf = &ring->buf[cpu];
1867 skb = tx_buf->skb;
1868 if (!skb)
1869 break;
1870
1871 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1872 bytes[0] += skb->len;
1873 done[0]++;
1874 budget--;
1875 }
1876
developerc4671b22021-05-28 13:16:42 +08001877 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08001878
developere9356982022-07-04 09:03:20 +08001879 desc = ring->dma + cpu * eth->soc->txrx.txd_size;
developerfd40db22021-04-29 10:08:25 +08001880 ring->last_free = desc;
1881 atomic_inc(&ring->free_count);
1882
1883 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
1884 }
1885
1886 ring->cpu_idx = cpu;
developerfd40db22021-04-29 10:08:25 +08001887}
1888
1889static int mtk_poll_tx(struct mtk_eth *eth, int budget)
1890{
1891 struct mtk_tx_ring *ring = &eth->tx_ring;
1892 unsigned int done[MTK_MAX_DEVS];
1893 unsigned int bytes[MTK_MAX_DEVS];
1894 int total = 0, i;
1895
1896 memset(done, 0, sizeof(done));
1897 memset(bytes, 0, sizeof(bytes));
1898
1899 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
developerfb556ca2021-10-13 10:52:09 +08001900 mtk_poll_tx_qdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08001901 else
developerfb556ca2021-10-13 10:52:09 +08001902 mtk_poll_tx_pdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08001903
1904 for (i = 0; i < MTK_MAC_COUNT; i++) {
1905 if (!eth->netdev[i] || !done[i])
1906 continue;
1907 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1908 total += done[i];
1909 }
1910
1911 if (mtk_queue_stopped(eth) &&
1912 (atomic_read(&ring->free_count) > ring->thresh))
1913 mtk_wake_queue(eth);
1914
1915 return total;
1916}
1917
1918static void mtk_handle_status_irq(struct mtk_eth *eth)
1919{
developer8051e042022-04-08 13:26:36 +08001920 u32 status2 = mtk_r32(eth, MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08001921
1922 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
1923 mtk_stats_update(eth);
1924 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
developer8051e042022-04-08 13:26:36 +08001925 MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08001926 }
1927}
1928
1929static int mtk_napi_tx(struct napi_struct *napi, int budget)
1930{
1931 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
1932 u32 status, mask;
1933 int tx_done = 0;
1934
1935 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1936 mtk_handle_status_irq(eth);
1937 mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
1938 tx_done = mtk_poll_tx(eth, budget);
1939
1940 if (unlikely(netif_msg_intr(eth))) {
1941 status = mtk_r32(eth, eth->tx_int_status_reg);
1942 mask = mtk_r32(eth, eth->tx_int_mask_reg);
1943 dev_info(eth->dev,
1944 "done tx %d, intr 0x%08x/0x%x\n",
1945 tx_done, status, mask);
1946 }
1947
1948 if (tx_done == budget)
1949 return budget;
1950
1951 status = mtk_r32(eth, eth->tx_int_status_reg);
1952 if (status & MTK_TX_DONE_INT)
1953 return budget;
1954
developerc4671b22021-05-28 13:16:42 +08001955 if (napi_complete(napi))
1956 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developerfd40db22021-04-29 10:08:25 +08001957
1958 return tx_done;
1959}
1960
1961static int mtk_napi_rx(struct napi_struct *napi, int budget)
1962{
developer18f46a82021-07-20 21:08:21 +08001963 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
1964 struct mtk_eth *eth = rx_napi->eth;
1965 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08001966 u32 status, mask;
1967 int rx_done = 0;
1968 int remain_budget = budget;
1969
1970 mtk_handle_status_irq(eth);
1971
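	/* Re-poll while the RX-done interrupt for this ring stays asserted, so
	 * packets that arrive during processing are handled within this NAPI run.
	 */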
1972poll_again:
developer18f46a82021-07-20 21:08:21 +08001973 mtk_w32(eth, MTK_RX_DONE_INT(ring->ring_no), MTK_PDMA_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08001974 rx_done = mtk_poll_rx(napi, remain_budget, eth);
1975
1976 if (unlikely(netif_msg_intr(eth))) {
1977 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1978 mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
1979 dev_info(eth->dev,
1980 "done rx %d, intr 0x%08x/0x%x\n",
1981 rx_done, status, mask);
1982 }
1983 if (rx_done == remain_budget)
1984 return budget;
1985
1986 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
developer18f46a82021-07-20 21:08:21 +08001987 if (status & MTK_RX_DONE_INT(ring->ring_no)) {
developerfd40db22021-04-29 10:08:25 +08001988 remain_budget -= rx_done;
1989 goto poll_again;
1990 }
developerc4671b22021-05-28 13:16:42 +08001991
1992 if (napi_complete(napi))
developer18f46a82021-07-20 21:08:21 +08001993 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(ring->ring_no));
developerfd40db22021-04-29 10:08:25 +08001994
1995 return rx_done + budget - remain_budget;
1996}
1997
1998static int mtk_tx_alloc(struct mtk_eth *eth)
1999{
developere9356982022-07-04 09:03:20 +08002000 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002001 struct mtk_tx_ring *ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08002002 int i, sz = soc->txrx.txd_size;
2003 struct mtk_tx_dma_v2 *txd, *pdma_txd;
developerfd40db22021-04-29 10:08:25 +08002004
2005 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
2006 GFP_KERNEL);
2007 if (!ring->buf)
2008 goto no_tx_mem;
2009
2010 if (!eth->soc->has_sram)
2011 ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08002012 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08002013 else {
developere9356982022-07-04 09:03:20 +08002014 ring->dma = eth->scratch_ring + MTK_DMA_SIZE * sz;
developerfd40db22021-04-29 10:08:25 +08002015 ring->phys = eth->phy_scratch_ring + MTK_DMA_SIZE * sz;
2016 }
2017
2018 if (!ring->dma)
2019 goto no_tx_mem;
2020
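	/* Chain the TX descriptors into a ring: txd2 of each descriptor holds the
	 * physical address of the next one, and ownership starts with the CPU.
	 */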
2021 for (i = 0; i < MTK_DMA_SIZE; i++) {
2022 int next = (i + 1) % MTK_DMA_SIZE;
2023 u32 next_ptr = ring->phys + next * sz;
2024
developere9356982022-07-04 09:03:20 +08002025 txd = ring->dma + i * sz;
2026 txd->txd2 = next_ptr;
2027 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2028 txd->txd4 = 0;
2029
developer089e8852022-09-28 14:43:46 +08002030 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2031 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developere9356982022-07-04 09:03:20 +08002032 txd->txd5 = 0;
2033 txd->txd6 = 0;
2034 txd->txd7 = 0;
2035 txd->txd8 = 0;
2036 }
developerfd40db22021-04-29 10:08:25 +08002037 }
2038
2039 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
2040 * only as the framework. The real HW descriptors are the PDMA
2041 * descriptors in ring->dma_pdma.
2042 */
2043 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2044 ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08002045 &ring->phys_pdma, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08002046 if (!ring->dma_pdma)
2047 goto no_tx_mem;
2048
2049 for (i = 0; i < MTK_DMA_SIZE; i++) {
developere9356982022-07-04 09:03:20 +08002050 			pdma_txd = ring->dma_pdma + i * sz;
2051
2052 pdma_txd->txd2 = TX_DMA_DESP2_DEF;
2053 pdma_txd->txd4 = 0;
developerfd40db22021-04-29 10:08:25 +08002054 }
2055 }
2056
2057 ring->dma_size = MTK_DMA_SIZE;
2058 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
developere9356982022-07-04 09:03:20 +08002059 ring->next_free = ring->dma;
2060 ring->last_free = (void *)txd;
developerc4671b22021-05-28 13:16:42 +08002061 ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
developerfd40db22021-04-29 10:08:25 +08002062 ring->thresh = MAX_SKB_FRAGS;
2063
2064 /* make sure that all changes to the dma ring are flushed before we
2065 * continue
2066 */
2067 wmb();
2068
2069 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2070 mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
2071 mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
2072 mtk_w32(eth,
2073 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
2074 MTK_QTX_CRX_PTR);
developerc4671b22021-05-28 13:16:42 +08002075 mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
developerfd40db22021-04-29 10:08:25 +08002076 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
2077 MTK_QTX_CFG(0));
2078 } else {
2079 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2080 mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
2081 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2082 mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
2083 }
2084
2085 return 0;
2086
2087no_tx_mem:
2088 return -ENOMEM;
2089}
2090
2091static void mtk_tx_clean(struct mtk_eth *eth)
2092{
developere9356982022-07-04 09:03:20 +08002093 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002094 struct mtk_tx_ring *ring = &eth->tx_ring;
2095 int i;
2096
2097 if (ring->buf) {
2098 for (i = 0; i < MTK_DMA_SIZE; i++)
developerc4671b22021-05-28 13:16:42 +08002099 mtk_tx_unmap(eth, &ring->buf[i], false);
developerfd40db22021-04-29 10:08:25 +08002100 kfree(ring->buf);
2101 ring->buf = NULL;
2102 }
2103
2104 if (!eth->soc->has_sram && ring->dma) {
2105 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002106 MTK_DMA_SIZE * soc->txrx.txd_size,
2107 ring->dma, ring->phys);
developerfd40db22021-04-29 10:08:25 +08002108 ring->dma = NULL;
2109 }
2110
2111 if (ring->dma_pdma) {
2112 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002113 MTK_DMA_SIZE * soc->txrx.txd_size,
2114 ring->dma_pdma, ring->phys_pdma);
developerfd40db22021-04-29 10:08:25 +08002115 ring->dma_pdma = NULL;
2116 }
2117}
2118
2119static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2120{
2121 struct mtk_rx_ring *ring;
2122 int rx_data_len, rx_dma_size;
2123 int i;
developer089e8852022-09-28 14:43:46 +08002124 u64 addr64 = 0;
developerfd40db22021-04-29 10:08:25 +08002125
2126 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2127 if (ring_no)
2128 return -EINVAL;
2129 ring = &eth->rx_ring_qdma;
2130 } else {
2131 ring = &eth->rx_ring[ring_no];
2132 }
2133
2134 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2135 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2136 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2137 } else {
2138 rx_data_len = ETH_DATA_LEN;
2139 rx_dma_size = MTK_DMA_SIZE;
2140 }
2141
2142 ring->frag_size = mtk_max_frag_size(rx_data_len);
2143 ring->buf_size = mtk_max_buf_size(ring->frag_size);
2144 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2145 GFP_KERNEL);
2146 if (!ring->data)
2147 return -ENOMEM;
2148
2149 for (i = 0; i < rx_dma_size; i++) {
2150 ring->data[i] = netdev_alloc_frag(ring->frag_size);
2151 if (!ring->data[i])
2152 return -ENOMEM;
2153 }
2154
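	/* Normal RX rings on SoCs with SRAM reuse the region right after the TX
	 * ring; all other rings are allocated from coherent DMA memory.
	 */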
2155 	if (!eth->soc->has_sram ||
2156 	    (rx_flag != MTK_RX_FLAGS_NORMAL))
2157 ring->dma = dma_alloc_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002158 rx_dma_size * eth->soc->txrx.rxd_size,
2159 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08002160 else {
2161 struct mtk_tx_ring *tx_ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08002162 ring->dma = tx_ring->dma + MTK_DMA_SIZE *
2163 eth->soc->txrx.rxd_size * (ring_no + 1);
developer18f46a82021-07-20 21:08:21 +08002164 ring->phys = tx_ring->phys + MTK_DMA_SIZE *
developere9356982022-07-04 09:03:20 +08002165 eth->soc->txrx.rxd_size * (ring_no + 1);
developerfd40db22021-04-29 10:08:25 +08002166 }
2167
2168 if (!ring->dma)
2169 return -ENOMEM;
2170
2171 for (i = 0; i < rx_dma_size; i++) {
developere9356982022-07-04 09:03:20 +08002172 struct mtk_rx_dma_v2 *rxd;
2173
developerfd40db22021-04-29 10:08:25 +08002174 dma_addr_t dma_addr = dma_map_single(eth->dev,
2175 ring->data[i] + NET_SKB_PAD + eth->ip_align,
2176 ring->buf_size,
2177 DMA_FROM_DEVICE);
2178 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
2179 return -ENOMEM;
developere9356982022-07-04 09:03:20 +08002180
2181 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2182 rxd->rxd1 = (unsigned int)dma_addr;
developerfd40db22021-04-29 10:08:25 +08002183
developer089e8852022-09-28 14:43:46 +08002184 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
2185 RX_DMA_SDP1(dma_addr) : 0;
2186
developerfd40db22021-04-29 10:08:25 +08002187 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
developere9356982022-07-04 09:03:20 +08002188 rxd->rxd2 = RX_DMA_LSO;
developerfd40db22021-04-29 10:08:25 +08002189 else
developer089e8852022-09-28 14:43:46 +08002190 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size) | addr64;
developerfd40db22021-04-29 10:08:25 +08002191
developere9356982022-07-04 09:03:20 +08002192 rxd->rxd3 = 0;
2193 rxd->rxd4 = 0;
2194
developer089e8852022-09-28 14:43:46 +08002195 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2196 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developere9356982022-07-04 09:03:20 +08002197 rxd->rxd5 = 0;
2198 rxd->rxd6 = 0;
2199 rxd->rxd7 = 0;
2200 rxd->rxd8 = 0;
developerfd40db22021-04-29 10:08:25 +08002201 }
developerfd40db22021-04-29 10:08:25 +08002202 }
2203 ring->dma_size = rx_dma_size;
2204 ring->calc_idx_update = false;
2205 ring->calc_idx = rx_dma_size - 1;
2206 ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ?
2207 MTK_QRX_CRX_IDX_CFG(ring_no) :
2208 MTK_PRX_CRX_IDX_CFG(ring_no);
developer77d03a72021-06-06 00:06:00 +08002209 ring->ring_no = ring_no;
developerfd40db22021-04-29 10:08:25 +08002210 /* make sure that all changes to the dma ring are flushed before we
2211 * continue
2212 */
2213 wmb();
2214
2215 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2216 mtk_w32(eth, ring->phys, MTK_QRX_BASE_PTR_CFG(ring_no));
2217 mtk_w32(eth, rx_dma_size, MTK_QRX_MAX_CNT_CFG(ring_no));
2218 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2219 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_QDMA_RST_IDX);
2220 } else {
2221 mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
2222 mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
2223 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2224 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
2225 }
2226
2227 return 0;
2228}
2229
2230static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram)
2231{
2232 int i;
developer089e8852022-09-28 14:43:46 +08002233 u64 addr64 = 0;
developerfd40db22021-04-29 10:08:25 +08002234
2235 if (ring->data && ring->dma) {
2236 for (i = 0; i < ring->dma_size; i++) {
developere9356982022-07-04 09:03:20 +08002237 struct mtk_rx_dma *rxd;
2238
developerfd40db22021-04-29 10:08:25 +08002239 if (!ring->data[i])
2240 continue;
developere9356982022-07-04 09:03:20 +08002241
2242 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2243 if (!rxd->rxd1)
developerfd40db22021-04-29 10:08:25 +08002244 continue;
developere9356982022-07-04 09:03:20 +08002245
developer089e8852022-09-28 14:43:46 +08002246 addr64 = (MTK_HAS_CAPS(eth->soc->caps,
2247 MTK_8GB_ADDRESSING)) ?
2248 ((u64)(rxd->rxd2 & 0xf)) << 32 : 0;
2249
developerfd40db22021-04-29 10:08:25 +08002250 dma_unmap_single(eth->dev,
developer089e8852022-09-28 14:43:46 +08002251 (u64)(rxd->rxd1 | addr64),
developerfd40db22021-04-29 10:08:25 +08002252 ring->buf_size,
2253 DMA_FROM_DEVICE);
2254 skb_free_frag(ring->data[i]);
2255 }
2256 kfree(ring->data);
2257 ring->data = NULL;
2258 }
2259
2260 	if (in_sram)
2261 return;
2262
2263 if (ring->dma) {
2264 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002265 ring->dma_size * eth->soc->txrx.rxd_size,
developerfd40db22021-04-29 10:08:25 +08002266 ring->dma,
2267 ring->phys);
2268 ring->dma = NULL;
2269 }
2270}
2271
2272static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2273{
2274 int i;
developer77d03a72021-06-06 00:06:00 +08002275 u32 val;
developerfd40db22021-04-29 10:08:25 +08002276 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2277 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2278
2279 /* set LRO rings to auto-learn modes */
2280 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2281
2282 /* validate LRO ring */
2283 ring_ctrl_dw2 |= MTK_RING_VLD;
2284
2285 /* set AGE timer (unit: 20us) */
2286 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2287 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2288
2289 /* set max AGG timer (unit: 20us) */
2290 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2291
2292 /* set max LRO AGG count */
2293 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2294 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2295
developer77d03a72021-06-06 00:06:00 +08002296 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002297 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2298 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2299 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2300 }
2301
2302 /* IPv4 checksum update enable */
2303 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2304
2305 /* switch priority comparison to packet count mode */
2306 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2307
2308 /* bandwidth threshold setting */
2309 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2310
2311 /* auto-learn score delta setting */
developer77d03a72021-06-06 00:06:00 +08002312 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_LRO_ALT_SCORE_DELTA);
developerfd40db22021-04-29 10:08:25 +08002313
2314 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2315 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2316 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2317
developerfd40db22021-04-29 10:08:25 +08002318 /* the minimal remaining room of SDL0 in RXD for lro aggregation */
2319 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2320
developer089e8852022-09-28 14:43:46 +08002321 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2322 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer77d03a72021-06-06 00:06:00 +08002323 val = mtk_r32(eth, MTK_PDMA_RX_CFG);
2324 mtk_w32(eth, val | (MTK_PDMA_LRO_SDL << MTK_RX_CFG_SDL_OFFSET),
2325 MTK_PDMA_RX_CFG);
2326
2327 lro_ctrl_dw0 |= MTK_PDMA_LRO_SDL << MTK_CTRL_DW0_SDL_OFFSET;
2328 } else {
2329 /* set HW LRO mode & the max aggregation count for rx packets */
2330 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2331 }
2332
developerfd40db22021-04-29 10:08:25 +08002333 /* enable HW LRO */
2334 lro_ctrl_dw0 |= MTK_LRO_EN;
2335
developer77d03a72021-06-06 00:06:00 +08002336 /* enable cpu reason black list */
2337 lro_ctrl_dw0 |= MTK_LRO_CRSN_BNW;
2338
developerfd40db22021-04-29 10:08:25 +08002339 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2340 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2341
developer77d03a72021-06-06 00:06:00 +08002342 	/* don't use the PPE CPU reason */
2343 mtk_w32(eth, 0xffffffff, MTK_PDMA_LRO_CTRL_DW1);
2344
developerfd40db22021-04-29 10:08:25 +08002345 return 0;
2346}
2347
2348static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2349{
2350 int i;
2351 u32 val;
2352
2353 /* relinquish lro rings, flush aggregated packets */
developer77d03a72021-06-06 00:06:00 +08002354 mtk_w32(eth, MTK_LRO_RING_RELINGUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
developerfd40db22021-04-29 10:08:25 +08002355
2356 /* wait for relinquishments done */
2357 for (i = 0; i < 10; i++) {
2358 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
developer77d03a72021-06-06 00:06:00 +08002359 if (val & MTK_LRO_RING_RELINGUISH_DONE) {
developer8051e042022-04-08 13:26:36 +08002360 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08002361 continue;
2362 }
2363 break;
2364 }
2365
2366 /* invalidate lro rings */
developer77d03a72021-06-06 00:06:00 +08002367 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
developerfd40db22021-04-29 10:08:25 +08002368 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2369
2370 /* disable HW LRO */
2371 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2372}
2373
2374static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2375{
2376 u32 reg_val;
2377
developer089e8852022-09-28 14:43:46 +08002378 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2379 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer77d03a72021-06-06 00:06:00 +08002380 idx += 1;
2381
developerfd40db22021-04-29 10:08:25 +08002382 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2383
2384 /* invalidate the IP setting */
2385 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2386
2387 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2388
2389 /* validate the IP setting */
2390 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2391}
2392
2393static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2394{
2395 u32 reg_val;
2396
developer089e8852022-09-28 14:43:46 +08002397 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2398 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer77d03a72021-06-06 00:06:00 +08002399 idx += 1;
2400
developerfd40db22021-04-29 10:08:25 +08002401 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2402
2403 /* invalidate the IP setting */
2404 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2405
2406 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2407}
2408
2409static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2410{
2411 int cnt = 0;
2412 int i;
2413
2414 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2415 if (mac->hwlro_ip[i])
2416 cnt++;
2417 }
2418
2419 return cnt;
2420}
2421
2422static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2423 struct ethtool_rxnfc *cmd)
2424{
2425 struct ethtool_rx_flow_spec *fsp =
2426 (struct ethtool_rx_flow_spec *)&cmd->fs;
2427 struct mtk_mac *mac = netdev_priv(dev);
2428 struct mtk_eth *eth = mac->hw;
2429 int hwlro_idx;
2430
2431 if ((fsp->flow_type != TCP_V4_FLOW) ||
2432 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2433 (fsp->location > 1))
2434 return -EINVAL;
2435
2436 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2437 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2438
2439 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2440
2441 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2442
2443 return 0;
2444}
2445
2446static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2447 struct ethtool_rxnfc *cmd)
2448{
2449 struct ethtool_rx_flow_spec *fsp =
2450 (struct ethtool_rx_flow_spec *)&cmd->fs;
2451 struct mtk_mac *mac = netdev_priv(dev);
2452 struct mtk_eth *eth = mac->hw;
2453 int hwlro_idx;
2454
2455 if (fsp->location > 1)
2456 return -EINVAL;
2457
2458 mac->hwlro_ip[fsp->location] = 0;
2459 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2460
2461 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2462
2463 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2464
2465 return 0;
2466}
2467
2468static void mtk_hwlro_netdev_disable(struct net_device *dev)
2469{
2470 struct mtk_mac *mac = netdev_priv(dev);
2471 struct mtk_eth *eth = mac->hw;
2472 int i, hwlro_idx;
2473
2474 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2475 mac->hwlro_ip[i] = 0;
2476 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2477
2478 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2479 }
2480
2481 mac->hwlro_ip_cnt = 0;
2482}
2483
2484static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2485 struct ethtool_rxnfc *cmd)
2486{
2487 struct mtk_mac *mac = netdev_priv(dev);
2488 struct ethtool_rx_flow_spec *fsp =
2489 (struct ethtool_rx_flow_spec *)&cmd->fs;
2490
2491 	/* only the TCP IPv4 destination address is meaningful; other fields are ignored */
2492 fsp->flow_type = TCP_V4_FLOW;
2493 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2494 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2495
2496 fsp->h_u.tcp_ip4_spec.ip4src = 0;
2497 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2498 fsp->h_u.tcp_ip4_spec.psrc = 0;
2499 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2500 fsp->h_u.tcp_ip4_spec.pdst = 0;
2501 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2502 fsp->h_u.tcp_ip4_spec.tos = 0;
2503 fsp->m_u.tcp_ip4_spec.tos = 0xff;
2504
2505 return 0;
2506}
2507
2508static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2509 struct ethtool_rxnfc *cmd,
2510 u32 *rule_locs)
2511{
2512 struct mtk_mac *mac = netdev_priv(dev);
2513 int cnt = 0;
2514 int i;
2515
2516 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2517 if (mac->hwlro_ip[i]) {
2518 rule_locs[cnt] = i;
2519 cnt++;
2520 }
2521 }
2522
2523 cmd->rule_cnt = cnt;
2524
2525 return 0;
2526}
2527
developer18f46a82021-07-20 21:08:21 +08002528static int mtk_rss_init(struct mtk_eth *eth)
2529{
2530 u32 val;
2531
developer089e8852022-09-28 14:43:46 +08002532 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1)) {
developer18f46a82021-07-20 21:08:21 +08002533 /* Set RSS rings to PSE modes */
2534 val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(1));
2535 val |= MTK_RING_PSE_MODE;
2536 mtk_w32(eth, val, MTK_LRO_CTRL_DW2_CFG(1));
2537
2538 /* Enable non-lro multiple rx */
2539 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2540 val |= MTK_NON_LRO_MULTI_EN;
2541 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2542
2543 		/* Enable RSS delay interrupt support */
2544 val |= MTK_LRO_DLY_INT_EN;
2545 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2546
2547 /* Set RSS delay config int ring1 */
2548 mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX1_DLY_INT);
2549 }
2550
2551 /* Hash Type */
2552 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2553 val |= MTK_RSS_IPV4_STATIC_HASH;
2554 val |= MTK_RSS_IPV6_STATIC_HASH;
2555 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2556
2557 /* Select the size of indirection table */
2558 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW0);
2559 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW1);
2560 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW2);
2561 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW3);
2562 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW4);
2563 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW5);
2564 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW6);
2565 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW7);
2566
2567 /* Pause */
2568 val |= MTK_RSS_CFG_REQ;
2569 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2570
2571 	/* Enable RSS */
2572 val |= MTK_RSS_EN;
2573 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2574
2575 /* Release pause */
2576 val &= ~(MTK_RSS_CFG_REQ);
2577 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2578
2579 /* Set perRSS GRP INT */
2580 mtk_w32(eth, MTK_RX_DONE_INT(MTK_RSS_RING1), MTK_PDMA_INT_GRP3);
2581
2582 /* Set GRP INT */
2583 mtk_w32(eth, 0x21021030, MTK_FE_INT_GRP);
2584
developer089e8852022-09-28 14:43:46 +08002585 /* Enable RSS delay interrupt */
2586 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_RSS_DELAY_INT);
2587
developer18f46a82021-07-20 21:08:21 +08002588 return 0;
2589}
2590
2591static void mtk_rss_uninit(struct mtk_eth *eth)
2592{
2593 u32 val;
2594
2595 /* Pause */
2596 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2597 val |= MTK_RSS_CFG_REQ;
2598 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2599
2600 /* Disable RSS*/
2601 val &= ~(MTK_RSS_EN);
2602 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2603
2604 /* Release pause */
2605 val &= ~(MTK_RSS_CFG_REQ);
2606 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2607}
2608
developerfd40db22021-04-29 10:08:25 +08002609static netdev_features_t mtk_fix_features(struct net_device *dev,
2610 netdev_features_t features)
2611{
2612 if (!(features & NETIF_F_LRO)) {
2613 struct mtk_mac *mac = netdev_priv(dev);
2614 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2615
2616 if (ip_cnt) {
2617 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
2618
2619 features |= NETIF_F_LRO;
2620 }
2621 }
2622
2623 if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
2624 netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n");
2625
2626 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2627 }
2628
2629 return features;
2630}
2631
2632static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2633{
2634 struct mtk_mac *mac = netdev_priv(dev);
2635 struct mtk_eth *eth = mac->hw;
2636 int err = 0;
2637
2638 if (!((dev->features ^ features) & MTK_SET_FEATURES))
2639 return 0;
2640
2641 if (!(features & NETIF_F_LRO))
2642 mtk_hwlro_netdev_disable(dev);
2643
2644 if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
2645 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
2646 else
2647 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
2648
2649 return err;
2650}
2651
2652/* wait for DMA to finish whatever it is doing before we start using it again */
2653static int mtk_dma_busy_wait(struct mtk_eth *eth)
2654{
2655 unsigned long t_start = jiffies;
2656
2657 while (1) {
2658 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2659 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
2660 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2661 return 0;
2662 } else {
2663 if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
2664 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2665 return 0;
2666 }
2667
2668 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
2669 break;
2670 }
2671
2672 dev_err(eth->dev, "DMA init timeout\n");
2673 return -1;
2674}
2675
2676static int mtk_dma_init(struct mtk_eth *eth)
2677{
2678 int err;
2679 u32 i;
2680
2681 if (mtk_dma_busy_wait(eth))
2682 return -EBUSY;
2683
2684 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2685 /* QDMA needs scratch memory for internal reordering of the
2686 * descriptors
2687 */
2688 err = mtk_init_fq_dma(eth);
2689 if (err)
2690 return err;
2691 }
2692
2693 err = mtk_tx_alloc(eth);
2694 if (err)
2695 return err;
2696
2697 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2698 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2699 if (err)
2700 return err;
2701 }
2702
2703 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2704 if (err)
2705 return err;
2706
2707 if (eth->hwlro) {
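		/* HW LRO rings start at index 1 on NETSYS v1 and at index 4 on newer
		 * SoCs; allocate every remaining PDMA ring for LRO use.
		 */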
developer089e8852022-09-28 14:43:46 +08002708 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1)) ? 1 : 4;
developer77d03a72021-06-06 00:06:00 +08002709 for (; i < MTK_MAX_RX_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002710 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2711 if (err)
2712 return err;
2713 }
2714 err = mtk_hwlro_rx_init(eth);
2715 if (err)
2716 return err;
2717 }
2718
developer18f46a82021-07-20 21:08:21 +08002719 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2720 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2721 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_NORMAL);
2722 if (err)
2723 return err;
2724 }
2725 err = mtk_rss_init(eth);
2726 if (err)
2727 return err;
2728 }
2729
developerfd40db22021-04-29 10:08:25 +08002730 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2731 /* Enable random early drop and set drop threshold
2732 * automatically
2733 */
2734 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2735 FC_THRES_MIN, MTK_QDMA_FC_THRES);
2736 mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
2737 }
2738
2739 return 0;
2740}
2741
2742static void mtk_dma_free(struct mtk_eth *eth)
2743{
developere9356982022-07-04 09:03:20 +08002744 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002745 int i;
2746
2747 for (i = 0; i < MTK_MAC_COUNT; i++)
2748 if (eth->netdev[i])
2749 netdev_reset_queue(eth->netdev[i]);
2750 	if (!eth->soc->has_sram && eth->scratch_ring) {
2751 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002752 MTK_DMA_SIZE * soc->txrx.txd_size,
2753 eth->scratch_ring, eth->phy_scratch_ring);
developerfd40db22021-04-29 10:08:25 +08002754 eth->scratch_ring = NULL;
2755 eth->phy_scratch_ring = 0;
2756 }
2757 mtk_tx_clean(eth);
developerb3ce86f2022-06-30 13:31:47 +08002758 mtk_rx_clean(eth, &eth->rx_ring[0],eth->soc->has_sram);
developerfd40db22021-04-29 10:08:25 +08002759 mtk_rx_clean(eth, &eth->rx_ring_qdma,0);
2760
2761 if (eth->hwlro) {
2762 mtk_hwlro_rx_uninit(eth);
developer77d03a72021-06-06 00:06:00 +08002763
developer089e8852022-09-28 14:43:46 +08002764 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1)) ? 1 : 4;
developer77d03a72021-06-06 00:06:00 +08002765 for (; i < MTK_MAX_RX_RING_NUM; i++)
2766 mtk_rx_clean(eth, &eth->rx_ring[i], 0);
developerfd40db22021-04-29 10:08:25 +08002767 }
2768
developer18f46a82021-07-20 21:08:21 +08002769 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2770 mtk_rss_uninit(eth);
2771
2772 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
2773 mtk_rx_clean(eth, &eth->rx_ring[i], 1);
2774 }
2775
developer94008d92021-09-23 09:47:41 +08002776 if (eth->scratch_head) {
2777 kfree(eth->scratch_head);
2778 eth->scratch_head = NULL;
2779 }
developerfd40db22021-04-29 10:08:25 +08002780}
2781
2782static void mtk_tx_timeout(struct net_device *dev)
2783{
2784 struct mtk_mac *mac = netdev_priv(dev);
2785 struct mtk_eth *eth = mac->hw;
2786
2787 eth->netdev[mac->id]->stats.tx_errors++;
2788 netif_err(eth, tx_err, dev,
2789 "transmit timed out\n");
developer8051e042022-04-08 13:26:36 +08002790
2791 if (atomic_read(&reset_lock) == 0)
2792 schedule_work(&eth->pending_work);
developerfd40db22021-04-29 10:08:25 +08002793}
2794
developer18f46a82021-07-20 21:08:21 +08002795static irqreturn_t mtk_handle_irq_rx(int irq, void *priv)
developerfd40db22021-04-29 10:08:25 +08002796{
developer18f46a82021-07-20 21:08:21 +08002797 struct mtk_napi *rx_napi = priv;
2798 struct mtk_eth *eth = rx_napi->eth;
2799 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08002800
developer18f46a82021-07-20 21:08:21 +08002801 if (likely(napi_schedule_prep(&rx_napi->napi))) {
developer18f46a82021-07-20 21:08:21 +08002802 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(ring->ring_no));
developer6bbe70d2021-08-06 09:34:55 +08002803 __napi_schedule(&rx_napi->napi);
developerfd40db22021-04-29 10:08:25 +08002804 }
2805
2806 return IRQ_HANDLED;
2807}
2808
2809static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
2810{
2811 struct mtk_eth *eth = _eth;
2812
2813 if (likely(napi_schedule_prep(&eth->tx_napi))) {
developerfd40db22021-04-29 10:08:25 +08002814 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer6bbe70d2021-08-06 09:34:55 +08002815 __napi_schedule(&eth->tx_napi);
developerfd40db22021-04-29 10:08:25 +08002816 }
2817
2818 return IRQ_HANDLED;
2819}
2820
2821static irqreturn_t mtk_handle_irq(int irq, void *_eth)
2822{
2823 struct mtk_eth *eth = _eth;
2824
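	/* Shared interrupt line: dispatch to the RX and TX handlers only for
	 * sources that are both enabled in the mask and pending in the status.
	 */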
developer18f46a82021-07-20 21:08:21 +08002825 if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT(0)) {
2826 if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT(0))
2827 mtk_handle_irq_rx(irq, &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08002828 }
2829 if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
2830 if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
2831 mtk_handle_irq_tx(irq, _eth);
2832 }
2833
2834 return IRQ_HANDLED;
2835}
2836
developera2613e62022-07-01 18:29:37 +08002837static irqreturn_t mtk_handle_irq_fixed_link(int irq, void *_mac)
2838{
2839 struct mtk_mac *mac = _mac;
2840 struct mtk_eth *eth = mac->hw;
2841 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
2842 struct net_device *dev = phylink_priv->dev;
2843 int link_old, link_new;
2844
2845 // clear interrupt status for gpy211
2846 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
2847
2848 link_old = phylink_priv->link;
2849 link_new = _mtk_mdio_read(eth, phylink_priv->phyaddr, MII_BMSR) & BMSR_LSTATUS;
2850
2851 if (link_old != link_new) {
2852 phylink_priv->link = link_new;
2853 if (link_new) {
2854 printk("phylink.%d %s: Link is Up\n", phylink_priv->id, dev->name);
2855 if (dev)
2856 netif_carrier_on(dev);
2857 } else {
2858 printk("phylink.%d %s: Link is Down\n", phylink_priv->id, dev->name);
2859 if (dev)
2860 netif_carrier_off(dev);
2861 }
2862 }
2863
2864 return IRQ_HANDLED;
2865}
2866
developerfd40db22021-04-29 10:08:25 +08002867#ifdef CONFIG_NET_POLL_CONTROLLER
2868static void mtk_poll_controller(struct net_device *dev)
2869{
2870 struct mtk_mac *mac = netdev_priv(dev);
2871 struct mtk_eth *eth = mac->hw;
2872
2873 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002874 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
2875 mtk_handle_irq_rx(eth->irq[2], &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08002876 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002877 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08002878}
2879#endif
2880
2881static int mtk_start_dma(struct mtk_eth *eth)
2882{
2883 u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
developer77d03a72021-06-06 00:06:00 +08002884 int val, err;
developerfd40db22021-04-29 10:08:25 +08002885
2886 err = mtk_dma_init(eth);
2887 if (err) {
2888 mtk_dma_free(eth);
2889 return err;
2890 }
2891
2892 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
developer15d0d282021-07-14 16:40:44 +08002893 val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
developer089e8852022-09-28 14:43:46 +08002894 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2895 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer19d84562022-04-21 17:01:06 +08002896 val &= ~MTK_RESV_BUF_MASK;
developerfd40db22021-04-29 10:08:25 +08002897 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002898 val | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08002899 MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
2900 MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
2901 MTK_RESV_BUF | MTK_WCOMP_EN |
2902 MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
developer1ac65932022-07-19 17:23:32 +08002903 MTK_RX_2B_OFFSET, MTK_QDMA_GLO_CFG);
developer19d84562022-04-21 17:01:06 +08002904 }
developerfd40db22021-04-29 10:08:25 +08002905 else
2906 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002907 val | MTK_TX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08002908 MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
2909 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
2910 MTK_RX_BT_32DWORDS,
2911 MTK_QDMA_GLO_CFG);
2912
developer15d0d282021-07-14 16:40:44 +08002913 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
developerfd40db22021-04-29 10:08:25 +08002914 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002915 val | MTK_RX_DMA_EN | rx_2b_offset |
developerfd40db22021-04-29 10:08:25 +08002916 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
2917 MTK_PDMA_GLO_CFG);
2918 } else {
2919 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2920 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
2921 MTK_PDMA_GLO_CFG);
2922 }
2923
developer089e8852022-09-28 14:43:46 +08002924 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1) && eth->hwlro) {
developer77d03a72021-06-06 00:06:00 +08002925 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
2926 mtk_w32(eth, val | MTK_RX_DMA_LRO_EN, MTK_PDMA_GLO_CFG);
2927 }
2928
developerfd40db22021-04-29 10:08:25 +08002929 return 0;
2930}
2931
developer8051e042022-04-08 13:26:36 +08002932void mtk_gdm_config(struct mtk_eth *eth, u32 config)
developerfd40db22021-04-29 10:08:25 +08002933{
2934 int i;
2935
2936 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2937 return;
2938
2939 for (i = 0; i < MTK_MAC_COUNT; i++) {
2940 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
2941
2942 		/* by default, set up the forward port to send frames to the PDMA */
2943 val &= ~0xffff;
2944
2945 /* Enable RX checksum */
2946 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
2947
2948 val |= config;
2949
2950 if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
2951 val |= MTK_GDMA_SPECIAL_TAG;
2952
2953 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
2954 }
developerfd40db22021-04-29 10:08:25 +08002955}
2956
2957static int mtk_open(struct net_device *dev)
2958{
2959 struct mtk_mac *mac = netdev_priv(dev);
2960 struct mtk_eth *eth = mac->hw;
developera2613e62022-07-01 18:29:37 +08002961 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
developer18f46a82021-07-20 21:08:21 +08002962 int err, i;
developer3a5969e2022-02-09 15:36:36 +08002963 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08002964
2965 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
2966 if (err) {
2967 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
2968 err);
2969 return err;
2970 }
2971
2972 /* we run 2 netdevs on the same dma ring so we only bring it up once */
2973 if (!refcount_read(&eth->dma_refcnt)) {
2974 int err = mtk_start_dma(eth);
2975
2976 if (err)
2977 return err;
2978
2979 mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
2980
2981 /* Indicates CDM to parse the MTK special tag from CPU */
2982 if (netdev_uses_dsa(dev)) {
2983 u32 val;
2984 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
2985 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
2986 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
2987 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
2988 }
2989
2990 napi_enable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08002991 napi_enable(&eth->rx_napi[0].napi);
developerfd40db22021-04-29 10:08:25 +08002992 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002993 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
2994
2995 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2996 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2997 napi_enable(&eth->rx_napi[i].napi);
2998 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(i));
2999 }
3000 }
3001
developerfd40db22021-04-29 10:08:25 +08003002 refcount_set(&eth->dma_refcnt, 1);
3003 }
3004 else
3005 refcount_inc(&eth->dma_refcnt);
3006
developera2613e62022-07-01 18:29:37 +08003007 if (phylink_priv->desc) {
3008 /*Notice: This programming sequence is only for GPY211 single PHY chip.
3009 If single PHY chip is not GPY211, the following step you should do:
3010 1. Contact your Single PHY chip vendor and get the details of
3011 - how to enables link status change interrupt
3012 - how to clears interrupt source
3013 */
3014
3015 // clear interrupt source for gpy211
3016 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
3017
3018 // enable link status change interrupt for gpy211
3019 _mtk_mdio_write(eth, phylink_priv->phyaddr, 0x19, 0x0001);
3020
3021 phylink_priv->dev = dev;
3022
3023 // override dev pointer for single PHY chip 0
3024 if (phylink_priv->id == 0) {
3025 struct net_device *tmp;
3026
3027 tmp = __dev_get_by_name(&init_net, phylink_priv->label);
3028 if (tmp)
3029 phylink_priv->dev = tmp;
3030 else
3031 phylink_priv->dev = NULL;
3032 }
3033 }
3034
developerfd40db22021-04-29 10:08:25 +08003035 phylink_start(mac->phylink);
3036 netif_start_queue(dev);
developer3a5969e2022-02-09 15:36:36 +08003037 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer089e8852022-09-28 14:43:46 +08003038 if (!phy_node && eth->xgmii->regmap_sgmii[mac->id])
3039 regmap_write(eth->xgmii->regmap_sgmii[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
3040
developerfd40db22021-04-29 10:08:25 +08003041 return 0;
3042}
3043
3044static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3045{
3046 u32 val;
3047 int i;
3048
3049 /* stop the dma engine */
3050 spin_lock_bh(&eth->page_lock);
3051 val = mtk_r32(eth, glo_cfg);
3052 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3053 glo_cfg);
3054 spin_unlock_bh(&eth->page_lock);
3055
3056 /* wait for dma stop */
3057 for (i = 0; i < 10; i++) {
3058 val = mtk_r32(eth, glo_cfg);
3059 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
developer8051e042022-04-08 13:26:36 +08003060 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08003061 continue;
3062 }
3063 break;
3064 }
3065}
3066
3067static int mtk_stop(struct net_device *dev)
3068{
3069 struct mtk_mac *mac = netdev_priv(dev);
3070 struct mtk_eth *eth = mac->hw;
developer18f46a82021-07-20 21:08:21 +08003071 int i;
developer3a5969e2022-02-09 15:36:36 +08003072 u32 val = 0;
3073 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08003074
3075 netif_tx_disable(dev);
3076
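	/* Power down the attached PHY through BMCR if one is present, otherwise
	 * power down the SGMII PHYA used by this MAC.
	 */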
developer3a5969e2022-02-09 15:36:36 +08003077 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
3078 if (phy_node) {
3079 val = _mtk_mdio_read(eth, 0, 0);
3080 val |= BMCR_PDOWN;
3081 _mtk_mdio_write(eth, 0, 0, val);
developer089e8852022-09-28 14:43:46 +08003082 } else if (eth->xgmii->regmap_sgmii[mac->id]) {
3083 regmap_read(eth->xgmii->regmap_sgmii[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
developer3a5969e2022-02-09 15:36:36 +08003084 val |= SGMII_PHYA_PWD;
developer089e8852022-09-28 14:43:46 +08003085 regmap_write(eth->xgmii->regmap_sgmii[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
developer3a5969e2022-02-09 15:36:36 +08003086 }
3087
3088 //GMAC RX disable
3089 val = mtk_r32(eth, MTK_MAC_MCR(mac->id));
3090 mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(mac->id));
3091
3092 phylink_stop(mac->phylink);
3093
developerfd40db22021-04-29 10:08:25 +08003094 phylink_disconnect_phy(mac->phylink);
3095
3096 /* only shutdown DMA if this is the last user */
3097 if (!refcount_dec_and_test(&eth->dma_refcnt))
3098 return 0;
3099
3100 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
3101
3102 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08003103 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08003104 napi_disable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08003105 napi_disable(&eth->rx_napi[0].napi);
3106
3107 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3108 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3109 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(i));
3110 napi_disable(&eth->rx_napi[i].napi);
3111 }
3112 }
developerfd40db22021-04-29 10:08:25 +08003113
3114 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3115 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
3116 mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
3117
3118 mtk_dma_free(eth);
3119
3120 return 0;
3121}
3122
developer8051e042022-04-08 13:26:36 +08003123void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
developerfd40db22021-04-29 10:08:25 +08003124{
developer8051e042022-04-08 13:26:36 +08003125 u32 val = 0, i = 0;
developerfd40db22021-04-29 10:08:25 +08003126
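	/* Assert the requested reset bits, poll for up to 5 seconds until the
	 * hardware latches them, then deassert and let the blocks settle.
	 */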
developerfd40db22021-04-29 10:08:25 +08003127 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
developer8051e042022-04-08 13:26:36 +08003128 reset_bits, reset_bits);
3129
3130 while (i++ < 5000) {
3131 mdelay(1);
3132 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3133
3134 if ((val & reset_bits) == reset_bits) {
3135 mtk_reset_event_update(eth, MTK_EVENT_COLD_CNT);
3136 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3137 reset_bits, ~reset_bits);
3138 break;
3139 }
3140 }
3141
developerfd40db22021-04-29 10:08:25 +08003142 mdelay(10);
3143}
3144
3145static void mtk_clk_disable(struct mtk_eth *eth)
3146{
3147 int clk;
3148
3149 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3150 clk_disable_unprepare(eth->clks[clk]);
3151}
3152
3153static int mtk_clk_enable(struct mtk_eth *eth)
3154{
3155 int clk, ret;
3156
3157 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3158 ret = clk_prepare_enable(eth->clks[clk]);
3159 if (ret)
3160 goto err_disable_clks;
3161 }
3162
3163 return 0;
3164
3165err_disable_clks:
3166 while (--clk >= 0)
3167 clk_disable_unprepare(eth->clks[clk]);
3168
3169 return ret;
3170}
3171
developer18f46a82021-07-20 21:08:21 +08003172static int mtk_napi_init(struct mtk_eth *eth)
3173{
3174 struct mtk_napi *rx_napi = &eth->rx_napi[0];
3175 int i;
3176
3177 rx_napi->eth = eth;
3178 rx_napi->rx_ring = &eth->rx_ring[0];
3179 rx_napi->irq_grp_no = 2;
3180
3181 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3182 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3183 rx_napi = &eth->rx_napi[i];
3184 rx_napi->eth = eth;
3185 rx_napi->rx_ring = &eth->rx_ring[i];
3186 rx_napi->irq_grp_no = 2 + i;
3187 }
3188 }
3189
3190 return 0;
3191}
3192
developer8051e042022-04-08 13:26:36 +08003193static int mtk_hw_init(struct mtk_eth *eth, u32 type)
developerfd40db22021-04-29 10:08:25 +08003194{
developer8051e042022-04-08 13:26:36 +08003195 int i, ret = 0;
developerfd40db22021-04-29 10:08:25 +08003196
developer8051e042022-04-08 13:26:36 +08003197 pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
3198 atomic_read(&reset_lock), atomic_read(&force));
developerfd40db22021-04-29 10:08:25 +08003199
developer8051e042022-04-08 13:26:36 +08003200 if (atomic_read(&reset_lock) == 0) {
3201 if (test_and_set_bit(MTK_HW_INIT, &eth->state))
3202 return 0;
developerfd40db22021-04-29 10:08:25 +08003203
developer8051e042022-04-08 13:26:36 +08003204 pm_runtime_enable(eth->dev);
3205 pm_runtime_get_sync(eth->dev);
3206
3207 ret = mtk_clk_enable(eth);
3208 if (ret)
3209 goto err_disable_pm;
3210 }
developerfd40db22021-04-29 10:08:25 +08003211
3212 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3213 ret = device_reset(eth->dev);
3214 if (ret) {
3215 dev_err(eth->dev, "MAC reset failed!\n");
3216 goto err_disable_pm;
3217 }
3218
3219 /* enable interrupt delay for RX */
3220 mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
3221
3222 /* disable delay and normal interrupt */
3223 mtk_tx_irq_disable(eth, ~0);
3224 mtk_rx_irq_disable(eth, ~0);
3225
3226 return 0;
3227 }
3228
developer8051e042022-04-08 13:26:36 +08003229 pr_info("[%s] execute fe %s reset\n", __func__,
3230 (type == MTK_TYPE_WARM_RESET) ? "warm" : "cold");
developer545abf02021-07-15 17:47:01 +08003231
developer8051e042022-04-08 13:26:36 +08003232 if (type == MTK_TYPE_WARM_RESET)
3233 mtk_eth_warm_reset(eth);
developer545abf02021-07-15 17:47:01 +08003234 else
developer8051e042022-04-08 13:26:36 +08003235 mtk_eth_cold_reset(eth);
developer545abf02021-07-15 17:47:01 +08003236
developer089e8852022-09-28 14:43:46 +08003237 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
3238 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer545abf02021-07-15 17:47:01 +08003239 /* Set FE to PDMAv2 if necessary */
developerfd40db22021-04-29 10:08:25 +08003240 mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
developer545abf02021-07-15 17:47:01 +08003241 }
developerfd40db22021-04-29 10:08:25 +08003242
3243 if (eth->pctl) {
3244 /* Set GE2 driving and slew rate */
3245 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3246
3247 /* set GE2 TDSEL */
3248 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3249
3250 /* set GE2 TUNE */
3251 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3252 }
3253
3254	/* Set link down as the default for each GMAC. Each GMAC's own MCR is
3255	 * set up with a more appropriate value when mtk_mac_config() is
3256	 * invoked.
3257	 */
3258 for (i = 0; i < MTK_MAC_COUNT; i++)
3259 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3260
3261	/* Enable RX VLAN offloading */
developer41294e32021-05-07 16:11:23 +08003262 if (eth->soc->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
3263 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3264 else
3265 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
developerfd40db22021-04-29 10:08:25 +08003266
3267 /* enable interrupt delay for RX/TX */
3268 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
3269 mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);
3270
3271 mtk_tx_irq_disable(eth, ~0);
3272 mtk_rx_irq_disable(eth, ~0);
3273
3274 /* FE int grouping */
3275 mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
developer18f46a82021-07-20 21:08:21 +08003276 mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_PDMA_INT_GRP2);
developerfd40db22021-04-29 10:08:25 +08003277 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
developer18f46a82021-07-20 21:08:21 +08003278 mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_QDMA_INT_GRP2);
developer8051e042022-04-08 13:26:36 +08003279 mtk_w32(eth, 0x21021003, MTK_FE_INT_GRP);
developerbe971722022-05-23 13:51:05 +08003280 mtk_w32(eth, MTK_FE_INT_TSO_FAIL |
developer8051e042022-04-08 13:26:36 +08003281 MTK_FE_INT_TSO_ILLEGAL | MTK_FE_INT_TSO_ALIGN |
3282 MTK_FE_INT_RFIFO_OV | MTK_FE_INT_RFIFO_UF, MTK_FE_INT_ENABLE);
developerfd40db22021-04-29 10:08:25 +08003283
developer089e8852022-09-28 14:43:46 +08003284 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
3285 /* PSE should not drop port1, port8 and port9 packets */
3286 mtk_w32(eth, 0x00000302, PSE_NO_DROP_CFG);
3287
3288 /* GDM and CDM Threshold */
3289 mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
3290 mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
3291
3292		/* The PSE GDM3 MIB counters have incorrect hw default values,
3293		 * so the driver reads and clears them beforehand to keep
3294		 * ethtool from reporting stale MIB values.
3295		 */
3296 for (i = 0; i < MTK_STAT_OFFSET; i += 0x4)
3297 mtk_r32(eth,
3298 MTK_GDM1_TX_GBCNT + MTK_STAT_OFFSET * 2 + i);
3299 } else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfef9efd2021-06-16 18:28:09 +08003300 /* PSE Free Queue Flow Control */
3301 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3302
developer459b78e2022-07-01 17:25:10 +08003303 /* PSE should not drop port8 and port9 packets from WDMA Tx */
3304 mtk_w32(eth, 0x00000300, PSE_NO_DROP_CFG);
3305
3306		/* PSE should drop p8 and p9 packets when the WDMA Rx ring is full */
3307 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
developer81bcad32021-07-15 14:14:38 +08003308
developerfef9efd2021-06-16 18:28:09 +08003309 /* PSE config input queue threshold */
developerfd40db22021-04-29 10:08:25 +08003310 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3311 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3312 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3313 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3314 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3315 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3316 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
developerfd5f9152022-01-05 16:29:42 +08003317 mtk_w32(eth, 0x002a000e, PSE_IQ_REV(8));
developerfd40db22021-04-29 10:08:25 +08003318
developerfef9efd2021-06-16 18:28:09 +08003319 /* PSE config output queue threshold */
developerfd40db22021-04-29 10:08:25 +08003320 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3321 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3322 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3323 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3324 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3325 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3326 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3327 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
developerfef9efd2021-06-16 18:28:09 +08003328
3329 /* GDM and CDM Threshold */
3330 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3331 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3332 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3333 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3334 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3335 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
developerfd40db22021-04-29 10:08:25 +08003336 }
3337
3338 return 0;
3339
3340err_disable_pm:
3341 pm_runtime_put_sync(eth->dev);
3342 pm_runtime_disable(eth->dev);
3343
3344 return ret;
3345}
3346
3347static int mtk_hw_deinit(struct mtk_eth *eth)
3348{
3349 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
3350 return 0;
3351
3352 mtk_clk_disable(eth);
3353
3354 pm_runtime_put_sync(eth->dev);
3355 pm_runtime_disable(eth->dev);
3356
3357 return 0;
3358}
3359
3360static int __init mtk_init(struct net_device *dev)
3361{
3362 struct mtk_mac *mac = netdev_priv(dev);
3363 struct mtk_eth *eth = mac->hw;
3364 const char *mac_addr;
3365
3366 mac_addr = of_get_mac_address(mac->of_node);
3367 if (!IS_ERR(mac_addr))
3368 ether_addr_copy(dev->dev_addr, mac_addr);
3369
3370	/* If the MAC address is invalid, use a random one instead */
3371 if (!is_valid_ether_addr(dev->dev_addr)) {
3372 eth_hw_addr_random(dev);
3373 dev_err(eth->dev, "generated random MAC address %pM\n",
3374 dev->dev_addr);
3375 }
3376
3377 return 0;
3378}
3379
3380static void mtk_uninit(struct net_device *dev)
3381{
3382 struct mtk_mac *mac = netdev_priv(dev);
3383 struct mtk_eth *eth = mac->hw;
3384
3385 phylink_disconnect_phy(mac->phylink);
3386 mtk_tx_irq_disable(eth, ~0);
3387 mtk_rx_irq_disable(eth, ~0);
3388}
3389
3390static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3391{
3392 struct mtk_mac *mac = netdev_priv(dev);
3393
3394 switch (cmd) {
3395 case SIOCGMIIPHY:
3396 case SIOCGMIIREG:
3397 case SIOCSMIIREG:
3398 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
3399 default:
3400 /* default invoke the mtk_eth_dbg handler */
3401		/* by default, hand the request to the mtk_eth_dbg handler */
3402		return mtk_do_priv_ioctl(dev, ifr, cmd);
3404
3405 return -EOPNOTSUPP;
3406}
3407
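/* FE system error recovery (SER) worker: confirm a reset is really needed,
 * force the FE ports link-down, prepare the PPE/FE blocks, let Wi-Fi run its
 * own SER, stop every netdev, warm-reset the hardware, then reopen the
 * devices and restart the DMA monitor timer.
 */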
3408static void mtk_pending_work(struct work_struct *work)
3409{
3410 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
developer8051e042022-04-08 13:26:36 +08003411 struct device_node *phy_node = NULL;
3412 struct mtk_mac *mac = NULL;
3413 int err, i = 0;
developerfd40db22021-04-29 10:08:25 +08003414 unsigned long restart = 0;
developer8051e042022-04-08 13:26:36 +08003415 u32 val = 0;
3416
3417 atomic_inc(&reset_lock);
3418 val = mtk_r32(eth, MTK_FE_INT_STATUS);
3419 if (!mtk_check_reset_event(eth, val)) {
3420 atomic_dec(&reset_lock);
3421		pr_info("[%s] No need to do FE reset!\n", __func__);
3422 return;
3423 }
developerfd40db22021-04-29 10:08:25 +08003424
3425 rtnl_lock();
3426
developer8051e042022-04-08 13:26:36 +08003427	/* Disable FE P3 and P4 */
3428 val = mtk_r32(eth, MTK_FE_GLO_CFG);
3429 val |= MTK_FE_LINK_DOWN_P3;
3430 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3431 val |= MTK_FE_LINK_DOWN_P4;
3432 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3433
3434 /* Adjust PPE configurations to prepare for reset */
3435 mtk_prepare_reset_ppe(eth, 0);
3436 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3437 mtk_prepare_reset_ppe(eth, 1);
3438
3439 /* Adjust FE configurations to prepare for reset */
3440 mtk_prepare_reset_fe(eth);
3441
3442 /* Trigger Wifi SER reset */
3443 call_netdevice_notifiers(MTK_FE_START_RESET, eth->netdev[0]);
3444 rtnl_unlock();
3445 wait_for_completion_timeout(&wait_ser_done, 5000);
3446 rtnl_lock();
developerfd40db22021-04-29 10:08:25 +08003447
3448 while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
3449 cpu_relax();
3450
developer8051e042022-04-08 13:26:36 +08003451 del_timer_sync(&eth->mtk_dma_monitor_timer);
3452	pr_info("[%s] mtk_stop starts!\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003453 /* stop all devices to make sure that dma is properly shut down */
3454 for (i = 0; i < MTK_MAC_COUNT; i++) {
3455 if (!eth->netdev[i])
3456 continue;
3457 mtk_stop(eth->netdev[i]);
3458 __set_bit(i, &restart);
3459 }
developer8051e042022-04-08 13:26:36 +08003460	pr_info("[%s] mtk_stop ends!\n", __func__);
3461 mdelay(15);
developerfd40db22021-04-29 10:08:25 +08003462
3463 if (eth->dev->pins)
3464 pinctrl_select_state(eth->dev->pins->p,
3465 eth->dev->pins->default_state);
developer8051e042022-04-08 13:26:36 +08003466
3467	pr_info("[%s] mtk_hw_init starts!\n", __func__);
3468	mtk_hw_init(eth, MTK_TYPE_WARM_RESET);
3469	pr_info("[%s] mtk_hw_init ends!\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003470
3471 /* restart DMA and enable IRQs */
3472 for (i = 0; i < MTK_MAC_COUNT; i++) {
3473 if (!test_bit(i, &restart))
3474 continue;
3475 err = mtk_open(eth->netdev[i]);
3476 if (err) {
3477 netif_alert(eth, ifup, eth->netdev[i],
3478 "Driver up/down cycle failed, closing device.\n");
3479 dev_close(eth->netdev[i]);
3480 }
3481 }
3482
developer8051e042022-04-08 13:26:36 +08003483	/* Set the PPE keep-alive (KA) tick select */
3484 mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0, MTK_PPE_TB_CFG(0));
3485 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3486 mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0, MTK_PPE_TB_CFG(1));
3487
3488	/* Enable FE P3 and P4 */
3489 val = mtk_r32(eth, MTK_FE_GLO_CFG);
3490 val &= ~MTK_FE_LINK_DOWN_P3;
3491 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3492 val &= ~MTK_FE_LINK_DOWN_P4;
3493 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3494
3495 /* Power up sgmii */
3496 for (i = 0; i < MTK_MAC_COUNT; i++) {
3497 mac = netdev_priv(eth->netdev[i]);
3498 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer089e8852022-09-28 14:43:46 +08003499 if (!phy_node && eth->xgmii->regmap_sgmii[i]) {
developer8051e042022-04-08 13:26:36 +08003500 mtk_gmac_sgmii_path_setup(eth, i);
developer089e8852022-09-28 14:43:46 +08003501 regmap_write(eth->xgmii->regmap_sgmii[i], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
developer8051e042022-04-08 13:26:36 +08003502 }
3503 }
3504
3505 call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE, eth->netdev[0]);
3506	pr_info("[%s] HNAT reset done!\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003507
developer8051e042022-04-08 13:26:36 +08003508 call_netdevice_notifiers(MTK_FE_RESET_DONE, eth->netdev[0]);
3509	pr_info("[%s] WiFi SER reset done!\n", __func__);
3510
3511 atomic_dec(&reset_lock);
3512 if (atomic_read(&force) > 0)
3513 atomic_dec(&force);
3514
3515 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
3516 eth->mtk_dma_monitor_timer.expires = jiffies;
3517 add_timer(&eth->mtk_dma_monitor_timer);
developerfd40db22021-04-29 10:08:25 +08003518 clear_bit_unlock(MTK_RESETTING, &eth->state);
3519
3520 rtnl_unlock();
3521}
3522
3523static int mtk_free_dev(struct mtk_eth *eth)
3524{
3525 int i;
3526
3527 for (i = 0; i < MTK_MAC_COUNT; i++) {
3528 if (!eth->netdev[i])
3529 continue;
3530 free_netdev(eth->netdev[i]);
3531 }
3532
3533 return 0;
3534}
3535
3536static int mtk_unreg_dev(struct mtk_eth *eth)
3537{
3538 int i;
3539
3540 for (i = 0; i < MTK_MAC_COUNT; i++) {
3541 if (!eth->netdev[i])
3542 continue;
3543 unregister_netdev(eth->netdev[i]);
3544 }
3545
3546 return 0;
3547}
3548
3549static int mtk_cleanup(struct mtk_eth *eth)
3550{
3551 mtk_unreg_dev(eth);
3552 mtk_free_dev(eth);
3553 cancel_work_sync(&eth->pending_work);
3554
3555 return 0;
3556}
3557
3558static int mtk_get_link_ksettings(struct net_device *ndev,
3559 struct ethtool_link_ksettings *cmd)
3560{
3561 struct mtk_mac *mac = netdev_priv(ndev);
3562
3563 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3564 return -EBUSY;
3565
3566 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
3567}
3568
3569static int mtk_set_link_ksettings(struct net_device *ndev,
3570 const struct ethtool_link_ksettings *cmd)
3571{
3572 struct mtk_mac *mac = netdev_priv(ndev);
3573
3574 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3575 return -EBUSY;
3576
3577 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
3578}
3579
3580static void mtk_get_drvinfo(struct net_device *dev,
3581 struct ethtool_drvinfo *info)
3582{
3583 struct mtk_mac *mac = netdev_priv(dev);
3584
3585 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
3586 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
3587 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
3588}
3589
3590static u32 mtk_get_msglevel(struct net_device *dev)
3591{
3592 struct mtk_mac *mac = netdev_priv(dev);
3593
3594 return mac->hw->msg_enable;
3595}
3596
3597static void mtk_set_msglevel(struct net_device *dev, u32 value)
3598{
3599 struct mtk_mac *mac = netdev_priv(dev);
3600
3601 mac->hw->msg_enable = value;
3602}
3603
3604static int mtk_nway_reset(struct net_device *dev)
3605{
3606 struct mtk_mac *mac = netdev_priv(dev);
3607
3608 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3609 return -EBUSY;
3610
3611 if (!mac->phylink)
3612 return -ENOTSUPP;
3613
3614 return phylink_ethtool_nway_reset(mac->phylink);
3615}
3616
3617static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3618{
3619 int i;
3620
3621 switch (stringset) {
3622 case ETH_SS_STATS:
3623 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
3624 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
3625 data += ETH_GSTRING_LEN;
3626 }
3627 break;
3628 }
3629}
3630
3631static int mtk_get_sset_count(struct net_device *dev, int sset)
3632{
3633 switch (sset) {
3634 case ETH_SS_STATS:
3635 return ARRAY_SIZE(mtk_ethtool_stats);
3636 default:
3637 return -EOPNOTSUPP;
3638 }
3639}
3640
3641static void mtk_get_ethtool_stats(struct net_device *dev,
3642 struct ethtool_stats *stats, u64 *data)
3643{
3644 struct mtk_mac *mac = netdev_priv(dev);
3645 struct mtk_hw_stats *hwstats = mac->hw_stats;
3646 u64 *data_src, *data_dst;
3647 unsigned int start;
3648 int i;
3649
3650 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3651 return;
3652
3653 if (netif_running(dev) && netif_device_present(dev)) {
3654 if (spin_trylock_bh(&hwstats->stats_lock)) {
3655 mtk_stats_update_mac(mac);
3656 spin_unlock_bh(&hwstats->stats_lock);
3657 }
3658 }
3659
3660 data_src = (u64 *)hwstats;
3661
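	/* Snapshot the counters under the u64_stats sequence counter and retry
	 * if a concurrent writer updated them mid-copy.
	 */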
3662 do {
3663 data_dst = data;
3664 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
3665
3666 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
3667 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
3668 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
3669}
3670
3671static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
3672 u32 *rule_locs)
3673{
3674 int ret = -EOPNOTSUPP;
3675
3676 switch (cmd->cmd) {
3677 case ETHTOOL_GRXRINGS:
3678 if (dev->hw_features & NETIF_F_LRO) {
3679 cmd->data = MTK_MAX_RX_RING_NUM;
3680 ret = 0;
3681 }
3682 break;
3683 case ETHTOOL_GRXCLSRLCNT:
3684 if (dev->hw_features & NETIF_F_LRO) {
3685 struct mtk_mac *mac = netdev_priv(dev);
3686
3687 cmd->rule_cnt = mac->hwlro_ip_cnt;
3688 ret = 0;
3689 }
3690 break;
3691 case ETHTOOL_GRXCLSRULE:
3692 if (dev->hw_features & NETIF_F_LRO)
3693 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
3694 break;
3695 case ETHTOOL_GRXCLSRLALL:
3696 if (dev->hw_features & NETIF_F_LRO)
3697 ret = mtk_hwlro_get_fdir_all(dev, cmd,
3698 rule_locs);
3699 break;
3700 default:
3701 break;
3702 }
3703
3704 return ret;
3705}
3706
3707static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3708{
3709 int ret = -EOPNOTSUPP;
3710
3711 switch (cmd->cmd) {
3712 case ETHTOOL_SRXCLSRLINS:
3713 if (dev->hw_features & NETIF_F_LRO)
3714 ret = mtk_hwlro_add_ipaddr(dev, cmd);
3715 break;
3716 case ETHTOOL_SRXCLSRLDEL:
3717 if (dev->hw_features & NETIF_F_LRO)
3718 ret = mtk_hwlro_del_ipaddr(dev, cmd);
3719 break;
3720 default:
3721 break;
3722 }
3723
3724 return ret;
3725}
3726
developer6c5cbb52022-08-12 11:37:45 +08003727static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
3728{
3729 struct mtk_mac *mac = netdev_priv(dev);
3730
3731 phylink_ethtool_get_pauseparam(mac->phylink, pause);
3732}
3733
3734static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
3735{
3736 struct mtk_mac *mac = netdev_priv(dev);
3737
3738 return phylink_ethtool_set_pauseparam(mac->phylink, pause);
3739}
3740
developerfd40db22021-04-29 10:08:25 +08003741static const struct ethtool_ops mtk_ethtool_ops = {
3742 .get_link_ksettings = mtk_get_link_ksettings,
3743 .set_link_ksettings = mtk_set_link_ksettings,
3744 .get_drvinfo = mtk_get_drvinfo,
3745 .get_msglevel = mtk_get_msglevel,
3746 .set_msglevel = mtk_set_msglevel,
3747 .nway_reset = mtk_nway_reset,
3748 .get_link = ethtool_op_get_link,
3749 .get_strings = mtk_get_strings,
3750 .get_sset_count = mtk_get_sset_count,
3751 .get_ethtool_stats = mtk_get_ethtool_stats,
3752 .get_rxnfc = mtk_get_rxnfc,
3753 .set_rxnfc = mtk_set_rxnfc,
developer6c5cbb52022-08-12 11:37:45 +08003754 .get_pauseparam = mtk_get_pauseparam,
3755 .set_pauseparam = mtk_set_pauseparam,
developerfd40db22021-04-29 10:08:25 +08003756};
3757
3758static const struct net_device_ops mtk_netdev_ops = {
3759 .ndo_init = mtk_init,
3760 .ndo_uninit = mtk_uninit,
3761 .ndo_open = mtk_open,
3762 .ndo_stop = mtk_stop,
3763 .ndo_start_xmit = mtk_start_xmit,
3764 .ndo_set_mac_address = mtk_set_mac_address,
3765 .ndo_validate_addr = eth_validate_addr,
3766 .ndo_do_ioctl = mtk_do_ioctl,
3767 .ndo_tx_timeout = mtk_tx_timeout,
3768 .ndo_get_stats64 = mtk_get_stats64,
3769 .ndo_fix_features = mtk_fix_features,
3770 .ndo_set_features = mtk_set_features,
3771#ifdef CONFIG_NET_POLL_CONTROLLER
3772 .ndo_poll_controller = mtk_poll_controller,
3773#endif
3774};
3775
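/* Parse one "mediatek,eth-mac" child node: allocate the netdev and its
 * counters, create the phylink instance, optionally wire up a GPIO-based
 * fixed-link monitor, and hook in the driver's netdev and ethtool ops.
 */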
3776static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
3777{
3778 const __be32 *_id = of_get_property(np, "reg", NULL);
3779 struct phylink *phylink;
3780 int phy_mode, id, err;
3781 struct mtk_mac *mac;
developera2613e62022-07-01 18:29:37 +08003782 struct mtk_phylink_priv *phylink_priv;
3783 struct fwnode_handle *fixed_node;
3784 struct gpio_desc *desc;
developerfd40db22021-04-29 10:08:25 +08003785
3786 if (!_id) {
3787 dev_err(eth->dev, "missing mac id\n");
3788 return -EINVAL;
3789 }
3790
3791 id = be32_to_cpup(_id);
developerfb556ca2021-10-13 10:52:09 +08003792 if (id < 0 || id >= MTK_MAC_COUNT) {
developerfd40db22021-04-29 10:08:25 +08003793 dev_err(eth->dev, "%d is not a valid mac id\n", id);
3794 return -EINVAL;
3795 }
3796
3797 if (eth->netdev[id]) {
3798 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
3799 return -EINVAL;
3800 }
3801
3802 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
3803 if (!eth->netdev[id]) {
3804 dev_err(eth->dev, "alloc_etherdev failed\n");
3805 return -ENOMEM;
3806 }
3807 mac = netdev_priv(eth->netdev[id]);
3808 eth->mac[id] = mac;
3809 mac->id = id;
3810 mac->hw = eth;
3811 mac->of_node = np;
3812
3813 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
3814 mac->hwlro_ip_cnt = 0;
3815
3816 mac->hw_stats = devm_kzalloc(eth->dev,
3817 sizeof(*mac->hw_stats),
3818 GFP_KERNEL);
3819 if (!mac->hw_stats) {
3820 dev_err(eth->dev, "failed to allocate counter memory\n");
3821 err = -ENOMEM;
3822 goto free_netdev;
3823 }
3824 spin_lock_init(&mac->hw_stats->stats_lock);
3825 u64_stats_init(&mac->hw_stats->syncp);
3826 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
3827
3828 /* phylink create */
3829 phy_mode = of_get_phy_mode(np);
3830 if (phy_mode < 0) {
3831 dev_err(eth->dev, "incorrect phy-mode\n");
3832 err = -EINVAL;
3833 goto free_netdev;
3834 }
3835
3836 /* mac config is not set */
3837 mac->interface = PHY_INTERFACE_MODE_NA;
3838 mac->mode = MLO_AN_PHY;
3839 mac->speed = SPEED_UNKNOWN;
3840
3841 mac->phylink_config.dev = &eth->netdev[id]->dev;
3842 mac->phylink_config.type = PHYLINK_NETDEV;
3843
developer089e8852022-09-28 14:43:46 +08003844 mac->type = (phy_mode == PHY_INTERFACE_MODE_10GKR ||
3845 phy_mode == PHY_INTERFACE_MODE_USXGMII) ?
3846 MTK_XGDM_TYPE : MTK_GDM_TYPE;
3847
developerfd40db22021-04-29 10:08:25 +08003848 phylink = phylink_create(&mac->phylink_config,
3849 of_fwnode_handle(mac->of_node),
3850 phy_mode, &mtk_phylink_ops);
3851 if (IS_ERR(phylink)) {
3852 err = PTR_ERR(phylink);
3853 goto free_netdev;
3854 }
3855
3856 mac->phylink = phylink;
3857
developera2613e62022-07-01 18:29:37 +08003858 fixed_node = fwnode_get_named_child_node(of_fwnode_handle(mac->of_node),
3859 "fixed-link");
3860 if (fixed_node) {
3861 desc = fwnode_get_named_gpiod(fixed_node, "link-gpio",
3862 0, GPIOD_IN, "?");
3863 if (!IS_ERR(desc)) {
3864 struct device_node *phy_np;
3865 const char *label;
3866 int irq, phyaddr;
3867
3868 phylink_priv = &mac->phylink_priv;
3869
3870 phylink_priv->desc = desc;
3871 phylink_priv->id = id;
3872 phylink_priv->link = -1;
3873
3874 irq = gpiod_to_irq(desc);
3875 if (irq > 0) {
3876 devm_request_irq(eth->dev, irq, mtk_handle_irq_fixed_link,
3877 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
3878 "ethernet:fixed link", mac);
3879 }
3880
3881 if (!of_property_read_string(to_of_node(fixed_node), "label", &label))
3882 strcpy(phylink_priv->label, label);
3883
3884 phy_np = of_parse_phandle(to_of_node(fixed_node), "phy-handle", 0);
3885 if (phy_np) {
3886 if (!of_property_read_u32(phy_np, "reg", &phyaddr))
3887 phylink_priv->phyaddr = phyaddr;
3888 }
3889 }
3890 fwnode_handle_put(fixed_node);
3891 }
3892
developerfd40db22021-04-29 10:08:25 +08003893 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
3894 eth->netdev[id]->watchdog_timeo = 5 * HZ;
3895 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
3896 eth->netdev[id]->base_addr = (unsigned long)eth->base;
3897
3898 eth->netdev[id]->hw_features = eth->soc->hw_features;
3899 if (eth->hwlro)
3900 eth->netdev[id]->hw_features |= NETIF_F_LRO;
3901
3902 eth->netdev[id]->vlan_features = eth->soc->hw_features &
3903 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
3904 eth->netdev[id]->features |= eth->soc->hw_features;
3905 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
3906
3907 eth->netdev[id]->irq = eth->irq[0];
3908 eth->netdev[id]->dev.of_node = np;
3909
3910 return 0;
3911
3912free_netdev:
3913 free_netdev(eth->netdev[id]);
3914 return err;
3915}
3916
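/* Probe: map the FE (and optional SRAM) regions, look up the syscon regmaps,
 * clocks and IRQs, cold-init the hardware, create one netdev per MAC child
 * node, request the interrupts and register the NAPI instances.
 */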
3917static int mtk_probe(struct platform_device *pdev)
3918{
3919 struct device_node *mac_np;
3920 struct mtk_eth *eth;
3921 int err, i;
3922
3923 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
3924 if (!eth)
3925 return -ENOMEM;
3926
3927 eth->soc = of_device_get_match_data(&pdev->dev);
3928
3929 eth->dev = &pdev->dev;
3930 eth->base = devm_platform_ioremap_resource(pdev, 0);
3931 if (IS_ERR(eth->base))
3932 return PTR_ERR(eth->base);
3933
developer089e8852022-09-28 14:43:46 +08003934 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
3935 eth->sram_base = devm_platform_ioremap_resource(pdev, 1);
3936 if (IS_ERR(eth->sram_base))
3937 return PTR_ERR(eth->sram_base);
3938 }
3939
developerfd40db22021-04-29 10:08:25 +08003940	if (eth->soc->has_sram) {
3941 struct resource *res;
3942 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
developer4c32b7a2021-11-13 16:46:43 +08003943 if (unlikely(!res))
3944 return -EINVAL;
developerfd40db22021-04-29 10:08:25 +08003945 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
3946 }
3947
3948 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3949 eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
3950 eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
3951 } else {
3952 eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
3953 eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
3954 }
3955
3956 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3957 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
3958 eth->ip_align = NET_IP_ALIGN;
3959 } else {
developer089e8852022-09-28 14:43:46 +08003960 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
3961 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developerfd40db22021-04-29 10:08:25 +08003962 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_V2;
3963 else
3964 eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
3965 }
3966
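	/* SoCs that can address DRAM above the 4 GB boundary need a 36-bit
	 * DMA and coherent mask.
	 */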
developer089e8852022-09-28 14:43:46 +08003967 if (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) {
3968 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
3969 if (!err) {
3970 err = dma_set_coherent_mask(&pdev->dev,
3971 DMA_BIT_MASK(36));
3972 if (err) {
3973 dev_err(&pdev->dev, "Wrong DMA config\n");
3974 return -EINVAL;
3975 }
3976 }
3977 }
3978
developerfd40db22021-04-29 10:08:25 +08003979 spin_lock_init(&eth->page_lock);
3980 spin_lock_init(&eth->tx_irq_lock);
3981 spin_lock_init(&eth->rx_irq_lock);
developerd82e8372022-02-09 15:00:09 +08003982 spin_lock_init(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +08003983
3984 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3985 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3986 "mediatek,ethsys");
3987 if (IS_ERR(eth->ethsys)) {
3988 dev_err(&pdev->dev, "no ethsys regmap found\n");
3989 return PTR_ERR(eth->ethsys);
3990 }
3991 }
3992
3993 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
3994 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3995 "mediatek,infracfg");
3996 if (IS_ERR(eth->infra)) {
3997 dev_err(&pdev->dev, "no infracfg regmap found\n");
3998 return PTR_ERR(eth->infra);
3999 }
4000 }
4001
4002 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
developer089e8852022-09-28 14:43:46 +08004003 eth->xgmii = devm_kzalloc(eth->dev, sizeof(*eth->xgmii),
developerfd40db22021-04-29 10:08:25 +08004004 GFP_KERNEL);
developer089e8852022-09-28 14:43:46 +08004005 if (!eth->xgmii)
developerfd40db22021-04-29 10:08:25 +08004006 return -ENOMEM;
4007
developer089e8852022-09-28 14:43:46 +08004008 eth->xgmii->eth = eth;
4009 err = mtk_sgmii_init(eth->xgmii, pdev->dev.of_node,
developerfd40db22021-04-29 10:08:25 +08004010 eth->soc->ana_rgc3);
4011
developer089e8852022-09-28 14:43:46 +08004012 if (err)
4013 return err;
4014 }
4015
4016 if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
4017 err = mtk_usxgmii_init(eth->xgmii, pdev->dev.of_node);
4018 if (err)
4019 return err;
4020
4021 err = mtk_xfi_pextp_init(eth->xgmii, pdev->dev.of_node);
4022 if (err)
4023 return err;
4024
4025 err = mtk_xfi_pll_init(eth->xgmii, pdev->dev.of_node);
4026 if (err)
4027 return err;
4028
4029 err = mtk_toprgu_init(eth, pdev->dev.of_node);
developerfd40db22021-04-29 10:08:25 +08004030 if (err)
4031 return err;
4032 }
4033
4034 if (eth->soc->required_pctl) {
4035 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4036 "mediatek,pctl");
4037 if (IS_ERR(eth->pctl)) {
4038 dev_err(&pdev->dev, "no pctl regmap found\n");
4039 return PTR_ERR(eth->pctl);
4040 }
4041 }
4042
developer18f46a82021-07-20 21:08:21 +08004043 for (i = 0; i < MTK_MAX_IRQ_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08004044 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4045 eth->irq[i] = eth->irq[0];
4046 else
4047 eth->irq[i] = platform_get_irq(pdev, i);
4048 if (eth->irq[i] < 0) {
4049 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4050 return -ENXIO;
4051 }
4052 }
4053
4054 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4055 eth->clks[i] = devm_clk_get(eth->dev,
4056 mtk_clks_source_name[i]);
4057 if (IS_ERR(eth->clks[i])) {
4058 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
4059 return -EPROBE_DEFER;
4060 if (eth->soc->required_clks & BIT(i)) {
4061 dev_err(&pdev->dev, "clock %s not found\n",
4062 mtk_clks_source_name[i]);
4063 return -EINVAL;
4064 }
4065 eth->clks[i] = NULL;
4066 }
4067 }
4068
4069 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4070 INIT_WORK(&eth->pending_work, mtk_pending_work);
4071
developer8051e042022-04-08 13:26:36 +08004072 err = mtk_hw_init(eth, MTK_TYPE_COLD_RESET);
developerfd40db22021-04-29 10:08:25 +08004073 if (err)
4074 return err;
4075
4076 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4077
4078 for_each_child_of_node(pdev->dev.of_node, mac_np) {
4079 if (!of_device_is_compatible(mac_np,
4080 "mediatek,eth-mac"))
4081 continue;
4082
4083 if (!of_device_is_available(mac_np))
4084 continue;
4085
4086 err = mtk_add_mac(eth, mac_np);
4087 if (err) {
4088 of_node_put(mac_np);
4089 goto err_deinit_hw;
4090 }
4091 }
4092
developer18f46a82021-07-20 21:08:21 +08004093 err = mtk_napi_init(eth);
4094 if (err)
4095 goto err_free_dev;
4096
developerfd40db22021-04-29 10:08:25 +08004097 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4098 err = devm_request_irq(eth->dev, eth->irq[0],
4099 mtk_handle_irq, 0,
4100 dev_name(eth->dev), eth);
4101 } else {
4102 err = devm_request_irq(eth->dev, eth->irq[1],
4103 mtk_handle_irq_tx, 0,
4104 dev_name(eth->dev), eth);
4105 if (err)
4106 goto err_free_dev;
4107
4108 err = devm_request_irq(eth->dev, eth->irq[2],
4109 mtk_handle_irq_rx, 0,
developer18f46a82021-07-20 21:08:21 +08004110 dev_name(eth->dev), &eth->rx_napi[0]);
4111 if (err)
4112 goto err_free_dev;
4113
developer793f7b42022-05-20 13:54:51 +08004114 if (MTK_MAX_IRQ_NUM > 3) {
4115 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
4116 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
4117 err = devm_request_irq(eth->dev,
4118 eth->irq[2 + i],
4119 mtk_handle_irq_rx, 0,
4120 dev_name(eth->dev),
4121 &eth->rx_napi[i]);
4122 if (err)
4123 goto err_free_dev;
4124 }
4125 } else {
4126 err = devm_request_irq(eth->dev, eth->irq[3],
4127 mtk_handle_fe_irq, 0,
4128 dev_name(eth->dev), eth);
developer18f46a82021-07-20 21:08:21 +08004129 if (err)
4130 goto err_free_dev;
4131 }
4132 }
developerfd40db22021-04-29 10:08:25 +08004133 }
developer8051e042022-04-08 13:26:36 +08004134
developerfd40db22021-04-29 10:08:25 +08004135 if (err)
4136 goto err_free_dev;
4137
4138 /* No MT7628/88 support yet */
4139 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4140 err = mtk_mdio_init(eth);
4141 if (err)
4142 goto err_free_dev;
4143 }
4144
4145 for (i = 0; i < MTK_MAX_DEVS; i++) {
4146 if (!eth->netdev[i])
4147 continue;
4148
4149 err = register_netdev(eth->netdev[i]);
4150 if (err) {
4151 dev_err(eth->dev, "error bringing up device\n");
4152 goto err_deinit_mdio;
4153 } else
4154 netif_info(eth, probe, eth->netdev[i],
4155 "mediatek frame engine at 0x%08lx, irq %d\n",
4156 eth->netdev[i]->base_addr, eth->irq[0]);
4157 }
4158
4159 /* we run 2 devices on the same DMA ring so we need a dummy device
4160 * for NAPI to work
4161 */
4162 init_dummy_netdev(&eth->dummy_dev);
4163 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
4164 MTK_NAPI_WEIGHT);
developer18f46a82021-07-20 21:08:21 +08004165 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[0].napi, mtk_napi_rx,
developerfd40db22021-04-29 10:08:25 +08004166 MTK_NAPI_WEIGHT);
4167
developer18f46a82021-07-20 21:08:21 +08004168 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
4169 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
4170 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[i].napi,
4171 mtk_napi_rx, MTK_NAPI_WEIGHT);
4172 }
4173
developerfd40db22021-04-29 10:08:25 +08004174 mtketh_debugfs_init(eth);
4175 debug_proc_init(eth);
4176
4177 platform_set_drvdata(pdev, eth);
4178
developer8051e042022-04-08 13:26:36 +08004179 register_netdevice_notifier(&mtk_eth_netdevice_nb);
developer793f7b42022-05-20 13:54:51 +08004180#if defined(CONFIG_MEDIATEK_NETSYS_V2)
developer8051e042022-04-08 13:26:36 +08004181 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
4182 eth->mtk_dma_monitor_timer.expires = jiffies;
4183 add_timer(&eth->mtk_dma_monitor_timer);
developer793f7b42022-05-20 13:54:51 +08004184#endif
developer8051e042022-04-08 13:26:36 +08004185
developerfd40db22021-04-29 10:08:25 +08004186 return 0;
4187
4188err_deinit_mdio:
4189 mtk_mdio_cleanup(eth);
4190err_free_dev:
4191 mtk_free_dev(eth);
4192err_deinit_hw:
4193 mtk_hw_deinit(eth);
4194
4195 return err;
4196}
4197
4198static int mtk_remove(struct platform_device *pdev)
4199{
4200 struct mtk_eth *eth = platform_get_drvdata(pdev);
4201 struct mtk_mac *mac;
4202 int i;
4203
4204 /* stop all devices to make sure that dma is properly shut down */
4205 for (i = 0; i < MTK_MAC_COUNT; i++) {
4206 if (!eth->netdev[i])
4207 continue;
4208 mtk_stop(eth->netdev[i]);
4209 mac = netdev_priv(eth->netdev[i]);
4210 phylink_disconnect_phy(mac->phylink);
4211 }
4212
4213 mtk_hw_deinit(eth);
4214
4215 netif_napi_del(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08004216 netif_napi_del(&eth->rx_napi[0].napi);
4217
4218 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
4219 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
4220 netif_napi_del(&eth->rx_napi[i].napi);
4221 }
4222
developerfd40db22021-04-29 10:08:25 +08004223 mtk_cleanup(eth);
4224 mtk_mdio_cleanup(eth);
developer8051e042022-04-08 13:26:36 +08004225 unregister_netdevice_notifier(&mtk_eth_netdevice_nb);
4226 del_timer_sync(&eth->mtk_dma_monitor_timer);
developerfd40db22021-04-29 10:08:25 +08004227
4228 return 0;
4229}
4230
4231static const struct mtk_soc_data mt2701_data = {
4232 .caps = MT7623_CAPS | MTK_HWLRO,
4233 .hw_features = MTK_HW_FEATURES,
4234 .required_clks = MT7623_CLKS_BITMAP,
4235 .required_pctl = true,
4236 .has_sram = false,
developere9356982022-07-04 09:03:20 +08004237 .txrx = {
4238 .txd_size = sizeof(struct mtk_tx_dma),
4239 .rxd_size = sizeof(struct mtk_rx_dma),
4240 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4241 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
4242 },
developerfd40db22021-04-29 10:08:25 +08004243};
4244
4245static const struct mtk_soc_data mt7621_data = {
4246 .caps = MT7621_CAPS,
4247 .hw_features = MTK_HW_FEATURES,
4248 .required_clks = MT7621_CLKS_BITMAP,
4249 .required_pctl = false,
4250 .has_sram = false,
developere9356982022-07-04 09:03:20 +08004251 .txrx = {
4252 .txd_size = sizeof(struct mtk_tx_dma),
4253 .rxd_size = sizeof(struct mtk_rx_dma),
4254 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4255 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
4256 },
developerfd40db22021-04-29 10:08:25 +08004257};
4258
4259static const struct mtk_soc_data mt7622_data = {
4260 .ana_rgc3 = 0x2028,
4261 .caps = MT7622_CAPS | MTK_HWLRO,
4262 .hw_features = MTK_HW_FEATURES,
4263 .required_clks = MT7622_CLKS_BITMAP,
4264 .required_pctl = false,
4265 .has_sram = false,
developere9356982022-07-04 09:03:20 +08004266 .txrx = {
4267 .txd_size = sizeof(struct mtk_tx_dma),
4268 .rxd_size = sizeof(struct mtk_rx_dma),
4269 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4270 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
4271 },
developerfd40db22021-04-29 10:08:25 +08004272};
4273
4274static const struct mtk_soc_data mt7623_data = {
4275 .caps = MT7623_CAPS | MTK_HWLRO,
4276 .hw_features = MTK_HW_FEATURES,
4277 .required_clks = MT7623_CLKS_BITMAP,
4278 .required_pctl = true,
4279 .has_sram = false,
developere9356982022-07-04 09:03:20 +08004280 .txrx = {
4281 .txd_size = sizeof(struct mtk_tx_dma),
4282 .rxd_size = sizeof(struct mtk_rx_dma),
4283 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4284 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
4285 },
developerfd40db22021-04-29 10:08:25 +08004286};
4287
4288static const struct mtk_soc_data mt7629_data = {
4289 .ana_rgc3 = 0x128,
4290 .caps = MT7629_CAPS | MTK_HWLRO,
4291 .hw_features = MTK_HW_FEATURES,
4292 .required_clks = MT7629_CLKS_BITMAP,
4293 .required_pctl = false,
4294 .has_sram = false,
developere9356982022-07-04 09:03:20 +08004295 .txrx = {
4296 .txd_size = sizeof(struct mtk_tx_dma),
4297 .rxd_size = sizeof(struct mtk_rx_dma),
4298 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4299 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
4300 },
developerfd40db22021-04-29 10:08:25 +08004301};
4302
4303static const struct mtk_soc_data mt7986_data = {
4304 .ana_rgc3 = 0x128,
4305 .caps = MT7986_CAPS,
developercba5f4e2021-05-06 14:01:53 +08004306 .hw_features = MTK_HW_FEATURES,
developerfd40db22021-04-29 10:08:25 +08004307 .required_clks = MT7986_CLKS_BITMAP,
4308 .required_pctl = false,
4309 .has_sram = true,
developere9356982022-07-04 09:03:20 +08004310 .txrx = {
4311 .txd_size = sizeof(struct mtk_tx_dma_v2),
4312 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4313 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4314 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
4315 },
developerfd40db22021-04-29 10:08:25 +08004316};
4317
developer255bba22021-07-27 15:16:33 +08004318static const struct mtk_soc_data mt7981_data = {
4319 .ana_rgc3 = 0x128,
4320 .caps = MT7981_CAPS,
developer7377b0b2021-11-18 14:54:47 +08004321 .hw_features = MTK_HW_FEATURES,
developer255bba22021-07-27 15:16:33 +08004322 .required_clks = MT7981_CLKS_BITMAP,
4323 .required_pctl = false,
4324 .has_sram = true,
developere9356982022-07-04 09:03:20 +08004325 .txrx = {
4326 .txd_size = sizeof(struct mtk_tx_dma_v2),
4327 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4328 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4329 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
4330 },
developer255bba22021-07-27 15:16:33 +08004331};
4332
developer089e8852022-09-28 14:43:46 +08004333static const struct mtk_soc_data mt7988_data = {
4334 .ana_rgc3 = 0x128,
4335 .caps = MT7988_CAPS,
4336 .hw_features = MTK_HW_FEATURES,
4337 .required_clks = MT7988_CLKS_BITMAP,
4338 .required_pctl = false,
4339 .has_sram = true,
4340 .txrx = {
4341 .txd_size = sizeof(struct mtk_tx_dma_v2),
4342 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4343 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4344 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
4345 },
4346};
4347
developerfd40db22021-04-29 10:08:25 +08004348static const struct mtk_soc_data rt5350_data = {
4349 .caps = MT7628_CAPS,
4350 .hw_features = MTK_HW_FEATURES_MT7628,
4351 .required_clks = MT7628_CLKS_BITMAP,
4352 .required_pctl = false,
4353 .has_sram = false,
developere9356982022-07-04 09:03:20 +08004354 .txrx = {
4355 .txd_size = sizeof(struct mtk_tx_dma),
4356 .rxd_size = sizeof(struct mtk_rx_dma),
4357 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4358 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
4359 },
developerfd40db22021-04-29 10:08:25 +08004360};
4361
4362const struct of_device_id of_mtk_match[] = {
4363 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
4364 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
4365 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
4366 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
4367 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
4368 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
developer255bba22021-07-27 15:16:33 +08004369 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
developer089e8852022-09-28 14:43:46 +08004370 { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data},
developerfd40db22021-04-29 10:08:25 +08004371 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
4372 {},
4373};
4374MODULE_DEVICE_TABLE(of, of_mtk_match);
4375
4376static struct platform_driver mtk_driver = {
4377 .probe = mtk_probe,
4378 .remove = mtk_remove,
4379 .driver = {
4380 .name = "mtk_soc_eth",
4381 .of_match_table = of_mtk_match,
4382 },
4383};
4384
4385module_platform_driver(mtk_driver);
4386
4387MODULE_LICENSE("GPL");
4388MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
4389MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");