// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>

#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
#include "mtk_eth_reset.h"

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
#include "mtk_hnat/nf_hnat_mtk.h"
#endif

static int mtk_msg_level = -1;
atomic_t reset_lock = ATOMIC_INIT(0);
atomic_t force = ATOMIC_INIT(0);

module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
DECLARE_COMPLETION(wait_ser_done);

#define MTK_ETHTOOL_STAT(x) { #x, \
			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
	"sgmii_ck", "eth2pll", "wocpu0", "wocpu1",
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
	u32 val;

	val = mtk_r32(eth, reg);
	val &= ~mask;
	val |= set;
	mtk_w32(eth, val, reg);
	return reg;
}

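/* Poll MTK_PHY_IAC until the previous MDIO transaction has completed, or
 * give up and report a timeout after PHY_IAC_TIMEOUT.
 */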
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		cond_resched();
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

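/* Raw MDIO accessors used by the MDIO bus callbacks below. Registers flagged
 * with MII_ADDR_C45 are accessed with a clause-45 address cycle followed by a
 * data cycle; everything else uses a plain clause-22 transaction on
 * MTK_PHY_IAC.
 */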
u32 _mtk_mdio_write(struct mtk_eth *eth, int phy_addr,
		    int phy_reg, u16 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return -1;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_WRITE |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return 0xffff;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_READ_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

static int mtk_mdio_reset(struct mii_bus *bus)
{
	/* mdiobus_register() will trigger a reset pulse when enabling bus
	 * reset, so we just need to wait until the device is ready.
	 */
	mdelay(20);

	return 0;
}

static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
				     phy_interface_t interface)
{
	u32 val;

	/* Check DDR memory type.
	 * Currently TRGMII mode with DDR2 memory is not supported.
	 */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
	if (interface == PHY_INTERFACE_MODE_TRGMII &&
	    val & SYSCFG_DRAM_TYPE_DDR2) {
		dev_err(eth->dev,
			"TRGMII mode with DDR2 memory is not supported!\n");
		return -EOPNOTSUPP;
	}

	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_MT7621_MASK, val);

	return 0;
}

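/* Program INTF_MODE and the TRGMII PLL rate for GMAC0: 500 MHz when running
 * in TRGMII mode, otherwise 250 MHz or 500 MHz depending on the RGMII speed.
 */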
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
				   phy_interface_t interface, int speed)
{
	u32 val;
	int ret;

	if (interface == PHY_INTERFACE_MODE_TRGMII) {
		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
		val = 500000000;
		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
		if (ret)
			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
		return;
	}

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

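/* phylink .mac_config callback: selects the SoC pin/path muxing for the
 * requested PHY interface mode, maps the GMAC onto SGMIISYS when needed, and
 * programs the per-MAC MCR register (speed, duplex, flow control).
 */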
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	u32 mcr_cur, mcr_new, sid, i;
	int val, ge_mode, err = 0;

	/* MT76x8 has no hardware settings for the MAC */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
	    mac->interface != state->interface) {
		/* Setup soc pin functions */
		switch (state->interface) {
		case PHY_INTERFACE_MODE_TRGMII:
			if (mac->id)
				goto err_phy;
			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
					  MTK_GMAC1_TRGMII))
				goto err_phy;
			/* fall through */
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_REVMII:
		case PHY_INTERFACE_MODE_RMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
		case PHY_INTERFACE_MODE_2500BASEX:
		case PHY_INTERFACE_MODE_SGMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_GMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
				err = mtk_gmac_gephy_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		default:
			goto err_phy;
		}

		/* Setup clock for 1st gmac */
		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
		    !phy_interface_mode_is_8023z(state->interface) &&
		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
			if (MTK_HAS_CAPS(mac->hw->soc->caps,
					 MTK_TRGMII_MT7621_CLK)) {
				if (mt7621_gmac0_rgmii_adjust(mac->hw,
							      state->interface))
					goto err_phy;
			} else {
				mtk_gmac0_rgmii_adjust(mac->hw,
						       state->interface,
						       state->speed);

				/* mt7623_pad_clk_setup */
				for (i = 0; i < NUM_TRGMII_CTRL; i++)
					mtk_w32(mac->hw,
						TD_DM_DRVP(8) | TD_DM_DRVN(8),
						TRGMII_TD_ODT(i));

				/* Assert/release MT7623 RXC reset */
				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
					TRGMII_RCK_CTRL);
				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
			}
		}

		ge_mode = 0;
		switch (state->interface) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_GMII:
			ge_mode = 1;
			break;
		case PHY_INTERFACE_MODE_REVMII:
			ge_mode = 2;
			break;
		case PHY_INTERFACE_MODE_RMII:
			if (mac->id)
				goto err_phy;
			ge_mode = 3;
			break;
		default:
			break;
		}

		/* put the gmac into the right mode */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
		spin_unlock(&eth->syscfg0_lock);

		mac->interface = state->interface;
	}

	/* SGMII */
	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path from GMAC to SGMII will be enabled once the
		 * SGMIISYS setup is done.
		 */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK,
				   ~(u32)SYSCFG0_SGMII_MASK);

		/* Decide how GMAC and SGMIISYS are mapped */
		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
		       0 : mac->id;

		/* Setup SGMIISYS with the determined property */
		if (state->interface != PHY_INTERFACE_MODE_SGMII)
			err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
							 state);
		else
			err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);

		if (err) {
			spin_unlock(&eth->syscfg0_lock);
			goto init_err;
		}

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, val);
		spin_unlock(&eth->syscfg0_lock);
	} else if (phylink_autoneg_inband(mode)) {
		dev_err(eth->dev,
			"In-band mode not supported in non SGMII mode!\n");
		return;
	}

	/* Setup gmac */
	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
	mcr_new = mcr_cur;
	mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
		     MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
		     MAC_MCR_FORCE_RX_FC);
	mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;

	switch (state->speed) {
	case SPEED_2500:
	case SPEED_1000:
		mcr_new |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr_new |= MAC_MCR_SPEED_100;
		break;
	}
	if (state->duplex == DUPLEX_FULL) {
		mcr_new |= MAC_MCR_FORCE_DPX;
		if (state->pause & MLO_PAUSE_TX)
			mcr_new |= MAC_MCR_FORCE_TX_FC;
		if (state->pause & MLO_PAUSE_RX)
			mcr_new |= MAC_MCR_FORCE_RX_FC;
	}

	/* Only update control register when needed! */
	if (mcr_new != mcr_cur)
		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));

	return;

err_phy:
	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
		mac->id, phy_modes(state->interface));
	return;

init_err:
	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
		mac->id, phy_modes(state->interface), err);
}

static int mtk_mac_link_state(struct phylink_config *config,
			      struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));

	state->link = (pmsr & MAC_MSR_LINK);
	state->duplex = (pmsr & MAC_MSR_DPX) >> 1;

	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
	case 0:
		state->speed = SPEED_10;
		break;
	case MAC_MSR_SPEED_100:
		state->speed = SPEED_100;
		break;
	case MAC_MSR_SPEED_1000:
		state->speed = SPEED_1000;
		break;
	default:
		state->speed = SPEED_UNKNOWN;
		break;
	}

	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
	if (pmsr & MAC_MSR_RX_FC)
		state->pause |= MLO_PAUSE_RX;
	if (pmsr & MAC_MSR_TX_FC)
		state->pause |= MLO_PAUSE_TX;

	return 1;
}

static void mtk_mac_an_restart(struct phylink_config *config)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	mtk_sgmii_restart_an(mac->hw, mac->id);
}

static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));

	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}

static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
			    phy_interface_t interface,
			    struct phy_device *phy)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));

	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}

static void mtk_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
	      phy_interface_mode_is_rgmii(state->interface)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
	       phy_interface_mode_is_8023z(state->interface)))) {
		linkmode_zero(supported);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_TRGMII:
		phylink_set(mask, 1000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseT_Half);
		/* fall through */
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_REVMII:
	case PHY_INTERFACE_MODE_NA:
	default:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		break;
	}

	if (state->interface == PHY_INTERFACE_MODE_NA) {
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
			phylink_set(mask, 1000baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
		}
	}

	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}

static const struct phylink_mac_ops mtk_phylink_ops = {
	.validate = mtk_validate,
	.mac_link_state = mtk_mac_link_state,
	.mac_an_restart = mtk_mac_an_restart,
	.mac_config = mtk_mac_config,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
};

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->reset = mtk_mdio_reset;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	if (snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np) < 0) {
		ret = -ENOMEM;
		goto err_put_node;
	}
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

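/* Write the station MAC address into the GDMA (or MT7628 SDM) address
 * registers; rejected with -EBUSY while a hardware reset is in progress.
 */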
static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MT7628_SDM_MAC_ADRH);
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MT7628_SDM_MAC_ADRL);
	} else {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MTK_GDMA_MAC_ADRH(mac->id));
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MTK_GDMA_MAC_ADRL(mac->id));
	}
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats = mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
		mtk_r32(mac->hw, base + 0x24);
	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
	stats = mtk_r32(mac->hw, base + 0x34);
	if (stats)
		hw_stats->tx_bytes += (stats << 32);
	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

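/* Snapshot a hardware RX descriptor (dma_rxd) into rxd; returns false when
 * the DMA_DONE bit is not yet set. NETSYS_V2 descriptors carry two extra
 * words (rxd5/rxd6).
 */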
static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
			    struct mtk_rx_dma_v2 *dma_rxd)
{
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	if (!(rxd->rxd2 & RX_DMA_DONE))
		return false;

	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
	}

	return true;
}

/* the QDMA core needs scratch memory to be set up */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	if (!eth->soc->has_sram) {
		eth->scratch_ring = dma_alloc_coherent(eth->dev,
						       cnt * soc->txrx.txd_size,
						       &eth->phy_scratch_ring,
						       GFP_KERNEL);
	} else {
		eth->scratch_ring = eth->base + MTK_ETH_SRAM_OFFSET;
	}

	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);

	for (i = 0; i < cnt; i++) {
		struct mtk_tx_dma_v2 *txd;

		txd = eth->scratch_ring + i * soc->txrx.txd_size;
		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
		if (i < cnt - 1)
			txd->txd2 = eth->phy_scratch_ring +
				    (i + 1) * soc->txrx.txd_size;

		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
		txd->txd4 = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
			txd->txd5 = 0;
			txd->txd6 = 0;
			txd->txd7 = 0;
			txd->txd8 = 0;
		}
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}

static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	return ring->dma + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    void *txd, u32 txd_size)
{
	int idx = (txd - ring->dma) / txd_size;

	return &ring->buf[idx];
}

static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
				       void *dma)
{
	return ring->dma_pdma - ring->dma + dma;
}

static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
{
	return (dma - ring->dma) / txd_size;
}

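/* Undo the DMA mapping(s) recorded in a TX buffer slot and free the attached
 * skb, via napi_consume_skb() when called from NAPI context.
 */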
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 bool napi)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
			dma_unmap_single(eth->dev,
					 dma_unmap_addr(tx_buf, dma_addr0),
					 dma_unmap_len(tx_buf, dma_len0),
					 DMA_TO_DEVICE);
		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}
	} else {
		if (dma_unmap_len(tx_buf, dma_len0)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}

		if (dma_unmap_len(tx_buf, dma_len1)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr1),
				       dma_unmap_len(tx_buf, dma_len1),
				       DMA_TO_DEVICE);
		}
	}

	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
		if (napi)
			napi_consume_skb(tx_buf->skb, napi);
		else
			dev_kfree_skb_any(tx_buf->skb);
	}
	tx_buf->skb = NULL;
}

static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
			 size_t size, int idx)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
		dma_unmap_len_set(tx_buf, dma_len0, size);
	} else {
		if (idx & 1) {
			txd->txd3 = mapped_addr;
			txd->txd2 |= TX_DMA_PLEN1(size);
			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len1, size);
		} else {
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			txd->txd1 = mapped_addr;
			txd->txd2 = TX_DMA_PLEN0(size);
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, size);
		}
	}
}

static void mtk_tx_set_dma_desc_v1(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *desc = txd;
	u32 data;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_SWC | QID_LOW_BITS(info->qid) | TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data);

	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
	data |= QID_HIGH_BITS(info->qid);
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM;
		/* vlan header offload */
		if (info->vlan)
			data |= TX_DMA_INS_VLAN | info->vlan_tci;
	}

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0x7 << TX_DMA_FPORT_SHIFT);
		data |= 0x4 << TX_DMA_FPORT_SHIFT;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);
}

static void mtk_tx_set_dma_desc_v2(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma_v2 *desc = txd;
	u32 data = 0;

	if (!info->qid && mac->id)
		info->qid = MTK_QDMA_GMAC2_QID;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data);

	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
		data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);

	data = 0;
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO_V2;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM_V2;
	}
	WRITE_ONCE(desc->txd5, data);

	data = 0;
	if (info->first && info->vlan)
		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
	WRITE_ONCE(desc->txd6, data);

	WRITE_ONCE(desc->txd7, 0);
	WRITE_ONCE(desc->txd8, 0);
}

static void mtk_tx_set_dma_desc(struct sk_buff *skb, struct net_device *dev, void *txd,
				struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		mtk_tx_set_dma_desc_v2(skb, dev, txd, info);
	else
		mtk_tx_set_dma_desc_v1(skb, dev, txd, info);
}

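/* Map the linear part and every fragment of an skb onto free TX descriptors,
 * fill them via mtk_tx_set_dma_desc() and kick the QDMA/PDMA TX pointer. On a
 * mapping failure all descriptors claimed so far are unwound again.
 */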
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_tx_dma_desc_info txd_info = {
		.size = skb_headlen(skb),
		.qid = skb->mark & MTK_QDMA_TX_MASK,
		.gso = gso,
		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
		.vlan = skb_vlan_tag_present(skb),
		.vlan_tci = skb_vlan_tag_get(skb),
		.first = true,
		.last = !skb_is_nonlinear(skb),
	};
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
	struct mtk_tx_buf *itx_buf, *tx_buf;
	int i, n_desc = 1;
	int k = 0;

	itxd = ring->next_free;
	itxd_pdma = qdma_to_pdma(ring, itxd);
	if (itxd == ring->last_free)
		return -ENOMEM;

	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
	memset(itx_buf, 0, sizeof(*itx_buf));

	txd_info.addr = dma_map_single(eth->dev, skb->data, txd_info.size,
				       DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
		return -ENOMEM;

	mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);

	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
			  MTK_TX_FLAGS_FPORT1;
	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
		     k++);

	/* TX SG offload */
	txd = itxd;
	txd_pdma = qdma_to_pdma(ring, txd);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool new_desc = true;

			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
			    (i & 0x1)) {
				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
				txd_pdma = qdma_to_pdma(ring, txd);
				if (txd == ring->last_free)
					goto err_dma;

				n_desc++;
			} else {
				new_desc = false;
			}

			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
			txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
					!(frag_size - txd_info.size);
			txd_info.addr = skb_frag_dma_map(eth->dev, frag,
							 offset, txd_info.size,
							 DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
				goto err_dma;

			mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);

			tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
			if (new_desc)
				memset(tx_buf, 0, sizeof(*tx_buf));
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
					 MTK_TX_FLAGS_FPORT1;

			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
				     txd_info.size, k++);

			frag_size -= txd_info.size;
			offset += txd_info.size;
		}
	}

	/* store skb to cleanup */
	itx_buf->skb = skb;

	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		if (k & 0x1)
			txd_pdma->txd2 |= TX_DMA_LS0;
		else
			txd_pdma->txd2 |= TX_DMA_LS1;
	}

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
		    !netdev_xmit_more())
			mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
	} else {
		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
					     ring->dma_size);
		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
	}

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf, false);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;

		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
		itxd_pdma = qdma_to_pdma(ring, itxd);
	} while (itxd != txd);

	return -ENOMEM;
}

static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	skb_frag_t *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
					       MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		netif_stop_queue(dev);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		netif_stop_queue(dev);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
{
	int i;
	struct mtk_rx_ring *ring;
	int idx;

	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
		struct mtk_rx_dma *rxd;

		if (!IS_NORMAL_RING(i) && !IS_HW_LRO_RING(i))
			continue;

		ring = &eth->rx_ring[i];
		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
		if (rxd->rxd2 & RX_DMA_DONE) {
			ring->calc_idx_update = true;
			return ring;
		}
	}

	return NULL;
}

static void mtk_update_rx_cpu_idx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	int i;

	if (!eth->hwlro)
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	else {
		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
			ring = &eth->rx_ring[i];
			if (ring->calc_idx_update) {
				ring->calc_idx_update = false;
				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
			}
		}
	}
}

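/* NAPI RX poll body: walk the descriptor ring, refill each consumed slot with
 * a freshly mapped fragment, hand the skb to the stack (with VLAN, checksum
 * and HNAT metadata applied) and finally advance the CPU index register.
 */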
static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
	struct mtk_rx_ring *ring = rx_napi->rx_ring;
	int idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma_v2 *rxd, trxd;
	int done = 0;

	if (unlikely(!ring))
		goto rx_done;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac = 0;

		if (eth->hwlro)
			ring = mtk_get_rx_ring(eth);

		if (unlikely(!ring))
			goto rx_done;

		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
		data = ring->data[idx];

		if (!mtk_rx_get_desc(eth, &trxd, rxd))
			break;

		/* find out which mac the packet comes from. values start at 1 */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
			mac = 0;
		} else {
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
				mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
			else
				mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ?
				      0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
		}

		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
			     !eth->netdev[mac]))
			goto release_desc;

		netdev = eth->netdev[mac];

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

		/* alloc new buffer */
		new_data = napi_alloc_frag(ring->frag_size);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dev,
					  new_data + NET_SKB_PAD +
					  eth->ip_align,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		dma_unmap_single(eth->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(data);
			netdev->stats.rx_dropped++;
			goto skip_rx;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);

		if ((!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
		     (trxd.rxd4 & eth->rx_dma_l4_valid)) ||
		    (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
		     (trxd.rxd3 & eth->rx_dma_l4_valid)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
				if (trxd.rxd3 & RX_DMA_VTAG_V2)
					__vlan_hwaccel_put_tag(skb,
						htons(RX_DMA_VPID_V2(trxd.rxd4)),
						RX_DMA_VID_V2(trxd.rxd4));
			} else {
				if (trxd.rxd2 & RX_DMA_VTAG)
					__vlan_hwaccel_put_tag(skb,
						htons(RX_DMA_VPID(trxd.rxd3)),
						RX_DMA_VID(trxd.rxd3));
			}

			/* If netdev is attached to a DSA switch, the special
			 * tag inserted in the VLAN field by the switch hardware
			 * can be offloaded by RX HW VLAN offload. Clear the
			 * VLAN information from @skb to avoid an unexpected
			 * 8021d handler before the packet enters the DSA
			 * framework.
			 */
			if (netdev_uses_dsa(netdev))
				__vlan_hwaccel_clear_tag(skb);
		}

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
			*(u32 *)(skb->head) = trxd.rxd5;
		else
			*(u32 *)(skb->head) = trxd.rxd4;

		skb_hnat_alg(skb) = 0;
		skb_hnat_filled(skb) = 0;
		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;

		if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
			trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
				     __func__, skb_hnat_reason(skb));
			skb->pkt_type = PACKET_HOST;
		}

		trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x\n",
			     __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
			     skb_hnat_reason(skb), skb_hnat_alg(skb));
#endif
		if (mtk_hwlro_stats_ebl &&
		    IS_HW_LRO_RING(ring->ring_no) && eth->hwlro) {
			hw_lro_stats_update(ring->ring_no, &trxd);
			hw_lro_flush_stats_update(ring->ring_no, &trxd);
		}

		skb_record_rx_queue(skb, 0);
		napi_gro_receive(napi, skb);

skip_rx:
		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			rxd->rxd2 = RX_DMA_LSO;
		else
			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;

		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth, ring);
	}

	return done;
}

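/* Reclaim transmitted QDMA descriptors between the last-seen CPU pointer and
 * the hardware DMA pointer, unmapping buffers and crediting per-MAC packet
 * and byte counts for completion-queue accounting.
 */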
developerfb556ca2021-10-13 10:52:09 +08001574static void mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08001575 unsigned int *done, unsigned int *bytes)
1576{
developere9356982022-07-04 09:03:20 +08001577 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001578 struct mtk_tx_ring *ring = &eth->tx_ring;
1579 struct mtk_tx_dma *desc;
1580 struct sk_buff *skb;
1581 struct mtk_tx_buf *tx_buf;
1582 u32 cpu, dma;
1583
developerc4671b22021-05-28 13:16:42 +08001584 cpu = ring->last_free_ptr;
developerfd40db22021-04-29 10:08:25 +08001585 dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1586
1587 desc = mtk_qdma_phys_to_virt(ring, cpu);
1588
1589 while ((cpu != dma) && budget) {
1590 u32 next_cpu = desc->txd2;
1591 int mac = 0;
1592
1593 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1594 break;
1595
1596 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1597
developere9356982022-07-04 09:03:20 +08001598 tx_buf = mtk_desc_to_tx_buf(ring, desc, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001599 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
1600 mac = 1;
1601
1602 skb = tx_buf->skb;
1603 if (!skb)
1604 break;
1605
1606 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1607 bytes[mac] += skb->len;
1608 done[mac]++;
1609 budget--;
1610 }
developerc4671b22021-05-28 13:16:42 +08001611 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08001612
1613 ring->last_free = desc;
1614 atomic_inc(&ring->free_count);
1615
1616 cpu = next_cpu;
1617 }
1618
developerc4671b22021-05-28 13:16:42 +08001619 ring->last_free_ptr = cpu;
developerfd40db22021-04-29 10:08:25 +08001620 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
developerfd40db22021-04-29 10:08:25 +08001621}
1622
developerfb556ca2021-10-13 10:52:09 +08001623static void mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08001624 unsigned int *done, unsigned int *bytes)
1625{
1626 struct mtk_tx_ring *ring = &eth->tx_ring;
1627 struct mtk_tx_dma *desc;
1628 struct sk_buff *skb;
1629 struct mtk_tx_buf *tx_buf;
1630 u32 cpu, dma;
1631
1632 cpu = ring->cpu_idx;
1633 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
1634
1635 while ((cpu != dma) && budget) {
1636 tx_buf = &ring->buf[cpu];
1637 skb = tx_buf->skb;
1638 if (!skb)
1639 break;
1640
1641 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1642 bytes[0] += skb->len;
1643 done[0]++;
1644 budget--;
1645 }
1646
developerc4671b22021-05-28 13:16:42 +08001647 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08001648
developere9356982022-07-04 09:03:20 +08001649 desc = ring->dma + cpu * eth->soc->txrx.txd_size;
developerfd40db22021-04-29 10:08:25 +08001650 ring->last_free = desc;
1651 atomic_inc(&ring->free_count);
1652
1653 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
1654 }
1655
1656 ring->cpu_idx = cpu;
developerfd40db22021-04-29 10:08:25 +08001657}
1658
1659static int mtk_poll_tx(struct mtk_eth *eth, int budget)
1660{
1661 struct mtk_tx_ring *ring = &eth->tx_ring;
1662 unsigned int done[MTK_MAX_DEVS];
1663 unsigned int bytes[MTK_MAX_DEVS];
1664 int total = 0, i;
1665
1666 memset(done, 0, sizeof(done));
1667 memset(bytes, 0, sizeof(bytes));
1668
1669 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
developerfb556ca2021-10-13 10:52:09 +08001670 mtk_poll_tx_qdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08001671 else
developerfb556ca2021-10-13 10:52:09 +08001672 mtk_poll_tx_pdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08001673
1674 for (i = 0; i < MTK_MAC_COUNT; i++) {
1675 if (!eth->netdev[i] || !done[i])
1676 continue;
1677 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1678 total += done[i];
1679 }
1680
1681 if (mtk_queue_stopped(eth) &&
1682 (atomic_read(&ring->free_count) > ring->thresh))
1683 mtk_wake_queue(eth);
1684
1685 return total;
1686}
1687
1688static void mtk_handle_status_irq(struct mtk_eth *eth)
1689{
developer8051e042022-04-08 13:26:36 +08001690 u32 status2 = mtk_r32(eth, MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08001691
1692 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
1693 mtk_stats_update(eth);
1694 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
developer8051e042022-04-08 13:26:36 +08001695 MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08001696 }
1697}
1698
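/* NAPI TX poll: ack the TX done interrupt, reclaim descriptors up to the
 * budget, and re-enable the interrupt only once napi_complete() succeeds
 * and no further TX done status is pending.
 */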
1699static int mtk_napi_tx(struct napi_struct *napi, int budget)
1700{
1701 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
1702 u32 status, mask;
1703 int tx_done = 0;
1704
1705 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1706 mtk_handle_status_irq(eth);
1707 mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
1708 tx_done = mtk_poll_tx(eth, budget);
1709
1710 if (unlikely(netif_msg_intr(eth))) {
1711 status = mtk_r32(eth, eth->tx_int_status_reg);
1712 mask = mtk_r32(eth, eth->tx_int_mask_reg);
1713 dev_info(eth->dev,
1714 "done tx %d, intr 0x%08x/0x%x\n",
1715 tx_done, status, mask);
1716 }
1717
1718 if (tx_done == budget)
1719 return budget;
1720
1721 status = mtk_r32(eth, eth->tx_int_status_reg);
1722 if (status & MTK_TX_DONE_INT)
1723 return budget;
1724
developerc4671b22021-05-28 13:16:42 +08001725 if (napi_complete(napi))
1726 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developerfd40db22021-04-29 10:08:25 +08001727
1728 return tx_done;
1729}
1730
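/* NAPI RX poll for one PDMA ring: ack the ring's RX done interrupt, poll
 * packets, and loop again if new completions arrived before the budget was
 * exhausted; the interrupt is re-enabled only after napi_complete().
 */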
1731static int mtk_napi_rx(struct napi_struct *napi, int budget)
1732{
developer18f46a82021-07-20 21:08:21 +08001733 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
1734 struct mtk_eth *eth = rx_napi->eth;
1735 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08001736 u32 status, mask;
1737 int rx_done = 0;
1738 int remain_budget = budget;
1739
1740 mtk_handle_status_irq(eth);
1741
1742poll_again:
developer18f46a82021-07-20 21:08:21 +08001743 mtk_w32(eth, MTK_RX_DONE_INT(ring->ring_no), MTK_PDMA_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08001744 rx_done = mtk_poll_rx(napi, remain_budget, eth);
1745
1746 if (unlikely(netif_msg_intr(eth))) {
1747 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1748 mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
1749 dev_info(eth->dev,
1750 "done rx %d, intr 0x%08x/0x%x\n",
1751 rx_done, status, mask);
1752 }
1753 if (rx_done == remain_budget)
1754 return budget;
1755
1756 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
developer18f46a82021-07-20 21:08:21 +08001757 if (status & MTK_RX_DONE_INT(ring->ring_no)) {
developerfd40db22021-04-29 10:08:25 +08001758 remain_budget -= rx_done;
1759 goto poll_again;
1760 }
developerc4671b22021-05-28 13:16:42 +08001761
1762 if (napi_complete(napi))
developer18f46a82021-07-20 21:08:21 +08001763 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(ring->ring_no));
developerfd40db22021-04-29 10:08:25 +08001764
1765 return rx_done + budget - remain_budget;
1766}
1767
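/* Allocate the TX ring. Descriptors live either in coherent DMA memory or,
 * when the SoC has has_sram set, in the SRAM region behind the scratch
 * ring. QDMA descriptors are chained through txd2; on PDMA-only SoCs an
 * additional ring->dma_pdma holds the real hardware descriptors.
 */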
1768static int mtk_tx_alloc(struct mtk_eth *eth)
1769{
developere9356982022-07-04 09:03:20 +08001770 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001771 struct mtk_tx_ring *ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08001772 int i, sz = soc->txrx.txd_size;
1773 struct mtk_tx_dma_v2 *txd, *pdma_txd;
developerfd40db22021-04-29 10:08:25 +08001774
1775 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
1776 GFP_KERNEL);
1777 if (!ring->buf)
1778 goto no_tx_mem;
1779
1780 if (!eth->soc->has_sram)
1781 ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08001782 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08001783 else {
developere9356982022-07-04 09:03:20 +08001784 ring->dma = eth->scratch_ring + MTK_DMA_SIZE * sz;
developerfd40db22021-04-29 10:08:25 +08001785 ring->phys = eth->phy_scratch_ring + MTK_DMA_SIZE * sz;
1786 }
1787
1788 if (!ring->dma)
1789 goto no_tx_mem;
1790
1791 for (i = 0; i < MTK_DMA_SIZE; i++) {
1792 int next = (i + 1) % MTK_DMA_SIZE;
1793 u32 next_ptr = ring->phys + next * sz;
1794
developere9356982022-07-04 09:03:20 +08001795 txd = ring->dma + i * sz;
1796 txd->txd2 = next_ptr;
1797 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1798 txd->txd4 = 0;
1799
1800 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1801 txd->txd5 = 0;
1802 txd->txd6 = 0;
1803 txd->txd7 = 0;
1804 txd->txd8 = 0;
1805 }
developerfd40db22021-04-29 10:08:25 +08001806 }
1807
1808 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
1809 * only as the framework. The real HW descriptors are the PDMA
1810 * descriptors in ring->dma_pdma.
1811 */
1812 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1813 ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08001814 &ring->phys_pdma, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08001815 if (!ring->dma_pdma)
1816 goto no_tx_mem;
1817
1818 for (i = 0; i < MTK_DMA_SIZE; i++) {
developere9356982022-07-04 09:03:20 +08001819			pdma_txd = ring->dma_pdma + i * sz;
1820
1821 pdma_txd->txd2 = TX_DMA_DESP2_DEF;
1822 pdma_txd->txd4 = 0;
developerfd40db22021-04-29 10:08:25 +08001823 }
1824 }
1825
1826 ring->dma_size = MTK_DMA_SIZE;
1827 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
developere9356982022-07-04 09:03:20 +08001828 ring->next_free = ring->dma;
1829 ring->last_free = (void *)txd;
developerc4671b22021-05-28 13:16:42 +08001830 ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
developerfd40db22021-04-29 10:08:25 +08001831 ring->thresh = MAX_SKB_FRAGS;
1832
1833 /* make sure that all changes to the dma ring are flushed before we
1834 * continue
1835 */
1836 wmb();
1837
1838 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1839 mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
1840 mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
1841 mtk_w32(eth,
1842 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1843 MTK_QTX_CRX_PTR);
developerc4671b22021-05-28 13:16:42 +08001844 mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
developerfd40db22021-04-29 10:08:25 +08001845 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
1846 MTK_QTX_CFG(0));
1847 } else {
1848 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
1849 mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
1850 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
1851 mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
1852 }
1853
1854 return 0;
1855
1856no_tx_mem:
1857 return -ENOMEM;
1858}
1859
1860static void mtk_tx_clean(struct mtk_eth *eth)
1861{
developere9356982022-07-04 09:03:20 +08001862 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001863 struct mtk_tx_ring *ring = &eth->tx_ring;
1864 int i;
1865
1866 if (ring->buf) {
1867 for (i = 0; i < MTK_DMA_SIZE; i++)
developerc4671b22021-05-28 13:16:42 +08001868 mtk_tx_unmap(eth, &ring->buf[i], false);
developerfd40db22021-04-29 10:08:25 +08001869 kfree(ring->buf);
1870 ring->buf = NULL;
1871 }
1872
1873 if (!eth->soc->has_sram && ring->dma) {
1874 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08001875 MTK_DMA_SIZE * soc->txrx.txd_size,
1876 ring->dma, ring->phys);
developerfd40db22021-04-29 10:08:25 +08001877 ring->dma = NULL;
1878 }
1879
1880 if (ring->dma_pdma) {
1881 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08001882 MTK_DMA_SIZE * soc->txrx.txd_size,
1883 ring->dma_pdma, ring->phys_pdma);
developerfd40db22021-04-29 10:08:25 +08001884 ring->dma_pdma = NULL;
1885 }
1886}
1887
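/* Allocate one RX ring (normal, HW LRO or QDMA flavour): allocate and map
 * the page fragments, fill the RX descriptors, and program the ring base,
 * size, CPU index and reset-index registers of the selected DMA engine.
 */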
1888static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
1889{
1890 struct mtk_rx_ring *ring;
1891 int rx_data_len, rx_dma_size;
1892 int i;
1893
1894 if (rx_flag == MTK_RX_FLAGS_QDMA) {
1895 if (ring_no)
1896 return -EINVAL;
1897 ring = &eth->rx_ring_qdma;
1898 } else {
1899 ring = &eth->rx_ring[ring_no];
1900 }
1901
1902 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
1903 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
1904 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
1905 } else {
1906 rx_data_len = ETH_DATA_LEN;
1907 rx_dma_size = MTK_DMA_SIZE;
1908 }
1909
1910 ring->frag_size = mtk_max_frag_size(rx_data_len);
1911 ring->buf_size = mtk_max_buf_size(ring->frag_size);
1912 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
1913 GFP_KERNEL);
1914 if (!ring->data)
1915 return -ENOMEM;
1916
1917 for (i = 0; i < rx_dma_size; i++) {
1918 ring->data[i] = netdev_alloc_frag(ring->frag_size);
1919 if (!ring->data[i])
1920 return -ENOMEM;
1921 }
1922
1923 if ((!eth->soc->has_sram) || (eth->soc->has_sram
1924 && (rx_flag != MTK_RX_FLAGS_NORMAL)))
1925 ring->dma = dma_alloc_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08001926 rx_dma_size * eth->soc->txrx.rxd_size,
1927 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08001928 else {
1929 struct mtk_tx_ring *tx_ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08001930 ring->dma = tx_ring->dma + MTK_DMA_SIZE *
1931 eth->soc->txrx.rxd_size * (ring_no + 1);
developer18f46a82021-07-20 21:08:21 +08001932 ring->phys = tx_ring->phys + MTK_DMA_SIZE *
developere9356982022-07-04 09:03:20 +08001933 eth->soc->txrx.rxd_size * (ring_no + 1);
developerfd40db22021-04-29 10:08:25 +08001934 }
1935
1936 if (!ring->dma)
1937 return -ENOMEM;
1938
1939 for (i = 0; i < rx_dma_size; i++) {
developere9356982022-07-04 09:03:20 +08001940 struct mtk_rx_dma_v2 *rxd;
1941
developerfd40db22021-04-29 10:08:25 +08001942 dma_addr_t dma_addr = dma_map_single(eth->dev,
1943 ring->data[i] + NET_SKB_PAD + eth->ip_align,
1944 ring->buf_size,
1945 DMA_FROM_DEVICE);
1946 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1947 return -ENOMEM;
developere9356982022-07-04 09:03:20 +08001948
1949 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
1950 rxd->rxd1 = (unsigned int)dma_addr;
developerfd40db22021-04-29 10:08:25 +08001951
1952 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
developere9356982022-07-04 09:03:20 +08001953 rxd->rxd2 = RX_DMA_LSO;
developerfd40db22021-04-29 10:08:25 +08001954 else
developere9356982022-07-04 09:03:20 +08001955 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
developerfd40db22021-04-29 10:08:25 +08001956
developere9356982022-07-04 09:03:20 +08001957 rxd->rxd3 = 0;
1958 rxd->rxd4 = 0;
1959
1960 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1961 rxd->rxd5 = 0;
1962 rxd->rxd6 = 0;
1963 rxd->rxd7 = 0;
1964 rxd->rxd8 = 0;
developerfd40db22021-04-29 10:08:25 +08001965 }
developerfd40db22021-04-29 10:08:25 +08001966 }
1967 ring->dma_size = rx_dma_size;
1968 ring->calc_idx_update = false;
1969 ring->calc_idx = rx_dma_size - 1;
1970 ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ?
1971 MTK_QRX_CRX_IDX_CFG(ring_no) :
1972 MTK_PRX_CRX_IDX_CFG(ring_no);
developer77d03a72021-06-06 00:06:00 +08001973 ring->ring_no = ring_no;
developerfd40db22021-04-29 10:08:25 +08001974 /* make sure that all changes to the dma ring are flushed before we
1975 * continue
1976 */
1977 wmb();
1978
1979 if (rx_flag == MTK_RX_FLAGS_QDMA) {
1980 mtk_w32(eth, ring->phys, MTK_QRX_BASE_PTR_CFG(ring_no));
1981 mtk_w32(eth, rx_dma_size, MTK_QRX_MAX_CNT_CFG(ring_no));
1982 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1983 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_QDMA_RST_IDX);
1984 } else {
1985 mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
1986 mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
1987 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1988 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
1989 }
1990
1991 return 0;
1992}
1993
1994static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram)
1995{
1996 int i;
1997
1998 if (ring->data && ring->dma) {
1999 for (i = 0; i < ring->dma_size; i++) {
developere9356982022-07-04 09:03:20 +08002000 struct mtk_rx_dma *rxd;
2001
developerfd40db22021-04-29 10:08:25 +08002002 if (!ring->data[i])
2003 continue;
developere9356982022-07-04 09:03:20 +08002004
2005 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2006 if (!rxd->rxd1)
developerfd40db22021-04-29 10:08:25 +08002007 continue;
developere9356982022-07-04 09:03:20 +08002008
developerfd40db22021-04-29 10:08:25 +08002009 dma_unmap_single(eth->dev,
developere9356982022-07-04 09:03:20 +08002010 rxd->rxd1,
developerfd40db22021-04-29 10:08:25 +08002011 ring->buf_size,
2012 DMA_FROM_DEVICE);
2013 skb_free_frag(ring->data[i]);
2014 }
2015 kfree(ring->data);
2016 ring->data = NULL;
2017 }
2018
2019	if (in_sram)
2020 return;
2021
2022 if (ring->dma) {
2023 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002024 ring->dma_size * eth->soc->txrx.rxd_size,
developerfd40db22021-04-29 10:08:25 +08002025 ring->dma,
2026 ring->phys);
2027 ring->dma = NULL;
2028 }
2029}
2030
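/* Program the HW LRO engine: put the LRO rings into auto-learn mode, set
 * the aggregation age/time/count limits and the bandwidth threshold, and
 * finally enable LRO in MTK_PDMA_LRO_CTRL_DW0. NETSYS v2 additionally
 * carries the SDL limit in MTK_PDMA_RX_CFG and CTRL_DW0.
 */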
2031static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2032{
2033 int i;
developer77d03a72021-06-06 00:06:00 +08002034 u32 val;
developerfd40db22021-04-29 10:08:25 +08002035 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2036 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2037
2038 /* set LRO rings to auto-learn modes */
2039 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2040
2041 /* validate LRO ring */
2042 ring_ctrl_dw2 |= MTK_RING_VLD;
2043
2044 /* set AGE timer (unit: 20us) */
2045 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2046 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2047
2048 /* set max AGG timer (unit: 20us) */
2049 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2050
2051 /* set max LRO AGG count */
2052 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2053 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2054
developer77d03a72021-06-06 00:06:00 +08002055 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002056 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2057 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2058 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2059 }
2060
2061 /* IPv4 checksum update enable */
2062 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2063
2064 /* switch priority comparison to packet count mode */
2065 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2066
2067 /* bandwidth threshold setting */
2068 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2069
2070 /* auto-learn score delta setting */
developer77d03a72021-06-06 00:06:00 +08002071 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_LRO_ALT_SCORE_DELTA);
developerfd40db22021-04-29 10:08:25 +08002072
2073 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2074 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2075 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2076
developerfd40db22021-04-29 10:08:25 +08002077 /* the minimal remaining room of SDL0 in RXD for lro aggregation */
2078 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2079
developer77d03a72021-06-06 00:06:00 +08002080 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2081 val = mtk_r32(eth, MTK_PDMA_RX_CFG);
2082 mtk_w32(eth, val | (MTK_PDMA_LRO_SDL << MTK_RX_CFG_SDL_OFFSET),
2083 MTK_PDMA_RX_CFG);
2084
2085 lro_ctrl_dw0 |= MTK_PDMA_LRO_SDL << MTK_CTRL_DW0_SDL_OFFSET;
2086 } else {
2087 /* set HW LRO mode & the max aggregation count for rx packets */
2088 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2089 }
2090
developerfd40db22021-04-29 10:08:25 +08002091 /* enable HW LRO */
2092 lro_ctrl_dw0 |= MTK_LRO_EN;
2093
developer77d03a72021-06-06 00:06:00 +08002094 /* enable cpu reason black list */
2095 lro_ctrl_dw0 |= MTK_LRO_CRSN_BNW;
2096
developerfd40db22021-04-29 10:08:25 +08002097 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2098 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2099
developer77d03a72021-06-06 00:06:00 +08002100	/* do not use any PPE CPU reason */
2101 mtk_w32(eth, 0xffffffff, MTK_PDMA_LRO_CTRL_DW1);
2102
developerfd40db22021-04-29 10:08:25 +08002103 return 0;
2104}
2105
2106static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2107{
2108 int i;
2109 u32 val;
2110
2111 /* relinquish lro rings, flush aggregated packets */
developer77d03a72021-06-06 00:06:00 +08002112 mtk_w32(eth, MTK_LRO_RING_RELINGUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
developerfd40db22021-04-29 10:08:25 +08002113
2114 /* wait for relinquishments done */
2115 for (i = 0; i < 10; i++) {
2116 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
developer77d03a72021-06-06 00:06:00 +08002117 if (val & MTK_LRO_RING_RELINGUISH_DONE) {
developer8051e042022-04-08 13:26:36 +08002118 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08002119 continue;
2120 }
2121 break;
2122 }
2123
2124 /* invalidate lro rings */
developer77d03a72021-06-06 00:06:00 +08002125 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
developerfd40db22021-04-29 10:08:25 +08002126 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2127
2128 /* disable HW LRO */
2129 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2130}
2131
2132static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2133{
2134 u32 reg_val;
2135
developer77d03a72021-06-06 00:06:00 +08002136 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2137 idx += 1;
2138
developerfd40db22021-04-29 10:08:25 +08002139 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2140
2141 /* invalidate the IP setting */
2142 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2143
2144 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2145
2146 /* validate the IP setting */
2147 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2148}
2149
2150static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2151{
2152 u32 reg_val;
2153
developer77d03a72021-06-06 00:06:00 +08002154 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2155 idx += 1;
2156
developerfd40db22021-04-29 10:08:25 +08002157 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2158
2159 /* invalidate the IP setting */
2160 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2161
2162 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2163}
2164
2165static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2166{
2167 int cnt = 0;
2168 int i;
2169
2170 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2171 if (mac->hwlro_ip[i])
2172 cnt++;
2173 }
2174
2175 return cnt;
2176}
2177
2178static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2179 struct ethtool_rxnfc *cmd)
2180{
2181 struct ethtool_rx_flow_spec *fsp =
2182 (struct ethtool_rx_flow_spec *)&cmd->fs;
2183 struct mtk_mac *mac = netdev_priv(dev);
2184 struct mtk_eth *eth = mac->hw;
2185 int hwlro_idx;
2186
2187 if ((fsp->flow_type != TCP_V4_FLOW) ||
2188 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2189 (fsp->location > 1))
2190 return -EINVAL;
2191
2192 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2193 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2194
2195 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2196
2197 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2198
2199 return 0;
2200}
2201
2202static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2203 struct ethtool_rxnfc *cmd)
2204{
2205 struct ethtool_rx_flow_spec *fsp =
2206 (struct ethtool_rx_flow_spec *)&cmd->fs;
2207 struct mtk_mac *mac = netdev_priv(dev);
2208 struct mtk_eth *eth = mac->hw;
2209 int hwlro_idx;
2210
2211 if (fsp->location > 1)
2212 return -EINVAL;
2213
2214 mac->hwlro_ip[fsp->location] = 0;
2215 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2216
2217 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2218
2219 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2220
2221 return 0;
2222}
2223
2224static void mtk_hwlro_netdev_disable(struct net_device *dev)
2225{
2226 struct mtk_mac *mac = netdev_priv(dev);
2227 struct mtk_eth *eth = mac->hw;
2228 int i, hwlro_idx;
2229
2230 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2231 mac->hwlro_ip[i] = 0;
2232 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2233
2234 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2235 }
2236
2237 mac->hwlro_ip_cnt = 0;
2238}
2239
2240static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2241 struct ethtool_rxnfc *cmd)
2242{
2243 struct mtk_mac *mac = netdev_priv(dev);
2244 struct ethtool_rx_flow_spec *fsp =
2245 (struct ethtool_rx_flow_spec *)&cmd->fs;
2246
2247	/* only the TCP destination IPv4 address is meaningful; the other fields are ignored */
2248 fsp->flow_type = TCP_V4_FLOW;
2249 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2250 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2251
2252 fsp->h_u.tcp_ip4_spec.ip4src = 0;
2253 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2254 fsp->h_u.tcp_ip4_spec.psrc = 0;
2255 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2256 fsp->h_u.tcp_ip4_spec.pdst = 0;
2257 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2258 fsp->h_u.tcp_ip4_spec.tos = 0;
2259 fsp->m_u.tcp_ip4_spec.tos = 0xff;
2260
2261 return 0;
2262}
2263
2264static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2265 struct ethtool_rxnfc *cmd,
2266 u32 *rule_locs)
2267{
2268 struct mtk_mac *mac = netdev_priv(dev);
2269 int cnt = 0;
2270 int i;
2271
2272 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2273 if (mac->hwlro_ip[i]) {
2274 rule_locs[cnt] = i;
2275 cnt++;
2276 }
2277 }
2278
2279 cmd->rule_cnt = cnt;
2280
2281 return 0;
2282}
2283
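/* Enable receive-side scaling: select IPv4/IPv6 static hashing, program
 * the indirection table, and apply the enable bit while the engine is
 * paused via MTK_RSS_CFG_REQ, then hook RSS ring 1 into interrupt group 3.
 */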
developer18f46a82021-07-20 21:08:21 +08002284static int mtk_rss_init(struct mtk_eth *eth)
2285{
2286 u32 val;
2287
2288 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2289 /* Set RSS rings to PSE modes */
2290 val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(1));
2291 val |= MTK_RING_PSE_MODE;
2292 mtk_w32(eth, val, MTK_LRO_CTRL_DW2_CFG(1));
2293
2294 /* Enable non-lro multiple rx */
2295 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2296 val |= MTK_NON_LRO_MULTI_EN;
2297 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2298
2299		/* Enable RSS delay interrupt support */
2300 val |= MTK_LRO_DLY_INT_EN;
2301 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2302
2303		/* Set RSS delay interrupt config for ring 1 */
2304 mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX1_DLY_INT);
2305 }
2306
2307 /* Hash Type */
2308 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2309 val |= MTK_RSS_IPV4_STATIC_HASH;
2310 val |= MTK_RSS_IPV6_STATIC_HASH;
2311 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2312
2313 /* Select the size of indirection table */
2314 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW0);
2315 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW1);
2316 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW2);
2317 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW3);
2318 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW4);
2319 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW5);
2320 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW6);
2321 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW7);
2322
2323 /* Pause */
2324 val |= MTK_RSS_CFG_REQ;
2325 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2326
2327	/* Enable RSS */
2328 val |= MTK_RSS_EN;
2329 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2330
2331 /* Release pause */
2332 val &= ~(MTK_RSS_CFG_REQ);
2333 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2334
2335	/* Set the per-RSS-ring group interrupt */
2336 mtk_w32(eth, MTK_RX_DONE_INT(MTK_RSS_RING1), MTK_PDMA_INT_GRP3);
2337
2338 /* Set GRP INT */
2339 mtk_w32(eth, 0x21021030, MTK_FE_INT_GRP);
2340
2341 return 0;
2342}
2343
2344static void mtk_rss_uninit(struct mtk_eth *eth)
2345{
2346 u32 val;
2347
2348 /* Pause */
2349 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2350 val |= MTK_RSS_CFG_REQ;
2351 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2352
2353	/* Disable RSS */
2354 val &= ~(MTK_RSS_EN);
2355 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2356
2357 /* Release pause */
2358 val &= ~(MTK_RSS_CFG_REQ);
2359 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2360}
2361
developerfd40db22021-04-29 10:08:25 +08002362static netdev_features_t mtk_fix_features(struct net_device *dev,
2363 netdev_features_t features)
2364{
2365 if (!(features & NETIF_F_LRO)) {
2366 struct mtk_mac *mac = netdev_priv(dev);
2367 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2368
2369 if (ip_cnt) {
2370			netdev_info(dev, "RX flow is programmed, LRO must stay enabled\n");
2371
2372 features |= NETIF_F_LRO;
2373 }
2374 }
2375
2376 if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
2377 netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n");
2378
2379 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2380 }
2381
2382 return features;
2383}
2384
2385static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2386{
2387 struct mtk_mac *mac = netdev_priv(dev);
2388 struct mtk_eth *eth = mac->hw;
2389 int err = 0;
2390
2391 if (!((dev->features ^ features) & MTK_SET_FEATURES))
2392 return 0;
2393
2394 if (!(features & NETIF_F_LRO))
2395 mtk_hwlro_netdev_disable(dev);
2396
2397 if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
2398 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
2399 else
2400 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
2401
2402 return err;
2403}
2404
2405/* wait for DMA to finish whatever it is doing before we start using it again */
2406static int mtk_dma_busy_wait(struct mtk_eth *eth)
2407{
2408 unsigned long t_start = jiffies;
2409
2410 while (1) {
2411 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2412 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
2413 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2414 return 0;
2415 } else {
2416 if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
2417 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2418 return 0;
2419 }
2420
2421 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
2422 break;
2423 }
2424
2425 dev_err(eth->dev, "DMA init timeout\n");
2426 return -1;
2427}
2428
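/* Bring up all DMA rings: the FQ scratch memory used by QDMA for internal
 * reordering, the TX ring, the QDMA and normal PDMA RX rings and, when the
 * SoC supports them, the HW LRO and RSS rings. QDMA random early drop
 * thresholds are configured at the end.
 */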
2429static int mtk_dma_init(struct mtk_eth *eth)
2430{
2431 int err;
2432 u32 i;
2433
2434 if (mtk_dma_busy_wait(eth))
2435 return -EBUSY;
2436
2437 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2438 /* QDMA needs scratch memory for internal reordering of the
2439 * descriptors
2440 */
2441 err = mtk_init_fq_dma(eth);
2442 if (err)
2443 return err;
2444 }
2445
2446 err = mtk_tx_alloc(eth);
2447 if (err)
2448 return err;
2449
2450 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2451 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2452 if (err)
2453 return err;
2454 }
2455
2456 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2457 if (err)
2458 return err;
2459
2460 if (eth->hwlro) {
developer77d03a72021-06-06 00:06:00 +08002461 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ? 4 : 1;
2462 for (; i < MTK_MAX_RX_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002463 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2464 if (err)
2465 return err;
2466 }
2467 err = mtk_hwlro_rx_init(eth);
2468 if (err)
2469 return err;
2470 }
2471
developer18f46a82021-07-20 21:08:21 +08002472 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2473 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2474 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_NORMAL);
2475 if (err)
2476 return err;
2477 }
2478 err = mtk_rss_init(eth);
2479 if (err)
2480 return err;
2481 }
2482
developerfd40db22021-04-29 10:08:25 +08002483 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2484 /* Enable random early drop and set drop threshold
2485 * automatically
2486 */
2487 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2488 FC_THRES_MIN, MTK_QDMA_FC_THRES);
2489 mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
2490 }
2491
2492 return 0;
2493}
2494
2495static void mtk_dma_free(struct mtk_eth *eth)
2496{
developere9356982022-07-04 09:03:20 +08002497 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002498 int i;
2499
2500 for (i = 0; i < MTK_MAC_COUNT; i++)
2501 if (eth->netdev[i])
2502 netdev_reset_queue(eth->netdev[i]);
2503 if ( !eth->soc->has_sram && eth->scratch_ring) {
2504 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002505 MTK_DMA_SIZE * soc->txrx.txd_size,
2506 eth->scratch_ring, eth->phy_scratch_ring);
developerfd40db22021-04-29 10:08:25 +08002507 eth->scratch_ring = NULL;
2508 eth->phy_scratch_ring = 0;
2509 }
2510 mtk_tx_clean(eth);
developerb3ce86f2022-06-30 13:31:47 +08002511	mtk_rx_clean(eth, &eth->rx_ring[0], eth->soc->has_sram);
developerfd40db22021-04-29 10:08:25 +08002512	mtk_rx_clean(eth, &eth->rx_ring_qdma, 0);
2513
2514 if (eth->hwlro) {
2515 mtk_hwlro_rx_uninit(eth);
developer77d03a72021-06-06 00:06:00 +08002516
2517 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ? 4 : 1;
2518 for (; i < MTK_MAX_RX_RING_NUM; i++)
2519 mtk_rx_clean(eth, &eth->rx_ring[i], 0);
developerfd40db22021-04-29 10:08:25 +08002520 }
2521
developer18f46a82021-07-20 21:08:21 +08002522 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2523 mtk_rss_uninit(eth);
2524
2525 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
2526 mtk_rx_clean(eth, &eth->rx_ring[i], 1);
2527 }
2528
developer94008d92021-09-23 09:47:41 +08002529 if (eth->scratch_head) {
2530 kfree(eth->scratch_head);
2531 eth->scratch_head = NULL;
2532 }
developerfd40db22021-04-29 10:08:25 +08002533}
2534
2535static void mtk_tx_timeout(struct net_device *dev)
2536{
2537 struct mtk_mac *mac = netdev_priv(dev);
2538 struct mtk_eth *eth = mac->hw;
2539
2540 eth->netdev[mac->id]->stats.tx_errors++;
2541 netif_err(eth, tx_err, dev,
2542 "transmit timed out\n");
developer8051e042022-04-08 13:26:36 +08002543
2544 if (atomic_read(&reset_lock) == 0)
2545 schedule_work(&eth->pending_work);
developerfd40db22021-04-29 10:08:25 +08002546}
2547
developer18f46a82021-07-20 21:08:21 +08002548static irqreturn_t mtk_handle_irq_rx(int irq, void *priv)
developerfd40db22021-04-29 10:08:25 +08002549{
developer18f46a82021-07-20 21:08:21 +08002550 struct mtk_napi *rx_napi = priv;
2551 struct mtk_eth *eth = rx_napi->eth;
2552 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08002553
developer18f46a82021-07-20 21:08:21 +08002554 if (likely(napi_schedule_prep(&rx_napi->napi))) {
developer18f46a82021-07-20 21:08:21 +08002555 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(ring->ring_no));
developer6bbe70d2021-08-06 09:34:55 +08002556 __napi_schedule(&rx_napi->napi);
developerfd40db22021-04-29 10:08:25 +08002557 }
2558
2559 return IRQ_HANDLED;
2560}
2561
2562static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
2563{
2564 struct mtk_eth *eth = _eth;
2565
2566 if (likely(napi_schedule_prep(&eth->tx_napi))) {
developerfd40db22021-04-29 10:08:25 +08002567 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer6bbe70d2021-08-06 09:34:55 +08002568 __napi_schedule(&eth->tx_napi);
developerfd40db22021-04-29 10:08:25 +08002569 }
2570
2571 return IRQ_HANDLED;
2572}
2573
2574static irqreturn_t mtk_handle_irq(int irq, void *_eth)
2575{
2576 struct mtk_eth *eth = _eth;
2577
developer18f46a82021-07-20 21:08:21 +08002578 if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT(0)) {
2579 if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT(0))
2580 mtk_handle_irq_rx(irq, &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08002581 }
2582 if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
2583 if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
2584 mtk_handle_irq_tx(irq, _eth);
2585 }
2586
2587 return IRQ_HANDLED;
2588}
2589
developera2613e62022-07-01 18:29:37 +08002590static irqreturn_t mtk_handle_irq_fixed_link(int irq, void *_mac)
2591{
2592 struct mtk_mac *mac = _mac;
2593 struct mtk_eth *eth = mac->hw;
2594 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
2595 struct net_device *dev = phylink_priv->dev;
2596 int link_old, link_new;
2597
2598 // clear interrupt status for gpy211
2599 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
2600
2601 link_old = phylink_priv->link;
2602 link_new = _mtk_mdio_read(eth, phylink_priv->phyaddr, MII_BMSR) & BMSR_LSTATUS;
2603
2604 if (link_old != link_new) {
2605 phylink_priv->link = link_new;
2606 if (link_new) {
2607 printk("phylink.%d %s: Link is Up\n", phylink_priv->id, dev->name);
2608 if (dev)
2609 netif_carrier_on(dev);
2610 } else {
2611 printk("phylink.%d %s: Link is Down\n", phylink_priv->id, dev->name);
2612 if (dev)
2613 netif_carrier_off(dev);
2614 }
2615 }
2616
2617 return IRQ_HANDLED;
2618}
2619
developerfd40db22021-04-29 10:08:25 +08002620#ifdef CONFIG_NET_POLL_CONTROLLER
2621static void mtk_poll_controller(struct net_device *dev)
2622{
2623 struct mtk_mac *mac = netdev_priv(dev);
2624 struct mtk_eth *eth = mac->hw;
2625
2626 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002627 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
2628 mtk_handle_irq_rx(eth->irq[2], &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08002629 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002630 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08002631}
2632#endif
2633
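/* Start the DMA engines with the per-SoC GLO_CFG settings; NETSYS v2 parts
 * additionally enable write-back completion and DDONE checking, and
 * LRO-capable parts turn on MTK_RX_DMA_LRO_EN in the PDMA config.
 */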
2634static int mtk_start_dma(struct mtk_eth *eth)
2635{
2636 u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
developer77d03a72021-06-06 00:06:00 +08002637 int val, err;
developerfd40db22021-04-29 10:08:25 +08002638
2639 err = mtk_dma_init(eth);
2640 if (err) {
2641 mtk_dma_free(eth);
2642 return err;
2643 }
2644
2645 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
developer15d0d282021-07-14 16:40:44 +08002646 val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
developer19d84562022-04-21 17:01:06 +08002647 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2648 val &= ~MTK_RESV_BUF_MASK;
developerfd40db22021-04-29 10:08:25 +08002649 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002650 val | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08002651 MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
2652 MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
2653 MTK_RESV_BUF | MTK_WCOMP_EN |
2654 MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
developer1ac65932022-07-19 17:23:32 +08002655 MTK_RX_2B_OFFSET, MTK_QDMA_GLO_CFG);
developer19d84562022-04-21 17:01:06 +08002656 }
developerfd40db22021-04-29 10:08:25 +08002657 else
2658 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002659 val | MTK_TX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08002660 MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
2661 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
2662 MTK_RX_BT_32DWORDS,
2663 MTK_QDMA_GLO_CFG);
2664
developer15d0d282021-07-14 16:40:44 +08002665 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
developerfd40db22021-04-29 10:08:25 +08002666 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002667 val | MTK_RX_DMA_EN | rx_2b_offset |
developerfd40db22021-04-29 10:08:25 +08002668 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
2669 MTK_PDMA_GLO_CFG);
2670 } else {
2671 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2672 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
2673 MTK_PDMA_GLO_CFG);
2674 }
2675
developer77d03a72021-06-06 00:06:00 +08002676 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) && eth->hwlro) {
2677 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
2678 mtk_w32(eth, val | MTK_RX_DMA_LRO_EN, MTK_PDMA_GLO_CFG);
2679 }
2680
developerfd40db22021-04-29 10:08:25 +08002681 return 0;
2682}
2683
developer8051e042022-04-08 13:26:36 +08002684void mtk_gdm_config(struct mtk_eth *eth, u32 config)
developerfd40db22021-04-29 10:08:25 +08002685{
2686 int i;
2687
2688 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2689 return;
2690
2691 for (i = 0; i < MTK_MAC_COUNT; i++) {
2692 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
2693
2694		/* by default, set the forward port to send frames to the PDMA */
2695 val &= ~0xffff;
2696
2697 /* Enable RX checksum */
2698 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
2699
2700 val |= config;
2701
2702 if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
2703 val |= MTK_GDMA_SPECIAL_TAG;
2704
2705 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
2706 }
developerfd40db22021-04-29 10:08:25 +08002707}
2708
2709static int mtk_open(struct net_device *dev)
2710{
2711 struct mtk_mac *mac = netdev_priv(dev);
2712 struct mtk_eth *eth = mac->hw;
developera2613e62022-07-01 18:29:37 +08002713 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
developer18f46a82021-07-20 21:08:21 +08002714 int err, i;
developer3a5969e2022-02-09 15:36:36 +08002715 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08002716
2717 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
2718 if (err) {
2719 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
2720 err);
2721 return err;
2722 }
2723
2724 /* we run 2 netdevs on the same dma ring so we only bring it up once */
2725 if (!refcount_read(&eth->dma_refcnt)) {
2726 int err = mtk_start_dma(eth);
2727
2728 if (err)
2729 return err;
2730
2731 mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
2732
2733		/* Tell the CDM to parse the MTK special tag on frames coming from the CPU */
2734 if (netdev_uses_dsa(dev)) {
2735 u32 val;
2736 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
2737 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
2738 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
2739 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
2740 }
2741
2742 napi_enable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08002743 napi_enable(&eth->rx_napi[0].napi);
developerfd40db22021-04-29 10:08:25 +08002744 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002745 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
2746
2747 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2748 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2749 napi_enable(&eth->rx_napi[i].napi);
2750 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(i));
2751 }
2752 }
2753
developerfd40db22021-04-29 10:08:25 +08002754 refcount_set(&eth->dma_refcnt, 1);
2755 }
2756 else
2757 refcount_inc(&eth->dma_refcnt);
2758
developera2613e62022-07-01 18:29:37 +08002759 if (phylink_priv->desc) {
2760		/* Notice: this programming sequence applies only to the GPY211 single-PHY chip.
2761		   If the single PHY chip is not a GPY211, do the following instead:
2762		   1. Contact your single-PHY chip vendor and get the details of
2763		      - how to enable the link status change interrupt
2764		      - how to clear the interrupt source
2765 */
2766
2767 // clear interrupt source for gpy211
2768 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
2769
2770 // enable link status change interrupt for gpy211
2771 _mtk_mdio_write(eth, phylink_priv->phyaddr, 0x19, 0x0001);
2772
2773 phylink_priv->dev = dev;
2774
2775 // override dev pointer for single PHY chip 0
2776 if (phylink_priv->id == 0) {
2777 struct net_device *tmp;
2778
2779 tmp = __dev_get_by_name(&init_net, phylink_priv->label);
2780 if (tmp)
2781 phylink_priv->dev = tmp;
2782 else
2783 phylink_priv->dev = NULL;
2784 }
2785 }
2786
developerfd40db22021-04-29 10:08:25 +08002787 phylink_start(mac->phylink);
2788 netif_start_queue(dev);
developer3a5969e2022-02-09 15:36:36 +08002789 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer793f7b42022-05-20 13:54:51 +08002790 if (!phy_node && eth->sgmii->regmap[mac->id]) {
developer1a63ef92022-04-15 17:17:32 +08002791 regmap_write(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
developer3a5969e2022-02-09 15:36:36 +08002792 }
developerfd40db22021-04-29 10:08:25 +08002793 return 0;
2794}
2795
2796static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
2797{
2798 u32 val;
2799 int i;
2800
2801 /* stop the dma engine */
2802 spin_lock_bh(&eth->page_lock);
2803 val = mtk_r32(eth, glo_cfg);
2804 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
2805 glo_cfg);
2806 spin_unlock_bh(&eth->page_lock);
2807
2808 /* wait for dma stop */
2809 for (i = 0; i < 10; i++) {
2810 val = mtk_r32(eth, glo_cfg);
2811 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
developer8051e042022-04-08 13:26:36 +08002812 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08002813 continue;
2814 }
2815 break;
2816 }
2817}
2818
2819static int mtk_stop(struct net_device *dev)
2820{
2821 struct mtk_mac *mac = netdev_priv(dev);
2822 struct mtk_eth *eth = mac->hw;
developer18f46a82021-07-20 21:08:21 +08002823 int i;
developer3a5969e2022-02-09 15:36:36 +08002824 u32 val = 0;
2825 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08002826
2827 netif_tx_disable(dev);
2828
developer3a5969e2022-02-09 15:36:36 +08002829 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
2830 if (phy_node) {
2831 val = _mtk_mdio_read(eth, 0, 0);
2832 val |= BMCR_PDOWN;
2833 _mtk_mdio_write(eth, 0, 0, val);
developer793f7b42022-05-20 13:54:51 +08002834 } else if (eth->sgmii->regmap[mac->id]) {
developer1a63ef92022-04-15 17:17:32 +08002835 regmap_read(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
developer3a5969e2022-02-09 15:36:36 +08002836 val |= SGMII_PHYA_PWD;
developer1a63ef92022-04-15 17:17:32 +08002837 regmap_write(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
developer3a5969e2022-02-09 15:36:36 +08002838 }
2839
2840 //GMAC RX disable
2841 val = mtk_r32(eth, MTK_MAC_MCR(mac->id));
2842 mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(mac->id));
2843
2844 phylink_stop(mac->phylink);
2845
developerfd40db22021-04-29 10:08:25 +08002846 phylink_disconnect_phy(mac->phylink);
2847
2848 /* only shutdown DMA if this is the last user */
2849 if (!refcount_dec_and_test(&eth->dma_refcnt))
2850 return 0;
2851
2852 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
2853
2854 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002855 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08002856 napi_disable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08002857 napi_disable(&eth->rx_napi[0].napi);
2858
2859 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2860 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2861 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(i));
2862 napi_disable(&eth->rx_napi[i].napi);
2863 }
2864 }
developerfd40db22021-04-29 10:08:25 +08002865
2866 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2867 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
2868 mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
2869
2870 mtk_dma_free(eth);
2871
2872 return 0;
2873}
2874
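/* Assert the given ETHSYS reset bits, poll for them to latch (recording the
 * cold-reset event) before deasserting, then allow 10 ms for the blocks to
 * come back up.
 */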
developer8051e042022-04-08 13:26:36 +08002875void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
developerfd40db22021-04-29 10:08:25 +08002876{
developer8051e042022-04-08 13:26:36 +08002877 u32 val = 0, i = 0;
developerfd40db22021-04-29 10:08:25 +08002878
developerfd40db22021-04-29 10:08:25 +08002879 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
developer8051e042022-04-08 13:26:36 +08002880 reset_bits, reset_bits);
2881
2882 while (i++ < 5000) {
2883 mdelay(1);
2884 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
2885
2886 if ((val & reset_bits) == reset_bits) {
2887 mtk_reset_event_update(eth, MTK_EVENT_COLD_CNT);
2888 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2889 reset_bits, ~reset_bits);
2890 break;
2891 }
2892 }
2893
developerfd40db22021-04-29 10:08:25 +08002894 mdelay(10);
2895}
2896
2897static void mtk_clk_disable(struct mtk_eth *eth)
2898{
2899 int clk;
2900
2901 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
2902 clk_disable_unprepare(eth->clks[clk]);
2903}
2904
2905static int mtk_clk_enable(struct mtk_eth *eth)
2906{
2907 int clk, ret;
2908
2909 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
2910 ret = clk_prepare_enable(eth->clks[clk]);
2911 if (ret)
2912 goto err_disable_clks;
2913 }
2914
2915 return 0;
2916
2917err_disable_clks:
2918 while (--clk >= 0)
2919 clk_disable_unprepare(eth->clks[clk]);
2920
2921 return ret;
2922}
2923
developer18f46a82021-07-20 21:08:21 +08002924static int mtk_napi_init(struct mtk_eth *eth)
2925{
2926 struct mtk_napi *rx_napi = &eth->rx_napi[0];
2927 int i;
2928
2929 rx_napi->eth = eth;
2930 rx_napi->rx_ring = &eth->rx_ring[0];
2931 rx_napi->irq_grp_no = 2;
2932
2933 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2934 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2935 rx_napi = &eth->rx_napi[i];
2936 rx_napi->eth = eth;
2937 rx_napi->rx_ring = &eth->rx_ring[i];
2938 rx_napi->irq_grp_no = 2 + i;
2939 }
2940 }
2941
2942 return 0;
2943}
2944
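/* One-shot hardware init, also reused for warm/cold FE reset. Outside of a
 * pending reset it enables runtime PM and clocks; it then performs the FE
 * reset, restores PSE/GDM thresholds, interrupt delay and grouping, and
 * forces every GMAC into link-down until phylink reconfigures it.
 */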
developer8051e042022-04-08 13:26:36 +08002945static int mtk_hw_init(struct mtk_eth *eth, u32 type)
developerfd40db22021-04-29 10:08:25 +08002946{
developer8051e042022-04-08 13:26:36 +08002947 int i, ret = 0;
developerfd40db22021-04-29 10:08:25 +08002948
developer8051e042022-04-08 13:26:36 +08002949 pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
2950 atomic_read(&reset_lock), atomic_read(&force));
developerfd40db22021-04-29 10:08:25 +08002951
developer8051e042022-04-08 13:26:36 +08002952 if (atomic_read(&reset_lock) == 0) {
2953 if (test_and_set_bit(MTK_HW_INIT, &eth->state))
2954 return 0;
developerfd40db22021-04-29 10:08:25 +08002955
developer8051e042022-04-08 13:26:36 +08002956 pm_runtime_enable(eth->dev);
2957 pm_runtime_get_sync(eth->dev);
2958
2959 ret = mtk_clk_enable(eth);
2960 if (ret)
2961 goto err_disable_pm;
2962 }
developerfd40db22021-04-29 10:08:25 +08002963
2964 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2965 ret = device_reset(eth->dev);
2966 if (ret) {
2967 dev_err(eth->dev, "MAC reset failed!\n");
2968 goto err_disable_pm;
2969 }
2970
2971 /* enable interrupt delay for RX */
2972 mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
2973
2974 /* disable delay and normal interrupt */
2975 mtk_tx_irq_disable(eth, ~0);
2976 mtk_rx_irq_disable(eth, ~0);
2977
2978 return 0;
2979 }
2980
developer8051e042022-04-08 13:26:36 +08002981 pr_info("[%s] execute fe %s reset\n", __func__,
2982 (type == MTK_TYPE_WARM_RESET) ? "warm" : "cold");
developer545abf02021-07-15 17:47:01 +08002983
developer8051e042022-04-08 13:26:36 +08002984 if (type == MTK_TYPE_WARM_RESET)
2985 mtk_eth_warm_reset(eth);
developer545abf02021-07-15 17:47:01 +08002986 else
developer8051e042022-04-08 13:26:36 +08002987 mtk_eth_cold_reset(eth);
developer545abf02021-07-15 17:47:01 +08002988
2989 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developer545abf02021-07-15 17:47:01 +08002990 /* Set FE to PDMAv2 if necessary */
developerfd40db22021-04-29 10:08:25 +08002991 mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
developer545abf02021-07-15 17:47:01 +08002992 }
developerfd40db22021-04-29 10:08:25 +08002993
2994 if (eth->pctl) {
2995 /* Set GE2 driving and slew rate */
2996 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
2997
2998 /* set GE2 TDSEL */
2999 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3000
3001 /* set GE2 TUNE */
3002 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3003 }
3004
3005 /* Set linkdown as the default for each GMAC. Its own MCR would be set
3006 * up with the more appropriate value when mtk_mac_config call is being
3007 * invoked.
3008 */
3009 for (i = 0; i < MTK_MAC_COUNT; i++)
3010 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3011
3012 /* Enable RX VLan Offloading */
developer41294e32021-05-07 16:11:23 +08003013 if (eth->soc->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
3014 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3015 else
3016 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
developerfd40db22021-04-29 10:08:25 +08003017
3018 /* enable interrupt delay for RX/TX */
3019 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
3020 mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);
3021
3022 mtk_tx_irq_disable(eth, ~0);
3023 mtk_rx_irq_disable(eth, ~0);
3024
3025 /* FE int grouping */
3026 mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
developer18f46a82021-07-20 21:08:21 +08003027 mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_PDMA_INT_GRP2);
developerfd40db22021-04-29 10:08:25 +08003028 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
developer18f46a82021-07-20 21:08:21 +08003029 mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_QDMA_INT_GRP2);
developer8051e042022-04-08 13:26:36 +08003030 mtk_w32(eth, 0x21021003, MTK_FE_INT_GRP);
developerbe971722022-05-23 13:51:05 +08003031 mtk_w32(eth, MTK_FE_INT_TSO_FAIL |
developer8051e042022-04-08 13:26:36 +08003032 MTK_FE_INT_TSO_ILLEGAL | MTK_FE_INT_TSO_ALIGN |
3033 MTK_FE_INT_RFIFO_OV | MTK_FE_INT_RFIFO_UF, MTK_FE_INT_ENABLE);
developerfd40db22021-04-29 10:08:25 +08003034
developera2bdbd52021-05-31 19:10:17 +08003035 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfef9efd2021-06-16 18:28:09 +08003036 /* PSE Free Queue Flow Control */
3037 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3038
developer459b78e2022-07-01 17:25:10 +08003039 /* PSE should not drop port8 and port9 packets from WDMA Tx */
3040 mtk_w32(eth, 0x00000300, PSE_NO_DROP_CFG);
3041
3042 /* PSE should drop p8 and p9 packets when WDMA Rx ring full*/
3043 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
developer81bcad32021-07-15 14:14:38 +08003044
developerfef9efd2021-06-16 18:28:09 +08003045 /* PSE config input queue threshold */
developerfd40db22021-04-29 10:08:25 +08003046 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3047 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3048 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3049 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3050 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3051 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3052 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
developerfd5f9152022-01-05 16:29:42 +08003053 mtk_w32(eth, 0x002a000e, PSE_IQ_REV(8));
developerfd40db22021-04-29 10:08:25 +08003054
developerfef9efd2021-06-16 18:28:09 +08003055 /* PSE config output queue threshold */
developerfd40db22021-04-29 10:08:25 +08003056 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3057 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3058 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3059 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3060 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3061 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3062 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3063 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
developerfef9efd2021-06-16 18:28:09 +08003064
3065 /* GDM and CDM Threshold */
3066 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3067 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3068 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3069 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3070 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3071 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
developerfd40db22021-04-29 10:08:25 +08003072 }
3073
3074 return 0;
3075
3076err_disable_pm:
3077 pm_runtime_put_sync(eth->dev);
3078 pm_runtime_disable(eth->dev);
3079
3080 return ret;
3081}
3082
3083static int mtk_hw_deinit(struct mtk_eth *eth)
3084{
3085 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
3086 return 0;
3087
3088 mtk_clk_disable(eth);
3089
3090 pm_runtime_put_sync(eth->dev);
3091 pm_runtime_disable(eth->dev);
3092
3093 return 0;
3094}
3095
3096static int __init mtk_init(struct net_device *dev)
3097{
3098 struct mtk_mac *mac = netdev_priv(dev);
3099 struct mtk_eth *eth = mac->hw;
3100 const char *mac_addr;
3101
3102 mac_addr = of_get_mac_address(mac->of_node);
3103 if (!IS_ERR(mac_addr))
3104 ether_addr_copy(dev->dev_addr, mac_addr);
3105
3106 /* If the mac address is invalid, use random mac address */
3107 if (!is_valid_ether_addr(dev->dev_addr)) {
3108 eth_hw_addr_random(dev);
3109 dev_err(eth->dev, "generated random MAC address %pM\n",
3110 dev->dev_addr);
3111 }
3112
3113 return 0;
3114}
3115
3116static void mtk_uninit(struct net_device *dev)
3117{
3118 struct mtk_mac *mac = netdev_priv(dev);
3119 struct mtk_eth *eth = mac->hw;
3120
3121 phylink_disconnect_phy(mac->phylink);
3122 mtk_tx_irq_disable(eth, ~0);
3123 mtk_rx_irq_disable(eth, ~0);
3124}
3125
3126static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3127{
3128 struct mtk_mac *mac = netdev_priv(dev);
3129
3130 switch (cmd) {
3131 case SIOCGMIIPHY:
3132 case SIOCGMIIREG:
3133 case SIOCSMIIREG:
3134 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
3135 default:
3136 /* default invoke the mtk_eth_dbg handler */
3137 return mtk_do_priv_ioctl(dev, ifr, cmd);
3138 break;
3139 }
3140
3141 return -EOPNOTSUPP;
3142}
3143
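/* Deferred FE reset handler scheduled from mtk_tx_timeout(): verify the
 * reset event, quiesce PPE/FE and trigger WiFi SER, stop every netdev, run
 * a warm mtk_hw_init(), then reopen the devices, re-enable FE ports 3/4,
 * power the SGMII back up and restart the DMA monitor timer.
 */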
3144static void mtk_pending_work(struct work_struct *work)
3145{
3146 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
developer8051e042022-04-08 13:26:36 +08003147 struct device_node *phy_node = NULL;
3148 struct mtk_mac *mac = NULL;
3149 int err, i = 0;
developerfd40db22021-04-29 10:08:25 +08003150 unsigned long restart = 0;
developer8051e042022-04-08 13:26:36 +08003151 u32 val = 0;
3152
3153 atomic_inc(&reset_lock);
3154 val = mtk_r32(eth, MTK_FE_INT_STATUS);
3155 if (!mtk_check_reset_event(eth, val)) {
3156 atomic_dec(&reset_lock);
3157 pr_info("[%s] No need to do FE reset !\n", __func__);
3158 return;
3159 }
developerfd40db22021-04-29 10:08:25 +08003160
3161 rtnl_lock();
3162
developer8051e042022-04-08 13:26:36 +08003163	/* Disable FE P3 and P4 */
3164 val = mtk_r32(eth, MTK_FE_GLO_CFG);
3165 val |= MTK_FE_LINK_DOWN_P3;
3166 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3167 val |= MTK_FE_LINK_DOWN_P4;
3168 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3169
3170 /* Adjust PPE configurations to prepare for reset */
3171 mtk_prepare_reset_ppe(eth, 0);
3172 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3173 mtk_prepare_reset_ppe(eth, 1);
3174
3175 /* Adjust FE configurations to prepare for reset */
3176 mtk_prepare_reset_fe(eth);
3177
3178 /* Trigger Wifi SER reset */
3179 call_netdevice_notifiers(MTK_FE_START_RESET, eth->netdev[0]);
3180 rtnl_unlock();
3181 wait_for_completion_timeout(&wait_ser_done, 5000);
3182 rtnl_lock();
developerfd40db22021-04-29 10:08:25 +08003183
3184 while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
3185 cpu_relax();
3186
developer8051e042022-04-08 13:26:36 +08003187 del_timer_sync(&eth->mtk_dma_monitor_timer);
3188 pr_info("[%s] mtk_stop starts !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003189 /* stop all devices to make sure that dma is properly shut down */
3190 for (i = 0; i < MTK_MAC_COUNT; i++) {
3191 if (!eth->netdev[i])
3192 continue;
3193 mtk_stop(eth->netdev[i]);
3194 __set_bit(i, &restart);
3195 }
developer8051e042022-04-08 13:26:36 +08003196 pr_info("[%s] mtk_stop ends !\n", __func__);
3197 mdelay(15);
developerfd40db22021-04-29 10:08:25 +08003198
3199 if (eth->dev->pins)
3200 pinctrl_select_state(eth->dev->pins->p,
3201 eth->dev->pins->default_state);
developer8051e042022-04-08 13:26:36 +08003202
3203 pr_info("[%s] mtk_hw_init starts !\n", __func__);
3204 mtk_hw_init(eth, MTK_TYPE_WARM_RESET);
3205 pr_info("[%s] mtk_hw_init ends !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003206
3207 /* restart DMA and enable IRQs */
3208 for (i = 0; i < MTK_MAC_COUNT; i++) {
3209 if (!test_bit(i, &restart))
3210 continue;
3211 err = mtk_open(eth->netdev[i]);
3212 if (err) {
3213 netif_alert(eth, ifup, eth->netdev[i],
3214 "Driver up/down cycle failed, closing device.\n");
3215 dev_close(eth->netdev[i]);
3216 }
3217 }
3218
developer8051e042022-04-08 13:26:36 +08003219 /* Set KA tick select */
3220 mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0, MTK_PPE_TB_CFG(0));
3221 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3222 mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0, MTK_PPE_TB_CFG(1));
3223
3224	/* Enable FE P3 and P4 */
3225 val = mtk_r32(eth, MTK_FE_GLO_CFG);
3226 val &= ~MTK_FE_LINK_DOWN_P3;
3227 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3228 val &= ~MTK_FE_LINK_DOWN_P4;
3229 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3230
3231 /* Power up sgmii */
3232 for (i = 0; i < MTK_MAC_COUNT; i++) {
3233 mac = netdev_priv(eth->netdev[i]);
3234 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer793f7b42022-05-20 13:54:51 +08003235 if (!phy_node && eth->sgmii->regmap[i]) {
developer8051e042022-04-08 13:26:36 +08003236 mtk_gmac_sgmii_path_setup(eth, i);
3237 regmap_write(eth->sgmii->regmap[i], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
3238 }
3239 }
3240
3241 call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE, eth->netdev[0]);
3242 pr_info("[%s] HNAT reset done !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003243
developer8051e042022-04-08 13:26:36 +08003244 call_netdevice_notifiers(MTK_FE_RESET_DONE, eth->netdev[0]);
3245 pr_info("[%s] WiFi SER reset done !\n", __func__);
3246
3247 atomic_dec(&reset_lock);
3248 if (atomic_read(&force) > 0)
3249 atomic_dec(&force);
3250
3251 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
3252 eth->mtk_dma_monitor_timer.expires = jiffies;
3253 add_timer(&eth->mtk_dma_monitor_timer);
developerfd40db22021-04-29 10:08:25 +08003254 clear_bit_unlock(MTK_RESETTING, &eth->state);
3255
3256 rtnl_unlock();
3257}
3258
3259static int mtk_free_dev(struct mtk_eth *eth)
3260{
3261 int i;
3262
3263 for (i = 0; i < MTK_MAC_COUNT; i++) {
3264 if (!eth->netdev[i])
3265 continue;
3266 free_netdev(eth->netdev[i]);
3267 }
3268
3269 return 0;
3270}
3271
3272static int mtk_unreg_dev(struct mtk_eth *eth)
3273{
3274 int i;
3275
3276 for (i = 0; i < MTK_MAC_COUNT; i++) {
3277 if (!eth->netdev[i])
3278 continue;
3279 unregister_netdev(eth->netdev[i]);
3280 }
3281
3282 return 0;
3283}
3284
3285static int mtk_cleanup(struct mtk_eth *eth)
3286{
3287 mtk_unreg_dev(eth);
3288 mtk_free_dev(eth);
3289 cancel_work_sync(&eth->pending_work);
3290
3291 return 0;
3292}
3293
3294static int mtk_get_link_ksettings(struct net_device *ndev,
3295 struct ethtool_link_ksettings *cmd)
3296{
3297 struct mtk_mac *mac = netdev_priv(ndev);
3298
3299 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3300 return -EBUSY;
3301
3302 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
3303}
3304
3305static int mtk_set_link_ksettings(struct net_device *ndev,
3306 const struct ethtool_link_ksettings *cmd)
3307{
3308 struct mtk_mac *mac = netdev_priv(ndev);
3309
3310 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3311 return -EBUSY;
3312
3313 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
3314}
3315
3316static void mtk_get_drvinfo(struct net_device *dev,
3317 struct ethtool_drvinfo *info)
3318{
3319 struct mtk_mac *mac = netdev_priv(dev);
3320
3321 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
3322 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
3323 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
3324}
3325
3326static u32 mtk_get_msglevel(struct net_device *dev)
3327{
3328 struct mtk_mac *mac = netdev_priv(dev);
3329
3330 return mac->hw->msg_enable;
3331}
3332
3333static void mtk_set_msglevel(struct net_device *dev, u32 value)
3334{
3335 struct mtk_mac *mac = netdev_priv(dev);
3336
3337 mac->hw->msg_enable = value;
3338}
3339
3340static int mtk_nway_reset(struct net_device *dev)
3341{
3342 struct mtk_mac *mac = netdev_priv(dev);
3343
3344 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3345 return -EBUSY;
3346
3347 if (!mac->phylink)
3348 return -EOPNOTSUPP;
3349
3350 return phylink_ethtool_nway_reset(mac->phylink);
3351}
3352
3353static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3354{
3355 int i;
3356
3357 switch (stringset) {
3358 case ETH_SS_STATS:
3359 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
3360 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
3361 data += ETH_GSTRING_LEN;
3362 }
3363 break;
3364 }
3365}
3366
3367static int mtk_get_sset_count(struct net_device *dev, int sset)
3368{
3369 switch (sset) {
3370 case ETH_SS_STATS:
3371 return ARRAY_SIZE(mtk_ethtool_stats);
3372 default:
3373 return -EOPNOTSUPP;
3374 }
3375}
3376
3377static void mtk_get_ethtool_stats(struct net_device *dev,
3378 struct ethtool_stats *stats, u64 *data)
3379{
3380 struct mtk_mac *mac = netdev_priv(dev);
3381 struct mtk_hw_stats *hwstats = mac->hw_stats;
3382 u64 *data_src, *data_dst;
3383 unsigned int start;
3384 int i;
3385
3386 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3387 return;
3388
3389 if (netif_running(dev) && netif_device_present(dev)) {
3390 if (spin_trylock_bh(&hwstats->stats_lock)) {
3391 mtk_stats_update_mac(mac);
3392 spin_unlock_bh(&hwstats->stats_lock);
3393 }
3394 }
3395
3396 data_src = (u64 *)hwstats;
3397
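	/* Copy the counters under the u64_stats seqcount, retrying if a
	 * concurrent writer updated them, so the snapshot stays consistent.
	 */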
3398 do {
3399 data_dst = data;
3400 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
3401
3402 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
3403 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
3404 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
3405}
3406
3407static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
3408 u32 *rule_locs)
3409{
3410 int ret = -EOPNOTSUPP;
3411
3412 switch (cmd->cmd) {
3413 case ETHTOOL_GRXRINGS:
3414 if (dev->hw_features & NETIF_F_LRO) {
3415 cmd->data = MTK_MAX_RX_RING_NUM;
3416 ret = 0;
3417 }
3418 break;
3419 case ETHTOOL_GRXCLSRLCNT:
3420 if (dev->hw_features & NETIF_F_LRO) {
3421 struct mtk_mac *mac = netdev_priv(dev);
3422
3423 cmd->rule_cnt = mac->hwlro_ip_cnt;
3424 ret = 0;
3425 }
3426 break;
3427 case ETHTOOL_GRXCLSRULE:
3428 if (dev->hw_features & NETIF_F_LRO)
3429 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
3430 break;
3431 case ETHTOOL_GRXCLSRLALL:
3432 if (dev->hw_features & NETIF_F_LRO)
3433 ret = mtk_hwlro_get_fdir_all(dev, cmd,
3434 rule_locs);
3435 break;
3436 default:
3437 break;
3438 }
3439
3440 return ret;
3441}
3442
3443static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3444{
3445 int ret = -EOPNOTSUPP;
3446
3447 switch (cmd->cmd) {
3448 case ETHTOOL_SRXCLSRLINS:
3449 if (dev->hw_features & NETIF_F_LRO)
3450 ret = mtk_hwlro_add_ipaddr(dev, cmd);
3451 break;
3452 case ETHTOOL_SRXCLSRLDEL:
3453 if (dev->hw_features & NETIF_F_LRO)
3454 ret = mtk_hwlro_del_ipaddr(dev, cmd);
3455 break;
3456 default:
3457 break;
3458 }
3459
3460 return ret;
3461}
3462
developer6c5cbb52022-08-12 11:37:45 +08003463static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
3464{
3465 struct mtk_mac *mac = netdev_priv(dev);
3466
3467 phylink_ethtool_get_pauseparam(mac->phylink, pause);
3468}
3469
3470static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
3471{
3472 struct mtk_mac *mac = netdev_priv(dev);
3473
3474 return phylink_ethtool_set_pauseparam(mac->phylink, pause);
3475}
3476
developerfd40db22021-04-29 10:08:25 +08003477static const struct ethtool_ops mtk_ethtool_ops = {
3478 .get_link_ksettings = mtk_get_link_ksettings,
3479 .set_link_ksettings = mtk_set_link_ksettings,
3480 .get_drvinfo = mtk_get_drvinfo,
3481 .get_msglevel = mtk_get_msglevel,
3482 .set_msglevel = mtk_set_msglevel,
3483 .nway_reset = mtk_nway_reset,
3484 .get_link = ethtool_op_get_link,
3485 .get_strings = mtk_get_strings,
3486 .get_sset_count = mtk_get_sset_count,
3487 .get_ethtool_stats = mtk_get_ethtool_stats,
3488 .get_rxnfc = mtk_get_rxnfc,
3489 .set_rxnfc = mtk_set_rxnfc,
developer6c5cbb52022-08-12 11:37:45 +08003490 .get_pauseparam = mtk_get_pauseparam,
3491 .set_pauseparam = mtk_set_pauseparam,
developerfd40db22021-04-29 10:08:25 +08003492};
3493
3494static const struct net_device_ops mtk_netdev_ops = {
3495 .ndo_init = mtk_init,
3496 .ndo_uninit = mtk_uninit,
3497 .ndo_open = mtk_open,
3498 .ndo_stop = mtk_stop,
3499 .ndo_start_xmit = mtk_start_xmit,
3500 .ndo_set_mac_address = mtk_set_mac_address,
3501 .ndo_validate_addr = eth_validate_addr,
3502 .ndo_do_ioctl = mtk_do_ioctl,
3503 .ndo_tx_timeout = mtk_tx_timeout,
3504 .ndo_get_stats64 = mtk_get_stats64,
3505 .ndo_fix_features = mtk_fix_features,
3506 .ndo_set_features = mtk_set_features,
3507#ifdef CONFIG_NET_POLL_CONTROLLER
3508 .ndo_poll_controller = mtk_poll_controller,
3509#endif
3510};
3511
3512static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
3513{
3514 const __be32 *_id = of_get_property(np, "reg", NULL);
3515 struct phylink *phylink;
3516 int phy_mode, id, err;
3517 struct mtk_mac *mac;
developera2613e62022-07-01 18:29:37 +08003518 struct mtk_phylink_priv *phylink_priv;
3519 struct fwnode_handle *fixed_node;
3520 struct gpio_desc *desc;
developerfd40db22021-04-29 10:08:25 +08003521
3522 if (!_id) {
3523 dev_err(eth->dev, "missing mac id\n");
3524 return -EINVAL;
3525 }
3526
3527 id = be32_to_cpup(_id);
developerfb556ca2021-10-13 10:52:09 +08003528 if (id < 0 || id >= MTK_MAC_COUNT) {
developerfd40db22021-04-29 10:08:25 +08003529 dev_err(eth->dev, "%d is not a valid mac id\n", id);
3530 return -EINVAL;
3531 }
3532
3533 if (eth->netdev[id]) {
3534 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
3535 return -EINVAL;
3536 }
3537
3538 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
3539 if (!eth->netdev[id]) {
3540 dev_err(eth->dev, "alloc_etherdev failed\n");
3541 return -ENOMEM;
3542 }
3543 mac = netdev_priv(eth->netdev[id]);
3544 eth->mac[id] = mac;
3545 mac->id = id;
3546 mac->hw = eth;
3547 mac->of_node = np;
3548
3549 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
3550 mac->hwlro_ip_cnt = 0;
3551
3552 mac->hw_stats = devm_kzalloc(eth->dev,
3553 sizeof(*mac->hw_stats),
3554 GFP_KERNEL);
3555 if (!mac->hw_stats) {
3556 dev_err(eth->dev, "failed to allocate counter memory\n");
3557 err = -ENOMEM;
3558 goto free_netdev;
3559 }
3560 spin_lock_init(&mac->hw_stats->stats_lock);
3561 u64_stats_init(&mac->hw_stats->syncp);
3562 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
3563
3564 /* phylink create */
3565 phy_mode = of_get_phy_mode(np);
3566 if (phy_mode < 0) {
3567 dev_err(eth->dev, "incorrect phy-mode\n");
3568 err = -EINVAL;
3569 goto free_netdev;
3570 }
3571
3572 /* mac config is not set */
3573 mac->interface = PHY_INTERFACE_MODE_NA;
3574 mac->mode = MLO_AN_PHY;
3575 mac->speed = SPEED_UNKNOWN;
3576
3577 mac->phylink_config.dev = &eth->netdev[id]->dev;
3578 mac->phylink_config.type = PHYLINK_NETDEV;
3579
3580 phylink = phylink_create(&mac->phylink_config,
3581 of_fwnode_handle(mac->of_node),
3582 phy_mode, &mtk_phylink_ops);
3583 if (IS_ERR(phylink)) {
3584 err = PTR_ERR(phylink);
3585 goto free_netdev;
3586 }
3587
3588 mac->phylink = phylink;
3589
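	/* An optional "link-gpio" in the fixed-link node provides link
	 * change interrupts for the fixed link; its "label" and the address
	 * of its "phy-handle" PHY are recorded in the phylink private data.
	 */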
developera2613e62022-07-01 18:29:37 +08003590 fixed_node = fwnode_get_named_child_node(of_fwnode_handle(mac->of_node),
3591 "fixed-link");
3592 if (fixed_node) {
3593 desc = fwnode_get_named_gpiod(fixed_node, "link-gpio",
3594 0, GPIOD_IN, "?");
3595 if (!IS_ERR(desc)) {
3596 struct device_node *phy_np;
3597 const char *label;
3598 int irq, phyaddr;
3599
3600 phylink_priv = &mac->phylink_priv;
3601
3602 phylink_priv->desc = desc;
3603 phylink_priv->id = id;
3604 phylink_priv->link = -1;
3605
3606 irq = gpiod_to_irq(desc);
3607 if (irq > 0) {
3608 devm_request_irq(eth->dev, irq, mtk_handle_irq_fixed_link,
3609 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
3610 "ethernet:fixed link", mac);
3611 }
3612
3613 if (!of_property_read_string(to_of_node(fixed_node), "label", &label))
3614 strscpy(phylink_priv->label, label, sizeof(phylink_priv->label));
3615
3616 phy_np = of_parse_phandle(to_of_node(fixed_node), "phy-handle", 0);
3617 if (phy_np) {
3618 if (!of_property_read_u32(phy_np, "reg", &phyaddr))
3619 phylink_priv->phyaddr = phyaddr;
	of_node_put(phy_np);
3620 }
3621 }
3622 fwnode_handle_put(fixed_node);
3623 }
3624
developerfd40db22021-04-29 10:08:25 +08003625 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
3626 eth->netdev[id]->watchdog_timeo = 5 * HZ;
3627 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
3628 eth->netdev[id]->base_addr = (unsigned long)eth->base;
3629
3630 eth->netdev[id]->hw_features = eth->soc->hw_features;
3631 if (eth->hwlro)
3632 eth->netdev[id]->hw_features |= NETIF_F_LRO;
3633
3634 eth->netdev[id]->vlan_features = eth->soc->hw_features &
3635 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
3636 eth->netdev[id]->features |= eth->soc->hw_features;
3637 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
3638
3639 eth->netdev[id]->irq = eth->irq[0];
3640 eth->netdev[id]->dev.of_node = np;
3641
3642 return 0;
3643
3644free_netdev:
3645 free_netdev(eth->netdev[id]);
3646 return err;
3647}
3648
3649static int mtk_probe(struct platform_device *pdev)
3650{
3651 struct device_node *mac_np;
3652 struct mtk_eth *eth;
3653 int err, i;
3654
3655 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
3656 if (!eth)
3657 return -ENOMEM;
3658
3659 eth->soc = of_device_get_match_data(&pdev->dev);
3660
3661 eth->dev = &pdev->dev;
3662 eth->base = devm_platform_ioremap_resource(pdev, 0);
3663 if (IS_ERR(eth->base))
3664 return PTR_ERR(eth->base);
3665
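	/* On SoCs with built-in SRAM, the scratch ring is expected to live
	 * in that region; record its physical address, MTK_ETH_SRAM_OFFSET
	 * into the device's register resource.
	 */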
3666 if (eth->soc->has_sram) {
3667 struct resource *res;

3668 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
developer4c32b7a2021-11-13 16:46:43 +08003669 if (unlikely(!res))
3670 return -EINVAL;
developerfd40db22021-04-29 10:08:25 +08003671 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
3672 }
3673
3674 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3675 eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
3676 eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
3677 } else {
3678 eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
3679 eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
3680 }
3681
3682 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3683 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
3684 eth->ip_align = NET_IP_ALIGN;
3685 } else {
developera2bdbd52021-05-31 19:10:17 +08003686 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developerfd40db22021-04-29 10:08:25 +08003687 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_V2;
3688 else
3689 eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
3690 }
3691
3692 spin_lock_init(&eth->page_lock);
3693 spin_lock_init(&eth->tx_irq_lock);
3694 spin_lock_init(&eth->rx_irq_lock);
developerd82e8372022-02-09 15:00:09 +08003695 spin_lock_init(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +08003696
3697 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3698 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3699 "mediatek,ethsys");
3700 if (IS_ERR(eth->ethsys)) {
3701 dev_err(&pdev->dev, "no ethsys regmap found\n");
3702 return PTR_ERR(eth->ethsys);
3703 }
3704 }
3705
3706 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
3707 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3708 "mediatek,infracfg");
3709 if (IS_ERR(eth->infra)) {
3710 dev_err(&pdev->dev, "no infracfg regmap found\n");
3711 return PTR_ERR(eth->infra);
3712 }
3713 }
3714
3715 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
3716 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
3717 GFP_KERNEL);
3718 if (!eth->sgmii)
3719 return -ENOMEM;
3720
3721 err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
3722 eth->soc->ana_rgc3);
3723
3724 if (err)
3725 return err;
3726 }
3727
3728 if (eth->soc->required_pctl) {
3729 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3730 "mediatek,pctl");
3731 if (IS_ERR(eth->pctl)) {
3732 dev_err(&pdev->dev, "no pctl regmap found\n");
3733 return PTR_ERR(eth->pctl);
3734 }
3735 }
3736
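	/* SoCs with a single shared interrupt line map every logical IRQ
	 * slot to irq[0]; otherwise each IRQ comes from the platform device.
	 */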
developer18f46a82021-07-20 21:08:21 +08003737 for (i = 0; i < MTK_MAX_IRQ_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08003738 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
3739 eth->irq[i] = eth->irq[0];
3740 else
3741 eth->irq[i] = platform_get_irq(pdev, i);
3742 if (eth->irq[i] < 0) {
3743 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
3744 return -ENXIO;
3745 }
3746 }
3747
3748 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
3749 eth->clks[i] = devm_clk_get(eth->dev,
3750 mtk_clks_source_name[i]);
3751 if (IS_ERR(eth->clks[i])) {
3752 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
3753 return -EPROBE_DEFER;
3754 if (eth->soc->required_clks & BIT(i)) {
3755 dev_err(&pdev->dev, "clock %s not found\n",
3756 mtk_clks_source_name[i]);
3757 return -EINVAL;
3758 }
3759 eth->clks[i] = NULL;
3760 }
3761 }
3762
3763 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
3764 INIT_WORK(&eth->pending_work, mtk_pending_work);
3765
developer8051e042022-04-08 13:26:36 +08003766 err = mtk_hw_init(eth, MTK_TYPE_COLD_RESET);
developerfd40db22021-04-29 10:08:25 +08003767 if (err)
3768 return err;
3769
3770 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
3771
3772 for_each_child_of_node(pdev->dev.of_node, mac_np) {
3773 if (!of_device_is_compatible(mac_np,
3774 "mediatek,eth-mac"))
3775 continue;
3776
3777 if (!of_device_is_available(mac_np))
3778 continue;
3779
3780 err = mtk_add_mac(eth, mac_np);
3781 if (err) {
3782 of_node_put(mac_np);
3783 goto err_deinit_hw;
3784 }
3785 }
3786
developer18f46a82021-07-20 21:08:21 +08003787 err = mtk_napi_init(eth);
3788 if (err)
3789 goto err_free_dev;
3790
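	/* Shared-interrupt SoCs register one combined handler; otherwise TX
	 * and RX get dedicated handlers, plus per-ring RX interrupts when
	 * RSS is supported or a separate FE interrupt when it is not.
	 */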
developerfd40db22021-04-29 10:08:25 +08003791 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
3792 err = devm_request_irq(eth->dev, eth->irq[0],
3793 mtk_handle_irq, 0,
3794 dev_name(eth->dev), eth);
3795 } else {
3796 err = devm_request_irq(eth->dev, eth->irq[1],
3797 mtk_handle_irq_tx, 0,
3798 dev_name(eth->dev), eth);
3799 if (err)
3800 goto err_free_dev;
3801
3802 err = devm_request_irq(eth->dev, eth->irq[2],
3803 mtk_handle_irq_rx, 0,
developer18f46a82021-07-20 21:08:21 +08003804 dev_name(eth->dev), &eth->rx_napi[0]);
3805 if (err)
3806 goto err_free_dev;
3807
developer793f7b42022-05-20 13:54:51 +08003808 if (MTK_MAX_IRQ_NUM > 3) {
3809 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3810 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3811 err = devm_request_irq(eth->dev,
3812 eth->irq[2 + i],
3813 mtk_handle_irq_rx, 0,
3814 dev_name(eth->dev),
3815 &eth->rx_napi[i]);
3816 if (err)
3817 goto err_free_dev;
3818 }
3819 } else {
3820 err = devm_request_irq(eth->dev, eth->irq[3],
3821 mtk_handle_fe_irq, 0,
3822 dev_name(eth->dev), eth);
developer18f46a82021-07-20 21:08:21 +08003823 if (err)
3824 goto err_free_dev;
3825 }
3826 }
developerfd40db22021-04-29 10:08:25 +08003827 }
developer8051e042022-04-08 13:26:36 +08003828
developerfd40db22021-04-29 10:08:25 +08003829 if (err)
3830 goto err_free_dev;
3831
3832 /* No MT7628/88 support yet */
3833 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3834 err = mtk_mdio_init(eth);
3835 if (err)
3836 goto err_free_dev;
3837 }
3838
3839 for (i = 0; i < MTK_MAX_DEVS; i++) {
3840 if (!eth->netdev[i])
3841 continue;
3842
3843 err = register_netdev(eth->netdev[i]);
3844 if (err) {
3845 dev_err(eth->dev, "error bringing up device\n");
3846 goto err_deinit_mdio;
3847 } else
3848 netif_info(eth, probe, eth->netdev[i],
3849 "mediatek frame engine at 0x%08lx, irq %d\n",
3850 eth->netdev[i]->base_addr, eth->irq[0]);
3851 }
3852
3853 /* we run 2 devices on the same DMA ring so we need a dummy device
3854 * for NAPI to work
3855 */
3856 init_dummy_netdev(&eth->dummy_dev);
3857 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
3858 MTK_NAPI_WEIGHT);
developer18f46a82021-07-20 21:08:21 +08003859 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[0].napi, mtk_napi_rx,
developerfd40db22021-04-29 10:08:25 +08003860 MTK_NAPI_WEIGHT);
3861
developer18f46a82021-07-20 21:08:21 +08003862 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3863 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
3864 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[i].napi,
3865 mtk_napi_rx, MTK_NAPI_WEIGHT);
3866 }
3867
developerfd40db22021-04-29 10:08:25 +08003868 mtketh_debugfs_init(eth);
3869 debug_proc_init(eth);
3870
3871 platform_set_drvdata(pdev, eth);
3872
developer8051e042022-04-08 13:26:36 +08003873 register_netdevice_notifier(&mtk_eth_netdevice_nb);
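	/* The DMA monitor timer is only used on NETSYS v2 platforms. */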
developer793f7b42022-05-20 13:54:51 +08003874#if defined(CONFIG_MEDIATEK_NETSYS_V2)
developer8051e042022-04-08 13:26:36 +08003875 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
3876 eth->mtk_dma_monitor_timer.expires = jiffies;
3877 add_timer(&eth->mtk_dma_monitor_timer);
developer793f7b42022-05-20 13:54:51 +08003878#endif
developer8051e042022-04-08 13:26:36 +08003879
developerfd40db22021-04-29 10:08:25 +08003880 return 0;
3881
3882err_deinit_mdio:
3883 mtk_mdio_cleanup(eth);
3884err_free_dev:
3885 mtk_free_dev(eth);
3886err_deinit_hw:
3887 mtk_hw_deinit(eth);
3888
3889 return err;
3890}
3891
3892static int mtk_remove(struct platform_device *pdev)
3893{
3894 struct mtk_eth *eth = platform_get_drvdata(pdev);
3895 struct mtk_mac *mac;
3896 int i;
3897
3898 /* stop all devices to make sure that dma is properly shut down */
3899 for (i = 0; i < MTK_MAC_COUNT; i++) {
3900 if (!eth->netdev[i])
3901 continue;
3902 mtk_stop(eth->netdev[i]);
3903 mac = netdev_priv(eth->netdev[i]);
3904 phylink_disconnect_phy(mac->phylink);
3905 }
3906
3907 mtk_hw_deinit(eth);
3908
3909 netif_napi_del(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08003910 netif_napi_del(&eth->rx_napi[0].napi);
3911
3912 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3913 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
3914 netif_napi_del(&eth->rx_napi[i].napi);
3915 }
3916
developerfd40db22021-04-29 10:08:25 +08003917 mtk_cleanup(eth);
3918 mtk_mdio_cleanup(eth);
developer8051e042022-04-08 13:26:36 +08003919 unregister_netdevice_notifier(&mtk_eth_netdevice_nb);
3920 del_timer_sync(&eth->mtk_dma_monitor_timer);
developerfd40db22021-04-29 10:08:25 +08003921
3922 return 0;
3923}
3924
3925static const struct mtk_soc_data mt2701_data = {
3926 .caps = MT7623_CAPS | MTK_HWLRO,
3927 .hw_features = MTK_HW_FEATURES,
3928 .required_clks = MT7623_CLKS_BITMAP,
3929 .required_pctl = true,
3930 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003931 .txrx = {
3932 .txd_size = sizeof(struct mtk_tx_dma),
3933 .rxd_size = sizeof(struct mtk_rx_dma),
3934 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3935 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3936 },
developerfd40db22021-04-29 10:08:25 +08003937};
3938
3939static const struct mtk_soc_data mt7621_data = {
3940 .caps = MT7621_CAPS,
3941 .hw_features = MTK_HW_FEATURES,
3942 .required_clks = MT7621_CLKS_BITMAP,
3943 .required_pctl = false,
3944 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003945 .txrx = {
3946 .txd_size = sizeof(struct mtk_tx_dma),
3947 .rxd_size = sizeof(struct mtk_rx_dma),
3948 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3949 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3950 },
developerfd40db22021-04-29 10:08:25 +08003951};
3952
3953static const struct mtk_soc_data mt7622_data = {
3954 .ana_rgc3 = 0x2028,
3955 .caps = MT7622_CAPS | MTK_HWLRO,
3956 .hw_features = MTK_HW_FEATURES,
3957 .required_clks = MT7622_CLKS_BITMAP,
3958 .required_pctl = false,
3959 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003960 .txrx = {
3961 .txd_size = sizeof(struct mtk_tx_dma),
3962 .rxd_size = sizeof(struct mtk_rx_dma),
3963 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3964 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3965 },
developerfd40db22021-04-29 10:08:25 +08003966};
3967
3968static const struct mtk_soc_data mt7623_data = {
3969 .caps = MT7623_CAPS | MTK_HWLRO,
3970 .hw_features = MTK_HW_FEATURES,
3971 .required_clks = MT7623_CLKS_BITMAP,
3972 .required_pctl = true,
3973 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003974 .txrx = {
3975 .txd_size = sizeof(struct mtk_tx_dma),
3976 .rxd_size = sizeof(struct mtk_rx_dma),
3977 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3978 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3979 },
developerfd40db22021-04-29 10:08:25 +08003980};
3981
3982static const struct mtk_soc_data mt7629_data = {
3983 .ana_rgc3 = 0x128,
3984 .caps = MT7629_CAPS | MTK_HWLRO,
3985 .hw_features = MTK_HW_FEATURES,
3986 .required_clks = MT7629_CLKS_BITMAP,
3987 .required_pctl = false,
3988 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003989 .txrx = {
3990 .txd_size = sizeof(struct mtk_tx_dma),
3991 .rxd_size = sizeof(struct mtk_rx_dma),
3992 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3993 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3994 },
developerfd40db22021-04-29 10:08:25 +08003995};
3996
3997static const struct mtk_soc_data mt7986_data = {
3998 .ana_rgc3 = 0x128,
3999 .caps = MT7986_CAPS,
developercba5f4e2021-05-06 14:01:53 +08004000 .hw_features = MTK_HW_FEATURES,
developerfd40db22021-04-29 10:08:25 +08004001 .required_clks = MT7986_CLKS_BITMAP,
4002 .required_pctl = false,
4003 .has_sram = true,
developere9356982022-07-04 09:03:20 +08004004 .txrx = {
4005 .txd_size = sizeof(struct mtk_tx_dma_v2),
4006 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4007 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4008 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
4009 },
developerfd40db22021-04-29 10:08:25 +08004010};
4011
developer255bba22021-07-27 15:16:33 +08004012static const struct mtk_soc_data mt7981_data = {
4013 .ana_rgc3 = 0x128,
4014 .caps = MT7981_CAPS,
developer7377b0b2021-11-18 14:54:47 +08004015 .hw_features = MTK_HW_FEATURES,
developer255bba22021-07-27 15:16:33 +08004016 .required_clks = MT7981_CLKS_BITMAP,
4017 .required_pctl = false,
4018 .has_sram = true,
developere9356982022-07-04 09:03:20 +08004019 .txrx = {
4020 .txd_size = sizeof(struct mtk_tx_dma_v2),
4021 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4022 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4023 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
4024 },
developer255bba22021-07-27 15:16:33 +08004025};
4026
developerfd40db22021-04-29 10:08:25 +08004027static const struct mtk_soc_data rt5350_data = {
4028 .caps = MT7628_CAPS,
4029 .hw_features = MTK_HW_FEATURES_MT7628,
4030 .required_clks = MT7628_CLKS_BITMAP,
4031 .required_pctl = false,
4032 .has_sram = false,
developere9356982022-07-04 09:03:20 +08004033 .txrx = {
4034 .txd_size = sizeof(struct mtk_tx_dma),
4035 .rxd_size = sizeof(struct mtk_rx_dma),
4036 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4037 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
4038 },
developerfd40db22021-04-29 10:08:25 +08004039};
4040
4041const struct of_device_id of_mtk_match[] = {
4042 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
4043 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
4044 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
4045 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
4046 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
4047 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
developer255bba22021-07-27 15:16:33 +08004048 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
developerfd40db22021-04-29 10:08:25 +08004049 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
4050 {},
4051};
4052MODULE_DEVICE_TABLE(of, of_mtk_match);
4053
4054static struct platform_driver mtk_driver = {
4055 .probe = mtk_probe,
4056 .remove = mtk_remove,
4057 .driver = {
4058 .name = "mtk_soc_eth",
4059 .of_match_table = of_mtk_match,
4060 },
4061};
4062
4063module_platform_driver(mtk_driver);
4064
4065MODULE_LICENSE("GPL");
4066MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
4067MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");