// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>

#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
#include "mtk_eth_reset.h"

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
#include "mtk_hnat/nf_hnat_mtk.h"
#endif

static int mtk_msg_level = -1;
atomic_t reset_lock = ATOMIC_INIT(0);
atomic_t force = ATOMIC_INIT(0);

module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
DECLARE_COMPLETION(wait_ser_done);

#define MTK_ETHTOOL_STAT(x) { #x, \
			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
	"sgmii_ck", "eth2pll", "wocpu0", "wocpu1",
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
	u32 val;

	val = mtk_r32(eth, reg);
	val &= ~mask;
	val |= set;
	mtk_w32(eth, val, reg);
	return reg;
}

static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		cond_resched();
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

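/* PHY register accesses go through the MDIO indirect access control (IAC)
 * register. Clause 22 accesses need a single IAC command; Clause 45 accesses
 * (phy_reg has MII_ADDR_C45 set) first issue an address cycle carrying the
 * device address and register number, then a separate read or write cycle.
 * Every command waits for the IAC busy bit to clear before proceeding.
 */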
u32 _mtk_mdio_write(struct mtk_eth *eth, int phy_addr,
		    int phy_reg, u16 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return -1;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_WRITE |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return 0xffff;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_READ_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

static int mtk_mdio_reset(struct mii_bus *bus)
{
	/* mdiobus_register() already triggers a reset pulse when bus reset
	 * is enabled, so here we only need to wait until the devices on the
	 * bus are ready.
	 */
	mdelay(20);

	return 0;
}

static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
				     phy_interface_t interface)
{
	u32 val;

	/* Check DDR memory type.
	 * Currently TRGMII mode with DDR2 memory is not supported.
	 */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
	if (interface == PHY_INTERFACE_MODE_TRGMII &&
	    val & SYSCFG_DRAM_TYPE_DDR2) {
		dev_err(eth->dev,
			"TRGMII mode with DDR2 memory is not supported!\n");
		return -EOPNOTSUPP;
	}

	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_MT7621_MASK, val);

	return 0;
}

static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
				   phy_interface_t interface, int speed)
{
	u32 val;
	int ret;

	if (interface == PHY_INTERFACE_MODE_TRGMII) {
		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
		val = 500000000;
		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
		if (ret)
			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
		return;
	}

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	u32 mcr_cur, mcr_new, sid, i;
	int val, ge_mode, err = 0;

	/* MT76x8 has no hardware settings for the MAC */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
	    mac->interface != state->interface) {
		/* Setup soc pin functions */
		switch (state->interface) {
		case PHY_INTERFACE_MODE_TRGMII:
			if (mac->id)
				goto err_phy;
			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
					  MTK_GMAC1_TRGMII))
				goto err_phy;
			/* fall through */
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_REVMII:
		case PHY_INTERFACE_MODE_RMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
		case PHY_INTERFACE_MODE_2500BASEX:
		case PHY_INTERFACE_MODE_SGMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_GMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
				err = mtk_gmac_gephy_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		default:
			goto err_phy;
		}

		/* Setup clock for 1st gmac */
		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
		    !phy_interface_mode_is_8023z(state->interface) &&
		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
			if (MTK_HAS_CAPS(mac->hw->soc->caps,
					 MTK_TRGMII_MT7621_CLK)) {
				if (mt7621_gmac0_rgmii_adjust(mac->hw,
							      state->interface))
					goto err_phy;
			} else {
				mtk_gmac0_rgmii_adjust(mac->hw,
						       state->interface,
						       state->speed);

				/* mt7623_pad_clk_setup */
				for (i = 0; i < NUM_TRGMII_CTRL; i++)
					mtk_w32(mac->hw,
						TD_DM_DRVP(8) | TD_DM_DRVN(8),
						TRGMII_TD_ODT(i));

				/* Assert/release MT7623 RXC reset */
				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
					TRGMII_RCK_CTRL);
				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
			}
		}

		ge_mode = 0;
		switch (state->interface) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_GMII:
			ge_mode = 1;
			break;
		case PHY_INTERFACE_MODE_REVMII:
			ge_mode = 2;
			break;
		case PHY_INTERFACE_MODE_RMII:
			if (mac->id)
				goto err_phy;
			ge_mode = 3;
			break;
		default:
			break;
		}

		/* put the gmac into the right mode */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
		spin_unlock(&eth->syscfg0_lock);

		mac->interface = state->interface;
	}

	/* SGMII */
	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path from GMAC to SGMII will be enabled once the
		 * SGMIISYS setup is done.
		 */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK,
				   ~(u32)SYSCFG0_SGMII_MASK);

		/* Decide how GMAC and SGMIISYS are mapped */
		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
		       0 : mac->id;

		/* Setup SGMIISYS with the determined property */
		if (state->interface != PHY_INTERFACE_MODE_SGMII)
			err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
							 state);
		else
			err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);

		if (err) {
			spin_unlock(&eth->syscfg0_lock);
			goto init_err;
		}

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, val);
		spin_unlock(&eth->syscfg0_lock);
	} else if (phylink_autoneg_inband(mode)) {
		dev_err(eth->dev,
			"In-band mode not supported in non SGMII mode!\n");
		return;
	}

	/* Setup gmac */
	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
	mcr_new = mcr_cur;
	mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
		     MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
		     MAC_MCR_FORCE_RX_FC);
	mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;

	switch (state->speed) {
	case SPEED_2500:
	case SPEED_1000:
		mcr_new |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr_new |= MAC_MCR_SPEED_100;
		break;
	}
	if (state->duplex == DUPLEX_FULL) {
		mcr_new |= MAC_MCR_FORCE_DPX;
		if (state->pause & MLO_PAUSE_TX)
			mcr_new |= MAC_MCR_FORCE_TX_FC;
		if (state->pause & MLO_PAUSE_RX)
			mcr_new |= MAC_MCR_FORCE_RX_FC;
	}

	/* Only update control register when needed! */
	if (mcr_new != mcr_cur)
		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));

	return;

err_phy:
	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
		mac->id, phy_modes(state->interface));
	return;

init_err:
	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
		mac->id, phy_modes(state->interface), err);
}

static int mtk_mac_link_state(struct phylink_config *config,
			      struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));

	state->link = (pmsr & MAC_MSR_LINK);
	state->duplex = (pmsr & MAC_MSR_DPX) >> 1;

	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
	case 0:
		state->speed = SPEED_10;
		break;
	case MAC_MSR_SPEED_100:
		state->speed = SPEED_100;
		break;
	case MAC_MSR_SPEED_1000:
		state->speed = SPEED_1000;
		break;
	default:
		state->speed = SPEED_UNKNOWN;
		break;
	}

	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
	if (pmsr & MAC_MSR_RX_FC)
		state->pause |= MLO_PAUSE_RX;
	if (pmsr & MAC_MSR_TX_FC)
		state->pause |= MLO_PAUSE_TX;

	return 1;
}

static void mtk_mac_an_restart(struct phylink_config *config)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	mtk_sgmii_restart_an(mac->hw, mac->id);
}

static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));

	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}

static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
			    phy_interface_t interface,
			    struct phy_device *phy)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));

	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}

static void mtk_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
	      phy_interface_mode_is_rgmii(state->interface)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
	       phy_interface_mode_is_8023z(state->interface)))) {
		linkmode_zero(supported);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_TRGMII:
		phylink_set(mask, 1000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseT_Half);
		/* fall through */
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_REVMII:
	case PHY_INTERFACE_MODE_NA:
	default:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		break;
	}

	if (state->interface == PHY_INTERFACE_MODE_NA) {
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
			phylink_set(mask, 1000baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
		}
	}

	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}

static const struct phylink_mac_ops mtk_phylink_ops = {
	.validate = mtk_validate,
	.mac_link_state = mtk_mac_link_state,
	.mac_an_restart = mtk_mac_an_restart,
	.mac_config = mtk_mac_config,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
};

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->reset = mtk_mdio_reset;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	if (snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np) < 0) {
		ret = -ENOMEM;
		goto err_put_node;
	}
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MT7628_SDM_MAC_ADRH);
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MT7628_SDM_MAC_ADRL);
	} else {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MTK_GDMA_MAC_ADRH(mac->id));
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MTK_GDMA_MAC_ADRL(mac->id));
	}
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats = mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
		mtk_r32(mac->hw, base + 0x24);
	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
	stats = mtk_r32(mac->hw, base + 0x34);
	if (stats)
		hw_stats->tx_bytes += (stats << 32);
	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
			hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

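/* RX buffers are page fragments handed to build_skb(): each fragment must
 * hold the NET_SKB_PAD + NET_IP_ALIGN headroom, the largest frame we accept
 * and the trailing struct skb_shared_info. mtk_max_frag_size() computes
 * that allocation size for a given MTU, and mtk_max_buf_size() derives the
 * usable DMA buffer length back from it.
 */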
static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
			    struct mtk_rx_dma_v2 *dma_rxd)
{
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	if (!(rxd->rxd2 & RX_DMA_DONE))
		return false;

	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
	}

	return true;
}

/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	if (!eth->soc->has_sram) {
		eth->scratch_ring = dma_alloc_coherent(eth->dev,
						       cnt * soc->txrx.txd_size,
						       &eth->phy_scratch_ring,
						       GFP_KERNEL);
	} else {
		eth->scratch_ring = eth->base + MTK_ETH_SRAM_OFFSET;
	}

	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);

	for (i = 0; i < cnt; i++) {
		struct mtk_tx_dma_v2 *txd;

		txd = eth->scratch_ring + i * soc->txrx.txd_size;
		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
		if (i < cnt - 1)
			txd->txd2 = eth->phy_scratch_ring +
				    (i + 1) * soc->txrx.txd_size;

		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
		txd->txd4 = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
			txd->txd5 = 0;
			txd->txd6 = 0;
			txd->txd7 = 0;
			txd->txd8 = 0;
		}
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}

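/* Helpers for translating between the different views of a TX descriptor:
 * the QDMA hardware reports descriptors by DMA (physical) address, the
 * per-descriptor software state lives in ring->buf[] indexed by position,
 * and the PDMA shadow descriptor sits at the same offset in ring->dma_pdma.
 */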
static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	return ring->dma + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    void *txd, u32 txd_size)
{
	int idx = (txd - ring->dma) / txd_size;

	return &ring->buf[idx];
}

static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
				       void *dma)
{
	return ring->dma_pdma - ring->dma + dma;
}

static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
{
	return (dma - ring->dma) / txd_size;
}

static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 bool napi)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
			dma_unmap_single(eth->dev,
					 dma_unmap_addr(tx_buf, dma_addr0),
					 dma_unmap_len(tx_buf, dma_len0),
					 DMA_TO_DEVICE);
		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}
	} else {
		if (dma_unmap_len(tx_buf, dma_len0)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}

		if (dma_unmap_len(tx_buf, dma_len1)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr1),
				       dma_unmap_len(tx_buf, dma_len1),
				       DMA_TO_DEVICE);
		}
	}

	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
		if (napi)
			napi_consume_skb(tx_buf->skb, napi);
		else
			dev_kfree_skb_any(tx_buf->skb);
	}
	tx_buf->skb = NULL;
}

static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
			 size_t size, int idx)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
		dma_unmap_len_set(tx_buf, dma_len0, size);
	} else {
		if (idx & 1) {
			txd->txd3 = mapped_addr;
			txd->txd2 |= TX_DMA_PLEN1(size);
			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len1, size);
		} else {
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			txd->txd1 = mapped_addr;
			txd->txd2 = TX_DMA_PLEN0(size);
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, size);
		}
	}
}

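/* Fill one DMA TX descriptor for a mapped buffer. The v1 layout (pre
 * MTK_NETSYS_V2 SoCs) packs the queue id, forward port, checksum/TSO and
 * VLAN-insertion flags into txd3/txd4, while the v2 layout spreads them
 * over txd3..txd6 and clears txd7/txd8. With the HNAT offload built in, a
 * magic tag in the skb control block overrides the forward port.
 */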
static void mtk_tx_set_dma_desc_v1(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *desc = txd;
	u32 data;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_SWC | QID_LOW_BITS(info->qid) | TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data);

	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
	data |= QID_HIGH_BITS(info->qid);
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM;
		/* vlan header offload */
		if (info->vlan)
			data |= TX_DMA_INS_VLAN | info->vlan_tci;
	}

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0x7 << TX_DMA_FPORT_SHIFT);
		data |= 0x4 << TX_DMA_FPORT_SHIFT;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);
}

static void mtk_tx_set_dma_desc_v2(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma_v2 *desc = txd;
	u32 data = 0;
	u16 qid = info->qid;

	if (!info->qid && mac->id)
		qid = MTK_QDMA_GMAC2_QID;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data);

	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
	data |= TX_DMA_SWC_V2 | QID_BITS_V2(qid);
#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
		data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);

	data = 0;
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO_V2;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM_V2;
	}
	WRITE_ONCE(desc->txd5, data);

	data = 0;
	if (info->first && info->vlan)
		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
	WRITE_ONCE(desc->txd6, data);

	WRITE_ONCE(desc->txd7, 0);
	WRITE_ONCE(desc->txd8, 0);
}

static void mtk_tx_set_dma_desc(struct sk_buff *skb, struct net_device *dev, void *txd,
				struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		mtk_tx_set_dma_desc_v2(skb, dev, txd, info);
	else
		mtk_tx_set_dma_desc_v1(skb, dev, txd, info);
}

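/* Map an skb onto the TX ring: the linear head goes into the first
 * descriptor, every page fragment is split into MTK_TX_DMA_BUF_LEN sized
 * pieces on further descriptors, and only the first descriptor keeps the
 * skb pointer for later cleanup. On PDMA-only SoCs two buffers share one
 * descriptor, which is what the idx/k bookkeeping in setup_tx_buf()
 * handles. Once everything is mapped, the ring pointer register is kicked
 * to start the DMA.
 */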
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_tx_dma_desc_info txd_info = {
		.size = skb_headlen(skb),
		.qid = skb->mark & MTK_QDMA_TX_MASK,
		.gso = gso,
		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
		.vlan = skb_vlan_tag_present(skb),
		.vlan_tci = skb_vlan_tag_get(skb),
		.first = true,
		.last = !skb_is_nonlinear(skb),
	};
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
	struct mtk_tx_buf *itx_buf, *tx_buf;
	int i, n_desc = 1;
	int k = 0;

	itxd = ring->next_free;
	itxd_pdma = qdma_to_pdma(ring, itxd);
	if (itxd == ring->last_free)
		return -ENOMEM;

	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
	memset(itx_buf, 0, sizeof(*itx_buf));

	txd_info.addr = dma_map_single(eth->dev, skb->data, txd_info.size,
				       DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
		return -ENOMEM;

	mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);

	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
			  MTK_TX_FLAGS_FPORT1;
	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
		     k++);

	/* TX SG offload */
	txd = itxd;
	txd_pdma = qdma_to_pdma(ring, txd);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool new_desc = true;

			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
			    (i & 0x1)) {
				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
				txd_pdma = qdma_to_pdma(ring, txd);
				if (txd == ring->last_free)
					goto err_dma;

				n_desc++;
			} else {
				new_desc = false;
			}

			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
			txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
					!(frag_size - txd_info.size);
			txd_info.addr = skb_frag_dma_map(eth->dev, frag,
							 offset, txd_info.size,
							 DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
				goto err_dma;

			mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);

			tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
			if (new_desc)
				memset(tx_buf, 0, sizeof(*tx_buf));
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
					 MTK_TX_FLAGS_FPORT1;

			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
				     txd_info.size, k++);

			frag_size -= txd_info.size;
			offset += txd_info.size;
		}
	}

	/* store skb to cleanup */
	itx_buf->skb = skb;

	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		if (k & 0x1)
			txd_pdma->txd2 |= TX_DMA_LS0;
		else
			txd_pdma->txd2 |= TX_DMA_LS1;
	}

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
		    !netdev_xmit_more())
			mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
	} else {
		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
					     ring->dma_size);
		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
	}

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf, false);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;

		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
		itxd_pdma = qdma_to_pdma(ring, itxd);
	} while (itxd != txd);

	return -ENOMEM;
}

static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	skb_frag_t *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
					       MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		netif_stop_queue(dev);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		netif_stop_queue(dev);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
{
	int i;
	struct mtk_rx_ring *ring;
	int idx;

	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
		struct mtk_rx_dma *rxd;

		if (!IS_NORMAL_RING(i) && !IS_HW_LRO_RING(i))
			continue;

		ring = &eth->rx_ring[i];
		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
		if (rxd->rxd2 & RX_DMA_DONE) {
			ring->calc_idx_update = true;
			return ring;
		}
	}

	return NULL;
}

static void mtk_update_rx_cpu_idx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	int i;

	if (!eth->hwlro)
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	else {
		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
			ring = &eth->rx_ring[i];
			if (ring->calc_idx_update) {
				ring->calc_idx_update = false;
				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
			}
		}
	}
}

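/* NAPI RX poll: walk the ring until the budget is exhausted or a descriptor
 * is still owned by the DMA engine. For every completed descriptor a
 * replacement page fragment is allocated and mapped first, so the received
 * buffer can be handed to the stack with build_skb() without copying; the
 * descriptor is then recycled with the new buffer and the CPU index
 * register is updated once at the end of the poll.
 */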
static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
	struct mtk_rx_ring *ring = rx_napi->rx_ring;
	int idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma_v2 *rxd, trxd;
	int done = 0;

	if (unlikely(!ring))
		goto rx_done;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac = 0;

		if (eth->hwlro)
			ring = mtk_get_rx_ring(eth);

		if (unlikely(!ring))
			goto rx_done;

		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
		data = ring->data[idx];

		if (!mtk_rx_get_desc(eth, &trxd, rxd))
			break;

		/* find out which mac the packet came from. values start at 1 */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
			mac = 0;
		} else {
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
				mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
			else
				mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ?
				      0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
		}

		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
			     !eth->netdev[mac]))
			goto release_desc;

		netdev = eth->netdev[mac];

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

		/* alloc new buffer */
		new_data = napi_alloc_frag(ring->frag_size);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dev,
					  new_data + NET_SKB_PAD +
					  eth->ip_align,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		dma_unmap_single(eth->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(data);
			netdev->stats.rx_dropped++;
			goto skip_rx;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);

		if ((!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
		     (trxd.rxd4 & eth->rx_dma_l4_valid)) ||
		    (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
		     (trxd.rxd3 & eth->rx_dma_l4_valid)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
				if (trxd.rxd3 & RX_DMA_VTAG_V2)
					__vlan_hwaccel_put_tag(skb,
						htons(RX_DMA_VPID_V2(trxd.rxd4)),
						RX_DMA_VID_V2(trxd.rxd4));
			} else {
				if (trxd.rxd2 & RX_DMA_VTAG)
					__vlan_hwaccel_put_tag(skb,
						htons(RX_DMA_VPID(trxd.rxd3)),
						RX_DMA_VID(trxd.rxd3));
			}

			/* If the netdev is attached to a DSA switch, the
			 * special tag inserted into the VLAN field by the
			 * switch hardware can be offloaded by RX HW VLAN
			 * offload. Clear the VLAN information from @skb to
			 * avoid an unexpected 802.1d handler before the
			 * packet enters the DSA framework.
			 */
			if (netdev_uses_dsa(netdev))
				__vlan_hwaccel_clear_tag(skb);
		}

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
			*(u32 *)(skb->head) = trxd.rxd5;
		else
			*(u32 *)(skb->head) = trxd.rxd4;

		skb_hnat_alg(skb) = 0;
		skb_hnat_filled(skb) = 0;
		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;

		if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
			trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
				     __func__, skb_hnat_reason(skb));
			skb->pkt_type = PACKET_HOST;
		}

		trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x\n",
			     __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
			     skb_hnat_reason(skb), skb_hnat_alg(skb));
#endif
		if (mtk_hwlro_stats_ebl &&
		    IS_HW_LRO_RING(ring->ring_no) && eth->hwlro) {
			hw_lro_stats_update(ring->ring_no, &trxd);
			hw_lro_flush_stats_update(ring->ring_no, &trxd);
		}

		skb_record_rx_queue(skb, 0);
		napi_gro_receive(napi, skb);

skip_rx:
		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			rxd->rxd2 = RX_DMA_LSO;
		else
			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;

		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth, ring);
	}

	return done;
}

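/* TX completion: the QDMA variant follows the descriptor chain from the
 * last reclaimed position up to the hardware release pointer (QTX_DRX_PTR),
 * while the PDMA variant walks the ring from cpu_idx to the hardware DTX
 * index. Both unmap the buffers, free the skbs and fill in the per-MAC
 * packet/byte counts that mtk_poll_tx() feeds to the BQL accounting.
 */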
developerfb556ca2021-10-13 10:52:09 +08001575static void mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08001576 unsigned int *done, unsigned int *bytes)
1577{
developere9356982022-07-04 09:03:20 +08001578 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001579 struct mtk_tx_ring *ring = &eth->tx_ring;
1580 struct mtk_tx_dma *desc;
1581 struct sk_buff *skb;
1582 struct mtk_tx_buf *tx_buf;
1583 u32 cpu, dma;
1584
developerc4671b22021-05-28 13:16:42 +08001585 cpu = ring->last_free_ptr;
developerfd40db22021-04-29 10:08:25 +08001586 dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1587
1588 desc = mtk_qdma_phys_to_virt(ring, cpu);
1589
1590 while ((cpu != dma) && budget) {
1591 u32 next_cpu = desc->txd2;
1592 int mac = 0;
1593
1594 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1595 break;
1596
1597 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1598
developere9356982022-07-04 09:03:20 +08001599 tx_buf = mtk_desc_to_tx_buf(ring, desc, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001600 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
1601 mac = 1;
1602
1603 skb = tx_buf->skb;
1604 if (!skb)
1605 break;
1606
1607 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1608 bytes[mac] += skb->len;
1609 done[mac]++;
1610 budget--;
1611 }
developerc4671b22021-05-28 13:16:42 +08001612 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08001613
1614 ring->last_free = desc;
1615 atomic_inc(&ring->free_count);
1616
1617 cpu = next_cpu;
1618 }
1619
developerc4671b22021-05-28 13:16:42 +08001620 ring->last_free_ptr = cpu;
developerfd40db22021-04-29 10:08:25 +08001621 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
developerfd40db22021-04-29 10:08:25 +08001622}
1623
developerfb556ca2021-10-13 10:52:09 +08001624static void mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08001625 unsigned int *done, unsigned int *bytes)
1626{
1627 struct mtk_tx_ring *ring = &eth->tx_ring;
1628 struct mtk_tx_dma *desc;
1629 struct sk_buff *skb;
1630 struct mtk_tx_buf *tx_buf;
1631 u32 cpu, dma;
1632
1633 cpu = ring->cpu_idx;
1634 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
1635
1636 while ((cpu != dma) && budget) {
1637 tx_buf = &ring->buf[cpu];
1638 skb = tx_buf->skb;
1639 if (!skb)
1640 break;
1641
1642 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1643 bytes[0] += skb->len;
1644 done[0]++;
1645 budget--;
1646 }
1647
developerc4671b22021-05-28 13:16:42 +08001648 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08001649
developere9356982022-07-04 09:03:20 +08001650 desc = ring->dma + cpu * eth->soc->txrx.txd_size;
developerfd40db22021-04-29 10:08:25 +08001651 ring->last_free = desc;
1652 atomic_inc(&ring->free_count);
1653
1654 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
1655 }
1656
1657 ring->cpu_idx = cpu;
developerfd40db22021-04-29 10:08:25 +08001658}
1659
1660static int mtk_poll_tx(struct mtk_eth *eth, int budget)
1661{
1662 struct mtk_tx_ring *ring = &eth->tx_ring;
1663 unsigned int done[MTK_MAX_DEVS];
1664 unsigned int bytes[MTK_MAX_DEVS];
1665 int total = 0, i;
1666
1667 memset(done, 0, sizeof(done));
1668 memset(bytes, 0, sizeof(bytes));
1669
1670 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
developerfb556ca2021-10-13 10:52:09 +08001671 mtk_poll_tx_qdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08001672 else
developerfb556ca2021-10-13 10:52:09 +08001673 mtk_poll_tx_pdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08001674
1675 for (i = 0; i < MTK_MAC_COUNT; i++) {
1676 if (!eth->netdev[i] || !done[i])
1677 continue;
1678 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1679 total += done[i];
1680 }
1681
1682 if (mtk_queue_stopped(eth) &&
1683 (atomic_read(&ring->free_count) > ring->thresh))
1684 mtk_wake_queue(eth);
1685
1686 return total;
1687}
1688
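/* Handle the frame engine status interrupt: when the GDM1/GDM2 _AF bits are
 * set (presumably the per-GDM counters are about to overflow), fold the MIB
 * counters into the software statistics and acknowledge the event.
 */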
1689static void mtk_handle_status_irq(struct mtk_eth *eth)
1690{
developer8051e042022-04-08 13:26:36 +08001691 u32 status2 = mtk_r32(eth, MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08001692
1693 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
1694 mtk_stats_update(eth);
1695 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
developer8051e042022-04-08 13:26:36 +08001696 MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08001697 }
1698}
1699
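/* NAPI poll handler for TX completions: acknowledge TX_DONE, reclaim
 * descriptors and re-enable the TX interrupt only when the budget was not
 * exhausted and no further completions are pending.
 */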
1700static int mtk_napi_tx(struct napi_struct *napi, int budget)
1701{
1702 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
1703 u32 status, mask;
1704 int tx_done = 0;
1705
1706 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1707 mtk_handle_status_irq(eth);
1708 mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
1709 tx_done = mtk_poll_tx(eth, budget);
1710
1711 if (unlikely(netif_msg_intr(eth))) {
1712 status = mtk_r32(eth, eth->tx_int_status_reg);
1713 mask = mtk_r32(eth, eth->tx_int_mask_reg);
1714 dev_info(eth->dev,
1715 "done tx %d, intr 0x%08x/0x%x\n",
1716 tx_done, status, mask);
1717 }
1718
1719 if (tx_done == budget)
1720 return budget;
1721
1722 status = mtk_r32(eth, eth->tx_int_status_reg);
1723 if (status & MTK_TX_DONE_INT)
1724 return budget;
1725
developerc4671b22021-05-28 13:16:42 +08001726 if (napi_complete(napi))
1727 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developerfd40db22021-04-29 10:08:25 +08001728
1729 return tx_done;
1730}
1731
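/* NAPI poll handler for a single RX ring: acknowledge the ring's RX_DONE
 * bit, poll received frames and loop again if new work arrived before
 * completing NAPI and re-enabling the interrupt.
 */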
1732static int mtk_napi_rx(struct napi_struct *napi, int budget)
1733{
developer18f46a82021-07-20 21:08:21 +08001734 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
1735 struct mtk_eth *eth = rx_napi->eth;
1736 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08001737 u32 status, mask;
1738 int rx_done = 0;
1739 int remain_budget = budget;
1740
1741 mtk_handle_status_irq(eth);
1742
1743poll_again:
developer18f46a82021-07-20 21:08:21 +08001744 mtk_w32(eth, MTK_RX_DONE_INT(ring->ring_no), MTK_PDMA_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08001745 rx_done = mtk_poll_rx(napi, remain_budget, eth);
1746
1747 if (unlikely(netif_msg_intr(eth))) {
1748 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1749 mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
1750 dev_info(eth->dev,
1751 "done rx %d, intr 0x%08x/0x%x\n",
1752 rx_done, status, mask);
1753 }
1754 if (rx_done == remain_budget)
1755 return budget;
1756
1757 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
developer18f46a82021-07-20 21:08:21 +08001758 if (status & MTK_RX_DONE_INT(ring->ring_no)) {
developerfd40db22021-04-29 10:08:25 +08001759 remain_budget -= rx_done;
1760 goto poll_again;
1761 }
developerc4671b22021-05-28 13:16:42 +08001762
1763 if (napi_complete(napi))
developer18f46a82021-07-20 21:08:21 +08001764 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(ring->ring_no));
developerfd40db22021-04-29 10:08:25 +08001765
1766 return rx_done + budget - remain_budget;
1767}
1768
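/* Allocate and initialise the TX descriptor ring. Descriptors live either in
 * coherent DRAM or, on SoCs with has_sram, in the frame engine SRAM behind
 * the scratch ring; PDMA-only parts additionally keep the real hardware
 * descriptors in the shadow ring->dma_pdma.
 */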
1769static int mtk_tx_alloc(struct mtk_eth *eth)
1770{
developere9356982022-07-04 09:03:20 +08001771 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001772 struct mtk_tx_ring *ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08001773 int i, sz = soc->txrx.txd_size;
1774 struct mtk_tx_dma_v2 *txd, *pdma_txd;
developerfd40db22021-04-29 10:08:25 +08001775
1776 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
1777 GFP_KERNEL);
1778 if (!ring->buf)
1779 goto no_tx_mem;
1780
1781 if (!eth->soc->has_sram)
1782 ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08001783 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08001784 else {
developere9356982022-07-04 09:03:20 +08001785 ring->dma = eth->scratch_ring + MTK_DMA_SIZE * sz;
developerfd40db22021-04-29 10:08:25 +08001786 ring->phys = eth->phy_scratch_ring + MTK_DMA_SIZE * sz;
1787 }
1788
1789 if (!ring->dma)
1790 goto no_tx_mem;
1791
1792 for (i = 0; i < MTK_DMA_SIZE; i++) {
1793 int next = (i + 1) % MTK_DMA_SIZE;
1794 u32 next_ptr = ring->phys + next * sz;
1795
developere9356982022-07-04 09:03:20 +08001796 txd = ring->dma + i * sz;
1797 txd->txd2 = next_ptr;
1798 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1799 txd->txd4 = 0;
1800
1801 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1802 txd->txd5 = 0;
1803 txd->txd6 = 0;
1804 txd->txd7 = 0;
1805 txd->txd8 = 0;
1806 }
developerfd40db22021-04-29 10:08:25 +08001807 }
1808
1809 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
1810 * only as the framework. The real HW descriptors are the PDMA
1811 * descriptors in ring->dma_pdma.
1812 */
1813 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1814 ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08001815 &ring->phys_pdma, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08001816 if (!ring->dma_pdma)
1817 goto no_tx_mem;
1818
1819 for (i = 0; i < MTK_DMA_SIZE; i++) {
developere9356982022-07-04 09:03:20 +08001820 			pdma_txd = ring->dma_pdma + i * sz;
1821
1822 pdma_txd->txd2 = TX_DMA_DESP2_DEF;
1823 pdma_txd->txd4 = 0;
developerfd40db22021-04-29 10:08:25 +08001824 }
1825 }
1826
1827 ring->dma_size = MTK_DMA_SIZE;
1828 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
developere9356982022-07-04 09:03:20 +08001829 ring->next_free = ring->dma;
1830 ring->last_free = (void *)txd;
developerc4671b22021-05-28 13:16:42 +08001831 ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
developerfd40db22021-04-29 10:08:25 +08001832 ring->thresh = MAX_SKB_FRAGS;
1833
1834 /* make sure that all changes to the dma ring are flushed before we
1835 * continue
1836 */
1837 wmb();
1838
1839 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1840 mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
1841 mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
1842 mtk_w32(eth,
1843 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1844 MTK_QTX_CRX_PTR);
developerc4671b22021-05-28 13:16:42 +08001845 mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
developerfd40db22021-04-29 10:08:25 +08001846 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
1847 MTK_QTX_CFG(0));
1848 } else {
1849 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
1850 mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
1851 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
1852 mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
1853 }
1854
1855 return 0;
1856
1857no_tx_mem:
1858 return -ENOMEM;
1859}
1860
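/* Tear down the TX ring: unmap any buffers still held by the driver, free
 * the buffer bookkeeping array and release DRAM-backed descriptor memory
 * (SRAM-backed descriptors are not freed here).
 */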
1861static void mtk_tx_clean(struct mtk_eth *eth)
1862{
developere9356982022-07-04 09:03:20 +08001863 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001864 struct mtk_tx_ring *ring = &eth->tx_ring;
1865 int i;
1866
1867 if (ring->buf) {
1868 for (i = 0; i < MTK_DMA_SIZE; i++)
developerc4671b22021-05-28 13:16:42 +08001869 mtk_tx_unmap(eth, &ring->buf[i], false);
developerfd40db22021-04-29 10:08:25 +08001870 kfree(ring->buf);
1871 ring->buf = NULL;
1872 }
1873
1874 if (!eth->soc->has_sram && ring->dma) {
1875 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08001876 MTK_DMA_SIZE * soc->txrx.txd_size,
1877 ring->dma, ring->phys);
developerfd40db22021-04-29 10:08:25 +08001878 ring->dma = NULL;
1879 }
1880
1881 if (ring->dma_pdma) {
1882 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08001883 MTK_DMA_SIZE * soc->txrx.txd_size,
1884 ring->dma_pdma, ring->phys_pdma);
developerfd40db22021-04-29 10:08:25 +08001885 ring->dma_pdma = NULL;
1886 }
1887}
1888
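/* Allocate one RX ring (normal, HW-LRO or QDMA flavour): reserve the
 * descriptor area (DRAM, or SRAM behind the TX ring), pre-fill it with
 * DMA-mapped page fragments and program the ring base, size and CPU index
 * registers.
 */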
1889static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
1890{
1891 struct mtk_rx_ring *ring;
1892 int rx_data_len, rx_dma_size;
1893 int i;
1894
1895 if (rx_flag == MTK_RX_FLAGS_QDMA) {
1896 if (ring_no)
1897 return -EINVAL;
1898 ring = &eth->rx_ring_qdma;
1899 } else {
1900 ring = &eth->rx_ring[ring_no];
1901 }
1902
1903 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
1904 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
1905 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
1906 } else {
1907 rx_data_len = ETH_DATA_LEN;
1908 rx_dma_size = MTK_DMA_SIZE;
1909 }
1910
1911 ring->frag_size = mtk_max_frag_size(rx_data_len);
1912 ring->buf_size = mtk_max_buf_size(ring->frag_size);
1913 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
1914 GFP_KERNEL);
1915 if (!ring->data)
1916 return -ENOMEM;
1917
1918 for (i = 0; i < rx_dma_size; i++) {
1919 ring->data[i] = netdev_alloc_frag(ring->frag_size);
1920 if (!ring->data[i])
1921 return -ENOMEM;
1922 }
1923
1924 if ((!eth->soc->has_sram) || (eth->soc->has_sram
1925 && (rx_flag != MTK_RX_FLAGS_NORMAL)))
1926 ring->dma = dma_alloc_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08001927 rx_dma_size * eth->soc->txrx.rxd_size,
1928 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08001929 else {
1930 struct mtk_tx_ring *tx_ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08001931 ring->dma = tx_ring->dma + MTK_DMA_SIZE *
1932 eth->soc->txrx.rxd_size * (ring_no + 1);
developer18f46a82021-07-20 21:08:21 +08001933 ring->phys = tx_ring->phys + MTK_DMA_SIZE *
developere9356982022-07-04 09:03:20 +08001934 eth->soc->txrx.rxd_size * (ring_no + 1);
developerfd40db22021-04-29 10:08:25 +08001935 }
1936
1937 if (!ring->dma)
1938 return -ENOMEM;
1939
1940 for (i = 0; i < rx_dma_size; i++) {
developere9356982022-07-04 09:03:20 +08001941 struct mtk_rx_dma_v2 *rxd;
1942
developerfd40db22021-04-29 10:08:25 +08001943 dma_addr_t dma_addr = dma_map_single(eth->dev,
1944 ring->data[i] + NET_SKB_PAD + eth->ip_align,
1945 ring->buf_size,
1946 DMA_FROM_DEVICE);
1947 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1948 return -ENOMEM;
developere9356982022-07-04 09:03:20 +08001949
1950 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
1951 rxd->rxd1 = (unsigned int)dma_addr;
developerfd40db22021-04-29 10:08:25 +08001952
1953 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
developere9356982022-07-04 09:03:20 +08001954 rxd->rxd2 = RX_DMA_LSO;
developerfd40db22021-04-29 10:08:25 +08001955 else
developere9356982022-07-04 09:03:20 +08001956 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
developerfd40db22021-04-29 10:08:25 +08001957
developere9356982022-07-04 09:03:20 +08001958 rxd->rxd3 = 0;
1959 rxd->rxd4 = 0;
1960
1961 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1962 rxd->rxd5 = 0;
1963 rxd->rxd6 = 0;
1964 rxd->rxd7 = 0;
1965 rxd->rxd8 = 0;
developerfd40db22021-04-29 10:08:25 +08001966 }
developerfd40db22021-04-29 10:08:25 +08001967 }
1968 ring->dma_size = rx_dma_size;
1969 ring->calc_idx_update = false;
1970 ring->calc_idx = rx_dma_size - 1;
1971 ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ?
1972 MTK_QRX_CRX_IDX_CFG(ring_no) :
1973 MTK_PRX_CRX_IDX_CFG(ring_no);
developer77d03a72021-06-06 00:06:00 +08001974 ring->ring_no = ring_no;
developerfd40db22021-04-29 10:08:25 +08001975 /* make sure that all changes to the dma ring are flushed before we
1976 * continue
1977 */
1978 wmb();
1979
1980 if (rx_flag == MTK_RX_FLAGS_QDMA) {
1981 mtk_w32(eth, ring->phys, MTK_QRX_BASE_PTR_CFG(ring_no));
1982 mtk_w32(eth, rx_dma_size, MTK_QRX_MAX_CNT_CFG(ring_no));
1983 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1984 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_QDMA_RST_IDX);
1985 } else {
1986 mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
1987 mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
1988 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1989 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
1990 }
1991
1992 return 0;
1993}
1994
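/* Release an RX ring: unmap and free every receive buffer, then free the
 * descriptor area unless it was carved out of SRAM (in_sram).
 */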
1995static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram)
1996{
1997 int i;
1998
1999 if (ring->data && ring->dma) {
2000 for (i = 0; i < ring->dma_size; i++) {
developere9356982022-07-04 09:03:20 +08002001 struct mtk_rx_dma *rxd;
2002
developerfd40db22021-04-29 10:08:25 +08002003 if (!ring->data[i])
2004 continue;
developere9356982022-07-04 09:03:20 +08002005
2006 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2007 if (!rxd->rxd1)
developerfd40db22021-04-29 10:08:25 +08002008 continue;
developere9356982022-07-04 09:03:20 +08002009
developerfd40db22021-04-29 10:08:25 +08002010 dma_unmap_single(eth->dev,
developere9356982022-07-04 09:03:20 +08002011 rxd->rxd1,
developerfd40db22021-04-29 10:08:25 +08002012 ring->buf_size,
2013 DMA_FROM_DEVICE);
2014 skb_free_frag(ring->data[i]);
2015 }
2016 kfree(ring->data);
2017 ring->data = NULL;
2018 }
2019
2020 	if (in_sram)
2021 return;
2022
2023 if (ring->dma) {
2024 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002025 ring->dma_size * eth->soc->txrx.rxd_size,
developerfd40db22021-04-29 10:08:25 +08002026 ring->dma,
2027 ring->phys);
2028 ring->dma = NULL;
2029 }
2030}
2031
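/* Program the hardware LRO engine: ring aging and aggregation limits,
 * bandwidth and auto-learn score thresholds, the per-SoC aggregation length,
 * and finally the global LRO enable bit. Field meanings follow the MTK_*
 * definitions in mtk_eth_soc.h.
 */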
2032static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2033{
2034 int i;
developer77d03a72021-06-06 00:06:00 +08002035 u32 val;
developerfd40db22021-04-29 10:08:25 +08002036 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2037 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2038
2039 /* set LRO rings to auto-learn modes */
2040 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2041
2042 /* validate LRO ring */
2043 ring_ctrl_dw2 |= MTK_RING_VLD;
2044
2045 /* set AGE timer (unit: 20us) */
2046 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2047 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2048
2049 /* set max AGG timer (unit: 20us) */
2050 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2051
2052 /* set max LRO AGG count */
2053 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2054 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2055
developer77d03a72021-06-06 00:06:00 +08002056 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002057 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2058 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2059 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2060 }
2061
2062 /* IPv4 checksum update enable */
2063 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2064
2065 /* switch priority comparison to packet count mode */
2066 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2067
2068 /* bandwidth threshold setting */
2069 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2070
2071 /* auto-learn score delta setting */
developer77d03a72021-06-06 00:06:00 +08002072 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_LRO_ALT_SCORE_DELTA);
developerfd40db22021-04-29 10:08:25 +08002073
2074 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2075 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2076 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2077
developerfd40db22021-04-29 10:08:25 +08002078 /* the minimal remaining room of SDL0 in RXD for lro aggregation */
2079 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2080
developer77d03a72021-06-06 00:06:00 +08002081 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2082 val = mtk_r32(eth, MTK_PDMA_RX_CFG);
2083 mtk_w32(eth, val | (MTK_PDMA_LRO_SDL << MTK_RX_CFG_SDL_OFFSET),
2084 MTK_PDMA_RX_CFG);
2085
2086 lro_ctrl_dw0 |= MTK_PDMA_LRO_SDL << MTK_CTRL_DW0_SDL_OFFSET;
2087 } else {
2088 /* set HW LRO mode & the max aggregation count for rx packets */
2089 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2090 }
2091
developerfd40db22021-04-29 10:08:25 +08002092 /* enable HW LRO */
2093 lro_ctrl_dw0 |= MTK_LRO_EN;
2094
developer77d03a72021-06-06 00:06:00 +08002095 	/* enable the CPU reason blacklist */
2096 lro_ctrl_dw0 |= MTK_LRO_CRSN_BNW;
2097
developerfd40db22021-04-29 10:08:25 +08002098 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2099 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2100
developer77d03a72021-06-06 00:06:00 +08002101 	/* do not use the PPE CPU reason */
2102 mtk_w32(eth, 0xffffffff, MTK_PDMA_LRO_CTRL_DW1);
2103
developerfd40db22021-04-29 10:08:25 +08002104 return 0;
2105}
2106
2107static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2108{
2109 int i;
2110 u32 val;
2111
2112 /* relinquish lro rings, flush aggregated packets */
developer77d03a72021-06-06 00:06:00 +08002113 mtk_w32(eth, MTK_LRO_RING_RELINGUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
developerfd40db22021-04-29 10:08:25 +08002114
2115 /* wait for relinquishments done */
2116 for (i = 0; i < 10; i++) {
2117 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
developer77d03a72021-06-06 00:06:00 +08002118 if (val & MTK_LRO_RING_RELINGUISH_DONE) {
developer8051e042022-04-08 13:26:36 +08002119 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08002120 continue;
2121 }
2122 break;
2123 }
2124
2125 /* invalidate lro rings */
developer77d03a72021-06-06 00:06:00 +08002126 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
developerfd40db22021-04-29 10:08:25 +08002127 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2128
2129 /* disable HW LRO */
2130 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2131}
2132
2133static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2134{
2135 u32 reg_val;
2136
developer77d03a72021-06-06 00:06:00 +08002137 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2138 idx += 1;
2139
developerfd40db22021-04-29 10:08:25 +08002140 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2141
2142 /* invalidate the IP setting */
2143 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2144
2145 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2146
2147 /* validate the IP setting */
2148 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2149}
2150
2151static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2152{
2153 u32 reg_val;
2154
developer77d03a72021-06-06 00:06:00 +08002155 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2156 idx += 1;
2157
developerfd40db22021-04-29 10:08:25 +08002158 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2159
2160 /* invalidate the IP setting */
2161 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2162
2163 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2164}
2165
2166static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2167{
2168 int cnt = 0;
2169 int i;
2170
2171 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2172 if (mac->hwlro_ip[i])
2173 cnt++;
2174 }
2175
2176 return cnt;
2177}
2178
2179static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2180 struct ethtool_rxnfc *cmd)
2181{
2182 struct ethtool_rx_flow_spec *fsp =
2183 (struct ethtool_rx_flow_spec *)&cmd->fs;
2184 struct mtk_mac *mac = netdev_priv(dev);
2185 struct mtk_eth *eth = mac->hw;
2186 int hwlro_idx;
2187
2188 if ((fsp->flow_type != TCP_V4_FLOW) ||
2189 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2190 (fsp->location > 1))
2191 return -EINVAL;
2192
2193 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2194 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2195
2196 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2197
2198 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2199
2200 return 0;
2201}
2202
2203static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2204 struct ethtool_rxnfc *cmd)
2205{
2206 struct ethtool_rx_flow_spec *fsp =
2207 (struct ethtool_rx_flow_spec *)&cmd->fs;
2208 struct mtk_mac *mac = netdev_priv(dev);
2209 struct mtk_eth *eth = mac->hw;
2210 int hwlro_idx;
2211
2212 if (fsp->location > 1)
2213 return -EINVAL;
2214
2215 mac->hwlro_ip[fsp->location] = 0;
2216 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2217
2218 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2219
2220 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2221
2222 return 0;
2223}
2224
2225static void mtk_hwlro_netdev_disable(struct net_device *dev)
2226{
2227 struct mtk_mac *mac = netdev_priv(dev);
2228 struct mtk_eth *eth = mac->hw;
2229 int i, hwlro_idx;
2230
2231 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2232 mac->hwlro_ip[i] = 0;
2233 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2234
2235 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2236 }
2237
2238 mac->hwlro_ip_cnt = 0;
2239}
2240
2241static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2242 struct ethtool_rxnfc *cmd)
2243{
2244 struct mtk_mac *mac = netdev_priv(dev);
2245 struct ethtool_rx_flow_spec *fsp =
2246 (struct ethtool_rx_flow_spec *)&cmd->fs;
2247
2248 	/* only the TCP destination IPv4 address is meaningful; other fields are ignored */
2249 fsp->flow_type = TCP_V4_FLOW;
2250 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2251 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2252
2253 fsp->h_u.tcp_ip4_spec.ip4src = 0;
2254 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2255 fsp->h_u.tcp_ip4_spec.psrc = 0;
2256 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2257 fsp->h_u.tcp_ip4_spec.pdst = 0;
2258 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2259 fsp->h_u.tcp_ip4_spec.tos = 0;
2260 fsp->m_u.tcp_ip4_spec.tos = 0xff;
2261
2262 return 0;
2263}
2264
2265static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2266 struct ethtool_rxnfc *cmd,
2267 u32 *rule_locs)
2268{
2269 struct mtk_mac *mac = netdev_priv(dev);
2270 int cnt = 0;
2271 int i;
2272
2273 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2274 if (mac->hwlro_ip[i]) {
2275 rule_locs[cnt] = i;
2276 cnt++;
2277 }
2278 }
2279
2280 cmd->rule_cnt = cnt;
2281
2282 return 0;
2283}
2284
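/* Configure receive side scaling: select the IPv4/IPv6 hash types and the
 * indirection table size, toggle RSS under the MTK_RSS_CFG_REQ pause
 * handshake and route the extra RX ring interrupts to their groups.
 */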
developer18f46a82021-07-20 21:08:21 +08002285static int mtk_rss_init(struct mtk_eth *eth)
2286{
2287 u32 val;
2288
2289 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2290 /* Set RSS rings to PSE modes */
2291 val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(1));
2292 val |= MTK_RING_PSE_MODE;
2293 mtk_w32(eth, val, MTK_LRO_CTRL_DW2_CFG(1));
2294
2295 /* Enable non-lro multiple rx */
2296 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2297 val |= MTK_NON_LRO_MULTI_EN;
2298 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2299
2300 		/* Enable RSS delay interrupt support */
2301 val |= MTK_LRO_DLY_INT_EN;
2302 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2303
2304 /* Set RSS delay config int ring1 */
2305 mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX1_DLY_INT);
2306 }
2307
2308 /* Hash Type */
2309 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2310 val |= MTK_RSS_IPV4_STATIC_HASH;
2311 val |= MTK_RSS_IPV6_STATIC_HASH;
2312 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2313
2314 /* Select the size of indirection table */
2315 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW0);
2316 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW1);
2317 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW2);
2318 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW3);
2319 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW4);
2320 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW5);
2321 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW6);
2322 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW7);
2323
2324 /* Pause */
2325 val |= MTK_RSS_CFG_REQ;
2326 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2327
2328 	/* Enable RSS */
2329 val |= MTK_RSS_EN;
2330 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2331
2332 /* Release pause */
2333 val &= ~(MTK_RSS_CFG_REQ);
2334 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2335
2336 	/* Set per-RSS group interrupt */
2337 mtk_w32(eth, MTK_RX_DONE_INT(MTK_RSS_RING1), MTK_PDMA_INT_GRP3);
2338
2339 /* Set GRP INT */
2340 mtk_w32(eth, 0x21021030, MTK_FE_INT_GRP);
2341
2342 return 0;
2343}
2344
2345static void mtk_rss_uninit(struct mtk_eth *eth)
2346{
2347 u32 val;
2348
2349 /* Pause */
2350 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2351 val |= MTK_RSS_CFG_REQ;
2352 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2353
2354 	/* Disable RSS */
2355 val &= ~(MTK_RSS_EN);
2356 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2357
2358 /* Release pause */
2359 val &= ~(MTK_RSS_CFG_REQ);
2360 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2361}
2362
developerfd40db22021-04-29 10:08:25 +08002363static netdev_features_t mtk_fix_features(struct net_device *dev,
2364 netdev_features_t features)
2365{
2366 if (!(features & NETIF_F_LRO)) {
2367 struct mtk_mac *mac = netdev_priv(dev);
2368 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2369
2370 if (ip_cnt) {
2371 			netdev_info(dev, "RX flow rules are programmed, keeping LRO enabled\n");
2372
2373 features |= NETIF_F_LRO;
2374 }
2375 }
2376
2377 if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
2378 netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n");
2379
2380 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2381 }
2382
2383 return features;
2384}
2385
2386static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2387{
2388 struct mtk_mac *mac = netdev_priv(dev);
2389 struct mtk_eth *eth = mac->hw;
2390 int err = 0;
2391
2392 if (!((dev->features ^ features) & MTK_SET_FEATURES))
2393 return 0;
2394
2395 if (!(features & NETIF_F_LRO))
2396 mtk_hwlro_netdev_disable(dev);
2397
2398 if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
2399 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
2400 else
2401 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
2402
2403 return err;
2404}
2405
2406/* wait for DMA to finish whatever it is doing before we start using it again */
2407static int mtk_dma_busy_wait(struct mtk_eth *eth)
2408{
2409 unsigned long t_start = jiffies;
2410
2411 while (1) {
2412 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2413 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
2414 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2415 return 0;
2416 } else {
2417 if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
2418 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2419 return 0;
2420 }
2421
2422 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
2423 break;
2424 }
2425
2426 dev_err(eth->dev, "DMA init timeout\n");
2427 return -1;
2428}
2429
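/* Bring up all DMA resources in order: the QDMA scratch/free queue, the TX
 * ring, the QDMA and normal RX rings, optional HW-LRO and RSS rings, and the
 * QDMA random-early-drop thresholds.
 */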
2430static int mtk_dma_init(struct mtk_eth *eth)
2431{
2432 int err;
2433 u32 i;
2434
2435 if (mtk_dma_busy_wait(eth))
2436 return -EBUSY;
2437
2438 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2439 /* QDMA needs scratch memory for internal reordering of the
2440 * descriptors
2441 */
2442 err = mtk_init_fq_dma(eth);
2443 if (err)
2444 return err;
2445 }
2446
2447 err = mtk_tx_alloc(eth);
2448 if (err)
2449 return err;
2450
2451 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2452 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2453 if (err)
2454 return err;
2455 }
2456
2457 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2458 if (err)
2459 return err;
2460
2461 if (eth->hwlro) {
developer77d03a72021-06-06 00:06:00 +08002462 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ? 4 : 1;
2463 for (; i < MTK_MAX_RX_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002464 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2465 if (err)
2466 return err;
2467 }
2468 err = mtk_hwlro_rx_init(eth);
2469 if (err)
2470 return err;
2471 }
2472
developer18f46a82021-07-20 21:08:21 +08002473 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2474 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2475 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_NORMAL);
2476 if (err)
2477 return err;
2478 }
2479 err = mtk_rss_init(eth);
2480 if (err)
2481 return err;
2482 }
2483
developerfd40db22021-04-29 10:08:25 +08002484 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2485 /* Enable random early drop and set drop threshold
2486 * automatically
2487 */
2488 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2489 FC_THRES_MIN, MTK_QDMA_FC_THRES);
2490 mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
2491 }
2492
2493 return 0;
2494}
2495
2496static void mtk_dma_free(struct mtk_eth *eth)
2497{
developere9356982022-07-04 09:03:20 +08002498 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002499 int i;
2500
2501 for (i = 0; i < MTK_MAC_COUNT; i++)
2502 if (eth->netdev[i])
2503 netdev_reset_queue(eth->netdev[i]);
2504 	if (!eth->soc->has_sram && eth->scratch_ring) {
2505 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002506 MTK_DMA_SIZE * soc->txrx.txd_size,
2507 eth->scratch_ring, eth->phy_scratch_ring);
developerfd40db22021-04-29 10:08:25 +08002508 eth->scratch_ring = NULL;
2509 eth->phy_scratch_ring = 0;
2510 }
2511 mtk_tx_clean(eth);
developerb3ce86f2022-06-30 13:31:47 +08002512 	mtk_rx_clean(eth, &eth->rx_ring[0], eth->soc->has_sram);
developerfd40db22021-04-29 10:08:25 +08002513 	mtk_rx_clean(eth, &eth->rx_ring_qdma, 0);
2514
2515 if (eth->hwlro) {
2516 mtk_hwlro_rx_uninit(eth);
developer77d03a72021-06-06 00:06:00 +08002517
2518 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ? 4 : 1;
2519 for (; i < MTK_MAX_RX_RING_NUM; i++)
2520 mtk_rx_clean(eth, &eth->rx_ring[i], 0);
developerfd40db22021-04-29 10:08:25 +08002521 }
2522
developer18f46a82021-07-20 21:08:21 +08002523 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2524 mtk_rss_uninit(eth);
2525
2526 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
2527 mtk_rx_clean(eth, &eth->rx_ring[i], 1);
2528 }
2529
developer94008d92021-09-23 09:47:41 +08002530 if (eth->scratch_head) {
2531 kfree(eth->scratch_head);
2532 eth->scratch_head = NULL;
2533 }
developerfd40db22021-04-29 10:08:25 +08002534}
2535
2536static void mtk_tx_timeout(struct net_device *dev)
2537{
2538 struct mtk_mac *mac = netdev_priv(dev);
2539 struct mtk_eth *eth = mac->hw;
2540
2541 eth->netdev[mac->id]->stats.tx_errors++;
2542 netif_err(eth, tx_err, dev,
2543 "transmit timed out\n");
developer8051e042022-04-08 13:26:36 +08002544
2545 if (atomic_read(&reset_lock) == 0)
2546 schedule_work(&eth->pending_work);
developerfd40db22021-04-29 10:08:25 +08002547}
2548
developer18f46a82021-07-20 21:08:21 +08002549static irqreturn_t mtk_handle_irq_rx(int irq, void *priv)
developerfd40db22021-04-29 10:08:25 +08002550{
developer18f46a82021-07-20 21:08:21 +08002551 struct mtk_napi *rx_napi = priv;
2552 struct mtk_eth *eth = rx_napi->eth;
2553 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08002554
developer18f46a82021-07-20 21:08:21 +08002555 if (likely(napi_schedule_prep(&rx_napi->napi))) {
developer18f46a82021-07-20 21:08:21 +08002556 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(ring->ring_no));
developer6bbe70d2021-08-06 09:34:55 +08002557 __napi_schedule(&rx_napi->napi);
developerfd40db22021-04-29 10:08:25 +08002558 }
2559
2560 return IRQ_HANDLED;
2561}
2562
2563static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
2564{
2565 struct mtk_eth *eth = _eth;
2566
2567 if (likely(napi_schedule_prep(&eth->tx_napi))) {
developerfd40db22021-04-29 10:08:25 +08002568 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer6bbe70d2021-08-06 09:34:55 +08002569 __napi_schedule(&eth->tx_napi);
developerfd40db22021-04-29 10:08:25 +08002570 }
2571
2572 return IRQ_HANDLED;
2573}
2574
2575static irqreturn_t mtk_handle_irq(int irq, void *_eth)
2576{
2577 struct mtk_eth *eth = _eth;
2578
developer18f46a82021-07-20 21:08:21 +08002579 if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT(0)) {
2580 if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT(0))
2581 mtk_handle_irq_rx(irq, &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08002582 }
2583 if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
2584 if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
2585 mtk_handle_irq_tx(irq, _eth);
2586 }
2587
2588 return IRQ_HANDLED;
2589}
2590
developera2613e62022-07-01 18:29:37 +08002591static irqreturn_t mtk_handle_irq_fixed_link(int irq, void *_mac)
2592{
2593 struct mtk_mac *mac = _mac;
2594 struct mtk_eth *eth = mac->hw;
2595 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
2596 struct net_device *dev = phylink_priv->dev;
2597 int link_old, link_new;
2598
2599 // clear interrupt status for gpy211
2600 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
2601
2602 link_old = phylink_priv->link;
2603 link_new = _mtk_mdio_read(eth, phylink_priv->phyaddr, MII_BMSR) & BMSR_LSTATUS;
2604
2605 if (link_old != link_new) {
2606 phylink_priv->link = link_new;
2607 if (link_new) {
2608 			printk("phylink.%d %s: Link is Up\n", phylink_priv->id, dev ? dev->name : "");
2609 if (dev)
2610 netif_carrier_on(dev);
2611 } else {
2612 			printk("phylink.%d %s: Link is Down\n", phylink_priv->id, dev ? dev->name : "");
2613 if (dev)
2614 netif_carrier_off(dev);
2615 }
2616 }
2617
2618 return IRQ_HANDLED;
2619}
2620
developerfd40db22021-04-29 10:08:25 +08002621#ifdef CONFIG_NET_POLL_CONTROLLER
2622static void mtk_poll_controller(struct net_device *dev)
2623{
2624 struct mtk_mac *mac = netdev_priv(dev);
2625 struct mtk_eth *eth = mac->hw;
2626
2627 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002628 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
2629 mtk_handle_irq_rx(eth->irq[2], &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08002630 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002631 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08002632}
2633#endif
2634
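/* Allocate the DMA rings and enable the TX/RX engines with the per-SoC
 * MTK_QDMA_GLO_CFG/MTK_PDMA_GLO_CFG settings; NETSYS v2 parts enable the
 * extra multi-descriptor, write-back and DDONE-check options, and LRO is
 * switched on when supported.
 */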
2635static int mtk_start_dma(struct mtk_eth *eth)
2636{
2637 u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
developer77d03a72021-06-06 00:06:00 +08002638 int val, err;
developerfd40db22021-04-29 10:08:25 +08002639
2640 err = mtk_dma_init(eth);
2641 if (err) {
2642 mtk_dma_free(eth);
2643 return err;
2644 }
2645
2646 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
developer15d0d282021-07-14 16:40:44 +08002647 val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
developer19d84562022-04-21 17:01:06 +08002648 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2649 val &= ~MTK_RESV_BUF_MASK;
developerfd40db22021-04-29 10:08:25 +08002650 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002651 val | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08002652 MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
2653 MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
2654 MTK_RESV_BUF | MTK_WCOMP_EN |
2655 MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
developer1ac65932022-07-19 17:23:32 +08002656 MTK_RX_2B_OFFSET, MTK_QDMA_GLO_CFG);
developer19d84562022-04-21 17:01:06 +08002657 }
developerfd40db22021-04-29 10:08:25 +08002658 else
2659 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002660 val | MTK_TX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08002661 MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
2662 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
2663 MTK_RX_BT_32DWORDS,
2664 MTK_QDMA_GLO_CFG);
2665
developer15d0d282021-07-14 16:40:44 +08002666 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
developerfd40db22021-04-29 10:08:25 +08002667 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002668 val | MTK_RX_DMA_EN | rx_2b_offset |
developerfd40db22021-04-29 10:08:25 +08002669 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
2670 MTK_PDMA_GLO_CFG);
2671 } else {
2672 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2673 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
2674 MTK_PDMA_GLO_CFG);
2675 }
2676
developer77d03a72021-06-06 00:06:00 +08002677 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) && eth->hwlro) {
2678 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
2679 mtk_w32(eth, val | MTK_RX_DMA_LRO_EN, MTK_PDMA_GLO_CFG);
2680 }
2681
developerfd40db22021-04-29 10:08:25 +08002682 return 0;
2683}
2684
developer8051e042022-04-08 13:26:36 +08002685void mtk_gdm_config(struct mtk_eth *eth, u32 config)
developerfd40db22021-04-29 10:08:25 +08002686{
2687 int i;
2688
2689 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2690 return;
2691
2692 for (i = 0; i < MTK_MAC_COUNT; i++) {
2693 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
2694
2695 /* default setup the forward port to send frame to PDMA */
2696 val &= ~0xffff;
2697
2698 /* Enable RX checksum */
2699 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
2700
2701 val |= config;
2702
2703 if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
2704 val |= MTK_GDMA_SPECIAL_TAG;
2705
2706 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
2707 }
developerfd40db22021-04-29 10:08:25 +08002708}
2709
2710static int mtk_open(struct net_device *dev)
2711{
2712 struct mtk_mac *mac = netdev_priv(dev);
2713 struct mtk_eth *eth = mac->hw;
developera2613e62022-07-01 18:29:37 +08002714 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
developer18f46a82021-07-20 21:08:21 +08002715 int err, i;
developer3a5969e2022-02-09 15:36:36 +08002716 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08002717
2718 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
2719 if (err) {
2720 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
2721 err);
2722 return err;
2723 }
2724
2725 /* we run 2 netdevs on the same dma ring so we only bring it up once */
2726 if (!refcount_read(&eth->dma_refcnt)) {
2727 int err = mtk_start_dma(eth);
2728
2729 if (err)
2730 return err;
2731
2732 mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
2733
2734 		/* Instruct the CDM to parse the MTK special tag on frames from the CPU */
2735 if (netdev_uses_dsa(dev)) {
2736 u32 val;
2737 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
2738 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
2739 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
2740 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
2741 }
2742
2743 napi_enable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08002744 napi_enable(&eth->rx_napi[0].napi);
developerfd40db22021-04-29 10:08:25 +08002745 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002746 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
2747
2748 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2749 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2750 napi_enable(&eth->rx_napi[i].napi);
2751 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(i));
2752 }
2753 }
2754
developerfd40db22021-04-29 10:08:25 +08002755 refcount_set(&eth->dma_refcnt, 1);
2756 }
2757 else
2758 refcount_inc(&eth->dma_refcnt);
2759
developera2613e62022-07-01 18:29:37 +08002760 if (phylink_priv->desc) {
2761 		/* Notice: This programming sequence applies only to the GPY211 single PHY chip.
2762 		   If your single PHY chip is not a GPY211, do the following instead:
2763 		   1. Contact your single PHY chip vendor and get the details of
2764 			 - how to enable the link status change interrupt
2765 			 - how to clear the interrupt source
2766 */
2767
2768 // clear interrupt source for gpy211
2769 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
2770
2771 // enable link status change interrupt for gpy211
2772 _mtk_mdio_write(eth, phylink_priv->phyaddr, 0x19, 0x0001);
2773
2774 phylink_priv->dev = dev;
2775
2776 // override dev pointer for single PHY chip 0
2777 if (phylink_priv->id == 0) {
2778 struct net_device *tmp;
2779
2780 tmp = __dev_get_by_name(&init_net, phylink_priv->label);
2781 if (tmp)
2782 phylink_priv->dev = tmp;
2783 else
2784 phylink_priv->dev = NULL;
2785 }
2786 }
2787
developerfd40db22021-04-29 10:08:25 +08002788 phylink_start(mac->phylink);
2789 netif_start_queue(dev);
developer3a5969e2022-02-09 15:36:36 +08002790 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer793f7b42022-05-20 13:54:51 +08002791 if (!phy_node && eth->sgmii->regmap[mac->id]) {
developer1a63ef92022-04-15 17:17:32 +08002792 regmap_write(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
developer3a5969e2022-02-09 15:36:36 +08002793 }
developerfd40db22021-04-29 10:08:25 +08002794 return 0;
2795}
2796
2797static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
2798{
2799 u32 val;
2800 int i;
2801
2802 /* stop the dma engine */
2803 spin_lock_bh(&eth->page_lock);
2804 val = mtk_r32(eth, glo_cfg);
2805 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
2806 glo_cfg);
2807 spin_unlock_bh(&eth->page_lock);
2808
2809 /* wait for dma stop */
2810 for (i = 0; i < 10; i++) {
2811 val = mtk_r32(eth, glo_cfg);
2812 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
developer8051e042022-04-08 13:26:36 +08002813 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08002814 continue;
2815 }
2816 break;
2817 }
2818}
2819
2820static int mtk_stop(struct net_device *dev)
2821{
2822 struct mtk_mac *mac = netdev_priv(dev);
2823 struct mtk_eth *eth = mac->hw;
developer18f46a82021-07-20 21:08:21 +08002824 int i;
developer3a5969e2022-02-09 15:36:36 +08002825 u32 val = 0;
2826 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08002827
2828 netif_tx_disable(dev);
2829
developer3a5969e2022-02-09 15:36:36 +08002830 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
2831 if (phy_node) {
2832 val = _mtk_mdio_read(eth, 0, 0);
2833 val |= BMCR_PDOWN;
2834 _mtk_mdio_write(eth, 0, 0, val);
developer793f7b42022-05-20 13:54:51 +08002835 } else if (eth->sgmii->regmap[mac->id]) {
developer1a63ef92022-04-15 17:17:32 +08002836 regmap_read(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
developer3a5969e2022-02-09 15:36:36 +08002837 val |= SGMII_PHYA_PWD;
developer1a63ef92022-04-15 17:17:32 +08002838 regmap_write(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
developer3a5969e2022-02-09 15:36:36 +08002839 }
2840
2841 //GMAC RX disable
2842 val = mtk_r32(eth, MTK_MAC_MCR(mac->id));
2843 mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(mac->id));
2844
2845 phylink_stop(mac->phylink);
2846
developerfd40db22021-04-29 10:08:25 +08002847 phylink_disconnect_phy(mac->phylink);
2848
2849 /* only shutdown DMA if this is the last user */
2850 if (!refcount_dec_and_test(&eth->dma_refcnt))
2851 return 0;
2852
2853 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
2854
2855 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002856 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08002857 napi_disable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08002858 napi_disable(&eth->rx_napi[0].napi);
2859
2860 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2861 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2862 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(i));
2863 napi_disable(&eth->rx_napi[i].napi);
2864 }
2865 }
developerfd40db22021-04-29 10:08:25 +08002866
2867 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2868 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
2869 mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
2870
2871 mtk_dma_free(eth);
2872
2873 return 0;
2874}
2875
developer8051e042022-04-08 13:26:36 +08002876void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
developerfd40db22021-04-29 10:08:25 +08002877{
developer8051e042022-04-08 13:26:36 +08002878 u32 val = 0, i = 0;
developerfd40db22021-04-29 10:08:25 +08002879
developerfd40db22021-04-29 10:08:25 +08002880 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
developer8051e042022-04-08 13:26:36 +08002881 reset_bits, reset_bits);
2882
2883 while (i++ < 5000) {
2884 mdelay(1);
2885 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
2886
2887 if ((val & reset_bits) == reset_bits) {
2888 mtk_reset_event_update(eth, MTK_EVENT_COLD_CNT);
2889 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2890 reset_bits, ~reset_bits);
2891 break;
2892 }
2893 }
2894
developerfd40db22021-04-29 10:08:25 +08002895 mdelay(10);
2896}
2897
2898static void mtk_clk_disable(struct mtk_eth *eth)
2899{
2900 int clk;
2901
2902 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
2903 clk_disable_unprepare(eth->clks[clk]);
2904}
2905
2906static int mtk_clk_enable(struct mtk_eth *eth)
2907{
2908 int clk, ret;
2909
2910 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
2911 ret = clk_prepare_enable(eth->clks[clk]);
2912 if (ret)
2913 goto err_disable_clks;
2914 }
2915
2916 return 0;
2917
2918err_disable_clks:
2919 while (--clk >= 0)
2920 clk_disable_unprepare(eth->clks[clk]);
2921
2922 return ret;
2923}
2924
developer18f46a82021-07-20 21:08:21 +08002925static int mtk_napi_init(struct mtk_eth *eth)
2926{
2927 struct mtk_napi *rx_napi = &eth->rx_napi[0];
2928 int i;
2929
2930 rx_napi->eth = eth;
2931 rx_napi->rx_ring = &eth->rx_ring[0];
2932 rx_napi->irq_grp_no = 2;
2933
2934 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2935 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2936 rx_napi = &eth->rx_napi[i];
2937 rx_napi->eth = eth;
2938 rx_napi->rx_ring = &eth->rx_ring[i];
2939 rx_napi->irq_grp_no = 2 + i;
2940 }
2941 }
2942
2943 return 0;
2944}
2945
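/* Hardware initialisation shared by probe and the reset worker: enable
 * clocks and runtime PM on a cold start, perform a warm or cold frame engine
 * reset depending on 'type', force every GMAC link down, set up interrupt
 * grouping and, on NETSYS v2, the PSE/GDM queue thresholds.
 */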
developer8051e042022-04-08 13:26:36 +08002946static int mtk_hw_init(struct mtk_eth *eth, u32 type)
developerfd40db22021-04-29 10:08:25 +08002947{
developer8051e042022-04-08 13:26:36 +08002948 int i, ret = 0;
developerfd40db22021-04-29 10:08:25 +08002949
developer8051e042022-04-08 13:26:36 +08002950 pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
2951 atomic_read(&reset_lock), atomic_read(&force));
developerfd40db22021-04-29 10:08:25 +08002952
developer8051e042022-04-08 13:26:36 +08002953 if (atomic_read(&reset_lock) == 0) {
2954 if (test_and_set_bit(MTK_HW_INIT, &eth->state))
2955 return 0;
developerfd40db22021-04-29 10:08:25 +08002956
developer8051e042022-04-08 13:26:36 +08002957 pm_runtime_enable(eth->dev);
2958 pm_runtime_get_sync(eth->dev);
2959
2960 ret = mtk_clk_enable(eth);
2961 if (ret)
2962 goto err_disable_pm;
2963 }
developerfd40db22021-04-29 10:08:25 +08002964
2965 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2966 ret = device_reset(eth->dev);
2967 if (ret) {
2968 dev_err(eth->dev, "MAC reset failed!\n");
2969 goto err_disable_pm;
2970 }
2971
2972 /* enable interrupt delay for RX */
2973 mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
2974
2975 /* disable delay and normal interrupt */
2976 mtk_tx_irq_disable(eth, ~0);
2977 mtk_rx_irq_disable(eth, ~0);
2978
2979 return 0;
2980 }
2981
developer8051e042022-04-08 13:26:36 +08002982 pr_info("[%s] execute fe %s reset\n", __func__,
2983 (type == MTK_TYPE_WARM_RESET) ? "warm" : "cold");
developer545abf02021-07-15 17:47:01 +08002984
developer8051e042022-04-08 13:26:36 +08002985 if (type == MTK_TYPE_WARM_RESET)
2986 mtk_eth_warm_reset(eth);
developer545abf02021-07-15 17:47:01 +08002987 else
developer8051e042022-04-08 13:26:36 +08002988 mtk_eth_cold_reset(eth);
developer545abf02021-07-15 17:47:01 +08002989
2990 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developer545abf02021-07-15 17:47:01 +08002991 /* Set FE to PDMAv2 if necessary */
developerfd40db22021-04-29 10:08:25 +08002992 mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
developer545abf02021-07-15 17:47:01 +08002993 }
developerfd40db22021-04-29 10:08:25 +08002994
2995 if (eth->pctl) {
2996 /* Set GE2 driving and slew rate */
2997 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
2998
2999 /* set GE2 TDSEL */
3000 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3001
3002 /* set GE2 TUNE */
3003 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3004 }
3005
3006 	/* Set link-down as the default for each GMAC. Each GMAC's MCR is set
3007 	 * up with the appropriate value when mtk_mac_config() is invoked.
3009 */
3010 for (i = 0; i < MTK_MAC_COUNT; i++)
3011 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3012
3013 /* Enable RX VLan Offloading */
developer41294e32021-05-07 16:11:23 +08003014 if (eth->soc->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
3015 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3016 else
3017 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
developerfd40db22021-04-29 10:08:25 +08003018
3019 /* enable interrupt delay for RX/TX */
3020 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
3021 mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);
3022
3023 mtk_tx_irq_disable(eth, ~0);
3024 mtk_rx_irq_disable(eth, ~0);
3025
3026 /* FE int grouping */
3027 mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
developer18f46a82021-07-20 21:08:21 +08003028 mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_PDMA_INT_GRP2);
developerfd40db22021-04-29 10:08:25 +08003029 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
developer18f46a82021-07-20 21:08:21 +08003030 mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_QDMA_INT_GRP2);
developer8051e042022-04-08 13:26:36 +08003031 mtk_w32(eth, 0x21021003, MTK_FE_INT_GRP);
developerbe971722022-05-23 13:51:05 +08003032 mtk_w32(eth, MTK_FE_INT_TSO_FAIL |
developer8051e042022-04-08 13:26:36 +08003033 MTK_FE_INT_TSO_ILLEGAL | MTK_FE_INT_TSO_ALIGN |
3034 MTK_FE_INT_RFIFO_OV | MTK_FE_INT_RFIFO_UF, MTK_FE_INT_ENABLE);
developerfd40db22021-04-29 10:08:25 +08003035
developera2bdbd52021-05-31 19:10:17 +08003036 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfef9efd2021-06-16 18:28:09 +08003037 /* PSE Free Queue Flow Control */
3038 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3039
developer459b78e2022-07-01 17:25:10 +08003040 /* PSE should not drop port8 and port9 packets from WDMA Tx */
3041 mtk_w32(eth, 0x00000300, PSE_NO_DROP_CFG);
3042
3043 		/* PSE should drop p8 and p9 packets when the WDMA RX ring is full */
3044 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
developer81bcad32021-07-15 14:14:38 +08003045
developerfef9efd2021-06-16 18:28:09 +08003046 /* PSE config input queue threshold */
developerfd40db22021-04-29 10:08:25 +08003047 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3048 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3049 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3050 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3051 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3052 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3053 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
developerfd5f9152022-01-05 16:29:42 +08003054 mtk_w32(eth, 0x002a000e, PSE_IQ_REV(8));
developerfd40db22021-04-29 10:08:25 +08003055
developerfef9efd2021-06-16 18:28:09 +08003056 /* PSE config output queue threshold */
developerfd40db22021-04-29 10:08:25 +08003057 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3058 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3059 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3060 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3061 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3062 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3063 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3064 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
developerfef9efd2021-06-16 18:28:09 +08003065
3066 /* GDM and CDM Threshold */
3067 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3068 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3069 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3070 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3071 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3072 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
developerfd40db22021-04-29 10:08:25 +08003073 }
3074
3075 return 0;
3076
3077err_disable_pm:
3078 pm_runtime_put_sync(eth->dev);
3079 pm_runtime_disable(eth->dev);
3080
3081 return ret;
3082}
3083
3084static int mtk_hw_deinit(struct mtk_eth *eth)
3085{
3086 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
3087 return 0;
3088
3089 mtk_clk_disable(eth);
3090
3091 pm_runtime_put_sync(eth->dev);
3092 pm_runtime_disable(eth->dev);
3093
3094 return 0;
3095}
3096
3097static int __init mtk_init(struct net_device *dev)
3098{
3099 struct mtk_mac *mac = netdev_priv(dev);
3100 struct mtk_eth *eth = mac->hw;
3101 const char *mac_addr;
3102
3103 mac_addr = of_get_mac_address(mac->of_node);
3104 if (!IS_ERR(mac_addr))
3105 ether_addr_copy(dev->dev_addr, mac_addr);
3106
3107 /* If the mac address is invalid, use random mac address */
3108 if (!is_valid_ether_addr(dev->dev_addr)) {
3109 eth_hw_addr_random(dev);
3110 dev_err(eth->dev, "generated random MAC address %pM\n",
3111 dev->dev_addr);
3112 }
3113
3114 return 0;
3115}
3116
3117static void mtk_uninit(struct net_device *dev)
3118{
3119 struct mtk_mac *mac = netdev_priv(dev);
3120 struct mtk_eth *eth = mac->hw;
3121
3122 phylink_disconnect_phy(mac->phylink);
3123 mtk_tx_irq_disable(eth, ~0);
3124 mtk_rx_irq_disable(eth, ~0);
3125}
3126
3127static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3128{
3129 struct mtk_mac *mac = netdev_priv(dev);
3130
3131 switch (cmd) {
3132 case SIOCGMIIPHY:
3133 case SIOCGMIIREG:
3134 case SIOCSMIIREG:
3135 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
3136 default:
3137 /* default invoke the mtk_eth_dbg handler */
3138 return mtk_do_priv_ioctl(dev, ifr, cmd);
3139 break;
3140 }
3141
3142 return -EOPNOTSUPP;
3143}
3144
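/* Deferred reset worker scheduled from the TX timeout and error paths: it
 * verifies the reset event, quiesces the PPE and frame engine, notifies the
 * WiFi/HNAT modules, stops every active netdev, performs a warm reset via
 * mtk_hw_init(MTK_TYPE_WARM_RESET) and then reopens the devices.
 */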
3145static void mtk_pending_work(struct work_struct *work)
3146{
3147 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
developer8051e042022-04-08 13:26:36 +08003148 struct device_node *phy_node = NULL;
3149 struct mtk_mac *mac = NULL;
3150 int err, i = 0;
developerfd40db22021-04-29 10:08:25 +08003151 unsigned long restart = 0;
developer8051e042022-04-08 13:26:36 +08003152 u32 val = 0;
3153
3154 atomic_inc(&reset_lock);
3155 val = mtk_r32(eth, MTK_FE_INT_STATUS);
3156 if (!mtk_check_reset_event(eth, val)) {
3157 atomic_dec(&reset_lock);
3158 pr_info("[%s] No need to do FE reset !\n", __func__);
3159 return;
3160 }
developerfd40db22021-04-29 10:08:25 +08003161
3162 rtnl_lock();
3163
developer8051e042022-04-08 13:26:36 +08003164 	/* Disable FE P3 and P4 */
3165 val = mtk_r32(eth, MTK_FE_GLO_CFG);
3166 val |= MTK_FE_LINK_DOWN_P3;
3167 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3168 val |= MTK_FE_LINK_DOWN_P4;
3169 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3170
3171 /* Adjust PPE configurations to prepare for reset */
3172 mtk_prepare_reset_ppe(eth, 0);
3173 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3174 mtk_prepare_reset_ppe(eth, 1);
3175
3176 /* Adjust FE configurations to prepare for reset */
3177 mtk_prepare_reset_fe(eth);
3178
3179 /* Trigger Wifi SER reset */
3180 call_netdevice_notifiers(MTK_FE_START_RESET, eth->netdev[0]);
3181 rtnl_unlock();
3182 wait_for_completion_timeout(&wait_ser_done, 5000);
3183 rtnl_lock();
developerfd40db22021-04-29 10:08:25 +08003184
3185 while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
3186 cpu_relax();
3187
developer8051e042022-04-08 13:26:36 +08003188 del_timer_sync(&eth->mtk_dma_monitor_timer);
3189 pr_info("[%s] mtk_stop starts !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003190 /* stop all devices to make sure that dma is properly shut down */
3191 for (i = 0; i < MTK_MAC_COUNT; i++) {
3192 if (!eth->netdev[i])
3193 continue;
3194 mtk_stop(eth->netdev[i]);
3195 __set_bit(i, &restart);
3196 }
developer8051e042022-04-08 13:26:36 +08003197 pr_info("[%s] mtk_stop ends !\n", __func__);
3198 mdelay(15);
developerfd40db22021-04-29 10:08:25 +08003199
3200 if (eth->dev->pins)
3201 pinctrl_select_state(eth->dev->pins->p,
3202 eth->dev->pins->default_state);
developer8051e042022-04-08 13:26:36 +08003203
3204 pr_info("[%s] mtk_hw_init starts !\n", __func__);
3205 mtk_hw_init(eth, MTK_TYPE_WARM_RESET);
3206 pr_info("[%s] mtk_hw_init ends !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003207
3208 /* restart DMA and enable IRQs */
3209 for (i = 0; i < MTK_MAC_COUNT; i++) {
3210 if (!test_bit(i, &restart))
3211 continue;
3212 err = mtk_open(eth->netdev[i]);
3213 if (err) {
3214 netif_alert(eth, ifup, eth->netdev[i],
3215 "Driver up/down cycle failed, closing device.\n");
3216 dev_close(eth->netdev[i]);
3217 }
3218 }
3219
developer8051e042022-04-08 13:26:36 +08003220 /* Set KA tick select */
3221 mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0, MTK_PPE_TB_CFG(0));
3222 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3223 mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0, MTK_PPE_TB_CFG(1));
3224
3225 	/* Enable FE P3 and P4 */
3226 val = mtk_r32(eth, MTK_FE_GLO_CFG);
3227 val &= ~MTK_FE_LINK_DOWN_P3;
3228 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3229 val &= ~MTK_FE_LINK_DOWN_P4;
3230 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3231
3232 /* Power up sgmii */
3233 for (i = 0; i < MTK_MAC_COUNT; i++) {
3234 		if (!eth->netdev[i])
			continue;
		mac = netdev_priv(eth->netdev[i]);
3235 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer793f7b42022-05-20 13:54:51 +08003236 if (!phy_node && eth->sgmii->regmap[i]) {
developer8051e042022-04-08 13:26:36 +08003237 mtk_gmac_sgmii_path_setup(eth, i);
3238 regmap_write(eth->sgmii->regmap[i], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
3239 }
3240 }
3241
3242 call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE, eth->netdev[0]);
3243 pr_info("[%s] HNAT reset done !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003244
developer8051e042022-04-08 13:26:36 +08003245 call_netdevice_notifiers(MTK_FE_RESET_DONE, eth->netdev[0]);
3246 pr_info("[%s] WiFi SER reset done !\n", __func__);
3247
3248 atomic_dec(&reset_lock);
3249 if (atomic_read(&force) > 0)
3250 atomic_dec(&force);
3251
3252 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
3253 eth->mtk_dma_monitor_timer.expires = jiffies;
3254 add_timer(&eth->mtk_dma_monitor_timer);
developerfd40db22021-04-29 10:08:25 +08003255 clear_bit_unlock(MTK_RESETTING, &eth->state);
3256
3257 rtnl_unlock();
3258}
3259
3260static int mtk_free_dev(struct mtk_eth *eth)
3261{
3262 int i;
3263
3264 for (i = 0; i < MTK_MAC_COUNT; i++) {
3265 if (!eth->netdev[i])
3266 continue;
3267 free_netdev(eth->netdev[i]);
3268 }
3269
3270 return 0;
3271}
3272
3273static int mtk_unreg_dev(struct mtk_eth *eth)
3274{
3275 int i;
3276
3277 for (i = 0; i < MTK_MAC_COUNT; i++) {
3278 if (!eth->netdev[i])
3279 continue;
3280 unregister_netdev(eth->netdev[i]);
3281 }
3282
3283 return 0;
3284}
3285
3286static int mtk_cleanup(struct mtk_eth *eth)
3287{
3288 mtk_unreg_dev(eth);
3289 mtk_free_dev(eth);
3290 cancel_work_sync(&eth->pending_work);
3291
3292 return 0;
3293}
3294
3295static int mtk_get_link_ksettings(struct net_device *ndev,
3296 struct ethtool_link_ksettings *cmd)
3297{
3298 struct mtk_mac *mac = netdev_priv(ndev);
3299
3300 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3301 return -EBUSY;
3302
3303 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
3304}
3305
3306static int mtk_set_link_ksettings(struct net_device *ndev,
3307 const struct ethtool_link_ksettings *cmd)
3308{
3309 struct mtk_mac *mac = netdev_priv(ndev);
3310
3311 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3312 return -EBUSY;
3313
3314 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
3315}
3316
3317static void mtk_get_drvinfo(struct net_device *dev,
3318 struct ethtool_drvinfo *info)
3319{
3320 struct mtk_mac *mac = netdev_priv(dev);
3321
3322 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
3323 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
3324 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
3325}
3326
3327static u32 mtk_get_msglevel(struct net_device *dev)
3328{
3329 struct mtk_mac *mac = netdev_priv(dev);
3330
3331 return mac->hw->msg_enable;
3332}
3333
3334static void mtk_set_msglevel(struct net_device *dev, u32 value)
3335{
3336 struct mtk_mac *mac = netdev_priv(dev);
3337
3338 mac->hw->msg_enable = value;
3339}
3340
3341static int mtk_nway_reset(struct net_device *dev)
3342{
3343 struct mtk_mac *mac = netdev_priv(dev);
3344
3345 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3346 return -EBUSY;
3347
3348 if (!mac->phylink)
3349 return -ENOTSUPP;
3350
3351 return phylink_ethtool_nway_reset(mac->phylink);
3352}
3353
3354static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3355{
3356 int i;
3357
3358 switch (stringset) {
3359 case ETH_SS_STATS:
3360 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
3361 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
3362 data += ETH_GSTRING_LEN;
3363 }
3364 break;
3365 }
3366}
3367
3368static int mtk_get_sset_count(struct net_device *dev, int sset)
3369{
3370 switch (sset) {
3371 case ETH_SS_STATS:
3372 return ARRAY_SIZE(mtk_ethtool_stats);
3373 default:
3374 return -EOPNOTSUPP;
3375 }
3376}
3377
3378static void mtk_get_ethtool_stats(struct net_device *dev,
3379 struct ethtool_stats *stats, u64 *data)
3380{
3381 struct mtk_mac *mac = netdev_priv(dev);
3382 struct mtk_hw_stats *hwstats = mac->hw_stats;
3383 u64 *data_src, *data_dst;
3384 unsigned int start;
3385 int i;
3386
3387 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3388 return;
3389
3390 if (netif_running(dev) && netif_device_present(dev)) {
3391 if (spin_trylock_bh(&hwstats->stats_lock)) {
3392 mtk_stats_update_mac(mac);
3393 spin_unlock_bh(&hwstats->stats_lock);
3394 }
3395 }
3396
3397 data_src = (u64 *)hwstats;
3398
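	/* Snapshot the counters under the u64_stats seqcount; retry if an update raced with the read */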
3399 do {
3400 data_dst = data;
3401 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
3402
3403 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
3404 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
3405 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
3406}
3407
3408static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
3409 u32 *rule_locs)
3410{
3411 int ret = -EOPNOTSUPP;
3412
3413 switch (cmd->cmd) {
3414 case ETHTOOL_GRXRINGS:
3415 if (dev->hw_features & NETIF_F_LRO) {
3416 cmd->data = MTK_MAX_RX_RING_NUM;
3417 ret = 0;
3418 }
3419 break;
3420 case ETHTOOL_GRXCLSRLCNT:
3421 if (dev->hw_features & NETIF_F_LRO) {
3422 struct mtk_mac *mac = netdev_priv(dev);
3423
3424 cmd->rule_cnt = mac->hwlro_ip_cnt;
3425 ret = 0;
3426 }
3427 break;
3428 case ETHTOOL_GRXCLSRULE:
3429 if (dev->hw_features & NETIF_F_LRO)
3430 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
3431 break;
3432 case ETHTOOL_GRXCLSRLALL:
3433 if (dev->hw_features & NETIF_F_LRO)
3434 ret = mtk_hwlro_get_fdir_all(dev, cmd,
3435 rule_locs);
3436 break;
3437 default:
3438 break;
3439 }
3440
3441 return ret;
3442}
3443
3444static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3445{
3446 int ret = -EOPNOTSUPP;
3447
3448 switch (cmd->cmd) {
3449 case ETHTOOL_SRXCLSRLINS:
3450 if (dev->hw_features & NETIF_F_LRO)
3451 ret = mtk_hwlro_add_ipaddr(dev, cmd);
3452 break;
3453 case ETHTOOL_SRXCLSRLDEL:
3454 if (dev->hw_features & NETIF_F_LRO)
3455 ret = mtk_hwlro_del_ipaddr(dev, cmd);
3456 break;
3457 default:
3458 break;
3459 }
3460
3461 return ret;
3462}
3463
developer6c5cbb52022-08-12 11:37:45 +08003464static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
3465{
3466 struct mtk_mac *mac = netdev_priv(dev);
3467
3468 phylink_ethtool_get_pauseparam(mac->phylink, pause);
3469}
3470
3471static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
3472{
3473 struct mtk_mac *mac = netdev_priv(dev);
3474
3475 return phylink_ethtool_set_pauseparam(mac->phylink, pause);
3476}
3477
developerfd40db22021-04-29 10:08:25 +08003478static const struct ethtool_ops mtk_ethtool_ops = {
3479 .get_link_ksettings = mtk_get_link_ksettings,
3480 .set_link_ksettings = mtk_set_link_ksettings,
3481 .get_drvinfo = mtk_get_drvinfo,
3482 .get_msglevel = mtk_get_msglevel,
3483 .set_msglevel = mtk_set_msglevel,
3484 .nway_reset = mtk_nway_reset,
3485 .get_link = ethtool_op_get_link,
3486 .get_strings = mtk_get_strings,
3487 .get_sset_count = mtk_get_sset_count,
3488 .get_ethtool_stats = mtk_get_ethtool_stats,
3489 .get_rxnfc = mtk_get_rxnfc,
3490 .set_rxnfc = mtk_set_rxnfc,
developer6c5cbb52022-08-12 11:37:45 +08003491 .get_pauseparam = mtk_get_pauseparam,
3492 .set_pauseparam = mtk_set_pauseparam,
developerfd40db22021-04-29 10:08:25 +08003493};
3494
3495static const struct net_device_ops mtk_netdev_ops = {
3496 .ndo_init = mtk_init,
3497 .ndo_uninit = mtk_uninit,
3498 .ndo_open = mtk_open,
3499 .ndo_stop = mtk_stop,
3500 .ndo_start_xmit = mtk_start_xmit,
3501 .ndo_set_mac_address = mtk_set_mac_address,
3502 .ndo_validate_addr = eth_validate_addr,
3503 .ndo_do_ioctl = mtk_do_ioctl,
3504 .ndo_tx_timeout = mtk_tx_timeout,
3505 .ndo_get_stats64 = mtk_get_stats64,
3506 .ndo_fix_features = mtk_fix_features,
3507 .ndo_set_features = mtk_set_features,
3508#ifdef CONFIG_NET_POLL_CONTROLLER
3509 .ndo_poll_controller = mtk_poll_controller,
3510#endif
3511};
3512
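/* Set up one GMAC from a "mediatek,eth-mac" DT node: net_device, counters and phylink */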
3513static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
3514{
3515 const __be32 *_id = of_get_property(np, "reg", NULL);
3516 struct phylink *phylink;
3517 int phy_mode, id, err;
3518 struct mtk_mac *mac;
developera2613e62022-07-01 18:29:37 +08003519 struct mtk_phylink_priv *phylink_priv;
3520 struct fwnode_handle *fixed_node;
3521 struct gpio_desc *desc;
developerfd40db22021-04-29 10:08:25 +08003522
3523 if (!_id) {
3524 dev_err(eth->dev, "missing mac id\n");
3525 return -EINVAL;
3526 }
3527
3528 id = be32_to_cpup(_id);
developerfb556ca2021-10-13 10:52:09 +08003529 if (id < 0 || id >= MTK_MAC_COUNT) {
developerfd40db22021-04-29 10:08:25 +08003530 dev_err(eth->dev, "%d is not a valid mac id\n", id);
3531 return -EINVAL;
3532 }
3533
3534 if (eth->netdev[id]) {
3535 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
3536 return -EINVAL;
3537 }
3538
3539 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
3540 if (!eth->netdev[id]) {
3541 dev_err(eth->dev, "alloc_etherdev failed\n");
3542 return -ENOMEM;
3543 }
3544 mac = netdev_priv(eth->netdev[id]);
3545 eth->mac[id] = mac;
3546 mac->id = id;
3547 mac->hw = eth;
3548 mac->of_node = np;
3549
3550 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
3551 mac->hwlro_ip_cnt = 0;
3552
3553 mac->hw_stats = devm_kzalloc(eth->dev,
3554 sizeof(*mac->hw_stats),
3555 GFP_KERNEL);
3556 if (!mac->hw_stats) {
3557 dev_err(eth->dev, "failed to allocate counter memory\n");
3558 err = -ENOMEM;
3559 goto free_netdev;
3560 }
3561 spin_lock_init(&mac->hw_stats->stats_lock);
3562 u64_stats_init(&mac->hw_stats->syncp);
3563 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
3564
3565 /* phylink create */
3566 phy_mode = of_get_phy_mode(np);
3567 if (phy_mode < 0) {
3568 dev_err(eth->dev, "incorrect phy-mode\n");
3569 err = -EINVAL;
3570 goto free_netdev;
3571 }
3572
3573 /* mac config is not set */
3574 mac->interface = PHY_INTERFACE_MODE_NA;
3575 mac->mode = MLO_AN_PHY;
3576 mac->speed = SPEED_UNKNOWN;
3577
3578 mac->phylink_config.dev = &eth->netdev[id]->dev;
3579 mac->phylink_config.type = PHYLINK_NETDEV;
3580
3581 phylink = phylink_create(&mac->phylink_config,
3582 of_fwnode_handle(mac->of_node),
3583 phy_mode, &mtk_phylink_ops);
3584 if (IS_ERR(phylink)) {
3585 err = PTR_ERR(phylink);
3586 goto free_netdev;
3587 }
3588
3589 mac->phylink = phylink;
3590
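	/* Optional fixed-link "link-gpio": track link state through a GPIO interrupt */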
developera2613e62022-07-01 18:29:37 +08003591 fixed_node = fwnode_get_named_child_node(of_fwnode_handle(mac->of_node),
3592 "fixed-link");
3593 if (fixed_node) {
3594 desc = fwnode_get_named_gpiod(fixed_node, "link-gpio",
3595 0, GPIOD_IN, "?");
3596 if (!IS_ERR(desc)) {
3597 struct device_node *phy_np;
3598 const char *label;
3599 int irq, phyaddr;
3600
3601 phylink_priv = &mac->phylink_priv;
3602
3603 phylink_priv->desc = desc;
3604 phylink_priv->id = id;
3605 phylink_priv->link = -1;
3606
3607 irq = gpiod_to_irq(desc);
3608 if (irq > 0) {
3609 devm_request_irq(eth->dev, irq, mtk_handle_irq_fixed_link,
3610 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
3611 "ethernet:fixed link", mac);
3612 }
3613
3614 if (!of_property_read_string(to_of_node(fixed_node), "label", &label))
 3615 strlcpy(phylink_priv->label, label, sizeof(phylink_priv->label));
3616
3617 phy_np = of_parse_phandle(to_of_node(fixed_node), "phy-handle", 0);
3618 if (phy_np) {
3619 if (!of_property_read_u32(phy_np, "reg", &phyaddr))
3620 phylink_priv->phyaddr = phyaddr;
3621 }
3622 }
3623 fwnode_handle_put(fixed_node);
3624 }
3625
developerfd40db22021-04-29 10:08:25 +08003626 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
3627 eth->netdev[id]->watchdog_timeo = 5 * HZ;
3628 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
3629 eth->netdev[id]->base_addr = (unsigned long)eth->base;
3630
3631 eth->netdev[id]->hw_features = eth->soc->hw_features;
3632 if (eth->hwlro)
3633 eth->netdev[id]->hw_features |= NETIF_F_LRO;
3634
3635 eth->netdev[id]->vlan_features = eth->soc->hw_features &
3636 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
3637 eth->netdev[id]->features |= eth->soc->hw_features;
3638 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
3639
3640 eth->netdev[id]->irq = eth->irq[0];
3641 eth->netdev[id]->dev.of_node = np;
3642
3643 return 0;
3644
3645free_netdev:
3646 free_netdev(eth->netdev[id]);
3647 return err;
3648}
3649
3650static int mtk_probe(struct platform_device *pdev)
3651{
3652 struct device_node *mac_np;
3653 struct mtk_eth *eth;
3654 int err, i;
3655
3656 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
3657 if (!eth)
3658 return -ENOMEM;
3659
3660 eth->soc = of_device_get_match_data(&pdev->dev);
3661
3662 eth->dev = &pdev->dev;
3663 eth->base = devm_platform_ioremap_resource(pdev, 0);
3664 if (IS_ERR(eth->base))
3665 return PTR_ERR(eth->base);
3666
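	/* SoCs with dedicated SRAM keep the DMA scratch ring inside the frame engine I/O window at MTK_ETH_SRAM_OFFSET */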
 3667 if (eth->soc->has_sram) {
3668 struct resource *res;
3669 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
developer4c32b7a2021-11-13 16:46:43 +08003670 if (unlikely(!res))
3671 return -EINVAL;
developerfd40db22021-04-29 10:08:25 +08003672 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
3673 }
3674
3675 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3676 eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
3677 eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
3678 } else {
3679 eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
3680 eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
3681 }
3682
3683 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3684 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
3685 eth->ip_align = NET_IP_ALIGN;
3686 } else {
developera2bdbd52021-05-31 19:10:17 +08003687 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developerfd40db22021-04-29 10:08:25 +08003688 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_V2;
3689 else
3690 eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
3691 }
3692
3693 spin_lock_init(&eth->page_lock);
3694 spin_lock_init(&eth->tx_irq_lock);
3695 spin_lock_init(&eth->rx_irq_lock);
developerd82e8372022-02-09 15:00:09 +08003696 spin_lock_init(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +08003697
3698 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3699 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3700 "mediatek,ethsys");
3701 if (IS_ERR(eth->ethsys)) {
3702 dev_err(&pdev->dev, "no ethsys regmap found\n");
3703 return PTR_ERR(eth->ethsys);
3704 }
3705 }
3706
3707 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
3708 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3709 "mediatek,infracfg");
3710 if (IS_ERR(eth->infra)) {
3711 dev_err(&pdev->dev, "no infracfg regmap found\n");
3712 return PTR_ERR(eth->infra);
3713 }
3714 }
3715
3716 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
3717 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
3718 GFP_KERNEL);
3719 if (!eth->sgmii)
3720 return -ENOMEM;
3721
3722 err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
3723 eth->soc->ana_rgc3);
3724
3725 if (err)
3726 return err;
3727 }
3728
3729 if (eth->soc->required_pctl) {
3730 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3731 "mediatek,pctl");
3732 if (IS_ERR(eth->pctl)) {
3733 dev_err(&pdev->dev, "no pctl regmap found\n");
3734 return PTR_ERR(eth->pctl);
3735 }
3736 }
3737
developer18f46a82021-07-20 21:08:21 +08003738 for (i = 0; i < MTK_MAX_IRQ_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08003739 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
3740 eth->irq[i] = eth->irq[0];
3741 else
3742 eth->irq[i] = platform_get_irq(pdev, i);
3743 if (eth->irq[i] < 0) {
3744 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
3745 return -ENXIO;
3746 }
3747 }
3748
3749 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
3750 eth->clks[i] = devm_clk_get(eth->dev,
3751 mtk_clks_source_name[i]);
3752 if (IS_ERR(eth->clks[i])) {
3753 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
3754 return -EPROBE_DEFER;
3755 if (eth->soc->required_clks & BIT(i)) {
3756 dev_err(&pdev->dev, "clock %s not found\n",
3757 mtk_clks_source_name[i]);
3758 return -EINVAL;
3759 }
3760 eth->clks[i] = NULL;
3761 }
3762 }
3763
3764 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
3765 INIT_WORK(&eth->pending_work, mtk_pending_work);
3766
developer8051e042022-04-08 13:26:36 +08003767 err = mtk_hw_init(eth, MTK_TYPE_COLD_RESET);
developerfd40db22021-04-29 10:08:25 +08003768 if (err)
3769 return err;
3770
3771 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
3772
3773 for_each_child_of_node(pdev->dev.of_node, mac_np) {
3774 if (!of_device_is_compatible(mac_np,
3775 "mediatek,eth-mac"))
3776 continue;
3777
3778 if (!of_device_is_available(mac_np))
3779 continue;
3780
3781 err = mtk_add_mac(eth, mac_np);
3782 if (err) {
3783 of_node_put(mac_np);
3784 goto err_deinit_hw;
3785 }
3786 }
3787
developer18f46a82021-07-20 21:08:21 +08003788 err = mtk_napi_init(eth);
3789 if (err)
3790 goto err_free_dev;
3791
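	/* IRQ wiring: a single shared line for all events, or dedicated TX/RX lines plus per-ring RSS or FE interrupts */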
developerfd40db22021-04-29 10:08:25 +08003792 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
3793 err = devm_request_irq(eth->dev, eth->irq[0],
3794 mtk_handle_irq, 0,
3795 dev_name(eth->dev), eth);
3796 } else {
3797 err = devm_request_irq(eth->dev, eth->irq[1],
3798 mtk_handle_irq_tx, 0,
3799 dev_name(eth->dev), eth);
3800 if (err)
3801 goto err_free_dev;
3802
3803 err = devm_request_irq(eth->dev, eth->irq[2],
3804 mtk_handle_irq_rx, 0,
developer18f46a82021-07-20 21:08:21 +08003805 dev_name(eth->dev), &eth->rx_napi[0]);
3806 if (err)
3807 goto err_free_dev;
3808
developer793f7b42022-05-20 13:54:51 +08003809 if (MTK_MAX_IRQ_NUM > 3) {
3810 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3811 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3812 err = devm_request_irq(eth->dev,
3813 eth->irq[2 + i],
3814 mtk_handle_irq_rx, 0,
3815 dev_name(eth->dev),
3816 &eth->rx_napi[i]);
3817 if (err)
3818 goto err_free_dev;
3819 }
3820 } else {
3821 err = devm_request_irq(eth->dev, eth->irq[3],
3822 mtk_handle_fe_irq, 0,
3823 dev_name(eth->dev), eth);
developer18f46a82021-07-20 21:08:21 +08003824 if (err)
3825 goto err_free_dev;
3826 }
3827 }
developerfd40db22021-04-29 10:08:25 +08003828 }
developer8051e042022-04-08 13:26:36 +08003829
developerfd40db22021-04-29 10:08:25 +08003830 if (err)
3831 goto err_free_dev;
3832
3833 /* No MT7628/88 support yet */
3834 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3835 err = mtk_mdio_init(eth);
3836 if (err)
3837 goto err_free_dev;
3838 }
3839
3840 for (i = 0; i < MTK_MAX_DEVS; i++) {
3841 if (!eth->netdev[i])
3842 continue;
3843
3844 err = register_netdev(eth->netdev[i]);
3845 if (err) {
3846 dev_err(eth->dev, "error bringing up device\n");
3847 goto err_deinit_mdio;
3848 } else
3849 netif_info(eth, probe, eth->netdev[i],
3850 "mediatek frame engine at 0x%08lx, irq %d\n",
3851 eth->netdev[i]->base_addr, eth->irq[0]);
3852 }
3853
3854 /* we run 2 devices on the same DMA ring so we need a dummy device
3855 * for NAPI to work
3856 */
3857 init_dummy_netdev(&eth->dummy_dev);
3858 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
3859 MTK_NAPI_WEIGHT);
developer18f46a82021-07-20 21:08:21 +08003860 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[0].napi, mtk_napi_rx,
developerfd40db22021-04-29 10:08:25 +08003861 MTK_NAPI_WEIGHT);
3862
developer18f46a82021-07-20 21:08:21 +08003863 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3864 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
3865 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[i].napi,
3866 mtk_napi_rx, MTK_NAPI_WEIGHT);
3867 }
3868
developerfd40db22021-04-29 10:08:25 +08003869 mtketh_debugfs_init(eth);
3870 debug_proc_init(eth);
3871
3872 platform_set_drvdata(pdev, eth);
3873
developer8051e042022-04-08 13:26:36 +08003874 register_netdevice_notifier(&mtk_eth_netdevice_nb);
developer793f7b42022-05-20 13:54:51 +08003875#if defined(CONFIG_MEDIATEK_NETSYS_V2)
developer8051e042022-04-08 13:26:36 +08003876 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
3877 eth->mtk_dma_monitor_timer.expires = jiffies;
3878 add_timer(&eth->mtk_dma_monitor_timer);
developer793f7b42022-05-20 13:54:51 +08003879#endif
developer8051e042022-04-08 13:26:36 +08003880
developerfd40db22021-04-29 10:08:25 +08003881 return 0;
3882
3883err_deinit_mdio:
3884 mtk_mdio_cleanup(eth);
3885err_free_dev:
3886 mtk_free_dev(eth);
3887err_deinit_hw:
3888 mtk_hw_deinit(eth);
3889
3890 return err;
3891}
3892
3893static int mtk_remove(struct platform_device *pdev)
3894{
3895 struct mtk_eth *eth = platform_get_drvdata(pdev);
3896 struct mtk_mac *mac;
3897 int i;
3898
3899 /* stop all devices to make sure that dma is properly shut down */
3900 for (i = 0; i < MTK_MAC_COUNT; i++) {
3901 if (!eth->netdev[i])
3902 continue;
3903 mtk_stop(eth->netdev[i]);
3904 mac = netdev_priv(eth->netdev[i]);
3905 phylink_disconnect_phy(mac->phylink);
3906 }
3907
3908 mtk_hw_deinit(eth);
3909
3910 netif_napi_del(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08003911 netif_napi_del(&eth->rx_napi[0].napi);
3912
3913 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3914 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
3915 netif_napi_del(&eth->rx_napi[i].napi);
3916 }
3917
developerfd40db22021-04-29 10:08:25 +08003918 mtk_cleanup(eth);
3919 mtk_mdio_cleanup(eth);
developer8051e042022-04-08 13:26:36 +08003920 unregister_netdevice_notifier(&mtk_eth_netdevice_nb);
3921 del_timer_sync(&eth->mtk_dma_monitor_timer);
developerfd40db22021-04-29 10:08:25 +08003922
3923 return 0;
3924}
3925
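/* Per-SoC configuration: capability flags, required clocks and pctl, SRAM usage and DMA descriptor layout */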
3926static const struct mtk_soc_data mt2701_data = {
3927 .caps = MT7623_CAPS | MTK_HWLRO,
3928 .hw_features = MTK_HW_FEATURES,
3929 .required_clks = MT7623_CLKS_BITMAP,
3930 .required_pctl = true,
3931 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003932 .txrx = {
3933 .txd_size = sizeof(struct mtk_tx_dma),
3934 .rxd_size = sizeof(struct mtk_rx_dma),
3935 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3936 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3937 },
developerfd40db22021-04-29 10:08:25 +08003938};
3939
3940static const struct mtk_soc_data mt7621_data = {
3941 .caps = MT7621_CAPS,
3942 .hw_features = MTK_HW_FEATURES,
3943 .required_clks = MT7621_CLKS_BITMAP,
3944 .required_pctl = false,
3945 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003946 .txrx = {
3947 .txd_size = sizeof(struct mtk_tx_dma),
3948 .rxd_size = sizeof(struct mtk_rx_dma),
3949 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3950 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3951 },
developerfd40db22021-04-29 10:08:25 +08003952};
3953
3954static const struct mtk_soc_data mt7622_data = {
3955 .ana_rgc3 = 0x2028,
3956 .caps = MT7622_CAPS | MTK_HWLRO,
3957 .hw_features = MTK_HW_FEATURES,
3958 .required_clks = MT7622_CLKS_BITMAP,
3959 .required_pctl = false,
3960 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003961 .txrx = {
3962 .txd_size = sizeof(struct mtk_tx_dma),
3963 .rxd_size = sizeof(struct mtk_rx_dma),
3964 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3965 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3966 },
developerfd40db22021-04-29 10:08:25 +08003967};
3968
3969static const struct mtk_soc_data mt7623_data = {
3970 .caps = MT7623_CAPS | MTK_HWLRO,
3971 .hw_features = MTK_HW_FEATURES,
3972 .required_clks = MT7623_CLKS_BITMAP,
3973 .required_pctl = true,
3974 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003975 .txrx = {
3976 .txd_size = sizeof(struct mtk_tx_dma),
3977 .rxd_size = sizeof(struct mtk_rx_dma),
3978 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3979 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3980 },
developerfd40db22021-04-29 10:08:25 +08003981};
3982
3983static const struct mtk_soc_data mt7629_data = {
3984 .ana_rgc3 = 0x128,
3985 .caps = MT7629_CAPS | MTK_HWLRO,
3986 .hw_features = MTK_HW_FEATURES,
3987 .required_clks = MT7629_CLKS_BITMAP,
3988 .required_pctl = false,
3989 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003990 .txrx = {
3991 .txd_size = sizeof(struct mtk_tx_dma),
3992 .rxd_size = sizeof(struct mtk_rx_dma),
3993 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3994 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3995 },
developerfd40db22021-04-29 10:08:25 +08003996};
3997
3998static const struct mtk_soc_data mt7986_data = {
3999 .ana_rgc3 = 0x128,
4000 .caps = MT7986_CAPS,
developercba5f4e2021-05-06 14:01:53 +08004001 .hw_features = MTK_HW_FEATURES,
developerfd40db22021-04-29 10:08:25 +08004002 .required_clks = MT7986_CLKS_BITMAP,
4003 .required_pctl = false,
4004 .has_sram = true,
developere9356982022-07-04 09:03:20 +08004005 .txrx = {
4006 .txd_size = sizeof(struct mtk_tx_dma_v2),
4007 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4008 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4009 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
4010 },
developerfd40db22021-04-29 10:08:25 +08004011};
4012
developer255bba22021-07-27 15:16:33 +08004013static const struct mtk_soc_data mt7981_data = {
4014 .ana_rgc3 = 0x128,
4015 .caps = MT7981_CAPS,
developer7377b0b2021-11-18 14:54:47 +08004016 .hw_features = MTK_HW_FEATURES,
developer255bba22021-07-27 15:16:33 +08004017 .required_clks = MT7981_CLKS_BITMAP,
4018 .required_pctl = false,
4019 .has_sram = true,
developere9356982022-07-04 09:03:20 +08004020 .txrx = {
4021 .txd_size = sizeof(struct mtk_tx_dma_v2),
4022 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4023 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4024 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
4025 },
developer255bba22021-07-27 15:16:33 +08004026};
4027
developerfd40db22021-04-29 10:08:25 +08004028static const struct mtk_soc_data rt5350_data = {
4029 .caps = MT7628_CAPS,
4030 .hw_features = MTK_HW_FEATURES_MT7628,
4031 .required_clks = MT7628_CLKS_BITMAP,
4032 .required_pctl = false,
4033 .has_sram = false,
developere9356982022-07-04 09:03:20 +08004034 .txrx = {
4035 .txd_size = sizeof(struct mtk_tx_dma),
4036 .rxd_size = sizeof(struct mtk_rx_dma),
4037 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4038 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
4039 },
developerfd40db22021-04-29 10:08:25 +08004040};
4041
4042const struct of_device_id of_mtk_match[] = {
4043 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
4044 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
4045 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
4046 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
4047 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
4048 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
developer255bba22021-07-27 15:16:33 +08004049 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
developerfd40db22021-04-29 10:08:25 +08004050 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
4051 {},
4052};
4053MODULE_DEVICE_TABLE(of, of_mtk_match);
4054
4055static struct platform_driver mtk_driver = {
4056 .probe = mtk_probe,
4057 .remove = mtk_remove,
4058 .driver = {
4059 .name = "mtk_soc_eth",
4060 .of_match_table = of_mtk_match,
4061 },
4062};
4063
4064module_platform_driver(mtk_driver);
4065
4066MODULE_LICENSE("GPL");
4067MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
4068MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");