// SPDX-License-Identifier: GPL-2.0-only
2/*
3 *
4 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7 */
8
9#include <linux/of_device.h>
10#include <linux/of_mdio.h>
11#include <linux/of_net.h>
12#include <linux/mfd/syscon.h>
13#include <linux/regmap.h>
14#include <linux/clk.h>
15#include <linux/pm_runtime.h>
16#include <linux/if_vlan.h>
17#include <linux/reset.h>
18#include <linux/tcp.h>
19#include <linux/interrupt.h>
20#include <linux/pinctrl/devinfo.h>
21#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>
24
25#include "mtk_eth_soc.h"
26#include "mtk_eth_dbg.h"
#include "mtk_eth_reset.h"

29#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
30#include "mtk_hnat/nf_hnat_mtk.h"
31#endif
32
33static int mtk_msg_level = -1;
atomic_t reset_lock = ATOMIC_INIT(0);
atomic_t force = ATOMIC_INIT(0);

module_param_named(msg_level, mtk_msg_level, int, 0);
38MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
DECLARE_COMPLETION(wait_ser_done);

41#define MTK_ETHTOOL_STAT(x) { #x, \
42 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
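/* Each entry pairs a counter name with its u64-word index inside
 * struct mtk_hw_stats (byte offset divided by sizeof(u64)); the index
 * is presumably what the ethtool stats callbacks use to pick counters
 * out of the structure.
 */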
43
44/* strings used by ethtool */
45static const struct mtk_ethtool_stats {
46 char str[ETH_GSTRING_LEN];
47 u32 offset;
48} mtk_ethtool_stats[] = {
49 MTK_ETHTOOL_STAT(tx_bytes),
50 MTK_ETHTOOL_STAT(tx_packets),
51 MTK_ETHTOOL_STAT(tx_skip),
52 MTK_ETHTOOL_STAT(tx_collisions),
53 MTK_ETHTOOL_STAT(rx_bytes),
54 MTK_ETHTOOL_STAT(rx_packets),
55 MTK_ETHTOOL_STAT(rx_overflow),
56 MTK_ETHTOOL_STAT(rx_fcs_errors),
57 MTK_ETHTOOL_STAT(rx_short_errors),
58 MTK_ETHTOOL_STAT(rx_long_errors),
59 MTK_ETHTOOL_STAT(rx_checksum_errors),
60 MTK_ETHTOOL_STAT(rx_flow_control_packets),
61};
62
63static const char * const mtk_clks_source_name[] = {
64 "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
65 "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
66 "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
67 "sgmii_ck", "eth2pll", "wocpu0","wocpu1",
68};
69
70void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
71{
72 __raw_writel(val, eth->base + reg);
73}
74
75u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
76{
77 return __raw_readl(eth->base + reg);
78}
79
80u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
81{
82 u32 val;
83
84 val = mtk_r32(eth, reg);
85 val &= ~mask;
86 val |= set;
87 mtk_w32(eth, val, reg);
88 return reg;
89}
90
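/* Wait for the PHY indirect access controller to go idle: poll MTK_PHY_IAC
 * until the PHY_IAC_ACCESS (busy) bit clears, giving up after
 * PHY_IAC_TIMEOUT jiffies.
 */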
91static int mtk_mdio_busy_wait(struct mtk_eth *eth)
92{
93 unsigned long t_start = jiffies;
94
95 while (1) {
96 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
97 return 0;
98 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
99 break;
		cond_resched();
	}
102
103 dev_err(eth->dev, "mdio: MDIO timeout\n");
104 return -1;
105}
106
u32 _mtk_mdio_write(struct mtk_eth *eth, int phy_addr,
		    int phy_reg, u16 write_data)
{
110 if (mtk_mdio_busy_wait(eth))
111 return -1;
112
113 write_data &= 0xffff;
114
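	/* Clause 45 access is a two-step sequence: an address cycle first
	 * selects the register within the target MMD, then a second cycle
	 * performs the actual write with the data.
	 */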
	if (phy_reg & MII_ADDR_C45) {
116 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
117 ((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
118 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
119 MTK_PHY_IAC);
120
121 if (mtk_mdio_busy_wait(eth))
122 return -1;
123
124 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_WRITE |
125 ((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
126 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
127 MTK_PHY_IAC);
128 } else {
129 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
130 ((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
131 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
132 MTK_PHY_IAC);
133 }
developerfd40db22021-04-29 10:08:25 +0800134
135 if (mtk_mdio_busy_wait(eth))
136 return -1;
137
138 return 0;
139}
140
u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
143 u32 d;
144
145 if (mtk_mdio_busy_wait(eth))
146 return 0xffff;
147
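	/* Clause 45 read mirrors the write path: an address cycle selects
	 * the MMD register, then a read cycle latches the data into the IAC
	 * register, which is read back at the end of this function.
	 */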
	if (phy_reg & MII_ADDR_C45) {
149 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
150 ((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
151 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
152 MTK_PHY_IAC);
153
154 if (mtk_mdio_busy_wait(eth))
155 return 0xffff;
156
157 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_READ_C45 |
158 ((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
159 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
160 MTK_PHY_IAC);
161 } else {
162 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
163 ((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
164 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
165 MTK_PHY_IAC);
166 }
developerfd40db22021-04-29 10:08:25 +0800167
168 if (mtk_mdio_busy_wait(eth))
169 return 0xffff;
170
171 d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
172
173 return d;
174}
175
176static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
177 int phy_reg, u16 val)
178{
179 struct mtk_eth *eth = bus->priv;
180
181 return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
182}
183
184static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
185{
186 struct mtk_eth *eth = bus->priv;
187
188 return _mtk_mdio_read(eth, phy_addr, phy_reg);
189}
190
developerfd40db22021-04-29 10:08:25 +0800191static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
192 phy_interface_t interface)
193{
194 u32 val;
195
196 /* Check DDR memory type.
197 * Currently TRGMII mode with DDR2 memory is not supported.
198 */
199 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
200 if (interface == PHY_INTERFACE_MODE_TRGMII &&
201 val & SYSCFG_DRAM_TYPE_DDR2) {
202 dev_err(eth->dev,
203 "TRGMII mode with DDR2 memory is not supported!\n");
204 return -EOPNOTSUPP;
205 }
206
207 val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
208 ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
209
210 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
211 ETHSYS_TRGMII_MT7621_MASK, val);
212
213 return 0;
214}
215
216static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
217 phy_interface_t interface, int speed)
218{
219 u32 val;
220 int ret;
221
222 if (interface == PHY_INTERFACE_MODE_TRGMII) {
223 mtk_w32(eth, TRGMII_MODE, INTF_MODE);
224 val = 500000000;
225 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
226 if (ret)
227 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
228 return;
229 }
230
231 val = (speed == SPEED_1000) ?
232 INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
233 mtk_w32(eth, val, INTF_MODE);
234
235 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
236 ETHSYS_TRGMII_CLK_SEL362_5,
237 ETHSYS_TRGMII_CLK_SEL362_5);
238
239 val = (speed == SPEED_1000) ? 250000000 : 500000000;
240 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
241 if (ret)
242 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
243
244 val = (speed == SPEED_1000) ?
245 RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
246 mtk_w32(eth, val, TRGMII_RCK_CTRL);
247
248 val = (speed == SPEED_1000) ?
249 TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
250 mtk_w32(eth, val, TRGMII_TCK_CTRL);
251}
252
253static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
254 const struct phylink_link_state *state)
255{
256 struct mtk_mac *mac = container_of(config, struct mtk_mac,
257 phylink_config);
258 struct mtk_eth *eth = mac->hw;
259 u32 mcr_cur, mcr_new, sid, i;
	int val, ge_mode, err = 0;
developerfd40db22021-04-29 10:08:25 +0800261
	/* MT76x8 has no hardware settings for the MAC */
263 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
264 mac->interface != state->interface) {
265 /* Setup soc pin functions */
266 switch (state->interface) {
267 case PHY_INTERFACE_MODE_TRGMII:
268 if (mac->id)
269 goto err_phy;
270 if (!MTK_HAS_CAPS(mac->hw->soc->caps,
271 MTK_GMAC1_TRGMII))
272 goto err_phy;
273 /* fall through */
274 case PHY_INTERFACE_MODE_RGMII_TXID:
275 case PHY_INTERFACE_MODE_RGMII_RXID:
276 case PHY_INTERFACE_MODE_RGMII_ID:
277 case PHY_INTERFACE_MODE_RGMII:
278 case PHY_INTERFACE_MODE_MII:
279 case PHY_INTERFACE_MODE_REVMII:
280 case PHY_INTERFACE_MODE_RMII:
281 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
282 err = mtk_gmac_rgmii_path_setup(eth, mac->id);
283 if (err)
284 goto init_err;
285 }
286 break;
287 case PHY_INTERFACE_MODE_1000BASEX:
288 case PHY_INTERFACE_MODE_2500BASEX:
289 case PHY_INTERFACE_MODE_SGMII:
290 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
291 err = mtk_gmac_sgmii_path_setup(eth, mac->id);
292 if (err)
293 goto init_err;
294 }
295 break;
296 case PHY_INTERFACE_MODE_GMII:
297 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
298 err = mtk_gmac_gephy_path_setup(eth, mac->id);
299 if (err)
300 goto init_err;
301 }
302 break;
303 default:
304 goto err_phy;
305 }
306
307 /* Setup clock for 1st gmac */
308 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
309 !phy_interface_mode_is_8023z(state->interface) &&
310 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
311 if (MTK_HAS_CAPS(mac->hw->soc->caps,
312 MTK_TRGMII_MT7621_CLK)) {
313 if (mt7621_gmac0_rgmii_adjust(mac->hw,
314 state->interface))
315 goto err_phy;
316 } else {
317 mtk_gmac0_rgmii_adjust(mac->hw,
318 state->interface,
319 state->speed);
320
321 /* mt7623_pad_clk_setup */
322 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
323 mtk_w32(mac->hw,
324 TD_DM_DRVP(8) | TD_DM_DRVN(8),
325 TRGMII_TD_ODT(i));
326
327 /* Assert/release MT7623 RXC reset */
328 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
329 TRGMII_RCK_CTRL);
330 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
331 }
332 }
333
334 ge_mode = 0;
335 switch (state->interface) {
336 case PHY_INTERFACE_MODE_MII:
337 case PHY_INTERFACE_MODE_GMII:
338 ge_mode = 1;
339 break;
340 case PHY_INTERFACE_MODE_REVMII:
341 ge_mode = 2;
342 break;
343 case PHY_INTERFACE_MODE_RMII:
344 if (mac->id)
345 goto err_phy;
346 ge_mode = 3;
347 break;
348 default:
349 break;
350 }
351
352 /* put the gmac into the right mode */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
		spin_unlock(&eth->syscfg0_lock);

360 mac->interface = state->interface;
361 }
362
363 /* SGMII */
364 if (state->interface == PHY_INTERFACE_MODE_SGMII ||
365 phy_interface_mode_is_8023z(state->interface)) {
		/* The path from GMAC to SGMII will be enabled once the
		 * SGMIISYS setup is done.
		 */
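		/* Save the SGMII bits of SYSCFG0, clear them while SGMIISYS is
		 * being (re)configured, then restore them below so the GMAC to
		 * SGMII path comes up with the new settings.
		 */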
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
371
372 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
373 SYSCFG0_SGMII_MASK,
374 ~(u32)SYSCFG0_SGMII_MASK);
375
376 /* Decide how GMAC and SGMIISYS be mapped */
377 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
378 0 : mac->id;
379
380 /* Setup SGMIISYS with the determined property */
381 if (state->interface != PHY_INTERFACE_MODE_SGMII)
382 err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
383 state);
384 else if (phylink_autoneg_inband(mode))
385 err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
386
		if (err) {
			spin_unlock(&eth->syscfg0_lock);
			goto init_err;
		}

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, val);
		spin_unlock(&eth->syscfg0_lock);
	} else if (phylink_autoneg_inband(mode)) {
396 dev_err(eth->dev,
397 "In-band mode not supported in non SGMII mode!\n");
398 return;
399 }
400
401 /* Setup gmac */
402 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
403 mcr_new = mcr_cur;
404 mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
405 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
406 MAC_MCR_FORCE_RX_FC);
407 mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
408 MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
409
410 switch (state->speed) {
411 case SPEED_2500:
412 case SPEED_1000:
413 mcr_new |= MAC_MCR_SPEED_1000;
414 break;
415 case SPEED_100:
416 mcr_new |= MAC_MCR_SPEED_100;
417 break;
418 }
419 if (state->duplex == DUPLEX_FULL) {
420 mcr_new |= MAC_MCR_FORCE_DPX;
421 if (state->pause & MLO_PAUSE_TX)
422 mcr_new |= MAC_MCR_FORCE_TX_FC;
423 if (state->pause & MLO_PAUSE_RX)
424 mcr_new |= MAC_MCR_FORCE_RX_FC;
425 }
426
427 /* Only update control register when needed! */
428 if (mcr_new != mcr_cur)
429 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
430
431 return;
432
433err_phy:
434 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
435 mac->id, phy_modes(state->interface));
436 return;
437
438init_err:
439 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
440 mac->id, phy_modes(state->interface), err);
441}
442
443static int mtk_mac_link_state(struct phylink_config *config,
444 struct phylink_link_state *state)
445{
446 struct mtk_mac *mac = container_of(config, struct mtk_mac,
447 phylink_config);
448 u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
449
450 state->link = (pmsr & MAC_MSR_LINK);
451 state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
452
453 switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
454 case 0:
455 state->speed = SPEED_10;
456 break;
457 case MAC_MSR_SPEED_100:
458 state->speed = SPEED_100;
459 break;
460 case MAC_MSR_SPEED_1000:
461 state->speed = SPEED_1000;
462 break;
463 default:
464 state->speed = SPEED_UNKNOWN;
465 break;
466 }
467
468 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
469 if (pmsr & MAC_MSR_RX_FC)
470 state->pause |= MLO_PAUSE_RX;
471 if (pmsr & MAC_MSR_TX_FC)
472 state->pause |= MLO_PAUSE_TX;
473
474 return 1;
475}
476
477static void mtk_mac_an_restart(struct phylink_config *config)
478{
479 struct mtk_mac *mac = container_of(config, struct mtk_mac,
480 phylink_config);
481
482 mtk_sgmii_restart_an(mac->hw, mac->id);
483}
484
485static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
486 phy_interface_t interface)
487{
488 struct mtk_mac *mac = container_of(config, struct mtk_mac,
489 phylink_config);
490 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
491
492 mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
493 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
494}
495
496static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
497 phy_interface_t interface,
498 struct phy_device *phy)
499{
500 struct mtk_mac *mac = container_of(config, struct mtk_mac,
501 phylink_config);
502 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
503
504 mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
505 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
506}
507
508static void mtk_validate(struct phylink_config *config,
509 unsigned long *supported,
510 struct phylink_link_state *state)
511{
512 struct mtk_mac *mac = container_of(config, struct mtk_mac,
513 phylink_config);
514 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
515
516 if (state->interface != PHY_INTERFACE_MODE_NA &&
517 state->interface != PHY_INTERFACE_MODE_MII &&
518 state->interface != PHY_INTERFACE_MODE_GMII &&
519 !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
520 phy_interface_mode_is_rgmii(state->interface)) &&
521 !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
522 !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
523 !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
524 (state->interface == PHY_INTERFACE_MODE_SGMII ||
525 phy_interface_mode_is_8023z(state->interface)))) {
526 linkmode_zero(supported);
527 return;
528 }
529
530 phylink_set_port_modes(mask);
531 phylink_set(mask, Autoneg);
532
533 switch (state->interface) {
534 case PHY_INTERFACE_MODE_TRGMII:
535 phylink_set(mask, 1000baseT_Full);
536 break;
537 case PHY_INTERFACE_MODE_1000BASEX:
538 case PHY_INTERFACE_MODE_2500BASEX:
539 phylink_set(mask, 1000baseX_Full);
540 phylink_set(mask, 2500baseX_Full);
541 break;
542 case PHY_INTERFACE_MODE_GMII:
543 case PHY_INTERFACE_MODE_RGMII:
544 case PHY_INTERFACE_MODE_RGMII_ID:
545 case PHY_INTERFACE_MODE_RGMII_RXID:
546 case PHY_INTERFACE_MODE_RGMII_TXID:
547 phylink_set(mask, 1000baseT_Half);
548 /* fall through */
549 case PHY_INTERFACE_MODE_SGMII:
550 phylink_set(mask, 1000baseT_Full);
551 phylink_set(mask, 1000baseX_Full);
552 /* fall through */
553 case PHY_INTERFACE_MODE_MII:
554 case PHY_INTERFACE_MODE_RMII:
555 case PHY_INTERFACE_MODE_REVMII:
556 case PHY_INTERFACE_MODE_NA:
557 default:
558 phylink_set(mask, 10baseT_Half);
559 phylink_set(mask, 10baseT_Full);
560 phylink_set(mask, 100baseT_Half);
561 phylink_set(mask, 100baseT_Full);
562 break;
563 }
564
565 if (state->interface == PHY_INTERFACE_MODE_NA) {
566 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
567 phylink_set(mask, 1000baseT_Full);
568 phylink_set(mask, 1000baseX_Full);
569 phylink_set(mask, 2500baseX_Full);
570 }
571 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
572 phylink_set(mask, 1000baseT_Full);
573 phylink_set(mask, 1000baseT_Half);
574 phylink_set(mask, 1000baseX_Full);
575 }
576 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
577 phylink_set(mask, 1000baseT_Full);
578 phylink_set(mask, 1000baseT_Half);
579 }
580 }
581
582 phylink_set(mask, Pause);
583 phylink_set(mask, Asym_Pause);
584
585 linkmode_and(supported, supported, mask);
586 linkmode_and(state->advertising, state->advertising, mask);
587
588 /* We can only operate at 2500BaseX or 1000BaseX. If requested
589 * to advertise both, only report advertising at 2500BaseX.
590 */
591 phylink_helper_basex_speed(state);
592}
593
594static const struct phylink_mac_ops mtk_phylink_ops = {
595 .validate = mtk_validate,
596 .mac_link_state = mtk_mac_link_state,
597 .mac_an_restart = mtk_mac_an_restart,
598 .mac_config = mtk_mac_config,
599 .mac_link_down = mtk_mac_link_down,
600 .mac_link_up = mtk_mac_link_up,
601};
602
603static int mtk_mdio_init(struct mtk_eth *eth)
604{
605 struct device_node *mii_np;
606 int ret;
607
608 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
609 if (!mii_np) {
610 dev_err(eth->dev, "no %s child node found", "mdio-bus");
611 return -ENODEV;
612 }
613
614 if (!of_device_is_available(mii_np)) {
615 ret = -ENODEV;
616 goto err_put_node;
617 }
618
619 eth->mii_bus = devm_mdiobus_alloc(eth->dev);
620 if (!eth->mii_bus) {
621 ret = -ENOMEM;
622 goto err_put_node;
623 }
624
625 eth->mii_bus->name = "mdio";
626 eth->mii_bus->read = mtk_mdio_read;
627 eth->mii_bus->write = mtk_mdio_write;
628 eth->mii_bus->priv = eth;
629 eth->mii_bus->parent = eth->dev;
630
	if (snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np) < 0) {
		ret = -ENOMEM;
		goto err_put_node;
	}
	ret = of_mdiobus_register(eth->mii_bus, mii_np);
636
637err_put_node:
638 of_node_put(mii_np);
639 return ret;
640}
641
642static void mtk_mdio_cleanup(struct mtk_eth *eth)
643{
644 if (!eth->mii_bus)
645 return;
646
647 mdiobus_unregister(eth->mii_bus);
648}
649
650static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
651{
652 unsigned long flags;
653 u32 val;
654
655 spin_lock_irqsave(&eth->tx_irq_lock, flags);
656 val = mtk_r32(eth, eth->tx_int_mask_reg);
657 mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
658 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
659}
660
661static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
662{
663 unsigned long flags;
664 u32 val;
665
666 spin_lock_irqsave(&eth->tx_irq_lock, flags);
667 val = mtk_r32(eth, eth->tx_int_mask_reg);
668 mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
669 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
670}
671
672static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
673{
674 unsigned long flags;
675 u32 val;
676
677 spin_lock_irqsave(&eth->rx_irq_lock, flags);
678 val = mtk_r32(eth, MTK_PDMA_INT_MASK);
679 mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
680 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
681}
682
683static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
684{
685 unsigned long flags;
686 u32 val;
687
688 spin_lock_irqsave(&eth->rx_irq_lock, flags);
689 val = mtk_r32(eth, MTK_PDMA_INT_MASK);
690 mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
691 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
692}
693
694static int mtk_set_mac_address(struct net_device *dev, void *p)
695{
696 int ret = eth_mac_addr(dev, p);
697 struct mtk_mac *mac = netdev_priv(dev);
698 struct mtk_eth *eth = mac->hw;
699 const char *macaddr = dev->dev_addr;
700
701 if (ret)
702 return ret;
703
704 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
705 return -EBUSY;
706
707 spin_lock_bh(&mac->hw->page_lock);
708 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
709 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
710 MT7628_SDM_MAC_ADRH);
711 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
712 (macaddr[4] << 8) | macaddr[5],
713 MT7628_SDM_MAC_ADRL);
714 } else {
715 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
716 MTK_GDMA_MAC_ADRH(mac->id));
717 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
718 (macaddr[4] << 8) | macaddr[5],
719 MTK_GDMA_MAC_ADRL(mac->id));
720 }
721 spin_unlock_bh(&mac->hw->page_lock);
722
723 return 0;
724}
725
726void mtk_stats_update_mac(struct mtk_mac *mac)
727{
728 struct mtk_hw_stats *hw_stats = mac->hw_stats;
729 unsigned int base = MTK_GDM1_TX_GBCNT;
730 u64 stats;
731
732 base += hw_stats->reg_offset;
733
734 u64_stats_update_begin(&hw_stats->syncp);
735
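	/* The GDM byte counters are 64-bit and split across two 32-bit
	 * registers; the high word is only added in when it is non-zero.
	 * All remaining counters below are plain 32-bit registers.
	 */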
736 hw_stats->rx_bytes += mtk_r32(mac->hw, base);
737 stats = mtk_r32(mac->hw, base + 0x04);
738 if (stats)
739 hw_stats->rx_bytes += (stats << 32);
740 hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
741 hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
742 hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
743 hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
744 hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
745 hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
746 hw_stats->rx_flow_control_packets +=
747 mtk_r32(mac->hw, base + 0x24);
748 hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
749 hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
750 hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
751 stats = mtk_r32(mac->hw, base + 0x34);
752 if (stats)
753 hw_stats->tx_bytes += (stats << 32);
754 hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
755 u64_stats_update_end(&hw_stats->syncp);
756}
757
758static void mtk_stats_update(struct mtk_eth *eth)
759{
760 int i;
761
762 for (i = 0; i < MTK_MAC_COUNT; i++) {
763 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
764 continue;
765 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
766 mtk_stats_update_mac(eth->mac[i]);
767 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
768 }
769 }
770}
771
772static void mtk_get_stats64(struct net_device *dev,
773 struct rtnl_link_stats64 *storage)
774{
775 struct mtk_mac *mac = netdev_priv(dev);
776 struct mtk_hw_stats *hw_stats = mac->hw_stats;
777 unsigned int start;
778
779 if (netif_running(dev) && netif_device_present(dev)) {
780 if (spin_trylock_bh(&hw_stats->stats_lock)) {
781 mtk_stats_update_mac(mac);
782 spin_unlock_bh(&hw_stats->stats_lock);
783 }
784 }
785
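	/* Snapshot the counters under the u64_stats seqcount: the copy is
	 * retried if mtk_stats_update_mac() updated the counters while they
	 * were being read.
	 */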
786 do {
787 start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
788 storage->rx_packets = hw_stats->rx_packets;
789 storage->tx_packets = hw_stats->tx_packets;
790 storage->rx_bytes = hw_stats->rx_bytes;
791 storage->tx_bytes = hw_stats->tx_bytes;
792 storage->collisions = hw_stats->tx_collisions;
793 storage->rx_length_errors = hw_stats->rx_short_errors +
794 hw_stats->rx_long_errors;
795 storage->rx_over_errors = hw_stats->rx_overflow;
796 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
797 storage->rx_errors = hw_stats->rx_checksum_errors;
798 storage->tx_aborted_errors = hw_stats->tx_skip;
799 } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
800
801 storage->tx_errors = dev->stats.tx_errors;
802 storage->rx_dropped = dev->stats.rx_dropped;
803 storage->tx_dropped = dev->stats.tx_dropped;
804}
805
806static inline int mtk_max_frag_size(int mtu)
807{
808 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
809 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
810 mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
811
812 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
813 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
814}
815
816static inline int mtk_max_buf_size(int frag_size)
817{
818 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
819 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
820
821 WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
822
823 return buf_size;
824}
825
static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
			    struct mtk_rx_dma_v2 *dma_rxd)
{
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	if (!(rxd->rxd2 & RX_DMA_DONE))
		return false;

	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
	}

	return true;
}
844
845/* the qdma core needs scratch memory to be setup */
846static int mtk_init_fq_dma(struct mtk_eth *eth)
847{
developere9356982022-07-04 09:03:20 +0800848 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +0800849 dma_addr_t phy_ring_tail;
850 int cnt = MTK_DMA_SIZE;
851 dma_addr_t dma_addr;
852 int i;
853
854 if (!eth->soc->has_sram) {
855 eth->scratch_ring = dma_alloc_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +0800856 cnt * soc->txrx.txd_size,
developerfd40db22021-04-29 10:08:25 +0800857 &eth->phy_scratch_ring,
developere9356982022-07-04 09:03:20 +0800858 GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +0800859 } else {
860 eth->scratch_ring = eth->base + MTK_ETH_SRAM_OFFSET;
861 }
862
863 if (unlikely(!eth->scratch_ring))
864 return -ENOMEM;
865
developere9356982022-07-04 09:03:20 +0800866 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +0800867 if (unlikely(!eth->scratch_head))
868 return -ENOMEM;
869
870 dma_addr = dma_map_single(eth->dev,
871 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
872 DMA_FROM_DEVICE);
873 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
874 return -ENOMEM;
875
developere9356982022-07-04 09:03:20 +0800876 phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
developerfd40db22021-04-29 10:08:25 +0800877
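	/* Chain the scratch descriptors together: txd1 points at a
	 * MTK_QDMA_PAGE_SIZE chunk of scratch memory and txd2 at the DMA
	 * address of the next descriptor; the FQ head/tail/count registers
	 * written below hand this list to the QDMA free queue.
	 */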
878 for (i = 0; i < cnt; i++) {
developere9356982022-07-04 09:03:20 +0800879 struct mtk_tx_dma_v2 *txd;
880
881 txd = eth->scratch_ring + i * soc->txrx.txd_size;
882 txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
developerfd40db22021-04-29 10:08:25 +0800883 if (i < cnt - 1)
developere9356982022-07-04 09:03:20 +0800884 txd->txd2 = eth->phy_scratch_ring +
885 (i + 1) * soc->txrx.txd_size;
developerfd40db22021-04-29 10:08:25 +0800886
developere9356982022-07-04 09:03:20 +0800887 txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
888 txd->txd4 = 0;
889
890 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
891 txd->txd5 = 0;
892 txd->txd6 = 0;
893 txd->txd7 = 0;
894 txd->txd8 = 0;
developerfd40db22021-04-29 10:08:25 +0800895 }
developerfd40db22021-04-29 10:08:25 +0800896 }
897
898 mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
899 mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
900 mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
901 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
902
903 return 0;
904}
905
906static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
907{
developere9356982022-07-04 09:03:20 +0800908 return ring->dma + (desc - ring->phys);
developerfd40db22021-04-29 10:08:25 +0800909}
910
911static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
developere9356982022-07-04 09:03:20 +0800912 void *txd, u32 txd_size)
developerfd40db22021-04-29 10:08:25 +0800913{
developere9356982022-07-04 09:03:20 +0800914 int idx = (txd - ring->dma) / txd_size;
developerfd40db22021-04-29 10:08:25 +0800915
916 return &ring->buf[idx];
917}
918
919static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
developere9356982022-07-04 09:03:20 +0800920 void *dma)
developerfd40db22021-04-29 10:08:25 +0800921{
922 return ring->dma_pdma - ring->dma + dma;
923}
924
developere9356982022-07-04 09:03:20 +0800925static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
developerfd40db22021-04-29 10:08:25 +0800926{
developere9356982022-07-04 09:03:20 +0800927 return (dma - ring->dma) / txd_size;
developerfd40db22021-04-29 10:08:25 +0800928}
929
developerc4671b22021-05-28 13:16:42 +0800930static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
931 bool napi)
developerfd40db22021-04-29 10:08:25 +0800932{
933 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
934 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
935 dma_unmap_single(eth->dev,
936 dma_unmap_addr(tx_buf, dma_addr0),
937 dma_unmap_len(tx_buf, dma_len0),
938 DMA_TO_DEVICE);
939 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
940 dma_unmap_page(eth->dev,
941 dma_unmap_addr(tx_buf, dma_addr0),
942 dma_unmap_len(tx_buf, dma_len0),
943 DMA_TO_DEVICE);
944 }
945 } else {
946 if (dma_unmap_len(tx_buf, dma_len0)) {
947 dma_unmap_page(eth->dev,
948 dma_unmap_addr(tx_buf, dma_addr0),
949 dma_unmap_len(tx_buf, dma_len0),
950 DMA_TO_DEVICE);
951 }
952
953 if (dma_unmap_len(tx_buf, dma_len1)) {
954 dma_unmap_page(eth->dev,
955 dma_unmap_addr(tx_buf, dma_addr1),
956 dma_unmap_len(tx_buf, dma_len1),
957 DMA_TO_DEVICE);
958 }
959 }
960
961 tx_buf->flags = 0;
962 if (tx_buf->skb &&
developerc4671b22021-05-28 13:16:42 +0800963 (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
964 if (napi)
965 napi_consume_skb(tx_buf->skb, napi);
966 else
967 dev_kfree_skb_any(tx_buf->skb);
968 }
developerfd40db22021-04-29 10:08:25 +0800969 tx_buf->skb = NULL;
970}
971
972static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
973 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
974 size_t size, int idx)
975{
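	/* For QDMA only the unmap bookkeeping is recorded here, since the
	 * descriptor itself is filled by mtk_tx_set_dma_desc().  PDMA packs
	 * two buffers into one descriptor: an even idx fills txd1/PLEN0, an
	 * odd idx fills txd3/PLEN1 of the same descriptor.
	 */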
976 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
977 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
978 dma_unmap_len_set(tx_buf, dma_len0, size);
979 } else {
980 if (idx & 1) {
981 txd->txd3 = mapped_addr;
982 txd->txd2 |= TX_DMA_PLEN1(size);
983 dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
984 dma_unmap_len_set(tx_buf, dma_len1, size);
985 } else {
986 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
987 txd->txd1 = mapped_addr;
988 txd->txd2 = TX_DMA_PLEN0(size);
989 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
990 dma_unmap_len_set(tx_buf, dma_len0, size);
991 }
992 }
993}
994
developere9356982022-07-04 09:03:20 +0800995static void mtk_tx_set_dma_desc_v1(struct sk_buff *skb, struct net_device *dev, void *txd,
996 struct mtk_tx_dma_desc_info *info)
997{
998 struct mtk_mac *mac = netdev_priv(dev);
999 struct mtk_eth *eth = mac->hw;
1000 struct mtk_tx_dma *desc = txd;
1001 u32 data;
1002
1003 WRITE_ONCE(desc->txd1, info->addr);
1004
1005 data = TX_DMA_SWC | QID_LOW_BITS(info->qid) | TX_DMA_PLEN0(info->size);
1006 if (info->last)
1007 data |= TX_DMA_LS0;
1008 WRITE_ONCE(desc->txd3, data);
1009
1010 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1011 data |= QID_HIGH_BITS(info->qid);
1012 if (info->first) {
1013 if (info->gso)
1014 data |= TX_DMA_TSO;
1015 /* tx checksum offload */
1016 if (info->csum)
1017 data |= TX_DMA_CHKSUM;
1018 /* vlan header offload */
1019 if (info->vlan)
1020 data |= TX_DMA_INS_VLAN | info->vlan_tci;
1021 }
1022
1023#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
1024 if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
1025 data &= ~(0x7 << TX_DMA_FPORT_SHIFT);
1026 data |= 0x4 << TX_DMA_FPORT_SHIFT;
1027 }
1028
1029 trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
1030 __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
1031#endif
1032 WRITE_ONCE(desc->txd4, data);
1033}
1034
1035static void mtk_tx_set_dma_desc_v2(struct sk_buff *skb, struct net_device *dev, void *txd,
1036 struct mtk_tx_dma_desc_info *info)
1037{
1038 struct mtk_mac *mac = netdev_priv(dev);
1039 struct mtk_eth *eth = mac->hw;
1040 struct mtk_tx_dma_v2 *desc = txd;
1041 u32 data = 0;
1042 u16 qid;
1043
1044 if(!info->qid && mac->id)
1045 qid = MTK_QDMA_GMAC2_QID;
1046
1047 WRITE_ONCE(desc->txd1, info->addr);
1048
1049 data = TX_DMA_PLEN0(info->size);
1050 if (info->last)
1051 data |= TX_DMA_LS0;
1052 WRITE_ONCE(desc->txd3, data);
1053
1054 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
1055 data |= TX_DMA_SWC_V2 | QID_BITS_V2(qid);
1056#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
1057 if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
1058 data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
1059 data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
1060 }
1061
1062 trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
1063 __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
1064#endif
1065 WRITE_ONCE(desc->txd4, data);
1066
1067 data = 0;
1068 if (info->first) {
1069 if (info->gso)
1070 data |= TX_DMA_TSO_V2;
1071 /* tx checksum offload */
1072 if (info->csum)
1073 data |= TX_DMA_CHKSUM_V2;
1074 }
1075 WRITE_ONCE(desc->txd5, data);
1076
1077 data = 0;
1078 if (info->first && info->vlan)
1079 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1080 WRITE_ONCE(desc->txd6, data);
1081
1082 WRITE_ONCE(desc->txd7, 0);
1083 WRITE_ONCE(desc->txd8, 0);
1084}
1085
1086static void mtk_tx_set_dma_desc(struct sk_buff *skb, struct net_device *dev, void *txd,
1087 struct mtk_tx_dma_desc_info *info)
1088{
1089 struct mtk_mac *mac = netdev_priv(dev);
1090 struct mtk_eth *eth = mac->hw;
1091
1092 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1093 mtk_tx_set_dma_desc_v2(skb, dev, txd, info);
1094 else
1095 mtk_tx_set_dma_desc_v1(skb, dev, txd, info);
1096}
1097
developerfd40db22021-04-29 10:08:25 +08001098static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1099 int tx_num, struct mtk_tx_ring *ring, bool gso)
1100{
developere9356982022-07-04 09:03:20 +08001101 struct mtk_tx_dma_desc_info txd_info = {
1102 .size = skb_headlen(skb),
1103 .qid = skb->mark & MTK_QDMA_TX_MASK,
1104 .gso = gso,
1105 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
1106 .vlan = skb_vlan_tag_present(skb),
1107 .vlan_tci = skb_vlan_tag_get(skb),
1108 .first = true,
1109 .last = !skb_is_nonlinear(skb),
1110 };
developerfd40db22021-04-29 10:08:25 +08001111 struct mtk_mac *mac = netdev_priv(dev);
1112 struct mtk_eth *eth = mac->hw;
developere9356982022-07-04 09:03:20 +08001113 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001114 struct mtk_tx_dma *itxd, *txd;
1115 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1116 struct mtk_tx_buf *itx_buf, *tx_buf;
developerfd40db22021-04-29 10:08:25 +08001117 int i, n_desc = 1;
developerfd40db22021-04-29 10:08:25 +08001118 int k = 0;
1119
1120 itxd = ring->next_free;
1121 itxd_pdma = qdma_to_pdma(ring, itxd);
1122 if (itxd == ring->last_free)
1123 return -ENOMEM;
1124
developere9356982022-07-04 09:03:20 +08001125 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001126 memset(itx_buf, 0, sizeof(*itx_buf));
1127
developere9356982022-07-04 09:03:20 +08001128 txd_info.addr = dma_map_single(eth->dev, skb->data, txd_info.size,
1129 DMA_TO_DEVICE);
1130 if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
developerfd40db22021-04-29 10:08:25 +08001131 return -ENOMEM;
1132
developere9356982022-07-04 09:03:20 +08001133 mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);
1134
developerfd40db22021-04-29 10:08:25 +08001135 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1136 itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1137 MTK_TX_FLAGS_FPORT1;
developere9356982022-07-04 09:03:20 +08001138 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
developerfd40db22021-04-29 10:08:25 +08001139 k++);
1140
developerfd40db22021-04-29 10:08:25 +08001141 /* TX SG offload */
1142 txd = itxd;
1143 txd_pdma = qdma_to_pdma(ring, txd);
1144
developere9356982022-07-04 09:03:20 +08001145 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
developerfd40db22021-04-29 10:08:25 +08001146 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1147 unsigned int offset = 0;
1148 int frag_size = skb_frag_size(frag);
1149
1150 while (frag_size) {
developerfd40db22021-04-29 10:08:25 +08001151 bool new_desc = true;
1152
developere9356982022-07-04 09:03:20 +08001153 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
developerfd40db22021-04-29 10:08:25 +08001154 (i & 0x1)) {
1155 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1156 txd_pdma = qdma_to_pdma(ring, txd);
1157 if (txd == ring->last_free)
1158 goto err_dma;
1159
1160 n_desc++;
1161 } else {
1162 new_desc = false;
1163 }
1164
developere9356982022-07-04 09:03:20 +08001165 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1166 txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
1167 txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
1168 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1169 !(frag_size - txd_info.size);
1170 txd_info.addr = skb_frag_dma_map(eth->dev, frag,
1171 offset, txd_info.size,
1172 DMA_TO_DEVICE);
1173 if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
1174 goto err_dma;
developerfd40db22021-04-29 10:08:25 +08001175
developere9356982022-07-04 09:03:20 +08001176 mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);
developerfd40db22021-04-29 10:08:25 +08001177
developere9356982022-07-04 09:03:20 +08001178 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001179 if (new_desc)
1180 memset(tx_buf, 0, sizeof(*tx_buf));
1181 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
1182 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1183 tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1184 MTK_TX_FLAGS_FPORT1;
1185
developere9356982022-07-04 09:03:20 +08001186 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1187 txd_info.size, k++);
developerfd40db22021-04-29 10:08:25 +08001188
developere9356982022-07-04 09:03:20 +08001189 frag_size -= txd_info.size;
1190 offset += txd_info.size;
developerfd40db22021-04-29 10:08:25 +08001191 }
1192 }
1193
1194 /* store skb to cleanup */
1195 itx_buf->skb = skb;
1196
developere9356982022-07-04 09:03:20 +08001197 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
developerfd40db22021-04-29 10:08:25 +08001198 if (k & 0x1)
1199 txd_pdma->txd2 |= TX_DMA_LS0;
1200 else
1201 txd_pdma->txd2 |= TX_DMA_LS1;
1202 }
1203
1204 netdev_sent_queue(dev, skb->len);
1205 skb_tx_timestamp(skb);
1206
1207 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1208 atomic_sub(n_desc, &ring->free_count);
1209
1210 /* make sure that all changes to the dma ring are flushed before we
1211 * continue
1212 */
1213 wmb();
1214
developere9356982022-07-04 09:03:20 +08001215 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
developerfd40db22021-04-29 10:08:25 +08001216 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
1217 !netdev_xmit_more())
1218 mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
1219 } else {
developere9356982022-07-04 09:03:20 +08001220 int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
developerfd40db22021-04-29 10:08:25 +08001221 ring->dma_size);
1222 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1223 }
1224
1225 return 0;
1226
1227err_dma:
1228 do {
developere9356982022-07-04 09:03:20 +08001229 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001230
1231 /* unmap dma */
developerc4671b22021-05-28 13:16:42 +08001232 mtk_tx_unmap(eth, tx_buf, false);
developerfd40db22021-04-29 10:08:25 +08001233
1234 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
developere9356982022-07-04 09:03:20 +08001235 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
developerfd40db22021-04-29 10:08:25 +08001236 itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1237
1238 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1239 itxd_pdma = qdma_to_pdma(ring, itxd);
1240 } while (itxd != txd);
1241
1242 return -ENOMEM;
1243}
1244
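/* Worst-case number of TX descriptors an skb can consume: for GSO each
 * fragment may be split into MTK_TX_DMA_BUF_LEN sized chunks, otherwise
 * one descriptor per fragment plus one for the linear head is enough.
 */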
1245static inline int mtk_cal_txd_req(struct sk_buff *skb)
1246{
1247 int i, nfrags;
1248 skb_frag_t *frag;
1249
1250 nfrags = 1;
1251 if (skb_is_gso(skb)) {
1252 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1253 frag = &skb_shinfo(skb)->frags[i];
1254 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1255 MTK_TX_DMA_BUF_LEN);
1256 }
1257 } else {
1258 nfrags += skb_shinfo(skb)->nr_frags;
1259 }
1260
1261 return nfrags;
1262}
1263
1264static int mtk_queue_stopped(struct mtk_eth *eth)
1265{
1266 int i;
1267
1268 for (i = 0; i < MTK_MAC_COUNT; i++) {
1269 if (!eth->netdev[i])
1270 continue;
1271 if (netif_queue_stopped(eth->netdev[i]))
1272 return 1;
1273 }
1274
1275 return 0;
1276}
1277
1278static void mtk_wake_queue(struct mtk_eth *eth)
1279{
1280 int i;
1281
1282 for (i = 0; i < MTK_MAC_COUNT; i++) {
1283 if (!eth->netdev[i])
1284 continue;
1285 netif_wake_queue(eth->netdev[i]);
1286 }
1287}
1288
1289static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1290{
1291 struct mtk_mac *mac = netdev_priv(dev);
1292 struct mtk_eth *eth = mac->hw;
1293 struct mtk_tx_ring *ring = &eth->tx_ring;
1294 struct net_device_stats *stats = &dev->stats;
1295 bool gso = false;
1296 int tx_num;
1297
1298 /* normally we can rely on the stack not calling this more than once,
1299 * however we have 2 queues running on the same ring so we need to lock
1300 * the ring access
1301 */
1302 spin_lock(&eth->page_lock);
1303
1304 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1305 goto drop;
1306
1307 tx_num = mtk_cal_txd_req(skb);
1308 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1309 netif_stop_queue(dev);
1310 netif_err(eth, tx_queued, dev,
1311 "Tx Ring full when queue awake!\n");
1312 spin_unlock(&eth->page_lock);
1313 return NETDEV_TX_BUSY;
1314 }
1315
1316 /* TSO: fill MSS info in tcp checksum field */
1317 if (skb_is_gso(skb)) {
1318 if (skb_cow_head(skb, 0)) {
1319 netif_warn(eth, tx_err, dev,
1320 "GSO expand head fail.\n");
1321 goto drop;
1322 }
1323
1324 if (skb_shinfo(skb)->gso_type &
1325 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1326 gso = true;
1327 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1328 }
1329 }
1330
1331 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1332 goto drop;
1333
1334 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1335 netif_stop_queue(dev);
1336
1337 spin_unlock(&eth->page_lock);
1338
1339 return NETDEV_TX_OK;
1340
1341drop:
1342 spin_unlock(&eth->page_lock);
1343 stats->tx_dropped++;
1344 dev_kfree_skb_any(skb);
1345 return NETDEV_TX_OK;
1346}
1347
1348static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1349{
1350 int i;
1351 struct mtk_rx_ring *ring;
1352 int idx;
1353
developerfd40db22021-04-29 10:08:25 +08001354 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
developere9356982022-07-04 09:03:20 +08001355 struct mtk_rx_dma *rxd;
1356
developer77d03a72021-06-06 00:06:00 +08001357 if (!IS_NORMAL_RING(i) && !IS_HW_LRO_RING(i))
1358 continue;
1359
developerfd40db22021-04-29 10:08:25 +08001360 ring = &eth->rx_ring[i];
1361 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
developere9356982022-07-04 09:03:20 +08001362 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1363 if (rxd->rxd2 & RX_DMA_DONE) {
developerfd40db22021-04-29 10:08:25 +08001364 ring->calc_idx_update = true;
1365 return ring;
1366 }
1367 }
1368
1369 return NULL;
1370}
1371
static void mtk_update_rx_cpu_idx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	int i;

	if (!eth->hwlro)
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	else {
		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1380 ring = &eth->rx_ring[i];
1381 if (ring->calc_idx_update) {
1382 ring->calc_idx_update = false;
1383 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1384 }
1385 }
1386 }
1387}
1388
1389static int mtk_poll_rx(struct napi_struct *napi, int budget,
1390 struct mtk_eth *eth)
1391{
developer18f46a82021-07-20 21:08:21 +08001392 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
1393 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08001394 int idx;
1395 struct sk_buff *skb;
1396 u8 *data, *new_data;
developere9356982022-07-04 09:03:20 +08001397 struct mtk_rx_dma_v2 *rxd, trxd;
developerfd40db22021-04-29 10:08:25 +08001398 int done = 0;
1399
developer18f46a82021-07-20 21:08:21 +08001400 if (unlikely(!ring))
1401 goto rx_done;
1402
developerfd40db22021-04-29 10:08:25 +08001403 while (done < budget) {
1404 struct net_device *netdev;
1405 unsigned int pktlen;
1406 dma_addr_t dma_addr;
developere9356982022-07-04 09:03:20 +08001407 int mac = 0;
developerfd40db22021-04-29 10:08:25 +08001408
developer18f46a82021-07-20 21:08:21 +08001409 if (eth->hwlro)
1410 ring = mtk_get_rx_ring(eth);
1411
developerfd40db22021-04-29 10:08:25 +08001412 if (unlikely(!ring))
1413 goto rx_done;
1414
1415 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
developere9356982022-07-04 09:03:20 +08001416 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
developerfd40db22021-04-29 10:08:25 +08001417 data = ring->data[idx];
1418
developere9356982022-07-04 09:03:20 +08001419 if (!mtk_rx_get_desc(eth, &trxd, rxd))
developerfd40db22021-04-29 10:08:25 +08001420 break;
1421
		/* find out which mac the packet comes from. values start at 1 */
1423 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1424 mac = 0;
1425 } else {
developera2bdbd52021-05-31 19:10:17 +08001426 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developere9356982022-07-04 09:03:20 +08001427 mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
developerfd40db22021-04-29 10:08:25 +08001428 else
developerfd40db22021-04-29 10:08:25 +08001429 mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ?
1430 0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
1431 }
1432
1433 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1434 !eth->netdev[mac]))
1435 goto release_desc;
1436
1437 netdev = eth->netdev[mac];
1438
1439 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1440 goto release_desc;
1441
1442 /* alloc new buffer */
1443 new_data = napi_alloc_frag(ring->frag_size);
1444 if (unlikely(!new_data)) {
1445 netdev->stats.rx_dropped++;
1446 goto release_desc;
1447 }
1448 dma_addr = dma_map_single(eth->dev,
1449 new_data + NET_SKB_PAD +
1450 eth->ip_align,
1451 ring->buf_size,
1452 DMA_FROM_DEVICE);
1453 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
1454 skb_free_frag(new_data);
1455 netdev->stats.rx_dropped++;
1456 goto release_desc;
1457 }
1458
developerc4671b22021-05-28 13:16:42 +08001459 dma_unmap_single(eth->dev, trxd.rxd1,
1460 ring->buf_size, DMA_FROM_DEVICE);
1461
developerfd40db22021-04-29 10:08:25 +08001462 /* receive data */
1463 skb = build_skb(data, ring->frag_size);
1464 if (unlikely(!skb)) {
developerc4671b22021-05-28 13:16:42 +08001465 skb_free_frag(data);
developerfd40db22021-04-29 10:08:25 +08001466 netdev->stats.rx_dropped++;
developerc4671b22021-05-28 13:16:42 +08001467 goto skip_rx;
developerfd40db22021-04-29 10:08:25 +08001468 }
1469 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1470
developerfd40db22021-04-29 10:08:25 +08001471 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1472 skb->dev = netdev;
1473 skb_put(skb, pktlen);
1474
developera2bdbd52021-05-31 19:10:17 +08001475 if ((!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
developerfd40db22021-04-29 10:08:25 +08001476 (trxd.rxd4 & eth->rx_dma_l4_valid)) ||
developera2bdbd52021-05-31 19:10:17 +08001477 (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
developerfd40db22021-04-29 10:08:25 +08001478 (trxd.rxd3 & eth->rx_dma_l4_valid)))
1479 skb->ip_summed = CHECKSUM_UNNECESSARY;
1480 else
1481 skb_checksum_none_assert(skb);
1482 skb->protocol = eth_type_trans(skb, netdev);
1483
1484 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
developera2bdbd52021-05-31 19:10:17 +08001485 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developer255bba22021-07-27 15:16:33 +08001486 if (trxd.rxd3 & RX_DMA_VTAG_V2)
developerfd40db22021-04-29 10:08:25 +08001487 __vlan_hwaccel_put_tag(skb,
developer255bba22021-07-27 15:16:33 +08001488 htons(RX_DMA_VPID_V2(trxd.rxd4)),
developerfd40db22021-04-29 10:08:25 +08001489 RX_DMA_VID_V2(trxd.rxd4));
1490 } else {
1491 if (trxd.rxd2 & RX_DMA_VTAG)
1492 __vlan_hwaccel_put_tag(skb,
1493 htons(RX_DMA_VPID(trxd.rxd3)),
1494 RX_DMA_VID(trxd.rxd3));
1495 }
1496
			/* If netdev is attached to a DSA switch, the special
			 * tag inserted in the VLAN field by the switch hardware
			 * can be offloaded by RX HW VLAN offload. Clear the VLAN
			 * information from @skb to avoid an unexpected 802.1d
			 * handler before the packet enters the DSA framework.
			 */
1503 if (netdev_uses_dsa(netdev))
1504 __vlan_hwaccel_clear_tag(skb);
1505 }
1506
1507#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
developera2bdbd52021-05-31 19:10:17 +08001508 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developerfd40db22021-04-29 10:08:25 +08001509 *(u32 *)(skb->head) = trxd.rxd5;
1510 else
developerfd40db22021-04-29 10:08:25 +08001511 *(u32 *)(skb->head) = trxd.rxd4;
1512
1513 skb_hnat_alg(skb) = 0;
developerfdfe1572021-09-13 16:56:33 +08001514 skb_hnat_filled(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001515 skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
1516
1517 if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
1518 trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
1519 __func__, skb_hnat_reason(skb));
1520 skb->pkt_type = PACKET_HOST;
1521 }
1522
1523 trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x\n",
1524 __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
1525 skb_hnat_reason(skb), skb_hnat_alg(skb));
1526#endif
developer77d03a72021-06-06 00:06:00 +08001527 if (mtk_hwlro_stats_ebl &&
1528 IS_HW_LRO_RING(ring->ring_no) && eth->hwlro) {
1529 hw_lro_stats_update(ring->ring_no, &trxd);
1530 hw_lro_flush_stats_update(ring->ring_no, &trxd);
1531 }
developerfd40db22021-04-29 10:08:25 +08001532
1533 skb_record_rx_queue(skb, 0);
1534 napi_gro_receive(napi, skb);
1535
developerc4671b22021-05-28 13:16:42 +08001536skip_rx:
developerfd40db22021-04-29 10:08:25 +08001537 ring->data[idx] = new_data;
1538 rxd->rxd1 = (unsigned int)dma_addr;
1539
1540release_desc:
1541 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1542 rxd->rxd2 = RX_DMA_LSO;
1543 else
1544 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
1545
1546 ring->calc_idx = idx;
1547
1548 done++;
1549 }
1550
1551rx_done:
1552 if (done) {
1553 /* make sure that all changes to the dma ring are flushed before
1554 * we continue
1555 */
1556 wmb();
developer18f46a82021-07-20 21:08:21 +08001557 mtk_update_rx_cpu_idx(eth, ring);
developerfd40db22021-04-29 10:08:25 +08001558 }
1559
1560 return done;
1561}
1562
developerfb556ca2021-10-13 10:52:09 +08001563static void mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08001564 unsigned int *done, unsigned int *bytes)
1565{
developere9356982022-07-04 09:03:20 +08001566 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001567 struct mtk_tx_ring *ring = &eth->tx_ring;
1568 struct mtk_tx_dma *desc;
1569 struct sk_buff *skb;
1570 struct mtk_tx_buf *tx_buf;
1571 u32 cpu, dma;
1572
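	/* cpu is the software release pointer (last descriptor already
	 * reclaimed) and dma the hardware release pointer read from
	 * MTK_QTX_DRX_PTR; everything in between has been transmitted and
	 * can be unmapped and freed.
	 */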
developerc4671b22021-05-28 13:16:42 +08001573 cpu = ring->last_free_ptr;
developerfd40db22021-04-29 10:08:25 +08001574 dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1575
1576 desc = mtk_qdma_phys_to_virt(ring, cpu);
1577
1578 while ((cpu != dma) && budget) {
1579 u32 next_cpu = desc->txd2;
1580 int mac = 0;
1581
1582 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1583 break;
1584
1585 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1586
developere9356982022-07-04 09:03:20 +08001587 tx_buf = mtk_desc_to_tx_buf(ring, desc, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001588 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
1589 mac = 1;
1590
1591 skb = tx_buf->skb;
1592 if (!skb)
1593 break;
1594
1595 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1596 bytes[mac] += skb->len;
1597 done[mac]++;
1598 budget--;
1599 }
developerc4671b22021-05-28 13:16:42 +08001600 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08001601
1602 ring->last_free = desc;
1603 atomic_inc(&ring->free_count);
1604
1605 cpu = next_cpu;
1606 }
1607
developerc4671b22021-05-28 13:16:42 +08001608 ring->last_free_ptr = cpu;
developerfd40db22021-04-29 10:08:25 +08001609 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
developerfd40db22021-04-29 10:08:25 +08001610}
1611
developerfb556ca2021-10-13 10:52:09 +08001612static void mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08001613 unsigned int *done, unsigned int *bytes)
1614{
1615 struct mtk_tx_ring *ring = &eth->tx_ring;
1616 struct mtk_tx_dma *desc;
1617 struct sk_buff *skb;
1618 struct mtk_tx_buf *tx_buf;
1619 u32 cpu, dma;
1620
1621 cpu = ring->cpu_idx;
1622 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
1623
1624 while ((cpu != dma) && budget) {
1625 tx_buf = &ring->buf[cpu];
1626 skb = tx_buf->skb;
1627 if (!skb)
1628 break;
1629
1630 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1631 bytes[0] += skb->len;
1632 done[0]++;
1633 budget--;
1634 }
1635
developerc4671b22021-05-28 13:16:42 +08001636 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08001637
developere9356982022-07-04 09:03:20 +08001638 desc = ring->dma + cpu * eth->soc->txrx.txd_size;
developerfd40db22021-04-29 10:08:25 +08001639 ring->last_free = desc;
1640 atomic_inc(&ring->free_count);
1641
1642 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
1643 }
1644
1645 ring->cpu_idx = cpu;
developerfd40db22021-04-29 10:08:25 +08001646}
1647
1648static int mtk_poll_tx(struct mtk_eth *eth, int budget)
1649{
1650 struct mtk_tx_ring *ring = &eth->tx_ring;
1651 unsigned int done[MTK_MAX_DEVS];
1652 unsigned int bytes[MTK_MAX_DEVS];
1653 int total = 0, i;
1654
1655 memset(done, 0, sizeof(done));
1656 memset(bytes, 0, sizeof(bytes));
1657
1658 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
developerfb556ca2021-10-13 10:52:09 +08001659 mtk_poll_tx_qdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08001660 else
developerfb556ca2021-10-13 10:52:09 +08001661 mtk_poll_tx_pdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08001662
1663 for (i = 0; i < MTK_MAC_COUNT; i++) {
1664 if (!eth->netdev[i] || !done[i])
1665 continue;
1666 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1667 total += done[i];
1668 }
1669
1670 if (mtk_queue_stopped(eth) &&
1671 (atomic_read(&ring->free_count) > ring->thresh))
1672 mtk_wake_queue(eth);
1673
1674 return total;
1675}
1676
1677static void mtk_handle_status_irq(struct mtk_eth *eth)
1678{
developer8051e042022-04-08 13:26:36 +08001679 u32 status2 = mtk_r32(eth, MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08001680
1681 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
1682 mtk_stats_update(eth);
1683 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
developer8051e042022-04-08 13:26:36 +08001684 MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08001685 }
1686}
1687
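/* TX NAPI poll: acknowledge the TX done interrupt, reclaim up to "budget"
 * descriptors and only re-enable the TX interrupt once the poll really
 * completed (budget not exhausted and no new TX done status pending).
 */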
1688static int mtk_napi_tx(struct napi_struct *napi, int budget)
1689{
1690 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
1691 u32 status, mask;
1692 int tx_done = 0;
1693
1694 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1695 mtk_handle_status_irq(eth);
1696 mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
1697 tx_done = mtk_poll_tx(eth, budget);
1698
1699 if (unlikely(netif_msg_intr(eth))) {
1700 status = mtk_r32(eth, eth->tx_int_status_reg);
1701 mask = mtk_r32(eth, eth->tx_int_mask_reg);
1702 dev_info(eth->dev,
1703 "done tx %d, intr 0x%08x/0x%x\n",
1704 tx_done, status, mask);
1705 }
1706
1707 if (tx_done == budget)
1708 return budget;
1709
1710 status = mtk_r32(eth, eth->tx_int_status_reg);
1711 if (status & MTK_TX_DONE_INT)
1712 return budget;
1713
developerc4671b22021-05-28 13:16:42 +08001714 if (napi_complete(napi))
1715 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developerfd40db22021-04-29 10:08:25 +08001716
1717 return tx_done;
1718}
1719
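/* Per-ring RX NAPI poll. Each struct mtk_napi carries its own RX ring so
 * RSS can spread flows over several rings. After polling, the interrupt
 * status is re-checked and the handler loops again if more packets arrived
 * in the meantime, before finally completing NAPI and unmasking the ring's
 * RX done interrupt.
 */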
1720static int mtk_napi_rx(struct napi_struct *napi, int budget)
1721{
developer18f46a82021-07-20 21:08:21 +08001722 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
1723 struct mtk_eth *eth = rx_napi->eth;
1724 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08001725 u32 status, mask;
1726 int rx_done = 0;
1727 int remain_budget = budget;
1728
1729 mtk_handle_status_irq(eth);
1730
1731poll_again:
developer18f46a82021-07-20 21:08:21 +08001732 mtk_w32(eth, MTK_RX_DONE_INT(ring->ring_no), MTK_PDMA_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08001733 rx_done = mtk_poll_rx(napi, remain_budget, eth);
1734
1735 if (unlikely(netif_msg_intr(eth))) {
1736 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1737 mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
1738 dev_info(eth->dev,
1739 "done rx %d, intr 0x%08x/0x%x\n",
1740 rx_done, status, mask);
1741 }
1742 if (rx_done == remain_budget)
1743 return budget;
1744
1745 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
developer18f46a82021-07-20 21:08:21 +08001746 if (status & MTK_RX_DONE_INT(ring->ring_no)) {
developerfd40db22021-04-29 10:08:25 +08001747 remain_budget -= rx_done;
1748 goto poll_again;
1749 }
developerc4671b22021-05-28 13:16:42 +08001750
1751 if (napi_complete(napi))
developer18f46a82021-07-20 21:08:21 +08001752 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(ring->ring_no));
developerfd40db22021-04-29 10:08:25 +08001753
1754 return rx_done + budget - remain_budget;
1755}
1756
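/* Allocate and initialize the TX descriptor ring. The descriptors live
 * either in SoC SRAM (right behind the scratch ring) or in coherent DMA
 * memory, and are chained through txd2 so the QDMA engine sees a linked
 * list. PDMA-only SoCs additionally get the shadow ring described in the
 * comment further down.
 */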
1757static int mtk_tx_alloc(struct mtk_eth *eth)
1758{
developere9356982022-07-04 09:03:20 +08001759 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001760 struct mtk_tx_ring *ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08001761 int i, sz = soc->txrx.txd_size;
1762 struct mtk_tx_dma_v2 *txd, *pdma_txd;
developerfd40db22021-04-29 10:08:25 +08001763
1764 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
1765 GFP_KERNEL);
1766 if (!ring->buf)
1767 goto no_tx_mem;
1768
1769 if (!eth->soc->has_sram)
1770 ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08001771 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08001772 else {
developere9356982022-07-04 09:03:20 +08001773 ring->dma = eth->scratch_ring + MTK_DMA_SIZE * sz;
developerfd40db22021-04-29 10:08:25 +08001774 ring->phys = eth->phy_scratch_ring + MTK_DMA_SIZE * sz;
1775 }
1776
1777 if (!ring->dma)
1778 goto no_tx_mem;
1779
1780 for (i = 0; i < MTK_DMA_SIZE; i++) {
1781 int next = (i + 1) % MTK_DMA_SIZE;
1782 u32 next_ptr = ring->phys + next * sz;
1783
developere9356982022-07-04 09:03:20 +08001784 txd = ring->dma + i * sz;
1785 txd->txd2 = next_ptr;
1786 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1787 txd->txd4 = 0;
1788
1789 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1790 txd->txd5 = 0;
1791 txd->txd6 = 0;
1792 txd->txd7 = 0;
1793 txd->txd8 = 0;
1794 }
developerfd40db22021-04-29 10:08:25 +08001795 }
1796
1797 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
1798 * only as the framework. The real HW descriptors are the PDMA
1799 * descriptors in ring->dma_pdma.
1800 */
1801 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1802 ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08001803 &ring->phys_pdma, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08001804 if (!ring->dma_pdma)
1805 goto no_tx_mem;
1806
1807 for (i = 0; i < MTK_DMA_SIZE; i++) {
developere9356982022-07-04 09:03:20 +08001808			pdma_txd = ring->dma_pdma + i * sz;
1809
1810 pdma_txd->txd2 = TX_DMA_DESP2_DEF;
1811 pdma_txd->txd4 = 0;
developerfd40db22021-04-29 10:08:25 +08001812 }
1813 }
1814
1815 ring->dma_size = MTK_DMA_SIZE;
1816 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
developere9356982022-07-04 09:03:20 +08001817 ring->next_free = ring->dma;
1818 ring->last_free = (void *)txd;
developerc4671b22021-05-28 13:16:42 +08001819 ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
developerfd40db22021-04-29 10:08:25 +08001820 ring->thresh = MAX_SKB_FRAGS;
1821
1822 /* make sure that all changes to the dma ring are flushed before we
1823 * continue
1824 */
1825 wmb();
1826
1827 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1828 mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
1829 mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
1830 mtk_w32(eth,
1831 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1832 MTK_QTX_CRX_PTR);
developerc4671b22021-05-28 13:16:42 +08001833 mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
developerfd40db22021-04-29 10:08:25 +08001834 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
1835 MTK_QTX_CFG(0));
1836 } else {
1837 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
1838 mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
1839 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
1840 mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
1841 }
1842
1843 return 0;
1844
1845no_tx_mem:
1846 return -ENOMEM;
1847}
1848
1849static void mtk_tx_clean(struct mtk_eth *eth)
1850{
developere9356982022-07-04 09:03:20 +08001851 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001852 struct mtk_tx_ring *ring = &eth->tx_ring;
1853 int i;
1854
1855 if (ring->buf) {
1856 for (i = 0; i < MTK_DMA_SIZE; i++)
developerc4671b22021-05-28 13:16:42 +08001857 mtk_tx_unmap(eth, &ring->buf[i], false);
developerfd40db22021-04-29 10:08:25 +08001858 kfree(ring->buf);
1859 ring->buf = NULL;
1860 }
1861
1862 if (!eth->soc->has_sram && ring->dma) {
1863 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08001864 MTK_DMA_SIZE * soc->txrx.txd_size,
1865 ring->dma, ring->phys);
developerfd40db22021-04-29 10:08:25 +08001866 ring->dma = NULL;
1867 }
1868
1869 if (ring->dma_pdma) {
1870 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08001871 MTK_DMA_SIZE * soc->txrx.txd_size,
1872 ring->dma_pdma, ring->phys_pdma);
developerfd40db22021-04-29 10:08:25 +08001873 ring->dma_pdma = NULL;
1874 }
1875}
1876
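/* Allocate one RX ring. ring_no selects a PDMA ring (normal, HW LRO or
 * RSS) or, with MTK_RX_FLAGS_QDMA, the single QDMA RX ring. Buffers come
 * from netdev_alloc_frag(); on SoCs with SRAM the descriptors of normal
 * rings are placed right after the TX ring (offset by ring number),
 * otherwise they are allocated from coherent DMA memory.
 */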
1877static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
1878{
1879 struct mtk_rx_ring *ring;
1880 int rx_data_len, rx_dma_size;
1881 int i;
1882
1883 if (rx_flag == MTK_RX_FLAGS_QDMA) {
1884 if (ring_no)
1885 return -EINVAL;
1886 ring = &eth->rx_ring_qdma;
1887 } else {
1888 ring = &eth->rx_ring[ring_no];
1889 }
1890
1891 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
1892 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
1893 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
1894 } else {
1895 rx_data_len = ETH_DATA_LEN;
1896 rx_dma_size = MTK_DMA_SIZE;
1897 }
1898
1899 ring->frag_size = mtk_max_frag_size(rx_data_len);
1900 ring->buf_size = mtk_max_buf_size(ring->frag_size);
1901 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
1902 GFP_KERNEL);
1903 if (!ring->data)
1904 return -ENOMEM;
1905
1906 for (i = 0; i < rx_dma_size; i++) {
1907 ring->data[i] = netdev_alloc_frag(ring->frag_size);
1908 if (!ring->data[i])
1909 return -ENOMEM;
1910 }
1911
1912 if ((!eth->soc->has_sram) || (eth->soc->has_sram
1913 && (rx_flag != MTK_RX_FLAGS_NORMAL)))
1914 ring->dma = dma_alloc_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08001915 rx_dma_size * eth->soc->txrx.rxd_size,
1916 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08001917 else {
1918 struct mtk_tx_ring *tx_ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08001919 ring->dma = tx_ring->dma + MTK_DMA_SIZE *
1920 eth->soc->txrx.rxd_size * (ring_no + 1);
developer18f46a82021-07-20 21:08:21 +08001921 ring->phys = tx_ring->phys + MTK_DMA_SIZE *
developere9356982022-07-04 09:03:20 +08001922 eth->soc->txrx.rxd_size * (ring_no + 1);
developerfd40db22021-04-29 10:08:25 +08001923 }
1924
1925 if (!ring->dma)
1926 return -ENOMEM;
1927
1928 for (i = 0; i < rx_dma_size; i++) {
developere9356982022-07-04 09:03:20 +08001929 struct mtk_rx_dma_v2 *rxd;
1930
developerfd40db22021-04-29 10:08:25 +08001931 dma_addr_t dma_addr = dma_map_single(eth->dev,
1932 ring->data[i] + NET_SKB_PAD + eth->ip_align,
1933 ring->buf_size,
1934 DMA_FROM_DEVICE);
1935 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1936 return -ENOMEM;
developere9356982022-07-04 09:03:20 +08001937
1938 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
1939 rxd->rxd1 = (unsigned int)dma_addr;
developerfd40db22021-04-29 10:08:25 +08001940
1941 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
developere9356982022-07-04 09:03:20 +08001942 rxd->rxd2 = RX_DMA_LSO;
developerfd40db22021-04-29 10:08:25 +08001943 else
developere9356982022-07-04 09:03:20 +08001944 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
developerfd40db22021-04-29 10:08:25 +08001945
developere9356982022-07-04 09:03:20 +08001946 rxd->rxd3 = 0;
1947 rxd->rxd4 = 0;
1948
1949 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1950 rxd->rxd5 = 0;
1951 rxd->rxd6 = 0;
1952 rxd->rxd7 = 0;
1953 rxd->rxd8 = 0;
developerfd40db22021-04-29 10:08:25 +08001954 }
developerfd40db22021-04-29 10:08:25 +08001955 }
1956 ring->dma_size = rx_dma_size;
1957 ring->calc_idx_update = false;
1958 ring->calc_idx = rx_dma_size - 1;
1959 ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ?
1960 MTK_QRX_CRX_IDX_CFG(ring_no) :
1961 MTK_PRX_CRX_IDX_CFG(ring_no);
developer77d03a72021-06-06 00:06:00 +08001962 ring->ring_no = ring_no;
developerfd40db22021-04-29 10:08:25 +08001963 /* make sure that all changes to the dma ring are flushed before we
1964 * continue
1965 */
1966 wmb();
1967
1968 if (rx_flag == MTK_RX_FLAGS_QDMA) {
1969 mtk_w32(eth, ring->phys, MTK_QRX_BASE_PTR_CFG(ring_no));
1970 mtk_w32(eth, rx_dma_size, MTK_QRX_MAX_CNT_CFG(ring_no));
1971 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1972 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_QDMA_RST_IDX);
1973 } else {
1974 mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
1975 mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
1976 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1977 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
1978 }
1979
1980 return 0;
1981}
1982
1983static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram)
1984{
1985 int i;
1986
1987 if (ring->data && ring->dma) {
1988 for (i = 0; i < ring->dma_size; i++) {
developere9356982022-07-04 09:03:20 +08001989 struct mtk_rx_dma *rxd;
1990
developerfd40db22021-04-29 10:08:25 +08001991 if (!ring->data[i])
1992 continue;
developere9356982022-07-04 09:03:20 +08001993
1994 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
1995 if (!rxd->rxd1)
developerfd40db22021-04-29 10:08:25 +08001996 continue;
developere9356982022-07-04 09:03:20 +08001997
developerfd40db22021-04-29 10:08:25 +08001998 dma_unmap_single(eth->dev,
developere9356982022-07-04 09:03:20 +08001999 rxd->rxd1,
developerfd40db22021-04-29 10:08:25 +08002000 ring->buf_size,
2001 DMA_FROM_DEVICE);
2002 skb_free_frag(ring->data[i]);
2003 }
2004 kfree(ring->data);
2005 ring->data = NULL;
2006 }
2007
2008	if (in_sram)
2009 return;
2010
2011 if (ring->dma) {
2012 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002013 ring->dma_size * eth->soc->txrx.rxd_size,
developerfd40db22021-04-29 10:08:25 +08002014 ring->dma,
2015 ring->phys);
2016 ring->dma = NULL;
2017 }
2018}
2019
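/* Program the hardware LRO engine: put the LRO rings into auto-learn mode,
 * set the aging/aggregation timers and aggregation count limits, and
 * finally set the enable bit in PDMA_LRO_CTRL_DW0. NETSYS_V2 parts take
 * the SDL (buffer length) setting from different fields than older SoCs.
 */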
2020static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2021{
2022 int i;
developer77d03a72021-06-06 00:06:00 +08002023 u32 val;
developerfd40db22021-04-29 10:08:25 +08002024 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2025 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2026
2027 /* set LRO rings to auto-learn modes */
2028 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2029
2030 /* validate LRO ring */
2031 ring_ctrl_dw2 |= MTK_RING_VLD;
2032
2033 /* set AGE timer (unit: 20us) */
2034 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2035 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2036
2037 /* set max AGG timer (unit: 20us) */
2038 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2039
2040 /* set max LRO AGG count */
2041 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2042 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2043
developer77d03a72021-06-06 00:06:00 +08002044 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002045 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2046 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2047 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2048 }
2049
2050 /* IPv4 checksum update enable */
2051 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2052
2053 /* switch priority comparison to packet count mode */
2054 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2055
2056 /* bandwidth threshold setting */
2057 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2058
2059 /* auto-learn score delta setting */
developer77d03a72021-06-06 00:06:00 +08002060 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_LRO_ALT_SCORE_DELTA);
developerfd40db22021-04-29 10:08:25 +08002061
2062 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2063 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2064 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2065
developerfd40db22021-04-29 10:08:25 +08002066 /* the minimal remaining room of SDL0 in RXD for lro aggregation */
2067 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2068
developer77d03a72021-06-06 00:06:00 +08002069 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2070 val = mtk_r32(eth, MTK_PDMA_RX_CFG);
2071 mtk_w32(eth, val | (MTK_PDMA_LRO_SDL << MTK_RX_CFG_SDL_OFFSET),
2072 MTK_PDMA_RX_CFG);
2073
2074 lro_ctrl_dw0 |= MTK_PDMA_LRO_SDL << MTK_CTRL_DW0_SDL_OFFSET;
2075 } else {
2076 /* set HW LRO mode & the max aggregation count for rx packets */
2077 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2078 }
2079
developerfd40db22021-04-29 10:08:25 +08002080 /* enable HW LRO */
2081 lro_ctrl_dw0 |= MTK_LRO_EN;
2082
developer77d03a72021-06-06 00:06:00 +08002083 /* enable cpu reason black list */
2084 lro_ctrl_dw0 |= MTK_LRO_CRSN_BNW;
2085
developerfd40db22021-04-29 10:08:25 +08002086 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2087 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2088
developer77d03a72021-06-06 00:06:00 +08002089	/* do not use the PPE cpu reason */
2090 mtk_w32(eth, 0xffffffff, MTK_PDMA_LRO_CTRL_DW1);
2091
developerfd40db22021-04-29 10:08:25 +08002092 return 0;
2093}
2094
2095static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2096{
2097 int i;
2098 u32 val;
2099
2100 /* relinquish lro rings, flush aggregated packets */
developer77d03a72021-06-06 00:06:00 +08002101 mtk_w32(eth, MTK_LRO_RING_RELINGUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
developerfd40db22021-04-29 10:08:25 +08002102
2103 /* wait for relinquishments done */
2104 for (i = 0; i < 10; i++) {
2105 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
developer77d03a72021-06-06 00:06:00 +08002106 if (val & MTK_LRO_RING_RELINGUISH_DONE) {
developer8051e042022-04-08 13:26:36 +08002107 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08002108 continue;
2109 }
2110 break;
2111 }
2112
2113 /* invalidate lro rings */
developer77d03a72021-06-06 00:06:00 +08002114 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
developerfd40db22021-04-29 10:08:25 +08002115 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2116
2117 /* disable HW LRO */
2118 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2119}
2120
2121static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2122{
2123 u32 reg_val;
2124
developer77d03a72021-06-06 00:06:00 +08002125 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2126 idx += 1;
2127
developerfd40db22021-04-29 10:08:25 +08002128 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2129
2130 /* invalidate the IP setting */
2131 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2132
2133 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2134
2135 /* validate the IP setting */
2136 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2137}
2138
2139static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2140{
2141 u32 reg_val;
2142
developer77d03a72021-06-06 00:06:00 +08002143 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2144 idx += 1;
2145
developerfd40db22021-04-29 10:08:25 +08002146 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2147
2148 /* invalidate the IP setting */
2149 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2150
2151 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2152}
2153
2154static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2155{
2156 int cnt = 0;
2157 int i;
2158
2159 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2160 if (mac->hwlro_ip[i])
2161 cnt++;
2162 }
2163
2164 return cnt;
2165}
2166
2167static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2168 struct ethtool_rxnfc *cmd)
2169{
2170 struct ethtool_rx_flow_spec *fsp =
2171 (struct ethtool_rx_flow_spec *)&cmd->fs;
2172 struct mtk_mac *mac = netdev_priv(dev);
2173 struct mtk_eth *eth = mac->hw;
2174 int hwlro_idx;
2175
2176 if ((fsp->flow_type != TCP_V4_FLOW) ||
2177 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2178 (fsp->location > 1))
2179 return -EINVAL;
2180
2181 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2182 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2183
2184 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2185
2186 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2187
2188 return 0;
2189}
2190
2191static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2192 struct ethtool_rxnfc *cmd)
2193{
2194 struct ethtool_rx_flow_spec *fsp =
2195 (struct ethtool_rx_flow_spec *)&cmd->fs;
2196 struct mtk_mac *mac = netdev_priv(dev);
2197 struct mtk_eth *eth = mac->hw;
2198 int hwlro_idx;
2199
2200 if (fsp->location > 1)
2201 return -EINVAL;
2202
2203 mac->hwlro_ip[fsp->location] = 0;
2204 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2205
2206 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2207
2208 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2209
2210 return 0;
2211}
2212
2213static void mtk_hwlro_netdev_disable(struct net_device *dev)
2214{
2215 struct mtk_mac *mac = netdev_priv(dev);
2216 struct mtk_eth *eth = mac->hw;
2217 int i, hwlro_idx;
2218
2219 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2220 mac->hwlro_ip[i] = 0;
2221 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2222
2223 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2224 }
2225
2226 mac->hwlro_ip_cnt = 0;
2227}
2228
2229static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2230 struct ethtool_rxnfc *cmd)
2231{
2232 struct mtk_mac *mac = netdev_priv(dev);
2233 struct ethtool_rx_flow_spec *fsp =
2234 (struct ethtool_rx_flow_spec *)&cmd->fs;
2235
2236 /* only tcp dst ipv4 is meaningful, others are meaningless */
2237 fsp->flow_type = TCP_V4_FLOW;
2238 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2239 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2240
2241 fsp->h_u.tcp_ip4_spec.ip4src = 0;
2242 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2243 fsp->h_u.tcp_ip4_spec.psrc = 0;
2244 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2245 fsp->h_u.tcp_ip4_spec.pdst = 0;
2246 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2247 fsp->h_u.tcp_ip4_spec.tos = 0;
2248 fsp->m_u.tcp_ip4_spec.tos = 0xff;
2249
2250 return 0;
2251}
2252
2253static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2254 struct ethtool_rxnfc *cmd,
2255 u32 *rule_locs)
2256{
2257 struct mtk_mac *mac = netdev_priv(dev);
2258 int cnt = 0;
2259 int i;
2260
2261 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2262 if (mac->hwlro_ip[i]) {
2263 rule_locs[cnt] = i;
2264 cnt++;
2265 }
2266 }
2267
2268 cmd->rule_cnt = cnt;
2269
2270 return 0;
2271}
2272
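/* Enable receive-side scaling. The sequence below programs the hash types
 * and the indirection table, asserts MTK_RSS_CFG_REQ to pause the engine,
 * sets MTK_RSS_EN, releases the request bit again and finally routes the
 * extra ring interrupts to their PDMA interrupt groups.
 */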
developer18f46a82021-07-20 21:08:21 +08002273static int mtk_rss_init(struct mtk_eth *eth)
2274{
2275 u32 val;
2276
2277 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2278 /* Set RSS rings to PSE modes */
2279 val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(1));
2280 val |= MTK_RING_PSE_MODE;
2281 mtk_w32(eth, val, MTK_LRO_CTRL_DW2_CFG(1));
2282
2283 /* Enable non-lro multiple rx */
2284 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2285 val |= MTK_NON_LRO_MULTI_EN;
2286 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2287
2288 		/* Enable RSS delay interrupt support */
2289 val |= MTK_LRO_DLY_INT_EN;
2290 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2291
2292 /* Set RSS delay config int ring1 */
2293 mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX1_DLY_INT);
2294 }
2295
2296 /* Hash Type */
2297 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2298 val |= MTK_RSS_IPV4_STATIC_HASH;
2299 val |= MTK_RSS_IPV6_STATIC_HASH;
2300 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2301
2302 /* Select the size of indirection table */
2303 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW0);
2304 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW1);
2305 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW2);
2306 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW3);
2307 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW4);
2308 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW5);
2309 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW6);
2310 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW7);
2311
2312 /* Pause */
2313 val |= MTK_RSS_CFG_REQ;
2314 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2315
2316 /* Enable RSS*/
2317 	/* Enable RSS */
2318 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2319
2320 /* Release pause */
2321 val &= ~(MTK_RSS_CFG_REQ);
2322 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2323
2324 /* Set perRSS GRP INT */
2325 	/* Set per-RSS GRP INT */
2326
2327 /* Set GRP INT */
2328 mtk_w32(eth, 0x21021030, MTK_FE_INT_GRP);
2329
2330 return 0;
2331}
2332
2333static void mtk_rss_uninit(struct mtk_eth *eth)
2334{
2335 u32 val;
2336
2337 /* Pause */
2338 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2339 val |= MTK_RSS_CFG_REQ;
2340 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2341
2342 /* Disable RSS*/
2343 	/* Disable RSS */
2344 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2345
2346 /* Release pause */
2347 val &= ~(MTK_RSS_CFG_REQ);
2348 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2349}
2350
developerfd40db22021-04-29 10:08:25 +08002351static netdev_features_t mtk_fix_features(struct net_device *dev,
2352 netdev_features_t features)
2353{
2354 if (!(features & NETIF_F_LRO)) {
2355 struct mtk_mac *mac = netdev_priv(dev);
2356 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2357
2358 if (ip_cnt) {
2359 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
2360
2361 features |= NETIF_F_LRO;
2362 }
2363 }
2364
2365 if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
2366 netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n");
2367
2368 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2369 }
2370
2371 return features;
2372}
2373
2374static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2375{
2376 struct mtk_mac *mac = netdev_priv(dev);
2377 struct mtk_eth *eth = mac->hw;
2378 int err = 0;
2379
2380 if (!((dev->features ^ features) & MTK_SET_FEATURES))
2381 return 0;
2382
2383 if (!(features & NETIF_F_LRO))
2384 mtk_hwlro_netdev_disable(dev);
2385
2386 if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
2387 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
2388 else
2389 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
2390
2391 return err;
2392}
2393
2394/* wait for DMA to finish whatever it is doing before we start using it again */
2395static int mtk_dma_busy_wait(struct mtk_eth *eth)
2396{
2397 unsigned long t_start = jiffies;
2398
2399 while (1) {
2400 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2401 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
2402 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2403 return 0;
2404 } else {
2405 if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
2406 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2407 return 0;
2408 }
2409
2410 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
2411 break;
2412 }
2413
2414 dev_err(eth->dev, "DMA init timeout\n");
2415 return -1;
2416}
2417
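/* Bring up all DMA resources in dependency order: wait for the engines to
 * go idle, set up the QDMA scratch/free queue memory, then the TX ring,
 * the QDMA and PDMA RX rings, optionally the HW LRO and RSS rings, and
 * finally the QDMA random early drop thresholds.
 */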
2418static int mtk_dma_init(struct mtk_eth *eth)
2419{
2420 int err;
2421 u32 i;
2422
2423 if (mtk_dma_busy_wait(eth))
2424 return -EBUSY;
2425
2426 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2427 /* QDMA needs scratch memory for internal reordering of the
2428 * descriptors
2429 */
2430 err = mtk_init_fq_dma(eth);
2431 if (err)
2432 return err;
2433 }
2434
2435 err = mtk_tx_alloc(eth);
2436 if (err)
2437 return err;
2438
2439 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2440 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2441 if (err)
2442 return err;
2443 }
2444
2445 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2446 if (err)
2447 return err;
2448
2449 if (eth->hwlro) {
developer77d03a72021-06-06 00:06:00 +08002450 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ? 4 : 1;
2451 for (; i < MTK_MAX_RX_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002452 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2453 if (err)
2454 return err;
2455 }
2456 err = mtk_hwlro_rx_init(eth);
2457 if (err)
2458 return err;
2459 }
2460
developer18f46a82021-07-20 21:08:21 +08002461 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2462 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2463 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_NORMAL);
2464 if (err)
2465 return err;
2466 }
2467 err = mtk_rss_init(eth);
2468 if (err)
2469 return err;
2470 }
2471
developerfd40db22021-04-29 10:08:25 +08002472 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2473 /* Enable random early drop and set drop threshold
2474 * automatically
2475 */
2476 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2477 FC_THRES_MIN, MTK_QDMA_FC_THRES);
2478 mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
2479 }
2480
2481 return 0;
2482}
2483
2484static void mtk_dma_free(struct mtk_eth *eth)
2485{
developere9356982022-07-04 09:03:20 +08002486 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002487 int i;
2488
2489 for (i = 0; i < MTK_MAC_COUNT; i++)
2490 if (eth->netdev[i])
2491 netdev_reset_queue(eth->netdev[i]);
2492 	if (!eth->soc->has_sram && eth->scratch_ring) {
2493 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002494 MTK_DMA_SIZE * soc->txrx.txd_size,
2495 eth->scratch_ring, eth->phy_scratch_ring);
developerfd40db22021-04-29 10:08:25 +08002496 eth->scratch_ring = NULL;
2497 eth->phy_scratch_ring = 0;
2498 }
2499 mtk_tx_clean(eth);
developerb3ce86f2022-06-30 13:31:47 +08002500	mtk_rx_clean(eth, &eth->rx_ring[0], eth->soc->has_sram);
developerfd40db22021-04-29 10:08:25 +08002501	mtk_rx_clean(eth, &eth->rx_ring_qdma, 0);
2502
2503 if (eth->hwlro) {
2504 mtk_hwlro_rx_uninit(eth);
developer77d03a72021-06-06 00:06:00 +08002505
2506 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ? 4 : 1;
2507 for (; i < MTK_MAX_RX_RING_NUM; i++)
2508 mtk_rx_clean(eth, &eth->rx_ring[i], 0);
developerfd40db22021-04-29 10:08:25 +08002509 }
2510
developer18f46a82021-07-20 21:08:21 +08002511 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2512 mtk_rss_uninit(eth);
2513
2514 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
2515 mtk_rx_clean(eth, &eth->rx_ring[i], 1);
2516 }
2517
developer94008d92021-09-23 09:47:41 +08002518 if (eth->scratch_head) {
2519 kfree(eth->scratch_head);
2520 eth->scratch_head = NULL;
2521 }
developerfd40db22021-04-29 10:08:25 +08002522}
2523
2524static void mtk_tx_timeout(struct net_device *dev)
2525{
2526 struct mtk_mac *mac = netdev_priv(dev);
2527 struct mtk_eth *eth = mac->hw;
2528
2529 eth->netdev[mac->id]->stats.tx_errors++;
2530 netif_err(eth, tx_err, dev,
2531 "transmit timed out\n");
developer8051e042022-04-08 13:26:36 +08002532
2533 if (atomic_read(&reset_lock) == 0)
2534 schedule_work(&eth->pending_work);
developerfd40db22021-04-29 10:08:25 +08002535}
2536
developer18f46a82021-07-20 21:08:21 +08002537static irqreturn_t mtk_handle_irq_rx(int irq, void *priv)
developerfd40db22021-04-29 10:08:25 +08002538{
developer18f46a82021-07-20 21:08:21 +08002539 struct mtk_napi *rx_napi = priv;
2540 struct mtk_eth *eth = rx_napi->eth;
2541 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08002542
developer18f46a82021-07-20 21:08:21 +08002543 if (likely(napi_schedule_prep(&rx_napi->napi))) {
developer18f46a82021-07-20 21:08:21 +08002544 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(ring->ring_no));
developer6bbe70d2021-08-06 09:34:55 +08002545 __napi_schedule(&rx_napi->napi);
developerfd40db22021-04-29 10:08:25 +08002546 }
2547
2548 return IRQ_HANDLED;
2549}
2550
2551static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
2552{
2553 struct mtk_eth *eth = _eth;
2554
2555 if (likely(napi_schedule_prep(&eth->tx_napi))) {
developerfd40db22021-04-29 10:08:25 +08002556 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer6bbe70d2021-08-06 09:34:55 +08002557 __napi_schedule(&eth->tx_napi);
developerfd40db22021-04-29 10:08:25 +08002558 }
2559
2560 return IRQ_HANDLED;
2561}
2562
2563static irqreturn_t mtk_handle_irq(int irq, void *_eth)
2564{
2565 struct mtk_eth *eth = _eth;
2566
developer18f46a82021-07-20 21:08:21 +08002567 if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT(0)) {
2568 if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT(0))
2569 mtk_handle_irq_rx(irq, &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08002570 }
2571 if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
2572 if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
2573 mtk_handle_irq_tx(irq, _eth);
2574 }
2575
2576 return IRQ_HANDLED;
2577}
2578
developera2613e62022-07-01 18:29:37 +08002579static irqreturn_t mtk_handle_irq_fixed_link(int irq, void *_mac)
2580{
2581 struct mtk_mac *mac = _mac;
2582 struct mtk_eth *eth = mac->hw;
2583 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
2584 struct net_device *dev = phylink_priv->dev;
2585 int link_old, link_new;
2586
2587 // clear interrupt status for gpy211
2588 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
2589
2590 link_old = phylink_priv->link;
2591 link_new = _mtk_mdio_read(eth, phylink_priv->phyaddr, MII_BMSR) & BMSR_LSTATUS;
2592
2593 if (link_old != link_new) {
2594 phylink_priv->link = link_new;
2595 if (link_new) {
2596 printk("phylink.%d %s: Link is Up\n", phylink_priv->id, dev->name);
2597 if (dev)
2598 netif_carrier_on(dev);
2599 } else {
2600 printk("phylink.%d %s: Link is Down\n", phylink_priv->id, dev->name);
2601 if (dev)
2602 netif_carrier_off(dev);
2603 }
2604 }
2605
2606 return IRQ_HANDLED;
2607}
2608
developerfd40db22021-04-29 10:08:25 +08002609#ifdef CONFIG_NET_POLL_CONTROLLER
2610static void mtk_poll_controller(struct net_device *dev)
2611{
2612 struct mtk_mac *mac = netdev_priv(dev);
2613 struct mtk_eth *eth = mac->hw;
2614
2615 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002616 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
2617 mtk_handle_irq_rx(eth->irq[2], &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08002618 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002619 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08002620}
2621#endif
2622
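/* Last step of the datapath bring-up: run mtk_dma_init() and then turn on
 * the TX/RX DMA engines. The QDMA global configuration differs between
 * NETSYS_V2 (extra write-back/completion-check options) and older SoCs,
 * while PDMA-only parts get a much simpler setup.
 */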
2623static int mtk_start_dma(struct mtk_eth *eth)
2624{
2625 u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
developer77d03a72021-06-06 00:06:00 +08002626 int val, err;
developerfd40db22021-04-29 10:08:25 +08002627
2628 err = mtk_dma_init(eth);
2629 if (err) {
2630 mtk_dma_free(eth);
2631 return err;
2632 }
2633
2634 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
developer15d0d282021-07-14 16:40:44 +08002635 val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
developer19d84562022-04-21 17:01:06 +08002636 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2637 val &= ~MTK_RESV_BUF_MASK;
developerfd40db22021-04-29 10:08:25 +08002638 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002639 val | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08002640 MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
2641 MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
2642 MTK_RESV_BUF | MTK_WCOMP_EN |
2643 MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
developer1ac65932022-07-19 17:23:32 +08002644 MTK_RX_2B_OFFSET, MTK_QDMA_GLO_CFG);
developer19d84562022-04-21 17:01:06 +08002645 }
developerfd40db22021-04-29 10:08:25 +08002646 else
2647 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002648 val | MTK_TX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08002649 MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
2650 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
2651 MTK_RX_BT_32DWORDS,
2652 MTK_QDMA_GLO_CFG);
2653
developer15d0d282021-07-14 16:40:44 +08002654 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
developerfd40db22021-04-29 10:08:25 +08002655 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002656 val | MTK_RX_DMA_EN | rx_2b_offset |
developerfd40db22021-04-29 10:08:25 +08002657 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
2658 MTK_PDMA_GLO_CFG);
2659 } else {
2660 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2661 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
2662 MTK_PDMA_GLO_CFG);
2663 }
2664
developer77d03a72021-06-06 00:06:00 +08002665 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) && eth->hwlro) {
2666 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
2667 mtk_w32(eth, val | MTK_RX_DMA_LRO_EN, MTK_PDMA_GLO_CFG);
2668 }
2669
developerfd40db22021-04-29 10:08:25 +08002670 return 0;
2671}
2672
developer8051e042022-04-08 13:26:36 +08002673void mtk_gdm_config(struct mtk_eth *eth, u32 config)
developerfd40db22021-04-29 10:08:25 +08002674{
2675 int i;
2676
2677 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2678 return;
2679
2680 for (i = 0; i < MTK_MAC_COUNT; i++) {
2681 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
2682
2683 		/* by default, set up the forward port to send frames to PDMA */
2684 val &= ~0xffff;
2685
2686 /* Enable RX checksum */
2687 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
2688
2689 val |= config;
2690
2691 if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
2692 val |= MTK_GDMA_SPECIAL_TAG;
2693
2694 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
2695 }
developerfd40db22021-04-29 10:08:25 +08002696}
2697
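/* ndo_open: both GMACs share one set of DMA rings, so the rings, NAPI
 * contexts and interrupts are only brought up by the first opener (tracked
 * with dma_refcnt); later openers just take another reference.
 */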
2698static int mtk_open(struct net_device *dev)
2699{
2700 struct mtk_mac *mac = netdev_priv(dev);
2701 struct mtk_eth *eth = mac->hw;
developera2613e62022-07-01 18:29:37 +08002702 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
developer18f46a82021-07-20 21:08:21 +08002703 int err, i;
developer3a5969e2022-02-09 15:36:36 +08002704 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08002705
2706 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
2707 if (err) {
2708 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
2709 err);
2710 return err;
2711 }
2712
2713 /* we run 2 netdevs on the same dma ring so we only bring it up once */
2714 if (!refcount_read(&eth->dma_refcnt)) {
2715 int err = mtk_start_dma(eth);
2716
2717 if (err)
2718 return err;
2719
2720 mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
2721
2722 /* Indicates CDM to parse the MTK special tag from CPU */
2723 if (netdev_uses_dsa(dev)) {
2724 u32 val;
2725 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
2726 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
2727 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
2728 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
2729 }
2730
2731 napi_enable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08002732 napi_enable(&eth->rx_napi[0].napi);
developerfd40db22021-04-29 10:08:25 +08002733 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002734 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
2735
2736 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2737 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2738 napi_enable(&eth->rx_napi[i].napi);
2739 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(i));
2740 }
2741 }
2742
developerfd40db22021-04-29 10:08:25 +08002743 refcount_set(&eth->dma_refcnt, 1);
2744 }
2745 else
2746 refcount_inc(&eth->dma_refcnt);
2747
developera2613e62022-07-01 18:29:37 +08002748 if (phylink_priv->desc) {
2749 		/* Notice: this programming sequence applies only to the GPY211 single-PHY chip.
2750 		 * If the single-PHY chip is not a GPY211, you should instead:
2751 		 * 1. Contact your single-PHY chip vendor and get the details of
2752 		 *    - how to enable the link status change interrupt
2753 		 *    - how to clear the interrupt source
2754 		 */
2755
2756 // clear interrupt source for gpy211
2757 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
2758
2759 // enable link status change interrupt for gpy211
2760 _mtk_mdio_write(eth, phylink_priv->phyaddr, 0x19, 0x0001);
2761
2762 phylink_priv->dev = dev;
2763
2764 // override dev pointer for single PHY chip 0
2765 if (phylink_priv->id == 0) {
2766 struct net_device *tmp;
2767
2768 tmp = __dev_get_by_name(&init_net, phylink_priv->label);
2769 if (tmp)
2770 phylink_priv->dev = tmp;
2771 else
2772 phylink_priv->dev = NULL;
2773 }
2774 }
2775
developerfd40db22021-04-29 10:08:25 +08002776 phylink_start(mac->phylink);
2777 netif_start_queue(dev);
developer3a5969e2022-02-09 15:36:36 +08002778 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer793f7b42022-05-20 13:54:51 +08002779 if (!phy_node && eth->sgmii->regmap[mac->id]) {
developer1a63ef92022-04-15 17:17:32 +08002780 regmap_write(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
developer3a5969e2022-02-09 15:36:36 +08002781 }
developerfd40db22021-04-29 10:08:25 +08002782 return 0;
2783}
2784
2785static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
2786{
2787 u32 val;
2788 int i;
2789
2790 /* stop the dma engine */
2791 spin_lock_bh(&eth->page_lock);
2792 val = mtk_r32(eth, glo_cfg);
2793 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
2794 glo_cfg);
2795 spin_unlock_bh(&eth->page_lock);
2796
2797 /* wait for dma stop */
2798 for (i = 0; i < 10; i++) {
2799 val = mtk_r32(eth, glo_cfg);
2800 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
developer8051e042022-04-08 13:26:36 +08002801 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08002802 continue;
2803 }
2804 break;
2805 }
2806}
2807
2808static int mtk_stop(struct net_device *dev)
2809{
2810 struct mtk_mac *mac = netdev_priv(dev);
2811 struct mtk_eth *eth = mac->hw;
developer18f46a82021-07-20 21:08:21 +08002812 int i;
developer3a5969e2022-02-09 15:36:36 +08002813 u32 val = 0;
2814 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08002815
2816 netif_tx_disable(dev);
2817
developer3a5969e2022-02-09 15:36:36 +08002818 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
2819 if (phy_node) {
2820 val = _mtk_mdio_read(eth, 0, 0);
2821 val |= BMCR_PDOWN;
2822 _mtk_mdio_write(eth, 0, 0, val);
developer793f7b42022-05-20 13:54:51 +08002823 } else if (eth->sgmii->regmap[mac->id]) {
developer1a63ef92022-04-15 17:17:32 +08002824 regmap_read(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
developer3a5969e2022-02-09 15:36:36 +08002825 val |= SGMII_PHYA_PWD;
developer1a63ef92022-04-15 17:17:32 +08002826 regmap_write(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
developer3a5969e2022-02-09 15:36:36 +08002827 }
2828
2829 //GMAC RX disable
2830 val = mtk_r32(eth, MTK_MAC_MCR(mac->id));
2831 mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(mac->id));
2832
2833 phylink_stop(mac->phylink);
2834
developerfd40db22021-04-29 10:08:25 +08002835 phylink_disconnect_phy(mac->phylink);
2836
2837 /* only shutdown DMA if this is the last user */
2838 if (!refcount_dec_and_test(&eth->dma_refcnt))
2839 return 0;
2840
2841 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
2842
2843 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002844 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08002845 napi_disable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08002846 napi_disable(&eth->rx_napi[0].napi);
2847
2848 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2849 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2850 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(i));
2851 napi_disable(&eth->rx_napi[i].napi);
2852 }
2853 }
developerfd40db22021-04-29 10:08:25 +08002854
2855 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2856 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
2857 mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
2858
2859 mtk_dma_free(eth);
2860
2861 return 0;
2862}
2863
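/* Assert the requested ETHSYS reset bits, poll (roughly up to 5s) until
 * the hardware reports them as set, record the cold-reset event, de-assert
 * the bits again and give the block a short settling delay.
 */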
developer8051e042022-04-08 13:26:36 +08002864void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
developerfd40db22021-04-29 10:08:25 +08002865{
developer8051e042022-04-08 13:26:36 +08002866 u32 val = 0, i = 0;
developerfd40db22021-04-29 10:08:25 +08002867
developerfd40db22021-04-29 10:08:25 +08002868 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
developer8051e042022-04-08 13:26:36 +08002869 reset_bits, reset_bits);
2870
2871 while (i++ < 5000) {
2872 mdelay(1);
2873 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
2874
2875 if ((val & reset_bits) == reset_bits) {
2876 mtk_reset_event_update(eth, MTK_EVENT_COLD_CNT);
2877 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2878 reset_bits, ~reset_bits);
2879 break;
2880 }
2881 }
2882
developerfd40db22021-04-29 10:08:25 +08002883 mdelay(10);
2884}
2885
2886static void mtk_clk_disable(struct mtk_eth *eth)
2887{
2888 int clk;
2889
2890 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
2891 clk_disable_unprepare(eth->clks[clk]);
2892}
2893
2894static int mtk_clk_enable(struct mtk_eth *eth)
2895{
2896 int clk, ret;
2897
2898 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
2899 ret = clk_prepare_enable(eth->clks[clk]);
2900 if (ret)
2901 goto err_disable_clks;
2902 }
2903
2904 return 0;
2905
2906err_disable_clks:
2907 while (--clk >= 0)
2908 clk_disable_unprepare(eth->clks[clk]);
2909
2910 return ret;
2911}
2912
developer18f46a82021-07-20 21:08:21 +08002913static int mtk_napi_init(struct mtk_eth *eth)
2914{
2915 struct mtk_napi *rx_napi = &eth->rx_napi[0];
2916 int i;
2917
2918 rx_napi->eth = eth;
2919 rx_napi->rx_ring = &eth->rx_ring[0];
2920 rx_napi->irq_grp_no = 2;
2921
2922 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2923 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2924 rx_napi = &eth->rx_napi[i];
2925 rx_napi->eth = eth;
2926 rx_napi->rx_ring = &eth->rx_ring[i];
2927 rx_napi->irq_grp_no = 2 + i;
2928 }
2929 }
2930
2931 return 0;
2932}
2933
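/* One-time or reset-triggered hardware init. When the SER reset counter is
 * zero this also enables runtime PM and the ethernet clocks; during a
 * reset it goes straight to the warm or cold FE reset and then reprograms
 * the PSE thresholds, interrupt grouping and per-GMAC link-down defaults.
 */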
developer8051e042022-04-08 13:26:36 +08002934static int mtk_hw_init(struct mtk_eth *eth, u32 type)
developerfd40db22021-04-29 10:08:25 +08002935{
developer8051e042022-04-08 13:26:36 +08002936 int i, ret = 0;
developerfd40db22021-04-29 10:08:25 +08002937
developer8051e042022-04-08 13:26:36 +08002938 pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
2939 atomic_read(&reset_lock), atomic_read(&force));
developerfd40db22021-04-29 10:08:25 +08002940
developer8051e042022-04-08 13:26:36 +08002941 if (atomic_read(&reset_lock) == 0) {
2942 if (test_and_set_bit(MTK_HW_INIT, &eth->state))
2943 return 0;
developerfd40db22021-04-29 10:08:25 +08002944
developer8051e042022-04-08 13:26:36 +08002945 pm_runtime_enable(eth->dev);
2946 pm_runtime_get_sync(eth->dev);
2947
2948 ret = mtk_clk_enable(eth);
2949 if (ret)
2950 goto err_disable_pm;
2951 }
developerfd40db22021-04-29 10:08:25 +08002952
2953 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2954 ret = device_reset(eth->dev);
2955 if (ret) {
2956 dev_err(eth->dev, "MAC reset failed!\n");
2957 goto err_disable_pm;
2958 }
2959
2960 /* enable interrupt delay for RX */
2961 mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
2962
2963 /* disable delay and normal interrupt */
2964 mtk_tx_irq_disable(eth, ~0);
2965 mtk_rx_irq_disable(eth, ~0);
2966
2967 return 0;
2968 }
2969
developer8051e042022-04-08 13:26:36 +08002970 pr_info("[%s] execute fe %s reset\n", __func__,
2971 (type == MTK_TYPE_WARM_RESET) ? "warm" : "cold");
developer545abf02021-07-15 17:47:01 +08002972
developer8051e042022-04-08 13:26:36 +08002973 if (type == MTK_TYPE_WARM_RESET)
2974 mtk_eth_warm_reset(eth);
developer545abf02021-07-15 17:47:01 +08002975 else
developer8051e042022-04-08 13:26:36 +08002976 mtk_eth_cold_reset(eth);
developer545abf02021-07-15 17:47:01 +08002977
2978 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developer545abf02021-07-15 17:47:01 +08002979 /* Set FE to PDMAv2 if necessary */
developerfd40db22021-04-29 10:08:25 +08002980 mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
developer545abf02021-07-15 17:47:01 +08002981 }
developerfd40db22021-04-29 10:08:25 +08002982
2983 if (eth->pctl) {
2984 /* Set GE2 driving and slew rate */
2985 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
2986
2987 /* set GE2 TDSEL */
2988 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
2989
2990 /* set GE2 TUNE */
2991 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
2992 }
2993
2994 	/* Set link down as the default for each GMAC. Its MCR is set up with a
2995 	 * more appropriate value when mtk_mac_config() is invoked.
2996 	 */
2998 for (i = 0; i < MTK_MAC_COUNT; i++)
2999 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3000
3001 /* Enable RX VLan Offloading */
developer41294e32021-05-07 16:11:23 +08003002 if (eth->soc->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
3003 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3004 else
3005 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
developerfd40db22021-04-29 10:08:25 +08003006
3007 /* enable interrupt delay for RX/TX */
3008 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
3009 mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);
3010
3011 mtk_tx_irq_disable(eth, ~0);
3012 mtk_rx_irq_disable(eth, ~0);
3013
3014 /* FE int grouping */
3015 mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
developer18f46a82021-07-20 21:08:21 +08003016 mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_PDMA_INT_GRP2);
developerfd40db22021-04-29 10:08:25 +08003017 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
developer18f46a82021-07-20 21:08:21 +08003018 mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_QDMA_INT_GRP2);
developer8051e042022-04-08 13:26:36 +08003019 mtk_w32(eth, 0x21021003, MTK_FE_INT_GRP);
developerbe971722022-05-23 13:51:05 +08003020 mtk_w32(eth, MTK_FE_INT_TSO_FAIL |
developer8051e042022-04-08 13:26:36 +08003021 MTK_FE_INT_TSO_ILLEGAL | MTK_FE_INT_TSO_ALIGN |
3022 MTK_FE_INT_RFIFO_OV | MTK_FE_INT_RFIFO_UF, MTK_FE_INT_ENABLE);
developerfd40db22021-04-29 10:08:25 +08003023
developera2bdbd52021-05-31 19:10:17 +08003024 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfef9efd2021-06-16 18:28:09 +08003025 /* PSE Free Queue Flow Control */
3026 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3027
developer459b78e2022-07-01 17:25:10 +08003028 /* PSE should not drop port8 and port9 packets from WDMA Tx */
3029 mtk_w32(eth, 0x00000300, PSE_NO_DROP_CFG);
3030
3031 /* PSE should drop p8 and p9 packets when WDMA Rx ring full*/
3032 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
developer81bcad32021-07-15 14:14:38 +08003033
developerfef9efd2021-06-16 18:28:09 +08003034 /* PSE config input queue threshold */
developerfd40db22021-04-29 10:08:25 +08003035 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3036 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3037 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3038 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3039 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3040 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3041 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
developerfd5f9152022-01-05 16:29:42 +08003042 mtk_w32(eth, 0x002a000e, PSE_IQ_REV(8));
developerfd40db22021-04-29 10:08:25 +08003043
developerfef9efd2021-06-16 18:28:09 +08003044 /* PSE config output queue threshold */
developerfd40db22021-04-29 10:08:25 +08003045 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3046 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3047 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3048 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3049 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3050 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3051 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3052 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
developerfef9efd2021-06-16 18:28:09 +08003053
3054 /* GDM and CDM Threshold */
3055 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3056 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3057 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3058 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3059 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3060 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
developerfd40db22021-04-29 10:08:25 +08003061 }
3062
3063 return 0;
3064
3065err_disable_pm:
3066 pm_runtime_put_sync(eth->dev);
3067 pm_runtime_disable(eth->dev);
3068
3069 return ret;
3070}
3071
3072static int mtk_hw_deinit(struct mtk_eth *eth)
3073{
3074 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
3075 return 0;
3076
3077 mtk_clk_disable(eth);
3078
3079 pm_runtime_put_sync(eth->dev);
3080 pm_runtime_disable(eth->dev);
3081
3082 return 0;
3083}
3084
3085static int __init mtk_init(struct net_device *dev)
3086{
3087 struct mtk_mac *mac = netdev_priv(dev);
3088 struct mtk_eth *eth = mac->hw;
3089 const char *mac_addr;
3090
3091 mac_addr = of_get_mac_address(mac->of_node);
3092 if (!IS_ERR(mac_addr))
3093 ether_addr_copy(dev->dev_addr, mac_addr);
3094
3095 /* If the mac address is invalid, use random mac address */
3096 if (!is_valid_ether_addr(dev->dev_addr)) {
3097 eth_hw_addr_random(dev);
3098 dev_err(eth->dev, "generated random MAC address %pM\n",
3099 dev->dev_addr);
3100 }
3101
3102 return 0;
3103}
3104
3105static void mtk_uninit(struct net_device *dev)
3106{
3107 struct mtk_mac *mac = netdev_priv(dev);
3108 struct mtk_eth *eth = mac->hw;
3109
3110 phylink_disconnect_phy(mac->phylink);
3111 mtk_tx_irq_disable(eth, ~0);
3112 mtk_rx_irq_disable(eth, ~0);
3113}
3114
3115static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3116{
3117 struct mtk_mac *mac = netdev_priv(dev);
3118
3119 switch (cmd) {
3120 case SIOCGMIIPHY:
3121 case SIOCGMIIREG:
3122 case SIOCSMIIREG:
3123 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
3124 default:
3125 		/* by default, invoke the mtk_eth_dbg handler */
3126 return mtk_do_priv_ioctl(dev, ifr, cmd);
3127 break;
3128 }
3129
3130 return -EOPNOTSUPP;
3131}
3132
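/* Deferred FE/SER recovery, scheduled e.g. from mtk_tx_timeout(). Rough
 * sequence: force the FE ports down, prepare the PPE/FE blocks for reset,
 * notify Wi-Fi via MTK_FE_START_RESET and wait for its SER handling, stop
 * every netdev, run mtk_hw_init(MTK_TYPE_WARM_RESET), reopen the devices
 * and finally signal MTK_FE_RESET_NAT_DONE and MTK_FE_RESET_DONE.
 */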
3133static void mtk_pending_work(struct work_struct *work)
3134{
3135 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
developer8051e042022-04-08 13:26:36 +08003136 struct device_node *phy_node = NULL;
3137 struct mtk_mac *mac = NULL;
3138 int err, i = 0;
developerfd40db22021-04-29 10:08:25 +08003139 unsigned long restart = 0;
developer8051e042022-04-08 13:26:36 +08003140 u32 val = 0;
3141
3142 atomic_inc(&reset_lock);
3143 val = mtk_r32(eth, MTK_FE_INT_STATUS);
3144 if (!mtk_check_reset_event(eth, val)) {
3145 atomic_dec(&reset_lock);
3146 pr_info("[%s] No need to do FE reset !\n", __func__);
3147 return;
3148 }
developerfd40db22021-04-29 10:08:25 +08003149
3150 rtnl_lock();
3151
developer8051e042022-04-08 13:26:36 +08003152	/* Disable FE P3 and P4 */
3153 val = mtk_r32(eth, MTK_FE_GLO_CFG);
3154 val |= MTK_FE_LINK_DOWN_P3;
3155 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3156 val |= MTK_FE_LINK_DOWN_P4;
3157 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3158
3159 /* Adjust PPE configurations to prepare for reset */
3160 mtk_prepare_reset_ppe(eth, 0);
3161 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3162 mtk_prepare_reset_ppe(eth, 1);
3163
3164 /* Adjust FE configurations to prepare for reset */
3165 mtk_prepare_reset_fe(eth);
3166
3167 /* Trigger Wifi SER reset */
3168 call_netdevice_notifiers(MTK_FE_START_RESET, eth->netdev[0]);
3169 rtnl_unlock();
3170 wait_for_completion_timeout(&wait_ser_done, 5000);
3171 rtnl_lock();
developerfd40db22021-04-29 10:08:25 +08003172
3173 while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
3174 cpu_relax();
3175
developer8051e042022-04-08 13:26:36 +08003176 del_timer_sync(&eth->mtk_dma_monitor_timer);
3177 pr_info("[%s] mtk_stop starts !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003178 /* stop all devices to make sure that dma is properly shut down */
3179 for (i = 0; i < MTK_MAC_COUNT; i++) {
3180 if (!eth->netdev[i])
3181 continue;
3182 mtk_stop(eth->netdev[i]);
3183 __set_bit(i, &restart);
3184 }
developer8051e042022-04-08 13:26:36 +08003185 pr_info("[%s] mtk_stop ends !\n", __func__);
3186 mdelay(15);
developerfd40db22021-04-29 10:08:25 +08003187
3188 if (eth->dev->pins)
3189 pinctrl_select_state(eth->dev->pins->p,
3190 eth->dev->pins->default_state);
developer8051e042022-04-08 13:26:36 +08003191
3192 pr_info("[%s] mtk_hw_init starts !\n", __func__);
3193 mtk_hw_init(eth, MTK_TYPE_WARM_RESET);
3194 pr_info("[%s] mtk_hw_init ends !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003195
3196 /* restart DMA and enable IRQs */
3197 for (i = 0; i < MTK_MAC_COUNT; i++) {
3198 if (!test_bit(i, &restart))
3199 continue;
3200 err = mtk_open(eth->netdev[i]);
3201 if (err) {
3202 netif_alert(eth, ifup, eth->netdev[i],
3203 "Driver up/down cycle failed, closing device.\n");
3204 dev_close(eth->netdev[i]);
3205 }
3206 }
3207
developer8051e042022-04-08 13:26:36 +08003208 /* Set KA tick select */
3209 mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0, MTK_PPE_TB_CFG(0));
3210 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3211 mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0, MTK_PPE_TB_CFG(1));
3212
3213 	/* Enable FE P3 and P4 */
3214 val = mtk_r32(eth, MTK_FE_GLO_CFG);
3215 val &= ~MTK_FE_LINK_DOWN_P3;
3216 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3217 val &= ~MTK_FE_LINK_DOWN_P4;
3218 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3219
3220 /* Power up sgmii */
3221 for (i = 0; i < MTK_MAC_COUNT; i++) {
3222 mac = netdev_priv(eth->netdev[i]);
3223 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer793f7b42022-05-20 13:54:51 +08003224 if (!phy_node && eth->sgmii->regmap[i]) {
developer8051e042022-04-08 13:26:36 +08003225 mtk_gmac_sgmii_path_setup(eth, i);
3226 regmap_write(eth->sgmii->regmap[i], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
3227 }
3228 }
3229
3230 call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE, eth->netdev[0]);
3231 pr_info("[%s] HNAT reset done !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003232
developer8051e042022-04-08 13:26:36 +08003233 call_netdevice_notifiers(MTK_FE_RESET_DONE, eth->netdev[0]);
3234 pr_info("[%s] WiFi SER reset done !\n", __func__);
3235
3236 atomic_dec(&reset_lock);
3237 if (atomic_read(&force) > 0)
3238 atomic_dec(&force);
3239
3240 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
3241 eth->mtk_dma_monitor_timer.expires = jiffies;
3242 add_timer(&eth->mtk_dma_monitor_timer);
developerfd40db22021-04-29 10:08:25 +08003243 clear_bit_unlock(MTK_RESETTING, &eth->state);
3244
3245 rtnl_unlock();
3246}
3247
3248static int mtk_free_dev(struct mtk_eth *eth)
3249{
3250 int i;
3251
3252 for (i = 0; i < MTK_MAC_COUNT; i++) {
3253 if (!eth->netdev[i])
3254 continue;
3255 free_netdev(eth->netdev[i]);
3256 }
3257
3258 return 0;
3259}
3260
3261static int mtk_unreg_dev(struct mtk_eth *eth)
3262{
3263 int i;
3264
3265 for (i = 0; i < MTK_MAC_COUNT; i++) {
3266 if (!eth->netdev[i])
3267 continue;
3268 unregister_netdev(eth->netdev[i]);
3269 }
3270
3271 return 0;
3272}
3273
3274static int mtk_cleanup(struct mtk_eth *eth)
3275{
3276 mtk_unreg_dev(eth);
3277 mtk_free_dev(eth);
3278 cancel_work_sync(&eth->pending_work);
3279
3280 return 0;
3281}
3282
3283static int mtk_get_link_ksettings(struct net_device *ndev,
3284 struct ethtool_link_ksettings *cmd)
3285{
3286 struct mtk_mac *mac = netdev_priv(ndev);
3287
3288 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3289 return -EBUSY;
3290
3291 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
3292}
3293
3294static int mtk_set_link_ksettings(struct net_device *ndev,
3295 const struct ethtool_link_ksettings *cmd)
3296{
3297 struct mtk_mac *mac = netdev_priv(ndev);
3298
3299 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3300 return -EBUSY;
3301
3302 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
3303}
3304
3305static void mtk_get_drvinfo(struct net_device *dev,
3306 struct ethtool_drvinfo *info)
3307{
3308 struct mtk_mac *mac = netdev_priv(dev);
3309
3310 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
3311 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
3312 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
3313}
3314
3315static u32 mtk_get_msglevel(struct net_device *dev)
3316{
3317 struct mtk_mac *mac = netdev_priv(dev);
3318
3319 return mac->hw->msg_enable;
3320}
3321
3322static void mtk_set_msglevel(struct net_device *dev, u32 value)
3323{
3324 struct mtk_mac *mac = netdev_priv(dev);
3325
3326 mac->hw->msg_enable = value;
3327}
3328
3329static int mtk_nway_reset(struct net_device *dev)
3330{
3331 struct mtk_mac *mac = netdev_priv(dev);
3332
3333 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3334 return -EBUSY;
3335
3336 if (!mac->phylink)
3337 return -ENOTSUPP;
3338
3339 return phylink_ethtool_nway_reset(mac->phylink);
3340}
3341
3342static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3343{
3344 int i;
3345
3346 switch (stringset) {
3347 case ETH_SS_STATS:
3348 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
3349 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
3350 data += ETH_GSTRING_LEN;
3351 }
3352 break;
3353 }
3354}
3355
3356static int mtk_get_sset_count(struct net_device *dev, int sset)
3357{
3358 switch (sset) {
3359 case ETH_SS_STATS:
3360 return ARRAY_SIZE(mtk_ethtool_stats);
3361 default:
3362 return -EOPNOTSUPP;
3363 }
3364}
3365
3366static void mtk_get_ethtool_stats(struct net_device *dev,
3367 struct ethtool_stats *stats, u64 *data)
3368{
3369 struct mtk_mac *mac = netdev_priv(dev);
3370 struct mtk_hw_stats *hwstats = mac->hw_stats;
3371 u64 *data_src, *data_dst;
3372 unsigned int start;
3373 int i;
3374
3375 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3376 return;
3377
3378 if (netif_running(dev) && netif_device_present(dev)) {
3379 if (spin_trylock_bh(&hwstats->stats_lock)) {
3380 mtk_stats_update_mac(mac);
3381 spin_unlock_bh(&hwstats->stats_lock);
3382 }
3383 }
3384
3385 data_src = (u64 *)hwstats;
3386
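	/* Copy the counters out under the u64_stats seqcount and retry if a
	 * concurrent writer updated them while we were reading.
	 */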
3387 do {
3388 data_dst = data;
3389 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
3390
3391 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
3392 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
3393 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
3394}
3395
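/* RX flow classification is only exposed when hardware LRO is available;
 * the ethtool rules are backed by the HW LRO IP address filters.
 */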
3396static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
3397 u32 *rule_locs)
3398{
3399 int ret = -EOPNOTSUPP;
3400
3401 switch (cmd->cmd) {
3402 case ETHTOOL_GRXRINGS:
3403 if (dev->hw_features & NETIF_F_LRO) {
3404 cmd->data = MTK_MAX_RX_RING_NUM;
3405 ret = 0;
3406 }
3407 break;
3408 case ETHTOOL_GRXCLSRLCNT:
3409 if (dev->hw_features & NETIF_F_LRO) {
3410 struct mtk_mac *mac = netdev_priv(dev);
3411
3412 cmd->rule_cnt = mac->hwlro_ip_cnt;
3413 ret = 0;
3414 }
3415 break;
3416 case ETHTOOL_GRXCLSRULE:
3417 if (dev->hw_features & NETIF_F_LRO)
3418 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
3419 break;
3420 case ETHTOOL_GRXCLSRLALL:
3421 if (dev->hw_features & NETIF_F_LRO)
3422 ret = mtk_hwlro_get_fdir_all(dev, cmd,
3423 rule_locs);
3424 break;
3425 default:
3426 break;
3427 }
3428
3429 return ret;
3430}
3431
3432static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3433{
3434 int ret = -EOPNOTSUPP;
3435
3436 switch (cmd->cmd) {
3437 case ETHTOOL_SRXCLSRLINS:
3438 if (dev->hw_features & NETIF_F_LRO)
3439 ret = mtk_hwlro_add_ipaddr(dev, cmd);
3440 break;
3441 case ETHTOOL_SRXCLSRLDEL:
3442 if (dev->hw_features & NETIF_F_LRO)
3443 ret = mtk_hwlro_del_ipaddr(dev, cmd);
3444 break;
3445 default:
3446 break;
3447 }
3448
3449 return ret;
3450}
3451
developer6c5cbb52022-08-12 11:37:45 +08003452static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
3453{
3454 struct mtk_mac *mac = netdev_priv(dev);
3455
3456 phylink_ethtool_get_pauseparam(mac->phylink, pause);
3457}
3458
3459static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
3460{
3461 struct mtk_mac *mac = netdev_priv(dev);
3462
3463 return phylink_ethtool_set_pauseparam(mac->phylink, pause);
3464}
3465
developerfd40db22021-04-29 10:08:25 +08003466static const struct ethtool_ops mtk_ethtool_ops = {
3467 .get_link_ksettings = mtk_get_link_ksettings,
3468 .set_link_ksettings = mtk_set_link_ksettings,
3469 .get_drvinfo = mtk_get_drvinfo,
3470 .get_msglevel = mtk_get_msglevel,
3471 .set_msglevel = mtk_set_msglevel,
3472 .nway_reset = mtk_nway_reset,
3473 .get_link = ethtool_op_get_link,
3474 .get_strings = mtk_get_strings,
3475 .get_sset_count = mtk_get_sset_count,
3476 .get_ethtool_stats = mtk_get_ethtool_stats,
3477 .get_rxnfc = mtk_get_rxnfc,
3478 .set_rxnfc = mtk_set_rxnfc,
developer6c5cbb52022-08-12 11:37:45 +08003479 .get_pauseparam = mtk_get_pauseparam,
3480 .set_pauseparam = mtk_set_pauseparam,
developerfd40db22021-04-29 10:08:25 +08003481};
3482
3483static const struct net_device_ops mtk_netdev_ops = {
3484 .ndo_init = mtk_init,
3485 .ndo_uninit = mtk_uninit,
3486 .ndo_open = mtk_open,
3487 .ndo_stop = mtk_stop,
3488 .ndo_start_xmit = mtk_start_xmit,
3489 .ndo_set_mac_address = mtk_set_mac_address,
3490 .ndo_validate_addr = eth_validate_addr,
3491 .ndo_do_ioctl = mtk_do_ioctl,
3492 .ndo_tx_timeout = mtk_tx_timeout,
3493 .ndo_get_stats64 = mtk_get_stats64,
3494 .ndo_fix_features = mtk_fix_features,
3495 .ndo_set_features = mtk_set_features,
3496#ifdef CONFIG_NET_POLL_CONTROLLER
3497 .ndo_poll_controller = mtk_poll_controller,
3498#endif
3499};
3500
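/* Parse one "mediatek,eth-mac" child node: allocate the net_device, set up
 * the per-MAC hardware counters and create the phylink instance.
 */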
3501static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
3502{
3503 const __be32 *_id = of_get_property(np, "reg", NULL);
3504 struct phylink *phylink;
3505 int phy_mode, id, err;
3506 struct mtk_mac *mac;
developera2613e62022-07-01 18:29:37 +08003507 struct mtk_phylink_priv *phylink_priv;
3508 struct fwnode_handle *fixed_node;
3509 struct gpio_desc *desc;
developerfd40db22021-04-29 10:08:25 +08003510
3511 if (!_id) {
3512 dev_err(eth->dev, "missing mac id\n");
3513 return -EINVAL;
3514 }
3515
3516 id = be32_to_cpup(_id);
developerfb556ca2021-10-13 10:52:09 +08003517 if (id < 0 || id >= MTK_MAC_COUNT) {
developerfd40db22021-04-29 10:08:25 +08003518 dev_err(eth->dev, "%d is not a valid mac id\n", id);
3519 return -EINVAL;
3520 }
3521
3522 if (eth->netdev[id]) {
3523 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
3524 return -EINVAL;
3525 }
3526
3527 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
3528 if (!eth->netdev[id]) {
3529 dev_err(eth->dev, "alloc_etherdev failed\n");
3530 return -ENOMEM;
3531 }
3532 mac = netdev_priv(eth->netdev[id]);
3533 eth->mac[id] = mac;
3534 mac->id = id;
3535 mac->hw = eth;
3536 mac->of_node = np;
3537
3538 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
3539 mac->hwlro_ip_cnt = 0;
3540
3541 mac->hw_stats = devm_kzalloc(eth->dev,
3542 sizeof(*mac->hw_stats),
3543 GFP_KERNEL);
3544 if (!mac->hw_stats) {
3545 dev_err(eth->dev, "failed to allocate counter memory\n");
3546 err = -ENOMEM;
3547 goto free_netdev;
3548 }
3549 spin_lock_init(&mac->hw_stats->stats_lock);
3550 u64_stats_init(&mac->hw_stats->syncp);
3551 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
3552
3553 /* phylink create */
3554 phy_mode = of_get_phy_mode(np);
3555 if (phy_mode < 0) {
3556 dev_err(eth->dev, "incorrect phy-mode\n");
3557 err = -EINVAL;
3558 goto free_netdev;
3559 }
3560
3561 /* mac config is not set */
3562 mac->interface = PHY_INTERFACE_MODE_NA;
3563 mac->mode = MLO_AN_PHY;
3564 mac->speed = SPEED_UNKNOWN;
3565
3566 mac->phylink_config.dev = &eth->netdev[id]->dev;
3567 mac->phylink_config.type = PHYLINK_NETDEV;
3568
3569 phylink = phylink_create(&mac->phylink_config,
3570 of_fwnode_handle(mac->of_node),
3571 phy_mode, &mtk_phylink_ops);
3572 if (IS_ERR(phylink)) {
3573 err = PTR_ERR(phylink);
3574 goto free_netdev;
3575 }
3576
3577 mac->phylink = phylink;
3578
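	/* Optionally monitor a "link-gpio" under the fixed-link node so link
	 * changes on a fixed link can be reported via GPIO interrupt.
	 */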
developera2613e62022-07-01 18:29:37 +08003579 fixed_node = fwnode_get_named_child_node(of_fwnode_handle(mac->of_node),
3580 "fixed-link");
3581 if (fixed_node) {
3582 desc = fwnode_get_named_gpiod(fixed_node, "link-gpio",
3583 0, GPIOD_IN, "?");
3584 if (!IS_ERR(desc)) {
3585 struct device_node *phy_np;
3586 const char *label;
3587 int irq, phyaddr;
3588
3589 phylink_priv = &mac->phylink_priv;
3590
3591 phylink_priv->desc = desc;
3592 phylink_priv->id = id;
3593 phylink_priv->link = -1;
3594
3595 irq = gpiod_to_irq(desc);
3596 if (irq > 0) {
3597 devm_request_irq(eth->dev, irq, mtk_handle_irq_fixed_link,
3598 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
3599 "ethernet:fixed link", mac);
3600 }
3601
3602 if (!of_property_read_string(to_of_node(fixed_node), "label", &label))
3603 strcpy(phylink_priv->label, label);
3604
3605 phy_np = of_parse_phandle(to_of_node(fixed_node), "phy-handle", 0);
3606 if (phy_np) {
3607 if (!of_property_read_u32(phy_np, "reg", &phyaddr))
3608 phylink_priv->phyaddr = phyaddr;
3609 }
3610 }
3611 fwnode_handle_put(fixed_node);
3612 }
3613
developerfd40db22021-04-29 10:08:25 +08003614 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
3615 eth->netdev[id]->watchdog_timeo = 5 * HZ;
3616 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
3617 eth->netdev[id]->base_addr = (unsigned long)eth->base;
3618
3619 eth->netdev[id]->hw_features = eth->soc->hw_features;
3620 if (eth->hwlro)
3621 eth->netdev[id]->hw_features |= NETIF_F_LRO;
3622
3623 eth->netdev[id]->vlan_features = eth->soc->hw_features &
3624 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
3625 eth->netdev[id]->features |= eth->soc->hw_features;
3626 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
3627
3628 eth->netdev[id]->irq = eth->irq[0];
3629 eth->netdev[id]->dev.of_node = np;
3630
3631 return 0;
3632
3633free_netdev:
3634 free_netdev(eth->netdev[id]);
3635 return err;
3636}
3637
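/* Probe: map the frame engine registers, look up the syscon regmaps, clocks
 * and interrupts, initialise the hardware, then create one net_device per
 * "mediatek,eth-mac" child node and register the NAPI contexts.
 */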
3638static int mtk_probe(struct platform_device *pdev)
3639{
3640 struct device_node *mac_np;
3641 struct mtk_eth *eth;
3642 int err, i;
3643
3644 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
3645 if (!eth)
3646 return -ENOMEM;
3647
3648 eth->soc = of_device_get_match_data(&pdev->dev);
3649
3650 eth->dev = &pdev->dev;
3651 eth->base = devm_platform_ioremap_resource(pdev, 0);
3652 if (IS_ERR(eth->base))
3653 return PTR_ERR(eth->base);
3654
3655	if (eth->soc->has_sram) {
3656 struct resource *res;
3657 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
developer4c32b7a2021-11-13 16:46:43 +08003658 if (unlikely(!res))
3659 return -EINVAL;
developerfd40db22021-04-29 10:08:25 +08003660 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
3661 }
3662
3663 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3664 eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
3665 eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
3666 } else {
3667 eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
3668 eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
3669 }
3670
3671 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3672 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
3673 eth->ip_align = NET_IP_ALIGN;
3674 } else {
developera2bdbd52021-05-31 19:10:17 +08003675 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developerfd40db22021-04-29 10:08:25 +08003676 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_V2;
3677 else
3678 eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
3679 }
3680
3681 spin_lock_init(&eth->page_lock);
3682 spin_lock_init(&eth->tx_irq_lock);
3683 spin_lock_init(&eth->rx_irq_lock);
developerd82e8372022-02-09 15:00:09 +08003684 spin_lock_init(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +08003685
3686 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3687 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3688 "mediatek,ethsys");
3689 if (IS_ERR(eth->ethsys)) {
3690 dev_err(&pdev->dev, "no ethsys regmap found\n");
3691 return PTR_ERR(eth->ethsys);
3692 }
3693 }
3694
3695 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
3696 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3697 "mediatek,infracfg");
3698 if (IS_ERR(eth->infra)) {
3699 dev_err(&pdev->dev, "no infracfg regmap found\n");
3700 return PTR_ERR(eth->infra);
3701 }
3702 }
3703
3704 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
3705 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
3706 GFP_KERNEL);
3707 if (!eth->sgmii)
3708 return -ENOMEM;
3709
3710 err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
3711 eth->soc->ana_rgc3);
3712
3713 if (err)
3714 return err;
3715 }
3716
3717 if (eth->soc->required_pctl) {
3718 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3719 "mediatek,pctl");
3720 if (IS_ERR(eth->pctl)) {
3721 dev_err(&pdev->dev, "no pctl regmap found\n");
3722 return PTR_ERR(eth->pctl);
3723 }
3724 }
3725
developer18f46a82021-07-20 21:08:21 +08003726 for (i = 0; i < MTK_MAX_IRQ_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08003727 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
3728 eth->irq[i] = eth->irq[0];
3729 else
3730 eth->irq[i] = platform_get_irq(pdev, i);
3731 if (eth->irq[i] < 0) {
3732 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
3733 return -ENXIO;
3734 }
3735 }
3736
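	/* Look up the SoC clocks by name; a missing clock is only fatal if it
	 * is set in this SoC's required_clks bitmap.
	 */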
3737 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
3738 eth->clks[i] = devm_clk_get(eth->dev,
3739 mtk_clks_source_name[i]);
3740 if (IS_ERR(eth->clks[i])) {
3741 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
3742 return -EPROBE_DEFER;
3743 if (eth->soc->required_clks & BIT(i)) {
3744 dev_err(&pdev->dev, "clock %s not found\n",
3745 mtk_clks_source_name[i]);
3746 return -EINVAL;
3747 }
3748 eth->clks[i] = NULL;
3749 }
3750 }
3751
3752 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
3753 INIT_WORK(&eth->pending_work, mtk_pending_work);
3754
developer8051e042022-04-08 13:26:36 +08003755 err = mtk_hw_init(eth, MTK_TYPE_COLD_RESET);
developerfd40db22021-04-29 10:08:25 +08003756 if (err)
3757 return err;
3758
3759 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
3760
3761 for_each_child_of_node(pdev->dev.of_node, mac_np) {
3762 if (!of_device_is_compatible(mac_np,
3763 "mediatek,eth-mac"))
3764 continue;
3765
3766 if (!of_device_is_available(mac_np))
3767 continue;
3768
3769 err = mtk_add_mac(eth, mac_np);
3770 if (err) {
3771 of_node_put(mac_np);
3772 goto err_deinit_hw;
3773 }
3774 }
3775
developer18f46a82021-07-20 21:08:21 +08003776 err = mtk_napi_init(eth);
3777 if (err)
3778 goto err_free_dev;
3779
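	/* Request interrupts: one shared line on MTK_SHARED_INT SoCs,
	 * otherwise dedicated TX/RX lines plus per-RSS-ring or FE interrupts
	 * where available.
	 */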
developerfd40db22021-04-29 10:08:25 +08003780 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
3781 err = devm_request_irq(eth->dev, eth->irq[0],
3782 mtk_handle_irq, 0,
3783 dev_name(eth->dev), eth);
3784 } else {
3785 err = devm_request_irq(eth->dev, eth->irq[1],
3786 mtk_handle_irq_tx, 0,
3787 dev_name(eth->dev), eth);
3788 if (err)
3789 goto err_free_dev;
3790
3791 err = devm_request_irq(eth->dev, eth->irq[2],
3792 mtk_handle_irq_rx, 0,
developer18f46a82021-07-20 21:08:21 +08003793 dev_name(eth->dev), &eth->rx_napi[0]);
3794 if (err)
3795 goto err_free_dev;
3796
developer793f7b42022-05-20 13:54:51 +08003797 if (MTK_MAX_IRQ_NUM > 3) {
3798 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3799 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3800 err = devm_request_irq(eth->dev,
3801 eth->irq[2 + i],
3802 mtk_handle_irq_rx, 0,
3803 dev_name(eth->dev),
3804 &eth->rx_napi[i]);
3805 if (err)
3806 goto err_free_dev;
3807 }
3808 } else {
3809 err = devm_request_irq(eth->dev, eth->irq[3],
3810 mtk_handle_fe_irq, 0,
3811 dev_name(eth->dev), eth);
developer18f46a82021-07-20 21:08:21 +08003812 if (err)
3813 goto err_free_dev;
3814 }
3815 }
developerfd40db22021-04-29 10:08:25 +08003816 }
developer8051e042022-04-08 13:26:36 +08003817
developerfd40db22021-04-29 10:08:25 +08003818 if (err)
3819 goto err_free_dev;
3820
3821 /* No MT7628/88 support yet */
3822 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3823 err = mtk_mdio_init(eth);
3824 if (err)
3825 goto err_free_dev;
3826 }
3827
3828 for (i = 0; i < MTK_MAX_DEVS; i++) {
3829 if (!eth->netdev[i])
3830 continue;
3831
3832 err = register_netdev(eth->netdev[i]);
3833 if (err) {
3834 dev_err(eth->dev, "error bringing up device\n");
3835 goto err_deinit_mdio;
3836 } else
3837 netif_info(eth, probe, eth->netdev[i],
3838 "mediatek frame engine at 0x%08lx, irq %d\n",
3839 eth->netdev[i]->base_addr, eth->irq[0]);
3840 }
3841
3842 /* we run 2 devices on the same DMA ring so we need a dummy device
3843 * for NAPI to work
3844 */
3845 init_dummy_netdev(&eth->dummy_dev);
3846 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
3847 MTK_NAPI_WEIGHT);
developer18f46a82021-07-20 21:08:21 +08003848 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[0].napi, mtk_napi_rx,
developerfd40db22021-04-29 10:08:25 +08003849 MTK_NAPI_WEIGHT);
3850
developer18f46a82021-07-20 21:08:21 +08003851 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3852 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
3853 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[i].napi,
3854 mtk_napi_rx, MTK_NAPI_WEIGHT);
3855 }
3856
developerfd40db22021-04-29 10:08:25 +08003857 mtketh_debugfs_init(eth);
3858 debug_proc_init(eth);
3859
3860 platform_set_drvdata(pdev, eth);
3861
developer8051e042022-04-08 13:26:36 +08003862 register_netdevice_notifier(&mtk_eth_netdevice_nb);
developer793f7b42022-05-20 13:54:51 +08003863#if defined(CONFIG_MEDIATEK_NETSYS_V2)
developer8051e042022-04-08 13:26:36 +08003864 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
3865 eth->mtk_dma_monitor_timer.expires = jiffies;
3866 add_timer(&eth->mtk_dma_monitor_timer);
developer793f7b42022-05-20 13:54:51 +08003867#endif
developer8051e042022-04-08 13:26:36 +08003868
developerfd40db22021-04-29 10:08:25 +08003869 return 0;
3870
3871err_deinit_mdio:
3872 mtk_mdio_cleanup(eth);
3873err_free_dev:
3874 mtk_free_dev(eth);
3875err_deinit_hw:
3876 mtk_hw_deinit(eth);
3877
3878 return err;
3879}
3880
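/* Remove: stop every MAC so DMA is quiesced, then tear down the hardware,
 * NAPI contexts, net_devices and the MDIO bus.
 */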
3881static int mtk_remove(struct platform_device *pdev)
3882{
3883 struct mtk_eth *eth = platform_get_drvdata(pdev);
3884 struct mtk_mac *mac;
3885 int i;
3886
3887 /* stop all devices to make sure that dma is properly shut down */
3888 for (i = 0; i < MTK_MAC_COUNT; i++) {
3889 if (!eth->netdev[i])
3890 continue;
3891 mtk_stop(eth->netdev[i]);
3892 mac = netdev_priv(eth->netdev[i]);
3893 phylink_disconnect_phy(mac->phylink);
3894 }
3895
3896 mtk_hw_deinit(eth);
3897
3898 netif_napi_del(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08003899 netif_napi_del(&eth->rx_napi[0].napi);
3900
3901 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3902 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
3903 netif_napi_del(&eth->rx_napi[i].napi);
3904 }
3905
developerfd40db22021-04-29 10:08:25 +08003906 mtk_cleanup(eth);
3907 mtk_mdio_cleanup(eth);
developer8051e042022-04-08 13:26:36 +08003908 unregister_netdevice_notifier(&mtk_eth_netdevice_nb);
3909 del_timer_sync(&eth->mtk_dma_monitor_timer);
developerfd40db22021-04-29 10:08:25 +08003910
3911 return 0;
3912}
3913
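/* Per-SoC capability, clock and DMA descriptor layout data, selected through
 * the of_device_id table below.
 */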
3914static const struct mtk_soc_data mt2701_data = {
3915 .caps = MT7623_CAPS | MTK_HWLRO,
3916 .hw_features = MTK_HW_FEATURES,
3917 .required_clks = MT7623_CLKS_BITMAP,
3918 .required_pctl = true,
3919 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003920 .txrx = {
3921 .txd_size = sizeof(struct mtk_tx_dma),
3922 .rxd_size = sizeof(struct mtk_rx_dma),
3923 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3924 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3925 },
developerfd40db22021-04-29 10:08:25 +08003926};
3927
3928static const struct mtk_soc_data mt7621_data = {
3929 .caps = MT7621_CAPS,
3930 .hw_features = MTK_HW_FEATURES,
3931 .required_clks = MT7621_CLKS_BITMAP,
3932 .required_pctl = false,
3933 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003934 .txrx = {
3935 .txd_size = sizeof(struct mtk_tx_dma),
3936 .rxd_size = sizeof(struct mtk_rx_dma),
3937 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3938 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3939 },
developerfd40db22021-04-29 10:08:25 +08003940};
3941
3942static const struct mtk_soc_data mt7622_data = {
3943 .ana_rgc3 = 0x2028,
3944 .caps = MT7622_CAPS | MTK_HWLRO,
3945 .hw_features = MTK_HW_FEATURES,
3946 .required_clks = MT7622_CLKS_BITMAP,
3947 .required_pctl = false,
3948 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003949 .txrx = {
3950 .txd_size = sizeof(struct mtk_tx_dma),
3951 .rxd_size = sizeof(struct mtk_rx_dma),
3952 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3953 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3954 },
developerfd40db22021-04-29 10:08:25 +08003955};
3956
3957static const struct mtk_soc_data mt7623_data = {
3958 .caps = MT7623_CAPS | MTK_HWLRO,
3959 .hw_features = MTK_HW_FEATURES,
3960 .required_clks = MT7623_CLKS_BITMAP,
3961 .required_pctl = true,
3962 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003963 .txrx = {
3964 .txd_size = sizeof(struct mtk_tx_dma),
3965 .rxd_size = sizeof(struct mtk_rx_dma),
3966 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3967 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3968 },
developerfd40db22021-04-29 10:08:25 +08003969};
3970
3971static const struct mtk_soc_data mt7629_data = {
3972 .ana_rgc3 = 0x128,
3973 .caps = MT7629_CAPS | MTK_HWLRO,
3974 .hw_features = MTK_HW_FEATURES,
3975 .required_clks = MT7629_CLKS_BITMAP,
3976 .required_pctl = false,
3977 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003978 .txrx = {
3979 .txd_size = sizeof(struct mtk_tx_dma),
3980 .rxd_size = sizeof(struct mtk_rx_dma),
3981 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3982 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3983 },
developerfd40db22021-04-29 10:08:25 +08003984};
3985
3986static const struct mtk_soc_data mt7986_data = {
3987 .ana_rgc3 = 0x128,
3988 .caps = MT7986_CAPS,
developercba5f4e2021-05-06 14:01:53 +08003989 .hw_features = MTK_HW_FEATURES,
developerfd40db22021-04-29 10:08:25 +08003990 .required_clks = MT7986_CLKS_BITMAP,
3991 .required_pctl = false,
3992 .has_sram = true,
developere9356982022-07-04 09:03:20 +08003993 .txrx = {
3994 .txd_size = sizeof(struct mtk_tx_dma_v2),
3995 .rxd_size = sizeof(struct mtk_rx_dma_v2),
3996 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
3997 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
3998 },
developerfd40db22021-04-29 10:08:25 +08003999};
4000
developer255bba22021-07-27 15:16:33 +08004001static const struct mtk_soc_data mt7981_data = {
4002 .ana_rgc3 = 0x128,
4003 .caps = MT7981_CAPS,
developer7377b0b2021-11-18 14:54:47 +08004004 .hw_features = MTK_HW_FEATURES,
developer255bba22021-07-27 15:16:33 +08004005 .required_clks = MT7981_CLKS_BITMAP,
4006 .required_pctl = false,
4007 .has_sram = true,
developere9356982022-07-04 09:03:20 +08004008 .txrx = {
4009 .txd_size = sizeof(struct mtk_tx_dma_v2),
4010 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4011 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4012 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
4013 },
developer255bba22021-07-27 15:16:33 +08004014};
4015
developerfd40db22021-04-29 10:08:25 +08004016static const struct mtk_soc_data rt5350_data = {
4017 .caps = MT7628_CAPS,
4018 .hw_features = MTK_HW_FEATURES_MT7628,
4019 .required_clks = MT7628_CLKS_BITMAP,
4020 .required_pctl = false,
4021 .has_sram = false,
developere9356982022-07-04 09:03:20 +08004022 .txrx = {
4023 .txd_size = sizeof(struct mtk_tx_dma),
4024 .rxd_size = sizeof(struct mtk_rx_dma),
4025 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4026 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
4027 },
developerfd40db22021-04-29 10:08:25 +08004028};
4029
4030const struct of_device_id of_mtk_match[] = {
4031 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
4032 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
4033 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
4034 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
4035 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
4036 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
developer255bba22021-07-27 15:16:33 +08004037 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
developerfd40db22021-04-29 10:08:25 +08004038 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
4039 {},
4040};
4041MODULE_DEVICE_TABLE(of, of_mtk_match);
4042
4043static struct platform_driver mtk_driver = {
4044 .probe = mtk_probe,
4045 .remove = mtk_remove,
4046 .driver = {
4047 .name = "mtk_soc_eth",
4048 .of_match_table = of_mtk_match,
4049 },
4050};
4051
4052module_platform_driver(mtk_driver);
4053
4054MODULE_LICENSE("GPL");
4055MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
4056MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");