1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 *
4 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7 */
8
9#include <linux/of_device.h>
10#include <linux/of_mdio.h>
11#include <linux/of_net.h>
12#include <linux/mfd/syscon.h>
13#include <linux/regmap.h>
14#include <linux/clk.h>
15#include <linux/pm_runtime.h>
16#include <linux/if_vlan.h>
17#include <linux/reset.h>
18#include <linux/tcp.h>
19#include <linux/interrupt.h>
20#include <linux/pinctrl/devinfo.h>
21#include <linux/phylink.h>
22#include <net/dsa.h>
23
24#include "mtk_eth_soc.h"
25#include "mtk_eth_dbg.h"
26#include "mtk_eth_reset.h"
27
28#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
29#include "mtk_hnat/nf_hnat_mtk.h"
30#endif
31
32static int mtk_msg_level = -1;
33atomic_t reset_lock = ATOMIC_INIT(0);
34atomic_t force = ATOMIC_INIT(0);
35
36module_param_named(msg_level, mtk_msg_level, int, 0);
37MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
38DECLARE_COMPLETION(wait_ser_done);
39
40#define MTK_ETHTOOL_STAT(x) { #x, \
41 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
42
43/* strings used by ethtool */
44static const struct mtk_ethtool_stats {
45 char str[ETH_GSTRING_LEN];
46 u32 offset;
47} mtk_ethtool_stats[] = {
48 MTK_ETHTOOL_STAT(tx_bytes),
49 MTK_ETHTOOL_STAT(tx_packets),
50 MTK_ETHTOOL_STAT(tx_skip),
51 MTK_ETHTOOL_STAT(tx_collisions),
52 MTK_ETHTOOL_STAT(rx_bytes),
53 MTK_ETHTOOL_STAT(rx_packets),
54 MTK_ETHTOOL_STAT(rx_overflow),
55 MTK_ETHTOOL_STAT(rx_fcs_errors),
56 MTK_ETHTOOL_STAT(rx_short_errors),
57 MTK_ETHTOOL_STAT(rx_long_errors),
58 MTK_ETHTOOL_STAT(rx_checksum_errors),
59 MTK_ETHTOOL_STAT(rx_flow_control_packets),
60};
61
62static const char * const mtk_clks_source_name[] = {
63 "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
64 "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
65 "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
66 "sgmii_ck", "eth2pll", "wocpu0", "wocpu1",
67};
68
69void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
70{
71 __raw_writel(val, eth->base + reg);
72}
73
74u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
75{
76 return __raw_readl(eth->base + reg);
77}
78
79u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
80{
81 u32 val;
82
83 val = mtk_r32(eth, reg);
84 val &= ~mask;
85 val |= set;
86 mtk_w32(eth, val, reg);
87 return reg;
88}
89
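/* Poll MTK_PHY_IAC until the PHY_IAC_ACCESS busy bit clears; gives up after
 * PHY_IAC_TIMEOUT and reports an MDIO timeout.
 */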
90static int mtk_mdio_busy_wait(struct mtk_eth *eth)
91{
92 unsigned long t_start = jiffies;
93
94 while (1) {
95 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
96 return 0;
97 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
98 break;
99 cond_resched();
100 }
101
102 dev_err(eth->dev, "mdio: MDIO timeout\n");
103 return -1;
104}
105
106u32 _mtk_mdio_write(struct mtk_eth *eth, u16 phy_addr,
107 u16 phy_register, u16 write_data)
108{
109 if (mtk_mdio_busy_wait(eth))
110 return -1;
111
112 write_data &= 0xffff;
113
114 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
115 ((phy_register & 0x1f) << PHY_IAC_REG_SHIFT) |
116 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
117 MTK_PHY_IAC);
118
119 if (mtk_mdio_busy_wait(eth))
120 return -1;
121
122 return 0;
123}
124
125u32 _mtk_mdio_read(struct mtk_eth *eth, u16 phy_addr, u16 phy_reg)
126{
127 u32 d;
128
129 if (mtk_mdio_busy_wait(eth))
130 return 0xffff;
131
132 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
133 ((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
134 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
135 MTK_PHY_IAC);
136
137 if (mtk_mdio_busy_wait(eth))
138 return 0xffff;
139
140 d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
141
142 return d;
143}
144
145static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
146 int phy_reg, u16 val)
147{
148 struct mtk_eth *eth = bus->priv;
149
150 return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
151}
152
153static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
154{
155 struct mtk_eth *eth = bus->priv;
156
157 return _mtk_mdio_read(eth, phy_addr, phy_reg);
158}
159
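/* Clause 45 registers are reached indirectly through the Clause 22 MMD
 * access control/data registers; the sequence is serialized with the
 * MDIO bus lock.
 */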
160u32 mtk_cl45_ind_read(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 *data)
161{
162 mutex_lock(&eth->mii_bus->mdio_lock);
163
164 _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, devad);
165 _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, reg);
166 _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad);
167 *data = _mtk_mdio_read(eth, port, MII_MMD_ADDR_DATA_REG);
168
169 mutex_unlock(&eth->mii_bus->mdio_lock);
170
171 return 0;
172}
173
174u32 mtk_cl45_ind_write(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 data)
175{
176 mutex_lock(&eth->mii_bus->mdio_lock);
177
178 _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, devad);
179 _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, reg);
180 _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad);
181 _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, data);
182
183 mutex_unlock(&eth->mii_bus->mdio_lock);
184
185 return 0;
186}
187
188static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
189 phy_interface_t interface)
190{
191 u32 val;
192
193 /* Check DDR memory type.
194 * Currently TRGMII mode with DDR2 memory is not supported.
195 */
196 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
197 if (interface == PHY_INTERFACE_MODE_TRGMII &&
198 val & SYSCFG_DRAM_TYPE_DDR2) {
199 dev_err(eth->dev,
200 "TRGMII mode with DDR2 memory is not supported!\n");
201 return -EOPNOTSUPP;
202 }
203
204 val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
205 ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
206
207 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
208 ETHSYS_TRGMII_MT7621_MASK, val);
209
210 return 0;
211}
212
213static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
214 phy_interface_t interface, int speed)
215{
216 u32 val;
217 int ret;
218
219 if (interface == PHY_INTERFACE_MODE_TRGMII) {
220 mtk_w32(eth, TRGMII_MODE, INTF_MODE);
221 val = 500000000;
222 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
223 if (ret)
224 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
225 return;
226 }
227
228 val = (speed == SPEED_1000) ?
229 INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
230 mtk_w32(eth, val, INTF_MODE);
231
232 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
233 ETHSYS_TRGMII_CLK_SEL362_5,
234 ETHSYS_TRGMII_CLK_SEL362_5);
235
236 val = (speed == SPEED_1000) ? 250000000 : 500000000;
237 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
238 if (ret)
239 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
240
241 val = (speed == SPEED_1000) ?
242 RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
243 mtk_w32(eth, val, TRGMII_RCK_CTRL);
244
245 val = (speed == SPEED_1000) ?
246 TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
247 mtk_w32(eth, val, TRGMII_TCK_CTRL);
248}
249
250static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
251 const struct phylink_link_state *state)
252{
253 struct mtk_mac *mac = container_of(config, struct mtk_mac,
254 phylink_config);
255 struct mtk_eth *eth = mac->hw;
256 u32 mcr_cur, mcr_new, sid, i;
257 int val, ge_mode, err = 0;
258
259 /* MT76x8 has no hardware settings for the MAC */
260 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
261 mac->interface != state->interface) {
262 /* Setup soc pin functions */
263 switch (state->interface) {
264 case PHY_INTERFACE_MODE_TRGMII:
265 if (mac->id)
266 goto err_phy;
267 if (!MTK_HAS_CAPS(mac->hw->soc->caps,
268 MTK_GMAC1_TRGMII))
269 goto err_phy;
270 /* fall through */
271 case PHY_INTERFACE_MODE_RGMII_TXID:
272 case PHY_INTERFACE_MODE_RGMII_RXID:
273 case PHY_INTERFACE_MODE_RGMII_ID:
274 case PHY_INTERFACE_MODE_RGMII:
275 case PHY_INTERFACE_MODE_MII:
276 case PHY_INTERFACE_MODE_REVMII:
277 case PHY_INTERFACE_MODE_RMII:
278 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
279 err = mtk_gmac_rgmii_path_setup(eth, mac->id);
280 if (err)
281 goto init_err;
282 }
283 break;
284 case PHY_INTERFACE_MODE_1000BASEX:
285 case PHY_INTERFACE_MODE_2500BASEX:
286 case PHY_INTERFACE_MODE_SGMII:
287 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
288 err = mtk_gmac_sgmii_path_setup(eth, mac->id);
289 if (err)
290 goto init_err;
291 }
292 break;
293 case PHY_INTERFACE_MODE_GMII:
294 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
295 err = mtk_gmac_gephy_path_setup(eth, mac->id);
296 if (err)
297 goto init_err;
298 }
299 break;
300 default:
301 goto err_phy;
302 }
303
304 /* Setup clock for 1st gmac */
305 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
306 !phy_interface_mode_is_8023z(state->interface) &&
307 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
308 if (MTK_HAS_CAPS(mac->hw->soc->caps,
309 MTK_TRGMII_MT7621_CLK)) {
310 if (mt7621_gmac0_rgmii_adjust(mac->hw,
311 state->interface))
312 goto err_phy;
313 } else {
314 mtk_gmac0_rgmii_adjust(mac->hw,
315 state->interface,
316 state->speed);
317
318 /* mt7623_pad_clk_setup */
319 for (i = 0; i < NUM_TRGMII_CTRL; i++)
320 mtk_w32(mac->hw,
321 TD_DM_DRVP(8) | TD_DM_DRVN(8),
322 TRGMII_TD_ODT(i));
323
324 /* Assert/release MT7623 RXC reset */
325 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
326 TRGMII_RCK_CTRL);
327 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
328 }
329 }
330
331 ge_mode = 0;
332 switch (state->interface) {
333 case PHY_INTERFACE_MODE_MII:
334 case PHY_INTERFACE_MODE_GMII:
335 ge_mode = 1;
336 break;
337 case PHY_INTERFACE_MODE_REVMII:
338 ge_mode = 2;
339 break;
340 case PHY_INTERFACE_MODE_RMII:
341 if (mac->id)
342 goto err_phy;
343 ge_mode = 3;
344 break;
345 default:
346 break;
347 }
348
349 /* put the gmac into the right mode */
350 spin_lock(&eth->syscfg0_lock);
351 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
352 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
353 val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
354 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
355 spin_unlock(&eth->syscfg0_lock);
356
357 mac->interface = state->interface;
358 }
359
360 /* SGMII */
361 if (state->interface == PHY_INTERFACE_MODE_SGMII ||
362 phy_interface_mode_is_8023z(state->interface)) {
363 /* The path from GMAC to SGMII will be enabled once the SGMIISYS
364 * setup is done.
365 */
366 spin_lock(&eth->syscfg0_lock);
367 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
368
369 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
370 SYSCFG0_SGMII_MASK,
371 ~(u32)SYSCFG0_SGMII_MASK);
372
373 /* Decide how GMAC and SGMIISYS are mapped */
374 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
375 0 : mac->id;
376
377 /* Setup SGMIISYS with the determined property */
378 if (state->interface != PHY_INTERFACE_MODE_SGMII)
379 err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
380 state);
381 else if (phylink_autoneg_inband(mode))
382 err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
383
384 if (err) {
385 spin_unlock(&eth->syscfg0_lock);
386 goto init_err;
387 }
388
389 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
390 SYSCFG0_SGMII_MASK, val);
391 spin_unlock(&eth->syscfg0_lock);
392 } else if (phylink_autoneg_inband(mode)) {
393 dev_err(eth->dev,
394 "In-band mode not supported in non SGMII mode!\n");
395 return;
396 }
397
398 /* Setup gmac */
399 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
400 mcr_new = mcr_cur;
401 mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
402 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
403 MAC_MCR_FORCE_RX_FC);
404 mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
405 MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
406
407 switch (state->speed) {
408 case SPEED_2500:
409 case SPEED_1000:
410 mcr_new |= MAC_MCR_SPEED_1000;
411 break;
412 case SPEED_100:
413 mcr_new |= MAC_MCR_SPEED_100;
414 break;
415 }
416 if (state->duplex == DUPLEX_FULL) {
417 mcr_new |= MAC_MCR_FORCE_DPX;
418 if (state->pause & MLO_PAUSE_TX)
419 mcr_new |= MAC_MCR_FORCE_TX_FC;
420 if (state->pause & MLO_PAUSE_RX)
421 mcr_new |= MAC_MCR_FORCE_RX_FC;
422 }
423
424 /* Only update control register when needed! */
425 if (mcr_new != mcr_cur)
426 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
427
428 return;
429
430err_phy:
431 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
432 mac->id, phy_modes(state->interface));
433 return;
434
435init_err:
436 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
437 mac->id, phy_modes(state->interface), err);
438}
439
440static int mtk_mac_link_state(struct phylink_config *config,
441 struct phylink_link_state *state)
442{
443 struct mtk_mac *mac = container_of(config, struct mtk_mac,
444 phylink_config);
445 u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
446
447 state->link = (pmsr & MAC_MSR_LINK);
448 state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
449
450 switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
451 case 0:
452 state->speed = SPEED_10;
453 break;
454 case MAC_MSR_SPEED_100:
455 state->speed = SPEED_100;
456 break;
457 case MAC_MSR_SPEED_1000:
458 state->speed = SPEED_1000;
459 break;
460 default:
461 state->speed = SPEED_UNKNOWN;
462 break;
463 }
464
465 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
466 if (pmsr & MAC_MSR_RX_FC)
467 state->pause |= MLO_PAUSE_RX;
468 if (pmsr & MAC_MSR_TX_FC)
469 state->pause |= MLO_PAUSE_TX;
470
471 return 1;
472}
473
474static void mtk_mac_an_restart(struct phylink_config *config)
475{
476 struct mtk_mac *mac = container_of(config, struct mtk_mac,
477 phylink_config);
478
479 mtk_sgmii_restart_an(mac->hw, mac->id);
480}
481
482static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
483 phy_interface_t interface)
484{
485 struct mtk_mac *mac = container_of(config, struct mtk_mac,
486 phylink_config);
487 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
488
489 mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
490 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
491}
492
493static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
494 phy_interface_t interface,
495 struct phy_device *phy)
496{
497 struct mtk_mac *mac = container_of(config, struct mtk_mac,
498 phylink_config);
499 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
500
501 mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
502 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
503}
504
505static void mtk_validate(struct phylink_config *config,
506 unsigned long *supported,
507 struct phylink_link_state *state)
508{
509 struct mtk_mac *mac = container_of(config, struct mtk_mac,
510 phylink_config);
511 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
512
513 if (state->interface != PHY_INTERFACE_MODE_NA &&
514 state->interface != PHY_INTERFACE_MODE_MII &&
515 state->interface != PHY_INTERFACE_MODE_GMII &&
516 !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
517 phy_interface_mode_is_rgmii(state->interface)) &&
518 !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
519 !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
520 !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
521 (state->interface == PHY_INTERFACE_MODE_SGMII ||
522 phy_interface_mode_is_8023z(state->interface)))) {
523 linkmode_zero(supported);
524 return;
525 }
526
527 phylink_set_port_modes(mask);
528 phylink_set(mask, Autoneg);
529
530 switch (state->interface) {
531 case PHY_INTERFACE_MODE_TRGMII:
532 phylink_set(mask, 1000baseT_Full);
533 break;
534 case PHY_INTERFACE_MODE_1000BASEX:
535 case PHY_INTERFACE_MODE_2500BASEX:
536 phylink_set(mask, 1000baseX_Full);
537 phylink_set(mask, 2500baseX_Full);
538 break;
539 case PHY_INTERFACE_MODE_GMII:
540 case PHY_INTERFACE_MODE_RGMII:
541 case PHY_INTERFACE_MODE_RGMII_ID:
542 case PHY_INTERFACE_MODE_RGMII_RXID:
543 case PHY_INTERFACE_MODE_RGMII_TXID:
544 phylink_set(mask, 1000baseT_Half);
545 /* fall through */
546 case PHY_INTERFACE_MODE_SGMII:
547 phylink_set(mask, 1000baseT_Full);
548 phylink_set(mask, 1000baseX_Full);
549 /* fall through */
550 case PHY_INTERFACE_MODE_MII:
551 case PHY_INTERFACE_MODE_RMII:
552 case PHY_INTERFACE_MODE_REVMII:
553 case PHY_INTERFACE_MODE_NA:
554 default:
555 phylink_set(mask, 10baseT_Half);
556 phylink_set(mask, 10baseT_Full);
557 phylink_set(mask, 100baseT_Half);
558 phylink_set(mask, 100baseT_Full);
559 break;
560 }
561
562 if (state->interface == PHY_INTERFACE_MODE_NA) {
563 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
564 phylink_set(mask, 1000baseT_Full);
565 phylink_set(mask, 1000baseX_Full);
566 phylink_set(mask, 2500baseX_Full);
567 }
568 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
569 phylink_set(mask, 1000baseT_Full);
570 phylink_set(mask, 1000baseT_Half);
571 phylink_set(mask, 1000baseX_Full);
572 }
573 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
574 phylink_set(mask, 1000baseT_Full);
575 phylink_set(mask, 1000baseT_Half);
576 }
577 }
578
579 phylink_set(mask, Pause);
580 phylink_set(mask, Asym_Pause);
581
582 linkmode_and(supported, supported, mask);
583 linkmode_and(state->advertising, state->advertising, mask);
584
585 /* We can only operate at 2500BaseX or 1000BaseX. If requested
586 * to advertise both, only report advertising at 2500BaseX.
587 */
588 phylink_helper_basex_speed(state);
589}
590
591static const struct phylink_mac_ops mtk_phylink_ops = {
592 .validate = mtk_validate,
593 .mac_link_state = mtk_mac_link_state,
594 .mac_an_restart = mtk_mac_an_restart,
595 .mac_config = mtk_mac_config,
596 .mac_link_down = mtk_mac_link_down,
597 .mac_link_up = mtk_mac_link_up,
598};
599
600static int mtk_mdio_init(struct mtk_eth *eth)
601{
602 struct device_node *mii_np;
603 int ret;
604
605 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
606 if (!mii_np) {
607 dev_err(eth->dev, "no %s child node found", "mdio-bus");
608 return -ENODEV;
609 }
610
611 if (!of_device_is_available(mii_np)) {
612 ret = -ENODEV;
613 goto err_put_node;
614 }
615
616 eth->mii_bus = devm_mdiobus_alloc(eth->dev);
617 if (!eth->mii_bus) {
618 ret = -ENOMEM;
619 goto err_put_node;
620 }
621
622 eth->mii_bus->name = "mdio";
623 eth->mii_bus->read = mtk_mdio_read;
624 eth->mii_bus->write = mtk_mdio_write;
625 eth->mii_bus->priv = eth;
626 eth->mii_bus->parent = eth->dev;
627
628 if (snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np) < 0) {
629 ret = -ENOMEM;
630 goto err_put_node;
631 }
632 ret = of_mdiobus_register(eth->mii_bus, mii_np);
633
634err_put_node:
635 of_node_put(mii_np);
636 return ret;
637}
638
639static void mtk_mdio_cleanup(struct mtk_eth *eth)
640{
641 if (!eth->mii_bus)
642 return;
643
644 mdiobus_unregister(eth->mii_bus);
645}
646
647static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
648{
649 unsigned long flags;
650 u32 val;
651
652 spin_lock_irqsave(&eth->tx_irq_lock, flags);
653 val = mtk_r32(eth, eth->tx_int_mask_reg);
654 mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
655 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
656}
657
658static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
659{
660 unsigned long flags;
661 u32 val;
662
663 spin_lock_irqsave(&eth->tx_irq_lock, flags);
664 val = mtk_r32(eth, eth->tx_int_mask_reg);
665 mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
666 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
667}
668
669static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
670{
671 unsigned long flags;
672 u32 val;
673
674 spin_lock_irqsave(&eth->rx_irq_lock, flags);
675 val = mtk_r32(eth, MTK_PDMA_INT_MASK);
676 mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
677 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
678}
679
680static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
681{
682 unsigned long flags;
683 u32 val;
684
685 spin_lock_irqsave(&eth->rx_irq_lock, flags);
686 val = mtk_r32(eth, MTK_PDMA_INT_MASK);
687 mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
688 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
689}
690
691static int mtk_set_mac_address(struct net_device *dev, void *p)
692{
693 int ret = eth_mac_addr(dev, p);
694 struct mtk_mac *mac = netdev_priv(dev);
695 struct mtk_eth *eth = mac->hw;
696 const char *macaddr = dev->dev_addr;
697
698 if (ret)
699 return ret;
700
701 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
702 return -EBUSY;
703
704 spin_lock_bh(&mac->hw->page_lock);
705 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
706 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
707 MT7628_SDM_MAC_ADRH);
708 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
709 (macaddr[4] << 8) | macaddr[5],
710 MT7628_SDM_MAC_ADRL);
711 } else {
712 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
713 MTK_GDMA_MAC_ADRH(mac->id));
714 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
715 (macaddr[4] << 8) | macaddr[5],
716 MTK_GDMA_MAC_ADRL(mac->id));
717 }
718 spin_unlock_bh(&mac->hw->page_lock);
719
720 return 0;
721}
722
723void mtk_stats_update_mac(struct mtk_mac *mac)
724{
725 struct mtk_hw_stats *hw_stats = mac->hw_stats;
726 unsigned int base = MTK_GDM1_TX_GBCNT;
727 u64 stats;
728
729 base += hw_stats->reg_offset;
730
731 u64_stats_update_begin(&hw_stats->syncp);
732
733 hw_stats->rx_bytes += mtk_r32(mac->hw, base);
734 stats = mtk_r32(mac->hw, base + 0x04);
735 if (stats)
736 hw_stats->rx_bytes += (stats << 32);
737 hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
738 hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
739 hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
740 hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
741 hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
742 hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
743 hw_stats->rx_flow_control_packets +=
744 mtk_r32(mac->hw, base + 0x24);
745 hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
746 hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
747 hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
748 stats = mtk_r32(mac->hw, base + 0x34);
749 if (stats)
750 hw_stats->tx_bytes += (stats << 32);
751 hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
752 u64_stats_update_end(&hw_stats->syncp);
753}
754
755static void mtk_stats_update(struct mtk_eth *eth)
756{
757 int i;
758
759 for (i = 0; i < MTK_MAC_COUNT; i++) {
760 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
761 continue;
762 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
763 mtk_stats_update_mac(eth->mac[i]);
764 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
765 }
766 }
767}
768
769static void mtk_get_stats64(struct net_device *dev,
770 struct rtnl_link_stats64 *storage)
771{
772 struct mtk_mac *mac = netdev_priv(dev);
773 struct mtk_hw_stats *hw_stats = mac->hw_stats;
774 unsigned int start;
775
776 if (netif_running(dev) && netif_device_present(dev)) {
777 if (spin_trylock_bh(&hw_stats->stats_lock)) {
778 mtk_stats_update_mac(mac);
779 spin_unlock_bh(&hw_stats->stats_lock);
780 }
781 }
782
783 do {
784 start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
785 storage->rx_packets = hw_stats->rx_packets;
786 storage->tx_packets = hw_stats->tx_packets;
787 storage->rx_bytes = hw_stats->rx_bytes;
788 storage->tx_bytes = hw_stats->tx_bytes;
789 storage->collisions = hw_stats->tx_collisions;
790 storage->rx_length_errors = hw_stats->rx_short_errors +
791 hw_stats->rx_long_errors;
792 storage->rx_over_errors = hw_stats->rx_overflow;
793 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
794 storage->rx_errors = hw_stats->rx_checksum_errors;
795 storage->tx_aborted_errors = hw_stats->tx_skip;
796 } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
797
798 storage->tx_errors = dev->stats.tx_errors;
799 storage->rx_dropped = dev->stats.rx_dropped;
800 storage->tx_dropped = dev->stats.tx_dropped;
801}
802
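/* Size an RX page fragment so the buffer can hold at least MTK_MAX_RX_LENGTH
 * bytes plus the trailing struct skb_shared_info required by build_skb().
 */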
803static inline int mtk_max_frag_size(int mtu)
804{
805 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
806 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
807 mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
808
809 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
810 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
811}
812
813static inline int mtk_max_buf_size(int frag_size)
814{
815 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
816 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
817
818 WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
819
820 return buf_size;
821}
822
823static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
824 struct mtk_rx_dma *dma_rxd)
825{
826 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
827 if (!(rxd->rxd2 & RX_DMA_DONE))
828 return false;
829
830 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
831 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
832 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
833#if defined(CONFIG_MEDIATEK_NETSYS_V2)
834 rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
835 rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
836#endif
837 return true;
838}
839
840/* the qdma core needs scratch memory to be set up */
841static int mtk_init_fq_dma(struct mtk_eth *eth)
842{
843 dma_addr_t phy_ring_tail;
844 int cnt = MTK_DMA_SIZE;
845 dma_addr_t dma_addr;
846 int i;
847
848 if (!eth->soc->has_sram) {
849 eth->scratch_ring = dma_alloc_coherent(eth->dev,
850 cnt * sizeof(struct mtk_tx_dma),
851 &eth->phy_scratch_ring,
852 GFP_ATOMIC);
853 } else {
854 eth->scratch_ring = eth->base + MTK_ETH_SRAM_OFFSET;
855 }
856
857 if (unlikely(!eth->scratch_ring))
858 return -ENOMEM;
859
860 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
861 GFP_KERNEL);
862 if (unlikely(!eth->scratch_head))
863 return -ENOMEM;
864
865 dma_addr = dma_map_single(eth->dev,
866 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
867 DMA_FROM_DEVICE);
868 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
869 return -ENOMEM;
870
871 phy_ring_tail = eth->phy_scratch_ring +
872 (sizeof(struct mtk_tx_dma) * (cnt - 1));
873
874 for (i = 0; i < cnt; i++) {
875 eth->scratch_ring[i].txd1 =
876 (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
877 if (i < cnt - 1)
878 eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
879 ((i + 1) * sizeof(struct mtk_tx_dma)));
880 eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
881
882 eth->scratch_ring[i].txd4 = 0;
883#if defined(CONFIG_MEDIATEK_NETSYS_V2)
884 if (eth->soc->has_sram && ((sizeof(struct mtk_tx_dma)) > 16)) {
885 eth->scratch_ring[i].txd5 = 0;
886 eth->scratch_ring[i].txd6 = 0;
887 eth->scratch_ring[i].txd7 = 0;
888 eth->scratch_ring[i].txd8 = 0;
889 }
890#endif
891 }
892
893 mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
894 mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
895 mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
896 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
897
898 return 0;
899}
900
901static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
902{
903 void *ret = ring->dma;
904
905 return ret + (desc - ring->phys);
906}
907
908static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
909 struct mtk_tx_dma *txd)
910{
911 int idx = txd - ring->dma;
912
913 return &ring->buf[idx];
914}
915
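/* Map a QDMA descriptor to the PDMA shadow descriptor at the same index. */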
916static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
917 struct mtk_tx_dma *dma)
918{
919 return ring->dma_pdma - ring->dma + dma;
920}
921
922static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
923{
924 return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
925}
926
927static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
928 bool napi)
929{
930 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
931 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
932 dma_unmap_single(eth->dev,
933 dma_unmap_addr(tx_buf, dma_addr0),
934 dma_unmap_len(tx_buf, dma_len0),
935 DMA_TO_DEVICE);
936 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
937 dma_unmap_page(eth->dev,
938 dma_unmap_addr(tx_buf, dma_addr0),
939 dma_unmap_len(tx_buf, dma_len0),
940 DMA_TO_DEVICE);
941 }
942 } else {
943 if (dma_unmap_len(tx_buf, dma_len0)) {
944 dma_unmap_page(eth->dev,
945 dma_unmap_addr(tx_buf, dma_addr0),
946 dma_unmap_len(tx_buf, dma_len0),
947 DMA_TO_DEVICE);
948 }
949
950 if (dma_unmap_len(tx_buf, dma_len1)) {
951 dma_unmap_page(eth->dev,
952 dma_unmap_addr(tx_buf, dma_addr1),
953 dma_unmap_len(tx_buf, dma_len1),
954 DMA_TO_DEVICE);
955 }
956 }
957
958 tx_buf->flags = 0;
959 if (tx_buf->skb &&
960 (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
961 if (napi)
962 napi_consume_skb(tx_buf->skb, napi);
963 else
964 dev_kfree_skb_any(tx_buf->skb);
965 }
966 tx_buf->skb = NULL;
967}
968
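/* For QDMA, only the unmap info is recorded in the tx_buf; for PDMA, one
 * descriptor carries up to two buffers, so odd indices fill txd3/PLEN1 and
 * even indices fill txd1/PLEN0.
 */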
969static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
970 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
971 size_t size, int idx)
972{
973 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
974 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
975 dma_unmap_len_set(tx_buf, dma_len0, size);
976 } else {
977 if (idx & 1) {
978 txd->txd3 = mapped_addr;
979 txd->txd2 |= TX_DMA_PLEN1(size);
980 dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
981 dma_unmap_len_set(tx_buf, dma_len1, size);
982 } else {
983 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
984 txd->txd1 = mapped_addr;
985 txd->txd2 = TX_DMA_PLEN0(size);
986 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
987 dma_unmap_len_set(tx_buf, dma_len0, size);
988 }
989 }
990}
991
992static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
993 int tx_num, struct mtk_tx_ring *ring, bool gso)
994{
995 struct mtk_mac *mac = netdev_priv(dev);
996 struct mtk_eth *eth = mac->hw;
997 struct mtk_tx_dma *itxd, *txd;
998 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
999 struct mtk_tx_buf *itx_buf, *tx_buf;
1000 dma_addr_t mapped_addr;
1001 unsigned int nr_frags;
1002 int i, n_desc = 1;
1003 u32 txd4 = 0, txd5 = 0, txd6 = 0;
1004 u32 fport;
1005 u32 qid = 0;
1006 int k = 0;
1007
1008 itxd = ring->next_free;
1009 itxd_pdma = qdma_to_pdma(ring, itxd);
1010 if (itxd == ring->last_free)
1011 return -ENOMEM;
1012
1013 itx_buf = mtk_desc_to_tx_buf(ring, itxd);
1014 memset(itx_buf, 0, sizeof(*itx_buf));
1015
1016 mapped_addr = dma_map_single(eth->dev, skb->data,
1017 skb_headlen(skb), DMA_TO_DEVICE);
1018 if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
1019 return -ENOMEM;
1020
1021 WRITE_ONCE(itxd->txd1, mapped_addr);
1022 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1023 itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1024 MTK_TX_FLAGS_FPORT1;
1025 setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
1026 k++);
1027
1028 nr_frags = skb_shinfo(skb)->nr_frags;
1029
1030 qid = skb->mark & (MTK_QDMA_TX_MASK);
1031
1032#if defined(CONFIG_MEDIATEK_NETSYS_V2)
1033 if(!qid && mac->id)
1034 qid = MTK_QDMA_GMAC2_QID;
1035#endif
1036
1037 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1038 /* set the forward port */
1039 fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2;
1040 txd4 |= fport;
1041
1042 if (gso)
1043 txd5 |= TX_DMA_TSO_V2;
1044
1045 /* TX Checksum offload */
1046 if (skb->ip_summed == CHECKSUM_PARTIAL)
1047 txd5 |= TX_DMA_CHKSUM_V2;
1048
1049 /* VLAN header offload */
1050 if (skb_vlan_tag_present(skb))
1051 txd6 |= TX_DMA_INS_VLAN_V2 | skb_vlan_tag_get(skb);
1052
1053 txd4 = txd4 | TX_DMA_SWC_V2;
1054 } else {
1055 /* set the forward port */
1056 fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
1057 txd4 |= fport;
1058
1059 if (gso)
1060 txd4 |= TX_DMA_TSO;
1061
1062 /* TX Checksum offload */
1063 if (skb->ip_summed == CHECKSUM_PARTIAL)
1064 txd4 |= TX_DMA_CHKSUM;
1065
1066 /* VLAN header offload */
1067 if (skb_vlan_tag_present(skb))
1068 txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
1069 }
1070 /* TX SG offload */
1071 txd = itxd;
1072 txd_pdma = qdma_to_pdma(ring, txd);
1073
1074#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
1075 if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
1076 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1077 txd4 &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
1078 txd4 |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
1079 } else {
1080 txd4 &= ~(0x7 << TX_DMA_FPORT_SHIFT);
1081 txd4 |= 0x4 << TX_DMA_FPORT_SHIFT;
1082 }
1083 }
1084
1085 trace_printk("[%s] nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
1086 __func__, nr_frags, HNAT_SKB_CB2(skb)->magic, txd4);
1087#endif
1088
1089 for (i = 0; i < nr_frags; i++) {
1090 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1091 unsigned int offset = 0;
1092 int frag_size = skb_frag_size(frag);
1093
1094 while (frag_size) {
1095 bool last_frag = false;
1096 unsigned int frag_map_size;
1097 bool new_desc = true;
1098
1099 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
1100 (i & 0x1)) {
1101 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1102 txd_pdma = qdma_to_pdma(ring, txd);
1103 if (txd == ring->last_free)
1104 goto err_dma;
1105
1106 n_desc++;
1107 } else {
1108 new_desc = false;
1109 }
1110
1111
1112 frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
1113 mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
1114 frag_map_size,
1115 DMA_TO_DEVICE);
1116 if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
1117 goto err_dma;
1118
1119 if (i == nr_frags - 1 &&
1120 (frag_size - frag_map_size) == 0)
1121 last_frag = true;
1122
1123 WRITE_ONCE(txd->txd1, mapped_addr);
1124
1125 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1126 WRITE_ONCE(txd->txd3, (TX_DMA_PLEN0(frag_map_size) |
1127 last_frag * TX_DMA_LS0));
1128 WRITE_ONCE(txd->txd4, fport | TX_DMA_SWC_V2 |
1129 QID_BITS_V2(qid));
1130 } else {
1131 WRITE_ONCE(txd->txd3,
1132 (TX_DMA_SWC | QID_LOW_BITS(qid) |
1133 TX_DMA_PLEN0(frag_map_size) |
1134 last_frag * TX_DMA_LS0));
1135 WRITE_ONCE(txd->txd4,
1136 fport | QID_HIGH_BITS(qid));
1137 }
1138
1139 tx_buf = mtk_desc_to_tx_buf(ring, txd);
1140 if (new_desc)
1141 memset(tx_buf, 0, sizeof(*tx_buf));
1142 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
1143 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1144 tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1145 MTK_TX_FLAGS_FPORT1;
1146
1147 setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
1148 frag_map_size, k++);
1149
1150 frag_size -= frag_map_size;
1151 offset += frag_map_size;
1152 }
1153 }
1154
1155 /* store skb to cleanup */
1156 itx_buf->skb = skb;
1157
1158#if defined(CONFIG_MEDIATEK_NETSYS_V2)
1159 WRITE_ONCE(itxd->txd5, txd5);
1160 WRITE_ONCE(itxd->txd6, txd6);
1161 WRITE_ONCE(itxd->txd7, 0);
1162 WRITE_ONCE(itxd->txd8, 0);
1163#endif
1164
1165 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1166 WRITE_ONCE(itxd->txd4, txd4 | QID_BITS_V2(qid));
1167 WRITE_ONCE(itxd->txd3, (TX_DMA_PLEN0(skb_headlen(skb)) |
1168 (!nr_frags * TX_DMA_LS0)));
1169 } else {
1170 WRITE_ONCE(itxd->txd4, txd4 | QID_HIGH_BITS(qid));
1171 WRITE_ONCE(itxd->txd3,
1172 TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
1173 (!nr_frags * TX_DMA_LS0) | QID_LOW_BITS(qid));
1174 }
1175
1176 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1177 if (k & 0x1)
1178 txd_pdma->txd2 |= TX_DMA_LS0;
1179 else
1180 txd_pdma->txd2 |= TX_DMA_LS1;
1181 }
1182
1183 netdev_sent_queue(dev, skb->len);
1184 skb_tx_timestamp(skb);
1185
1186 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1187 atomic_sub(n_desc, &ring->free_count);
1188
1189 /* make sure that all changes to the dma ring are flushed before we
1190 * continue
1191 */
1192 wmb();
1193
1194 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1195 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
1196 !netdev_xmit_more())
1197 mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
1198 } else {
1199 int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
1200 ring->dma_size);
1201 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1202 }
1203
1204 return 0;
1205
1206err_dma:
1207 do {
1208 tx_buf = mtk_desc_to_tx_buf(ring, itxd);
1209
1210 /* unmap dma */
1211 mtk_tx_unmap(eth, tx_buf, false);
1212
1213 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1214 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1215 itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1216
1217 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1218 itxd_pdma = qdma_to_pdma(ring, itxd);
1219 } while (itxd != txd);
1220
1221 return -ENOMEM;
1222}
1223
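/* Worst-case descriptor count for an skb: with GSO each fragment may be
 * split into MTK_TX_DMA_BUF_LEN sized chunks, each needing its own
 * descriptor.
 */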
1224static inline int mtk_cal_txd_req(struct sk_buff *skb)
1225{
1226 int i, nfrags;
1227 skb_frag_t *frag;
1228
1229 nfrags = 1;
1230 if (skb_is_gso(skb)) {
1231 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1232 frag = &skb_shinfo(skb)->frags[i];
1233 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1234 MTK_TX_DMA_BUF_LEN);
1235 }
1236 } else {
1237 nfrags += skb_shinfo(skb)->nr_frags;
1238 }
1239
1240 return nfrags;
1241}
1242
1243static int mtk_queue_stopped(struct mtk_eth *eth)
1244{
1245 int i;
1246
1247 for (i = 0; i < MTK_MAC_COUNT; i++) {
1248 if (!eth->netdev[i])
1249 continue;
1250 if (netif_queue_stopped(eth->netdev[i]))
1251 return 1;
1252 }
1253
1254 return 0;
1255}
1256
1257static void mtk_wake_queue(struct mtk_eth *eth)
1258{
1259 int i;
1260
1261 for (i = 0; i < MTK_MAC_COUNT; i++) {
1262 if (!eth->netdev[i])
1263 continue;
1264 netif_wake_queue(eth->netdev[i]);
1265 }
1266}
1267
1268static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1269{
1270 struct mtk_mac *mac = netdev_priv(dev);
1271 struct mtk_eth *eth = mac->hw;
1272 struct mtk_tx_ring *ring = &eth->tx_ring;
1273 struct net_device_stats *stats = &dev->stats;
1274 bool gso = false;
1275 int tx_num;
1276
1277 /* normally we can rely on the stack not calling this more than once,
1278 * however we have 2 queues running on the same ring so we need to lock
1279 * the ring access
1280 */
1281 spin_lock(&eth->page_lock);
1282
1283 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1284 goto drop;
1285
1286 tx_num = mtk_cal_txd_req(skb);
1287 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1288 netif_stop_queue(dev);
1289 netif_err(eth, tx_queued, dev,
1290 "Tx Ring full when queue awake!\n");
1291 spin_unlock(&eth->page_lock);
1292 return NETDEV_TX_BUSY;
1293 }
1294
1295 /* TSO: fill MSS info in tcp checksum field */
1296 if (skb_is_gso(skb)) {
1297 if (skb_cow_head(skb, 0)) {
1298 netif_warn(eth, tx_err, dev,
1299 "GSO expand head fail.\n");
1300 goto drop;
1301 }
1302
1303 if (skb_shinfo(skb)->gso_type &
1304 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1305 gso = true;
1306 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1307 }
1308 }
1309
1310 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1311 goto drop;
1312
1313 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1314 netif_stop_queue(dev);
1315
1316 spin_unlock(&eth->page_lock);
1317
1318 return NETDEV_TX_OK;
1319
1320drop:
1321 spin_unlock(&eth->page_lock);
1322 stats->tx_dropped++;
1323 dev_kfree_skb_any(skb);
1324 return NETDEV_TX_OK;
1325}
1326
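/* With HW LRO enabled, packets may land on any RX ring; return the first
 * ring that has a completed (RX_DMA_DONE) descriptor pending.
 */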
1327static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1328{
1329 int i;
1330 struct mtk_rx_ring *ring;
1331 int idx;
1332
1333 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1334 if (!IS_NORMAL_RING(i) && !IS_HW_LRO_RING(i))
1335 continue;
1336
1337 ring = &eth->rx_ring[i];
1338 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1339 if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
1340 ring->calc_idx_update = true;
1341 return ring;
1342 }
1343 }
1344
1345 return NULL;
1346}
1347
1348static void mtk_update_rx_cpu_idx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
1349{
1350 int i;
1351
1352 if (!eth->hwlro)
1353 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1354 else {
1355 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1356 ring = &eth->rx_ring[i];
1357 if (ring->calc_idx_update) {
1358 ring->calc_idx_update = false;
1359 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1360 }
1361 }
1362 }
1363}
1364
1365static int mtk_poll_rx(struct napi_struct *napi, int budget,
1366 struct mtk_eth *eth)
1367{
1368 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
1369 struct mtk_rx_ring *ring = rx_napi->rx_ring;
1370 int idx;
1371 struct sk_buff *skb;
1372 u8 *data, *new_data;
1373 struct mtk_rx_dma *rxd, trxd;
1374 int done = 0;
1375
1376 if (unlikely(!ring))
1377 goto rx_done;
1378
1379 while (done < budget) {
1380 struct net_device *netdev;
1381 unsigned int pktlen;
1382 dma_addr_t dma_addr;
1383 int mac;
1384
1385 if (eth->hwlro)
1386 ring = mtk_get_rx_ring(eth);
1387
1388 if (unlikely(!ring))
1389 goto rx_done;
1390
1391 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1392 rxd = &ring->dma[idx];
1393 data = ring->data[idx];
1394
1395 if (!mtk_rx_get_desc(&trxd, rxd))
1396 break;
1397
1398 /* find out which mac the packet come from. values start at 1 */
1399 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1400 mac = 0;
1401 } else {
1402#if defined(CONFIG_MEDIATEK_NETSYS_V2)
1403 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1404 mac = RX_DMA_GET_SPORT(trxd.rxd5) - 1;
1405 else
1406#endif
1407 mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ?
1408 0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
1409 }
1410
1411 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1412 !eth->netdev[mac]))
1413 goto release_desc;
1414
1415 netdev = eth->netdev[mac];
1416
1417 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1418 goto release_desc;
1419
1420 /* alloc new buffer */
1421 new_data = napi_alloc_frag(ring->frag_size);
1422 if (unlikely(!new_data)) {
1423 netdev->stats.rx_dropped++;
1424 goto release_desc;
1425 }
1426 dma_addr = dma_map_single(eth->dev,
1427 new_data + NET_SKB_PAD +
1428 eth->ip_align,
1429 ring->buf_size,
1430 DMA_FROM_DEVICE);
1431 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
1432 skb_free_frag(new_data);
1433 netdev->stats.rx_dropped++;
1434 goto release_desc;
1435 }
1436
1437 dma_unmap_single(eth->dev, trxd.rxd1,
1438 ring->buf_size, DMA_FROM_DEVICE);
1439
1440 /* receive data */
1441 skb = build_skb(data, ring->frag_size);
1442 if (unlikely(!skb)) {
1443 skb_free_frag(data);
1444 netdev->stats.rx_dropped++;
1445 goto skip_rx;
1446 }
1447 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1448
1449 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1450 skb->dev = netdev;
1451 skb_put(skb, pktlen);
1452
1453 if ((!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
1454 (trxd.rxd4 & eth->rx_dma_l4_valid)) ||
1455 (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
1456 (trxd.rxd3 & eth->rx_dma_l4_valid)))
1457 skb->ip_summed = CHECKSUM_UNNECESSARY;
1458 else
1459 skb_checksum_none_assert(skb);
1460 skb->protocol = eth_type_trans(skb, netdev);
1461
1462 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
1463 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1464 if (trxd.rxd3 & RX_DMA_VTAG_V2)
1465 __vlan_hwaccel_put_tag(skb,
1466 htons(RX_DMA_VPID_V2(trxd.rxd4)),
1467 RX_DMA_VID_V2(trxd.rxd4));
1468 } else {
1469 if (trxd.rxd2 & RX_DMA_VTAG)
1470 __vlan_hwaccel_put_tag(skb,
1471 htons(RX_DMA_VPID(trxd.rxd3)),
1472 RX_DMA_VID(trxd.rxd3));
1473 }
1474
1475 /* If netdev is attached to dsa switch, the special
1476 * tag inserted in VLAN field by switch hardware can
1477 * be offloaded by RX HW VLAN offload. Clears the VLAN
1478 * information from @skb to avoid unexpected 8021d
1479 * handler before packet enter dsa framework.
1480 */
1481 if (netdev_uses_dsa(netdev))
1482 __vlan_hwaccel_clear_tag(skb);
1483 }
1484
1485#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
1486#if defined(CONFIG_MEDIATEK_NETSYS_V2)
1487 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1488 *(u32 *)(skb->head) = trxd.rxd5;
1489 else
1490#endif
1491 *(u32 *)(skb->head) = trxd.rxd4;
1492
1493 skb_hnat_alg(skb) = 0;
1494 skb_hnat_filled(skb) = 0;
1495 skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
1496
1497 if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
1498 trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
1499 __func__, skb_hnat_reason(skb));
1500 skb->pkt_type = PACKET_HOST;
1501 }
1502
1503 trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x\n",
1504 __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
1505 skb_hnat_reason(skb), skb_hnat_alg(skb));
1506#endif
1507 if (mtk_hwlro_stats_ebl &&
1508 IS_HW_LRO_RING(ring->ring_no) && eth->hwlro) {
1509 hw_lro_stats_update(ring->ring_no, &trxd);
1510 hw_lro_flush_stats_update(ring->ring_no, &trxd);
1511 }
1512
1513 skb_record_rx_queue(skb, 0);
1514 napi_gro_receive(napi, skb);
1515
1516skip_rx:
1517 ring->data[idx] = new_data;
1518 rxd->rxd1 = (unsigned int)dma_addr;
1519
1520release_desc:
1521 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1522 rxd->rxd2 = RX_DMA_LSO;
1523 else
1524 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
1525
1526 ring->calc_idx = idx;
1527
1528 done++;
1529 }
1530
1531rx_done:
1532 if (done) {
1533 /* make sure that all changes to the dma ring are flushed before
1534 * we continue
1535 */
1536 wmb();
1537 mtk_update_rx_cpu_idx(eth, ring);
1538 }
1539
1540 return done;
1541}
1542
1543static void mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
1544 unsigned int *done, unsigned int *bytes)
1545{
1546 struct mtk_tx_ring *ring = &eth->tx_ring;
1547 struct mtk_tx_dma *desc;
1548 struct sk_buff *skb;
1549 struct mtk_tx_buf *tx_buf;
1550 u32 cpu, dma;
1551
1552 cpu = ring->last_free_ptr;
1553 dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1554
1555 desc = mtk_qdma_phys_to_virt(ring, cpu);
1556
1557 while ((cpu != dma) && budget) {
1558 u32 next_cpu = desc->txd2;
1559 int mac = 0;
1560
1561 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1562 break;
1563
1564 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1565
1566 tx_buf = mtk_desc_to_tx_buf(ring, desc);
1567 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
1568 mac = 1;
1569
1570 skb = tx_buf->skb;
1571 if (!skb)
1572 break;
1573
1574 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1575 bytes[mac] += skb->len;
1576 done[mac]++;
1577 budget--;
1578 }
1579 mtk_tx_unmap(eth, tx_buf, true);
1580
1581 ring->last_free = desc;
1582 atomic_inc(&ring->free_count);
1583
1584 cpu = next_cpu;
1585 }
1586
1587 ring->last_free_ptr = cpu;
1588 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
1589}
1590
1591static void mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
1592 unsigned int *done, unsigned int *bytes)
1593{
1594 struct mtk_tx_ring *ring = &eth->tx_ring;
1595 struct mtk_tx_dma *desc;
1596 struct sk_buff *skb;
1597 struct mtk_tx_buf *tx_buf;
1598 u32 cpu, dma;
1599
1600 cpu = ring->cpu_idx;
1601 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
1602
1603 while ((cpu != dma) && budget) {
1604 tx_buf = &ring->buf[cpu];
1605 skb = tx_buf->skb;
1606 if (!skb)
1607 break;
1608
1609 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1610 bytes[0] += skb->len;
1611 done[0]++;
1612 budget--;
1613 }
1614
1615 mtk_tx_unmap(eth, tx_buf, true);
1616
1617 desc = &ring->dma[cpu];
1618 ring->last_free = desc;
1619 atomic_inc(&ring->free_count);
1620
1621 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
1622 }
1623
1624 ring->cpu_idx = cpu;
1625}
1626
1627static int mtk_poll_tx(struct mtk_eth *eth, int budget)
1628{
1629 struct mtk_tx_ring *ring = &eth->tx_ring;
1630 unsigned int done[MTK_MAX_DEVS];
1631 unsigned int bytes[MTK_MAX_DEVS];
1632 int total = 0, i;
1633
1634 memset(done, 0, sizeof(done));
1635 memset(bytes, 0, sizeof(bytes));
1636
1637 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1638 mtk_poll_tx_qdma(eth, budget, done, bytes);
1639 else
1640 mtk_poll_tx_pdma(eth, budget, done, bytes);
1641
1642 for (i = 0; i < MTK_MAC_COUNT; i++) {
1643 if (!eth->netdev[i] || !done[i])
1644 continue;
1645 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1646 total += done[i];
1647 }
1648
1649 if (mtk_queue_stopped(eth) &&
1650 (atomic_read(&ring->free_count) > ring->thresh))
1651 mtk_wake_queue(eth);
1652
1653 return total;
1654}
1655
1656static void mtk_handle_status_irq(struct mtk_eth *eth)
1657{
1658 u32 status2 = mtk_r32(eth, MTK_FE_INT_STATUS);
1659
1660 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
1661 mtk_stats_update(eth);
1662 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
1663 MTK_FE_INT_STATUS);
1664 }
1665}
1666
1667static int mtk_napi_tx(struct napi_struct *napi, int budget)
1668{
1669 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
1670 u32 status, mask;
1671 int tx_done = 0;
1672
1673 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1674 mtk_handle_status_irq(eth);
1675 mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
1676 tx_done = mtk_poll_tx(eth, budget);
1677
1678 if (unlikely(netif_msg_intr(eth))) {
1679 status = mtk_r32(eth, eth->tx_int_status_reg);
1680 mask = mtk_r32(eth, eth->tx_int_mask_reg);
1681 dev_info(eth->dev,
1682 "done tx %d, intr 0x%08x/0x%x\n",
1683 tx_done, status, mask);
1684 }
1685
1686 if (tx_done == budget)
1687 return budget;
1688
1689 status = mtk_r32(eth, eth->tx_int_status_reg);
1690 if (status & MTK_TX_DONE_INT)
1691 return budget;
1692
1693 if (napi_complete(napi))
1694 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
1695
1696 return tx_done;
1697}
1698
1699static int mtk_napi_rx(struct napi_struct *napi, int budget)
1700{
1701 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
1702 struct mtk_eth *eth = rx_napi->eth;
1703 struct mtk_rx_ring *ring = rx_napi->rx_ring;
1704 u32 status, mask;
1705 int rx_done = 0;
1706 int remain_budget = budget;
1707
1708 mtk_handle_status_irq(eth);
1709
1710poll_again:
1711 mtk_w32(eth, MTK_RX_DONE_INT(ring->ring_no), MTK_PDMA_INT_STATUS);
1712 rx_done = mtk_poll_rx(napi, remain_budget, eth);
1713
1714 if (unlikely(netif_msg_intr(eth))) {
1715 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1716 mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
1717 dev_info(eth->dev,
1718 "done rx %d, intr 0x%08x/0x%x\n",
1719 rx_done, status, mask);
1720 }
1721 if (rx_done == remain_budget)
1722 return budget;
1723
1724 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1725 if (status & MTK_RX_DONE_INT(ring->ring_no)) {
1726 remain_budget -= rx_done;
1727 goto poll_again;
1728 }
1729
1730 if (napi_complete(napi))
1731 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(ring->ring_no));
1732
1733 return rx_done + budget - remain_budget;
1734}
1735
1736static int mtk_tx_alloc(struct mtk_eth *eth)
1737{
1738 struct mtk_tx_ring *ring = &eth->tx_ring;
1739 int i, sz = sizeof(*ring->dma);
1740
1741 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
1742 GFP_KERNEL);
1743 if (!ring->buf)
1744 goto no_tx_mem;
1745
1746 if (!eth->soc->has_sram)
1747 ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
1748 &ring->phys, GFP_ATOMIC);
1749 else {
1750 ring->dma = eth->scratch_ring + MTK_DMA_SIZE;
1751 ring->phys = eth->phy_scratch_ring + MTK_DMA_SIZE * sz;
1752 }
1753
1754 if (!ring->dma)
1755 goto no_tx_mem;
1756
1757 for (i = 0; i < MTK_DMA_SIZE; i++) {
1758 int next = (i + 1) % MTK_DMA_SIZE;
1759 u32 next_ptr = ring->phys + next * sz;
1760
1761 ring->dma[i].txd2 = next_ptr;
1762 ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1763 ring->dma[i].txd4 = 0;
1764#if defined(CONFIG_MEDIATEK_NETSYS_V2)
1765 if (eth->soc->has_sram && (sz > 16)) {
1766 ring->dma[i].txd5 = 0;
1767 ring->dma[i].txd6 = 0;
1768 ring->dma[i].txd7 = 0;
1769 ring->dma[i].txd8 = 0;
1770 }
1771#endif
1772 }
1773
1774 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
1775 * only as the framework. The real HW descriptors are the PDMA
1776 * descriptors in ring->dma_pdma.
1777 */
1778 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1779 ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
1780 &ring->phys_pdma,
1781 GFP_ATOMIC);
1782 if (!ring->dma_pdma)
1783 goto no_tx_mem;
1784
1785 for (i = 0; i < MTK_DMA_SIZE; i++) {
1786 ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
1787 ring->dma_pdma[i].txd4 = 0;
1788 }
1789 }
1790
1791 ring->dma_size = MTK_DMA_SIZE;
1792 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
1793 ring->next_free = &ring->dma[0];
1794 ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
developerc4671b22021-05-28 13:16:42 +08001795 ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
developerfd40db22021-04-29 10:08:25 +08001796 ring->thresh = MAX_SKB_FRAGS;
1797
1798 /* make sure that all changes to the dma ring are flushed before we
1799 * continue
1800 */
1801 wmb();
1802
1803 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1804 mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
1805 mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
1806 mtk_w32(eth,
1807 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1808 MTK_QTX_CRX_PTR);
developerc4671b22021-05-28 13:16:42 +08001809 mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
developerfd40db22021-04-29 10:08:25 +08001810 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
1811 MTK_QTX_CFG(0));
1812 } else {
1813 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
1814 mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
1815 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
1816 mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
1817 }
1818
1819 return 0;
1820
1821no_tx_mem:
1822 return -ENOMEM;
1823}
1824
1825static void mtk_tx_clean(struct mtk_eth *eth)
1826{
1827 struct mtk_tx_ring *ring = &eth->tx_ring;
1828 int i;
1829
1830 if (ring->buf) {
1831 for (i = 0; i < MTK_DMA_SIZE; i++)
developerc4671b22021-05-28 13:16:42 +08001832 mtk_tx_unmap(eth, &ring->buf[i], false);
developerfd40db22021-04-29 10:08:25 +08001833 kfree(ring->buf);
1834 ring->buf = NULL;
1835 }
1836
1837 if (!eth->soc->has_sram && ring->dma) {
1838 dma_free_coherent(eth->dev,
1839 MTK_DMA_SIZE * sizeof(*ring->dma),
1840 ring->dma,
1841 ring->phys);
1842 ring->dma = NULL;
1843 }
1844
1845 if (ring->dma_pdma) {
1846 dma_free_coherent(eth->dev,
1847 MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
1848 ring->dma_pdma,
1849 ring->phys_pdma);
1850 ring->dma_pdma = NULL;
1851 }
1852}
1853
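/* Allocate one RX ring (normal, HWLRO or QDMA flavour). Page-fragment
 * buffers are allocated and DMA-mapped into the descriptors, and the
 * ring's base, size and CPU index registers are programmed. On SoCs with
 * SRAM the normal rings reuse the area right after the TX ring.
 */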
1854static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
1855{
1856 struct mtk_rx_ring *ring;
1857 int rx_data_len, rx_dma_size;
1858 int i;
1859
1860 if (rx_flag == MTK_RX_FLAGS_QDMA) {
1861 if (ring_no)
1862 return -EINVAL;
1863 ring = &eth->rx_ring_qdma;
1864 } else {
1865 ring = &eth->rx_ring[ring_no];
1866 }
1867
1868 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
1869 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
1870 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
1871 } else {
1872 rx_data_len = ETH_DATA_LEN;
1873 rx_dma_size = MTK_DMA_SIZE;
1874 }
1875
1876 ring->frag_size = mtk_max_frag_size(rx_data_len);
1877 ring->buf_size = mtk_max_buf_size(ring->frag_size);
1878 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
1879 GFP_KERNEL);
1880 if (!ring->data)
1881 return -ENOMEM;
1882
1883 for (i = 0; i < rx_dma_size; i++) {
1884 ring->data[i] = netdev_alloc_frag(ring->frag_size);
1885 if (!ring->data[i])
1886 return -ENOMEM;
1887 }
1888
1889	if (!eth->soc->has_sram || rx_flag != MTK_RX_FLAGS_NORMAL) {
1890		ring->dma = dma_alloc_coherent(eth->dev,
1891					       rx_dma_size * sizeof(*ring->dma),
1892					       &ring->phys, GFP_ATOMIC);
1893	} else {
1894		struct mtk_tx_ring *tx_ring = &eth->tx_ring;
1895
1896		ring->dma = (struct mtk_rx_dma *)(tx_ring->dma +
1897			     MTK_DMA_SIZE * (ring_no + 1));
1898		ring->phys = tx_ring->phys + MTK_DMA_SIZE *
1899			     sizeof(*tx_ring->dma) * (ring_no + 1);
1900	}
1901
1902 if (!ring->dma)
1903 return -ENOMEM;
1904
1905 for (i = 0; i < rx_dma_size; i++) {
1906 dma_addr_t dma_addr = dma_map_single(eth->dev,
1907 ring->data[i] + NET_SKB_PAD + eth->ip_align,
1908 ring->buf_size,
1909 DMA_FROM_DEVICE);
1910 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1911 return -ENOMEM;
1912 ring->dma[i].rxd1 = (unsigned int)dma_addr;
1913
1914 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1915 ring->dma[i].rxd2 = RX_DMA_LSO;
1916 else
1917 ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
1918
1919 ring->dma[i].rxd3 = 0;
1920 ring->dma[i].rxd4 = 0;
developera2bdbd52021-05-31 19:10:17 +08001921#if defined(CONFIG_MEDIATEK_NETSYS_V2)
developerfd40db22021-04-29 10:08:25 +08001922 if (eth->soc->has_sram && ((sizeof(struct mtk_rx_dma)) > 16)) {
1923 ring->dma[i].rxd5 = 0;
1924 ring->dma[i].rxd6 = 0;
1925 ring->dma[i].rxd7 = 0;
1926 ring->dma[i].rxd8 = 0;
1927 }
1928#endif
1929 }
1930 ring->dma_size = rx_dma_size;
1931 ring->calc_idx_update = false;
1932 ring->calc_idx = rx_dma_size - 1;
1933 ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ?
1934 MTK_QRX_CRX_IDX_CFG(ring_no) :
1935 MTK_PRX_CRX_IDX_CFG(ring_no);
developer77d03a72021-06-06 00:06:00 +08001936 ring->ring_no = ring_no;
developerfd40db22021-04-29 10:08:25 +08001937 /* make sure that all changes to the dma ring are flushed before we
1938 * continue
1939 */
1940 wmb();
1941
1942 if (rx_flag == MTK_RX_FLAGS_QDMA) {
1943 mtk_w32(eth, ring->phys, MTK_QRX_BASE_PTR_CFG(ring_no));
1944 mtk_w32(eth, rx_dma_size, MTK_QRX_MAX_CNT_CFG(ring_no));
1945 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1946 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_QDMA_RST_IDX);
1947 } else {
1948 mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
1949 mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
1950 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1951 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
1952 }
1953
1954 return 0;
1955}
1956
1957static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram)
1958{
1959 int i;
1960
1961 if (ring->data && ring->dma) {
1962 for (i = 0; i < ring->dma_size; i++) {
1963 if (!ring->data[i])
1964 continue;
1965 if (!ring->dma[i].rxd1)
1966 continue;
1967 dma_unmap_single(eth->dev,
1968 ring->dma[i].rxd1,
1969 ring->buf_size,
1970 DMA_FROM_DEVICE);
1971 skb_free_frag(ring->data[i]);
1972 }
1973 kfree(ring->data);
1974 ring->data = NULL;
1975 }
1976
1977	if (in_sram)
1978 return;
1979
1980 if (ring->dma) {
1981 dma_free_coherent(eth->dev,
1982 ring->dma_size * sizeof(*ring->dma),
1983 ring->dma,
1984 ring->phys);
1985 ring->dma = NULL;
1986 }
1987}
1988
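/* Program the HW LRO engine: put the LRO rings into auto-learn mode, set
 * the aggregation timers, counters and bandwidth threshold, and finally
 * turn LRO on via MTK_PDMA_LRO_CTRL_DW0.
 */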
1989static int mtk_hwlro_rx_init(struct mtk_eth *eth)
1990{
1991 int i;
developer77d03a72021-06-06 00:06:00 +08001992 u32 val;
developerfd40db22021-04-29 10:08:25 +08001993 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
1994 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
1995
1996 /* set LRO rings to auto-learn modes */
1997 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
1998
1999 /* validate LRO ring */
2000 ring_ctrl_dw2 |= MTK_RING_VLD;
2001
2002 /* set AGE timer (unit: 20us) */
2003 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2004 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2005
2006 /* set max AGG timer (unit: 20us) */
2007 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2008
2009 /* set max LRO AGG count */
2010 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2011 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2012
developer77d03a72021-06-06 00:06:00 +08002013 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002014 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2015 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2016 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2017 }
2018
2019 /* IPv4 checksum update enable */
2020 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2021
2022 /* switch priority comparison to packet count mode */
2023 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2024
2025 /* bandwidth threshold setting */
2026 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2027
2028 /* auto-learn score delta setting */
developer77d03a72021-06-06 00:06:00 +08002029 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_LRO_ALT_SCORE_DELTA);
developerfd40db22021-04-29 10:08:25 +08002030
2031 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2032 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2033 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2034
developerfd40db22021-04-29 10:08:25 +08002035 /* the minimal remaining room of SDL0 in RXD for lro aggregation */
2036 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2037
developer77d03a72021-06-06 00:06:00 +08002038 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2039 val = mtk_r32(eth, MTK_PDMA_RX_CFG);
2040 mtk_w32(eth, val | (MTK_PDMA_LRO_SDL << MTK_RX_CFG_SDL_OFFSET),
2041 MTK_PDMA_RX_CFG);
2042
2043 lro_ctrl_dw0 |= MTK_PDMA_LRO_SDL << MTK_CTRL_DW0_SDL_OFFSET;
2044 } else {
2045 /* set HW LRO mode & the max aggregation count for rx packets */
2046 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2047 }
2048
developerfd40db22021-04-29 10:08:25 +08002049 /* enable HW LRO */
2050 lro_ctrl_dw0 |= MTK_LRO_EN;
2051
developer77d03a72021-06-06 00:06:00 +08002052 /* enable cpu reason black list */
2053 lro_ctrl_dw0 |= MTK_LRO_CRSN_BNW;
2054
developerfd40db22021-04-29 10:08:25 +08002055 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2056 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2057
developer77d03a72021-06-06 00:06:00 +08002058	/* do not use the PPE CPU reasons */
2059 mtk_w32(eth, 0xffffffff, MTK_PDMA_LRO_CTRL_DW1);
2060
developerfd40db22021-04-29 10:08:25 +08002061 return 0;
2062}
2063
2064static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2065{
2066 int i;
2067 u32 val;
2068
2069 /* relinquish lro rings, flush aggregated packets */
developer77d03a72021-06-06 00:06:00 +08002070 mtk_w32(eth, MTK_LRO_RING_RELINGUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
developerfd40db22021-04-29 10:08:25 +08002071
2072	/* wait for the relinquish requests to complete */
2073 for (i = 0; i < 10; i++) {
2074 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
developer77d03a72021-06-06 00:06:00 +08002075 if (val & MTK_LRO_RING_RELINGUISH_DONE) {
developer8051e042022-04-08 13:26:36 +08002076 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08002077 continue;
2078 }
2079 break;
2080 }
2081
2082 /* invalidate lro rings */
developer77d03a72021-06-06 00:06:00 +08002083 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
developerfd40db22021-04-29 10:08:25 +08002084 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2085
2086 /* disable HW LRO */
2087 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2088}
2089
2090static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2091{
2092 u32 reg_val;
2093
developer77d03a72021-06-06 00:06:00 +08002094 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2095 idx += 1;
2096
developerfd40db22021-04-29 10:08:25 +08002097 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2098
2099 /* invalidate the IP setting */
2100 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2101
2102 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2103
2104 /* validate the IP setting */
2105 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2106}
2107
2108static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2109{
2110 u32 reg_val;
2111
developer77d03a72021-06-06 00:06:00 +08002112 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2113 idx += 1;
2114
developerfd40db22021-04-29 10:08:25 +08002115 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2116
2117 /* invalidate the IP setting */
2118 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2119
2120 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2121}
2122
2123static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2124{
2125 int cnt = 0;
2126 int i;
2127
2128 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2129 if (mac->hwlro_ip[i])
2130 cnt++;
2131 }
2132
2133 return cnt;
2134}
2135
2136static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2137 struct ethtool_rxnfc *cmd)
2138{
2139 struct ethtool_rx_flow_spec *fsp =
2140 (struct ethtool_rx_flow_spec *)&cmd->fs;
2141 struct mtk_mac *mac = netdev_priv(dev);
2142 struct mtk_eth *eth = mac->hw;
2143 int hwlro_idx;
2144
2145 if ((fsp->flow_type != TCP_V4_FLOW) ||
2146 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2147 (fsp->location > 1))
2148 return -EINVAL;
2149
2150 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2151 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2152
2153 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2154
2155 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2156
2157 return 0;
2158}
2159
2160static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2161 struct ethtool_rxnfc *cmd)
2162{
2163 struct ethtool_rx_flow_spec *fsp =
2164 (struct ethtool_rx_flow_spec *)&cmd->fs;
2165 struct mtk_mac *mac = netdev_priv(dev);
2166 struct mtk_eth *eth = mac->hw;
2167 int hwlro_idx;
2168
2169 if (fsp->location > 1)
2170 return -EINVAL;
2171
2172 mac->hwlro_ip[fsp->location] = 0;
2173 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2174
2175 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2176
2177 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2178
2179 return 0;
2180}
2181
2182static void mtk_hwlro_netdev_disable(struct net_device *dev)
2183{
2184 struct mtk_mac *mac = netdev_priv(dev);
2185 struct mtk_eth *eth = mac->hw;
2186 int i, hwlro_idx;
2187
2188 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2189 mac->hwlro_ip[i] = 0;
2190 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2191
2192 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2193 }
2194
2195 mac->hwlro_ip_cnt = 0;
2196}
2197
2198static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2199 struct ethtool_rxnfc *cmd)
2200{
2201 struct mtk_mac *mac = netdev_priv(dev);
2202 struct ethtool_rx_flow_spec *fsp =
2203 (struct ethtool_rx_flow_spec *)&cmd->fs;
2204
2205	/* only the TCP/IPv4 destination address is meaningful, other fields are ignored */
2206 fsp->flow_type = TCP_V4_FLOW;
2207 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2208 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2209
2210 fsp->h_u.tcp_ip4_spec.ip4src = 0;
2211 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2212 fsp->h_u.tcp_ip4_spec.psrc = 0;
2213 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2214 fsp->h_u.tcp_ip4_spec.pdst = 0;
2215 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2216 fsp->h_u.tcp_ip4_spec.tos = 0;
2217 fsp->m_u.tcp_ip4_spec.tos = 0xff;
2218
2219 return 0;
2220}
2221
2222static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2223 struct ethtool_rxnfc *cmd,
2224 u32 *rule_locs)
2225{
2226 struct mtk_mac *mac = netdev_priv(dev);
2227 int cnt = 0;
2228 int i;
2229
2230 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2231 if (mac->hwlro_ip[i]) {
2232 rule_locs[cnt] = i;
2233 cnt++;
2234 }
2235 }
2236
2237 cmd->rule_cnt = cnt;
2238
2239 return 0;
2240}
2241
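/* Enable receive-side scaling: select the IPv4/IPv6 hash types and the
 * indirection table size, then toggle MTK_RSS_CFG_REQ around MTK_RSS_EN so
 * the new configuration is applied while the engine is paused.
 */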
developer18f46a82021-07-20 21:08:21 +08002242static int mtk_rss_init(struct mtk_eth *eth)
2243{
2244 u32 val;
2245
2246 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2247 /* Set RSS rings to PSE modes */
2248 val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(1));
2249 val |= MTK_RING_PSE_MODE;
2250 mtk_w32(eth, val, MTK_LRO_CTRL_DW2_CFG(1));
2251
2252 /* Enable non-lro multiple rx */
2253 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2254 val |= MTK_NON_LRO_MULTI_EN;
2255 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2256
2257		/* Enable RSS delay interrupt support */
2258 val |= MTK_LRO_DLY_INT_EN;
2259 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2260
2261 /* Set RSS delay config int ring1 */
2262 mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX1_DLY_INT);
2263 }
2264
2265 /* Hash Type */
2266 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2267 val |= MTK_RSS_IPV4_STATIC_HASH;
2268 val |= MTK_RSS_IPV6_STATIC_HASH;
2269 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2270
2271 /* Select the size of indirection table */
2272 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW0);
2273 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW1);
2274 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW2);
2275 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW3);
2276 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW4);
2277 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW5);
2278 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW6);
2279 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW7);
2280
2281 /* Pause */
2282 val |= MTK_RSS_CFG_REQ;
2283 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2284
2285	/* Enable RSS */
2286 val |= MTK_RSS_EN;
2287 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2288
2289 /* Release pause */
2290 val &= ~(MTK_RSS_CFG_REQ);
2291 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2292
2293	/* Set per-RSS-ring group interrupt */
2294 mtk_w32(eth, MTK_RX_DONE_INT(MTK_RSS_RING1), MTK_PDMA_INT_GRP3);
2295
2296 /* Set GRP INT */
2297 mtk_w32(eth, 0x21021030, MTK_FE_INT_GRP);
2298
2299 return 0;
2300}
2301
2302static void mtk_rss_uninit(struct mtk_eth *eth)
2303{
2304 u32 val;
2305
2306 /* Pause */
2307 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2308 val |= MTK_RSS_CFG_REQ;
2309 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2310
2311	/* Disable RSS */
2312 val &= ~(MTK_RSS_EN);
2313 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2314
2315 /* Release pause */
2316 val &= ~(MTK_RSS_CFG_REQ);
2317 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2318}
2319
developerfd40db22021-04-29 10:08:25 +08002320static netdev_features_t mtk_fix_features(struct net_device *dev,
2321 netdev_features_t features)
2322{
2323 if (!(features & NETIF_F_LRO)) {
2324 struct mtk_mac *mac = netdev_priv(dev);
2325 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2326
2327 if (ip_cnt) {
2328 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
2329
2330 features |= NETIF_F_LRO;
2331 }
2332 }
2333
2334 if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
2335 netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n");
2336
2337 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2338 }
2339
2340 return features;
2341}
2342
2343static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2344{
2345 struct mtk_mac *mac = netdev_priv(dev);
2346 struct mtk_eth *eth = mac->hw;
2347 int err = 0;
2348
2349 if (!((dev->features ^ features) & MTK_SET_FEATURES))
2350 return 0;
2351
2352 if (!(features & NETIF_F_LRO))
2353 mtk_hwlro_netdev_disable(dev);
2354
2355 if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
2356 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
2357 else
2358 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
2359
2360 return err;
2361}
2362
2363/* wait for DMA to finish whatever it is doing before we start using it again */
2364static int mtk_dma_busy_wait(struct mtk_eth *eth)
2365{
2366 unsigned long t_start = jiffies;
2367
2368 while (1) {
2369 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2370 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
2371 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2372 return 0;
2373 } else {
2374 if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
2375 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2376 return 0;
2377 }
2378
2379 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
2380 break;
2381 }
2382
2383 dev_err(eth->dev, "DMA init timeout\n");
2384 return -1;
2385}
2386
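/* Bring up all DMA resources in order: the QDMA scratch memory, the TX
 * ring, the QDMA and normal RX rings, the optional HWLRO and RSS rings,
 * and finally the QDMA flow-control thresholds.
 */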
2387static int mtk_dma_init(struct mtk_eth *eth)
2388{
2389 int err;
2390 u32 i;
2391
2392 if (mtk_dma_busy_wait(eth))
2393 return -EBUSY;
2394
2395 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2396 /* QDMA needs scratch memory for internal reordering of the
2397 * descriptors
2398 */
2399 err = mtk_init_fq_dma(eth);
2400 if (err)
2401 return err;
2402 }
2403
2404 err = mtk_tx_alloc(eth);
2405 if (err)
2406 return err;
2407
2408 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2409 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2410 if (err)
2411 return err;
2412 }
2413
2414 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2415 if (err)
2416 return err;
2417
2418 if (eth->hwlro) {
developer77d03a72021-06-06 00:06:00 +08002419 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ? 4 : 1;
2420 for (; i < MTK_MAX_RX_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002421 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2422 if (err)
2423 return err;
2424 }
2425 err = mtk_hwlro_rx_init(eth);
2426 if (err)
2427 return err;
2428 }
2429
developer18f46a82021-07-20 21:08:21 +08002430 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2431 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2432 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_NORMAL);
2433 if (err)
2434 return err;
2435 }
2436 err = mtk_rss_init(eth);
2437 if (err)
2438 return err;
2439 }
2440
developerfd40db22021-04-29 10:08:25 +08002441 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2442 /* Enable random early drop and set drop threshold
2443 * automatically
2444 */
2445 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2446 FC_THRES_MIN, MTK_QDMA_FC_THRES);
2447 mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
2448 }
2449
2450 return 0;
2451}
2452
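/* Undo mtk_dma_init(): reset the netdev queues, free the scratch ring if
 * it is not in SRAM, clean up the TX, RX, HWLRO and RSS rings, and release
 * the scratch buffer.
 */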
2453static void mtk_dma_free(struct mtk_eth *eth)
2454{
2455 int i;
2456
2457 for (i = 0; i < MTK_MAC_COUNT; i++)
2458 if (eth->netdev[i])
2459 netdev_reset_queue(eth->netdev[i]);
2460	if (!eth->soc->has_sram && eth->scratch_ring) {
2461 dma_free_coherent(eth->dev,
2462 MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
2463 eth->scratch_ring,
2464 eth->phy_scratch_ring);
2465 eth->scratch_ring = NULL;
2466 eth->phy_scratch_ring = 0;
2467 }
2468 mtk_tx_clean(eth);
2469	mtk_rx_clean(eth, &eth->rx_ring[0], 1);
2470	mtk_rx_clean(eth, &eth->rx_ring_qdma, 0);
2471
2472 if (eth->hwlro) {
2473 mtk_hwlro_rx_uninit(eth);
developer77d03a72021-06-06 00:06:00 +08002474
2475 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ? 4 : 1;
2476 for (; i < MTK_MAX_RX_RING_NUM; i++)
2477 mtk_rx_clean(eth, &eth->rx_ring[i], 0);
developerfd40db22021-04-29 10:08:25 +08002478 }
2479
developer18f46a82021-07-20 21:08:21 +08002480 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2481 mtk_rss_uninit(eth);
2482
2483 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
2484 mtk_rx_clean(eth, &eth->rx_ring[i], 1);
2485 }
2486
developer94008d92021-09-23 09:47:41 +08002487 if (eth->scratch_head) {
2488 kfree(eth->scratch_head);
2489 eth->scratch_head = NULL;
2490 }
developerfd40db22021-04-29 10:08:25 +08002491}
2492
2493static void mtk_tx_timeout(struct net_device *dev)
2494{
2495 struct mtk_mac *mac = netdev_priv(dev);
2496 struct mtk_eth *eth = mac->hw;
2497
2498 eth->netdev[mac->id]->stats.tx_errors++;
2499 netif_err(eth, tx_err, dev,
2500 "transmit timed out\n");
developer8051e042022-04-08 13:26:36 +08002501
2502 if (atomic_read(&reset_lock) == 0)
2503 schedule_work(&eth->pending_work);
developerfd40db22021-04-29 10:08:25 +08002504}
2505
developer18f46a82021-07-20 21:08:21 +08002506static irqreturn_t mtk_handle_irq_rx(int irq, void *priv)
developerfd40db22021-04-29 10:08:25 +08002507{
developer18f46a82021-07-20 21:08:21 +08002508 struct mtk_napi *rx_napi = priv;
2509 struct mtk_eth *eth = rx_napi->eth;
2510 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08002511
developer18f46a82021-07-20 21:08:21 +08002512 if (likely(napi_schedule_prep(&rx_napi->napi))) {
developer18f46a82021-07-20 21:08:21 +08002513 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(ring->ring_no));
developer6bbe70d2021-08-06 09:34:55 +08002514 __napi_schedule(&rx_napi->napi);
developerfd40db22021-04-29 10:08:25 +08002515 }
2516
2517 return IRQ_HANDLED;
2518}
2519
2520static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
2521{
2522 struct mtk_eth *eth = _eth;
2523
2524 if (likely(napi_schedule_prep(&eth->tx_napi))) {
developerfd40db22021-04-29 10:08:25 +08002525 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer6bbe70d2021-08-06 09:34:55 +08002526 __napi_schedule(&eth->tx_napi);
developerfd40db22021-04-29 10:08:25 +08002527 }
2528
2529 return IRQ_HANDLED;
2530}
2531
2532static irqreturn_t mtk_handle_irq(int irq, void *_eth)
2533{
2534 struct mtk_eth *eth = _eth;
2535
developer18f46a82021-07-20 21:08:21 +08002536 if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT(0)) {
2537 if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT(0))
2538 mtk_handle_irq_rx(irq, &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08002539 }
2540 if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
2541 if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
2542 mtk_handle_irq_tx(irq, _eth);
2543 }
2544
2545 return IRQ_HANDLED;
2546}
2547
2548#ifdef CONFIG_NET_POLL_CONTROLLER
2549static void mtk_poll_controller(struct net_device *dev)
2550{
2551 struct mtk_mac *mac = netdev_priv(dev);
2552 struct mtk_eth *eth = mac->hw;
2553
2554 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002555 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
2556 mtk_handle_irq_rx(eth->irq[2], &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08002557 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002558 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08002559}
2560#endif
2561
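/* Initialize the rings via mtk_dma_init() and enable the TX/RX DMA
 * engines; the extended QDMA options (WCOMP, multi-descriptor count,
 * reserved buffer size) are only applied on NETSYS v2 class SoCs.
 */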
2562static int mtk_start_dma(struct mtk_eth *eth)
2563{
2564 u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
developer77d03a72021-06-06 00:06:00 +08002565 int val, err;
developerfd40db22021-04-29 10:08:25 +08002566
2567 err = mtk_dma_init(eth);
2568 if (err) {
2569 mtk_dma_free(eth);
2570 return err;
2571 }
2572
2573 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
developer15d0d282021-07-14 16:40:44 +08002574 val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
developer19d84562022-04-21 17:01:06 +08002575 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2576 val &= ~MTK_RESV_BUF_MASK;
developerfd40db22021-04-29 10:08:25 +08002577 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002578 val | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08002579 MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
2580 MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
2581 MTK_RESV_BUF | MTK_WCOMP_EN |
2582 MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
2583 MTK_RX_2B_OFFSET, MTK_QDMA_GLO_CFG);
2584		} else {
2585			mtk_w32(eth,
2586				val | MTK_TX_DMA_EN |
2587				MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
2588				MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
2589				MTK_RX_BT_32DWORDS,
2590				MTK_QDMA_GLO_CFG);
2591		}
2592
developer15d0d282021-07-14 16:40:44 +08002593 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
developerfd40db22021-04-29 10:08:25 +08002594 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002595 val | MTK_RX_DMA_EN | rx_2b_offset |
developerfd40db22021-04-29 10:08:25 +08002596 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
2597 MTK_PDMA_GLO_CFG);
2598 } else {
2599 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2600 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
2601 MTK_PDMA_GLO_CFG);
2602 }
2603
developer77d03a72021-06-06 00:06:00 +08002604 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) && eth->hwlro) {
2605 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
2606 mtk_w32(eth, val | MTK_RX_DMA_LRO_EN, MTK_PDMA_GLO_CFG);
2607 }
2608
developerfd40db22021-04-29 10:08:25 +08002609 return 0;
2610}
2611
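/* Point every GDM forwarding port at the destination given by @config
 * (e.g. MTK_GDMA_TO_PDMA or MTK_GDMA_DROP_ALL), keep RX checksum offload
 * enabled and enable the MTK special tag for DSA-attached ports.
 */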
developer8051e042022-04-08 13:26:36 +08002612void mtk_gdm_config(struct mtk_eth *eth, u32 config)
developerfd40db22021-04-29 10:08:25 +08002613{
2614 int i;
2615
2616 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2617 return;
2618
2619 for (i = 0; i < MTK_MAC_COUNT; i++) {
2620 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
2621
2622		/* by default, set up the forwarding port to send frames to the PDMA */
2623 val &= ~0xffff;
2624
2625 /* Enable RX checksum */
2626 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
2627
2628 val |= config;
2629
2630 if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
2631 val |= MTK_GDMA_SPECIAL_TAG;
2632
2633 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
2634 }
developerfd40db22021-04-29 10:08:25 +08002635}
2636
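/* ndo_open: attach the PHY through phylink, start DMA and NAPI on first
 * use (both netdevs share one set of rings), enable the done interrupts
 * and, when no phy-handle is present, power up the SGMII QPHY.
 */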
2637static int mtk_open(struct net_device *dev)
2638{
2639 struct mtk_mac *mac = netdev_priv(dev);
2640 struct mtk_eth *eth = mac->hw;
developer18f46a82021-07-20 21:08:21 +08002641 int err, i;
developer3a5969e2022-02-09 15:36:36 +08002642 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08002643
2644 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
2645 if (err) {
2646 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
2647 err);
2648 return err;
2649 }
2650
2651 /* we run 2 netdevs on the same dma ring so we only bring it up once */
2652 if (!refcount_read(&eth->dma_refcnt)) {
2653 int err = mtk_start_dma(eth);
2654
2655 if (err)
2656 return err;
2657
2658 mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
2659
2660		/* Tell the CDM to parse the MTK special tag on frames from the CPU */
2661 if (netdev_uses_dsa(dev)) {
2662 u32 val;
2663 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
2664 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
2665 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
2666 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
2667 }
2668
2669 napi_enable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08002670 napi_enable(&eth->rx_napi[0].napi);
developerfd40db22021-04-29 10:08:25 +08002671 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002672 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
2673
2674 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2675 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2676 napi_enable(&eth->rx_napi[i].napi);
2677 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(i));
2678 }
2679 }
2680
developerfd40db22021-04-29 10:08:25 +08002681 refcount_set(&eth->dma_refcnt, 1);
2682	} else {
2683		refcount_inc(&eth->dma_refcnt);
2684	}
2685
2686 phylink_start(mac->phylink);
2687 netif_start_queue(dev);
developer3a5969e2022-02-09 15:36:36 +08002688 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
2689 if (!phy_node) {
developer1a63ef92022-04-15 17:17:32 +08002690 regmap_write(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
developer3a5969e2022-02-09 15:36:36 +08002691 }
developerfd40db22021-04-29 10:08:25 +08002692 return 0;
2693}
2694
2695static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
2696{
2697 u32 val;
2698 int i;
2699
2700 /* stop the dma engine */
2701 spin_lock_bh(&eth->page_lock);
2702 val = mtk_r32(eth, glo_cfg);
2703 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
2704 glo_cfg);
2705 spin_unlock_bh(&eth->page_lock);
2706
2707 /* wait for dma stop */
2708 for (i = 0; i < 10; i++) {
2709 val = mtk_r32(eth, glo_cfg);
2710 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
developer8051e042022-04-08 13:26:36 +08002711 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08002712 continue;
2713 }
2714 break;
2715 }
2716}
2717
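/* ndo_stop: power down the PHY (or the SGMII QPHY when no phy-handle is
 * present), disable GMAC RX, stop phylink and NAPI, and shut the DMA
 * engines down once the last user is gone.
 */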
2718static int mtk_stop(struct net_device *dev)
2719{
2720 struct mtk_mac *mac = netdev_priv(dev);
2721 struct mtk_eth *eth = mac->hw;
developer18f46a82021-07-20 21:08:21 +08002722 int i;
developer3a5969e2022-02-09 15:36:36 +08002723 u32 val = 0;
2724 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08002725
2726 netif_tx_disable(dev);
2727
developer3a5969e2022-02-09 15:36:36 +08002728 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
2729 if (phy_node) {
2730 val = _mtk_mdio_read(eth, 0, 0);
2731 val |= BMCR_PDOWN;
2732 _mtk_mdio_write(eth, 0, 0, val);
2733 }else {
developer1a63ef92022-04-15 17:17:32 +08002734 regmap_read(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
developer3a5969e2022-02-09 15:36:36 +08002735 val |= SGMII_PHYA_PWD;
developer1a63ef92022-04-15 17:17:32 +08002736 regmap_write(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
developer3a5969e2022-02-09 15:36:36 +08002737 }
2738	/* disable GMAC RX */
2739 //GMAC RX disable
2740 val = mtk_r32(eth, MTK_MAC_MCR(mac->id));
2741 mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(mac->id));
2742
2743 phylink_stop(mac->phylink);
2744
developerfd40db22021-04-29 10:08:25 +08002745 phylink_disconnect_phy(mac->phylink);
2746
2747	/* only shut down DMA if this is the last user */
2748 if (!refcount_dec_and_test(&eth->dma_refcnt))
2749 return 0;
2750
2751 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
2752
2753 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002754 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08002755 napi_disable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08002756 napi_disable(&eth->rx_napi[0].napi);
2757
2758 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2759 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2760 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(i));
2761 napi_disable(&eth->rx_napi[i].napi);
2762 }
2763 }
developerfd40db22021-04-29 10:08:25 +08002764
2765 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2766 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
2767 mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
2768
2769 mtk_dma_free(eth);
2770
2771 return 0;
2772}
2773
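/* Assert the requested reset bits in ETHSYS_RSTCTRL, poll (for up to ~5s)
 * until the hardware reports them as set, then deassert the bits and give
 * the block 10ms to settle.
 */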
developer8051e042022-04-08 13:26:36 +08002774void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
developerfd40db22021-04-29 10:08:25 +08002775{
developer8051e042022-04-08 13:26:36 +08002776 u32 val = 0, i = 0;
developerfd40db22021-04-29 10:08:25 +08002777
developerfd40db22021-04-29 10:08:25 +08002778 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
developer8051e042022-04-08 13:26:36 +08002779 reset_bits, reset_bits);
2780
2781 while (i++ < 5000) {
2782 mdelay(1);
2783 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
2784
2785 if ((val & reset_bits) == reset_bits) {
2786 mtk_reset_event_update(eth, MTK_EVENT_COLD_CNT);
2787 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2788 reset_bits, ~reset_bits);
2789 break;
2790 }
2791 }
2792
developerfd40db22021-04-29 10:08:25 +08002793 mdelay(10);
2794}
2795
2796static void mtk_clk_disable(struct mtk_eth *eth)
2797{
2798 int clk;
2799
2800 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
2801 clk_disable_unprepare(eth->clks[clk]);
2802}
2803
2804static int mtk_clk_enable(struct mtk_eth *eth)
2805{
2806 int clk, ret;
2807
2808 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
2809 ret = clk_prepare_enable(eth->clks[clk]);
2810 if (ret)
2811 goto err_disable_clks;
2812 }
2813
2814 return 0;
2815
2816err_disable_clks:
2817 while (--clk >= 0)
2818 clk_disable_unprepare(eth->clks[clk]);
2819
2820 return ret;
2821}
2822
developer18f46a82021-07-20 21:08:21 +08002823static int mtk_napi_init(struct mtk_eth *eth)
2824{
2825 struct mtk_napi *rx_napi = &eth->rx_napi[0];
2826 int i;
2827
2828 rx_napi->eth = eth;
2829 rx_napi->rx_ring = &eth->rx_ring[0];
2830 rx_napi->irq_grp_no = 2;
2831
2832 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2833 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2834 rx_napi = &eth->rx_napi[i];
2835 rx_napi->eth = eth;
2836 rx_napi->rx_ring = &eth->rx_ring[i];
2837 rx_napi->irq_grp_no = 2 + i;
2838 }
2839 }
2840
2841 return 0;
2842}
2843
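/* Hardware init for both cold boot and SER recovery. Outside of a SER
 * reset (reset_lock clear) this enables runtime PM and the clocks first;
 * both paths then issue a cold or warm FE reset and reprogram the PSE/GDM
 * thresholds, interrupt grouping and per-MAC link-down defaults.
 */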
developer8051e042022-04-08 13:26:36 +08002844static int mtk_hw_init(struct mtk_eth *eth, u32 type)
developerfd40db22021-04-29 10:08:25 +08002845{
developer8051e042022-04-08 13:26:36 +08002846 int i, ret = 0;
developerfd40db22021-04-29 10:08:25 +08002847
developer8051e042022-04-08 13:26:36 +08002848 pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
2849 atomic_read(&reset_lock), atomic_read(&force));
developerfd40db22021-04-29 10:08:25 +08002850
developer8051e042022-04-08 13:26:36 +08002851 if (atomic_read(&reset_lock) == 0) {
2852 if (test_and_set_bit(MTK_HW_INIT, &eth->state))
2853 return 0;
developerfd40db22021-04-29 10:08:25 +08002854
developer8051e042022-04-08 13:26:36 +08002855 pm_runtime_enable(eth->dev);
2856 pm_runtime_get_sync(eth->dev);
2857
2858 ret = mtk_clk_enable(eth);
2859 if (ret)
2860 goto err_disable_pm;
2861 }
developerfd40db22021-04-29 10:08:25 +08002862
2863 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2864 ret = device_reset(eth->dev);
2865 if (ret) {
2866 dev_err(eth->dev, "MAC reset failed!\n");
2867 goto err_disable_pm;
2868 }
2869
2870 /* enable interrupt delay for RX */
2871 mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
2872
2873 /* disable delay and normal interrupt */
2874 mtk_tx_irq_disable(eth, ~0);
2875 mtk_rx_irq_disable(eth, ~0);
2876
2877 return 0;
2878 }
2879
developer8051e042022-04-08 13:26:36 +08002880 pr_info("[%s] execute fe %s reset\n", __func__,
2881 (type == MTK_TYPE_WARM_RESET) ? "warm" : "cold");
developer545abf02021-07-15 17:47:01 +08002882
developer8051e042022-04-08 13:26:36 +08002883 if (type == MTK_TYPE_WARM_RESET)
2884 mtk_eth_warm_reset(eth);
developer545abf02021-07-15 17:47:01 +08002885 else
developer8051e042022-04-08 13:26:36 +08002886 mtk_eth_cold_reset(eth);
developer545abf02021-07-15 17:47:01 +08002887
2888 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developer545abf02021-07-15 17:47:01 +08002889 /* Set FE to PDMAv2 if necessary */
developerfd40db22021-04-29 10:08:25 +08002890 mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
developer545abf02021-07-15 17:47:01 +08002891 }
developerfd40db22021-04-29 10:08:25 +08002892
2893 if (eth->pctl) {
2894 /* Set GE2 driving and slew rate */
2895 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
2896
2897 /* set GE2 TDSEL */
2898 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
2899
2900 /* set GE2 TUNE */
2901 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
2902 }
2903
2904	/* Set link-down as the default for each GMAC. Each MAC's MCR will be
2905	 * set up with a more appropriate value when mtk_mac_config() is
2906	 * invoked.
2907	 */
2908 for (i = 0; i < MTK_MAC_COUNT; i++)
2909 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
2910
2911 /* Enable RX VLan Offloading */
developer41294e32021-05-07 16:11:23 +08002912 if (eth->soc->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
2913 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
2914 else
2915 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
developerfd40db22021-04-29 10:08:25 +08002916
2917 /* enable interrupt delay for RX/TX */
2918 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
2919 mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);
2920
2921 mtk_tx_irq_disable(eth, ~0);
2922 mtk_rx_irq_disable(eth, ~0);
2923
2924 /* FE int grouping */
2925 mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
developer18f46a82021-07-20 21:08:21 +08002926 mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_PDMA_INT_GRP2);
developerfd40db22021-04-29 10:08:25 +08002927 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
developer18f46a82021-07-20 21:08:21 +08002928 mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_QDMA_INT_GRP2);
developer8051e042022-04-08 13:26:36 +08002929 mtk_w32(eth, 0x21021003, MTK_FE_INT_GRP);
2930 mtk_w32(eth, MTK_FE_INT_FQ_EMPTY | MTK_FE_INT_TSO_FAIL |
2931 MTK_FE_INT_TSO_ILLEGAL | MTK_FE_INT_TSO_ALIGN |
2932 MTK_FE_INT_RFIFO_OV | MTK_FE_INT_RFIFO_UF, MTK_FE_INT_ENABLE);
developerfd40db22021-04-29 10:08:25 +08002933
developera2bdbd52021-05-31 19:10:17 +08002934 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfef9efd2021-06-16 18:28:09 +08002935 /* PSE Free Queue Flow Control */
2936 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
2937
developer81bcad32021-07-15 14:14:38 +08002938 /* PSE should not drop port8 and port9 packets */
2939 mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
2940
developerfef9efd2021-06-16 18:28:09 +08002941 /* PSE config input queue threshold */
developerfd40db22021-04-29 10:08:25 +08002942 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
2943 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
2944 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
2945 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
2946 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
2947 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
2948 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
developerfd5f9152022-01-05 16:29:42 +08002949 mtk_w32(eth, 0x002a000e, PSE_IQ_REV(8));
developerfd40db22021-04-29 10:08:25 +08002950
developerfef9efd2021-06-16 18:28:09 +08002951 /* PSE config output queue threshold */
developerfd40db22021-04-29 10:08:25 +08002952 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
2953 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
2954 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
2955 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
2956 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
2957 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
2958 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
2959 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
developerfef9efd2021-06-16 18:28:09 +08002960
2961 /* GDM and CDM Threshold */
2962 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
2963 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
2964 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
2965 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
2966 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
2967 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
developerfd40db22021-04-29 10:08:25 +08002968 }
2969
2970 return 0;
2971
2972err_disable_pm:
2973 pm_runtime_put_sync(eth->dev);
2974 pm_runtime_disable(eth->dev);
2975
2976 return ret;
2977}
2978
2979static int mtk_hw_deinit(struct mtk_eth *eth)
2980{
2981 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
2982 return 0;
2983
2984 mtk_clk_disable(eth);
2985
2986 pm_runtime_put_sync(eth->dev);
2987 pm_runtime_disable(eth->dev);
2988
2989 return 0;
2990}
2991
2992static int __init mtk_init(struct net_device *dev)
2993{
2994 struct mtk_mac *mac = netdev_priv(dev);
2995 struct mtk_eth *eth = mac->hw;
2996 const char *mac_addr;
2997
2998 mac_addr = of_get_mac_address(mac->of_node);
2999 if (!IS_ERR(mac_addr))
3000 ether_addr_copy(dev->dev_addr, mac_addr);
3001
3002 /* If the mac address is invalid, use random mac address */
3003 if (!is_valid_ether_addr(dev->dev_addr)) {
3004 eth_hw_addr_random(dev);
3005 dev_err(eth->dev, "generated random MAC address %pM\n",
3006 dev->dev_addr);
3007 }
3008
3009 return 0;
3010}
3011
3012static void mtk_uninit(struct net_device *dev)
3013{
3014 struct mtk_mac *mac = netdev_priv(dev);
3015 struct mtk_eth *eth = mac->hw;
3016
3017 phylink_disconnect_phy(mac->phylink);
3018 mtk_tx_irq_disable(eth, ~0);
3019 mtk_rx_irq_disable(eth, ~0);
3020}
3021
3022static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3023{
3024 struct mtk_mac *mac = netdev_priv(dev);
3025
3026 switch (cmd) {
3027 case SIOCGMIIPHY:
3028 case SIOCGMIIREG:
3029 case SIOCSMIIREG:
3030 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
3031 default:
3032		/* by default, invoke the mtk_eth_dbg handler */
3033 return mtk_do_priv_ioctl(dev, ifr, cmd);
3034 break;
3035 }
3036
3037 return -EOPNOTSUPP;
3038}
3039
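/* Deferred SER handler (scheduled e.g. from mtk_tx_timeout()): force the
 * FE ports down, let WiFi run its own SER, stop all netdevs, warm-reset
 * the frontend, then reopen the devices and notify HNAT/WiFi that the
 * reset is done.
 */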
3040static void mtk_pending_work(struct work_struct *work)
3041{
3042 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
developer8051e042022-04-08 13:26:36 +08003043 struct device_node *phy_node = NULL;
3044 struct mtk_mac *mac = NULL;
3045 int err, i = 0;
developerfd40db22021-04-29 10:08:25 +08003046 unsigned long restart = 0;
developer8051e042022-04-08 13:26:36 +08003047 u32 val = 0;
3048
3049 atomic_inc(&reset_lock);
3050 val = mtk_r32(eth, MTK_FE_INT_STATUS);
3051 if (!mtk_check_reset_event(eth, val)) {
3052 atomic_dec(&reset_lock);
3053 pr_info("[%s] No need to do FE reset !\n", __func__);
3054 return;
3055 }
developerfd40db22021-04-29 10:08:25 +08003056
3057 rtnl_lock();
3058
developer8051e042022-04-08 13:26:36 +08003059	/* Disable FE P3 and P4 */
3060 val = mtk_r32(eth, MTK_FE_GLO_CFG);
3061 val |= MTK_FE_LINK_DOWN_P3;
3062 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3063 val |= MTK_FE_LINK_DOWN_P4;
3064 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3065
3066 /* Adjust PPE configurations to prepare for reset */
3067 mtk_prepare_reset_ppe(eth, 0);
3068 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3069 mtk_prepare_reset_ppe(eth, 1);
3070
3071 /* Adjust FE configurations to prepare for reset */
3072 mtk_prepare_reset_fe(eth);
3073
3074 /* Trigger Wifi SER reset */
3075 call_netdevice_notifiers(MTK_FE_START_RESET, eth->netdev[0]);
3076 rtnl_unlock();
3077 wait_for_completion_timeout(&wait_ser_done, 5000);
3078 rtnl_lock();
developerfd40db22021-04-29 10:08:25 +08003079
3080 while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
3081 cpu_relax();
3082
developer8051e042022-04-08 13:26:36 +08003083 del_timer_sync(&eth->mtk_dma_monitor_timer);
3084 pr_info("[%s] mtk_stop starts !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003085 /* stop all devices to make sure that dma is properly shut down */
3086 for (i = 0; i < MTK_MAC_COUNT; i++) {
3087 if (!eth->netdev[i])
3088 continue;
3089 mtk_stop(eth->netdev[i]);
3090 __set_bit(i, &restart);
3091 }
developer8051e042022-04-08 13:26:36 +08003092 pr_info("[%s] mtk_stop ends !\n", __func__);
3093 mdelay(15);
developerfd40db22021-04-29 10:08:25 +08003094
3095 if (eth->dev->pins)
3096 pinctrl_select_state(eth->dev->pins->p,
3097 eth->dev->pins->default_state);
developer8051e042022-04-08 13:26:36 +08003098
3099 pr_info("[%s] mtk_hw_init starts !\n", __func__);
3100 mtk_hw_init(eth, MTK_TYPE_WARM_RESET);
3101 pr_info("[%s] mtk_hw_init ends !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003102
3103 /* restart DMA and enable IRQs */
3104 for (i = 0; i < MTK_MAC_COUNT; i++) {
3105 if (!test_bit(i, &restart))
3106 continue;
3107 err = mtk_open(eth->netdev[i]);
3108 if (err) {
3109 netif_alert(eth, ifup, eth->netdev[i],
3110 "Driver up/down cycle failed, closing device.\n");
3111 dev_close(eth->netdev[i]);
3112 }
3113 }
3114
developer8051e042022-04-08 13:26:36 +08003115 /* Set KA tick select */
3116 mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0, MTK_PPE_TB_CFG(0));
3117 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3118 mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0, MTK_PPE_TB_CFG(1));
3119
3120	/* Enable FE P3 and P4 */
3121 val = mtk_r32(eth, MTK_FE_GLO_CFG);
3122 val &= ~MTK_FE_LINK_DOWN_P3;
3123 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3124 val &= ~MTK_FE_LINK_DOWN_P4;
3125 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3126
3127 /* Power up sgmii */
3128 for (i = 0; i < MTK_MAC_COUNT; i++) {
3129 mac = netdev_priv(eth->netdev[i]);
3130 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
3131 if (!phy_node) {
3132 mtk_gmac_sgmii_path_setup(eth, i);
3133 regmap_write(eth->sgmii->regmap[i], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
3134 }
3135 }
3136
3137 call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE, eth->netdev[0]);
3138 pr_info("[%s] HNAT reset done !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003139
developer8051e042022-04-08 13:26:36 +08003140 call_netdevice_notifiers(MTK_FE_RESET_DONE, eth->netdev[0]);
3141 pr_info("[%s] WiFi SER reset done !\n", __func__);
3142
3143 atomic_dec(&reset_lock);
3144 if (atomic_read(&force) > 0)
3145 atomic_dec(&force);
3146
3147 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
3148 eth->mtk_dma_monitor_timer.expires = jiffies;
3149 add_timer(&eth->mtk_dma_monitor_timer);
developerfd40db22021-04-29 10:08:25 +08003150 clear_bit_unlock(MTK_RESETTING, &eth->state);
3151
3152 rtnl_unlock();
3153}
3154
3155static int mtk_free_dev(struct mtk_eth *eth)
3156{
3157 int i;
3158
3159 for (i = 0; i < MTK_MAC_COUNT; i++) {
3160 if (!eth->netdev[i])
3161 continue;
3162 free_netdev(eth->netdev[i]);
3163 }
3164
3165 return 0;
3166}
3167
3168static int mtk_unreg_dev(struct mtk_eth *eth)
3169{
3170 int i;
3171
3172 for (i = 0; i < MTK_MAC_COUNT; i++) {
3173 if (!eth->netdev[i])
3174 continue;
3175 unregister_netdev(eth->netdev[i]);
3176 }
3177
3178 return 0;
3179}
3180
3181static int mtk_cleanup(struct mtk_eth *eth)
3182{
3183 mtk_unreg_dev(eth);
3184 mtk_free_dev(eth);
3185 cancel_work_sync(&eth->pending_work);
3186
3187 return 0;
3188}
3189
3190static int mtk_get_link_ksettings(struct net_device *ndev,
3191 struct ethtool_link_ksettings *cmd)
3192{
3193 struct mtk_mac *mac = netdev_priv(ndev);
3194
3195 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3196 return -EBUSY;
3197
3198 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
3199}
3200
3201static int mtk_set_link_ksettings(struct net_device *ndev,
3202 const struct ethtool_link_ksettings *cmd)
3203{
3204 struct mtk_mac *mac = netdev_priv(ndev);
3205
3206 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3207 return -EBUSY;
3208
3209 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
3210}
3211
3212static void mtk_get_drvinfo(struct net_device *dev,
3213 struct ethtool_drvinfo *info)
3214{
3215 struct mtk_mac *mac = netdev_priv(dev);
3216
3217 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
3218 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
3219 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
3220}
3221
3222static u32 mtk_get_msglevel(struct net_device *dev)
3223{
3224 struct mtk_mac *mac = netdev_priv(dev);
3225
3226 return mac->hw->msg_enable;
3227}
3228
3229static void mtk_set_msglevel(struct net_device *dev, u32 value)
3230{
3231 struct mtk_mac *mac = netdev_priv(dev);
3232
3233 mac->hw->msg_enable = value;
3234}
3235
3236static int mtk_nway_reset(struct net_device *dev)
3237{
3238 struct mtk_mac *mac = netdev_priv(dev);
3239
3240 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3241 return -EBUSY;
3242
3243 if (!mac->phylink)
3244 return -ENOTSUPP;
3245
3246 return phylink_ethtool_nway_reset(mac->phylink);
3247}
3248
3249static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3250{
3251 int i;
3252
3253 switch (stringset) {
3254 case ETH_SS_STATS:
3255 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
3256 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
3257 data += ETH_GSTRING_LEN;
3258 }
3259 break;
3260 }
3261}
3262
3263static int mtk_get_sset_count(struct net_device *dev, int sset)
3264{
3265 switch (sset) {
3266 case ETH_SS_STATS:
3267 return ARRAY_SIZE(mtk_ethtool_stats);
3268 default:
3269 return -EOPNOTSUPP;
3270 }
3271}
3272
3273static void mtk_get_ethtool_stats(struct net_device *dev,
3274 struct ethtool_stats *stats, u64 *data)
3275{
3276 struct mtk_mac *mac = netdev_priv(dev);
3277 struct mtk_hw_stats *hwstats = mac->hw_stats;
3278 u64 *data_src, *data_dst;
3279 unsigned int start;
3280 int i;
3281
3282 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3283 return;
3284
3285 if (netif_running(dev) && netif_device_present(dev)) {
3286 if (spin_trylock_bh(&hwstats->stats_lock)) {
3287 mtk_stats_update_mac(mac);
3288 spin_unlock_bh(&hwstats->stats_lock);
3289 }
3290 }
3291
3292 data_src = (u64 *)hwstats;
3293
3294 do {
3295 data_dst = data;
3296 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
3297
3298 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
3299 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
3300 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
3301}
3302
3303static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
3304 u32 *rule_locs)
3305{
3306 int ret = -EOPNOTSUPP;
3307
3308 switch (cmd->cmd) {
3309 case ETHTOOL_GRXRINGS:
3310 if (dev->hw_features & NETIF_F_LRO) {
3311 cmd->data = MTK_MAX_RX_RING_NUM;
3312 ret = 0;
3313 }
3314 break;
3315 case ETHTOOL_GRXCLSRLCNT:
3316 if (dev->hw_features & NETIF_F_LRO) {
3317 struct mtk_mac *mac = netdev_priv(dev);
3318
3319 cmd->rule_cnt = mac->hwlro_ip_cnt;
3320 ret = 0;
3321 }
3322 break;
3323 case ETHTOOL_GRXCLSRULE:
3324 if (dev->hw_features & NETIF_F_LRO)
3325 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
3326 break;
3327 case ETHTOOL_GRXCLSRLALL:
3328 if (dev->hw_features & NETIF_F_LRO)
3329 ret = mtk_hwlro_get_fdir_all(dev, cmd,
3330 rule_locs);
3331 break;
3332 default:
3333 break;
3334 }
3335
3336 return ret;
3337}
3338
3339static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3340{
3341 int ret = -EOPNOTSUPP;
3342
3343 switch (cmd->cmd) {
3344 case ETHTOOL_SRXCLSRLINS:
3345 if (dev->hw_features & NETIF_F_LRO)
3346 ret = mtk_hwlro_add_ipaddr(dev, cmd);
3347 break;
3348 case ETHTOOL_SRXCLSRLDEL:
3349 if (dev->hw_features & NETIF_F_LRO)
3350 ret = mtk_hwlro_del_ipaddr(dev, cmd);
3351 break;
3352 default:
3353 break;
3354 }
3355
3356 return ret;
3357}
3358
3359static const struct ethtool_ops mtk_ethtool_ops = {
3360 .get_link_ksettings = mtk_get_link_ksettings,
3361 .set_link_ksettings = mtk_set_link_ksettings,
3362 .get_drvinfo = mtk_get_drvinfo,
3363 .get_msglevel = mtk_get_msglevel,
3364 .set_msglevel = mtk_set_msglevel,
3365 .nway_reset = mtk_nway_reset,
3366 .get_link = ethtool_op_get_link,
3367 .get_strings = mtk_get_strings,
3368 .get_sset_count = mtk_get_sset_count,
3369 .get_ethtool_stats = mtk_get_ethtool_stats,
3370 .get_rxnfc = mtk_get_rxnfc,
3371 .set_rxnfc = mtk_set_rxnfc,
3372};
3373
3374static const struct net_device_ops mtk_netdev_ops = {
3375 .ndo_init = mtk_init,
3376 .ndo_uninit = mtk_uninit,
3377 .ndo_open = mtk_open,
3378 .ndo_stop = mtk_stop,
3379 .ndo_start_xmit = mtk_start_xmit,
3380 .ndo_set_mac_address = mtk_set_mac_address,
3381 .ndo_validate_addr = eth_validate_addr,
3382 .ndo_do_ioctl = mtk_do_ioctl,
3383 .ndo_tx_timeout = mtk_tx_timeout,
3384 .ndo_get_stats64 = mtk_get_stats64,
3385 .ndo_fix_features = mtk_fix_features,
3386 .ndo_set_features = mtk_set_features,
3387#ifdef CONFIG_NET_POLL_CONTROLLER
3388 .ndo_poll_controller = mtk_poll_controller,
3389#endif
3390};
3391
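/* Create one MAC/netdev from a "mediatek,eth-mac" DT node: allocate the
 * net_device and hardware counters, create a phylink instance from the
 * node's phy-mode, and hook up the netdev and ethtool ops.
 */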
3392static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
3393{
3394 const __be32 *_id = of_get_property(np, "reg", NULL);
3395 struct phylink *phylink;
3396 int phy_mode, id, err;
3397 struct mtk_mac *mac;
3398
3399 if (!_id) {
3400 dev_err(eth->dev, "missing mac id\n");
3401 return -EINVAL;
3402 }
3403
3404 id = be32_to_cpup(_id);
developerfb556ca2021-10-13 10:52:09 +08003405 if (id < 0 || id >= MTK_MAC_COUNT) {
developerfd40db22021-04-29 10:08:25 +08003406 dev_err(eth->dev, "%d is not a valid mac id\n", id);
3407 return -EINVAL;
3408 }
3409
3410 if (eth->netdev[id]) {
3411 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
3412 return -EINVAL;
3413 }
3414
3415 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
3416 if (!eth->netdev[id]) {
3417 dev_err(eth->dev, "alloc_etherdev failed\n");
3418 return -ENOMEM;
3419 }
3420 mac = netdev_priv(eth->netdev[id]);
3421 eth->mac[id] = mac;
3422 mac->id = id;
3423 mac->hw = eth;
3424 mac->of_node = np;
3425
3426 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
3427 mac->hwlro_ip_cnt = 0;
3428
3429 mac->hw_stats = devm_kzalloc(eth->dev,
3430 sizeof(*mac->hw_stats),
3431 GFP_KERNEL);
3432 if (!mac->hw_stats) {
3433 dev_err(eth->dev, "failed to allocate counter memory\n");
3434 err = -ENOMEM;
3435 goto free_netdev;
3436 }
3437 spin_lock_init(&mac->hw_stats->stats_lock);
3438 u64_stats_init(&mac->hw_stats->syncp);
3439 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
3440
3441 /* phylink create */
3442 phy_mode = of_get_phy_mode(np);
3443 if (phy_mode < 0) {
3444 dev_err(eth->dev, "incorrect phy-mode\n");
3445 err = -EINVAL;
3446 goto free_netdev;
3447 }
3448
3449 /* mac config is not set */
3450 mac->interface = PHY_INTERFACE_MODE_NA;
3451 mac->mode = MLO_AN_PHY;
3452 mac->speed = SPEED_UNKNOWN;
3453
3454 mac->phylink_config.dev = &eth->netdev[id]->dev;
3455 mac->phylink_config.type = PHYLINK_NETDEV;
3456
3457 phylink = phylink_create(&mac->phylink_config,
3458 of_fwnode_handle(mac->of_node),
3459 phy_mode, &mtk_phylink_ops);
3460 if (IS_ERR(phylink)) {
3461 err = PTR_ERR(phylink);
3462 goto free_netdev;
3463 }
3464
3465 mac->phylink = phylink;
3466
3467 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
3468 eth->netdev[id]->watchdog_timeo = 5 * HZ;
3469 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
3470 eth->netdev[id]->base_addr = (unsigned long)eth->base;
3471
3472 eth->netdev[id]->hw_features = eth->soc->hw_features;
3473 if (eth->hwlro)
3474 eth->netdev[id]->hw_features |= NETIF_F_LRO;
3475
3476 eth->netdev[id]->vlan_features = eth->soc->hw_features &
3477 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
3478 eth->netdev[id]->features |= eth->soc->hw_features;
3479 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
3480
3481 eth->netdev[id]->irq = eth->irq[0];
3482 eth->netdev[id]->dev.of_node = np;
3483
3484 return 0;
3485
3486free_netdev:
3487 free_netdev(eth->netdev[id]);
3488 return err;
3489}
3490
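/* Platform probe: map the register base (recording the SRAM scratch
 * offset on SoCs that have it), look up the ethsys/infracfg/sgmii/pctl
 * syscons, clocks and IRQs, run the cold hardware init and set up one
 * netdev per MAC node.
 */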
3491static int mtk_probe(struct platform_device *pdev)
3492{
3493 struct device_node *mac_np;
3494 struct mtk_eth *eth;
3495 int err, i;
3496
3497 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
3498 if (!eth)
3499 return -ENOMEM;
3500
3501 eth->soc = of_device_get_match_data(&pdev->dev);
3502
3503 eth->dev = &pdev->dev;
3504 eth->base = devm_platform_ioremap_resource(pdev, 0);
3505 if (IS_ERR(eth->base))
3506 return PTR_ERR(eth->base);
3507
3508 if(eth->soc->has_sram) {
3509 struct resource *res;
3510 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
developer4c32b7a2021-11-13 16:46:43 +08003511 if (unlikely(!res))
3512 return -EINVAL;
developerfd40db22021-04-29 10:08:25 +08003513 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
3514 }
3515
3516 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3517 eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
3518 eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
3519 } else {
3520 eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
3521 eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
3522 }
3523
3524 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3525 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
3526 eth->ip_align = NET_IP_ALIGN;
3527 } else {
developera2bdbd52021-05-31 19:10:17 +08003528 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developerfd40db22021-04-29 10:08:25 +08003529 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_V2;
3530 else
3531 eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
3532 }
3533
3534 spin_lock_init(&eth->page_lock);
3535 spin_lock_init(&eth->tx_irq_lock);
3536 spin_lock_init(&eth->rx_irq_lock);
developerd82e8372022-02-09 15:00:09 +08003537 spin_lock_init(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +08003538
3539 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3540 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3541 "mediatek,ethsys");
3542 if (IS_ERR(eth->ethsys)) {
3543 dev_err(&pdev->dev, "no ethsys regmap found\n");
3544 return PTR_ERR(eth->ethsys);
3545 }
3546 }
3547
3548 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
3549 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3550 "mediatek,infracfg");
3551 if (IS_ERR(eth->infra)) {
3552 dev_err(&pdev->dev, "no infracfg regmap found\n");
3553 return PTR_ERR(eth->infra);
3554 }
3555 }
3556
3557 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
3558 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
3559 GFP_KERNEL);
3560 if (!eth->sgmii)
3561 return -ENOMEM;
3562
3563 err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
3564 eth->soc->ana_rgc3);
3565
3566 if (err)
3567 return err;
3568 }
3569
3570 if (eth->soc->required_pctl) {
3571 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3572 "mediatek,pctl");
3573 if (IS_ERR(eth->pctl)) {
3574 dev_err(&pdev->dev, "no pctl regmap found\n");
3575 return PTR_ERR(eth->pctl);
3576 }
3577 }
3578
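	/*
	 * Interrupt layout assumed by the devm_request_irq() calls further
	 * down (summarised from this probe path, not from a binding
	 * document): with MTK_SHARED_INT only the first interrupt is fetched
	 * from the device tree and copied to every slot; otherwise irq[1]
	 * handles TX, irq[2] (plus irq[2 + n] when MTK_RSS is available)
	 * handles the RX rings, and irq[3] is requested for the frame engine
	 * (mtk_handle_fe_irq) on non-RSS SoCs.
	 */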
developer18f46a82021-07-20 21:08:21 +08003579 for (i = 0; i < MTK_MAX_IRQ_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08003580 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
3581 eth->irq[i] = eth->irq[0];
3582 else
3583 eth->irq[i] = platform_get_irq(pdev, i);
3584 if (eth->irq[i] < 0) {
3585 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
3586 return -ENXIO;
3587 }
3588 }
3589
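	/*
	 * Clocks are looked up by the names in mtk_clks_source_name[].  A
	 * missing clock that is flagged in soc->required_clks aborts the
	 * probe, any other missing clock is treated as optional and left
	 * NULL; -EPROBE_DEFER is propagated so the probe is retried once the
	 * clock provider appears.
	 */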
3590 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
3591 eth->clks[i] = devm_clk_get(eth->dev,
3592 mtk_clks_source_name[i]);
3593 if (IS_ERR(eth->clks[i])) {
3594 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
3595 return -EPROBE_DEFER;
3596 if (eth->soc->required_clks & BIT(i)) {
3597 dev_err(&pdev->dev, "clock %s not found\n",
3598 mtk_clks_source_name[i]);
3599 return -EINVAL;
3600 }
3601 eth->clks[i] = NULL;
3602 }
3603 }
3604
3605 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
3606 INIT_WORK(&eth->pending_work, mtk_pending_work);
3607
developer8051e042022-04-08 13:26:36 +08003608 err = mtk_hw_init(eth, MTK_TYPE_COLD_RESET);
developerfd40db22021-04-29 10:08:25 +08003609 if (err)
3610 return err;
3611
3612 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
3613
3614 for_each_child_of_node(pdev->dev.of_node, mac_np) {
3615 if (!of_device_is_compatible(mac_np,
3616 "mediatek,eth-mac"))
3617 continue;
3618
3619 if (!of_device_is_available(mac_np))
3620 continue;
3621
3622 err = mtk_add_mac(eth, mac_np);
3623 if (err) {
3624 of_node_put(mac_np);
3625 goto err_deinit_hw;
3626 }
3627 }
3628
developer18f46a82021-07-20 21:08:21 +08003629 err = mtk_napi_init(eth);
3630 if (err)
3631 goto err_free_dev;
3632
developerfd40db22021-04-29 10:08:25 +08003633 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
3634 err = devm_request_irq(eth->dev, eth->irq[0],
3635 mtk_handle_irq, 0,
3636 dev_name(eth->dev), eth);
3637 } else {
3638 err = devm_request_irq(eth->dev, eth->irq[1],
3639 mtk_handle_irq_tx, 0,
3640 dev_name(eth->dev), eth);
3641 if (err)
3642 goto err_free_dev;
3643
3644 err = devm_request_irq(eth->dev, eth->irq[2],
3645 mtk_handle_irq_rx, 0,
developer18f46a82021-07-20 21:08:21 +08003646 dev_name(eth->dev), &eth->rx_napi[0]);
3647 if (err)
3648 goto err_free_dev;
3649
3650 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3651 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3652 err = devm_request_irq(eth->dev,
3653 eth->irq[2 + i],
3654 mtk_handle_irq_rx, 0,
3655 dev_name(eth->dev),
3656 &eth->rx_napi[i]);
3657 if (err)
3658 goto err_free_dev;
3659 }
developer8051e042022-04-08 13:26:36 +08003660 } else {
3661 err = devm_request_irq(eth->dev, eth->irq[3],
3662 mtk_handle_fe_irq, 0,
3663 dev_name(eth->dev), eth);
3664 if (err)
3665 goto err_free_dev;
developer18f46a82021-07-20 21:08:21 +08003666 }
developerfd40db22021-04-29 10:08:25 +08003667 }
developer8051e042022-04-08 13:26:36 +08003668
developerfd40db22021-04-29 10:08:25 +08003669 if (err)
3670 goto err_free_dev;
3671
3672 /* No MT7628/88 support yet */
3673 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3674 err = mtk_mdio_init(eth);
3675 if (err)
3676 goto err_free_dev;
3677 }
3678
3679 for (i = 0; i < MTK_MAX_DEVS; i++) {
3680 if (!eth->netdev[i])
3681 continue;
3682
3683 err = register_netdev(eth->netdev[i]);
3684 if (err) {
3685 dev_err(eth->dev, "error bringing up device\n");
3686 goto err_deinit_mdio;
3687		}
3688		netif_info(eth, probe, eth->netdev[i],
3689			   "mediatek frame engine at 0x%08lx, irq %d\n",
3690			   eth->netdev[i]->base_addr, eth->irq[0]);
3691 }
3692
3693	/* Both netdevs are driven off the same DMA rings, so give NAPI a
3694	 * dummy net_device to attach its contexts to.
3695	 */
3696 init_dummy_netdev(&eth->dummy_dev);
3697 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
3698 MTK_NAPI_WEIGHT);
developer18f46a82021-07-20 21:08:21 +08003699 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[0].napi, mtk_napi_rx,
developerfd40db22021-04-29 10:08:25 +08003700 MTK_NAPI_WEIGHT);
3701
developer18f46a82021-07-20 21:08:21 +08003702 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3703 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
3704 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[i].napi,
3705 mtk_napi_rx, MTK_NAPI_WEIGHT);
3706 }
3707
developerfd40db22021-04-29 10:08:25 +08003708 mtketh_debugfs_init(eth);
3709 debug_proc_init(eth);
3710
3711 platform_set_drvdata(pdev, eth);
3712
developer8051e042022-04-08 13:26:36 +08003713 register_netdevice_notifier(&mtk_eth_netdevice_nb);
3714 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
3715 eth->mtk_dma_monitor_timer.expires = jiffies;
3716 add_timer(&eth->mtk_dma_monitor_timer);
3717
developerfd40db22021-04-29 10:08:25 +08003718 return 0;
3719
3720err_deinit_mdio:
3721 mtk_mdio_cleanup(eth);
3722err_free_dev:
3723 mtk_free_dev(eth);
3724err_deinit_hw:
3725 mtk_hw_deinit(eth);
3726
3727 return err;
3728}
3729
3730static int mtk_remove(struct platform_device *pdev)
3731{
3732 struct mtk_eth *eth = platform_get_drvdata(pdev);
3733 struct mtk_mac *mac;
3734 int i;
3735
3736	/* Stop all netdevs to make sure DMA is properly shut down */
3737 for (i = 0; i < MTK_MAC_COUNT; i++) {
3738 if (!eth->netdev[i])
3739 continue;
3740 mtk_stop(eth->netdev[i]);
3741 mac = netdev_priv(eth->netdev[i]);
3742 phylink_disconnect_phy(mac->phylink);
3743 }
3744
3745 mtk_hw_deinit(eth);
3746
3747 netif_napi_del(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08003748 netif_napi_del(&eth->rx_napi[0].napi);
3749
3750 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3751 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
3752 netif_napi_del(&eth->rx_napi[i].napi);
3753 }
3754
developerfd40db22021-04-29 10:08:25 +08003755 mtk_cleanup(eth);
3756 mtk_mdio_cleanup(eth);
developer8051e042022-04-08 13:26:36 +08003757 unregister_netdevice_notifier(&mtk_eth_netdevice_nb);
3758 del_timer_sync(&eth->mtk_dma_monitor_timer);
developerfd40db22021-04-29 10:08:25 +08003759
3760 return 0;
3761}
3762
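/*
 * Per-SoC configuration tables.  caps is the capability bitmap tested with
 * MTK_HAS_CAPS() in mtk_probe(), hw_features seeds the netdev feature flags,
 * required_clks is a bitmap over mtk_clks_source_name[], required_pctl
 * selects whether the "mediatek,pctl" syscon is mandatory, ana_rgc3 is the
 * register offset handed to mtk_sgmii_init(), and has_sram marks SoCs whose
 * ring memory sits in frame engine SRAM (see the MTK_ETH_SRAM_OFFSET use in
 * mtk_probe()).
 */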
3763static const struct mtk_soc_data mt2701_data = {
3764 .caps = MT7623_CAPS | MTK_HWLRO,
3765 .hw_features = MTK_HW_FEATURES,
3766 .required_clks = MT7623_CLKS_BITMAP,
3767 .required_pctl = true,
3768 .has_sram = false,
3769};
3770
3771static const struct mtk_soc_data mt7621_data = {
3772 .caps = MT7621_CAPS,
3773 .hw_features = MTK_HW_FEATURES,
3774 .required_clks = MT7621_CLKS_BITMAP,
3775 .required_pctl = false,
3776 .has_sram = false,
3777};
3778
3779static const struct mtk_soc_data mt7622_data = {
3780 .ana_rgc3 = 0x2028,
3781 .caps = MT7622_CAPS | MTK_HWLRO,
3782 .hw_features = MTK_HW_FEATURES,
3783 .required_clks = MT7622_CLKS_BITMAP,
3784 .required_pctl = false,
3785 .has_sram = false,
3786};
3787
3788static const struct mtk_soc_data mt7623_data = {
3789 .caps = MT7623_CAPS | MTK_HWLRO,
3790 .hw_features = MTK_HW_FEATURES,
3791 .required_clks = MT7623_CLKS_BITMAP,
3792 .required_pctl = true,
3793 .has_sram = false,
3794};
3795
3796static const struct mtk_soc_data mt7629_data = {
3797 .ana_rgc3 = 0x128,
3798 .caps = MT7629_CAPS | MTK_HWLRO,
3799 .hw_features = MTK_HW_FEATURES,
3800 .required_clks = MT7629_CLKS_BITMAP,
3801 .required_pctl = false,
3802 .has_sram = false,
3803};
3804
3805static const struct mtk_soc_data mt7986_data = {
3806 .ana_rgc3 = 0x128,
3807 .caps = MT7986_CAPS,
developercba5f4e2021-05-06 14:01:53 +08003808 .hw_features = MTK_HW_FEATURES,
developerfd40db22021-04-29 10:08:25 +08003809 .required_clks = MT7986_CLKS_BITMAP,
3810 .required_pctl = false,
3811 .has_sram = true,
3812};
3813
developer255bba22021-07-27 15:16:33 +08003814static const struct mtk_soc_data mt7981_data = {
3815 .ana_rgc3 = 0x128,
3816 .caps = MT7981_CAPS,
developer7377b0b2021-11-18 14:54:47 +08003817 .hw_features = MTK_HW_FEATURES,
developer255bba22021-07-27 15:16:33 +08003818 .required_clks = MT7981_CLKS_BITMAP,
3819 .required_pctl = false,
3820 .has_sram = true,
3821};
3822
developerfd40db22021-04-29 10:08:25 +08003823static const struct mtk_soc_data rt5350_data = {
3824 .caps = MT7628_CAPS,
3825 .hw_features = MTK_HW_FEATURES_MT7628,
3826 .required_clks = MT7628_CLKS_BITMAP,
3827 .required_pctl = false,
3828 .has_sram = false,
3829};
3830
3831const struct of_device_id of_mtk_match[] = {
3832 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
3833 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
3834 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
3835 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
3836 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
3837 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
developer255bba22021-07-27 15:16:33 +08003838 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
developerfd40db22021-04-29 10:08:25 +08003839 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
3840 {},
3841};
3842MODULE_DEVICE_TABLE(of, of_mtk_match);
3843
3844static struct platform_driver mtk_driver = {
3845 .probe = mtk_probe,
3846 .remove = mtk_remove,
3847 .driver = {
3848 .name = "mtk_soc_eth",
3849 .of_match_table = of_mtk_match,
3850 },
3851};
3852
3853module_platform_driver(mtk_driver);
3854
3855MODULE_LICENSE("GPL");
3856MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
3857MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");