// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <net/dsa.h>

#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
#include "mtk_hnat/nf_hnat_mtk.h"
#endif

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define MTK_ETHTOOL_STAT(x) { #x, \
			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

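/* Names of the clocks consumed at probe time. They are looked up by index,
 * so the order here is assumed to match the MTK_CLK_* enumeration in
 * mtk_eth_soc.h.
 */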
static const char * const mtk_clks_source_name[] = {
	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
	"sgmii_ck", "eth2pll", "wocpu0", "wocpu1",
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

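/* Read-modify-write helper: clear @mask and set @set in @reg. Note that the
 * return value is the register offset that was passed in, not the value
 * written back; existing callers ignore it. A typical use, taken from the
 * TRGMII reset sequence below:
 *
 *	mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
 */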
u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
	u32 val;

	val = mtk_r32(eth, reg);
	val &= ~mask;
	val |= set;
	mtk_w32(eth, val, reg);
	return reg;
}

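/* Poll the PHY indirect-access control register until the hardware clears
 * the busy bit, giving up after PHY_IAC_TIMEOUT. cond_resched() keeps a
 * slow transaction from monopolizing the CPU.
 */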
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		cond_resched();
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

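/* Raw Clause 22 MDIO accessors: a single write to MTK_PHY_IAC encodes the
 * opcode, PHY address, register number and (for writes) the data word, with
 * a busy-wait bracketing each transaction.
 */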
u32 _mtk_mdio_write(struct mtk_eth *eth, u16 phy_addr,
		    u16 phy_register, u16 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
		(phy_register << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

u32 _mtk_mdio_read(struct mtk_eth *eth, u16 phy_addr, u16 phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
		(phy_reg << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT),
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

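/* Clause 45 access via the Clause 22 MMD indirection registers: write the
 * device address to the access-control register, the target register to the
 * address/data register, switch the control register to data mode, then
 * transfer the data word. mdio_lock serializes the multi-step sequence
 * against other users of the bus.
 */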
u32 mtk_cl45_ind_read(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 *data)
{
	mutex_lock(&eth->mii_bus->mdio_lock);

	_mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, devad);
	_mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, reg);
	_mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad);
	*data = _mtk_mdio_read(eth, port, MII_MMD_ADDR_DATA_REG);

	mutex_unlock(&eth->mii_bus->mdio_lock);

	return 0;
}

u32 mtk_cl45_ind_write(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 data)
{
	mutex_lock(&eth->mii_bus->mdio_lock);

	_mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, devad);
	_mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, reg);
	_mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad);
	_mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, data);

	mutex_unlock(&eth->mii_bus->mdio_lock);

	return 0;
}

static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
				     phy_interface_t interface)
{
	u32 val;

	/* Check DDR memory type.
	 * Currently TRGMII mode with DDR2 memory is not supported.
	 */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
	if (interface == PHY_INTERFACE_MODE_TRGMII &&
	    val & SYSCFG_DRAM_TYPE_DDR2) {
		dev_err(eth->dev,
			"TRGMII mode with DDR2 memory is not supported!\n");
		return -EOPNOTSUPP;
	}

	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_MT7621_MASK, val);

	return 0;
}

static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
				   phy_interface_t interface, int speed)
{
	u32 val;
	int ret;

	if (interface == PHY_INTERFACE_MODE_TRGMII) {
		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
		val = 500000000;
		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
		if (ret)
			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
		return;
	}

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

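/* phylink mac_config callback: mux the GMAC onto the requested path
 * (RGMII/TRGMII, SGMII/802.3z or GEPHY), program the ge_mode field in
 * ETHSYS_SYSCFG0, and rewrite MAC_MCR with the forced speed, duplex and
 * flow-control settings.
 */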
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	u32 mcr_cur, mcr_new, sid, i;
	int val, ge_mode, err = 0;

	/* MT76x8 has no hardware settings for the MAC */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
	    mac->interface != state->interface) {
		/* Setup soc pin functions */
		switch (state->interface) {
		case PHY_INTERFACE_MODE_TRGMII:
			if (mac->id)
				goto err_phy;
			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
					  MTK_GMAC1_TRGMII))
				goto err_phy;
			/* fall through */
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_REVMII:
		case PHY_INTERFACE_MODE_RMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
		case PHY_INTERFACE_MODE_2500BASEX:
		case PHY_INTERFACE_MODE_SGMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_GMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
				err = mtk_gmac_gephy_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		default:
			goto err_phy;
		}

		/* Setup clock for 1st gmac */
		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
		    !phy_interface_mode_is_8023z(state->interface) &&
		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
			if (MTK_HAS_CAPS(mac->hw->soc->caps,
					 MTK_TRGMII_MT7621_CLK)) {
				if (mt7621_gmac0_rgmii_adjust(mac->hw,
							      state->interface))
					goto err_phy;
			} else {
				mtk_gmac0_rgmii_adjust(mac->hw,
						       state->interface,
						       state->speed);

				/* mt7623_pad_clk_setup */
				for (i = 0; i < NUM_TRGMII_CTRL; i++)
					mtk_w32(mac->hw,
						TD_DM_DRVP(8) | TD_DM_DRVN(8),
						TRGMII_TD_ODT(i));

				/* Assert/release MT7623 RXC reset */
				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
					TRGMII_RCK_CTRL);
				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
			}
		}

		ge_mode = 0;
		switch (state->interface) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_GMII:
			ge_mode = 1;
			break;
		case PHY_INTERFACE_MODE_REVMII:
			ge_mode = 2;
			break;
		case PHY_INTERFACE_MODE_RMII:
			if (mac->id)
				goto err_phy;
			ge_mode = 3;
			break;
		default:
			break;
		}

		/* put the gmac into the right mode */
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

		mac->interface = state->interface;
	}

	/* SGMII */
	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path from GMAC to SGMII will be enabled once the
		 * SGMIISYS setup is done.
		 */
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK,
				   ~(u32)SYSCFG0_SGMII_MASK);

		/* Decide how GMAC and SGMIISYS are mapped */
		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
		       0 : mac->id;

		/* Setup SGMIISYS with the determined property */
		if (state->interface != PHY_INTERFACE_MODE_SGMII)
			err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
							 state);
		else if (phylink_autoneg_inband(mode))
			err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);

		if (err)
			goto init_err;

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, val);
	} else if (phylink_autoneg_inband(mode)) {
		dev_err(eth->dev,
			"In-band mode not supported in non SGMII mode!\n");
		return;
	}

	/* Setup gmac */
	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
	mcr_new = mcr_cur;
	mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
		     MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
		     MAC_MCR_FORCE_RX_FC);
	mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;

	switch (state->speed) {
	case SPEED_2500:
	case SPEED_1000:
		mcr_new |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr_new |= MAC_MCR_SPEED_100;
		break;
	}
	if (state->duplex == DUPLEX_FULL) {
		mcr_new |= MAC_MCR_FORCE_DPX;
		if (state->pause & MLO_PAUSE_TX)
			mcr_new |= MAC_MCR_FORCE_TX_FC;
		if (state->pause & MLO_PAUSE_RX)
			mcr_new |= MAC_MCR_FORCE_RX_FC;
	}

	/* Only update control register when needed! */
	if (mcr_new != mcr_cur)
		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));

	return;

err_phy:
	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
		mac->id, phy_modes(state->interface));
	return;

init_err:
	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
		mac->id, phy_modes(state->interface), err);
}

static int mtk_mac_link_state(struct phylink_config *config,
			      struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));

	state->link = (pmsr & MAC_MSR_LINK);
	state->duplex = (pmsr & MAC_MSR_DPX) >> 1;

	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
	case 0:
		state->speed = SPEED_10;
		break;
	case MAC_MSR_SPEED_100:
		state->speed = SPEED_100;
		break;
	case MAC_MSR_SPEED_1000:
		state->speed = SPEED_1000;
		break;
	default:
		state->speed = SPEED_UNKNOWN;
		break;
	}

	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
	if (pmsr & MAC_MSR_RX_FC)
		state->pause |= MLO_PAUSE_RX;
	if (pmsr & MAC_MSR_TX_FC)
		state->pause |= MLO_PAUSE_TX;

	return 1;
}

static void mtk_mac_an_restart(struct phylink_config *config)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	mtk_sgmii_restart_an(mac->hw, mac->id);
}

static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));

	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}

static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
			    phy_interface_t interface,
			    struct phy_device *phy)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));

	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}

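/* phylink validate callback: reduce @supported and the advertising mask to
 * the link modes this MAC can actually handle on @state->interface, based
 * on the SoC capability bits.
 */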
static void mtk_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
	      phy_interface_mode_is_rgmii(state->interface)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
	       phy_interface_mode_is_8023z(state->interface)))) {
		linkmode_zero(supported);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_TRGMII:
		phylink_set(mask, 1000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseX_Full);
		break;
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseT_Half);
		/* fall through */
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_REVMII:
	case PHY_INTERFACE_MODE_NA:
	default:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		break;
	}

	if (state->interface == PHY_INTERFACE_MODE_NA) {
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
			phylink_set(mask, 1000baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
		}
	}

	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}

static const struct phylink_mac_ops mtk_phylink_ops = {
	.validate = mtk_validate,
	.mac_link_state = mtk_mac_link_state,
	.mac_an_restart = mtk_mac_an_restart,
	.mac_config = mtk_mac_config,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
};

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

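/* The TX and RX interrupt masks live in different registers (QDMA vs PDMA),
 * so each direction gets its own spinlock-protected read-modify-write
 * helpers.
 */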
static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MT7628_SDM_MAC_ADRH);
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MT7628_SDM_MAC_ADRL);
	} else {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MTK_GDMA_MAC_ADRH(mac->id));
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MTK_GDMA_MAC_ADRL(mac->id));
	}
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

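/* Fold the hardware MIB counters for @mac into its mtk_hw_stats inside a
 * u64_stats write section, so that 64-bit readers see consistent values.
 * The registers at base + 0x04 and base + 0x34 carry the high words of the
 * RX/TX byte counters.
 */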
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats = mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
		mtk_r32(mac->hw, base + 0x24);
	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
	stats = mtk_r32(mac->hw, base + 0x34);
	if (stats)
		hw_stats->tx_bytes += (stats << 32);
	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

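/* RX buffers are page fragments laid out as NET_SKB_PAD + NET_IP_ALIGN
 * headroom, then the frame data, then the skb_shared_info tail. The two
 * helpers below convert between the sizes in opposite directions:
 * MTU -> fragment size, and fragment size -> usable buffer size.
 */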
static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

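/* Snapshot a hardware RX descriptor into @rxd. rxd2 holds the DMA-done bit
 * and is read first; if the descriptor is still owned by the DMA engine
 * there is nothing worth copying.
 */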
static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
				   struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	if (!(rxd->rxd2 & RX_DMA_DONE))
		return false;

	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
	rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
	rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
#endif
	return true;
}

/* The QDMA core needs scratch memory to be set up: every free-queue
 * descriptor points at one MTK_QDMA_PAGE_SIZE chunk of eth->scratch_head.
 */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	if (!eth->soc->has_sram) {
		eth->scratch_ring = dma_alloc_coherent(eth->dev,
						       cnt * sizeof(struct mtk_tx_dma),
						       &eth->phy_scratch_ring,
						       GFP_ATOMIC);
	} else {
		eth->scratch_ring = eth->base + MTK_ETH_SRAM_OFFSET;
	}

	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
				    GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	phy_ring_tail = eth->phy_scratch_ring +
			(sizeof(struct mtk_tx_dma) * (cnt - 1));

	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 =
			(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
		if (i < cnt - 1)
			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);

		eth->scratch_ring[i].txd4 = 0;
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
		if (eth->soc->has_sram && ((sizeof(struct mtk_tx_dma)) > 16)) {
			eth->scratch_ring[i].txd5 = 0;
			eth->scratch_ring[i].txd6 = 0;
			eth->scratch_ring[i].txd7 = 0;
			eth->scratch_ring[i].txd8 = 0;
		}
#endif
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}

static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	void *ret = ring->dma;

	return ret + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    struct mtk_tx_dma *txd)
{
	int idx = txd - ring->dma;

	return &ring->buf[idx];
}

static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
				       struct mtk_tx_dma *dma)
{
	return ring->dma_pdma - ring->dma + dma;
}

static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
{
	return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
}

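/* Undo the DMA mapping(s) of a TX buffer and release its skb. The QDMA path
 * tracks one mapping per buffer (single or page), while the PDMA path packs
 * two buffers into one descriptor, hence the dma_addr1/dma_len1 pair.
 * @napi selects napi_consume_skb() over dev_kfree_skb_any() for the free.
 */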
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 bool napi)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
			dma_unmap_single(eth->dev,
					 dma_unmap_addr(tx_buf, dma_addr0),
					 dma_unmap_len(tx_buf, dma_len0),
					 DMA_TO_DEVICE);
		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}
	} else {
		if (dma_unmap_len(tx_buf, dma_len0)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}

		if (dma_unmap_len(tx_buf, dma_len1)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr1),
				       dma_unmap_len(tx_buf, dma_len1),
				       DMA_TO_DEVICE);
		}
	}

	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
		if (napi)
			napi_consume_skb(tx_buf->skb, napi);
		else
			dev_kfree_skb_any(tx_buf->skb);
	}
	tx_buf->skb = NULL;
}

static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
			 size_t size, int idx)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
		dma_unmap_len_set(tx_buf, dma_len0, size);
	} else {
		if (idx & 1) {
			txd->txd3 = mapped_addr;
			txd->txd2 |= TX_DMA_PLEN1(size);
			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len1, size);
		} else {
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			txd->txd1 = mapped_addr;
			txd->txd2 = TX_DMA_PLEN0(size);
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, size);
		}
	}
}

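/* Map an skb (linear head plus page fragments) onto a chain of TX
 * descriptors. The first descriptor is committed last: writing its txd4 at
 * the end of the function hands the whole chain to the hardware in one
 * step. On NETSYS_V2 the offload flags move from txd4 into txd5/txd6.
 */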
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
	struct mtk_tx_buf *itx_buf, *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = 0, fport;
	u32 qid = 0;
	int k = 0;

	itxd = ring->next_free;
	itxd_pdma = qdma_to_pdma(ring, itxd);
	if (itxd == ring->last_free)
		return -ENOMEM;

	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(itx_buf, 0, sizeof(*itx_buf));

	mapped_addr = dma_map_single(eth->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
		return -ENOMEM;

	WRITE_ONCE(itxd->txd1, mapped_addr);
	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
			  MTK_TX_FLAGS_FPORT1;
	setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
		     k++);

	nr_frags = skb_shinfo(skb)->nr_frags;

#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
	qid = skb->mark & (MTK_QDMA_TX_MASK);
#endif

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		u32 txd5 = 0, txd6 = 0;
		/* set the forward port */
		fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2;
		txd4 |= fport;

		if (gso)
			txd5 |= TX_DMA_TSO_V2;

		/* TX Checksum offload */
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			txd5 |= TX_DMA_CHKSUM_V2;

		/* VLAN header offload */
		if (skb_vlan_tag_present(skb))
			txd6 |= TX_DMA_INS_VLAN_V2 | skb_vlan_tag_get(skb);

		txd4 = txd4 | TX_DMA_SWC_V2;

		WRITE_ONCE(itxd->txd3, (TX_DMA_PLEN0(skb_headlen(skb)) |
			   (!nr_frags * TX_DMA_LS0)));

#if defined(CONFIG_MEDIATEK_NETSYS_V2)
		WRITE_ONCE(itxd->txd5, txd5);
		WRITE_ONCE(itxd->txd6, txd6);
#endif
	} else {
		/* set the forward port */
		fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
		txd4 |= fport;

		if (gso)
			txd4 |= TX_DMA_TSO;

		/* TX Checksum offload */
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			txd4 |= TX_DMA_CHKSUM;

		/* VLAN header offload */
		if (skb_vlan_tag_present(skb))
			txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

		WRITE_ONCE(itxd->txd3,
			   TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
			   (!nr_frags * TX_DMA_LS0) | QID_LOW_BITS(qid));
	}
	/* TX SG offload */
	txd = itxd;
	txd_pdma = qdma_to_pdma(ring, txd);

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
			txd4 &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
			txd4 |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
		} else {
			txd4 &= ~(0x7 << TX_DMA_FPORT_SHIFT);
			txd4 |= 0x4 << TX_DMA_FPORT_SHIFT;
		}
	}

	trace_printk("[%s] nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, nr_frags, HNAT_SKB_CB2(skb)->magic, txd4);
#endif

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;
			bool new_desc = true;

			if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
			    (i & 0x1)) {
				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
				txd_pdma = qdma_to_pdma(ring, txd);
				if (txd == ring->last_free)
					goto err_dma;

				n_desc++;
			} else {
				new_desc = false;
			}

			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);

			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
				WRITE_ONCE(txd->txd3, (TX_DMA_PLEN0(frag_map_size) |
					   last_frag * TX_DMA_LS0));
				WRITE_ONCE(txd->txd4, fport | TX_DMA_SWC_V2 |
					   QID_BITS_V2(qid));
			} else {
				WRITE_ONCE(txd->txd3,
					   (TX_DMA_SWC | QID_LOW_BITS(qid) |
					    TX_DMA_PLEN0(frag_map_size) |
					    last_frag * TX_DMA_LS0));
				WRITE_ONCE(txd->txd4,
					   fport | QID_HIGH_BITS(qid));
			}

			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			if (new_desc)
				memset(tx_buf, 0, sizeof(*tx_buf));
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
					 MTK_TX_FLAGS_FPORT1;

			setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
				     frag_map_size, k++);

			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	itx_buf->skb = skb;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		WRITE_ONCE(itxd->txd4, txd4 | QID_BITS_V2(qid));
	else
		WRITE_ONCE(itxd->txd4, txd4 | QID_HIGH_BITS(qid));

	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (k & 0x1)
			txd_pdma->txd2 |= TX_DMA_LS0;
		else
			txd_pdma->txd2 |= TX_DMA_LS1;
	}

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
		    !netdev_xmit_more())
			mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
	} else {
		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
					     ring->dma_size);
		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
	}

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf, false);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;

		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
		itxd_pdma = qdma_to_pdma(ring, itxd);
	} while (itxd != txd);

	return -ENOMEM;
}

static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	skb_frag_t *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
					       MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		netif_stop_queue(dev);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		netif_stop_queue(dev);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

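/* With hardware LRO enabled several RX rings are active, so pick the first
 * ring whose next descriptor is ready; without LRO the caller already knows
 * which ring it is polling.
 */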
static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
{
	int i;
	struct mtk_rx_ring *ring;
	int idx;

	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
		if (!IS_NORMAL_RING(i) && !IS_HW_LRO_RING(i))
			continue;

		ring = &eth->rx_ring[i];
		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
			ring->calc_idx_update = true;
			return ring;
		}
	}

	return NULL;
}

static void mtk_update_rx_cpu_idx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	int i;

	if (!eth->hwlro) {
		if (unlikely(!ring)) {
			dev_info(eth->dev, "Update Rx cpu index failed !\n");
			return;
		}

		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	} else {
		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
			ring = &eth->rx_ring[i];
			if (ring->calc_idx_update) {
				ring->calc_idx_update = false;
				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
			}
		}
	}
}

static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
	struct mtk_rx_ring *ring = rx_napi->rx_ring;
	int idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma *rxd, trxd;
	int done = 0;

	if (unlikely(!ring))
		goto rx_done;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac;

		if (eth->hwlro)
			ring = mtk_get_rx_ring(eth);

		if (unlikely(!ring))
			goto rx_done;

		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = &ring->dma[idx];
		data = ring->data[idx];

		if (!mtk_rx_get_desc(&trxd, rxd))
			break;

		/* find out which mac the packet comes from. values start at 1 */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
			mac = 0;
		} else {
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
				mac = RX_DMA_GET_SPORT(trxd.rxd5) - 1;
			else
#endif
				mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ?
				      0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
		}

		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
			     !eth->netdev[mac]))
			goto release_desc;

		netdev = eth->netdev[mac];

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

		/* alloc new buffer */
		new_data = napi_alloc_frag(ring->frag_size);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dev,
					  new_data + NET_SKB_PAD +
					  eth->ip_align,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		dma_unmap_single(eth->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(data);
			netdev->stats.rx_dropped++;
			goto skip_rx;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);

		if ((!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
		     (trxd.rxd4 & eth->rx_dma_l4_valid)) ||
		    (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
		     (trxd.rxd3 & eth->rx_dma_l4_valid)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
				if (trxd.rxd3 & RX_DMA_VTAG_V2)
					__vlan_hwaccel_put_tag(skb,
						htons(RX_DMA_VPID_V2(trxd.rxd4)),
						RX_DMA_VID_V2(trxd.rxd4));
			} else {
				if (trxd.rxd2 & RX_DMA_VTAG)
					__vlan_hwaccel_put_tag(skb,
						htons(RX_DMA_VPID(trxd.rxd3)),
						RX_DMA_VID(trxd.rxd3));
			}

			/* If netdev is attached to a DSA switch, the special
			 * tag inserted in the VLAN field by the switch
			 * hardware can be offloaded by RX HW VLAN offload.
			 * Clear the VLAN information from @skb to avoid an
			 * unexpected 802.1d handler before the packet enters
			 * the DSA framework.
			 */
			if (netdev_uses_dsa(netdev))
				__vlan_hwaccel_clear_tag(skb);
		}

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
			*(u32 *)(skb->head) = trxd.rxd5;
		else
#endif
			*(u32 *)(skb->head) = trxd.rxd4;

		skb_hnat_alg(skb) = 0;
		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;

		if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
			trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
				     __func__, skb_hnat_reason(skb));
			skb->pkt_type = PACKET_HOST;
		}

		trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x\n",
			     __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
			     skb_hnat_reason(skb), skb_hnat_alg(skb));
#endif
		if (mtk_hwlro_stats_ebl &&
		    IS_HW_LRO_RING(ring->ring_no) && eth->hwlro) {
			hw_lro_stats_update(ring->ring_no, &trxd);
			hw_lro_flush_stats_update(ring->ring_no, &trxd);
		}

		skb_record_rx_queue(skb, 0);
		napi_gro_receive(napi, skb);

skip_rx:
		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			rxd->rxd2 = RX_DMA_LSO;
		else
			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;

		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth, ring);
	}

	return done;
}

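/* Reclaim completed QDMA TX descriptors: walk from the last CPU position
 * towards the hardware's DRX pointer, unmapping buffers and crediting
 * bytes/packets to the owning MAC, then publish the new CPU position via
 * MTK_QTX_CRX_PTR.
 */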
static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
			    unsigned int *done, unsigned int *bytes)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = ring->last_free_ptr;
	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac = 0;

		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);

		tx_buf = mtk_desc_to_tx_buf(ring, desc);
		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
			mac = 1;

		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_tx_unmap(eth, tx_buf, true);

		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	ring->last_free_ptr = cpu;
	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

	return budget;
}

static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
			    unsigned int *done, unsigned int *bytes)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = ring->cpu_idx;
	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);

	while ((cpu != dma) && budget) {
		tx_buf = &ring->buf[cpu];
		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[0] += skb->len;
			done[0]++;
			budget--;
		}

		mtk_tx_unmap(eth, tx_buf, true);

		desc = &ring->dma[cpu];
		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
	}

	ring->cpu_idx = cpu;

	return budget;
}

static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	unsigned int done[MTK_MAX_DEVS];
	unsigned int bytes[MTK_MAX_DEVS];
	int total = 0, i;

	memset(done, 0, sizeof(done));
	memset(bytes, 0, sizeof(bytes));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
	else
		budget = mtk_poll_tx_pdma(eth, budget, done, bytes);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i] || !done[i])
			continue;
		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
		total += done[i];
	}

	if (mtk_queue_stopped(eth) &&
	    (atomic_read(&ring->free_count) > ring->thresh))
		mtk_wake_queue(eth);

	return total;
}

static void mtk_handle_status_irq(struct mtk_eth *eth)
{
	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);

	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
		mtk_stats_update(eth);
		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
			MTK_INT_STATUS2);
	}
}

static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	u32 status, mask;
	int tx_done = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_handle_status_irq(eth);
	mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, eth->tx_int_status_reg);
		mask = mtk_r32(eth, eth->tx_int_mask_reg);
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n",
			 tx_done, status, mask);
	}

	if (tx_done == budget)
		return budget;

	status = mtk_r32(eth, eth->tx_int_status_reg);
	if (status & MTK_TX_DONE_INT)
		return budget;

	if (napi_complete(napi))
		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);

	return tx_done;
}

1687static int mtk_napi_rx(struct napi_struct *napi, int budget)
1688{
developer18f46a82021-07-20 21:08:21 +08001689 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
1690 struct mtk_eth *eth = rx_napi->eth;
1691 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08001692 u32 status, mask;
1693 int rx_done = 0;
1694 int remain_budget = budget;
1695
1696 mtk_handle_status_irq(eth);
1697
1698poll_again:
developer18f46a82021-07-20 21:08:21 +08001699 mtk_w32(eth, MTK_RX_DONE_INT(ring->ring_no), MTK_PDMA_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08001700 rx_done = mtk_poll_rx(napi, remain_budget, eth);
1701
1702 if (unlikely(netif_msg_intr(eth))) {
1703 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1704 mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
1705 dev_info(eth->dev,
1706 "done rx %d, intr 0x%08x/0x%x\n",
1707 rx_done, status, mask);
1708 }
1709 if (rx_done == remain_budget)
1710 return budget;
1711
1712 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
developer18f46a82021-07-20 21:08:21 +08001713 if (status & MTK_RX_DONE_INT(ring->ring_no)) {
developerfd40db22021-04-29 10:08:25 +08001714 remain_budget -= rx_done;
1715 goto poll_again;
1716 }
developerc4671b22021-05-28 13:16:42 +08001717
1718 if (napi_complete(napi))
developer18f46a82021-07-20 21:08:21 +08001719 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(ring->ring_no));
developerfd40db22021-04-29 10:08:25 +08001720
1721 return rx_done + budget - remain_budget;
1722}
1723
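/* Allocate and initialize the TX ring. On SoCs with SRAM the descriptors
 * live on-chip right after the FQ scratch ring; otherwise they come from
 * coherent DMA memory. QDMA descriptors are chained through txd2, while the
 * optional PDMA shadow ring (MT7628) is indexed linearly.
 */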
static int mtk_tx_alloc(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->dma);

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	if (!eth->soc->has_sram)
		ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
					       &ring->phys, GFP_ATOMIC);
	else {
		ring->dma = eth->scratch_ring + MTK_DMA_SIZE;
		ring->phys = eth->phy_scratch_ring + MTK_DMA_SIZE * sz;
	}

	if (!ring->dma)
		goto no_tx_mem;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		ring->dma[i].txd4 = 0;
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
		if (eth->soc->has_sram && (sz > 16)) {
			ring->dma[i].txd5 = 0;
			ring->dma[i].txd6 = 0;
			ring->dma[i].txd7 = 0;
			ring->dma[i].txd8 = 0;
		}
#endif
	}

	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
	 * only as the framework. The real HW descriptors are the PDMA
	 * descriptors in ring->dma_pdma.
	 */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
						    &ring->phys_pdma,
						    GFP_ATOMIC);
		if (!ring->dma_pdma)
			goto no_tx_mem;

		for (i = 0; i < MTK_DMA_SIZE; i++) {
			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
			ring->dma_pdma[i].txd4 = 0;
		}
	}

	ring->dma_size = MTK_DMA_SIZE;
	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
	ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
	ring->thresh = MAX_SKB_FRAGS;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
		mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
		mtk_w32(eth,
			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
			MTK_QTX_CRX_PTR);
		mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
			MTK_QTX_CFG(0));
	} else {
		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
		mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
	}

	return 0;

no_tx_mem:
	return -ENOMEM;
}

static void mtk_tx_clean(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < MTK_DMA_SIZE; i++)
			mtk_tx_unmap(eth, &ring->buf[i], false);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (!eth->soc->has_sram && ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}

	if (ring->dma_pdma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
				  ring->dma_pdma,
				  ring->phys_pdma);
		ring->dma_pdma = NULL;
	}
}

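/* Allocate an RX ring. HW LRO rings use larger buffers sized for
 * MTK_MAX_LRO_RX_LENGTH; normal rings are sized for ETH_DATA_LEN. On
 * SRAM-capable SoCs the normal rings are carved out of the same SRAM region
 * as the TX ring, offset by ring number.
 */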
1842static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
1843{
1844 struct mtk_rx_ring *ring;
1845 int rx_data_len, rx_dma_size;
1846 int i;
1847
1848 if (rx_flag == MTK_RX_FLAGS_QDMA) {
1849 if (ring_no)
1850 return -EINVAL;
1851 ring = &eth->rx_ring_qdma;
1852 } else {
1853 ring = &eth->rx_ring[ring_no];
1854 }
1855
1856 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
1857 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
1858 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
1859 } else {
1860 rx_data_len = ETH_DATA_LEN;
1861 rx_dma_size = MTK_DMA_SIZE;
1862 }
1863
1864 ring->frag_size = mtk_max_frag_size(rx_data_len);
1865 ring->buf_size = mtk_max_buf_size(ring->frag_size);
1866 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
1867 GFP_KERNEL);
1868 if (!ring->data)
1869 return -ENOMEM;
1870
1871 for (i = 0; i < rx_dma_size; i++) {
1872 ring->data[i] = netdev_alloc_frag(ring->frag_size);
1873 if (!ring->data[i])
1874 return -ENOMEM;
1875 }
1876
1877 if (!eth->soc->has_sram || rx_flag != MTK_RX_FLAGS_NORMAL) {
1878 ring->dma = dma_alloc_coherent(eth->dev,
1879 rx_dma_size * sizeof(*ring->dma),
1880 &ring->phys, GFP_ATOMIC);
1881 } else {
1882 struct mtk_tx_ring *tx_ring = &eth->tx_ring;
1883
developer18f46a82021-07-20 21:08:21 +08001884 ring->dma = (struct mtk_rx_dma *)(tx_ring->dma +
1885 MTK_DMA_SIZE * (ring_no + 1));
1886 ring->phys = tx_ring->phys + MTK_DMA_SIZE *
1887 sizeof(*tx_ring->dma) * (ring_no + 1);
developerfd40db22021-04-29 10:08:25 +08001888 }
1889
1890 if (!ring->dma)
1891 return -ENOMEM;
1892
1893 for (i = 0; i < rx_dma_size; i++) {
1894 dma_addr_t dma_addr = dma_map_single(eth->dev,
1895 ring->data[i] + NET_SKB_PAD + eth->ip_align,
1896 ring->buf_size,
1897 DMA_FROM_DEVICE);
1898 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1899 return -ENOMEM;
1900 ring->dma[i].rxd1 = (unsigned int)dma_addr;
1901
1902 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1903 ring->dma[i].rxd2 = RX_DMA_LSO;
1904 else
1905 ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
1906
1907 ring->dma[i].rxd3 = 0;
1908 ring->dma[i].rxd4 = 0;
developera2bdbd52021-05-31 19:10:17 +08001909#if defined(CONFIG_MEDIATEK_NETSYS_V2)
developerfd40db22021-04-29 10:08:25 +08001910 if (eth->soc->has_sram && (sizeof(struct mtk_rx_dma) > 16)) {
1911 ring->dma[i].rxd5 = 0;
1912 ring->dma[i].rxd6 = 0;
1913 ring->dma[i].rxd7 = 0;
1914 ring->dma[i].rxd8 = 0;
1915 }
1916#endif
1917 }
1918 ring->dma_size = rx_dma_size;
1919 ring->calc_idx_update = false;
1920 ring->calc_idx = rx_dma_size - 1;
1921 ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ?
1922 MTK_QRX_CRX_IDX_CFG(ring_no) :
1923 MTK_PRX_CRX_IDX_CFG(ring_no);
developer77d03a72021-06-06 00:06:00 +08001924 ring->ring_no = ring_no;
developerfd40db22021-04-29 10:08:25 +08001925 /* make sure that all changes to the dma ring are flushed before we
1926 * continue
1927 */
1928 wmb();
1929
1930 if (rx_flag == MTK_RX_FLAGS_QDMA) {
1931 mtk_w32(eth, ring->phys, MTK_QRX_BASE_PTR_CFG(ring_no));
1932 mtk_w32(eth, rx_dma_size, MTK_QRX_MAX_CNT_CFG(ring_no));
1933 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1934 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_QDMA_RST_IDX);
1935 } else {
1936 mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
1937 mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
1938 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1939 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
1940 }
1941
1942 return 0;
1943}
1944
1945static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram)
1946{
1947 int i;
1948
1949 if (ring->data && ring->dma) {
1950 for (i = 0; i < ring->dma_size; i++) {
1951 if (!ring->data[i])
1952 continue;
1953 if (!ring->dma[i].rxd1)
1954 continue;
1955 dma_unmap_single(eth->dev,
1956 ring->dma[i].rxd1,
1957 ring->buf_size,
1958 DMA_FROM_DEVICE);
1959 skb_free_frag(ring->data[i]);
1960 }
1961 kfree(ring->data);
1962 ring->data = NULL;
1963 }
1964
1965 if (in_sram)
1966 return;
1967
1968 if (ring->dma) {
1969 dma_free_coherent(eth->dev,
1970 ring->dma_size * sizeof(*ring->dma),
1971 ring->dma,
1972 ring->phys);
1973 ring->dma = NULL;
1974 }
1975}
1976
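/* Program the HW LRO engine: each candidate ring is put in auto-learn
 * mode with its age and aggregation timers counted in 20us units,
 * while the global control words select packet-count priority, IPv4
 * checksum update and a bandwidth threshold. The MTK_NETSYS_V2 branch
 * below mainly moves the aggregated-length (SDL) field to different
 * registers.
 */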
1977static int mtk_hwlro_rx_init(struct mtk_eth *eth)
1978{
1979 int i;
developer77d03a72021-06-06 00:06:00 +08001980 u32 val;
developerfd40db22021-04-29 10:08:25 +08001981 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
1982 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
1983
1984 /* set LRO rings to auto-learn mode */
1985 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
1986
1987 /* validate LRO ring */
1988 ring_ctrl_dw2 |= MTK_RING_VLD;
1989
1990 /* set AGE timer (unit: 20us) */
1991 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
1992 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
1993
1994 /* set max AGG timer (unit: 20us) */
1995 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
1996
1997 /* set max LRO AGG count */
1998 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
1999 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2000
developer77d03a72021-06-06 00:06:00 +08002001 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002002 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2003 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2004 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2005 }
2006
2007 /* IPv4 checksum update enable */
2008 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2009
2010 /* switch priority comparison to packet count mode */
2011 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2012
2013 /* bandwidth threshold setting */
2014 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2015
2016 /* auto-learn score delta setting */
developer77d03a72021-06-06 00:06:00 +08002017 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_LRO_ALT_SCORE_DELTA);
developerfd40db22021-04-29 10:08:25 +08002018
2019 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2020 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2021 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2022
developerfd40db22021-04-29 10:08:25 +08002023 /* minimum remaining room in the RXD's SDL0 field for LRO aggregation */
2024 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2025
developer77d03a72021-06-06 00:06:00 +08002026 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2027 val = mtk_r32(eth, MTK_PDMA_RX_CFG);
2028 mtk_w32(eth, val | (MTK_PDMA_LRO_SDL << MTK_RX_CFG_SDL_OFFSET),
2029 MTK_PDMA_RX_CFG);
2030
2031 lro_ctrl_dw0 |= MTK_PDMA_LRO_SDL << MTK_CTRL_DW0_SDL_OFFSET;
2032 } else {
2033 /* set HW LRO mode & the max aggregation count for rx packets */
2034 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2035 }
2036
developerfd40db22021-04-29 10:08:25 +08002037 /* enable HW LRO */
2038 lro_ctrl_dw0 |= MTK_LRO_EN;
2039
developer77d03a72021-06-06 00:06:00 +08002040 /* enable cpu reason black list */
2041 lro_ctrl_dw0 |= MTK_LRO_CRSN_BNW;
2042
developerfd40db22021-04-29 10:08:25 +08002043 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2044 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2045
developer77d03a72021-06-06 00:06:00 +08002046 /* do not use the PPE cpu reason */
2047 mtk_w32(eth, 0xffffffff, MTK_PDMA_LRO_CTRL_DW1);
2048
developerfd40db22021-04-29 10:08:25 +08002049 return 0;
2050}
2051
2052static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2053{
2054 int i;
2055 u32 val;
2056
2057 /* relinquish lro rings, flush aggregated packets */
developer77d03a72021-06-06 00:06:00 +08002058 mtk_w32(eth, MTK_LRO_RING_RELINGUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
developerfd40db22021-04-29 10:08:25 +08002059
2060 /* wait for relinquishments done */
2061 for (i = 0; i < 10; i++) {
2062 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
developer77d03a72021-06-06 00:06:00 +08002063 if (val & MTK_LRO_RING_RELINGUISH_DONE) {
developerfd40db22021-04-29 10:08:25 +08002064 msleep(20);
2065 continue;
2066 }
2067 break;
2068 }
2069
2070 /* invalidate lro rings */
developer77d03a72021-06-06 00:06:00 +08002071 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
developerfd40db22021-04-29 10:08:25 +08002072 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2073
2074 /* disable HW LRO */
2075 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2076}
2077
2078static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2079{
2080 u32 reg_val;
2081
developer77d03a72021-06-06 00:06:00 +08002082 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2083 idx += 1;
2084
developerfd40db22021-04-29 10:08:25 +08002085 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2086
2087 /* invalidate the IP setting */
2088 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2089
2090 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2091
2092 /* validate the IP setting */
2093 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2094}
2095
2096static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2097{
2098 u32 reg_val;
2099
developer77d03a72021-06-06 00:06:00 +08002100 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2101 idx += 1;
2102
developerfd40db22021-04-29 10:08:25 +08002103 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2104
2105 /* invalidate the IP setting */
2106 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2107
2108 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2109}
2110
2111static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2112{
2113 int cnt = 0;
2114 int i;
2115
2116 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2117 if (mac->hwlro_ip[i])
2118 cnt++;
2119 }
2120
2121 return cnt;
2122}
2123
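/* ethtool flow-steering entry point for HW LRO. A rule roughly like
 *
 *	ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.1 loc 0
 *
 * lands here (illustrative command; the handler only accepts
 * TCP_V4_FLOW with a non-zero destination address and location 0 or 1).
 */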
2124static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2125 struct ethtool_rxnfc *cmd)
2126{
2127 struct ethtool_rx_flow_spec *fsp =
2128 (struct ethtool_rx_flow_spec *)&cmd->fs;
2129 struct mtk_mac *mac = netdev_priv(dev);
2130 struct mtk_eth *eth = mac->hw;
2131 int hwlro_idx;
2132
2133 if ((fsp->flow_type != TCP_V4_FLOW) ||
2134 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2135 (fsp->location > 1))
2136 return -EINVAL;
2137
2138 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2139 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2140
2141 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2142
2143 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2144
2145 return 0;
2146}
2147
2148static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2149 struct ethtool_rxnfc *cmd)
2150{
2151 struct ethtool_rx_flow_spec *fsp =
2152 (struct ethtool_rx_flow_spec *)&cmd->fs;
2153 struct mtk_mac *mac = netdev_priv(dev);
2154 struct mtk_eth *eth = mac->hw;
2155 int hwlro_idx;
2156
2157 if (fsp->location > 1)
2158 return -EINVAL;
2159
2160 mac->hwlro_ip[fsp->location] = 0;
2161 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2162
2163 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2164
2165 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2166
2167 return 0;
2168}
2169
2170static void mtk_hwlro_netdev_disable(struct net_device *dev)
2171{
2172 struct mtk_mac *mac = netdev_priv(dev);
2173 struct mtk_eth *eth = mac->hw;
2174 int i, hwlro_idx;
2175
2176 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2177 mac->hwlro_ip[i] = 0;
2178 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2179
2180 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2181 }
2182
2183 mac->hwlro_ip_cnt = 0;
2184}
2185
2186static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2187 struct ethtool_rxnfc *cmd)
2188{
2189 struct mtk_mac *mac = netdev_priv(dev);
2190 struct ethtool_rx_flow_spec *fsp =
2191 (struct ethtool_rx_flow_spec *)&cmd->fs;
2192
2193 /* only the TCP destination IPv4 address is meaningful; other fields are ignored */
2194 fsp->flow_type = TCP_V4_FLOW;
2195 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2196 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2197
2198 fsp->h_u.tcp_ip4_spec.ip4src = 0;
2199 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2200 fsp->h_u.tcp_ip4_spec.psrc = 0;
2201 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2202 fsp->h_u.tcp_ip4_spec.pdst = 0;
2203 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2204 fsp->h_u.tcp_ip4_spec.tos = 0;
2205 fsp->m_u.tcp_ip4_spec.tos = 0xff;
2206
2207 return 0;
2208}
2209
2210static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2211 struct ethtool_rxnfc *cmd,
2212 u32 *rule_locs)
2213{
2214 struct mtk_mac *mac = netdev_priv(dev);
2215 int cnt = 0;
2216 int i;
2217
2218 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2219 if (mac->hwlro_ip[i]) {
2220 rule_locs[cnt] = i;
2221 cnt++;
2222 }
2223 }
2224
2225 cmd->rule_cnt = cnt;
2226
2227 return 0;
2228}
2229
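/* Bring up receive-side scaling: program the hash types and the
 * indirection table, raise MTK_RSS_CFG_REQ to pause the engine, flip
 * MTK_RSS_EN while paused, then clear the request bit to resume. The
 * eight MTK_RSS_INDR_TABLE_DWx writes appear to spread flows across
 * four rings (an inference from the MTK_RSS_INDR_TABLE_SIZE4 name).
 */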
developer18f46a82021-07-20 21:08:21 +08002230static int mtk_rss_init(struct mtk_eth *eth)
2231{
2232 u32 val;
2233
2234 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2235 /* Set RSS rings to PSE mode */
2236 val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(1));
2237 val |= MTK_RING_PSE_MODE;
2238 mtk_w32(eth, val, MTK_LRO_CTRL_DW2_CFG(1));
2239
2240 /* Enable non-lro multiple rx */
2241 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2242 val |= MTK_NON_LRO_MULTI_EN;
2243 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2244
2245 /* Enable RSS delay interrupt support */
2246 val |= MTK_LRO_DLY_INT_EN;
2247 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2248
2249 /* Set RSS delay interrupt config for ring1 */
2250 mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX1_DLY_INT);
2251 }
2252
2253 /* Hash Type */
2254 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2255 val |= MTK_RSS_IPV4_STATIC_HASH;
2256 val |= MTK_RSS_IPV6_STATIC_HASH;
2257 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2258
2259 /* Select the size of indirection table */
2260 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW0);
2261 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW1);
2262 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW2);
2263 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW3);
2264 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW4);
2265 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW5);
2266 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW6);
2267 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW7);
2268
2269 /* Pause */
2270 val |= MTK_RSS_CFG_REQ;
2271 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2272
2273 /* Enable RSS */
2274 val |= MTK_RSS_EN;
2275 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2276
2277 /* Release pause */
2278 val &= ~(MTK_RSS_CFG_REQ);
2279 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2280
2281 /* Set perRSS GRP INT */
2282 /* Set per-RSS group interrupt */
2283
2284 /* Set GRP INT */
2285 mtk_w32(eth, 0x21021030, MTK_FE_INT_GRP);
2286
2287 return 0;
2288}
2289
2290static void mtk_rss_uninit(struct mtk_eth *eth)
2291{
2292 u32 val;
2293
2294 /* Pause */
2295 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2296 val |= MTK_RSS_CFG_REQ;
2297 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2298
2299 /* Disable RSS */
2300 val &= ~(MTK_RSS_EN);
2301 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2302
2303 /* Release pause */
2304 val &= ~(MTK_RSS_CFG_REQ);
2305 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2306}
2307
developerfd40db22021-04-29 10:08:25 +08002308static netdev_features_t mtk_fix_features(struct net_device *dev,
2309 netdev_features_t features)
2310{
2311 if (!(features & NETIF_F_LRO)) {
2312 struct mtk_mac *mac = netdev_priv(dev);
2313 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2314
2315 if (ip_cnt) {
2316 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
2317
2318 features |= NETIF_F_LRO;
2319 }
2320 }
2321
2322 if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
2323 netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n");
2324
2325 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2326 }
2327
2328 return features;
2329}
2330
2331static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2332{
2333 struct mtk_mac *mac = netdev_priv(dev);
2334 struct mtk_eth *eth = mac->hw;
2335 int err = 0;
2336
2337 if (!((dev->features ^ features) & MTK_SET_FEATURES))
2338 return 0;
2339
2340 if (!(features & NETIF_F_LRO))
2341 mtk_hwlro_netdev_disable(dev);
2342
2343 if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
2344 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
2345 else
2346 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
2347
2348 return err;
2349}
2350
2351/* wait for DMA to finish whatever it is doing before we start using it again */
2352static int mtk_dma_busy_wait(struct mtk_eth *eth)
2353{
2354 unsigned long t_start = jiffies;
2355
2356 while (1) {
2357 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2358 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
2359 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2360 return 0;
2361 } else {
2362 if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
2363 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2364 return 0;
2365 }
2366
2367 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
2368 break;
2369 }
2370
2371 dev_err(eth->dev, "DMA init timeout\n");
2372 return -1;
2373}
2374
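/* One-shot DMA bring-up used by mtk_start_dma(): wait for the engines
 * to go idle, build the QDMA scratch/free queue if present, then the
 * TX ring, the QDMA RX ring, the default PDMA RX ring, any HW LRO
 * rings and finally the extra RSS rings. QDMA parts also get random
 * early drop thresholds (MTK_QDMA_FC_THRES) before returning.
 */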
2375static int mtk_dma_init(struct mtk_eth *eth)
2376{
2377 int err;
2378 u32 i;
2379
2380 if (mtk_dma_busy_wait(eth))
2381 return -EBUSY;
2382
2383 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2384 /* QDMA needs scratch memory for internal reordering of the
2385 * descriptors
2386 */
2387 err = mtk_init_fq_dma(eth);
2388 if (err)
2389 return err;
2390 }
2391
2392 err = mtk_tx_alloc(eth);
2393 if (err)
2394 return err;
2395
2396 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2397 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2398 if (err)
2399 return err;
2400 }
2401
2402 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2403 if (err)
2404 return err;
2405
2406 if (eth->hwlro) {
developer77d03a72021-06-06 00:06:00 +08002407 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ? 4 : 1;
2408 for (; i < MTK_MAX_RX_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002409 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2410 if (err)
2411 return err;
2412 }
2413 err = mtk_hwlro_rx_init(eth);
2414 if (err)
2415 return err;
2416 }
2417
developer18f46a82021-07-20 21:08:21 +08002418 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2419 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2420 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_NORMAL);
2421 if (err)
2422 return err;
2423 }
2424 err = mtk_rss_init(eth);
2425 if (err)
2426 return err;
2427 }
2428
developerfd40db22021-04-29 10:08:25 +08002429 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2430 /* Enable random early drop and set drop threshold
2431 * automatically
2432 */
2433 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2434 FC_THRES_MIN, MTK_QDMA_FC_THRES);
2435 mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
2436 }
2437
2438 return 0;
2439}
2440
2441static void mtk_dma_free(struct mtk_eth *eth)
2442{
2443 int i;
2444
2445 for (i = 0; i < MTK_MAC_COUNT; i++)
2446 if (eth->netdev[i])
2447 netdev_reset_queue(eth->netdev[i]);
2448 if (!eth->soc->has_sram && eth->scratch_ring) {
2449 dma_free_coherent(eth->dev,
2450 MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
2451 eth->scratch_ring,
2452 eth->phy_scratch_ring);
2453 eth->scratch_ring = NULL;
2454 eth->phy_scratch_ring = 0;
2455 }
2456 mtk_tx_clean(eth);
2457 mtk_rx_clean(eth, &eth->rx_ring[0], eth->soc->has_sram);
2458 mtk_rx_clean(eth, &eth->rx_ring_qdma, 0);
2459
2460 if (eth->hwlro) {
2461 mtk_hwlro_rx_uninit(eth);
developer77d03a72021-06-06 00:06:00 +08002462
2463 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ? 4 : 1;
2464 for (; i < MTK_MAX_RX_RING_NUM; i++)
2465 mtk_rx_clean(eth, &eth->rx_ring[i], 0);
developerfd40db22021-04-29 10:08:25 +08002466 }
2467
developer18f46a82021-07-20 21:08:21 +08002468 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2469 mtk_rss_uninit(eth);
2470
2471 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
2472 mtk_rx_clean(eth, &eth->rx_ring[i], 1);
2473 }
2474
developerfd40db22021-04-29 10:08:25 +08002475 kfree(eth->scratch_head);
2476}
2477
2478static void mtk_tx_timeout(struct net_device *dev)
2479{
2480 struct mtk_mac *mac = netdev_priv(dev);
2481 struct mtk_eth *eth = mac->hw;
2482
2483 eth->netdev[mac->id]->stats.tx_errors++;
2484 netif_err(eth, tx_err, dev,
2485 "transmit timed out\n");
2486 schedule_work(&eth->pending_work);
2487}
2488
developer18f46a82021-07-20 21:08:21 +08002489static irqreturn_t mtk_handle_irq_rx(int irq, void *priv)
developerfd40db22021-04-29 10:08:25 +08002490{
developer18f46a82021-07-20 21:08:21 +08002491 struct mtk_napi *rx_napi = priv;
2492 struct mtk_eth *eth = rx_napi->eth;
2493 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08002494
developer18f46a82021-07-20 21:08:21 +08002495 if (likely(napi_schedule_prep(&rx_napi->napi))) {
developer18f46a82021-07-20 21:08:21 +08002496 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(ring->ring_no));
developer6bbe70d2021-08-06 09:34:55 +08002497 __napi_schedule(&rx_napi->napi);
developerfd40db22021-04-29 10:08:25 +08002498 }
2499
2500 return IRQ_HANDLED;
2501}
2502
2503static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
2504{
2505 struct mtk_eth *eth = _eth;
2506
2507 if (likely(napi_schedule_prep(&eth->tx_napi))) {
developerfd40db22021-04-29 10:08:25 +08002508 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer6bbe70d2021-08-06 09:34:55 +08002509 __napi_schedule(&eth->tx_napi);
developerfd40db22021-04-29 10:08:25 +08002510 }
2511
2512 return IRQ_HANDLED;
2513}
2514
2515static irqreturn_t mtk_handle_irq(int irq, void *_eth)
2516{
2517 struct mtk_eth *eth = _eth;
2518
developer18f46a82021-07-20 21:08:21 +08002519 if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT(0)) {
2520 if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT(0))
2521 mtk_handle_irq_rx(irq, &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08002522 }
2523 if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
2524 if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
2525 mtk_handle_irq_tx(irq, _eth);
2526 }
2527
2528 return IRQ_HANDLED;
2529}
2530
2531#ifdef CONFIG_NET_POLL_CONTROLLER
2532static void mtk_poll_controller(struct net_device *dev)
2533{
2534 struct mtk_mac *mac = netdev_priv(dev);
2535 struct mtk_eth *eth = mac->hw;
2536
2537 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002538 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
2539 mtk_handle_irq_rx(eth->irq[2], &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08002540 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002541 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08002542}
2543#endif
2544
2545static int mtk_start_dma(struct mtk_eth *eth)
2546{
2547 u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
developer77d03a72021-06-06 00:06:00 +08002548 int val, err;
developerfd40db22021-04-29 10:08:25 +08002549
2550 err = mtk_dma_init(eth);
2551 if (err) {
2552 mtk_dma_free(eth);
2553 return err;
2554 }
2555
2556 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
developer15d0d282021-07-14 16:40:44 +08002557 val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
developera2bdbd52021-05-31 19:10:17 +08002558 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developerfd40db22021-04-29 10:08:25 +08002559 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002560 val | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08002561 MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
2562 MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
2563 MTK_RESV_BUF | MTK_WCOMP_EN |
2564 MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
2565 MTK_RX_2B_OFFSET, MTK_QDMA_GLO_CFG);
2566 else
2567 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002568 val | MTK_TX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08002569 MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
2570 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
2571 MTK_RX_BT_32DWORDS,
2572 MTK_QDMA_GLO_CFG);
2573
developer15d0d282021-07-14 16:40:44 +08002574 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
developerfd40db22021-04-29 10:08:25 +08002575 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002576 val | MTK_RX_DMA_EN | rx_2b_offset |
developerfd40db22021-04-29 10:08:25 +08002577 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
2578 MTK_PDMA_GLO_CFG);
2579 } else {
2580 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2581 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
2582 MTK_PDMA_GLO_CFG);
2583 }
2584
developer77d03a72021-06-06 00:06:00 +08002585 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) && eth->hwlro) {
2586 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
2587 mtk_w32(eth, val | MTK_RX_DMA_LRO_EN, MTK_PDMA_GLO_CFG);
2588 }
2589
developerfd40db22021-04-29 10:08:25 +08002590 return 0;
2591}
2592
2593static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
2594{
2595 int i;
2596
2597 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2598 return;
2599
2600 for (i = 0; i < MTK_MAC_COUNT; i++) {
2601 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
2602
2603 /* by default, set up the forward port to send frames to PDMA */
2604 val &= ~0xffff;
2605
2606 /* Enable RX checksum */
2607 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
2608
2609 val |= config;
2610
2611 if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
2612 val |= MTK_GDMA_SPECIAL_TAG;
2613
2614 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
2615 }
2616 /* Reset and enable PSE */
2617 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
2618 mtk_w32(eth, 0, MTK_RST_GL);
2619}
2620
2621static int mtk_open(struct net_device *dev)
2622{
2623 struct mtk_mac *mac = netdev_priv(dev);
2624 struct mtk_eth *eth = mac->hw;
developer18f46a82021-07-20 21:08:21 +08002625 int err, i;
developerfd40db22021-04-29 10:08:25 +08002626
2627 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
2628 if (err) {
2629 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
2630 err);
2631 return err;
2632 }
2633
2634 /* we run 2 netdevs on the same dma ring so we only bring it up once */
2635 if (!refcount_read(&eth->dma_refcnt)) {
2636 int err = mtk_start_dma(eth);
2637
2638 if (err)
2639 return err;
2640
2641 mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
2642
2643 /* Instruct CDM to parse the MTK special tag on frames from the CPU */
2644 if (netdev_uses_dsa(dev)) {
2645 u32 val;
2646 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
2647 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
2648 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
2649 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
2650 }
2651
2652 napi_enable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08002653 napi_enable(&eth->rx_napi[0].napi);
developerfd40db22021-04-29 10:08:25 +08002654 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002655 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
2656
2657 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2658 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2659 napi_enable(&eth->rx_napi[i].napi);
2660 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(i));
2661 }
2662 }
2663
developerfd40db22021-04-29 10:08:25 +08002664 refcount_set(&eth->dma_refcnt, 1);
2665 } else {
2666 refcount_inc(&eth->dma_refcnt);
2667 }
2668
2669 phylink_start(mac->phylink);
2670 netif_start_queue(dev);
2671 return 0;
2672}
2673
2674static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
2675{
2676 u32 val;
2677 int i;
2678
2679 /* stop the dma engine */
2680 spin_lock_bh(&eth->page_lock);
2681 val = mtk_r32(eth, glo_cfg);
2682 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
2683 glo_cfg);
2684 spin_unlock_bh(&eth->page_lock);
2685
2686 /* wait for dma stop */
2687 for (i = 0; i < 10; i++) {
2688 val = mtk_r32(eth, glo_cfg);
2689 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
2690 msleep(20);
2691 continue;
2692 }
2693 break;
2694 }
2695}
2696
2697static int mtk_stop(struct net_device *dev)
2698{
2699 struct mtk_mac *mac = netdev_priv(dev);
2700 struct mtk_eth *eth = mac->hw;
developer18f46a82021-07-20 21:08:21 +08002701 int i;
developerfd40db22021-04-29 10:08:25 +08002702
2703 phylink_stop(mac->phylink);
2704
2705 netif_tx_disable(dev);
2706
2707 phylink_disconnect_phy(mac->phylink);
2708
2709 /* only shutdown DMA if this is the last user */
2710 if (!refcount_dec_and_test(&eth->dma_refcnt))
2711 return 0;
2712
2713 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
2714
2715 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002716 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08002717 napi_disable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08002718 napi_disable(&eth->rx_napi[0].napi);
2719
2720 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2721 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2722 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(i));
2723 napi_disable(&eth->rx_napi[i].napi);
2724 }
2725 }
developerfd40db22021-04-29 10:08:25 +08002726
2727 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2728 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
2729 mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
2730
2731 mtk_dma_free(eth);
2732
2733 return 0;
2734}
2735
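/* Pulse the given bits in ETHSYS_RSTCTRL: assert reset, hold for about
 * 1ms, deassert, then wait another 10ms for the blocks behind the
 * reset to settle before anything touches them again.
 */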
2736static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
2737{
2738 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2739 reset_bits,
2740 reset_bits);
2741
2742 usleep_range(1000, 1100);
2743 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2744 reset_bits,
2745 ~reset_bits);
2746 mdelay(10);
2747}
2748
2749static void mtk_clk_disable(struct mtk_eth *eth)
2750{
2751 int clk;
2752
2753 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
2754 clk_disable_unprepare(eth->clks[clk]);
2755}
2756
2757static int mtk_clk_enable(struct mtk_eth *eth)
2758{
2759 int clk, ret;
2760
2761 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
2762 ret = clk_prepare_enable(eth->clks[clk]);
2763 if (ret)
2764 goto err_disable_clks;
2765 }
2766
2767 return 0;
2768
2769err_disable_clks:
2770 while (--clk >= 0)
2771 clk_disable_unprepare(eth->clks[clk]);
2772
2773 return ret;
2774}
2775
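/* Wire each mtk_napi context to its RX ring and interrupt group:
 * ring 0 always uses group 2, and with RSS enabled ring N maps to
 * group 2 + N, matching the MTK_PDMA_INT_GRP3 programming done in
 * mtk_rss_init().
 */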
developer18f46a82021-07-20 21:08:21 +08002776static int mtk_napi_init(struct mtk_eth *eth)
2777{
2778 struct mtk_napi *rx_napi = &eth->rx_napi[0];
2779 int i;
2780
2781 rx_napi->eth = eth;
2782 rx_napi->rx_ring = &eth->rx_ring[0];
2783 rx_napi->irq_grp_no = 2;
2784
2785 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2786 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2787 rx_napi = &eth->rx_napi[i];
2788 rx_napi->eth = eth;
2789 rx_napi->rx_ring = &eth->rx_ring[i];
2790 rx_napi->irq_grp_no = 2 + i;
2791 }
2792 }
2793
2794 return 0;
2795}
2796
developerfd40db22021-04-29 10:08:25 +08002797static int mtk_hw_init(struct mtk_eth *eth)
2798{
developer77d03a72021-06-06 00:06:00 +08002799 int i, ret;
developerfd40db22021-04-29 10:08:25 +08002800
2801 if (test_and_set_bit(MTK_HW_INIT, &eth->state))
2802 return 0;
2803
2804 pm_runtime_enable(eth->dev);
2805 pm_runtime_get_sync(eth->dev);
2806
2807 ret = mtk_clk_enable(eth);
2808 if (ret)
2809 goto err_disable_pm;
2810
2811 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2812 ret = device_reset(eth->dev);
2813 if (ret) {
2814 dev_err(eth->dev, "MAC reset failed!\n");
2815 goto err_disable_pm;
2816 }
2817
2818 /* enable interrupt delay for RX */
2819 mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
2820
2821 /* disable delay and normal interrupt */
2822 mtk_tx_irq_disable(eth, ~0);
2823 mtk_rx_irq_disable(eth, ~0);
2824
2825 return 0;
2826 }
2827
2828 /* Non-MT7628 handling... */
developera2bdbd52021-05-31 19:10:17 +08002829 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developer545abf02021-07-15 17:47:01 +08002830 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
2831 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
2832 if(MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
2833 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE | RSTCTRL_PPE1);
2834 else
2835 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE);
2836
2837 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2838 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0x3ffffff);
2839
2840 /* Set FE to PDMAv2 if necessary */
developerfd40db22021-04-29 10:08:25 +08002841 mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
developer545abf02021-07-15 17:47:01 +08002842 }
developerfd40db22021-04-29 10:08:25 +08002843
2844 if (eth->pctl) {
2845 /* Set GE2 driving and slew rate */
2846 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
2847
2848 /* set GE2 TDSEL */
2849 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
2850
2851 /* set GE2 TUNE */
2852 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
2853 }
2854
2855 /* Set link-down as the default for each GMAC. Each MCR is set up
2856 * with the appropriate value once mtk_mac_config() is invoked.
2857 */
2859 for (i = 0; i < MTK_MAC_COUNT; i++)
2860 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
2861
2862 /* Enable RX VLAN offloading */
developer41294e32021-05-07 16:11:23 +08002863 if (eth->soc->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
2864 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
2865 else
2866 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
developerfd40db22021-04-29 10:08:25 +08002867
2868 /* enable interrupt delay for RX/TX */
2869 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
2870 mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);
2871
2872 mtk_tx_irq_disable(eth, ~0);
2873 mtk_rx_irq_disable(eth, ~0);
2874
2875 /* FE int grouping */
2876 mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
developer18f46a82021-07-20 21:08:21 +08002877 mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_PDMA_INT_GRP2);
developerfd40db22021-04-29 10:08:25 +08002878 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
developer18f46a82021-07-20 21:08:21 +08002879 mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_QDMA_INT_GRP2);
developerfd40db22021-04-29 10:08:25 +08002880 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
2881
developera2bdbd52021-05-31 19:10:17 +08002882 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfef9efd2021-06-16 18:28:09 +08002883 /* PSE Free Queue Flow Control */
2884 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
2885
developer81bcad32021-07-15 14:14:38 +08002886 /* PSE should not drop port8 and port9 packets */
2887 mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
2888
developerfef9efd2021-06-16 18:28:09 +08002889 /* PSE config input queue threshold */
developerfd40db22021-04-29 10:08:25 +08002890 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
2891 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
2892 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
2893 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
2894 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
2895 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
2896 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
2897 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
2898
developerfef9efd2021-06-16 18:28:09 +08002899 /* PSE config output queue threshold */
developerfd40db22021-04-29 10:08:25 +08002900 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
2901 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
2902 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
2903 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
2904 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
2905 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
2906 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
2907 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
developerfef9efd2021-06-16 18:28:09 +08002908
2909 /* GDM and CDM Threshold */
2910 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
2911 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
2912 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
2913 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
2914 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
2915 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
developerfd40db22021-04-29 10:08:25 +08002916 }
2917
2918 return 0;
2919
2920err_disable_pm:
2921 pm_runtime_put_sync(eth->dev);
2922 pm_runtime_disable(eth->dev);
2923
2924 return ret;
2925}
2926
2927static int mtk_hw_deinit(struct mtk_eth *eth)
2928{
2929 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
2930 return 0;
2931
2932 mtk_clk_disable(eth);
2933
2934 pm_runtime_put_sync(eth->dev);
2935 pm_runtime_disable(eth->dev);
2936
2937 return 0;
2938}
2939
2940static int __init mtk_init(struct net_device *dev)
2941{
2942 struct mtk_mac *mac = netdev_priv(dev);
2943 struct mtk_eth *eth = mac->hw;
2944 const char *mac_addr;
2945
2946 mac_addr = of_get_mac_address(mac->of_node);
2947 if (!IS_ERR(mac_addr))
2948 ether_addr_copy(dev->dev_addr, mac_addr);
2949
2950 /* If the mac address is invalid, use random mac address */
2951 if (!is_valid_ether_addr(dev->dev_addr)) {
2952 eth_hw_addr_random(dev);
2953 dev_err(eth->dev, "generated random MAC address %pM\n",
2954 dev->dev_addr);
2955 }
2956
2957 return 0;
2958}
2959
2960static void mtk_uninit(struct net_device *dev)
2961{
2962 struct mtk_mac *mac = netdev_priv(dev);
2963 struct mtk_eth *eth = mac->hw;
2964
2965 phylink_disconnect_phy(mac->phylink);
2966 mtk_tx_irq_disable(eth, ~0);
2967 mtk_rx_irq_disable(eth, ~0);
2968}
2969
2970static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2971{
2972 struct mtk_mac *mac = netdev_priv(dev);
2973
2974 switch (cmd) {
2975 case SIOCGMIIPHY:
2976 case SIOCGMIIREG:
2977 case SIOCSMIIREG:
2978 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
2979 default:
2980 /* by default, invoke the mtk_eth_dbg handler */
2981 return mtk_do_priv_ioctl(dev, ifr, cmd);
2983 }
2984
2985 return -EOPNOTSUPP;
2986}
2987
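/* Reset worker scheduled from mtk_tx_timeout(): under rtnl it marks
 * MTK_RESETTING, stops every registered netdev so DMA quiesces, power
 * cycles the hardware via mtk_hw_deinit()/mtk_hw_init(), and reopens
 * whatever was running before, closing any device that fails to come
 * back up.
 */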
2988static void mtk_pending_work(struct work_struct *work)
2989{
2990 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
2991 int err, i;
2992 unsigned long restart = 0;
2993
2994 rtnl_lock();
2995
2996 dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
2997
2998 while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
2999 cpu_relax();
3000
3001 dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
3002 /* stop all devices to make sure that dma is properly shut down */
3003 for (i = 0; i < MTK_MAC_COUNT; i++) {
3004 if (!eth->netdev[i])
3005 continue;
3006 mtk_stop(eth->netdev[i]);
3007 __set_bit(i, &restart);
3008 }
3009 dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
3010
3011 /* restart underlying hardware such as power, clock, pin mux
3012 * and the connected phy
3013 */
3014 mtk_hw_deinit(eth);
3015
3016 if (eth->dev->pins)
3017 pinctrl_select_state(eth->dev->pins->p,
3018 eth->dev->pins->default_state);
3019 mtk_hw_init(eth);
3020
3021 /* restart DMA and enable IRQs */
3022 for (i = 0; i < MTK_MAC_COUNT; i++) {
3023 if (!test_bit(i, &restart))
3024 continue;
3025 err = mtk_open(eth->netdev[i]);
3026 if (err) {
3027 netif_alert(eth, ifup, eth->netdev[i],
3028 "Driver up/down cycle failed, closing device.\n");
3029 dev_close(eth->netdev[i]);
3030 }
3031 }
3032
3033 dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
3034
3035 clear_bit_unlock(MTK_RESETTING, &eth->state);
3036
3037 rtnl_unlock();
3038}
3039
3040static int mtk_free_dev(struct mtk_eth *eth)
3041{
3042 int i;
3043
3044 for (i = 0; i < MTK_MAC_COUNT; i++) {
3045 if (!eth->netdev[i])
3046 continue;
3047 free_netdev(eth->netdev[i]);
3048 }
3049
3050 return 0;
3051}
3052
3053static int mtk_unreg_dev(struct mtk_eth *eth)
3054{
3055 int i;
3056
3057 for (i = 0; i < MTK_MAC_COUNT; i++) {
3058 if (!eth->netdev[i])
3059 continue;
3060 unregister_netdev(eth->netdev[i]);
3061 }
3062
3063 return 0;
3064}
3065
3066static int mtk_cleanup(struct mtk_eth *eth)
3067{
3068 mtk_unreg_dev(eth);
3069 mtk_free_dev(eth);
3070 cancel_work_sync(&eth->pending_work);
3071
3072 return 0;
3073}
3074
3075static int mtk_get_link_ksettings(struct net_device *ndev,
3076 struct ethtool_link_ksettings *cmd)
3077{
3078 struct mtk_mac *mac = netdev_priv(ndev);
3079
3080 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3081 return -EBUSY;
3082
3083 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
3084}
3085
3086static int mtk_set_link_ksettings(struct net_device *ndev,
3087 const struct ethtool_link_ksettings *cmd)
3088{
3089 struct mtk_mac *mac = netdev_priv(ndev);
3090
3091 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3092 return -EBUSY;
3093
3094 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
3095}
3096
3097static void mtk_get_drvinfo(struct net_device *dev,
3098 struct ethtool_drvinfo *info)
3099{
3100 struct mtk_mac *mac = netdev_priv(dev);
3101
3102 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
3103 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
3104 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
3105}
3106
3107static u32 mtk_get_msglevel(struct net_device *dev)
3108{
3109 struct mtk_mac *mac = netdev_priv(dev);
3110
3111 return mac->hw->msg_enable;
3112}
3113
3114static void mtk_set_msglevel(struct net_device *dev, u32 value)
3115{
3116 struct mtk_mac *mac = netdev_priv(dev);
3117
3118 mac->hw->msg_enable = value;
3119}
3120
3121static int mtk_nway_reset(struct net_device *dev)
3122{
3123 struct mtk_mac *mac = netdev_priv(dev);
3124
3125 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3126 return -EBUSY;
3127
3128 if (!mac->phylink)
3129 return -ENOTSUPP;
3130
3131 return phylink_ethtool_nway_reset(mac->phylink);
3132}
3133
3134static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3135{
3136 int i;
3137
3138 switch (stringset) {
3139 case ETH_SS_STATS:
3140 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
3141 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
3142 data += ETH_GSTRING_LEN;
3143 }
3144 break;
3145 }
3146}
3147
3148static int mtk_get_sset_count(struct net_device *dev, int sset)
3149{
3150 switch (sset) {
3151 case ETH_SS_STATS:
3152 return ARRAY_SIZE(mtk_ethtool_stats);
3153 default:
3154 return -EOPNOTSUPP;
3155 }
3156}
3157
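/* Counters are copied under a u64_stats seqcount: the fetch/retry
 * loop below re-reads the whole block whenever the writer side
 * updated it mid-copy, which keeps 64-bit counters coherent on 32-bit
 * machines.
 */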
3158static void mtk_get_ethtool_stats(struct net_device *dev,
3159 struct ethtool_stats *stats, u64 *data)
3160{
3161 struct mtk_mac *mac = netdev_priv(dev);
3162 struct mtk_hw_stats *hwstats = mac->hw_stats;
3163 u64 *data_src, *data_dst;
3164 unsigned int start;
3165 int i;
3166
3167 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3168 return;
3169
3170 if (netif_running(dev) && netif_device_present(dev)) {
3171 if (spin_trylock_bh(&hwstats->stats_lock)) {
3172 mtk_stats_update_mac(mac);
3173 spin_unlock_bh(&hwstats->stats_lock);
3174 }
3175 }
3176
3177 data_src = (u64 *)hwstats;
3178
3179 do {
3180 data_dst = data;
3181 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
3182
3183 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
3184 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
3185 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
3186}
3187
3188static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
3189 u32 *rule_locs)
3190{
3191 int ret = -EOPNOTSUPP;
3192
3193 switch (cmd->cmd) {
3194 case ETHTOOL_GRXRINGS:
3195 if (dev->hw_features & NETIF_F_LRO) {
3196 cmd->data = MTK_MAX_RX_RING_NUM;
3197 ret = 0;
3198 }
3199 break;
3200 case ETHTOOL_GRXCLSRLCNT:
3201 if (dev->hw_features & NETIF_F_LRO) {
3202 struct mtk_mac *mac = netdev_priv(dev);
3203
3204 cmd->rule_cnt = mac->hwlro_ip_cnt;
3205 ret = 0;
3206 }
3207 break;
3208 case ETHTOOL_GRXCLSRULE:
3209 if (dev->hw_features & NETIF_F_LRO)
3210 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
3211 break;
3212 case ETHTOOL_GRXCLSRLALL:
3213 if (dev->hw_features & NETIF_F_LRO)
3214 ret = mtk_hwlro_get_fdir_all(dev, cmd,
3215 rule_locs);
3216 break;
3217 default:
3218 break;
3219 }
3220
3221 return ret;
3222}
3223
3224static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3225{
3226 int ret = -EOPNOTSUPP;
3227
3228 switch (cmd->cmd) {
3229 case ETHTOOL_SRXCLSRLINS:
3230 if (dev->hw_features & NETIF_F_LRO)
3231 ret = mtk_hwlro_add_ipaddr(dev, cmd);
3232 break;
3233 case ETHTOOL_SRXCLSRLDEL:
3234 if (dev->hw_features & NETIF_F_LRO)
3235 ret = mtk_hwlro_del_ipaddr(dev, cmd);
3236 break;
3237 default:
3238 break;
3239 }
3240
3241 return ret;
3242}
3243
3244static const struct ethtool_ops mtk_ethtool_ops = {
3245 .get_link_ksettings = mtk_get_link_ksettings,
3246 .set_link_ksettings = mtk_set_link_ksettings,
3247 .get_drvinfo = mtk_get_drvinfo,
3248 .get_msglevel = mtk_get_msglevel,
3249 .set_msglevel = mtk_set_msglevel,
3250 .nway_reset = mtk_nway_reset,
3251 .get_link = ethtool_op_get_link,
3252 .get_strings = mtk_get_strings,
3253 .get_sset_count = mtk_get_sset_count,
3254 .get_ethtool_stats = mtk_get_ethtool_stats,
3255 .get_rxnfc = mtk_get_rxnfc,
3256 .set_rxnfc = mtk_set_rxnfc,
3257};
3258
3259static const struct net_device_ops mtk_netdev_ops = {
3260 .ndo_init = mtk_init,
3261 .ndo_uninit = mtk_uninit,
3262 .ndo_open = mtk_open,
3263 .ndo_stop = mtk_stop,
3264 .ndo_start_xmit = mtk_start_xmit,
3265 .ndo_set_mac_address = mtk_set_mac_address,
3266 .ndo_validate_addr = eth_validate_addr,
3267 .ndo_do_ioctl = mtk_do_ioctl,
3268 .ndo_tx_timeout = mtk_tx_timeout,
3269 .ndo_get_stats64 = mtk_get_stats64,
3270 .ndo_fix_features = mtk_fix_features,
3271 .ndo_set_features = mtk_set_features,
3272#ifdef CONFIG_NET_POLL_CONTROLLER
3273 .ndo_poll_controller = mtk_poll_controller,
3274#endif
3275};
3276
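/* Instantiate one MAC from a "mediatek,eth-mac" child node. A minimal
 * node shape, inferred from the properties read below (illustrative
 * sketch, not a binding document):
 *
 *	ethernet@0 {
 *		compatible = "mediatek,eth-mac";
 *		reg = <0>;
 *		phy-mode = "2500base-x";
 *	};
 *
 * "reg" supplies the MAC id and "phy-mode" seeds phylink_create().
 */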
3277static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
3278{
3279 const __be32 *_id = of_get_property(np, "reg", NULL);
3280 struct phylink *phylink;
3281 int phy_mode, id, err;
3282 struct mtk_mac *mac;
3283
3284 if (!_id) {
3285 dev_err(eth->dev, "missing mac id\n");
3286 return -EINVAL;
3287 }
3288
3289 id = be32_to_cpup(_id);
3290 if (id >= MTK_MAC_COUNT) {
3291 dev_err(eth->dev, "%d is not a valid mac id\n", id);
3292 return -EINVAL;
3293 }
3294
3295 if (eth->netdev[id]) {
3296 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
3297 return -EINVAL;
3298 }
3299
3300 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
3301 if (!eth->netdev[id]) {
3302 dev_err(eth->dev, "alloc_etherdev failed\n");
3303 return -ENOMEM;
3304 }
3305 mac = netdev_priv(eth->netdev[id]);
3306 eth->mac[id] = mac;
3307 mac->id = id;
3308 mac->hw = eth;
3309 mac->of_node = np;
3310
3311 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
3312 mac->hwlro_ip_cnt = 0;
3313
3314 mac->hw_stats = devm_kzalloc(eth->dev,
3315 sizeof(*mac->hw_stats),
3316 GFP_KERNEL);
3317 if (!mac->hw_stats) {
3318 dev_err(eth->dev, "failed to allocate counter memory\n");
3319 err = -ENOMEM;
3320 goto free_netdev;
3321 }
3322 spin_lock_init(&mac->hw_stats->stats_lock);
3323 u64_stats_init(&mac->hw_stats->syncp);
3324 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
3325
3326 /* phylink create */
3327 phy_mode = of_get_phy_mode(np);
3328 if (phy_mode < 0) {
3329 dev_err(eth->dev, "incorrect phy-mode\n");
3330 err = -EINVAL;
3331 goto free_netdev;
3332 }
3333
3334 /* mac config is not set */
3335 mac->interface = PHY_INTERFACE_MODE_NA;
3336 mac->mode = MLO_AN_PHY;
3337 mac->speed = SPEED_UNKNOWN;
3338
3339 mac->phylink_config.dev = &eth->netdev[id]->dev;
3340 mac->phylink_config.type = PHYLINK_NETDEV;
3341
3342 phylink = phylink_create(&mac->phylink_config,
3343 of_fwnode_handle(mac->of_node),
3344 phy_mode, &mtk_phylink_ops);
3345 if (IS_ERR(phylink)) {
3346 err = PTR_ERR(phylink);
3347 goto free_netdev;
3348 }
3349
3350 mac->phylink = phylink;
3351
3352 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
3353 eth->netdev[id]->watchdog_timeo = 5 * HZ;
3354 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
3355 eth->netdev[id]->base_addr = (unsigned long)eth->base;
3356
3357 eth->netdev[id]->hw_features = eth->soc->hw_features;
3358 if (eth->hwlro)
3359 eth->netdev[id]->hw_features |= NETIF_F_LRO;
3360
3361 eth->netdev[id]->vlan_features = eth->soc->hw_features &
3362 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
3363 eth->netdev[id]->features |= eth->soc->hw_features;
3364 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
3365
3366 eth->netdev[id]->irq = eth->irq[0];
3367 eth->netdev[id]->dev.of_node = np;
3368
3369 return 0;
3370
3371free_netdev:
3372 free_netdev(eth->netdev[id]);
3373 return err;
3374}
3375
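/* Probe order matters here: map registers (plus the SRAM scratch base
 * on has_sram parts), look up the syscon regmaps and clocks, run
 * mtk_hw_init(), add the MACs from DT, request the shared IRQ or the
 * separate TX/RX IRQs, bring up MDIO, and only then register the
 * netdevs and attach the NAPI contexts to a dummy device shared by
 * both MACs.
 */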
3376static int mtk_probe(struct platform_device *pdev)
3377{
3378 struct device_node *mac_np;
3379 struct mtk_eth *eth;
3380 int err, i;
3381
3382 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
3383 if (!eth)
3384 return -ENOMEM;
3385
3386 eth->soc = of_device_get_match_data(&pdev->dev);
3387
3388 eth->dev = &pdev->dev;
3389 eth->base = devm_platform_ioremap_resource(pdev, 0);
3390 if (IS_ERR(eth->base))
3391 return PTR_ERR(eth->base);
3392
3393 if (eth->soc->has_sram) {
3394 struct resource *res;
3395 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3396 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
3397 }
3398
3399 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3400 eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
3401 eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
3402 } else {
3403 eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
3404 eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
3405 }
3406
3407 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3408 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
3409 eth->ip_align = NET_IP_ALIGN;
3410 } else {
developera2bdbd52021-05-31 19:10:17 +08003411 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developerfd40db22021-04-29 10:08:25 +08003412 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_V2;
3413 else
3414 eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
3415 }
3416
3417 spin_lock_init(&eth->page_lock);
3418 spin_lock_init(&eth->tx_irq_lock);
3419 spin_lock_init(&eth->rx_irq_lock);
3420
3421 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3422 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3423 "mediatek,ethsys");
3424 if (IS_ERR(eth->ethsys)) {
3425 dev_err(&pdev->dev, "no ethsys regmap found\n");
3426 return PTR_ERR(eth->ethsys);
3427 }
3428 }
3429
3430 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
3431 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3432 "mediatek,infracfg");
3433 if (IS_ERR(eth->infra)) {
3434 dev_err(&pdev->dev, "no infracfg regmap found\n");
3435 return PTR_ERR(eth->infra);
3436 }
3437 }
3438
3439 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
3440 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
3441 GFP_KERNEL);
3442 if (!eth->sgmii)
3443 return -ENOMEM;
3444
3445 err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
3446 eth->soc->ana_rgc3);
3447
3448 if (err)
3449 return err;
3450 }
3451
3452 if (eth->soc->required_pctl) {
3453 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3454 "mediatek,pctl");
3455 if (IS_ERR(eth->pctl)) {
3456 dev_err(&pdev->dev, "no pctl regmap found\n");
3457 return PTR_ERR(eth->pctl);
3458 }
3459 }
3460
developer18f46a82021-07-20 21:08:21 +08003461 for (i = 0; i < MTK_MAX_IRQ_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08003462 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
3463 eth->irq[i] = eth->irq[0];
3464 else
3465 eth->irq[i] = platform_get_irq(pdev, i);
3466 if (eth->irq[i] < 0) {
3467 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
3468 return -ENXIO;
3469 }
3470 }
3471
3472 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
3473 eth->clks[i] = devm_clk_get(eth->dev,
3474 mtk_clks_source_name[i]);
3475 if (IS_ERR(eth->clks[i])) {
3476 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
3477 return -EPROBE_DEFER;
3478 if (eth->soc->required_clks & BIT(i)) {
3479 dev_err(&pdev->dev, "clock %s not found\n",
3480 mtk_clks_source_name[i]);
3481 return -EINVAL;
3482 }
3483 eth->clks[i] = NULL;
3484 }
3485 }
3486
3487 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
3488 INIT_WORK(&eth->pending_work, mtk_pending_work);
3489
3490 err = mtk_hw_init(eth);
3491 if (err)
3492 return err;
3493
3494 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
3495
3496 for_each_child_of_node(pdev->dev.of_node, mac_np) {
3497 if (!of_device_is_compatible(mac_np,
3498 "mediatek,eth-mac"))
3499 continue;
3500
3501 if (!of_device_is_available(mac_np))
3502 continue;
3503
3504 err = mtk_add_mac(eth, mac_np);
3505 if (err) {
3506 of_node_put(mac_np);
3507 goto err_deinit_hw;
3508 }
3509 }
3510
developer18f46a82021-07-20 21:08:21 +08003511 err = mtk_napi_init(eth);
3512 if (err)
3513 goto err_free_dev;
3514
developerfd40db22021-04-29 10:08:25 +08003515 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
3516 err = devm_request_irq(eth->dev, eth->irq[0],
3517 mtk_handle_irq, 0,
3518 dev_name(eth->dev), eth);
3519 } else {
3520 err = devm_request_irq(eth->dev, eth->irq[1],
3521 mtk_handle_irq_tx, 0,
3522 dev_name(eth->dev), eth);
3523 if (err)
3524 goto err_free_dev;
3525
3526 err = devm_request_irq(eth->dev, eth->irq[2],
3527 mtk_handle_irq_rx, 0,
developer18f46a82021-07-20 21:08:21 +08003528 dev_name(eth->dev), &eth->rx_napi[0]);
3529 if (err)
3530 goto err_free_dev;
3531
3532 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3533 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3534 err = devm_request_irq(eth->dev,
3535 eth->irq[2 + i],
3536 mtk_handle_irq_rx, 0,
3537 dev_name(eth->dev),
3538 &eth->rx_napi[i]);
3539 if (err)
3540 goto err_free_dev;
3541 }
3542 }
developerfd40db22021-04-29 10:08:25 +08003543 }
3544 if (err)
3545 goto err_free_dev;
3546
3547 /* No MT7628/88 support yet */
3548 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3549 err = mtk_mdio_init(eth);
3550 if (err)
3551 goto err_free_dev;
3552 }
3553
3554 for (i = 0; i < MTK_MAX_DEVS; i++) {
3555 if (!eth->netdev[i])
3556 continue;
3557
3558 err = register_netdev(eth->netdev[i]);
3559 if (err) {
3560 dev_err(eth->dev, "error bringing up device\n");
3561 goto err_deinit_mdio;
3562 } else {
3563 netif_info(eth, probe, eth->netdev[i],
3564 "mediatek frame engine at 0x%08lx, irq %d\n",
3565 eth->netdev[i]->base_addr, eth->irq[0]);
3566 }
3566 }
3567
3568 /* we run 2 devices on the same DMA ring so we need a dummy device
3569 * for NAPI to work
3570 */
3571 init_dummy_netdev(&eth->dummy_dev);
3572 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
3573 MTK_NAPI_WEIGHT);
developer18f46a82021-07-20 21:08:21 +08003574 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[0].napi, mtk_napi_rx,
developerfd40db22021-04-29 10:08:25 +08003575 MTK_NAPI_WEIGHT);
3576
developer18f46a82021-07-20 21:08:21 +08003577 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3578 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
3579 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[i].napi,
3580 mtk_napi_rx, MTK_NAPI_WEIGHT);
3581 }
3582
developerfd40db22021-04-29 10:08:25 +08003583 mtketh_debugfs_init(eth);
3584 debug_proc_init(eth);
3585
3586 platform_set_drvdata(pdev, eth);
3587
3588 return 0;
3589
3590err_deinit_mdio:
3591 mtk_mdio_cleanup(eth);
3592err_free_dev:
3593 mtk_free_dev(eth);
3594err_deinit_hw:
3595 mtk_hw_deinit(eth);
3596
3597 return err;
3598}
3599
3600static int mtk_remove(struct platform_device *pdev)
3601{
3602 struct mtk_eth *eth = platform_get_drvdata(pdev);
3603 struct mtk_mac *mac;
3604 int i;
3605
3606 /* stop all devices to make sure that dma is properly shut down */
3607 for (i = 0; i < MTK_MAC_COUNT; i++) {
3608 if (!eth->netdev[i])
3609 continue;
3610 mtk_stop(eth->netdev[i]);
3611 mac = netdev_priv(eth->netdev[i]);
3612 phylink_disconnect_phy(mac->phylink);
3613 }
3614
3615 mtk_hw_deinit(eth);
3616
3617 netif_napi_del(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08003618 netif_napi_del(&eth->rx_napi[0].napi);
3619
3620 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3621 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
3622 netif_napi_del(&eth->rx_napi[i].napi);
3623 }
3624
developerfd40db22021-04-29 10:08:25 +08003625 mtk_cleanup(eth);
3626 mtk_mdio_cleanup(eth);
3627
3628 return 0;
3629}
3630
3631static const struct mtk_soc_data mt2701_data = {
3632 .caps = MT7623_CAPS | MTK_HWLRO,
3633 .hw_features = MTK_HW_FEATURES,
3634 .required_clks = MT7623_CLKS_BITMAP,
3635 .required_pctl = true,
3636 .has_sram = false,
3637};
3638
3639static const struct mtk_soc_data mt7621_data = {
3640 .caps = MT7621_CAPS,
3641 .hw_features = MTK_HW_FEATURES,
3642 .required_clks = MT7621_CLKS_BITMAP,
3643 .required_pctl = false,
3644 .has_sram = false,
3645};
3646
3647static const struct mtk_soc_data mt7622_data = {
3648 .ana_rgc3 = 0x2028,
3649 .caps = MT7622_CAPS | MTK_HWLRO,
3650 .hw_features = MTK_HW_FEATURES,
3651 .required_clks = MT7622_CLKS_BITMAP,
3652 .required_pctl = false,
3653 .has_sram = false,
3654};
3655
3656static const struct mtk_soc_data mt7623_data = {
3657 .caps = MT7623_CAPS | MTK_HWLRO,
3658 .hw_features = MTK_HW_FEATURES,
3659 .required_clks = MT7623_CLKS_BITMAP,
3660 .required_pctl = true,
3661 .has_sram = false,
3662};
3663
3664static const struct mtk_soc_data mt7629_data = {
3665 .ana_rgc3 = 0x128,
3666 .caps = MT7629_CAPS | MTK_HWLRO,
3667 .hw_features = MTK_HW_FEATURES,
3668 .required_clks = MT7629_CLKS_BITMAP,
3669 .required_pctl = false,
3670 .has_sram = false,
3671};
3672
3673static const struct mtk_soc_data mt7986_data = {
3674 .ana_rgc3 = 0x128,
3675 .caps = MT7986_CAPS,
developercba5f4e2021-05-06 14:01:53 +08003676 .hw_features = MTK_HW_FEATURES,
developerfd40db22021-04-29 10:08:25 +08003677 .required_clks = MT7986_CLKS_BITMAP,
3678 .required_pctl = false,
3679 .has_sram = true,
3680};
3681
developer255bba22021-07-27 15:16:33 +08003682static const struct mtk_soc_data mt7981_data = {
3683 .ana_rgc3 = 0x128,
3684 .caps = MT7981_CAPS,
3685 .hw_features = MTK_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_RX,
3686 .required_clks = MT7981_CLKS_BITMAP,
3687 .required_pctl = false,
3688 .has_sram = true,
3689};
3690
developerfd40db22021-04-29 10:08:25 +08003691static const struct mtk_soc_data rt5350_data = {
3692 .caps = MT7628_CAPS,
3693 .hw_features = MTK_HW_FEATURES_MT7628,
3694 .required_clks = MT7628_CLKS_BITMAP,
3695 .required_pctl = false,
3696 .has_sram = false,
3697};
3698
3699const struct of_device_id of_mtk_match[] = {
3700 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
3701 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
3702 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
3703 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
3704 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
3705 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
developer255bba22021-07-27 15:16:33 +08003706 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
developerfd40db22021-04-29 10:08:25 +08003707 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
3708 {},
3709};
3710MODULE_DEVICE_TABLE(of, of_mtk_match);
3711
3712static struct platform_driver mtk_driver = {
3713 .probe = mtk_probe,
3714 .remove = mtk_remove,
3715 .driver = {
3716 .name = "mtk_soc_eth",
3717 .of_match_table = of_mtk_match,
3718 },
3719};
3720
3721module_platform_driver(mtk_driver);
3722
3723MODULE_LICENSE("GPL");
3724MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
3725MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");