// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>

#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
#include "mtk_eth_reset.h"

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
#include "mtk_hnat/nf_hnat_mtk.h"
#endif

static int mtk_msg_level = -1;
atomic_t reset_lock = ATOMIC_INIT(0);
atomic_t force = ATOMIC_INIT(0);

module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
DECLARE_COMPLETION(wait_ser_done);
developerfd40db22021-04-29 10:08:25 +080040
41#define MTK_ETHTOOL_STAT(x) { #x, \
42 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
43
44/* strings used by ethtool */
45static const struct mtk_ethtool_stats {
46 char str[ETH_GSTRING_LEN];
47 u32 offset;
48} mtk_ethtool_stats[] = {
49 MTK_ETHTOOL_STAT(tx_bytes),
50 MTK_ETHTOOL_STAT(tx_packets),
51 MTK_ETHTOOL_STAT(tx_skip),
52 MTK_ETHTOOL_STAT(tx_collisions),
53 MTK_ETHTOOL_STAT(rx_bytes),
54 MTK_ETHTOOL_STAT(rx_packets),
55 MTK_ETHTOOL_STAT(rx_overflow),
56 MTK_ETHTOOL_STAT(rx_fcs_errors),
57 MTK_ETHTOOL_STAT(rx_short_errors),
58 MTK_ETHTOOL_STAT(rx_long_errors),
59 MTK_ETHTOOL_STAT(rx_checksum_errors),
60 MTK_ETHTOOL_STAT(rx_flow_control_packets),
61};
62
63static const char * const mtk_clks_source_name[] = {
64 "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
65 "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
66 "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
67 "sgmii_ck", "eth2pll", "wocpu0","wocpu1",
68};
69
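/* Raw register accessors: mtk_w32()/mtk_r32() wrap __raw_writel()/__raw_readl()
 * on the frame engine register window at eth->base, and mtk_m32() does a
 * read-modify-write of the bits in @mask with @set.
 */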
70void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
71{
72 __raw_writel(val, eth->base + reg);
73}
74
75u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
76{
77 return __raw_readl(eth->base + reg);
78}
79
80u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
81{
82 u32 val;
83
84 val = mtk_r32(eth, reg);
85 val &= ~mask;
86 val |= set;
87 mtk_w32(eth, val, reg);
88 return reg;
89}
90
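/* Poll MTK_PHY_IAC until the PHY_IAC_ACCESS (busy) bit clears; give up and
 * return -1 after PHY_IAC_TIMEOUT jiffies.
 */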
91static int mtk_mdio_busy_wait(struct mtk_eth *eth)
92{
93 unsigned long t_start = jiffies;
94
95 while (1) {
96 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
97 return 0;
98 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
99 break;
developerc4671b22021-05-28 13:16:42 +0800100 cond_resched();
developerfd40db22021-04-29 10:08:25 +0800101 }
102
103 dev_err(eth->dev, "mdio: MDIO timeout\n");
104 return -1;
105}
106
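/* Indirect MDIO write through the PHY indirect access controller (IAC).
 * Clause 45 registers (MII_ADDR_C45) need an extra address cycle before the
 * data cycle; Clause 22 registers are written in a single cycle.
 */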
developer599cda42022-05-24 15:13:31 +0800107u32 _mtk_mdio_write(struct mtk_eth *eth, int phy_addr,
108 int phy_reg, u16 write_data)
developerfd40db22021-04-29 10:08:25 +0800109{
110 if (mtk_mdio_busy_wait(eth))
111 return -1;
112
113 write_data &= 0xffff;
114
developer599cda42022-05-24 15:13:31 +0800115 if (phy_reg & MII_ADDR_C45) {
116 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
117 ((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
118 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
119 MTK_PHY_IAC);
120
121 if (mtk_mdio_busy_wait(eth))
122 return -1;
123
124 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_WRITE |
125 ((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
126 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
127 MTK_PHY_IAC);
128 } else {
129 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
130 ((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
131 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
132 MTK_PHY_IAC);
133 }
developerfd40db22021-04-29 10:08:25 +0800134
135 if (mtk_mdio_busy_wait(eth))
136 return -1;
137
138 return 0;
139}
140
developer599cda42022-05-24 15:13:31 +0800141u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
developerfd40db22021-04-29 10:08:25 +0800142{
143 u32 d;
144
145 if (mtk_mdio_busy_wait(eth))
146 return 0xffff;
147
developer599cda42022-05-24 15:13:31 +0800148 if (phy_reg & MII_ADDR_C45) {
149 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
150 ((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
151 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
152 MTK_PHY_IAC);
153
154 if (mtk_mdio_busy_wait(eth))
155 return 0xffff;
156
157 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_READ_C45 |
158 ((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
159 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
160 MTK_PHY_IAC);
161 } else {
162 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
163 ((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
164 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
165 MTK_PHY_IAC);
166 }
developerfd40db22021-04-29 10:08:25 +0800167
168 if (mtk_mdio_busy_wait(eth))
169 return 0xffff;
170
171 d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
172
173 return d;
174}
175
176static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
177 int phy_reg, u16 val)
178{
179 struct mtk_eth *eth = bus->priv;
180
181 return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
182}
183
184static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
185{
186 struct mtk_eth *eth = bus->priv;
187
188 return _mtk_mdio_read(eth, phy_addr, phy_reg);
189}
190
developerfd40db22021-04-29 10:08:25 +0800191static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
192 phy_interface_t interface)
193{
194 u32 val;
195
196 /* Check DDR memory type.
197 * Currently TRGMII mode with DDR2 memory is not supported.
198 */
199 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
200 if (interface == PHY_INTERFACE_MODE_TRGMII &&
201 val & SYSCFG_DRAM_TYPE_DDR2) {
202 dev_err(eth->dev,
203 "TRGMII mode with DDR2 memory is not supported!\n");
204 return -EOPNOTSUPP;
205 }
206
207 val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
208 ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
209
210 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
211 ETHSYS_TRGMII_MT7621_MASK, val);
212
213 return 0;
214}
215
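/* Program the GMAC0 interface mode and retune the TRGPLL clock: 500 MHz for
 * TRGMII, and 250/500 MHz for RGMII depending on the link speed.
 */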
216static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
217 phy_interface_t interface, int speed)
218{
219 u32 val;
220 int ret;
221
222 if (interface == PHY_INTERFACE_MODE_TRGMII) {
223 mtk_w32(eth, TRGMII_MODE, INTF_MODE);
224 val = 500000000;
225 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
226 if (ret)
227 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
228 return;
229 }
230
231 val = (speed == SPEED_1000) ?
232 INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
233 mtk_w32(eth, val, INTF_MODE);
234
235 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
236 ETHSYS_TRGMII_CLK_SEL362_5,
237 ETHSYS_TRGMII_CLK_SEL362_5);
238
239 val = (speed == SPEED_1000) ? 250000000 : 500000000;
240 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
241 if (ret)
242 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
243
244 val = (speed == SPEED_1000) ?
245 RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
246 mtk_w32(eth, val, TRGMII_RCK_CTRL);
247
248 val = (speed == SPEED_1000) ?
249 TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
250 mtk_w32(eth, val, TRGMII_TCK_CTRL);
251}
252
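/* phylink .mac_config callback: select the pin/path mux for the requested
 * interface, program the GE mode field in ETHSYS_SYSCFG0, set up SGMIISYS
 * when an SGMII/802.3z interface is used, and finally update MAC_MCR.
 */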
253static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
254 const struct phylink_link_state *state)
255{
256 struct mtk_mac *mac = container_of(config, struct mtk_mac,
257 phylink_config);
258 struct mtk_eth *eth = mac->hw;
259 u32 mcr_cur, mcr_new, sid, i;
developerfb556ca2021-10-13 10:52:09 +0800260 int val, ge_mode, err=0;
developerfd40db22021-04-29 10:08:25 +0800261
	/* MT76x8 has no hardware settings for the MAC */
263 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
264 mac->interface != state->interface) {
265 /* Setup soc pin functions */
266 switch (state->interface) {
267 case PHY_INTERFACE_MODE_TRGMII:
268 if (mac->id)
269 goto err_phy;
270 if (!MTK_HAS_CAPS(mac->hw->soc->caps,
271 MTK_GMAC1_TRGMII))
272 goto err_phy;
273 /* fall through */
274 case PHY_INTERFACE_MODE_RGMII_TXID:
275 case PHY_INTERFACE_MODE_RGMII_RXID:
276 case PHY_INTERFACE_MODE_RGMII_ID:
277 case PHY_INTERFACE_MODE_RGMII:
278 case PHY_INTERFACE_MODE_MII:
279 case PHY_INTERFACE_MODE_REVMII:
280 case PHY_INTERFACE_MODE_RMII:
281 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
282 err = mtk_gmac_rgmii_path_setup(eth, mac->id);
283 if (err)
284 goto init_err;
285 }
286 break;
287 case PHY_INTERFACE_MODE_1000BASEX:
288 case PHY_INTERFACE_MODE_2500BASEX:
289 case PHY_INTERFACE_MODE_SGMII:
290 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
291 err = mtk_gmac_sgmii_path_setup(eth, mac->id);
292 if (err)
293 goto init_err;
294 }
295 break;
296 case PHY_INTERFACE_MODE_GMII:
297 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
298 err = mtk_gmac_gephy_path_setup(eth, mac->id);
299 if (err)
300 goto init_err;
301 }
302 break;
303 default:
304 goto err_phy;
305 }
306
307 /* Setup clock for 1st gmac */
308 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
309 !phy_interface_mode_is_8023z(state->interface) &&
310 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
311 if (MTK_HAS_CAPS(mac->hw->soc->caps,
312 MTK_TRGMII_MT7621_CLK)) {
313 if (mt7621_gmac0_rgmii_adjust(mac->hw,
314 state->interface))
315 goto err_phy;
316 } else {
317 mtk_gmac0_rgmii_adjust(mac->hw,
318 state->interface,
319 state->speed);
320
321 /* mt7623_pad_clk_setup */
322 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
323 mtk_w32(mac->hw,
324 TD_DM_DRVP(8) | TD_DM_DRVN(8),
325 TRGMII_TD_ODT(i));
326
327 /* Assert/release MT7623 RXC reset */
328 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
329 TRGMII_RCK_CTRL);
330 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
331 }
332 }
333
334 ge_mode = 0;
335 switch (state->interface) {
336 case PHY_INTERFACE_MODE_MII:
337 case PHY_INTERFACE_MODE_GMII:
338 ge_mode = 1;
339 break;
340 case PHY_INTERFACE_MODE_REVMII:
341 ge_mode = 2;
342 break;
343 case PHY_INTERFACE_MODE_RMII:
344 if (mac->id)
345 goto err_phy;
346 ge_mode = 3;
347 break;
348 default:
349 break;
350 }
351
352 /* put the gmac into the right mode */
developerd82e8372022-02-09 15:00:09 +0800353 spin_lock(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +0800354 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
355 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
356 val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
357 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
developerd82e8372022-02-09 15:00:09 +0800358 spin_unlock(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +0800359
360 mac->interface = state->interface;
361 }
362
363 /* SGMII */
364 if (state->interface == PHY_INTERFACE_MODE_SGMII ||
365 phy_interface_mode_is_8023z(state->interface)) {
		/* The path from GMAC to SGMII will be enabled once the
		 * SGMIISYS setup is done.
		 */
developerd82e8372022-02-09 15:00:09 +0800369 spin_lock(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +0800370 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
371
372 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
373 SYSCFG0_SGMII_MASK,
374 ~(u32)SYSCFG0_SGMII_MASK);
375
		/* Decide how GMAC and SGMIISYS are mapped */
377 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
378 0 : mac->id;
379
380 /* Setup SGMIISYS with the determined property */
381 if (state->interface != PHY_INTERFACE_MODE_SGMII)
382 err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
383 state);
developer2fbee452022-08-12 13:58:20 +0800384 else
developerfd40db22021-04-29 10:08:25 +0800385 err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
386
developerd82e8372022-02-09 15:00:09 +0800387 if (err) {
388 spin_unlock(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +0800389 goto init_err;
developerd82e8372022-02-09 15:00:09 +0800390 }
developerfd40db22021-04-29 10:08:25 +0800391
392 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
393 SYSCFG0_SGMII_MASK, val);
developerd82e8372022-02-09 15:00:09 +0800394 spin_unlock(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +0800395 } else if (phylink_autoneg_inband(mode)) {
396 dev_err(eth->dev,
			"In-band mode not supported in non-SGMII mode!\n");
398 return;
399 }
400
401 /* Setup gmac */
402 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
403 mcr_new = mcr_cur;
404 mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
405 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
406 MAC_MCR_FORCE_RX_FC);
407 mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
408 MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
409
410 switch (state->speed) {
411 case SPEED_2500:
412 case SPEED_1000:
413 mcr_new |= MAC_MCR_SPEED_1000;
414 break;
415 case SPEED_100:
416 mcr_new |= MAC_MCR_SPEED_100;
417 break;
418 }
419 if (state->duplex == DUPLEX_FULL) {
420 mcr_new |= MAC_MCR_FORCE_DPX;
421 if (state->pause & MLO_PAUSE_TX)
422 mcr_new |= MAC_MCR_FORCE_TX_FC;
423 if (state->pause & MLO_PAUSE_RX)
424 mcr_new |= MAC_MCR_FORCE_RX_FC;
425 }
426
427 /* Only update control register when needed! */
428 if (mcr_new != mcr_cur)
429 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
430
431 return;
432
433err_phy:
434 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
435 mac->id, phy_modes(state->interface));
436 return;
437
438init_err:
439 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
440 mac->id, phy_modes(state->interface), err);
441}
442
443static int mtk_mac_link_state(struct phylink_config *config,
444 struct phylink_link_state *state)
445{
446 struct mtk_mac *mac = container_of(config, struct mtk_mac,
447 phylink_config);
448 u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
449
450 state->link = (pmsr & MAC_MSR_LINK);
451 state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
452
453 switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
454 case 0:
455 state->speed = SPEED_10;
456 break;
457 case MAC_MSR_SPEED_100:
458 state->speed = SPEED_100;
459 break;
460 case MAC_MSR_SPEED_1000:
461 state->speed = SPEED_1000;
462 break;
463 default:
464 state->speed = SPEED_UNKNOWN;
465 break;
466 }
467
468 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
469 if (pmsr & MAC_MSR_RX_FC)
470 state->pause |= MLO_PAUSE_RX;
471 if (pmsr & MAC_MSR_TX_FC)
472 state->pause |= MLO_PAUSE_TX;
473
474 return 1;
475}
476
477static void mtk_mac_an_restart(struct phylink_config *config)
478{
479 struct mtk_mac *mac = container_of(config, struct mtk_mac,
480 phylink_config);
481
482 mtk_sgmii_restart_an(mac->hw, mac->id);
483}
484
485static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
486 phy_interface_t interface)
487{
488 struct mtk_mac *mac = container_of(config, struct mtk_mac,
489 phylink_config);
490 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
491
492 mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
493 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
494}
495
496static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
497 phy_interface_t interface,
498 struct phy_device *phy)
499{
500 struct mtk_mac *mac = container_of(config, struct mtk_mac,
501 phylink_config);
502 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
503
504 mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
505 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
506}
507
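/* phylink .validate callback: reduce the supported/advertising link modes to
 * what the SoC capabilities allow for the requested interface.
 */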
508static void mtk_validate(struct phylink_config *config,
509 unsigned long *supported,
510 struct phylink_link_state *state)
511{
512 struct mtk_mac *mac = container_of(config, struct mtk_mac,
513 phylink_config);
514 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
515
516 if (state->interface != PHY_INTERFACE_MODE_NA &&
517 state->interface != PHY_INTERFACE_MODE_MII &&
518 state->interface != PHY_INTERFACE_MODE_GMII &&
519 !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
520 phy_interface_mode_is_rgmii(state->interface)) &&
521 !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
522 !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
523 !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
524 (state->interface == PHY_INTERFACE_MODE_SGMII ||
525 phy_interface_mode_is_8023z(state->interface)))) {
526 linkmode_zero(supported);
527 return;
528 }
529
530 phylink_set_port_modes(mask);
531 phylink_set(mask, Autoneg);
532
533 switch (state->interface) {
534 case PHY_INTERFACE_MODE_TRGMII:
535 phylink_set(mask, 1000baseT_Full);
536 break;
537 case PHY_INTERFACE_MODE_1000BASEX:
538 case PHY_INTERFACE_MODE_2500BASEX:
539 phylink_set(mask, 1000baseX_Full);
540 phylink_set(mask, 2500baseX_Full);
developer2fbee452022-08-12 13:58:20 +0800541 phylink_set(mask, 2500baseT_Full);
		/* fall through */
developerfd40db22021-04-29 10:08:25 +0800543 case PHY_INTERFACE_MODE_GMII:
544 case PHY_INTERFACE_MODE_RGMII:
545 case PHY_INTERFACE_MODE_RGMII_ID:
546 case PHY_INTERFACE_MODE_RGMII_RXID:
547 case PHY_INTERFACE_MODE_RGMII_TXID:
548 phylink_set(mask, 1000baseT_Half);
549 /* fall through */
550 case PHY_INTERFACE_MODE_SGMII:
551 phylink_set(mask, 1000baseT_Full);
552 phylink_set(mask, 1000baseX_Full);
553 /* fall through */
554 case PHY_INTERFACE_MODE_MII:
555 case PHY_INTERFACE_MODE_RMII:
556 case PHY_INTERFACE_MODE_REVMII:
557 case PHY_INTERFACE_MODE_NA:
558 default:
559 phylink_set(mask, 10baseT_Half);
560 phylink_set(mask, 10baseT_Full);
561 phylink_set(mask, 100baseT_Half);
562 phylink_set(mask, 100baseT_Full);
563 break;
564 }
565
566 if (state->interface == PHY_INTERFACE_MODE_NA) {
567 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
568 phylink_set(mask, 1000baseT_Full);
569 phylink_set(mask, 1000baseX_Full);
570 phylink_set(mask, 2500baseX_Full);
571 }
572 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
573 phylink_set(mask, 1000baseT_Full);
574 phylink_set(mask, 1000baseT_Half);
575 phylink_set(mask, 1000baseX_Full);
576 }
577 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
578 phylink_set(mask, 1000baseT_Full);
579 phylink_set(mask, 1000baseT_Half);
580 }
581 }
582
583 phylink_set(mask, Pause);
584 phylink_set(mask, Asym_Pause);
585
586 linkmode_and(supported, supported, mask);
587 linkmode_and(state->advertising, state->advertising, mask);
588
589 /* We can only operate at 2500BaseX or 1000BaseX. If requested
590 * to advertise both, only report advertising at 2500BaseX.
591 */
592 phylink_helper_basex_speed(state);
593}
594
595static const struct phylink_mac_ops mtk_phylink_ops = {
596 .validate = mtk_validate,
597 .mac_link_state = mtk_mac_link_state,
598 .mac_an_restart = mtk_mac_an_restart,
599 .mac_config = mtk_mac_config,
600 .mac_link_down = mtk_mac_link_down,
601 .mac_link_up = mtk_mac_link_up,
602};
603
604static int mtk_mdio_init(struct mtk_eth *eth)
605{
606 struct device_node *mii_np;
607 int ret;
608
609 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
610 if (!mii_np) {
611 dev_err(eth->dev, "no %s child node found", "mdio-bus");
612 return -ENODEV;
613 }
614
615 if (!of_device_is_available(mii_np)) {
616 ret = -ENODEV;
617 goto err_put_node;
618 }
619
620 eth->mii_bus = devm_mdiobus_alloc(eth->dev);
621 if (!eth->mii_bus) {
622 ret = -ENOMEM;
623 goto err_put_node;
624 }
625
626 eth->mii_bus->name = "mdio";
627 eth->mii_bus->read = mtk_mdio_read;
628 eth->mii_bus->write = mtk_mdio_write;
629 eth->mii_bus->priv = eth;
630 eth->mii_bus->parent = eth->dev;
631
	if (snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np) < 0) {
developerfb556ca2021-10-13 10:52:09 +0800633 ret = -ENOMEM;
634 goto err_put_node;
635 }
developerfd40db22021-04-29 10:08:25 +0800636 ret = of_mdiobus_register(eth->mii_bus, mii_np);
637
638err_put_node:
639 of_node_put(mii_np);
640 return ret;
641}
642
643static void mtk_mdio_cleanup(struct mtk_eth *eth)
644{
645 if (!eth->mii_bus)
646 return;
647
648 mdiobus_unregister(eth->mii_bus);
649}
650
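/* TX and RX interrupt masks live in separate registers and are protected by
 * their own spinlocks; these helpers set or clear bits in those masks.
 */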
651static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
652{
653 unsigned long flags;
654 u32 val;
655
656 spin_lock_irqsave(&eth->tx_irq_lock, flags);
657 val = mtk_r32(eth, eth->tx_int_mask_reg);
658 mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
659 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
660}
661
662static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
663{
664 unsigned long flags;
665 u32 val;
666
667 spin_lock_irqsave(&eth->tx_irq_lock, flags);
668 val = mtk_r32(eth, eth->tx_int_mask_reg);
669 mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
670 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
671}
672
673static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
674{
675 unsigned long flags;
676 u32 val;
677
678 spin_lock_irqsave(&eth->rx_irq_lock, flags);
679 val = mtk_r32(eth, MTK_PDMA_INT_MASK);
680 mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
681 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
682}
683
684static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
685{
686 unsigned long flags;
687 u32 val;
688
689 spin_lock_irqsave(&eth->rx_irq_lock, flags);
690 val = mtk_r32(eth, MTK_PDMA_INT_MASK);
691 mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
692 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
693}
694
695static int mtk_set_mac_address(struct net_device *dev, void *p)
696{
697 int ret = eth_mac_addr(dev, p);
698 struct mtk_mac *mac = netdev_priv(dev);
699 struct mtk_eth *eth = mac->hw;
700 const char *macaddr = dev->dev_addr;
701
702 if (ret)
703 return ret;
704
705 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
706 return -EBUSY;
707
708 spin_lock_bh(&mac->hw->page_lock);
709 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
710 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
711 MT7628_SDM_MAC_ADRH);
712 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
713 (macaddr[4] << 8) | macaddr[5],
714 MT7628_SDM_MAC_ADRL);
715 } else {
716 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
717 MTK_GDMA_MAC_ADRH(mac->id));
718 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
719 (macaddr[4] << 8) | macaddr[5],
720 MTK_GDMA_MAC_ADRL(mac->id));
721 }
722 spin_unlock_bh(&mac->hw->page_lock);
723
724 return 0;
725}
726
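/* Fold the per-GDM hardware MIB counters into the software mtk_hw_stats under
 * the u64_stats syncp. The accumulation assumes the hardware counters are
 * cleared on read.
 */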
727void mtk_stats_update_mac(struct mtk_mac *mac)
728{
729 struct mtk_hw_stats *hw_stats = mac->hw_stats;
730 unsigned int base = MTK_GDM1_TX_GBCNT;
731 u64 stats;
732
733 base += hw_stats->reg_offset;
734
735 u64_stats_update_begin(&hw_stats->syncp);
736
737 hw_stats->rx_bytes += mtk_r32(mac->hw, base);
738 stats = mtk_r32(mac->hw, base + 0x04);
739 if (stats)
740 hw_stats->rx_bytes += (stats << 32);
741 hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
742 hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
743 hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
744 hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
745 hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
746 hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
747 hw_stats->rx_flow_control_packets +=
748 mtk_r32(mac->hw, base + 0x24);
749 hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
750 hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
751 hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
752 stats = mtk_r32(mac->hw, base + 0x34);
753 if (stats)
754 hw_stats->tx_bytes += (stats << 32);
755 hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
756 u64_stats_update_end(&hw_stats->syncp);
757}
758
759static void mtk_stats_update(struct mtk_eth *eth)
760{
761 int i;
762
763 for (i = 0; i < MTK_MAC_COUNT; i++) {
764 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
765 continue;
766 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
767 mtk_stats_update_mac(eth->mac[i]);
768 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
769 }
770 }
771}
772
773static void mtk_get_stats64(struct net_device *dev,
774 struct rtnl_link_stats64 *storage)
775{
776 struct mtk_mac *mac = netdev_priv(dev);
777 struct mtk_hw_stats *hw_stats = mac->hw_stats;
778 unsigned int start;
779
780 if (netif_running(dev) && netif_device_present(dev)) {
781 if (spin_trylock_bh(&hw_stats->stats_lock)) {
782 mtk_stats_update_mac(mac);
783 spin_unlock_bh(&hw_stats->stats_lock);
784 }
785 }
786
787 do {
788 start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
789 storage->rx_packets = hw_stats->rx_packets;
790 storage->tx_packets = hw_stats->tx_packets;
791 storage->rx_bytes = hw_stats->rx_bytes;
792 storage->tx_bytes = hw_stats->tx_bytes;
793 storage->collisions = hw_stats->tx_collisions;
794 storage->rx_length_errors = hw_stats->rx_short_errors +
795 hw_stats->rx_long_errors;
796 storage->rx_over_errors = hw_stats->rx_overflow;
797 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
798 storage->rx_errors = hw_stats->rx_checksum_errors;
799 storage->tx_aborted_errors = hw_stats->tx_skip;
800 } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
801
802 storage->tx_errors = dev->stats.tx_errors;
803 storage->rx_dropped = dev->stats.rx_dropped;
804 storage->tx_dropped = dev->stats.tx_dropped;
805}
806
807static inline int mtk_max_frag_size(int mtu)
808{
809 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
810 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
811 mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
812
813 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
814 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
815}
816
817static inline int mtk_max_buf_size(int frag_size)
818{
819 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
820 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
821
822 WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
823
824 return buf_size;
825}
826
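/* Snapshot an RX descriptor into @rxd. Returns false if the DMA engine has
 * not set RX_DMA_DONE yet; rxd5/rxd6 only exist on NETSYS v2 parts.
 */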
developere9356982022-07-04 09:03:20 +0800827static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
828 struct mtk_rx_dma_v2 *dma_rxd)
developerfd40db22021-04-29 10:08:25 +0800829{
developerfd40db22021-04-29 10:08:25 +0800830 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
developerc4671b22021-05-28 13:16:42 +0800831 if (!(rxd->rxd2 & RX_DMA_DONE))
832 return false;
833
834 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
developerfd40db22021-04-29 10:08:25 +0800835 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
836 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
developere9356982022-07-04 09:03:20 +0800837
838 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
839 rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
840 rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
841 }
842
developerc4671b22021-05-28 13:16:42 +0800843 return true;
developerfd40db22021-04-29 10:08:25 +0800844}
845
/* the QDMA core needs scratch memory to be set up */
847static int mtk_init_fq_dma(struct mtk_eth *eth)
848{
developere9356982022-07-04 09:03:20 +0800849 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +0800850 dma_addr_t phy_ring_tail;
851 int cnt = MTK_DMA_SIZE;
852 dma_addr_t dma_addr;
853 int i;
854
855 if (!eth->soc->has_sram) {
856 eth->scratch_ring = dma_alloc_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +0800857 cnt * soc->txrx.txd_size,
developerfd40db22021-04-29 10:08:25 +0800858 &eth->phy_scratch_ring,
developere9356982022-07-04 09:03:20 +0800859 GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +0800860 } else {
861 eth->scratch_ring = eth->base + MTK_ETH_SRAM_OFFSET;
862 }
863
864 if (unlikely(!eth->scratch_ring))
865 return -ENOMEM;
866
developere9356982022-07-04 09:03:20 +0800867 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +0800868 if (unlikely(!eth->scratch_head))
869 return -ENOMEM;
870
871 dma_addr = dma_map_single(eth->dev,
872 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
873 DMA_FROM_DEVICE);
874 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
875 return -ENOMEM;
876
developere9356982022-07-04 09:03:20 +0800877 phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
developerfd40db22021-04-29 10:08:25 +0800878
879 for (i = 0; i < cnt; i++) {
developere9356982022-07-04 09:03:20 +0800880 struct mtk_tx_dma_v2 *txd;
881
882 txd = eth->scratch_ring + i * soc->txrx.txd_size;
883 txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
developerfd40db22021-04-29 10:08:25 +0800884 if (i < cnt - 1)
developere9356982022-07-04 09:03:20 +0800885 txd->txd2 = eth->phy_scratch_ring +
886 (i + 1) * soc->txrx.txd_size;
developerfd40db22021-04-29 10:08:25 +0800887
developere9356982022-07-04 09:03:20 +0800888 txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
889 txd->txd4 = 0;
890
891 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
892 txd->txd5 = 0;
893 txd->txd6 = 0;
894 txd->txd7 = 0;
895 txd->txd8 = 0;
developerfd40db22021-04-29 10:08:25 +0800896 }
developerfd40db22021-04-29 10:08:25 +0800897 }
898
899 mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
900 mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
901 mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
902 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
903
904 return 0;
905}
906
907static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
908{
developere9356982022-07-04 09:03:20 +0800909 return ring->dma + (desc - ring->phys);
developerfd40db22021-04-29 10:08:25 +0800910}
911
912static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
developere9356982022-07-04 09:03:20 +0800913 void *txd, u32 txd_size)
developerfd40db22021-04-29 10:08:25 +0800914{
developere9356982022-07-04 09:03:20 +0800915 int idx = (txd - ring->dma) / txd_size;
developerfd40db22021-04-29 10:08:25 +0800916
917 return &ring->buf[idx];
918}
919
920static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
developere9356982022-07-04 09:03:20 +0800921 void *dma)
developerfd40db22021-04-29 10:08:25 +0800922{
923 return ring->dma_pdma - ring->dma + dma;
924}
925
developere9356982022-07-04 09:03:20 +0800926static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
developerfd40db22021-04-29 10:08:25 +0800927{
developere9356982022-07-04 09:03:20 +0800928 return (dma - ring->dma) / txd_size;
developerfd40db22021-04-29 10:08:25 +0800929}
930
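/* Unmap the DMA buffer(s) attached to a TX slot and free its skb; QDMA keeps
 * one mapping per slot while PDMA may carry two, and @napi selects
 * napi_consume_skb() vs. dev_kfree_skb_any() for the skb release.
 */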
developerc4671b22021-05-28 13:16:42 +0800931static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
932 bool napi)
developerfd40db22021-04-29 10:08:25 +0800933{
934 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
935 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
936 dma_unmap_single(eth->dev,
937 dma_unmap_addr(tx_buf, dma_addr0),
938 dma_unmap_len(tx_buf, dma_len0),
939 DMA_TO_DEVICE);
940 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
941 dma_unmap_page(eth->dev,
942 dma_unmap_addr(tx_buf, dma_addr0),
943 dma_unmap_len(tx_buf, dma_len0),
944 DMA_TO_DEVICE);
945 }
946 } else {
947 if (dma_unmap_len(tx_buf, dma_len0)) {
948 dma_unmap_page(eth->dev,
949 dma_unmap_addr(tx_buf, dma_addr0),
950 dma_unmap_len(tx_buf, dma_len0),
951 DMA_TO_DEVICE);
952 }
953
954 if (dma_unmap_len(tx_buf, dma_len1)) {
955 dma_unmap_page(eth->dev,
956 dma_unmap_addr(tx_buf, dma_addr1),
957 dma_unmap_len(tx_buf, dma_len1),
958 DMA_TO_DEVICE);
959 }
960 }
961
962 tx_buf->flags = 0;
963 if (tx_buf->skb &&
developerc4671b22021-05-28 13:16:42 +0800964 (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
965 if (napi)
966 napi_consume_skb(tx_buf->skb, napi);
967 else
968 dev_kfree_skb_any(tx_buf->skb);
969 }
developerfd40db22021-04-29 10:08:25 +0800970 tx_buf->skb = NULL;
971}
972
973static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
974 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
975 size_t size, int idx)
976{
977 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
978 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
979 dma_unmap_len_set(tx_buf, dma_len0, size);
980 } else {
981 if (idx & 1) {
982 txd->txd3 = mapped_addr;
983 txd->txd2 |= TX_DMA_PLEN1(size);
984 dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
985 dma_unmap_len_set(tx_buf, dma_len1, size);
986 } else {
987 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
988 txd->txd1 = mapped_addr;
989 txd->txd2 = TX_DMA_PLEN0(size);
990 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
991 dma_unmap_len_set(tx_buf, dma_len0, size);
992 }
993 }
994}
995
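/* Fill a v1 TX descriptor from @info: txd1 holds the buffer address, txd3 the
 * length/last-segment bits, txd4 the forward port, queue id and the
 * TSO/checksum/VLAN offload flags. The HNAT hook redirects skbs carrying the
 * 0x78681415 magic to forward port 4.
 */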
developere9356982022-07-04 09:03:20 +0800996static void mtk_tx_set_dma_desc_v1(struct sk_buff *skb, struct net_device *dev, void *txd,
997 struct mtk_tx_dma_desc_info *info)
998{
999 struct mtk_mac *mac = netdev_priv(dev);
1000 struct mtk_eth *eth = mac->hw;
1001 struct mtk_tx_dma *desc = txd;
1002 u32 data;
1003
1004 WRITE_ONCE(desc->txd1, info->addr);
1005
1006 data = TX_DMA_SWC | QID_LOW_BITS(info->qid) | TX_DMA_PLEN0(info->size);
1007 if (info->last)
1008 data |= TX_DMA_LS0;
1009 WRITE_ONCE(desc->txd3, data);
1010
1011 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1012 data |= QID_HIGH_BITS(info->qid);
1013 if (info->first) {
1014 if (info->gso)
1015 data |= TX_DMA_TSO;
1016 /* tx checksum offload */
1017 if (info->csum)
1018 data |= TX_DMA_CHKSUM;
1019 /* vlan header offload */
1020 if (info->vlan)
1021 data |= TX_DMA_INS_VLAN | info->vlan_tci;
1022 }
1023
1024#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
1025 if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
1026 data &= ~(0x7 << TX_DMA_FPORT_SHIFT);
1027 data |= 0x4 << TX_DMA_FPORT_SHIFT;
1028 }
1029
1030 trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
1031 __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
1032#endif
1033 WRITE_ONCE(desc->txd4, data);
1034}
1035
1036static void mtk_tx_set_dma_desc_v2(struct sk_buff *skb, struct net_device *dev, void *txd,
1037 struct mtk_tx_dma_desc_info *info)
1038{
1039 struct mtk_mac *mac = netdev_priv(dev);
1040 struct mtk_eth *eth = mac->hw;
1041 struct mtk_tx_dma_v2 *desc = txd;
1042 u32 data = 0;
	u16 qid = info->qid;

	if (!info->qid && mac->id)
		qid = MTK_QDMA_GMAC2_QID;
1047
1048 WRITE_ONCE(desc->txd1, info->addr);
1049
1050 data = TX_DMA_PLEN0(info->size);
1051 if (info->last)
1052 data |= TX_DMA_LS0;
1053 WRITE_ONCE(desc->txd3, data);
1054
1055 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
1056 data |= TX_DMA_SWC_V2 | QID_BITS_V2(qid);
1057#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
1058 if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
1059 data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
1060 data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
1061 }
1062
1063 trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
1064 __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
1065#endif
1066 WRITE_ONCE(desc->txd4, data);
1067
1068 data = 0;
1069 if (info->first) {
1070 if (info->gso)
1071 data |= TX_DMA_TSO_V2;
1072 /* tx checksum offload */
1073 if (info->csum)
1074 data |= TX_DMA_CHKSUM_V2;
1075 }
1076 WRITE_ONCE(desc->txd5, data);
1077
1078 data = 0;
1079 if (info->first && info->vlan)
1080 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1081 WRITE_ONCE(desc->txd6, data);
1082
1083 WRITE_ONCE(desc->txd7, 0);
1084 WRITE_ONCE(desc->txd8, 0);
1085}
1086
1087static void mtk_tx_set_dma_desc(struct sk_buff *skb, struct net_device *dev, void *txd,
1088 struct mtk_tx_dma_desc_info *info)
1089{
1090 struct mtk_mac *mac = netdev_priv(dev);
1091 struct mtk_eth *eth = mac->hw;
1092
1093 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1094 mtk_tx_set_dma_desc_v2(skb, dev, txd, info);
1095 else
1096 mtk_tx_set_dma_desc_v1(skb, dev, txd, info);
1097}
1098
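/* Map an skb (linear head plus fragments) onto TX descriptors. The first
 * descriptor carries the offload flags; on a mapping failure everything
 * mapped so far is unwound and -ENOMEM is returned.
 */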
developerfd40db22021-04-29 10:08:25 +08001099static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1100 int tx_num, struct mtk_tx_ring *ring, bool gso)
1101{
developere9356982022-07-04 09:03:20 +08001102 struct mtk_tx_dma_desc_info txd_info = {
1103 .size = skb_headlen(skb),
1104 .qid = skb->mark & MTK_QDMA_TX_MASK,
1105 .gso = gso,
1106 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
1107 .vlan = skb_vlan_tag_present(skb),
1108 .vlan_tci = skb_vlan_tag_get(skb),
1109 .first = true,
1110 .last = !skb_is_nonlinear(skb),
1111 };
developerfd40db22021-04-29 10:08:25 +08001112 struct mtk_mac *mac = netdev_priv(dev);
1113 struct mtk_eth *eth = mac->hw;
developere9356982022-07-04 09:03:20 +08001114 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001115 struct mtk_tx_dma *itxd, *txd;
1116 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1117 struct mtk_tx_buf *itx_buf, *tx_buf;
developerfd40db22021-04-29 10:08:25 +08001118 int i, n_desc = 1;
developerfd40db22021-04-29 10:08:25 +08001119 int k = 0;
1120
1121 itxd = ring->next_free;
1122 itxd_pdma = qdma_to_pdma(ring, itxd);
1123 if (itxd == ring->last_free)
1124 return -ENOMEM;
1125
developere9356982022-07-04 09:03:20 +08001126 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001127 memset(itx_buf, 0, sizeof(*itx_buf));
1128
developere9356982022-07-04 09:03:20 +08001129 txd_info.addr = dma_map_single(eth->dev, skb->data, txd_info.size,
1130 DMA_TO_DEVICE);
1131 if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
developerfd40db22021-04-29 10:08:25 +08001132 return -ENOMEM;
1133
developere9356982022-07-04 09:03:20 +08001134 mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);
1135
developerfd40db22021-04-29 10:08:25 +08001136 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1137 itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1138 MTK_TX_FLAGS_FPORT1;
developere9356982022-07-04 09:03:20 +08001139 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
developerfd40db22021-04-29 10:08:25 +08001140 k++);
1141
developerfd40db22021-04-29 10:08:25 +08001142 /* TX SG offload */
1143 txd = itxd;
1144 txd_pdma = qdma_to_pdma(ring, txd);
1145
developere9356982022-07-04 09:03:20 +08001146 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
developerfd40db22021-04-29 10:08:25 +08001147 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1148 unsigned int offset = 0;
1149 int frag_size = skb_frag_size(frag);
1150
1151 while (frag_size) {
developerfd40db22021-04-29 10:08:25 +08001152 bool new_desc = true;
1153
developere9356982022-07-04 09:03:20 +08001154 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
developerfd40db22021-04-29 10:08:25 +08001155 (i & 0x1)) {
1156 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1157 txd_pdma = qdma_to_pdma(ring, txd);
1158 if (txd == ring->last_free)
1159 goto err_dma;
1160
1161 n_desc++;
1162 } else {
1163 new_desc = false;
1164 }
1165
developere9356982022-07-04 09:03:20 +08001166 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1167 txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
1168 txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
1169 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1170 !(frag_size - txd_info.size);
1171 txd_info.addr = skb_frag_dma_map(eth->dev, frag,
1172 offset, txd_info.size,
1173 DMA_TO_DEVICE);
1174 if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
1175 goto err_dma;
developerfd40db22021-04-29 10:08:25 +08001176
developere9356982022-07-04 09:03:20 +08001177 mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);
developerfd40db22021-04-29 10:08:25 +08001178
developere9356982022-07-04 09:03:20 +08001179 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001180 if (new_desc)
1181 memset(tx_buf, 0, sizeof(*tx_buf));
1182 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
1183 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1184 tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1185 MTK_TX_FLAGS_FPORT1;
1186
developere9356982022-07-04 09:03:20 +08001187 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1188 txd_info.size, k++);
developerfd40db22021-04-29 10:08:25 +08001189
developere9356982022-07-04 09:03:20 +08001190 frag_size -= txd_info.size;
1191 offset += txd_info.size;
developerfd40db22021-04-29 10:08:25 +08001192 }
1193 }
1194
1195 /* store skb to cleanup */
1196 itx_buf->skb = skb;
1197
developere9356982022-07-04 09:03:20 +08001198 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
developerfd40db22021-04-29 10:08:25 +08001199 if (k & 0x1)
1200 txd_pdma->txd2 |= TX_DMA_LS0;
1201 else
1202 txd_pdma->txd2 |= TX_DMA_LS1;
1203 }
1204
1205 netdev_sent_queue(dev, skb->len);
1206 skb_tx_timestamp(skb);
1207
1208 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1209 atomic_sub(n_desc, &ring->free_count);
1210
1211 /* make sure that all changes to the dma ring are flushed before we
1212 * continue
1213 */
1214 wmb();
1215
developere9356982022-07-04 09:03:20 +08001216 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
developerfd40db22021-04-29 10:08:25 +08001217 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
1218 !netdev_xmit_more())
1219 mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
1220 } else {
developere9356982022-07-04 09:03:20 +08001221 int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
developerfd40db22021-04-29 10:08:25 +08001222 ring->dma_size);
1223 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1224 }
1225
1226 return 0;
1227
1228err_dma:
1229 do {
developere9356982022-07-04 09:03:20 +08001230 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001231
1232 /* unmap dma */
developerc4671b22021-05-28 13:16:42 +08001233 mtk_tx_unmap(eth, tx_buf, false);
developerfd40db22021-04-29 10:08:25 +08001234
1235 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
developere9356982022-07-04 09:03:20 +08001236 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
developerfd40db22021-04-29 10:08:25 +08001237 itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1238
1239 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1240 itxd_pdma = qdma_to_pdma(ring, itxd);
1241 } while (itxd != txd);
1242
1243 return -ENOMEM;
1244}
1245
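/* Worst-case number of TX descriptors needed for @skb: GSO fragments may be
 * split into MTK_TX_DMA_BUF_LEN sized chunks.
 */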
1246static inline int mtk_cal_txd_req(struct sk_buff *skb)
1247{
1248 int i, nfrags;
1249 skb_frag_t *frag;
1250
1251 nfrags = 1;
1252 if (skb_is_gso(skb)) {
1253 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1254 frag = &skb_shinfo(skb)->frags[i];
1255 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1256 MTK_TX_DMA_BUF_LEN);
1257 }
1258 } else {
1259 nfrags += skb_shinfo(skb)->nr_frags;
1260 }
1261
1262 return nfrags;
1263}
1264
1265static int mtk_queue_stopped(struct mtk_eth *eth)
1266{
1267 int i;
1268
1269 for (i = 0; i < MTK_MAC_COUNT; i++) {
1270 if (!eth->netdev[i])
1271 continue;
1272 if (netif_queue_stopped(eth->netdev[i]))
1273 return 1;
1274 }
1275
1276 return 0;
1277}
1278
1279static void mtk_wake_queue(struct mtk_eth *eth)
1280{
1281 int i;
1282
1283 for (i = 0; i < MTK_MAC_COUNT; i++) {
1284 if (!eth->netdev[i])
1285 continue;
1286 netif_wake_queue(eth->netdev[i]);
1287 }
1288}
1289
1290static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1291{
1292 struct mtk_mac *mac = netdev_priv(dev);
1293 struct mtk_eth *eth = mac->hw;
1294 struct mtk_tx_ring *ring = &eth->tx_ring;
1295 struct net_device_stats *stats = &dev->stats;
1296 bool gso = false;
1297 int tx_num;
1298
1299 /* normally we can rely on the stack not calling this more than once,
1300 * however we have 2 queues running on the same ring so we need to lock
1301 * the ring access
1302 */
1303 spin_lock(&eth->page_lock);
1304
1305 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1306 goto drop;
1307
1308 tx_num = mtk_cal_txd_req(skb);
1309 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1310 netif_stop_queue(dev);
1311 netif_err(eth, tx_queued, dev,
1312 "Tx Ring full when queue awake!\n");
1313 spin_unlock(&eth->page_lock);
1314 return NETDEV_TX_BUSY;
1315 }
1316
1317 /* TSO: fill MSS info in tcp checksum field */
1318 if (skb_is_gso(skb)) {
1319 if (skb_cow_head(skb, 0)) {
1320 netif_warn(eth, tx_err, dev,
1321 "GSO expand head fail.\n");
1322 goto drop;
1323 }
1324
1325 if (skb_shinfo(skb)->gso_type &
1326 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1327 gso = true;
1328 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1329 }
1330 }
1331
1332 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1333 goto drop;
1334
1335 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1336 netif_stop_queue(dev);
1337
1338 spin_unlock(&eth->page_lock);
1339
1340 return NETDEV_TX_OK;
1341
1342drop:
1343 spin_unlock(&eth->page_lock);
1344 stats->tx_dropped++;
1345 dev_kfree_skb_any(skb);
1346 return NETDEV_TX_OK;
1347}
1348
1349static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1350{
1351 int i;
1352 struct mtk_rx_ring *ring;
1353 int idx;
1354
developerfd40db22021-04-29 10:08:25 +08001355 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
developere9356982022-07-04 09:03:20 +08001356 struct mtk_rx_dma *rxd;
1357
developer77d03a72021-06-06 00:06:00 +08001358 if (!IS_NORMAL_RING(i) && !IS_HW_LRO_RING(i))
1359 continue;
1360
developerfd40db22021-04-29 10:08:25 +08001361 ring = &eth->rx_ring[i];
1362 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
developere9356982022-07-04 09:03:20 +08001363 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1364 if (rxd->rxd2 & RX_DMA_DONE) {
developerfd40db22021-04-29 10:08:25 +08001365 ring->calc_idx_update = true;
1366 return ring;
1367 }
1368 }
1369
1370 return NULL;
1371}
1372
developer18f46a82021-07-20 21:08:21 +08001373static void mtk_update_rx_cpu_idx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
developerfd40db22021-04-29 10:08:25 +08001374{
developerfd40db22021-04-29 10:08:25 +08001375 int i;
1376
developerfb556ca2021-10-13 10:52:09 +08001377 if (!eth->hwlro)
developerfd40db22021-04-29 10:08:25 +08001378 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
developerfb556ca2021-10-13 10:52:09 +08001379 else {
developerfd40db22021-04-29 10:08:25 +08001380 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1381 ring = &eth->rx_ring[i];
1382 if (ring->calc_idx_update) {
1383 ring->calc_idx_update = false;
1384 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1385 }
1386 }
1387 }
1388}
1389
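/* NAPI RX poll: each consumed slot is refilled with a freshly mapped frag
 * before the old buffer is passed up the stack; on NETSYS v2 the source MAC
 * is taken from rxd5, otherwise from rxd4.
 */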
1390static int mtk_poll_rx(struct napi_struct *napi, int budget,
1391 struct mtk_eth *eth)
1392{
developer18f46a82021-07-20 21:08:21 +08001393 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
1394 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08001395 int idx;
1396 struct sk_buff *skb;
1397 u8 *data, *new_data;
developere9356982022-07-04 09:03:20 +08001398 struct mtk_rx_dma_v2 *rxd, trxd;
developerfd40db22021-04-29 10:08:25 +08001399 int done = 0;
1400
developer18f46a82021-07-20 21:08:21 +08001401 if (unlikely(!ring))
1402 goto rx_done;
1403
developerfd40db22021-04-29 10:08:25 +08001404 while (done < budget) {
1405 struct net_device *netdev;
1406 unsigned int pktlen;
1407 dma_addr_t dma_addr;
developere9356982022-07-04 09:03:20 +08001408 int mac = 0;
developerfd40db22021-04-29 10:08:25 +08001409
developer18f46a82021-07-20 21:08:21 +08001410 if (eth->hwlro)
1411 ring = mtk_get_rx_ring(eth);
1412
developerfd40db22021-04-29 10:08:25 +08001413 if (unlikely(!ring))
1414 goto rx_done;
1415
1416 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
developere9356982022-07-04 09:03:20 +08001417 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
developerfd40db22021-04-29 10:08:25 +08001418 data = ring->data[idx];
1419
developere9356982022-07-04 09:03:20 +08001420 if (!mtk_rx_get_desc(eth, &trxd, rxd))
developerfd40db22021-04-29 10:08:25 +08001421 break;
1422
		/* find out which MAC the packet comes from. values start at 1 */
1424 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1425 mac = 0;
1426 } else {
developera2bdbd52021-05-31 19:10:17 +08001427 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developere9356982022-07-04 09:03:20 +08001428 mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
developerfd40db22021-04-29 10:08:25 +08001429 else
developerfd40db22021-04-29 10:08:25 +08001430 mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ?
1431 0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
1432 }
1433
1434 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1435 !eth->netdev[mac]))
1436 goto release_desc;
1437
1438 netdev = eth->netdev[mac];
1439
1440 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1441 goto release_desc;
1442
1443 /* alloc new buffer */
1444 new_data = napi_alloc_frag(ring->frag_size);
1445 if (unlikely(!new_data)) {
1446 netdev->stats.rx_dropped++;
1447 goto release_desc;
1448 }
1449 dma_addr = dma_map_single(eth->dev,
1450 new_data + NET_SKB_PAD +
1451 eth->ip_align,
1452 ring->buf_size,
1453 DMA_FROM_DEVICE);
1454 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
1455 skb_free_frag(new_data);
1456 netdev->stats.rx_dropped++;
1457 goto release_desc;
1458 }
1459
developerc4671b22021-05-28 13:16:42 +08001460 dma_unmap_single(eth->dev, trxd.rxd1,
1461 ring->buf_size, DMA_FROM_DEVICE);
1462
developerfd40db22021-04-29 10:08:25 +08001463 /* receive data */
1464 skb = build_skb(data, ring->frag_size);
1465 if (unlikely(!skb)) {
developerc4671b22021-05-28 13:16:42 +08001466 skb_free_frag(data);
developerfd40db22021-04-29 10:08:25 +08001467 netdev->stats.rx_dropped++;
developerc4671b22021-05-28 13:16:42 +08001468 goto skip_rx;
developerfd40db22021-04-29 10:08:25 +08001469 }
1470 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1471
developerfd40db22021-04-29 10:08:25 +08001472 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1473 skb->dev = netdev;
1474 skb_put(skb, pktlen);
1475
developera2bdbd52021-05-31 19:10:17 +08001476 if ((!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
developerfd40db22021-04-29 10:08:25 +08001477 (trxd.rxd4 & eth->rx_dma_l4_valid)) ||
developera2bdbd52021-05-31 19:10:17 +08001478 (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
developerfd40db22021-04-29 10:08:25 +08001479 (trxd.rxd3 & eth->rx_dma_l4_valid)))
1480 skb->ip_summed = CHECKSUM_UNNECESSARY;
1481 else
1482 skb_checksum_none_assert(skb);
1483 skb->protocol = eth_type_trans(skb, netdev);
1484
1485 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
developera2bdbd52021-05-31 19:10:17 +08001486 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developer255bba22021-07-27 15:16:33 +08001487 if (trxd.rxd3 & RX_DMA_VTAG_V2)
developerfd40db22021-04-29 10:08:25 +08001488 __vlan_hwaccel_put_tag(skb,
developer255bba22021-07-27 15:16:33 +08001489 htons(RX_DMA_VPID_V2(trxd.rxd4)),
developerfd40db22021-04-29 10:08:25 +08001490 RX_DMA_VID_V2(trxd.rxd4));
1491 } else {
1492 if (trxd.rxd2 & RX_DMA_VTAG)
1493 __vlan_hwaccel_put_tag(skb,
1494 htons(RX_DMA_VPID(trxd.rxd3)),
1495 RX_DMA_VID(trxd.rxd3));
1496 }
1497
			/* If netdev is attached to a DSA switch, the special
			 * tag inserted in the VLAN field by the switch hardware
			 * can be offloaded by RX HW VLAN offload. Clear the VLAN
			 * information from @skb to avoid an unexpected 802.1d
			 * handler before the packet enters the DSA framework.
			 */
1504 if (netdev_uses_dsa(netdev))
1505 __vlan_hwaccel_clear_tag(skb);
1506 }
1507
1508#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
developera2bdbd52021-05-31 19:10:17 +08001509 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developerfd40db22021-04-29 10:08:25 +08001510 *(u32 *)(skb->head) = trxd.rxd5;
1511 else
developerfd40db22021-04-29 10:08:25 +08001512 *(u32 *)(skb->head) = trxd.rxd4;
1513
1514 skb_hnat_alg(skb) = 0;
developerfdfe1572021-09-13 16:56:33 +08001515 skb_hnat_filled(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001516 skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
1517
1518 if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
1519 trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
1520 __func__, skb_hnat_reason(skb));
1521 skb->pkt_type = PACKET_HOST;
1522 }
1523
1524 trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x\n",
1525 __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
1526 skb_hnat_reason(skb), skb_hnat_alg(skb));
1527#endif
developer77d03a72021-06-06 00:06:00 +08001528 if (mtk_hwlro_stats_ebl &&
1529 IS_HW_LRO_RING(ring->ring_no) && eth->hwlro) {
1530 hw_lro_stats_update(ring->ring_no, &trxd);
1531 hw_lro_flush_stats_update(ring->ring_no, &trxd);
1532 }
developerfd40db22021-04-29 10:08:25 +08001533
1534 skb_record_rx_queue(skb, 0);
1535 napi_gro_receive(napi, skb);
1536
developerc4671b22021-05-28 13:16:42 +08001537skip_rx:
developerfd40db22021-04-29 10:08:25 +08001538 ring->data[idx] = new_data;
1539 rxd->rxd1 = (unsigned int)dma_addr;
1540
1541release_desc:
1542 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1543 rxd->rxd2 = RX_DMA_LSO;
1544 else
1545 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
1546
1547 ring->calc_idx = idx;
1548
1549 done++;
1550 }
1551
1552rx_done:
1553 if (done) {
1554 /* make sure that all changes to the dma ring are flushed before
1555 * we continue
1556 */
1557 wmb();
developer18f46a82021-07-20 21:08:21 +08001558 mtk_update_rx_cpu_idx(eth, ring);
developerfd40db22021-04-29 10:08:25 +08001559 }
1560
1561 return done;
1562}
1563
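/* Reclaim completed QDMA TX descriptors by walking from the last freed
 * descriptor up to the hardware release (DRX) pointer.
 */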
developerfb556ca2021-10-13 10:52:09 +08001564static void mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08001565 unsigned int *done, unsigned int *bytes)
1566{
developere9356982022-07-04 09:03:20 +08001567 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001568 struct mtk_tx_ring *ring = &eth->tx_ring;
1569 struct mtk_tx_dma *desc;
1570 struct sk_buff *skb;
1571 struct mtk_tx_buf *tx_buf;
1572 u32 cpu, dma;
1573
developerc4671b22021-05-28 13:16:42 +08001574 cpu = ring->last_free_ptr;
developerfd40db22021-04-29 10:08:25 +08001575 dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1576
1577 desc = mtk_qdma_phys_to_virt(ring, cpu);
1578
1579 while ((cpu != dma) && budget) {
1580 u32 next_cpu = desc->txd2;
1581 int mac = 0;
1582
1583 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1584 break;
1585
1586 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1587
developere9356982022-07-04 09:03:20 +08001588 tx_buf = mtk_desc_to_tx_buf(ring, desc, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001589 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
1590 mac = 1;
1591
1592 skb = tx_buf->skb;
1593 if (!skb)
1594 break;
1595
1596 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1597 bytes[mac] += skb->len;
1598 done[mac]++;
1599 budget--;
1600 }
developerc4671b22021-05-28 13:16:42 +08001601 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08001602
1603 ring->last_free = desc;
1604 atomic_inc(&ring->free_count);
1605
1606 cpu = next_cpu;
1607 }
1608
developerc4671b22021-05-28 13:16:42 +08001609 ring->last_free_ptr = cpu;
developerfd40db22021-04-29 10:08:25 +08001610 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
developerfd40db22021-04-29 10:08:25 +08001611}
1612
developerfb556ca2021-10-13 10:52:09 +08001613static void mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08001614 unsigned int *done, unsigned int *bytes)
1615{
1616 struct mtk_tx_ring *ring = &eth->tx_ring;
1617 struct mtk_tx_dma *desc;
1618 struct sk_buff *skb;
1619 struct mtk_tx_buf *tx_buf;
1620 u32 cpu, dma;
1621
1622 cpu = ring->cpu_idx;
1623 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
1624
1625 while ((cpu != dma) && budget) {
1626 tx_buf = &ring->buf[cpu];
1627 skb = tx_buf->skb;
1628 if (!skb)
1629 break;
1630
1631 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1632 bytes[0] += skb->len;
1633 done[0]++;
1634 budget--;
1635 }
1636
developerc4671b22021-05-28 13:16:42 +08001637 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08001638
developere9356982022-07-04 09:03:20 +08001639 desc = ring->dma + cpu * eth->soc->txrx.txd_size;
developerfd40db22021-04-29 10:08:25 +08001640 ring->last_free = desc;
1641 atomic_inc(&ring->free_count);
1642
1643 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
1644 }
1645
1646 ring->cpu_idx = cpu;
developerfd40db22021-04-29 10:08:25 +08001647}
1648
1649static int mtk_poll_tx(struct mtk_eth *eth, int budget)
1650{
1651 struct mtk_tx_ring *ring = &eth->tx_ring;
1652 unsigned int done[MTK_MAX_DEVS];
1653 unsigned int bytes[MTK_MAX_DEVS];
1654 int total = 0, i;
1655
1656 memset(done, 0, sizeof(done));
1657 memset(bytes, 0, sizeof(bytes));
1658
1659 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
developerfb556ca2021-10-13 10:52:09 +08001660 mtk_poll_tx_qdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08001661 else
developerfb556ca2021-10-13 10:52:09 +08001662 mtk_poll_tx_pdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08001663
1664 for (i = 0; i < MTK_MAC_COUNT; i++) {
1665 if (!eth->netdev[i] || !done[i])
1666 continue;
1667 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1668 total += done[i];
1669 }
1670
1671 if (mtk_queue_stopped(eth) &&
1672 (atomic_read(&ring->free_count) > ring->thresh))
1673 mtk_wake_queue(eth);
1674
1675 return total;
1676}
1677
1678static void mtk_handle_status_irq(struct mtk_eth *eth)
1679{
developer8051e042022-04-08 13:26:36 +08001680 u32 status2 = mtk_r32(eth, MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08001681
1682 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
1683 mtk_stats_update(eth);
1684 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
developer8051e042022-04-08 13:26:36 +08001685 MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08001686 }
1687}
1688
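/* NAPI poll handler for TX completion: acknowledge the TX done interrupt,
 * reclaim descriptors via mtk_poll_tx(), and re-enable the interrupt only
 * when the budget was not exhausted and no further TX done status is
 * pending.
 */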
1689static int mtk_napi_tx(struct napi_struct *napi, int budget)
1690{
1691 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
1692 u32 status, mask;
1693 int tx_done = 0;
1694
1695 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1696 mtk_handle_status_irq(eth);
1697 mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
1698 tx_done = mtk_poll_tx(eth, budget);
1699
1700 if (unlikely(netif_msg_intr(eth))) {
1701 status = mtk_r32(eth, eth->tx_int_status_reg);
1702 mask = mtk_r32(eth, eth->tx_int_mask_reg);
1703 dev_info(eth->dev,
1704 "done tx %d, intr 0x%08x/0x%x\n",
1705 tx_done, status, mask);
1706 }
1707
1708 if (tx_done == budget)
1709 return budget;
1710
1711 status = mtk_r32(eth, eth->tx_int_status_reg);
1712 if (status & MTK_TX_DONE_INT)
1713 return budget;
1714
developerc4671b22021-05-28 13:16:42 +08001715 if (napi_complete(napi))
1716 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developerfd40db22021-04-29 10:08:25 +08001717
1718 return tx_done;
1719}
1720
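/* NAPI poll handler for one RX ring: acknowledge the ring's RX done
 * interrupt, poll up to the remaining budget and loop while new RX done
 * status shows up; the interrupt is re-enabled only after a successful
 * napi_complete().
 */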
1721static int mtk_napi_rx(struct napi_struct *napi, int budget)
1722{
developer18f46a82021-07-20 21:08:21 +08001723 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
1724 struct mtk_eth *eth = rx_napi->eth;
1725 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08001726 u32 status, mask;
1727 int rx_done = 0;
1728 int remain_budget = budget;
1729
1730 mtk_handle_status_irq(eth);
1731
1732poll_again:
developer18f46a82021-07-20 21:08:21 +08001733 mtk_w32(eth, MTK_RX_DONE_INT(ring->ring_no), MTK_PDMA_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08001734 rx_done = mtk_poll_rx(napi, remain_budget, eth);
1735
1736 if (unlikely(netif_msg_intr(eth))) {
1737 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1738 mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
1739 dev_info(eth->dev,
1740 "done rx %d, intr 0x%08x/0x%x\n",
1741 rx_done, status, mask);
1742 }
1743 if (rx_done == remain_budget)
1744 return budget;
1745
1746 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
developer18f46a82021-07-20 21:08:21 +08001747 if (status & MTK_RX_DONE_INT(ring->ring_no)) {
developerfd40db22021-04-29 10:08:25 +08001748 remain_budget -= rx_done;
1749 goto poll_again;
1750 }
developerc4671b22021-05-28 13:16:42 +08001751
1752 if (napi_complete(napi))
developer18f46a82021-07-20 21:08:21 +08001753 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(ring->ring_no));
developerfd40db22021-04-29 10:08:25 +08001754
1755 return rx_done + budget - remain_budget;
1756}
1757
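/* Allocate and initialize the TX ring. Descriptors live either in SoC
 * SRAM (right after the scratch ring) or in coherent DMA memory, are
 * chained through txd2 and initialized as TX_DMA_LS0 | TX_DMA_OWNER_CPU.
 * The ring base and index pointers are then programmed into the QDMA or,
 * on PDMA-only SoCs, the PDMA registers.
 */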
1758static int mtk_tx_alloc(struct mtk_eth *eth)
1759{
developere9356982022-07-04 09:03:20 +08001760 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001761 struct mtk_tx_ring *ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08001762 int i, sz = soc->txrx.txd_size;
1763 struct mtk_tx_dma_v2 *txd, *pdma_txd;
developerfd40db22021-04-29 10:08:25 +08001764
1765 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
1766 GFP_KERNEL);
1767 if (!ring->buf)
1768 goto no_tx_mem;
1769
1770 if (!eth->soc->has_sram)
1771 ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08001772 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08001773 else {
developere9356982022-07-04 09:03:20 +08001774 ring->dma = eth->scratch_ring + MTK_DMA_SIZE * sz;
developerfd40db22021-04-29 10:08:25 +08001775 ring->phys = eth->phy_scratch_ring + MTK_DMA_SIZE * sz;
1776 }
1777
1778 if (!ring->dma)
1779 goto no_tx_mem;
1780
1781 for (i = 0; i < MTK_DMA_SIZE; i++) {
1782 int next = (i + 1) % MTK_DMA_SIZE;
1783 u32 next_ptr = ring->phys + next * sz;
1784
developere9356982022-07-04 09:03:20 +08001785 txd = ring->dma + i * sz;
1786 txd->txd2 = next_ptr;
1787 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1788 txd->txd4 = 0;
1789
1790 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1791 txd->txd5 = 0;
1792 txd->txd6 = 0;
1793 txd->txd7 = 0;
1794 txd->txd8 = 0;
1795 }
developerfd40db22021-04-29 10:08:25 +08001796 }
1797
1798 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
1799 * only as the framework. The real HW descriptors are the PDMA
1800 * descriptors in ring->dma_pdma.
1801 */
1802 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1803 ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08001804 &ring->phys_pdma, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08001805 if (!ring->dma_pdma)
1806 goto no_tx_mem;
1807
1808 for (i = 0; i < MTK_DMA_SIZE; i++) {
developere9356982022-07-04 09:03:20 +08001809			pdma_txd = ring->dma_pdma + i * sz;
1810
1811 pdma_txd->txd2 = TX_DMA_DESP2_DEF;
1812 pdma_txd->txd4 = 0;
developerfd40db22021-04-29 10:08:25 +08001813 }
1814 }
1815
1816 ring->dma_size = MTK_DMA_SIZE;
1817 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
developere9356982022-07-04 09:03:20 +08001818 ring->next_free = ring->dma;
1819 ring->last_free = (void *)txd;
developerc4671b22021-05-28 13:16:42 +08001820 ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
developerfd40db22021-04-29 10:08:25 +08001821 ring->thresh = MAX_SKB_FRAGS;
1822
1823 /* make sure that all changes to the dma ring are flushed before we
1824 * continue
1825 */
1826 wmb();
1827
1828 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1829 mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
1830 mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
1831 mtk_w32(eth,
1832 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1833 MTK_QTX_CRX_PTR);
developerc4671b22021-05-28 13:16:42 +08001834 mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
developerfd40db22021-04-29 10:08:25 +08001835 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
1836 MTK_QTX_CFG(0));
1837 } else {
1838 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
1839 mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
1840 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
1841 mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
1842 }
1843
1844 return 0;
1845
1846no_tx_mem:
1847 return -ENOMEM;
1848}
1849
1850static void mtk_tx_clean(struct mtk_eth *eth)
1851{
developere9356982022-07-04 09:03:20 +08001852 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001853 struct mtk_tx_ring *ring = &eth->tx_ring;
1854 int i;
1855
1856 if (ring->buf) {
1857 for (i = 0; i < MTK_DMA_SIZE; i++)
developerc4671b22021-05-28 13:16:42 +08001858 mtk_tx_unmap(eth, &ring->buf[i], false);
developerfd40db22021-04-29 10:08:25 +08001859 kfree(ring->buf);
1860 ring->buf = NULL;
1861 }
1862
1863 if (!eth->soc->has_sram && ring->dma) {
1864 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08001865 MTK_DMA_SIZE * soc->txrx.txd_size,
1866 ring->dma, ring->phys);
developerfd40db22021-04-29 10:08:25 +08001867 ring->dma = NULL;
1868 }
1869
1870 if (ring->dma_pdma) {
1871 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08001872 MTK_DMA_SIZE * soc->txrx.txd_size,
1873 ring->dma_pdma, ring->phys_pdma);
developerfd40db22021-04-29 10:08:25 +08001874 ring->dma_pdma = NULL;
1875 }
1876}
1877
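/* Allocate one RX ring (normal, HW LRO or QDMA flavour): page-fragment
 * buffers are allocated and DMA-mapped, the descriptors are filled with
 * the buffer addresses, and the ring base, size and CPU index are written
 * to the matching QDMA or PDMA registers.
 */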
1878static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
1879{
1880 struct mtk_rx_ring *ring;
1881 int rx_data_len, rx_dma_size;
1882 int i;
1883
1884 if (rx_flag == MTK_RX_FLAGS_QDMA) {
1885 if (ring_no)
1886 return -EINVAL;
1887 ring = &eth->rx_ring_qdma;
1888 } else {
1889 ring = &eth->rx_ring[ring_no];
1890 }
1891
1892 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
1893 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
1894 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
1895 } else {
1896 rx_data_len = ETH_DATA_LEN;
1897 rx_dma_size = MTK_DMA_SIZE;
1898 }
1899
1900 ring->frag_size = mtk_max_frag_size(rx_data_len);
1901 ring->buf_size = mtk_max_buf_size(ring->frag_size);
1902 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
1903 GFP_KERNEL);
1904 if (!ring->data)
1905 return -ENOMEM;
1906
1907 for (i = 0; i < rx_dma_size; i++) {
1908 ring->data[i] = netdev_alloc_frag(ring->frag_size);
1909 if (!ring->data[i])
1910 return -ENOMEM;
1911 }
1912
1913 if ((!eth->soc->has_sram) || (eth->soc->has_sram
1914 && (rx_flag != MTK_RX_FLAGS_NORMAL)))
1915 ring->dma = dma_alloc_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08001916 rx_dma_size * eth->soc->txrx.rxd_size,
1917 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08001918 else {
1919 struct mtk_tx_ring *tx_ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08001920 ring->dma = tx_ring->dma + MTK_DMA_SIZE *
1921 eth->soc->txrx.rxd_size * (ring_no + 1);
developer18f46a82021-07-20 21:08:21 +08001922 ring->phys = tx_ring->phys + MTK_DMA_SIZE *
developere9356982022-07-04 09:03:20 +08001923 eth->soc->txrx.rxd_size * (ring_no + 1);
developerfd40db22021-04-29 10:08:25 +08001924 }
1925
1926 if (!ring->dma)
1927 return -ENOMEM;
1928
1929 for (i = 0; i < rx_dma_size; i++) {
developere9356982022-07-04 09:03:20 +08001930 struct mtk_rx_dma_v2 *rxd;
1931
developerfd40db22021-04-29 10:08:25 +08001932 dma_addr_t dma_addr = dma_map_single(eth->dev,
1933 ring->data[i] + NET_SKB_PAD + eth->ip_align,
1934 ring->buf_size,
1935 DMA_FROM_DEVICE);
1936 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1937 return -ENOMEM;
developere9356982022-07-04 09:03:20 +08001938
1939 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
1940 rxd->rxd1 = (unsigned int)dma_addr;
developerfd40db22021-04-29 10:08:25 +08001941
1942 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
developere9356982022-07-04 09:03:20 +08001943 rxd->rxd2 = RX_DMA_LSO;
developerfd40db22021-04-29 10:08:25 +08001944 else
developere9356982022-07-04 09:03:20 +08001945 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
developerfd40db22021-04-29 10:08:25 +08001946
developere9356982022-07-04 09:03:20 +08001947 rxd->rxd3 = 0;
1948 rxd->rxd4 = 0;
1949
1950 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1951 rxd->rxd5 = 0;
1952 rxd->rxd6 = 0;
1953 rxd->rxd7 = 0;
1954 rxd->rxd8 = 0;
developerfd40db22021-04-29 10:08:25 +08001955 }
developerfd40db22021-04-29 10:08:25 +08001956 }
1957 ring->dma_size = rx_dma_size;
1958 ring->calc_idx_update = false;
1959 ring->calc_idx = rx_dma_size - 1;
1960 ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ?
1961 MTK_QRX_CRX_IDX_CFG(ring_no) :
1962 MTK_PRX_CRX_IDX_CFG(ring_no);
developer77d03a72021-06-06 00:06:00 +08001963 ring->ring_no = ring_no;
developerfd40db22021-04-29 10:08:25 +08001964 /* make sure that all changes to the dma ring are flushed before we
1965 * continue
1966 */
1967 wmb();
1968
1969 if (rx_flag == MTK_RX_FLAGS_QDMA) {
1970 mtk_w32(eth, ring->phys, MTK_QRX_BASE_PTR_CFG(ring_no));
1971 mtk_w32(eth, rx_dma_size, MTK_QRX_MAX_CNT_CFG(ring_no));
1972 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1973 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_QDMA_RST_IDX);
1974 } else {
1975 mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
1976 mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
1977 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1978 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
1979 }
1980
1981 return 0;
1982}
1983
1984static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram)
1985{
1986 int i;
1987
1988 if (ring->data && ring->dma) {
1989 for (i = 0; i < ring->dma_size; i++) {
developere9356982022-07-04 09:03:20 +08001990 struct mtk_rx_dma *rxd;
1991
developerfd40db22021-04-29 10:08:25 +08001992 if (!ring->data[i])
1993 continue;
developere9356982022-07-04 09:03:20 +08001994
1995 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
1996 if (!rxd->rxd1)
developerfd40db22021-04-29 10:08:25 +08001997 continue;
developere9356982022-07-04 09:03:20 +08001998
developerfd40db22021-04-29 10:08:25 +08001999 dma_unmap_single(eth->dev,
developere9356982022-07-04 09:03:20 +08002000 rxd->rxd1,
developerfd40db22021-04-29 10:08:25 +08002001 ring->buf_size,
2002 DMA_FROM_DEVICE);
2003 skb_free_frag(ring->data[i]);
2004 }
2005 kfree(ring->data);
2006 ring->data = NULL;
2007 }
2008
2009	if (in_sram)
2010 return;
2011
2012 if (ring->dma) {
2013 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002014 ring->dma_size * eth->soc->txrx.rxd_size,
developerfd40db22021-04-29 10:08:25 +08002015 ring->dma,
2016 ring->phys);
2017 ring->dma = NULL;
2018 }
2019}
2020
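/* Configure hardware LRO: put the LRO rings into auto-learn mode, set the
 * aging and aggregation timers and counters, program the bandwidth and
 * score-delta thresholds, and finally enable LRO in PDMA_LRO_CTRL_DW0.
 */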
2021static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2022{
2023 int i;
developer77d03a72021-06-06 00:06:00 +08002024 u32 val;
developerfd40db22021-04-29 10:08:25 +08002025 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2026 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2027
2028 /* set LRO rings to auto-learn modes */
2029 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2030
2031 /* validate LRO ring */
2032 ring_ctrl_dw2 |= MTK_RING_VLD;
2033
2034 /* set AGE timer (unit: 20us) */
2035 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2036 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2037
2038 /* set max AGG timer (unit: 20us) */
2039 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2040
2041 /* set max LRO AGG count */
2042 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2043 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2044
developer77d03a72021-06-06 00:06:00 +08002045 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002046 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2047 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2048 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2049 }
2050
2051 /* IPv4 checksum update enable */
2052 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2053
2054 /* switch priority comparison to packet count mode */
2055 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2056
2057 /* bandwidth threshold setting */
2058 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2059
2060 /* auto-learn score delta setting */
developer77d03a72021-06-06 00:06:00 +08002061 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_LRO_ALT_SCORE_DELTA);
developerfd40db22021-04-29 10:08:25 +08002062
2063 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2064 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2065 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2066
developerfd40db22021-04-29 10:08:25 +08002067 /* the minimal remaining room of SDL0 in RXD for lro aggregation */
2068 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2069
developer77d03a72021-06-06 00:06:00 +08002070 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2071 val = mtk_r32(eth, MTK_PDMA_RX_CFG);
2072 mtk_w32(eth, val | (MTK_PDMA_LRO_SDL << MTK_RX_CFG_SDL_OFFSET),
2073 MTK_PDMA_RX_CFG);
2074
2075 lro_ctrl_dw0 |= MTK_PDMA_LRO_SDL << MTK_CTRL_DW0_SDL_OFFSET;
2076 } else {
2077 /* set HW LRO mode & the max aggregation count for rx packets */
2078 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2079 }
2080
developerfd40db22021-04-29 10:08:25 +08002081 /* enable HW LRO */
2082 lro_ctrl_dw0 |= MTK_LRO_EN;
2083
developer77d03a72021-06-06 00:06:00 +08002084	/* enable the CPU reason blacklist */
2085 lro_ctrl_dw0 |= MTK_LRO_CRSN_BNW;
2086
developerfd40db22021-04-29 10:08:25 +08002087 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2088 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2089
developer77d03a72021-06-06 00:06:00 +08002090	/* do not use the PPE CPU reason */
2091 mtk_w32(eth, 0xffffffff, MTK_PDMA_LRO_CTRL_DW1);
2092
developerfd40db22021-04-29 10:08:25 +08002093 return 0;
2094}
2095
2096static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2097{
2098 int i;
2099 u32 val;
2100
2101 /* relinquish lro rings, flush aggregated packets */
developer77d03a72021-06-06 00:06:00 +08002102 mtk_w32(eth, MTK_LRO_RING_RELINGUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
developerfd40db22021-04-29 10:08:25 +08002103
2104 /* wait for relinquishments done */
2105 for (i = 0; i < 10; i++) {
2106 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
developer77d03a72021-06-06 00:06:00 +08002107 if (val & MTK_LRO_RING_RELINGUISH_DONE) {
developer8051e042022-04-08 13:26:36 +08002108 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08002109 continue;
2110 }
2111 break;
2112 }
2113
2114 /* invalidate lro rings */
developer77d03a72021-06-06 00:06:00 +08002115 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
developerfd40db22021-04-29 10:08:25 +08002116 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2117
2118 /* disable HW LRO */
2119 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2120}
2121
2122static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2123{
2124 u32 reg_val;
2125
developer77d03a72021-06-06 00:06:00 +08002126 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2127 idx += 1;
2128
developerfd40db22021-04-29 10:08:25 +08002129 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2130
2131 /* invalidate the IP setting */
2132 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2133
2134 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2135
2136 /* validate the IP setting */
2137 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2138}
2139
2140static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2141{
2142 u32 reg_val;
2143
developer77d03a72021-06-06 00:06:00 +08002144 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2145 idx += 1;
2146
developerfd40db22021-04-29 10:08:25 +08002147 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2148
2149 /* invalidate the IP setting */
2150 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2151
2152 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2153}
2154
2155static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2156{
2157 int cnt = 0;
2158 int i;
2159
2160 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2161 if (mac->hwlro_ip[i])
2162 cnt++;
2163 }
2164
2165 return cnt;
2166}
2167
2168static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2169 struct ethtool_rxnfc *cmd)
2170{
2171 struct ethtool_rx_flow_spec *fsp =
2172 (struct ethtool_rx_flow_spec *)&cmd->fs;
2173 struct mtk_mac *mac = netdev_priv(dev);
2174 struct mtk_eth *eth = mac->hw;
2175 int hwlro_idx;
2176
2177 if ((fsp->flow_type != TCP_V4_FLOW) ||
2178 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2179 (fsp->location > 1))
2180 return -EINVAL;
2181
2182 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2183 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2184
2185 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2186
2187 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2188
2189 return 0;
2190}
2191
2192static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2193 struct ethtool_rxnfc *cmd)
2194{
2195 struct ethtool_rx_flow_spec *fsp =
2196 (struct ethtool_rx_flow_spec *)&cmd->fs;
2197 struct mtk_mac *mac = netdev_priv(dev);
2198 struct mtk_eth *eth = mac->hw;
2199 int hwlro_idx;
2200
2201 if (fsp->location > 1)
2202 return -EINVAL;
2203
2204 mac->hwlro_ip[fsp->location] = 0;
2205 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2206
2207 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2208
2209 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2210
2211 return 0;
2212}
2213
2214static void mtk_hwlro_netdev_disable(struct net_device *dev)
2215{
2216 struct mtk_mac *mac = netdev_priv(dev);
2217 struct mtk_eth *eth = mac->hw;
2218 int i, hwlro_idx;
2219
2220 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2221 mac->hwlro_ip[i] = 0;
2222 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2223
2224 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2225 }
2226
2227 mac->hwlro_ip_cnt = 0;
2228}
2229
2230static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2231 struct ethtool_rxnfc *cmd)
2232{
2233 struct mtk_mac *mac = netdev_priv(dev);
2234 struct ethtool_rx_flow_spec *fsp =
2235 (struct ethtool_rx_flow_spec *)&cmd->fs;
2236
2237	/* only the TCP destination IPv4 address is meaningful; other fields are ignored */
2238 fsp->flow_type = TCP_V4_FLOW;
2239 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2240 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2241
2242 fsp->h_u.tcp_ip4_spec.ip4src = 0;
2243 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2244 fsp->h_u.tcp_ip4_spec.psrc = 0;
2245 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2246 fsp->h_u.tcp_ip4_spec.pdst = 0;
2247 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2248 fsp->h_u.tcp_ip4_spec.tos = 0;
2249 fsp->m_u.tcp_ip4_spec.tos = 0xff;
2250
2251 return 0;
2252}
2253
2254static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2255 struct ethtool_rxnfc *cmd,
2256 u32 *rule_locs)
2257{
2258 struct mtk_mac *mac = netdev_priv(dev);
2259 int cnt = 0;
2260 int i;
2261
2262 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2263 if (mac->hwlro_ip[i]) {
2264 rule_locs[cnt] = i;
2265 cnt++;
2266 }
2267 }
2268
2269 cmd->rule_cnt = cnt;
2270
2271 return 0;
2272}
2273
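/* Configure receive-side scaling: enable multi-ring RX (plus the RSS delay
 * interrupt on pre-NETSYS-v2 SoCs), select the IPv4/IPv6 hash types, fill
 * the indirection table, then enable RSS under the CFG_REQ pause/release
 * handshake and route the extra RX ring interrupt to its own group.
 */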
developer18f46a82021-07-20 21:08:21 +08002274static int mtk_rss_init(struct mtk_eth *eth)
2275{
2276 u32 val;
2277
2278 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2279 /* Set RSS rings to PSE modes */
2280 val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(1));
2281 val |= MTK_RING_PSE_MODE;
2282 mtk_w32(eth, val, MTK_LRO_CTRL_DW2_CFG(1));
2283
2284 /* Enable non-lro multiple rx */
2285 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2286 val |= MTK_NON_LRO_MULTI_EN;
2287 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2288
2289		/* Enable RSS delay interrupt support */
2290 val |= MTK_LRO_DLY_INT_EN;
2291 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2292
2293 /* Set RSS delay config int ring1 */
2294		/* Set the RSS delay interrupt config for ring 1 */
2295 }
2296
2297 /* Hash Type */
2298 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2299 val |= MTK_RSS_IPV4_STATIC_HASH;
2300 val |= MTK_RSS_IPV6_STATIC_HASH;
2301 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2302
2303 /* Select the size of indirection table */
2304 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW0);
2305 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW1);
2306 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW2);
2307 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW3);
2308 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW4);
2309 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW5);
2310 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW6);
2311 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW7);
2312
2313 /* Pause */
2314 val |= MTK_RSS_CFG_REQ;
2315 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2316
2317	/* Enable RSS */
2318 val |= MTK_RSS_EN;
2319 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2320
2321 /* Release pause */
2322 val &= ~(MTK_RSS_CFG_REQ);
2323 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2324
2325 /* Set perRSS GRP INT */
2326	/* Set the per-ring RSS group interrupt */
2327
2328 /* Set GRP INT */
2329 mtk_w32(eth, 0x21021030, MTK_FE_INT_GRP);
2330
2331 return 0;
2332}
2333
2334static void mtk_rss_uninit(struct mtk_eth *eth)
2335{
2336 u32 val;
2337
2338 /* Pause */
2339 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2340 val |= MTK_RSS_CFG_REQ;
2341 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2342
2343	/* Disable RSS */
2344 val &= ~(MTK_RSS_EN);
2345 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2346
2347 /* Release pause */
2348 val &= ~(MTK_RSS_CFG_REQ);
2349 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2350}
2351
developerfd40db22021-04-29 10:08:25 +08002352static netdev_features_t mtk_fix_features(struct net_device *dev,
2353 netdev_features_t features)
2354{
2355 if (!(features & NETIF_F_LRO)) {
2356 struct mtk_mac *mac = netdev_priv(dev);
2357 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2358
2359 if (ip_cnt) {
2360 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
2361
2362 features |= NETIF_F_LRO;
2363 }
2364 }
2365
2366 if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
2367 netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n");
2368
2369 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2370 }
2371
2372 return features;
2373}
2374
2375static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2376{
2377 struct mtk_mac *mac = netdev_priv(dev);
2378 struct mtk_eth *eth = mac->hw;
2379 int err = 0;
2380
2381 if (!((dev->features ^ features) & MTK_SET_FEATURES))
2382 return 0;
2383
2384 if (!(features & NETIF_F_LRO))
2385 mtk_hwlro_netdev_disable(dev);
2386
2387 if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
2388 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
2389 else
2390 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
2391
2392 return err;
2393}
2394
2395/* wait for DMA to finish whatever it is doing before we start using it again */
2396static int mtk_dma_busy_wait(struct mtk_eth *eth)
2397{
2398 unsigned long t_start = jiffies;
2399
2400 while (1) {
2401 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2402 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
2403 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2404 return 0;
2405 } else {
2406 if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
2407 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2408 return 0;
2409 }
2410
2411 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
2412 break;
2413 }
2414
2415 dev_err(eth->dev, "DMA init timeout\n");
2416 return -1;
2417}
2418
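/* Bring up all DMA resources in order: wait for the engines to go idle,
 * set up the QDMA scratch/FQ memory, then allocate the TX ring, the QDMA
 * RX ring, the normal PDMA RX ring and, when enabled, the HW LRO and RSS
 * rings. The QDMA random early drop thresholds are configured last.
 */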
2419static int mtk_dma_init(struct mtk_eth *eth)
2420{
2421 int err;
2422 u32 i;
2423
2424 if (mtk_dma_busy_wait(eth))
2425 return -EBUSY;
2426
2427 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2428 /* QDMA needs scratch memory for internal reordering of the
2429 * descriptors
2430 */
2431 err = mtk_init_fq_dma(eth);
2432 if (err)
2433 return err;
2434 }
2435
2436 err = mtk_tx_alloc(eth);
2437 if (err)
2438 return err;
2439
2440 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2441 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2442 if (err)
2443 return err;
2444 }
2445
2446 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2447 if (err)
2448 return err;
2449
2450 if (eth->hwlro) {
developer77d03a72021-06-06 00:06:00 +08002451 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ? 4 : 1;
2452 for (; i < MTK_MAX_RX_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002453 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2454 if (err)
2455 return err;
2456 }
2457 err = mtk_hwlro_rx_init(eth);
2458 if (err)
2459 return err;
2460 }
2461
developer18f46a82021-07-20 21:08:21 +08002462 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2463 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2464 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_NORMAL);
2465 if (err)
2466 return err;
2467 }
2468 err = mtk_rss_init(eth);
2469 if (err)
2470 return err;
2471 }
2472
developerfd40db22021-04-29 10:08:25 +08002473 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2474 /* Enable random early drop and set drop threshold
2475 * automatically
2476 */
2477 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2478 FC_THRES_MIN, MTK_QDMA_FC_THRES);
2479 mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
2480 }
2481
2482 return 0;
2483}
2484
2485static void mtk_dma_free(struct mtk_eth *eth)
2486{
developere9356982022-07-04 09:03:20 +08002487 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002488 int i;
2489
2490 for (i = 0; i < MTK_MAC_COUNT; i++)
2491 if (eth->netdev[i])
2492 netdev_reset_queue(eth->netdev[i]);
2493	if (!eth->soc->has_sram && eth->scratch_ring) {
2494 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002495 MTK_DMA_SIZE * soc->txrx.txd_size,
2496 eth->scratch_ring, eth->phy_scratch_ring);
developerfd40db22021-04-29 10:08:25 +08002497 eth->scratch_ring = NULL;
2498 eth->phy_scratch_ring = 0;
2499 }
2500 mtk_tx_clean(eth);
developerb3ce86f2022-06-30 13:31:47 +08002501	mtk_rx_clean(eth, &eth->rx_ring[0], eth->soc->has_sram);
developerfd40db22021-04-29 10:08:25 +08002502	mtk_rx_clean(eth, &eth->rx_ring_qdma, 0);
2503
2504 if (eth->hwlro) {
2505 mtk_hwlro_rx_uninit(eth);
developer77d03a72021-06-06 00:06:00 +08002506
2507 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ? 4 : 1;
2508 for (; i < MTK_MAX_RX_RING_NUM; i++)
2509 mtk_rx_clean(eth, &eth->rx_ring[i], 0);
developerfd40db22021-04-29 10:08:25 +08002510 }
2511
developer18f46a82021-07-20 21:08:21 +08002512 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2513 mtk_rss_uninit(eth);
2514
2515 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
2516 mtk_rx_clean(eth, &eth->rx_ring[i], 1);
2517 }
2518
developer94008d92021-09-23 09:47:41 +08002519 if (eth->scratch_head) {
2520 kfree(eth->scratch_head);
2521 eth->scratch_head = NULL;
2522 }
developerfd40db22021-04-29 10:08:25 +08002523}
2524
2525static void mtk_tx_timeout(struct net_device *dev)
2526{
2527 struct mtk_mac *mac = netdev_priv(dev);
2528 struct mtk_eth *eth = mac->hw;
2529
2530 eth->netdev[mac->id]->stats.tx_errors++;
2531 netif_err(eth, tx_err, dev,
2532 "transmit timed out\n");
developer8051e042022-04-08 13:26:36 +08002533
2534 if (atomic_read(&reset_lock) == 0)
2535 schedule_work(&eth->pending_work);
developerfd40db22021-04-29 10:08:25 +08002536}
2537
developer18f46a82021-07-20 21:08:21 +08002538static irqreturn_t mtk_handle_irq_rx(int irq, void *priv)
developerfd40db22021-04-29 10:08:25 +08002539{
developer18f46a82021-07-20 21:08:21 +08002540 struct mtk_napi *rx_napi = priv;
2541 struct mtk_eth *eth = rx_napi->eth;
2542 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08002543
developer18f46a82021-07-20 21:08:21 +08002544 if (likely(napi_schedule_prep(&rx_napi->napi))) {
developer18f46a82021-07-20 21:08:21 +08002545 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(ring->ring_no));
developer6bbe70d2021-08-06 09:34:55 +08002546 __napi_schedule(&rx_napi->napi);
developerfd40db22021-04-29 10:08:25 +08002547 }
2548
2549 return IRQ_HANDLED;
2550}
2551
2552static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
2553{
2554 struct mtk_eth *eth = _eth;
2555
2556 if (likely(napi_schedule_prep(&eth->tx_napi))) {
developerfd40db22021-04-29 10:08:25 +08002557 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer6bbe70d2021-08-06 09:34:55 +08002558 __napi_schedule(&eth->tx_napi);
developerfd40db22021-04-29 10:08:25 +08002559 }
2560
2561 return IRQ_HANDLED;
2562}
2563
2564static irqreturn_t mtk_handle_irq(int irq, void *_eth)
2565{
2566 struct mtk_eth *eth = _eth;
2567
developer18f46a82021-07-20 21:08:21 +08002568 if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT(0)) {
2569 if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT(0))
2570 mtk_handle_irq_rx(irq, &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08002571 }
2572 if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
2573 if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
2574 mtk_handle_irq_tx(irq, _eth);
2575 }
2576
2577 return IRQ_HANDLED;
2578}
2579
developera2613e62022-07-01 18:29:37 +08002580static irqreturn_t mtk_handle_irq_fixed_link(int irq, void *_mac)
2581{
2582 struct mtk_mac *mac = _mac;
2583 struct mtk_eth *eth = mac->hw;
2584 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
2585 struct net_device *dev = phylink_priv->dev;
2586 int link_old, link_new;
2587
2588 // clear interrupt status for gpy211
2589 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
2590
2591 link_old = phylink_priv->link;
2592 link_new = _mtk_mdio_read(eth, phylink_priv->phyaddr, MII_BMSR) & BMSR_LSTATUS;
2593
2594 if (link_old != link_new) {
2595 phylink_priv->link = link_new;
2596 if (link_new) {
2597 printk("phylink.%d %s: Link is Up\n", phylink_priv->id, dev->name);
2598 if (dev)
2599 netif_carrier_on(dev);
2600 } else {
2601 printk("phylink.%d %s: Link is Down\n", phylink_priv->id, dev->name);
2602 if (dev)
2603 netif_carrier_off(dev);
2604 }
2605 }
2606
2607 return IRQ_HANDLED;
2608}
2609
developerfd40db22021-04-29 10:08:25 +08002610#ifdef CONFIG_NET_POLL_CONTROLLER
2611static void mtk_poll_controller(struct net_device *dev)
2612{
2613 struct mtk_mac *mac = netdev_priv(dev);
2614 struct mtk_eth *eth = mac->hw;
2615
2616 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002617 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
2618 mtk_handle_irq_rx(eth->irq[2], &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08002619 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002620 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08002621}
2622#endif
2623
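/* Initialize the rings via mtk_dma_init() and then enable the TX/RX DMA
 * engines; NETSYS v2 SoCs additionally get the extended QDMA burst and
 * write-back options, and LRO-capable SoCs turn on RX_DMA_LRO_EN.
 */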
2624static int mtk_start_dma(struct mtk_eth *eth)
2625{
2626 u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
developer77d03a72021-06-06 00:06:00 +08002627 int val, err;
developerfd40db22021-04-29 10:08:25 +08002628
2629 err = mtk_dma_init(eth);
2630 if (err) {
2631 mtk_dma_free(eth);
2632 return err;
2633 }
2634
2635 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
developer15d0d282021-07-14 16:40:44 +08002636 val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
developer19d84562022-04-21 17:01:06 +08002637 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2638 val &= ~MTK_RESV_BUF_MASK;
developerfd40db22021-04-29 10:08:25 +08002639 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002640 val | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08002641 MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
2642 MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
2643 MTK_RESV_BUF | MTK_WCOMP_EN |
2644 MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
developer1ac65932022-07-19 17:23:32 +08002645 MTK_RX_2B_OFFSET, MTK_QDMA_GLO_CFG);
developer19d84562022-04-21 17:01:06 +08002646 }
developerfd40db22021-04-29 10:08:25 +08002647 else
2648 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002649 val | MTK_TX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08002650 MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
2651 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
2652 MTK_RX_BT_32DWORDS,
2653 MTK_QDMA_GLO_CFG);
2654
developer15d0d282021-07-14 16:40:44 +08002655 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
developerfd40db22021-04-29 10:08:25 +08002656 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002657 val | MTK_RX_DMA_EN | rx_2b_offset |
developerfd40db22021-04-29 10:08:25 +08002658 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
2659 MTK_PDMA_GLO_CFG);
2660 } else {
2661 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2662 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
2663 MTK_PDMA_GLO_CFG);
2664 }
2665
developer77d03a72021-06-06 00:06:00 +08002666 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) && eth->hwlro) {
2667 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
2668 mtk_w32(eth, val | MTK_RX_DMA_LRO_EN, MTK_PDMA_GLO_CFG);
2669 }
2670
developerfd40db22021-04-29 10:08:25 +08002671 return 0;
2672}
2673
developer8051e042022-04-08 13:26:36 +08002674void mtk_gdm_config(struct mtk_eth *eth, u32 config)
developerfd40db22021-04-29 10:08:25 +08002675{
2676 int i;
2677
2678 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2679 return;
2680
2681 for (i = 0; i < MTK_MAC_COUNT; i++) {
2682 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
2683
2684		/* by default, set up the forward port to send frames to the PDMA */
2685 val &= ~0xffff;
2686
2687 /* Enable RX checksum */
2688 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
2689
2690 val |= config;
2691
2692 if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
2693 val |= MTK_GDMA_SPECIAL_TAG;
2694
2695 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
2696 }
developerfd40db22021-04-29 10:08:25 +08002697}
2698
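/* ndo_open: attach the PHY through phylink and, for the first user of the
 * shared DMA (tracked by dma_refcnt), start the DMA engines, enable NAPI
 * and the TX/RX interrupts, and point the GDM ports at the PDMA. When a
 * GPY211 descriptor is present, its link change interrupt is armed further
 * below.
 */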
2699static int mtk_open(struct net_device *dev)
2700{
2701 struct mtk_mac *mac = netdev_priv(dev);
2702 struct mtk_eth *eth = mac->hw;
developera2613e62022-07-01 18:29:37 +08002703 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
developer18f46a82021-07-20 21:08:21 +08002704 int err, i;
developer3a5969e2022-02-09 15:36:36 +08002705 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08002706
2707 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
2708 if (err) {
2709 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
2710 err);
2711 return err;
2712 }
2713
2714 /* we run 2 netdevs on the same dma ring so we only bring it up once */
2715 if (!refcount_read(&eth->dma_refcnt)) {
2716 int err = mtk_start_dma(eth);
2717
2718 if (err)
2719 return err;
2720
2721 mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
2722
2723		/* Instruct the CDM to parse the MTK special tag from the CPU */
2724 if (netdev_uses_dsa(dev)) {
2725 u32 val;
2726 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
2727 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
2728 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
2729 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
2730 }
2731
2732 napi_enable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08002733 napi_enable(&eth->rx_napi[0].napi);
developerfd40db22021-04-29 10:08:25 +08002734 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002735 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
2736
2737 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2738 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2739 napi_enable(&eth->rx_napi[i].napi);
2740 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(i));
2741 }
2742 }
2743
developerfd40db22021-04-29 10:08:25 +08002744 refcount_set(&eth->dma_refcnt, 1);
2745 }
2746 else
2747 refcount_inc(&eth->dma_refcnt);
2748
developera2613e62022-07-01 18:29:37 +08002749 if (phylink_priv->desc) {
2750		/* Notice: this programming sequence is only for the GPY211 single
2751		 * PHY chip. If the single PHY chip is not a GPY211, contact your
2752		 * PHY chip vendor for details on:
2753		 * - how to enable the link status change interrupt
2754		 * - how to clear the interrupt source
2755		 */
2756
2757 // clear interrupt source for gpy211
2758 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
2759
2760 // enable link status change interrupt for gpy211
2761 _mtk_mdio_write(eth, phylink_priv->phyaddr, 0x19, 0x0001);
2762
2763 phylink_priv->dev = dev;
2764
2765 // override dev pointer for single PHY chip 0
2766 if (phylink_priv->id == 0) {
2767 struct net_device *tmp;
2768
2769 tmp = __dev_get_by_name(&init_net, phylink_priv->label);
2770 if (tmp)
2771 phylink_priv->dev = tmp;
2772 else
2773 phylink_priv->dev = NULL;
2774 }
2775 }
2776
developerfd40db22021-04-29 10:08:25 +08002777 phylink_start(mac->phylink);
2778 netif_start_queue(dev);
developer3a5969e2022-02-09 15:36:36 +08002779 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer793f7b42022-05-20 13:54:51 +08002780 if (!phy_node && eth->sgmii->regmap[mac->id]) {
developer1a63ef92022-04-15 17:17:32 +08002781 regmap_write(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
developer3a5969e2022-02-09 15:36:36 +08002782 }
developerfd40db22021-04-29 10:08:25 +08002783 return 0;
2784}
2785
2786static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
2787{
2788 u32 val;
2789 int i;
2790
2791 /* stop the dma engine */
2792 spin_lock_bh(&eth->page_lock);
2793 val = mtk_r32(eth, glo_cfg);
2794 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
2795 glo_cfg);
2796 spin_unlock_bh(&eth->page_lock);
2797
2798 /* wait for dma stop */
2799 for (i = 0; i < 10; i++) {
2800 val = mtk_r32(eth, glo_cfg);
2801 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
developer8051e042022-04-08 13:26:36 +08002802 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08002803 continue;
2804 }
2805 break;
2806 }
2807}
2808
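/* ndo_stop: power down the PHY or the SGMII PHYA, disable GMAC RX and stop
 * phylink; only when the last user of the shared DMA is gone are the DMA
 * engines stopped, NAPI and interrupts disabled and the rings freed.
 */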
2809static int mtk_stop(struct net_device *dev)
2810{
2811 struct mtk_mac *mac = netdev_priv(dev);
2812 struct mtk_eth *eth = mac->hw;
developer18f46a82021-07-20 21:08:21 +08002813 int i;
developer3a5969e2022-02-09 15:36:36 +08002814 u32 val = 0;
2815 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08002816
2817 netif_tx_disable(dev);
2818
developer3a5969e2022-02-09 15:36:36 +08002819 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
2820 if (phy_node) {
2821 val = _mtk_mdio_read(eth, 0, 0);
2822 val |= BMCR_PDOWN;
2823 _mtk_mdio_write(eth, 0, 0, val);
developer793f7b42022-05-20 13:54:51 +08002824 } else if (eth->sgmii->regmap[mac->id]) {
developer1a63ef92022-04-15 17:17:32 +08002825 regmap_read(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
developer3a5969e2022-02-09 15:36:36 +08002826 val |= SGMII_PHYA_PWD;
developer1a63ef92022-04-15 17:17:32 +08002827 regmap_write(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
developer3a5969e2022-02-09 15:36:36 +08002828 }
2829
2830 //GMAC RX disable
2831 val = mtk_r32(eth, MTK_MAC_MCR(mac->id));
2832 mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(mac->id));
2833
2834 phylink_stop(mac->phylink);
2835
developerfd40db22021-04-29 10:08:25 +08002836 phylink_disconnect_phy(mac->phylink);
2837
2838 /* only shutdown DMA if this is the last user */
2839 if (!refcount_dec_and_test(&eth->dma_refcnt))
2840 return 0;
2841
2842 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
2843
2844 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002845 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08002846 napi_disable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08002847 napi_disable(&eth->rx_napi[0].napi);
2848
2849 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2850 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2851 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(i));
2852 napi_disable(&eth->rx_napi[i].napi);
2853 }
2854 }
developerfd40db22021-04-29 10:08:25 +08002855
2856 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2857 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
2858 mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
2859
2860 mtk_dma_free(eth);
2861
2862 return 0;
2863}
2864
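/* Assert the requested reset bits in ETHSYS_RSTCTRL, poll until the
 * hardware reports them as set (recording a cold reset event), then
 * de-assert them and give the blocks time to come back up.
 */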
developer8051e042022-04-08 13:26:36 +08002865void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
developerfd40db22021-04-29 10:08:25 +08002866{
developer8051e042022-04-08 13:26:36 +08002867 u32 val = 0, i = 0;
developerfd40db22021-04-29 10:08:25 +08002868
developerfd40db22021-04-29 10:08:25 +08002869 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
developer8051e042022-04-08 13:26:36 +08002870 reset_bits, reset_bits);
2871
2872 while (i++ < 5000) {
2873 mdelay(1);
2874 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
2875
2876 if ((val & reset_bits) == reset_bits) {
2877 mtk_reset_event_update(eth, MTK_EVENT_COLD_CNT);
2878 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2879 reset_bits, ~reset_bits);
2880 break;
2881 }
2882 }
2883
developerfd40db22021-04-29 10:08:25 +08002884 mdelay(10);
2885}
2886
2887static void mtk_clk_disable(struct mtk_eth *eth)
2888{
2889 int clk;
2890
2891 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
2892 clk_disable_unprepare(eth->clks[clk]);
2893}
2894
2895static int mtk_clk_enable(struct mtk_eth *eth)
2896{
2897 int clk, ret;
2898
2899 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
2900 ret = clk_prepare_enable(eth->clks[clk]);
2901 if (ret)
2902 goto err_disable_clks;
2903 }
2904
2905 return 0;
2906
2907err_disable_clks:
2908 while (--clk >= 0)
2909 clk_disable_unprepare(eth->clks[clk]);
2910
2911 return ret;
2912}
2913
developer18f46a82021-07-20 21:08:21 +08002914static int mtk_napi_init(struct mtk_eth *eth)
2915{
2916 struct mtk_napi *rx_napi = &eth->rx_napi[0];
2917 int i;
2918
2919 rx_napi->eth = eth;
2920 rx_napi->rx_ring = &eth->rx_ring[0];
2921 rx_napi->irq_grp_no = 2;
2922
2923 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2924 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2925 rx_napi = &eth->rx_napi[i];
2926 rx_napi->eth = eth;
2927 rx_napi->rx_ring = &eth->rx_ring[i];
2928 rx_napi->irq_grp_no = 2 + i;
2929 }
2930 }
2931
2932 return 0;
2933}
2934
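/* One-time or reset-time hardware init: when not called from the reset
 * path, enable runtime PM and the clocks; then run a warm or cold frame
 * engine reset, switch NETSYS v2 SoCs to PDMAv2, force every GMAC
 * link-down, program the interrupt delay and grouping, and set the PSE
 * input/output queue thresholds on NETSYS v2.
 */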
developer8051e042022-04-08 13:26:36 +08002935static int mtk_hw_init(struct mtk_eth *eth, u32 type)
developerfd40db22021-04-29 10:08:25 +08002936{
developer8051e042022-04-08 13:26:36 +08002937 int i, ret = 0;
developerfd40db22021-04-29 10:08:25 +08002938
developer8051e042022-04-08 13:26:36 +08002939 pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
2940 atomic_read(&reset_lock), atomic_read(&force));
developerfd40db22021-04-29 10:08:25 +08002941
developer8051e042022-04-08 13:26:36 +08002942 if (atomic_read(&reset_lock) == 0) {
2943 if (test_and_set_bit(MTK_HW_INIT, &eth->state))
2944 return 0;
developerfd40db22021-04-29 10:08:25 +08002945
developer8051e042022-04-08 13:26:36 +08002946 pm_runtime_enable(eth->dev);
2947 pm_runtime_get_sync(eth->dev);
2948
2949 ret = mtk_clk_enable(eth);
2950 if (ret)
2951 goto err_disable_pm;
2952 }
developerfd40db22021-04-29 10:08:25 +08002953
2954 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2955 ret = device_reset(eth->dev);
2956 if (ret) {
2957 dev_err(eth->dev, "MAC reset failed!\n");
2958 goto err_disable_pm;
2959 }
2960
2961 /* enable interrupt delay for RX */
2962 mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
2963
2964 /* disable delay and normal interrupt */
2965 mtk_tx_irq_disable(eth, ~0);
2966 mtk_rx_irq_disable(eth, ~0);
2967
2968 return 0;
2969 }
2970
developer8051e042022-04-08 13:26:36 +08002971 pr_info("[%s] execute fe %s reset\n", __func__,
2972 (type == MTK_TYPE_WARM_RESET) ? "warm" : "cold");
developer545abf02021-07-15 17:47:01 +08002973
developer8051e042022-04-08 13:26:36 +08002974 if (type == MTK_TYPE_WARM_RESET)
2975 mtk_eth_warm_reset(eth);
developer545abf02021-07-15 17:47:01 +08002976 else
developer8051e042022-04-08 13:26:36 +08002977 mtk_eth_cold_reset(eth);
developer545abf02021-07-15 17:47:01 +08002978
2979 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developer545abf02021-07-15 17:47:01 +08002980 /* Set FE to PDMAv2 if necessary */
developerfd40db22021-04-29 10:08:25 +08002981 mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
developer545abf02021-07-15 17:47:01 +08002982 }
developerfd40db22021-04-29 10:08:25 +08002983
2984 if (eth->pctl) {
2985 /* Set GE2 driving and slew rate */
2986 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
2987
2988 /* set GE2 TDSEL */
2989 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
2990
2991 /* set GE2 TUNE */
2992 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
2993 }
2994
2995 /* Set linkdown as the default for each GMAC. Its own MCR would be set
2996 * up with the more appropriate value when mtk_mac_config call is being
2997 * invoked.
2998 */
2999 for (i = 0; i < MTK_MAC_COUNT; i++)
3000 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3001
3002	/* Enable RX VLAN offloading */
developer41294e32021-05-07 16:11:23 +08003003 if (eth->soc->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
3004 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3005 else
3006 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
developerfd40db22021-04-29 10:08:25 +08003007
3008 /* enable interrupt delay for RX/TX */
3009 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
3010 mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);
3011
3012 mtk_tx_irq_disable(eth, ~0);
3013 mtk_rx_irq_disable(eth, ~0);
3014
3015 /* FE int grouping */
3016 mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
developer18f46a82021-07-20 21:08:21 +08003017 mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_PDMA_INT_GRP2);
developerfd40db22021-04-29 10:08:25 +08003018 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
developer18f46a82021-07-20 21:08:21 +08003019 mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_QDMA_INT_GRP2);
developer8051e042022-04-08 13:26:36 +08003020 mtk_w32(eth, 0x21021003, MTK_FE_INT_GRP);
developerbe971722022-05-23 13:51:05 +08003021 mtk_w32(eth, MTK_FE_INT_TSO_FAIL |
developer8051e042022-04-08 13:26:36 +08003022 MTK_FE_INT_TSO_ILLEGAL | MTK_FE_INT_TSO_ALIGN |
3023 MTK_FE_INT_RFIFO_OV | MTK_FE_INT_RFIFO_UF, MTK_FE_INT_ENABLE);
developerfd40db22021-04-29 10:08:25 +08003024
developera2bdbd52021-05-31 19:10:17 +08003025 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfef9efd2021-06-16 18:28:09 +08003026 /* PSE Free Queue Flow Control */
3027 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3028
developer459b78e2022-07-01 17:25:10 +08003029 /* PSE should not drop port8 and port9 packets from WDMA Tx */
3030 mtk_w32(eth, 0x00000300, PSE_NO_DROP_CFG);
3031
3032 /* PSE should drop p8 and p9 packets when WDMA Rx ring full*/
3033 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
developer81bcad32021-07-15 14:14:38 +08003034
developerfef9efd2021-06-16 18:28:09 +08003035 /* PSE config input queue threshold */
developerfd40db22021-04-29 10:08:25 +08003036 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3037 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3038 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3039 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3040 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3041 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3042 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
developerfd5f9152022-01-05 16:29:42 +08003043 mtk_w32(eth, 0x002a000e, PSE_IQ_REV(8));
developerfd40db22021-04-29 10:08:25 +08003044
developerfef9efd2021-06-16 18:28:09 +08003045 /* PSE config output queue threshold */
developerfd40db22021-04-29 10:08:25 +08003046 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3047 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3048 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3049 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3050 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3051 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3052 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3053 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
developerfef9efd2021-06-16 18:28:09 +08003054
3055 /* GDM and CDM Threshold */
3056 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3057 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3058 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3059 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3060 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3061 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
developerfd40db22021-04-29 10:08:25 +08003062 }
3063
3064 return 0;
3065
3066err_disable_pm:
3067 pm_runtime_put_sync(eth->dev);
3068 pm_runtime_disable(eth->dev);
3069
3070 return ret;
3071}
3072
3073static int mtk_hw_deinit(struct mtk_eth *eth)
3074{
3075 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
3076 return 0;
3077
3078 mtk_clk_disable(eth);
3079
3080 pm_runtime_put_sync(eth->dev);
3081 pm_runtime_disable(eth->dev);
3082
3083 return 0;
3084}
3085
3086static int __init mtk_init(struct net_device *dev)
3087{
3088 struct mtk_mac *mac = netdev_priv(dev);
3089 struct mtk_eth *eth = mac->hw;
3090 const char *mac_addr;
3091
3092 mac_addr = of_get_mac_address(mac->of_node);
3093 if (!IS_ERR(mac_addr))
3094 ether_addr_copy(dev->dev_addr, mac_addr);
3095
3096 /* If the mac address is invalid, use random mac address */
3097 if (!is_valid_ether_addr(dev->dev_addr)) {
3098 eth_hw_addr_random(dev);
3099 dev_err(eth->dev, "generated random MAC address %pM\n",
3100 dev->dev_addr);
3101 }
3102
3103 return 0;
3104}
3105
3106static void mtk_uninit(struct net_device *dev)
3107{
3108 struct mtk_mac *mac = netdev_priv(dev);
3109 struct mtk_eth *eth = mac->hw;
3110
3111 phylink_disconnect_phy(mac->phylink);
3112 mtk_tx_irq_disable(eth, ~0);
3113 mtk_rx_irq_disable(eth, ~0);
3114}
3115
3116static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3117{
3118 struct mtk_mac *mac = netdev_priv(dev);
3119
3120 switch (cmd) {
3121 case SIOCGMIIPHY:
3122 case SIOCGMIIREG:
3123 case SIOCSMIIREG:
3124 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
3125 default:
3126		/* by default, invoke the mtk_eth_dbg handler */
3127 return mtk_do_priv_ioctl(dev, ifr, cmd);
3128 break;
3129 }
3130
3131 return -EOPNOTSUPP;
3132}
3133
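/* Deferred frame engine recovery (SER): force FE ports 3/4 link-down,
 * prepare the PPE and FE for reset, notify the Wi-Fi driver and wait for
 * its SER handling, stop every netdev, warm-reset the hardware through
 * mtk_hw_init(), reopen the devices and finally signal that the NAT and
 * Wi-Fi reset has completed.
 */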
3134static void mtk_pending_work(struct work_struct *work)
3135{
3136 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
developer8051e042022-04-08 13:26:36 +08003137 struct device_node *phy_node = NULL;
3138 struct mtk_mac *mac = NULL;
3139 int err, i = 0;
developerfd40db22021-04-29 10:08:25 +08003140 unsigned long restart = 0;
developer8051e042022-04-08 13:26:36 +08003141 u32 val = 0;
3142
3143 atomic_inc(&reset_lock);
3144 val = mtk_r32(eth, MTK_FE_INT_STATUS);
3145 if (!mtk_check_reset_event(eth, val)) {
3146 atomic_dec(&reset_lock);
3147 pr_info("[%s] No need to do FE reset !\n", __func__);
3148 return;
3149 }
developerfd40db22021-04-29 10:08:25 +08003150
3151 rtnl_lock();
3152
developer8051e042022-04-08 13:26:36 +08003153	/* Disable FE P3 and P4 */
3154 val = mtk_r32(eth, MTK_FE_GLO_CFG);
3155 val |= MTK_FE_LINK_DOWN_P3;
3156 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3157 val |= MTK_FE_LINK_DOWN_P4;
3158 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3159
3160 /* Adjust PPE configurations to prepare for reset */
3161 mtk_prepare_reset_ppe(eth, 0);
3162 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3163 mtk_prepare_reset_ppe(eth, 1);
3164
3165 /* Adjust FE configurations to prepare for reset */
3166 mtk_prepare_reset_fe(eth);
3167
3168 /* Trigger Wifi SER reset */
3169 call_netdevice_notifiers(MTK_FE_START_RESET, eth->netdev[0]);
3170 rtnl_unlock();
3171 wait_for_completion_timeout(&wait_ser_done, 5000);
3172 rtnl_lock();
developerfd40db22021-04-29 10:08:25 +08003173
3174 while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
3175 cpu_relax();
3176
developer8051e042022-04-08 13:26:36 +08003177 del_timer_sync(&eth->mtk_dma_monitor_timer);
3178 pr_info("[%s] mtk_stop starts !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003179 /* stop all devices to make sure that dma is properly shut down */
3180 for (i = 0; i < MTK_MAC_COUNT; i++) {
3181 if (!eth->netdev[i])
3182 continue;
3183 mtk_stop(eth->netdev[i]);
3184 __set_bit(i, &restart);
3185 }
developer8051e042022-04-08 13:26:36 +08003186 pr_info("[%s] mtk_stop ends !\n", __func__);
3187 mdelay(15);
developerfd40db22021-04-29 10:08:25 +08003188
3189 if (eth->dev->pins)
3190 pinctrl_select_state(eth->dev->pins->p,
3191 eth->dev->pins->default_state);
developer8051e042022-04-08 13:26:36 +08003192
3193 pr_info("[%s] mtk_hw_init starts !\n", __func__);
3194 mtk_hw_init(eth, MTK_TYPE_WARM_RESET);
3195 pr_info("[%s] mtk_hw_init ends !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003196
3197 /* restart DMA and enable IRQs */
3198 for (i = 0; i < MTK_MAC_COUNT; i++) {
3199 if (!test_bit(i, &restart))
3200 continue;
3201 err = mtk_open(eth->netdev[i]);
3202 if (err) {
3203 netif_alert(eth, ifup, eth->netdev[i],
3204 "Driver up/down cycle failed, closing device.\n");
3205 dev_close(eth->netdev[i]);
3206 }
3207 }
3208
developer8051e042022-04-08 13:26:36 +08003209 /* Set KA tick select */
3210 mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0, MTK_PPE_TB_CFG(0));
3211 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3212 mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0, MTK_PPE_TB_CFG(1));
3213
3214	/* Enable FE P3 and P4 */
3215 val = mtk_r32(eth, MTK_FE_GLO_CFG);
3216 val &= ~MTK_FE_LINK_DOWN_P3;
3217 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3218 val &= ~MTK_FE_LINK_DOWN_P4;
3219 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3220
3221 /* Power up sgmii */
3222 for (i = 0; i < MTK_MAC_COUNT; i++) {
3223 mac = netdev_priv(eth->netdev[i]);
3224 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer793f7b42022-05-20 13:54:51 +08003225 if (!phy_node && eth->sgmii->regmap[i]) {
developer8051e042022-04-08 13:26:36 +08003226 mtk_gmac_sgmii_path_setup(eth, i);
3227 regmap_write(eth->sgmii->regmap[i], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
3228 }
3229 }
3230
3231 call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE, eth->netdev[0]);
3232 pr_info("[%s] HNAT reset done !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003233
developer8051e042022-04-08 13:26:36 +08003234 call_netdevice_notifiers(MTK_FE_RESET_DONE, eth->netdev[0]);
3235 pr_info("[%s] WiFi SER reset done !\n", __func__);
3236
3237 atomic_dec(&reset_lock);
3238 if (atomic_read(&force) > 0)
3239 atomic_dec(&force);
3240
3241 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
3242 eth->mtk_dma_monitor_timer.expires = jiffies;
3243 add_timer(&eth->mtk_dma_monitor_timer);
developerfd40db22021-04-29 10:08:25 +08003244 clear_bit_unlock(MTK_RESETTING, &eth->state);
3245
3246 rtnl_unlock();
3247}
3248
3249static int mtk_free_dev(struct mtk_eth *eth)
3250{
3251 int i;
3252
3253 for (i = 0; i < MTK_MAC_COUNT; i++) {
3254 if (!eth->netdev[i])
3255 continue;
3256 free_netdev(eth->netdev[i]);
3257 }
3258
3259 return 0;
3260}
3261
3262static int mtk_unreg_dev(struct mtk_eth *eth)
3263{
3264 int i;
3265
3266 for (i = 0; i < MTK_MAC_COUNT; i++) {
3267 if (!eth->netdev[i])
3268 continue;
3269 unregister_netdev(eth->netdev[i]);
3270 }
3271
3272 return 0;
3273}
3274
3275static int mtk_cleanup(struct mtk_eth *eth)
3276{
3277 mtk_unreg_dev(eth);
3278 mtk_free_dev(eth);
3279 cancel_work_sync(&eth->pending_work);
3280
3281 return 0;
3282}
3283
3284static int mtk_get_link_ksettings(struct net_device *ndev,
3285 struct ethtool_link_ksettings *cmd)
3286{
3287 struct mtk_mac *mac = netdev_priv(ndev);
3288
3289 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3290 return -EBUSY;
3291
3292 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
3293}
3294
3295static int mtk_set_link_ksettings(struct net_device *ndev,
3296 const struct ethtool_link_ksettings *cmd)
3297{
3298 struct mtk_mac *mac = netdev_priv(ndev);
3299
3300 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3301 return -EBUSY;
3302
3303 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
3304}
3305
3306static void mtk_get_drvinfo(struct net_device *dev,
3307 struct ethtool_drvinfo *info)
3308{
3309 struct mtk_mac *mac = netdev_priv(dev);
3310
3311 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
3312 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
3313 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
3314}
3315
3316static u32 mtk_get_msglevel(struct net_device *dev)
3317{
3318 struct mtk_mac *mac = netdev_priv(dev);
3319
3320 return mac->hw->msg_enable;
3321}
3322
3323static void mtk_set_msglevel(struct net_device *dev, u32 value)
3324{
3325 struct mtk_mac *mac = netdev_priv(dev);
3326
3327 mac->hw->msg_enable = value;
3328}
3329
3330static int mtk_nway_reset(struct net_device *dev)
3331{
3332 struct mtk_mac *mac = netdev_priv(dev);
3333
3334 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3335 return -EBUSY;
3336
3337 if (!mac->phylink)
3338 return -ENOTSUPP;
3339
3340 return phylink_ethtool_nway_reset(mac->phylink);
3341}
3342
3343static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3344{
3345 int i;
3346
3347 switch (stringset) {
3348 case ETH_SS_STATS:
3349 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
3350 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
3351 data += ETH_GSTRING_LEN;
3352 }
3353 break;
3354 }
3355}
3356
3357static int mtk_get_sset_count(struct net_device *dev, int sset)
3358{
3359 switch (sset) {
3360 case ETH_SS_STATS:
3361 return ARRAY_SIZE(mtk_ethtool_stats);
3362 default:
3363 return -EOPNOTSUPP;
3364 }
3365}
3366
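/* Copy the per-MAC hardware counters into the ethtool buffer.  The
 * u64_stats_fetch_begin_irq()/retry loop re-reads the block if the counters
 * were updated concurrently, so the snapshot handed to userspace stays
 * consistent.
 */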
3367static void mtk_get_ethtool_stats(struct net_device *dev,
3368 struct ethtool_stats *stats, u64 *data)
3369{
3370 struct mtk_mac *mac = netdev_priv(dev);
3371 struct mtk_hw_stats *hwstats = mac->hw_stats;
3372 u64 *data_src, *data_dst;
3373 unsigned int start;
3374 int i;
3375
3376 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3377 return;
3378
3379 if (netif_running(dev) && netif_device_present(dev)) {
3380 if (spin_trylock_bh(&hwstats->stats_lock)) {
3381 mtk_stats_update_mac(mac);
3382 spin_unlock_bh(&hwstats->stats_lock);
3383 }
3384 }
3385
3386 data_src = (u64 *)hwstats;
3387
3388 do {
3389 data_dst = data;
3390 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
3391
3392 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
3393 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
3394 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
3395}
3396
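/* RX flow classification (ethtool -n/-N) is only offered when the netdev
 * advertises NETIF_F_LRO; the rules are backed by the HW LRO IP address
 * table managed in mtk_hwlro_add_ipaddr()/mtk_hwlro_del_ipaddr().
 */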
3397static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
3398 u32 *rule_locs)
3399{
3400 int ret = -EOPNOTSUPP;
3401
3402 switch (cmd->cmd) {
3403 case ETHTOOL_GRXRINGS:
3404 if (dev->hw_features & NETIF_F_LRO) {
3405 cmd->data = MTK_MAX_RX_RING_NUM;
3406 ret = 0;
3407 }
3408 break;
3409 case ETHTOOL_GRXCLSRLCNT:
3410 if (dev->hw_features & NETIF_F_LRO) {
3411 struct mtk_mac *mac = netdev_priv(dev);
3412
3413 cmd->rule_cnt = mac->hwlro_ip_cnt;
3414 ret = 0;
3415 }
3416 break;
3417 case ETHTOOL_GRXCLSRULE:
3418 if (dev->hw_features & NETIF_F_LRO)
3419 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
3420 break;
3421 case ETHTOOL_GRXCLSRLALL:
3422 if (dev->hw_features & NETIF_F_LRO)
3423 ret = mtk_hwlro_get_fdir_all(dev, cmd,
3424 rule_locs);
3425 break;
3426 default:
3427 break;
3428 }
3429
3430 return ret;
3431}
3432
3433static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3434{
3435 int ret = -EOPNOTSUPP;
3436
3437 switch (cmd->cmd) {
3438 case ETHTOOL_SRXCLSRLINS:
3439 if (dev->hw_features & NETIF_F_LRO)
3440 ret = mtk_hwlro_add_ipaddr(dev, cmd);
3441 break;
3442 case ETHTOOL_SRXCLSRLDEL:
3443 if (dev->hw_features & NETIF_F_LRO)
3444 ret = mtk_hwlro_del_ipaddr(dev, cmd);
3445 break;
3446 default:
3447 break;
3448 }
3449
3450 return ret;
3451}
3452
developer6c5cbb52022-08-12 11:37:45 +08003453static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
3454{
3455 struct mtk_mac *mac = netdev_priv(dev);
3456
3457 phylink_ethtool_get_pauseparam(mac->phylink, pause);
3458}
3459
3460static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
3461{
3462 struct mtk_mac *mac = netdev_priv(dev);
3463
3464 return phylink_ethtool_set_pauseparam(mac->phylink, pause);
3465}
3466
developerfd40db22021-04-29 10:08:25 +08003467static const struct ethtool_ops mtk_ethtool_ops = {
3468 .get_link_ksettings = mtk_get_link_ksettings,
3469 .set_link_ksettings = mtk_set_link_ksettings,
3470 .get_drvinfo = mtk_get_drvinfo,
3471 .get_msglevel = mtk_get_msglevel,
3472 .set_msglevel = mtk_set_msglevel,
3473 .nway_reset = mtk_nway_reset,
3474 .get_link = ethtool_op_get_link,
3475 .get_strings = mtk_get_strings,
3476 .get_sset_count = mtk_get_sset_count,
3477 .get_ethtool_stats = mtk_get_ethtool_stats,
3478 .get_rxnfc = mtk_get_rxnfc,
3479 .set_rxnfc = mtk_set_rxnfc,
developer6c5cbb52022-08-12 11:37:45 +08003480 .get_pauseparam = mtk_get_pauseparam,
3481 .set_pauseparam = mtk_set_pauseparam,
developerfd40db22021-04-29 10:08:25 +08003482};
3483
3484static const struct net_device_ops mtk_netdev_ops = {
3485 .ndo_init = mtk_init,
3486 .ndo_uninit = mtk_uninit,
3487 .ndo_open = mtk_open,
3488 .ndo_stop = mtk_stop,
3489 .ndo_start_xmit = mtk_start_xmit,
3490 .ndo_set_mac_address = mtk_set_mac_address,
3491 .ndo_validate_addr = eth_validate_addr,
3492 .ndo_do_ioctl = mtk_do_ioctl,
3493 .ndo_tx_timeout = mtk_tx_timeout,
3494 .ndo_get_stats64 = mtk_get_stats64,
3495 .ndo_fix_features = mtk_fix_features,
3496 .ndo_set_features = mtk_set_features,
3497#ifdef CONFIG_NET_POLL_CONTROLLER
3498 .ndo_poll_controller = mtk_poll_controller,
3499#endif
3500};
3501
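/* Create one net_device per "mediatek,eth-mac" DT child node: allocate the
 * netdev and counter memory, create the phylink instance for the node's
 * phy-mode, and optionally wire up a GPIO-monitored fixed link described by
 * a "fixed-link" child with "link-gpio", "label" and "phy-handle" properties.
 */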
3502static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
3503{
3504 const __be32 *_id = of_get_property(np, "reg", NULL);
3505 struct phylink *phylink;
3506 int phy_mode, id, err;
3507 struct mtk_mac *mac;
developera2613e62022-07-01 18:29:37 +08003508 struct mtk_phylink_priv *phylink_priv;
3509 struct fwnode_handle *fixed_node;
3510 struct gpio_desc *desc;
developerfd40db22021-04-29 10:08:25 +08003511
3512 if (!_id) {
3513 dev_err(eth->dev, "missing mac id\n");
3514 return -EINVAL;
3515 }
3516
3517 id = be32_to_cpup(_id);
developerfb556ca2021-10-13 10:52:09 +08003518 if (id < 0 || id >= MTK_MAC_COUNT) {
developerfd40db22021-04-29 10:08:25 +08003519 dev_err(eth->dev, "%d is not a valid mac id\n", id);
3520 return -EINVAL;
3521 }
3522
3523 if (eth->netdev[id]) {
3524 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
3525 return -EINVAL;
3526 }
3527
3528 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
3529 if (!eth->netdev[id]) {
3530 dev_err(eth->dev, "alloc_etherdev failed\n");
3531 return -ENOMEM;
3532 }
3533 mac = netdev_priv(eth->netdev[id]);
3534 eth->mac[id] = mac;
3535 mac->id = id;
3536 mac->hw = eth;
3537 mac->of_node = np;
3538
3539 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
3540 mac->hwlro_ip_cnt = 0;
3541
3542 mac->hw_stats = devm_kzalloc(eth->dev,
3543 sizeof(*mac->hw_stats),
3544 GFP_KERNEL);
3545 if (!mac->hw_stats) {
3546 dev_err(eth->dev, "failed to allocate counter memory\n");
3547 err = -ENOMEM;
3548 goto free_netdev;
3549 }
3550 spin_lock_init(&mac->hw_stats->stats_lock);
3551 u64_stats_init(&mac->hw_stats->syncp);
3552 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
3553
3554 /* phylink create */
3555 phy_mode = of_get_phy_mode(np);
3556 if (phy_mode < 0) {
3557 dev_err(eth->dev, "incorrect phy-mode\n");
3558 err = -EINVAL;
3559 goto free_netdev;
3560 }
3561
3562 /* mac config is not set */
3563 mac->interface = PHY_INTERFACE_MODE_NA;
3564 mac->mode = MLO_AN_PHY;
3565 mac->speed = SPEED_UNKNOWN;
3566
3567 mac->phylink_config.dev = &eth->netdev[id]->dev;
3568 mac->phylink_config.type = PHYLINK_NETDEV;
3569
3570 phylink = phylink_create(&mac->phylink_config,
3571 of_fwnode_handle(mac->of_node),
3572 phy_mode, &mtk_phylink_ops);
3573 if (IS_ERR(phylink)) {
3574 err = PTR_ERR(phylink);
3575 goto free_netdev;
3576 }
3577
3578 mac->phylink = phylink;
3579
developera2613e62022-07-01 18:29:37 +08003580 fixed_node = fwnode_get_named_child_node(of_fwnode_handle(mac->of_node),
3581 "fixed-link");
3582 if (fixed_node) {
3583 desc = fwnode_get_named_gpiod(fixed_node, "link-gpio",
3584 0, GPIOD_IN, "?");
3585 if (!IS_ERR(desc)) {
3586 struct device_node *phy_np;
3587 const char *label;
3588 int irq, phyaddr;
3589
3590 phylink_priv = &mac->phylink_priv;
3591
3592 phylink_priv->desc = desc;
3593 phylink_priv->id = id;
3594 phylink_priv->link = -1;
3595
3596 irq = gpiod_to_irq(desc);
3597 if (irq > 0) {
3598 devm_request_irq(eth->dev, irq, mtk_handle_irq_fixed_link,
3599 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
3600 "ethernet:fixed link", mac);
3601 }
3602
3603 if (!of_property_read_string(to_of_node(fixed_node), "label", &label))
3604 strcpy(phylink_priv->label, label);
3605
3606 phy_np = of_parse_phandle(to_of_node(fixed_node), "phy-handle", 0);
3607 if (phy_np) {
3608 if (!of_property_read_u32(phy_np, "reg", &phyaddr))
3609 phylink_priv->phyaddr = phyaddr;
3610 }
3611 }
3612 fwnode_handle_put(fixed_node);
3613 }
3614
developerfd40db22021-04-29 10:08:25 +08003615 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
3616 eth->netdev[id]->watchdog_timeo = 5 * HZ;
3617 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
3618 eth->netdev[id]->base_addr = (unsigned long)eth->base;
3619
3620 eth->netdev[id]->hw_features = eth->soc->hw_features;
3621 if (eth->hwlro)
3622 eth->netdev[id]->hw_features |= NETIF_F_LRO;
3623
3624 eth->netdev[id]->vlan_features = eth->soc->hw_features &
3625 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
3626 eth->netdev[id]->features |= eth->soc->hw_features;
3627 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
3628
3629 eth->netdev[id]->irq = eth->irq[0];
3630 eth->netdev[id]->dev.of_node = np;
3631
3632 return 0;
3633
3634free_netdev:
3635 free_netdev(eth->netdev[id]);
3636 return err;
3637}
3638
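/* Probe: map the frame engine registers (and SRAM offset on SoCs that have
 * it), look up the syscon regmaps, clocks and IRQs, run a cold-reset hardware
 * init, create the per-MAC netdevs from DT and register the NAPI contexts.
 */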
3639static int mtk_probe(struct platform_device *pdev)
3640{
3641 struct device_node *mac_np;
3642 struct mtk_eth *eth;
3643 int err, i;
3644
3645 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
3646 if (!eth)
3647 return -ENOMEM;
3648
3649 eth->soc = of_device_get_match_data(&pdev->dev);
3650
3651 eth->dev = &pdev->dev;
3652 eth->base = devm_platform_ioremap_resource(pdev, 0);
3653 if (IS_ERR(eth->base))
3654 return PTR_ERR(eth->base);
3655
3656	if (eth->soc->has_sram) {
3657 struct resource *res;
3658 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
developer4c32b7a2021-11-13 16:46:43 +08003659 if (unlikely(!res))
3660 return -EINVAL;
developerfd40db22021-04-29 10:08:25 +08003661 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
3662 }
3663
3664 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3665 eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
3666 eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
3667 } else {
3668 eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
3669 eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
3670 }
3671
3672 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3673 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
3674 eth->ip_align = NET_IP_ALIGN;
3675 } else {
developera2bdbd52021-05-31 19:10:17 +08003676 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developerfd40db22021-04-29 10:08:25 +08003677 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_V2;
3678 else
3679 eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
3680 }
3681
3682 spin_lock_init(&eth->page_lock);
3683 spin_lock_init(&eth->tx_irq_lock);
3684 spin_lock_init(&eth->rx_irq_lock);
developerd82e8372022-02-09 15:00:09 +08003685 spin_lock_init(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +08003686
3687 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3688 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3689 "mediatek,ethsys");
3690 if (IS_ERR(eth->ethsys)) {
3691 dev_err(&pdev->dev, "no ethsys regmap found\n");
3692 return PTR_ERR(eth->ethsys);
3693 }
3694 }
3695
3696 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
3697 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3698 "mediatek,infracfg");
3699 if (IS_ERR(eth->infra)) {
3700 dev_err(&pdev->dev, "no infracfg regmap found\n");
3701 return PTR_ERR(eth->infra);
3702 }
3703 }
3704
3705 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
3706 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
3707 GFP_KERNEL);
3708 if (!eth->sgmii)
3709 return -ENOMEM;
3710
3711 err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
3712 eth->soc->ana_rgc3);
3713
3714 if (err)
3715 return err;
3716 }
3717
3718 if (eth->soc->required_pctl) {
3719 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3720 "mediatek,pctl");
3721 if (IS_ERR(eth->pctl)) {
3722 dev_err(&pdev->dev, "no pctl regmap found\n");
3723 return PTR_ERR(eth->pctl);
3724 }
3725 }
3726
developer18f46a82021-07-20 21:08:21 +08003727 for (i = 0; i < MTK_MAX_IRQ_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08003728 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
3729 eth->irq[i] = eth->irq[0];
3730 else
3731 eth->irq[i] = platform_get_irq(pdev, i);
3732 if (eth->irq[i] < 0) {
3733 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
3734 return -ENXIO;
3735 }
3736 }
3737
3738 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
3739 eth->clks[i] = devm_clk_get(eth->dev,
3740 mtk_clks_source_name[i]);
3741 if (IS_ERR(eth->clks[i])) {
3742 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
3743 return -EPROBE_DEFER;
3744 if (eth->soc->required_clks & BIT(i)) {
3745 dev_err(&pdev->dev, "clock %s not found\n",
3746 mtk_clks_source_name[i]);
3747 return -EINVAL;
3748 }
3749 eth->clks[i] = NULL;
3750 }
3751 }
3752
3753 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
3754 INIT_WORK(&eth->pending_work, mtk_pending_work);
3755
developer8051e042022-04-08 13:26:36 +08003756 err = mtk_hw_init(eth, MTK_TYPE_COLD_RESET);
developerfd40db22021-04-29 10:08:25 +08003757 if (err)
3758 return err;
3759
3760 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
3761
3762 for_each_child_of_node(pdev->dev.of_node, mac_np) {
3763 if (!of_device_is_compatible(mac_np,
3764 "mediatek,eth-mac"))
3765 continue;
3766
3767 if (!of_device_is_available(mac_np))
3768 continue;
3769
3770 err = mtk_add_mac(eth, mac_np);
3771 if (err) {
3772 of_node_put(mac_np);
3773 goto err_deinit_hw;
3774 }
3775 }
3776
developer18f46a82021-07-20 21:08:21 +08003777 err = mtk_napi_init(eth);
3778 if (err)
3779 goto err_free_dev;
3780
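	/* SoCs with MTK_SHARED_INT use a single interrupt line for everything;
	 * otherwise irq[1] serves TX, irq[2] serves the first RX ring (with one
	 * extra line per additional RSS ring) and, when not using RSS, irq[3]
	 * is handed to mtk_handle_fe_irq().
	 */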
developerfd40db22021-04-29 10:08:25 +08003781 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
3782 err = devm_request_irq(eth->dev, eth->irq[0],
3783 mtk_handle_irq, 0,
3784 dev_name(eth->dev), eth);
3785 } else {
3786 err = devm_request_irq(eth->dev, eth->irq[1],
3787 mtk_handle_irq_tx, 0,
3788 dev_name(eth->dev), eth);
3789 if (err)
3790 goto err_free_dev;
3791
3792 err = devm_request_irq(eth->dev, eth->irq[2],
3793 mtk_handle_irq_rx, 0,
developer18f46a82021-07-20 21:08:21 +08003794 dev_name(eth->dev), &eth->rx_napi[0]);
3795 if (err)
3796 goto err_free_dev;
3797
developer793f7b42022-05-20 13:54:51 +08003798 if (MTK_MAX_IRQ_NUM > 3) {
3799 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3800 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3801 err = devm_request_irq(eth->dev,
3802 eth->irq[2 + i],
3803 mtk_handle_irq_rx, 0,
3804 dev_name(eth->dev),
3805 &eth->rx_napi[i]);
3806 if (err)
3807 goto err_free_dev;
3808 }
3809 } else {
3810 err = devm_request_irq(eth->dev, eth->irq[3],
3811 mtk_handle_fe_irq, 0,
3812 dev_name(eth->dev), eth);
developer18f46a82021-07-20 21:08:21 +08003813 if (err)
3814 goto err_free_dev;
3815 }
3816 }
developerfd40db22021-04-29 10:08:25 +08003817 }
developer8051e042022-04-08 13:26:36 +08003818
developerfd40db22021-04-29 10:08:25 +08003819 if (err)
3820 goto err_free_dev;
3821
3822 /* No MT7628/88 support yet */
3823 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3824 err = mtk_mdio_init(eth);
3825 if (err)
3826 goto err_free_dev;
3827 }
3828
3829 for (i = 0; i < MTK_MAX_DEVS; i++) {
3830 if (!eth->netdev[i])
3831 continue;
3832
3833 err = register_netdev(eth->netdev[i]);
3834 if (err) {
3835 dev_err(eth->dev, "error bringing up device\n");
3836 goto err_deinit_mdio;
3837 } else
3838 netif_info(eth, probe, eth->netdev[i],
3839 "mediatek frame engine at 0x%08lx, irq %d\n",
3840 eth->netdev[i]->base_addr, eth->irq[0]);
3841 }
3842
3843 /* we run 2 devices on the same DMA ring so we need a dummy device
3844 * for NAPI to work
3845 */
3846 init_dummy_netdev(&eth->dummy_dev);
3847 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
3848 MTK_NAPI_WEIGHT);
developer18f46a82021-07-20 21:08:21 +08003849 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[0].napi, mtk_napi_rx,
developerfd40db22021-04-29 10:08:25 +08003850 MTK_NAPI_WEIGHT);
3851
developer18f46a82021-07-20 21:08:21 +08003852 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3853 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
3854 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[i].napi,
3855 mtk_napi_rx, MTK_NAPI_WEIGHT);
3856 }
3857
developerfd40db22021-04-29 10:08:25 +08003858 mtketh_debugfs_init(eth);
3859 debug_proc_init(eth);
3860
3861 platform_set_drvdata(pdev, eth);
3862
developer8051e042022-04-08 13:26:36 +08003863 register_netdevice_notifier(&mtk_eth_netdevice_nb);
developer793f7b42022-05-20 13:54:51 +08003864#if defined(CONFIG_MEDIATEK_NETSYS_V2)
developer8051e042022-04-08 13:26:36 +08003865 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
3866 eth->mtk_dma_monitor_timer.expires = jiffies;
3867 add_timer(&eth->mtk_dma_monitor_timer);
developer793f7b42022-05-20 13:54:51 +08003868#endif
developer8051e042022-04-08 13:26:36 +08003869
developerfd40db22021-04-29 10:08:25 +08003870 return 0;
3871
3872err_deinit_mdio:
3873 mtk_mdio_cleanup(eth);
3874err_free_dev:
3875 mtk_free_dev(eth);
3876err_deinit_hw:
3877 mtk_hw_deinit(eth);
3878
3879 return err;
3880}
3881
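/* Remove: stop every netdev so DMA is quiesced, detach the PHYs, undo the
 * hardware init, delete the NAPI contexts, clean up the MDIO bus and remove
 * the netdevice notifier and DMA monitor timer.
 */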
3882static int mtk_remove(struct platform_device *pdev)
3883{
3884 struct mtk_eth *eth = platform_get_drvdata(pdev);
3885 struct mtk_mac *mac;
3886 int i;
3887
3888 /* stop all devices to make sure that dma is properly shut down */
3889 for (i = 0; i < MTK_MAC_COUNT; i++) {
3890 if (!eth->netdev[i])
3891 continue;
3892 mtk_stop(eth->netdev[i]);
3893 mac = netdev_priv(eth->netdev[i]);
3894 phylink_disconnect_phy(mac->phylink);
3895 }
3896
3897 mtk_hw_deinit(eth);
3898
3899 netif_napi_del(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08003900 netif_napi_del(&eth->rx_napi[0].napi);
3901
3902 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3903 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
3904 netif_napi_del(&eth->rx_napi[i].napi);
3905 }
3906
developerfd40db22021-04-29 10:08:25 +08003907 mtk_cleanup(eth);
3908 mtk_mdio_cleanup(eth);
developer8051e042022-04-08 13:26:36 +08003909 unregister_netdevice_notifier(&mtk_eth_netdevice_nb);
3910 del_timer_sync(&eth->mtk_dma_monitor_timer);
developerfd40db22021-04-29 10:08:25 +08003911
3912 return 0;
3913}
3914
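/* Per-SoC match data: capability flags, required clocks and pctl regmap,
 * whether the descriptor rings live in SRAM, and the DMA descriptor layout
 * (v1 descriptors, or v2 descriptors on NETSYS v2 parts such as MT7986/MT7981).
 */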
3915static const struct mtk_soc_data mt2701_data = {
3916 .caps = MT7623_CAPS | MTK_HWLRO,
3917 .hw_features = MTK_HW_FEATURES,
3918 .required_clks = MT7623_CLKS_BITMAP,
3919 .required_pctl = true,
3920 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003921 .txrx = {
3922 .txd_size = sizeof(struct mtk_tx_dma),
3923 .rxd_size = sizeof(struct mtk_rx_dma),
3924 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3925 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3926 },
developerfd40db22021-04-29 10:08:25 +08003927};
3928
3929static const struct mtk_soc_data mt7621_data = {
3930 .caps = MT7621_CAPS,
3931 .hw_features = MTK_HW_FEATURES,
3932 .required_clks = MT7621_CLKS_BITMAP,
3933 .required_pctl = false,
3934 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003935 .txrx = {
3936 .txd_size = sizeof(struct mtk_tx_dma),
3937 .rxd_size = sizeof(struct mtk_rx_dma),
3938 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3939 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3940 },
developerfd40db22021-04-29 10:08:25 +08003941};
3942
3943static const struct mtk_soc_data mt7622_data = {
3944 .ana_rgc3 = 0x2028,
3945 .caps = MT7622_CAPS | MTK_HWLRO,
3946 .hw_features = MTK_HW_FEATURES,
3947 .required_clks = MT7622_CLKS_BITMAP,
3948 .required_pctl = false,
3949 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003950 .txrx = {
3951 .txd_size = sizeof(struct mtk_tx_dma),
3952 .rxd_size = sizeof(struct mtk_rx_dma),
3953 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3954 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3955 },
developerfd40db22021-04-29 10:08:25 +08003956};
3957
3958static const struct mtk_soc_data mt7623_data = {
3959 .caps = MT7623_CAPS | MTK_HWLRO,
3960 .hw_features = MTK_HW_FEATURES,
3961 .required_clks = MT7623_CLKS_BITMAP,
3962 .required_pctl = true,
3963 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003964 .txrx = {
3965 .txd_size = sizeof(struct mtk_tx_dma),
3966 .rxd_size = sizeof(struct mtk_rx_dma),
3967 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3968 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3969 },
developerfd40db22021-04-29 10:08:25 +08003970};
3971
3972static const struct mtk_soc_data mt7629_data = {
3973 .ana_rgc3 = 0x128,
3974 .caps = MT7629_CAPS | MTK_HWLRO,
3975 .hw_features = MTK_HW_FEATURES,
3976 .required_clks = MT7629_CLKS_BITMAP,
3977 .required_pctl = false,
3978 .has_sram = false,
developere9356982022-07-04 09:03:20 +08003979 .txrx = {
3980 .txd_size = sizeof(struct mtk_tx_dma),
3981 .rxd_size = sizeof(struct mtk_rx_dma),
3982 .dma_max_len = MTK_TX_DMA_BUF_LEN,
3983 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
3984 },
developerfd40db22021-04-29 10:08:25 +08003985};
3986
3987static const struct mtk_soc_data mt7986_data = {
3988 .ana_rgc3 = 0x128,
3989 .caps = MT7986_CAPS,
developercba5f4e2021-05-06 14:01:53 +08003990 .hw_features = MTK_HW_FEATURES,
developerfd40db22021-04-29 10:08:25 +08003991 .required_clks = MT7986_CLKS_BITMAP,
3992 .required_pctl = false,
3993 .has_sram = true,
developere9356982022-07-04 09:03:20 +08003994 .txrx = {
3995 .txd_size = sizeof(struct mtk_tx_dma_v2),
3996 .rxd_size = sizeof(struct mtk_rx_dma_v2),
3997 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
3998 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
3999 },
developerfd40db22021-04-29 10:08:25 +08004000};
4001
developer255bba22021-07-27 15:16:33 +08004002static const struct mtk_soc_data mt7981_data = {
4003 .ana_rgc3 = 0x128,
4004 .caps = MT7981_CAPS,
developer7377b0b2021-11-18 14:54:47 +08004005 .hw_features = MTK_HW_FEATURES,
developer255bba22021-07-27 15:16:33 +08004006 .required_clks = MT7981_CLKS_BITMAP,
4007 .required_pctl = false,
4008 .has_sram = true,
developere9356982022-07-04 09:03:20 +08004009 .txrx = {
4010 .txd_size = sizeof(struct mtk_tx_dma_v2),
4011 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4012 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4013 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
4014 },
developer255bba22021-07-27 15:16:33 +08004015};
4016
developerfd40db22021-04-29 10:08:25 +08004017static const struct mtk_soc_data rt5350_data = {
4018 .caps = MT7628_CAPS,
4019 .hw_features = MTK_HW_FEATURES_MT7628,
4020 .required_clks = MT7628_CLKS_BITMAP,
4021 .required_pctl = false,
4022 .has_sram = false,
developere9356982022-07-04 09:03:20 +08004023 .txrx = {
4024 .txd_size = sizeof(struct mtk_tx_dma),
4025 .rxd_size = sizeof(struct mtk_rx_dma),
4026 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4027 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
4028 },
developerfd40db22021-04-29 10:08:25 +08004029};
4030
4031const struct of_device_id of_mtk_match[] = {
4032 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
4033 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
4034 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
4035 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
4036 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
4037 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
developer255bba22021-07-27 15:16:33 +08004038 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
developerfd40db22021-04-29 10:08:25 +08004039 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
4040 {},
4041};
4042MODULE_DEVICE_TABLE(of, of_mtk_match);
4043
4044static struct platform_driver mtk_driver = {
4045 .probe = mtk_probe,
4046 .remove = mtk_remove,
4047 .driver = {
4048 .name = "mtk_soc_eth",
4049 .of_match_table = of_mtk_match,
4050 },
4051};
4052
4053module_platform_driver(mtk_driver);
4054
4055MODULE_LICENSE("GPL");
4056MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
4057MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");