// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>

#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
#include "mtk_eth_reset.h"

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
#include "mtk_hnat/nf_hnat_mtk.h"
#endif

static int mtk_msg_level = -1;
atomic_t reset_lock = ATOMIC_INIT(0);
atomic_t force = ATOMIC_INIT(0);

module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
DECLARE_COMPLETION(wait_ser_done);

#define MTK_ETHTOOL_STAT(x) { #x, \
			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
	"sgmii_ck", "eth2pll", "wocpu0", "wocpu1",
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
	u32 val;

	val = mtk_r32(eth, reg);
	val &= ~mask;
	val |= set;
	mtk_w32(eth, val, reg);
	return reg;
}

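/* Poll the PHY indirect access control register until its busy bit clears,
 * giving up after PHY_IAC_TIMEOUT jiffies.
 */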
91static int mtk_mdio_busy_wait(struct mtk_eth *eth)
92{
93 unsigned long t_start = jiffies;
94
95 while (1) {
96 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
97 return 0;
98 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
99 break;
developerc4671b22021-05-28 13:16:42 +0800100 cond_resched();
developerfd40db22021-04-29 10:08:25 +0800101 }
102
103 dev_err(eth->dev, "mdio: MDIO timeout\n");
104 return -1;
105}
106
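/* Low-level MDIO accessors: issue a Clause 22 or Clause 45 transaction
 * through the PHY indirect access controller (MTK_PHY_IAC). Clause 45
 * accesses are flagged with MII_ADDR_C45 and need an extra address cycle.
 */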
u32 _mtk_mdio_write(struct mtk_eth *eth, int phy_addr,
		    int phy_reg, u16 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return -1;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_WRITE |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return 0xffff;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_READ_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

static int mtk_mdio_reset(struct mii_bus *bus)
{
	/* mdiobus_register() will trigger a reset pulse when the bus reset
	 * is enabled; we just need to wait until the device is ready.
	 */
	mdelay(20);

	return 0;
}

static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
				     phy_interface_t interface)
{
	u32 val;

	/* Check DDR memory type.
	 * Currently TRGMII mode with DDR2 memory is not supported.
	 */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
	if (interface == PHY_INTERFACE_MODE_TRGMII &&
	    val & SYSCFG_DRAM_TYPE_DDR2) {
		dev_err(eth->dev,
			"TRGMII mode with DDR2 memory is not supported!\n");
		return -EOPNOTSUPP;
	}

	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_MT7621_MASK, val);

	return 0;
}

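/* Select the GMAC0 interface mode and retune the TRGMII PLL and RX/TX clock
 * controls for the requested link speed.
 */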
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
				   phy_interface_t interface, int speed)
{
	u32 val;
	int ret;

	if (interface == PHY_INTERFACE_MODE_TRGMII) {
		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
		val = 500000000;
		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
		if (ret)
			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
		return;
	}

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

static void mtk_setup_bridge_switch(struct mtk_eth *eth)
{
	int val;

	/* Force Port1 XGMAC Link Up */
	val = mtk_r32(eth, MTK_XGMAC_STS(MTK_GMAC1_ID));
	mtk_w32(eth, val | MTK_XGMAC_FORCE_LINK,
		MTK_XGMAC_STS(MTK_GMAC1_ID));

	/* Adjust GSW bridge IPG to 11 */
	val = mtk_r32(eth, MTK_GSW_CFG);
	val &= ~(GSWTX_IPG_MASK | GSWRX_IPG_MASK);
	val |= (GSW_IPG_11 << GSWTX_IPG_SHIFT) |
	       (GSW_IPG_11 << GSWRX_IPG_SHIFT);
	mtk_w32(eth, val, MTK_GSW_CFG);

	/* Disable GDM1 RX CRC stripping */
	val = mtk_r32(eth, MTK_GDMA_FWD_CFG(0));
	val &= ~MTK_GDMA_STRP_CRC;
	mtk_w32(eth, val, MTK_GDMA_FWD_CFG(0));
}

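/* phylink .mac_config callback: select the SoC pad/path for the requested
 * PHY interface, program the GE mode bits in ETHSYS_SYSCFG0, and hand off
 * SGMII/USXGMII PCS setup before configuring the per-MAC control registers.
 */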
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	u32 sid, i;
	int val, ge_mode, err = 0;

	/* MT76x8 has no hardware settings for the MAC */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
	    mac->interface != state->interface) {
		/* Setup soc pin functions */
		switch (state->interface) {
		case PHY_INTERFACE_MODE_TRGMII:
			if (mac->id)
				goto err_phy;
			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
					  MTK_GMAC1_TRGMII))
				goto err_phy;
			/* fall through */
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_REVMII:
		case PHY_INTERFACE_MODE_RMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
		case PHY_INTERFACE_MODE_2500BASEX:
		case PHY_INTERFACE_MODE_SGMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_GMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
				err = mtk_gmac_gephy_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_XGMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_XGMII)) {
				err = mtk_gmac_xgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_USXGMII:
		case PHY_INTERFACE_MODE_10GKR:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
				err = mtk_gmac_usxgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		default:
			goto err_phy;
		}

		/* Setup clock for 1st gmac */
		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
		    !phy_interface_mode_is_8023z(state->interface) &&
		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
			if (MTK_HAS_CAPS(mac->hw->soc->caps,
					 MTK_TRGMII_MT7621_CLK)) {
				if (mt7621_gmac0_rgmii_adjust(mac->hw,
							      state->interface))
					goto err_phy;
			} else {
				mtk_gmac0_rgmii_adjust(mac->hw,
						       state->interface,
						       state->speed);

				/* mt7623_pad_clk_setup */
				for (i = 0; i < NUM_TRGMII_CTRL; i++)
					mtk_w32(mac->hw,
						TD_DM_DRVP(8) | TD_DM_DRVN(8),
						TRGMII_TD_ODT(i));

				/* Assert/release MT7623 RXC reset */
				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
					TRGMII_RCK_CTRL);
				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
			}
		}

		ge_mode = 0;
		switch (state->interface) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_GMII:
			ge_mode = 1;
			break;
		case PHY_INTERFACE_MODE_REVMII:
			ge_mode = 2;
			break;
		case PHY_INTERFACE_MODE_RMII:
			if (mac->id)
				goto err_phy;
			ge_mode = 3;
			break;
		default:
			break;
		}

		/* put the gmac into the right mode */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
		spin_unlock(&eth->syscfg0_lock);

		mac->interface = state->interface;
	}

	/* SGMII */
	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path from GMAC to SGMII will be enabled once the
		 * SGMIISYS setup is done.
		 */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK,
				   ~(u32)SYSCFG0_SGMII_MASK);

		/* Decide how GMAC and SGMIISYS be mapped */
		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
		       0 : mac->id;

		/* Setup SGMIISYS with the determined property */
		if (state->interface != PHY_INTERFACE_MODE_SGMII)
			err = mtk_sgmii_setup_mode_force(eth->xgmii, sid,
							 state);
		else
			err = mtk_sgmii_setup_mode_an(eth->xgmii, sid);

		if (err) {
			spin_unlock(&eth->syscfg0_lock);
			goto init_err;
		}

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, val);
		spin_unlock(&eth->syscfg0_lock);
	} else if (state->interface == PHY_INTERFACE_MODE_USXGMII ||
		   state->interface == PHY_INTERFACE_MODE_10GKR) {
		sid = mac->id;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) &&
		    sid != MTK_GMAC1_ID) {
			if (phylink_autoneg_inband(mode))
				err = mtk_usxgmii_setup_mode_force(eth->xgmii, sid,
								   SPEED_10000);
			else
				err = mtk_usxgmii_setup_mode_an(eth->xgmii, sid,
								SPEED_10000);

			if (err)
				goto init_err;
		}
	} else if (phylink_autoneg_inband(mode)) {
		dev_err(eth->dev,
			"In-band mode not supported in non SGMII mode!\n");
		return;
	}

	/* Setup gmac */
	if (mac->type == MTK_XGDM_TYPE) {
		mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
		mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			switch (mac->id) {
			case MTK_GMAC1_ID:
				mtk_setup_bridge_switch(eth);
				break;
			case MTK_GMAC3_ID:
				val = mtk_r32(eth, MTK_XGMAC_STS(mac->id));
				mtk_w32(eth, val | MTK_XGMAC_FORCE_LINK,
					MTK_XGMAC_STS(mac->id));
				break;
			}
		}
	}

	return;

err_phy:
	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
		mac->id, phy_modes(state->interface));
	return;

init_err:
	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
		mac->id, phy_modes(state->interface), err);
}

static int mtk_mac_pcs_get_state(struct phylink_config *config,
				 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	if (mac->type == MTK_XGDM_TYPE) {
		u32 sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));

		if (mac->id == MTK_GMAC2_ID)
			sts = sts >> 16;

		state->duplex = 1;

		switch (FIELD_GET(MTK_USXGMII_PCS_MODE, sts)) {
		case 0:
			state->speed = SPEED_10000;
			break;
		case 1:
			state->speed = SPEED_5000;
			break;
		case 2:
			state->speed = SPEED_2500;
			break;
		case 3:
			state->speed = SPEED_1000;
			break;
		}

		state->link = FIELD_GET(MTK_USXGMII_PCS_LINK, sts);
	} else if (mac->type == MTK_GDM_TYPE) {
		struct mtk_eth *eth = mac->hw;
		struct mtk_xgmii *ss = eth->xgmii;
		u32 id = mtk_mac2xgmii_id(eth, mac->id);
		u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
		u32 val;

		regmap_read(ss->regmap_sgmii[id], SGMSYS_PCS_CONTROL_1, &val);

		state->link = FIELD_GET(SGMII_LINK_STATYS, val);

		if (FIELD_GET(SGMII_AN_ENABLE, val)) {
			regmap_read(ss->regmap_sgmii[id], SGMII_PCS_SPEED_ABILITY, &val);

			val = val >> 16;

			state->duplex = FIELD_GET(SGMII_PCS_SPEED_DUPLEX, val);

			switch (FIELD_GET(SGMII_PCS_SPEED_MASK, val)) {
			case 0:
				state->speed = SPEED_10;
				break;
			case 1:
				state->speed = SPEED_100;
				break;
			case 2:
				state->speed = SPEED_1000;
				break;
			}
		} else {
			regmap_read(ss->regmap_sgmii[id], SGMSYS_SGMII_MODE, &val);

			state->duplex = !FIELD_GET(SGMII_DUPLEX_FULL, val);

			switch (FIELD_GET(SGMII_SPEED_MASK, val)) {
			case 0:
				state->speed = SPEED_10;
				break;
			case 1:
				state->speed = SPEED_100;
				break;
			case 2:
				regmap_read(ss->regmap_sgmii[id], ss->ana_rgc3, &val);
				state->speed = (FIELD_GET(RG_PHY_SPEED_3_125G, val)) ? SPEED_2500 : SPEED_1000;
				break;
			}
		}

		state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
		if (pmsr & MAC_MSR_RX_FC)
			state->pause |= MLO_PAUSE_RX;
		if (pmsr & MAC_MSR_TX_FC)
			state->pause |= MLO_PAUSE_TX;
	}

	return 1;
}

static void mtk_mac_an_restart(struct phylink_config *config)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	if (mac->type != MTK_XGDM_TYPE)
		mtk_sgmii_restart_an(mac->hw, mac->id);
}

static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr;

	if (mac->type == MTK_GDM_TYPE) {
		mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
		mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
		mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
	} else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
		mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));

		mcr &= 0xfffffff0;
		mcr |= XMAC_MCR_TRX_DISABLE;
		mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
	}
}

static void mtk_mac_link_up(struct phylink_config *config,
			    struct phy_device *phy,
			    unsigned int mode, phy_interface_t interface,
			    int speed, int duplex, bool tx_pause, bool rx_pause)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr, mcr_cur;

	if (mac->type == MTK_GDM_TYPE) {
		mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
		mcr = mcr_cur;
		mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
			 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
			 MAC_MCR_FORCE_RX_FC);
		mcr |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
		       MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;

		/* Configure speed */
		switch (speed) {
		case SPEED_2500:
		case SPEED_1000:
			mcr |= MAC_MCR_SPEED_1000;
			break;
		case SPEED_100:
			mcr |= MAC_MCR_SPEED_100;
			break;
		}

		/* Configure duplex */
		if (duplex == DUPLEX_FULL)
			mcr |= MAC_MCR_FORCE_DPX;

		/* Configure pause modes -
		 * phylink will avoid these for half duplex
		 */
		if (tx_pause)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (rx_pause)
			mcr |= MAC_MCR_FORCE_RX_FC;

		mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;

		/* Only update control register when needed! */
		if (mcr != mcr_cur)
			mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
	} else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
		mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));

		mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC);
		/* Configure pause modes -
		 * phylink will avoid these for half duplex
		 */
		if (tx_pause)
			mcr |= XMAC_MCR_FORCE_TX_FC;
		if (rx_pause)
			mcr |= XMAC_MCR_FORCE_RX_FC;

		mcr &= ~(XMAC_MCR_TRX_DISABLE);
		mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
	}
}

static void mtk_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
	      phy_interface_mode_is_rgmii(state->interface)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
	       phy_interface_mode_is_8023z(state->interface))) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_XGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_XGMII)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_USXGMII)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_10GKR))) {
		linkmode_zero(supported);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_USXGMII:
	case PHY_INTERFACE_MODE_10GKR:
		phylink_set(mask, 10000baseKR_Full);
		phylink_set(mask, 10000baseT_Full);
		phylink_set(mask, 10000baseCR_Full);
		phylink_set(mask, 10000baseSR_Full);
		phylink_set(mask, 10000baseLR_Full);
		phylink_set(mask, 10000baseLRM_Full);
		phylink_set(mask, 10000baseER_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		phylink_set(mask, 1000baseT_Half);
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		phylink_set(mask, 5000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_TRGMII:
		phylink_set(mask, 1000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_XGMII:
		/* fall through */
	case PHY_INTERFACE_MODE_1000BASEX:
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 2500baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseT_Half);
		/* fall through */
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_REVMII:
	case PHY_INTERFACE_MODE_NA:
	default:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		break;
	}

	if (state->interface == PHY_INTERFACE_MODE_NA) {
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII)) {
			phylink_set(mask, 10000baseKR_Full);
			phylink_set(mask, 10000baseSR_Full);
			phylink_set(mask, 10000baseLR_Full);
			phylink_set(mask, 10000baseLRM_Full);
			phylink_set(mask, 10000baseER_Full);
			phylink_set(mask, 1000baseKX_Full);
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
			phylink_set(mask, 1000baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
		}
	}

	if (mac->type == MTK_XGDM_TYPE) {
		phylink_clear(mask, 10baseT_Half);
		phylink_clear(mask, 100baseT_Half);
		phylink_clear(mask, 1000baseT_Half);
	}

	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}

static const struct phylink_mac_ops mtk_phylink_ops = {
	.validate = mtk_validate,
	.mac_link_state = mtk_mac_pcs_get_state,
	.mac_an_restart = mtk_mac_an_restart,
	.mac_config = mtk_mac_config,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
};

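/* Probe the "mdio-bus" child node and register an MDIO bus backed by the
 * indirect access helpers above.
 */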
static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->reset = mtk_mdio_reset;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	if (snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np) < 0) {
		ret = -ENOMEM;
		goto err_put_node;
	}
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MT7628_SDM_MAC_ADRH);
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MT7628_SDM_MAC_ADRL);
	} else {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MTK_GDMA_MAC_ADRH(mac->id));
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MTK_GDMA_MAC_ADRL(mac->id));
	}
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

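/* Fold the GDM hardware MIB counters for one MAC into its software
 * mtk_hw_stats block; the TX counter window sits at different offsets on
 * NETSYS_V3 parts.
 */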
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_eth *eth = mac->hw;
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats = mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
		mtk_r32(mac->hw, base + 0x24);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x50);
		hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x54);
		hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x40);
		stats = mtk_r32(mac->hw, base + 0x44);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x48);
		u64_stats_update_end(&hw_stats->syncp);
	} else {
		hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
		hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
		hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
		stats = mtk_r32(mac->hw, base + 0x34);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
		u64_stats_update_end(&hw_stats->syncp);
	}
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

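/* Snapshot a hardware RX descriptor into a private copy, but only after the
 * DMA_DONE bit confirms the hardware has finished writing it back.
 */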
static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
			    struct mtk_rx_dma_v2 *dma_rxd)
{
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	if (!(rxd->rxd2 & RX_DMA_DONE))
		return false;

	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
		rxd->rxd7 = READ_ONCE(dma_rxd->rxd7);
	}

	return true;
}

/* the QDMA core needs scratch memory to be set up */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	if (!eth->soc->has_sram) {
		eth->scratch_ring = dma_alloc_coherent(eth->dev,
						       cnt * soc->txrx.txd_size,
						       &eth->phy_scratch_ring,
						       GFP_KERNEL);
	} else {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
			eth->scratch_ring = eth->sram_base;
		else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
			eth->scratch_ring = eth->base + MTK_ETH_SRAM_OFFSET;
	}

	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);

	for (i = 0; i < cnt; i++) {
		struct mtk_tx_dma_v2 *txd;

		txd = eth->scratch_ring + i * soc->txrx.txd_size;
		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
		if (i < cnt - 1)
			txd->txd2 = eth->phy_scratch_ring +
				    (i + 1) * soc->txrx.txd_size;

		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
		txd->txd4 = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			txd->txd5 = 0;
			txd->txd6 = 0;
			txd->txd7 = 0;
			txd->txd8 = 0;
		}
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}

static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	return ring->dma + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    void *txd, u32 txd_size)
{
	int idx = (txd - ring->dma) / txd_size;

	return &ring->buf[idx];
}

static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
				       void *dma)
{
	return ring->dma_pdma - ring->dma + dma;
}

static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
{
	return (dma - ring->dma) / txd_size;
}

static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 bool napi)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
			dma_unmap_single(eth->dev,
					 dma_unmap_addr(tx_buf, dma_addr0),
					 dma_unmap_len(tx_buf, dma_len0),
					 DMA_TO_DEVICE);
		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}
	} else {
		if (dma_unmap_len(tx_buf, dma_len0)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}

		if (dma_unmap_len(tx_buf, dma_len1)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr1),
				       dma_unmap_len(tx_buf, dma_len1),
				       DMA_TO_DEVICE);
		}
	}

	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
		if (napi)
			napi_consume_skb(tx_buf->skb, napi);
		else
			dev_kfree_skb_any(tx_buf->skb);
	}
	tx_buf->skb = NULL;
}

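/* Record the DMA mapping in the TX buffer bookkeeping; on PDMA the even/odd
 * buffer index selects which half of the two-buffer descriptor is filled.
 */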
static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
			 size_t size, int idx)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
		dma_unmap_len_set(tx_buf, dma_len0, size);
	} else {
		if (idx & 1) {
			txd->txd3 = mapped_addr;
			txd->txd2 |= TX_DMA_PLEN1(size);
			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len1, size);
		} else {
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			txd->txd1 = mapped_addr;
			txd->txd2 = TX_DMA_PLEN0(size);
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, size);
		}
	}
}

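/* Fill a TX DMA descriptor from the mapped buffer info. Three variants cover
 * the v1, v2 and v3 (NETSYS_V3) descriptor layouts; mtk_tx_set_dma_desc()
 * below dispatches on the SoC capabilities.
 */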
static void mtk_tx_set_dma_desc_v1(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *desc = txd;
	u32 data;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_SWC | QID_LOW_BITS(info->qid) | TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data);

	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
	data |= QID_HIGH_BITS(info->qid);
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM;
		/* vlan header offload */
		if (info->vlan)
			data |= TX_DMA_INS_VLAN | info->vlan_tci;
	}

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0x7 << TX_DMA_FPORT_SHIFT);
		data |= 0x4 << TX_DMA_FPORT_SHIFT;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);
}

static void mtk_tx_set_dma_desc_v2(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma_v2 *desc = txd;
	u32 data = 0;

	if (!info->qid && mac->id)
		info->qid = MTK_QDMA_GMAC2_QID;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data);

	data = ((mac->id == MTK_GMAC3_ID) ?
		PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
		data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);

	data = 0;
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO_V2;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM_V2;
	}
	WRITE_ONCE(desc->txd5, data);

	data = 0;
	if (info->first && info->vlan)
		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
	WRITE_ONCE(desc->txd6, data);

	WRITE_ONCE(desc->txd7, 0);
	WRITE_ONCE(desc->txd8, 0);
}

static void mtk_tx_set_dma_desc_v3(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma_v2 *desc = txd;
	u64 addr64 = 0;
	u32 data = 0;

	if (!info->qid && mac->id)
		info->qid = MTK_QDMA_GMAC2_QID;

	addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
		 TX_DMA_SDP1(info->addr) : 0;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data | addr64);

	data = ((mac->id == MTK_GMAC3_ID) ?
		PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
		data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);

	data = 0;
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO_V2;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM_V2;

		if (netdev_uses_dsa(dev))
			data |= TX_DMA_SPTAG_V3;
	}
	WRITE_ONCE(desc->txd5, data);

	data = 0;
	if (info->first && info->vlan)
		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
	WRITE_ONCE(desc->txd6, data);

	WRITE_ONCE(desc->txd7, 0);
	WRITE_ONCE(desc->txd8, 0);
}

static void mtk_tx_set_dma_desc(struct sk_buff *skb, struct net_device *dev, void *txd,
				struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
		mtk_tx_set_dma_desc_v3(skb, dev, txd, info);
	else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		mtk_tx_set_dma_desc_v2(skb, dev, txd, info);
	else
		mtk_tx_set_dma_desc_v1(skb, dev, txd, info);
}

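/* Map the skb head and all fragments, fill the corresponding QDMA/PDMA TX
 * descriptors and buffer records, then kick the hardware ring. On a mapping
 * failure every descriptor claimed so far is unmapped and handed back.
 */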
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_tx_dma_desc_info txd_info = {
		.size = skb_headlen(skb),
		.qid = skb->mark & MTK_QDMA_TX_MASK,
		.gso = gso,
		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
		.vlan = skb_vlan_tag_present(skb),
		.vlan_tci = skb_vlan_tag_get(skb),
		.first = true,
		.last = !skb_is_nonlinear(skb),
	};
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
	struct mtk_tx_buf *itx_buf, *tx_buf;
	int i, n_desc = 1;
	int k = 0;

	itxd = ring->next_free;
	itxd_pdma = qdma_to_pdma(ring, itxd);
	if (itxd == ring->last_free)
		return -ENOMEM;

	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
	memset(itx_buf, 0, sizeof(*itx_buf));

	txd_info.addr = dma_map_single(eth->dev, skb->data, txd_info.size,
				       DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
		return -ENOMEM;

	mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);

	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	itx_buf->flags |= (mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
			  (mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
						      MTK_TX_FLAGS_FPORT2;
	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
		     k++);

	/* TX SG offload */
	txd = itxd;
	txd_pdma = qdma_to_pdma(ring, txd);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool new_desc = true;

			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
			    (i & 0x1)) {
				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
				txd_pdma = qdma_to_pdma(ring, txd);
				if (txd == ring->last_free)
					goto err_dma;

				n_desc++;
			} else {
				new_desc = false;
			}

			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
			txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
					!(frag_size - txd_info.size);
			txd_info.addr = skb_frag_dma_map(eth->dev, frag,
							 offset, txd_info.size,
							 DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
				goto err_dma;

			mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);

			tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
			if (new_desc)
				memset(tx_buf, 0, sizeof(*tx_buf));
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			tx_buf->flags |=
				(mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
				(mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
							    MTK_TX_FLAGS_FPORT2;

			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
				     txd_info.size, k++);

			frag_size -= txd_info.size;
			offset += txd_info.size;
		}
	}

	/* store skb to cleanup */
	itx_buf->skb = skb;

	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		if (k & 0x1)
			txd_pdma->txd2 |= TX_DMA_LS0;
		else
			txd_pdma->txd2 |= TX_DMA_LS1;
	}

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
		    !netdev_xmit_more())
			mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
	} else {
		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
					     ring->dma_size);
		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
	}

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf, false);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;

		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
		itxd_pdma = qdma_to_pdma(ring, itxd);
	} while (itxd != txd);

	return -ENOMEM;
}

static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	skb_frag_t *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
					       MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

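/* ndo_start_xmit: reserve enough descriptors for the (possibly GSO) skb,
 * stash the TSO MSS in the TCP checksum field as the hardware expects, and
 * stop the queue when the ring runs low.
 */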
1583static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1584{
1585 struct mtk_mac *mac = netdev_priv(dev);
1586 struct mtk_eth *eth = mac->hw;
1587 struct mtk_tx_ring *ring = &eth->tx_ring;
1588 struct net_device_stats *stats = &dev->stats;
1589 bool gso = false;
1590 int tx_num;
1591
1592 /* normally we can rely on the stack not calling this more than once,
1593 * however we have 2 queues running on the same ring so we need to lock
1594 * the ring access
1595 */
1596 spin_lock(&eth->page_lock);
1597
1598 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1599 goto drop;
1600
1601 tx_num = mtk_cal_txd_req(skb);
1602 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1603 netif_stop_queue(dev);
1604 netif_err(eth, tx_queued, dev,
1605 "Tx Ring full when queue awake!\n");
1606 spin_unlock(&eth->page_lock);
1607 return NETDEV_TX_BUSY;
1608 }
1609
1610 /* TSO: fill MSS info in tcp checksum field */
1611 if (skb_is_gso(skb)) {
1612 if (skb_cow_head(skb, 0)) {
1613 netif_warn(eth, tx_err, dev,
1614 "GSO expand head fail.\n");
1615 goto drop;
1616 }
1617
1618 if (skb_shinfo(skb)->gso_type &
1619 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1620 gso = true;
1621 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1622 }
1623 }
1624
1625 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1626 goto drop;
1627
1628 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1629 netif_stop_queue(dev);
1630
1631 spin_unlock(&eth->page_lock);
1632
1633 return NETDEV_TX_OK;
1634
1635drop:
1636 spin_unlock(&eth->page_lock);
1637 stats->tx_dropped++;
1638 dev_kfree_skb_any(skb);
1639 return NETDEV_TX_OK;
1640}
1641
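/* With hardware LRO enabled, completed packets can land on any of the LRO
 * or normal RX rings.  Scan the candidate rings and return the first one
 * whose next descriptor has RX_DMA_DONE set, or NULL if nothing is pending.
 */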
1642static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1643{
1644 int i;
1645 struct mtk_rx_ring *ring;
1646 int idx;
1647
developerfd40db22021-04-29 10:08:25 +08001648 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
developere9356982022-07-04 09:03:20 +08001649 struct mtk_rx_dma *rxd;
1650
developer77d03a72021-06-06 00:06:00 +08001651 if (!IS_NORMAL_RING(i) && !IS_HW_LRO_RING(i))
1652 continue;
1653
developerfd40db22021-04-29 10:08:25 +08001654 ring = &eth->rx_ring[i];
1655 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
developere9356982022-07-04 09:03:20 +08001656 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1657 if (rxd->rxd2 & RX_DMA_DONE) {
developerfd40db22021-04-29 10:08:25 +08001658 ring->calc_idx_update = true;
1659 return ring;
1660 }
1661 }
1662
1663 return NULL;
1664}
1665
developer18f46a82021-07-20 21:08:21 +08001666static void mtk_update_rx_cpu_idx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
developerfd40db22021-04-29 10:08:25 +08001667{
developerfd40db22021-04-29 10:08:25 +08001668 int i;
1669
developerfb556ca2021-10-13 10:52:09 +08001670 if (!eth->hwlro)
developerfd40db22021-04-29 10:08:25 +08001671 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
developerfb556ca2021-10-13 10:52:09 +08001672 else {
developerfd40db22021-04-29 10:08:25 +08001673 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1674 ring = &eth->rx_ring[i];
1675 if (ring->calc_idx_update) {
1676 ring->calc_idx_update = false;
1677 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1678 }
1679 }
1680 }
1681}
1682
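/* NAPI RX poll loop.  For every completed descriptor: allocate and map a
 * replacement fragment first (so the ring never loses a buffer if the
 * allocation fails), build an skb around the old buffer, apply the checksum
 * and VLAN offload results carried in the descriptor, stash the HNAT
 * metadata at skb->head, hand the skb to napi_gro_receive() and finally
 * re-arm the descriptor with the new buffer address.
 */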
1683static int mtk_poll_rx(struct napi_struct *napi, int budget,
1684 struct mtk_eth *eth)
1685{
developer18f46a82021-07-20 21:08:21 +08001686 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
1687 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08001688 int idx;
1689 struct sk_buff *skb;
developer089e8852022-09-28 14:43:46 +08001690 u64 addr64 = 0;
developerfd40db22021-04-29 10:08:25 +08001691 u8 *data, *new_data;
developere9356982022-07-04 09:03:20 +08001692 struct mtk_rx_dma_v2 *rxd, trxd;
developerfd40db22021-04-29 10:08:25 +08001693 int done = 0;
1694
developer18f46a82021-07-20 21:08:21 +08001695 if (unlikely(!ring))
1696 goto rx_done;
1697
developerfd40db22021-04-29 10:08:25 +08001698 while (done < budget) {
developer006325c2022-10-06 16:39:50 +08001699 struct net_device *netdev = NULL;
developerfd40db22021-04-29 10:08:25 +08001700 unsigned int pktlen;
1701 dma_addr_t dma_addr;
developere9356982022-07-04 09:03:20 +08001702 int mac = 0;
developerfd40db22021-04-29 10:08:25 +08001703
developer18f46a82021-07-20 21:08:21 +08001704 if (eth->hwlro)
1705 ring = mtk_get_rx_ring(eth);
1706
developerfd40db22021-04-29 10:08:25 +08001707 if (unlikely(!ring))
1708 goto rx_done;
1709
1710 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
developere9356982022-07-04 09:03:20 +08001711 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
developerfd40db22021-04-29 10:08:25 +08001712 data = ring->data[idx];
1713
developere9356982022-07-04 09:03:20 +08001714 if (!mtk_rx_get_desc(eth, &trxd, rxd))
developerfd40db22021-04-29 10:08:25 +08001715 break;
1716
1717		/* find out which mac the packet comes from. values start at 1 */
1718 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1719 mac = 0;
1720 } else {
developer089e8852022-09-28 14:43:46 +08001721 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1722 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
1723 switch (RX_DMA_GET_SPORT_V2(trxd.rxd5)) {
1724 case PSE_GDM1_PORT:
1725 case PSE_GDM2_PORT:
1726 mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
1727 break;
1728 case PSE_GDM3_PORT:
1729 mac = MTK_GMAC3_ID;
1730 break;
1731 }
1732 } else
developerfd40db22021-04-29 10:08:25 +08001733 mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ?
1734 0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
1735 }
1736
1737 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1738 !eth->netdev[mac]))
1739 goto release_desc;
1740
1741 netdev = eth->netdev[mac];
1742
1743 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1744 goto release_desc;
1745
1746 /* alloc new buffer */
1747 new_data = napi_alloc_frag(ring->frag_size);
1748 if (unlikely(!new_data)) {
1749 netdev->stats.rx_dropped++;
1750 goto release_desc;
1751 }
1752 dma_addr = dma_map_single(eth->dev,
1753 new_data + NET_SKB_PAD +
1754 eth->ip_align,
1755 ring->buf_size,
1756 DMA_FROM_DEVICE);
1757 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
1758 skb_free_frag(new_data);
1759 netdev->stats.rx_dropped++;
1760 goto release_desc;
1761 }
1762
developer089e8852022-09-28 14:43:46 +08001763 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
1764 ((u64)(trxd.rxd2 & 0xf)) << 32 : 0;
1765
1766 dma_unmap_single(eth->dev,
1767 (u64)(trxd.rxd1 | addr64),
developerc4671b22021-05-28 13:16:42 +08001768 ring->buf_size, DMA_FROM_DEVICE);
1769
developerfd40db22021-04-29 10:08:25 +08001770 /* receive data */
1771 skb = build_skb(data, ring->frag_size);
1772 if (unlikely(!skb)) {
developerc4671b22021-05-28 13:16:42 +08001773 skb_free_frag(data);
developerfd40db22021-04-29 10:08:25 +08001774 netdev->stats.rx_dropped++;
developerc4671b22021-05-28 13:16:42 +08001775 goto skip_rx;
developerfd40db22021-04-29 10:08:25 +08001776 }
1777 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1778
developerfd40db22021-04-29 10:08:25 +08001779 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1780 skb->dev = netdev;
1781 skb_put(skb, pktlen);
1782
developer089e8852022-09-28 14:43:46 +08001783 if ((MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1) &&
developerfd40db22021-04-29 10:08:25 +08001784 (trxd.rxd4 & eth->rx_dma_l4_valid)) ||
developer089e8852022-09-28 14:43:46 +08001785 (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1) &&
developerfd40db22021-04-29 10:08:25 +08001786 (trxd.rxd3 & eth->rx_dma_l4_valid)))
1787 skb->ip_summed = CHECKSUM_UNNECESSARY;
1788 else
1789 skb_checksum_none_assert(skb);
1790 skb->protocol = eth_type_trans(skb, netdev);
1791
1792 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
developer089e8852022-09-28 14:43:46 +08001793 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1794 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer255bba22021-07-27 15:16:33 +08001795 if (trxd.rxd3 & RX_DMA_VTAG_V2)
developerfd40db22021-04-29 10:08:25 +08001796 __vlan_hwaccel_put_tag(skb,
developer255bba22021-07-27 15:16:33 +08001797 htons(RX_DMA_VPID_V2(trxd.rxd4)),
developerfd40db22021-04-29 10:08:25 +08001798 RX_DMA_VID_V2(trxd.rxd4));
1799 } else {
1800 if (trxd.rxd2 & RX_DMA_VTAG)
1801 __vlan_hwaccel_put_tag(skb,
1802 htons(RX_DMA_VPID(trxd.rxd3)),
1803 RX_DMA_VID(trxd.rxd3));
1804 }
1805
1806			/* If the netdev is attached to a dsa switch, the special
1807			 * tag inserted in the VLAN field by the switch hardware can
1808			 * be offloaded by RX HW VLAN offload. Clear the VLAN
1809			 * information from @skb to avoid an unexpected 8021d
1810			 * handler before the packet enters the dsa framework.
1811 */
1812 if (netdev_uses_dsa(netdev))
1813 __vlan_hwaccel_clear_tag(skb);
1814 }
1815
1816#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
developer089e8852022-09-28 14:43:46 +08001817 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1818 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developerfd40db22021-04-29 10:08:25 +08001819 *(u32 *)(skb->head) = trxd.rxd5;
1820 else
developerfd40db22021-04-29 10:08:25 +08001821 *(u32 *)(skb->head) = trxd.rxd4;
1822
1823 skb_hnat_alg(skb) = 0;
developerfdfe1572021-09-13 16:56:33 +08001824 skb_hnat_filled(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001825 skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
1826
1827 if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
1828 trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
1829 __func__, skb_hnat_reason(skb));
1830 skb->pkt_type = PACKET_HOST;
1831 }
1832
1833		trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x)\n",
1834 __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
1835 skb_hnat_reason(skb), skb_hnat_alg(skb));
1836#endif
developer77d03a72021-06-06 00:06:00 +08001837 if (mtk_hwlro_stats_ebl &&
1838 IS_HW_LRO_RING(ring->ring_no) && eth->hwlro) {
1839 hw_lro_stats_update(ring->ring_no, &trxd);
1840 hw_lro_flush_stats_update(ring->ring_no, &trxd);
1841 }
developerfd40db22021-04-29 10:08:25 +08001842
1843 skb_record_rx_queue(skb, 0);
1844 napi_gro_receive(napi, skb);
1845
developerc4671b22021-05-28 13:16:42 +08001846skip_rx:
developerfd40db22021-04-29 10:08:25 +08001847 ring->data[idx] = new_data;
1848 rxd->rxd1 = (unsigned int)dma_addr;
1849
1850release_desc:
developer089e8852022-09-28 14:43:46 +08001851 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
1852 RX_DMA_SDP1(dma_addr) : 0;
1853
developerfd40db22021-04-29 10:08:25 +08001854 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1855 rxd->rxd2 = RX_DMA_LSO;
1856 else
developer089e8852022-09-28 14:43:46 +08001857 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size) | addr64;
developerfd40db22021-04-29 10:08:25 +08001858
1859 ring->calc_idx = idx;
1860
1861 done++;
1862 }
1863
1864rx_done:
1865 if (done) {
1866 /* make sure that all changes to the dma ring are flushed before
1867 * we continue
1868 */
1869 wmb();
developer18f46a82021-07-20 21:08:21 +08001870 mtk_update_rx_cpu_idx(eth, ring);
developerfd40db22021-04-29 10:08:25 +08001871 }
1872
1873 return done;
1874}
1875
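/* Reclaim completed QDMA TX descriptors.  Walk the hardware linked list
 * from the last CPU position (ring->last_free_ptr) towards the DMA release
 * pointer, unmap each buffer, credit the per-MAC done/byte counters and
 * write the new CPU pointer back to MTK_QTX_CRX_PTR when finished.
 */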
developerfb556ca2021-10-13 10:52:09 +08001876static void mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08001877 unsigned int *done, unsigned int *bytes)
1878{
developere9356982022-07-04 09:03:20 +08001879 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001880 struct mtk_tx_ring *ring = &eth->tx_ring;
1881 struct mtk_tx_dma *desc;
1882 struct sk_buff *skb;
1883 struct mtk_tx_buf *tx_buf;
1884 u32 cpu, dma;
1885
developerc4671b22021-05-28 13:16:42 +08001886 cpu = ring->last_free_ptr;
developerfd40db22021-04-29 10:08:25 +08001887 dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1888
1889 desc = mtk_qdma_phys_to_virt(ring, cpu);
1890
1891 while ((cpu != dma) && budget) {
1892 u32 next_cpu = desc->txd2;
1893 int mac = 0;
1894
1895 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1896 break;
1897
1898 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1899
developere9356982022-07-04 09:03:20 +08001900 tx_buf = mtk_desc_to_tx_buf(ring, desc, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001901 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
developer089e8852022-09-28 14:43:46 +08001902 mac = MTK_GMAC2_ID;
1903 else if (tx_buf->flags & MTK_TX_FLAGS_FPORT2)
1904 mac = MTK_GMAC3_ID;
developerfd40db22021-04-29 10:08:25 +08001905
1906 skb = tx_buf->skb;
1907 if (!skb)
1908 break;
1909
1910 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1911 bytes[mac] += skb->len;
1912 done[mac]++;
1913 budget--;
1914 }
developerc4671b22021-05-28 13:16:42 +08001915 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08001916
1917 ring->last_free = desc;
1918 atomic_inc(&ring->free_count);
1919
1920 cpu = next_cpu;
1921 }
1922
developerc4671b22021-05-28 13:16:42 +08001923 ring->last_free_ptr = cpu;
developerfd40db22021-04-29 10:08:25 +08001924 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
developerfd40db22021-04-29 10:08:25 +08001925}
1926
developerfb556ca2021-10-13 10:52:09 +08001927static void mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08001928 unsigned int *done, unsigned int *bytes)
1929{
1930 struct mtk_tx_ring *ring = &eth->tx_ring;
1931 struct mtk_tx_dma *desc;
1932 struct sk_buff *skb;
1933 struct mtk_tx_buf *tx_buf;
1934 u32 cpu, dma;
1935
1936 cpu = ring->cpu_idx;
1937 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
1938
1939 while ((cpu != dma) && budget) {
1940 tx_buf = &ring->buf[cpu];
1941 skb = tx_buf->skb;
1942 if (!skb)
1943 break;
1944
1945 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1946 bytes[0] += skb->len;
1947 done[0]++;
1948 budget--;
1949 }
1950
developerc4671b22021-05-28 13:16:42 +08001951 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08001952
developere9356982022-07-04 09:03:20 +08001953 desc = ring->dma + cpu * eth->soc->txrx.txd_size;
developerfd40db22021-04-29 10:08:25 +08001954 ring->last_free = desc;
1955 atomic_inc(&ring->free_count);
1956
1957 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
1958 }
1959
1960 ring->cpu_idx = cpu;
developerfd40db22021-04-29 10:08:25 +08001961}
1962
1963static int mtk_poll_tx(struct mtk_eth *eth, int budget)
1964{
1965 struct mtk_tx_ring *ring = &eth->tx_ring;
1966 unsigned int done[MTK_MAX_DEVS];
1967 unsigned int bytes[MTK_MAX_DEVS];
1968 int total = 0, i;
1969
1970 memset(done, 0, sizeof(done));
1971 memset(bytes, 0, sizeof(bytes));
1972
1973 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
developerfb556ca2021-10-13 10:52:09 +08001974 mtk_poll_tx_qdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08001975 else
developerfb556ca2021-10-13 10:52:09 +08001976 mtk_poll_tx_pdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08001977
1978 for (i = 0; i < MTK_MAC_COUNT; i++) {
1979 if (!eth->netdev[i] || !done[i])
1980 continue;
1981 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1982 total += done[i];
1983 }
1984
1985 if (mtk_queue_stopped(eth) &&
1986 (atomic_read(&ring->free_count) > ring->thresh))
1987 mtk_wake_queue(eth);
1988
1989 return total;
1990}
1991
1992static void mtk_handle_status_irq(struct mtk_eth *eth)
1993{
developer8051e042022-04-08 13:26:36 +08001994 u32 status2 = mtk_r32(eth, MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08001995
1996 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
1997 mtk_stats_update(eth);
1998 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
developer8051e042022-04-08 13:26:36 +08001999 MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08002000 }
2001}
2002
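/* TX NAPI handler: acknowledge the TX-done interrupt, reclaim up to
 * @budget descriptors and re-enable the interrupt only once the ring has
 * been fully drained; returning the whole budget keeps NAPI polling.
 */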
2003static int mtk_napi_tx(struct napi_struct *napi, int budget)
2004{
2005 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2006 u32 status, mask;
2007 int tx_done = 0;
2008
2009 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2010 mtk_handle_status_irq(eth);
2011 mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
2012 tx_done = mtk_poll_tx(eth, budget);
2013
2014 if (unlikely(netif_msg_intr(eth))) {
2015 status = mtk_r32(eth, eth->tx_int_status_reg);
2016 mask = mtk_r32(eth, eth->tx_int_mask_reg);
2017 dev_info(eth->dev,
2018 "done tx %d, intr 0x%08x/0x%x\n",
2019 tx_done, status, mask);
2020 }
2021
2022 if (tx_done == budget)
2023 return budget;
2024
2025 status = mtk_r32(eth, eth->tx_int_status_reg);
2026 if (status & MTK_TX_DONE_INT)
2027 return budget;
2028
developerc4671b22021-05-28 13:16:42 +08002029 if (napi_complete(napi))
2030 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developerfd40db22021-04-29 10:08:25 +08002031
2032 return tx_done;
2033}
2034
2035static int mtk_napi_rx(struct napi_struct *napi, int budget)
2036{
developer18f46a82021-07-20 21:08:21 +08002037 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
2038 struct mtk_eth *eth = rx_napi->eth;
2039 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08002040 u32 status, mask;
2041 int rx_done = 0;
2042 int remain_budget = budget;
2043
2044 mtk_handle_status_irq(eth);
2045
2046poll_again:
developer18f46a82021-07-20 21:08:21 +08002047 mtk_w32(eth, MTK_RX_DONE_INT(ring->ring_no), MTK_PDMA_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08002048 rx_done = mtk_poll_rx(napi, remain_budget, eth);
2049
2050 if (unlikely(netif_msg_intr(eth))) {
2051 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
2052 mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
2053 dev_info(eth->dev,
2054 "done rx %d, intr 0x%08x/0x%x\n",
2055 rx_done, status, mask);
2056 }
2057 if (rx_done == remain_budget)
2058 return budget;
2059
2060 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
developer18f46a82021-07-20 21:08:21 +08002061 if (status & MTK_RX_DONE_INT(ring->ring_no)) {
developerfd40db22021-04-29 10:08:25 +08002062 remain_budget -= rx_done;
2063 goto poll_again;
2064 }
developerc4671b22021-05-28 13:16:42 +08002065
2066 if (napi_complete(napi))
developer18f46a82021-07-20 21:08:21 +08002067 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(ring->ring_no));
developerfd40db22021-04-29 10:08:25 +08002068
2069 return rx_done + budget - remain_budget;
2070}
2071
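/* Allocate the TX ring.  QDMA descriptors form a circular list through
 * txd2, each entry holding the physical address of the next one; on SoCs
 * with descriptor SRAM the ring is carved out behind the scratch ring
 * instead of coherent DRAM.  PDMA-only SoCs additionally get a shadow
 * ring->dma_pdma holding the real hardware descriptors.
 */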
2072static int mtk_tx_alloc(struct mtk_eth *eth)
2073{
developere9356982022-07-04 09:03:20 +08002074 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002075 struct mtk_tx_ring *ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08002076 int i, sz = soc->txrx.txd_size;
2077 struct mtk_tx_dma_v2 *txd, *pdma_txd;
developerfd40db22021-04-29 10:08:25 +08002078
2079 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
2080 GFP_KERNEL);
2081 if (!ring->buf)
2082 goto no_tx_mem;
2083
2084 if (!eth->soc->has_sram)
2085 ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08002086 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08002087 else {
developere9356982022-07-04 09:03:20 +08002088 ring->dma = eth->scratch_ring + MTK_DMA_SIZE * sz;
developerfd40db22021-04-29 10:08:25 +08002089 ring->phys = eth->phy_scratch_ring + MTK_DMA_SIZE * sz;
2090 }
2091
2092 if (!ring->dma)
2093 goto no_tx_mem;
2094
2095 for (i = 0; i < MTK_DMA_SIZE; i++) {
2096 int next = (i + 1) % MTK_DMA_SIZE;
2097 u32 next_ptr = ring->phys + next * sz;
2098
developere9356982022-07-04 09:03:20 +08002099 txd = ring->dma + i * sz;
2100 txd->txd2 = next_ptr;
2101 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2102 txd->txd4 = 0;
2103
developer089e8852022-09-28 14:43:46 +08002104 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2105 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developere9356982022-07-04 09:03:20 +08002106 txd->txd5 = 0;
2107 txd->txd6 = 0;
2108 txd->txd7 = 0;
2109 txd->txd8 = 0;
2110 }
developerfd40db22021-04-29 10:08:25 +08002111 }
2112
2113 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
2114 * only as the framework. The real HW descriptors are the PDMA
2115 * descriptors in ring->dma_pdma.
2116 */
2117 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2118 ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08002119 &ring->phys_pdma, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08002120 if (!ring->dma_pdma)
2121 goto no_tx_mem;
2122
2123 for (i = 0; i < MTK_DMA_SIZE; i++) {
developere9356982022-07-04 09:03:20 +08002124			pdma_txd = ring->dma_pdma + i * sz;
2125
2126 pdma_txd->txd2 = TX_DMA_DESP2_DEF;
2127 pdma_txd->txd4 = 0;
developerfd40db22021-04-29 10:08:25 +08002128 }
2129 }
2130
2131 ring->dma_size = MTK_DMA_SIZE;
2132 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
developere9356982022-07-04 09:03:20 +08002133 ring->next_free = ring->dma;
2134 ring->last_free = (void *)txd;
developerc4671b22021-05-28 13:16:42 +08002135 ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
developerfd40db22021-04-29 10:08:25 +08002136 ring->thresh = MAX_SKB_FRAGS;
2137
2138 /* make sure that all changes to the dma ring are flushed before we
2139 * continue
2140 */
2141 wmb();
2142
2143 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2144 mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
2145 mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
2146 mtk_w32(eth,
2147 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
2148 MTK_QTX_CRX_PTR);
developerc4671b22021-05-28 13:16:42 +08002149 mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
developerfd40db22021-04-29 10:08:25 +08002150 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
2151 MTK_QTX_CFG(0));
2152 } else {
2153 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2154 mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
2155 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2156 mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
2157 }
2158
2159 return 0;
2160
2161no_tx_mem:
2162 return -ENOMEM;
2163}
2164
2165static void mtk_tx_clean(struct mtk_eth *eth)
2166{
developere9356982022-07-04 09:03:20 +08002167 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002168 struct mtk_tx_ring *ring = &eth->tx_ring;
2169 int i;
2170
2171 if (ring->buf) {
2172 for (i = 0; i < MTK_DMA_SIZE; i++)
developerc4671b22021-05-28 13:16:42 +08002173 mtk_tx_unmap(eth, &ring->buf[i], false);
developerfd40db22021-04-29 10:08:25 +08002174 kfree(ring->buf);
2175 ring->buf = NULL;
2176 }
2177
2178 if (!eth->soc->has_sram && ring->dma) {
2179 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002180 MTK_DMA_SIZE * soc->txrx.txd_size,
2181 ring->dma, ring->phys);
developerfd40db22021-04-29 10:08:25 +08002182 ring->dma = NULL;
2183 }
2184
2185 if (ring->dma_pdma) {
2186 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002187 MTK_DMA_SIZE * soc->txrx.txd_size,
2188 ring->dma_pdma, ring->phys_pdma);
developerfd40db22021-04-29 10:08:25 +08002189 ring->dma_pdma = NULL;
2190 }
2191}
2192
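/* Allocate one RX ring.  Normal rings may live in SRAM directly behind the
 * TX descriptors, while QDMA and HW LRO rings always use coherent DRAM.
 * Every slot is seeded with a pre-mapped page fragment whose DMA address is
 * written to rxd1 (high address bits go into rxd2 on 8GB-addressing SoCs).
 */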
2193static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2194{
2195 struct mtk_rx_ring *ring;
2196 int rx_data_len, rx_dma_size;
2197 int i;
developer089e8852022-09-28 14:43:46 +08002198 u64 addr64 = 0;
developerfd40db22021-04-29 10:08:25 +08002199
2200 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2201 if (ring_no)
2202 return -EINVAL;
2203 ring = &eth->rx_ring_qdma;
2204 } else {
2205 ring = &eth->rx_ring[ring_no];
2206 }
2207
2208 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2209 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2210 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2211 } else {
2212 rx_data_len = ETH_DATA_LEN;
2213 rx_dma_size = MTK_DMA_SIZE;
2214 }
2215
2216 ring->frag_size = mtk_max_frag_size(rx_data_len);
2217 ring->buf_size = mtk_max_buf_size(ring->frag_size);
2218 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2219 GFP_KERNEL);
2220 if (!ring->data)
2221 return -ENOMEM;
2222
2223 for (i = 0; i < rx_dma_size; i++) {
2224 ring->data[i] = netdev_alloc_frag(ring->frag_size);
2225 if (!ring->data[i])
2226 return -ENOMEM;
2227 }
2228
2229 if ((!eth->soc->has_sram) || (eth->soc->has_sram
2230 && (rx_flag != MTK_RX_FLAGS_NORMAL)))
2231 ring->dma = dma_alloc_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002232 rx_dma_size * eth->soc->txrx.rxd_size,
2233 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08002234 else {
2235 struct mtk_tx_ring *tx_ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08002236 ring->dma = tx_ring->dma + MTK_DMA_SIZE *
2237 eth->soc->txrx.rxd_size * (ring_no + 1);
developer18f46a82021-07-20 21:08:21 +08002238 ring->phys = tx_ring->phys + MTK_DMA_SIZE *
developere9356982022-07-04 09:03:20 +08002239 eth->soc->txrx.rxd_size * (ring_no + 1);
developerfd40db22021-04-29 10:08:25 +08002240 }
2241
2242 if (!ring->dma)
2243 return -ENOMEM;
2244
2245 for (i = 0; i < rx_dma_size; i++) {
developere9356982022-07-04 09:03:20 +08002246 struct mtk_rx_dma_v2 *rxd;
2247
developerfd40db22021-04-29 10:08:25 +08002248 dma_addr_t dma_addr = dma_map_single(eth->dev,
2249 ring->data[i] + NET_SKB_PAD + eth->ip_align,
2250 ring->buf_size,
2251 DMA_FROM_DEVICE);
2252 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
2253 return -ENOMEM;
developere9356982022-07-04 09:03:20 +08002254
2255 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2256 rxd->rxd1 = (unsigned int)dma_addr;
developerfd40db22021-04-29 10:08:25 +08002257
developer089e8852022-09-28 14:43:46 +08002258 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
2259 RX_DMA_SDP1(dma_addr) : 0;
2260
developerfd40db22021-04-29 10:08:25 +08002261 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
developere9356982022-07-04 09:03:20 +08002262 rxd->rxd2 = RX_DMA_LSO;
developerfd40db22021-04-29 10:08:25 +08002263 else
developer089e8852022-09-28 14:43:46 +08002264 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size) | addr64;
developerfd40db22021-04-29 10:08:25 +08002265
developere9356982022-07-04 09:03:20 +08002266 rxd->rxd3 = 0;
2267 rxd->rxd4 = 0;
2268
developer089e8852022-09-28 14:43:46 +08002269 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2270 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developere9356982022-07-04 09:03:20 +08002271 rxd->rxd5 = 0;
2272 rxd->rxd6 = 0;
2273 rxd->rxd7 = 0;
2274 rxd->rxd8 = 0;
developerfd40db22021-04-29 10:08:25 +08002275 }
developerfd40db22021-04-29 10:08:25 +08002276 }
2277 ring->dma_size = rx_dma_size;
2278 ring->calc_idx_update = false;
2279 ring->calc_idx = rx_dma_size - 1;
2280 ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ?
2281 MTK_QRX_CRX_IDX_CFG(ring_no) :
2282 MTK_PRX_CRX_IDX_CFG(ring_no);
developer77d03a72021-06-06 00:06:00 +08002283 ring->ring_no = ring_no;
developerfd40db22021-04-29 10:08:25 +08002284 /* make sure that all changes to the dma ring are flushed before we
2285 * continue
2286 */
2287 wmb();
2288
2289 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2290 mtk_w32(eth, ring->phys, MTK_QRX_BASE_PTR_CFG(ring_no));
2291 mtk_w32(eth, rx_dma_size, MTK_QRX_MAX_CNT_CFG(ring_no));
2292 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2293 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_QDMA_RST_IDX);
2294 } else {
2295 mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
2296 mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
2297 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2298 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
2299 }
2300
2301 return 0;
2302}
2303
2304static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram)
2305{
2306 int i;
developer089e8852022-09-28 14:43:46 +08002307 u64 addr64 = 0;
developerfd40db22021-04-29 10:08:25 +08002308
2309 if (ring->data && ring->dma) {
2310 for (i = 0; i < ring->dma_size; i++) {
developere9356982022-07-04 09:03:20 +08002311 struct mtk_rx_dma *rxd;
2312
developerfd40db22021-04-29 10:08:25 +08002313 if (!ring->data[i])
2314 continue;
developere9356982022-07-04 09:03:20 +08002315
2316 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2317 if (!rxd->rxd1)
developerfd40db22021-04-29 10:08:25 +08002318 continue;
developere9356982022-07-04 09:03:20 +08002319
developer089e8852022-09-28 14:43:46 +08002320 addr64 = (MTK_HAS_CAPS(eth->soc->caps,
2321 MTK_8GB_ADDRESSING)) ?
2322 ((u64)(rxd->rxd2 & 0xf)) << 32 : 0;
2323
developerfd40db22021-04-29 10:08:25 +08002324 dma_unmap_single(eth->dev,
developer089e8852022-09-28 14:43:46 +08002325 (u64)(rxd->rxd1 | addr64),
developerfd40db22021-04-29 10:08:25 +08002326 ring->buf_size,
2327 DMA_FROM_DEVICE);
2328 skb_free_frag(ring->data[i]);
2329 }
2330 kfree(ring->data);
2331 ring->data = NULL;
2332 }
2333
2334 if(in_sram)
2335 return;
2336
2337 if (ring->dma) {
2338 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002339 ring->dma_size * eth->soc->txrx.rxd_size,
developerfd40db22021-04-29 10:08:25 +08002340 ring->dma,
2341 ring->phys);
2342 ring->dma = NULL;
2343 }
2344}
2345
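/* Program the HW LRO engine: put the LRO rings into auto-learn mode, set
 * the aggregation age/time/count limits, the bandwidth threshold and the
 * auto-learn score delta, then enable LRO together with the cpu-reason
 * black list.
 */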
2346static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2347{
2348 int i;
developer77d03a72021-06-06 00:06:00 +08002349 u32 val;
developerfd40db22021-04-29 10:08:25 +08002350 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2351 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2352
2353 /* set LRO rings to auto-learn modes */
2354 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2355
2356 /* validate LRO ring */
2357 ring_ctrl_dw2 |= MTK_RING_VLD;
2358
2359 /* set AGE timer (unit: 20us) */
2360 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2361 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2362
2363 /* set max AGG timer (unit: 20us) */
2364 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2365
2366 /* set max LRO AGG count */
2367 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2368 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2369
developer77d03a72021-06-06 00:06:00 +08002370 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002371 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2372 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2373 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2374 }
2375
2376 /* IPv4 checksum update enable */
2377 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2378
2379 /* switch priority comparison to packet count mode */
2380 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2381
2382 /* bandwidth threshold setting */
2383 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2384
2385 /* auto-learn score delta setting */
developer77d03a72021-06-06 00:06:00 +08002386 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_LRO_ALT_SCORE_DELTA);
developerfd40db22021-04-29 10:08:25 +08002387
2388 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2389 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2390 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2391
developerfd40db22021-04-29 10:08:25 +08002392 /* the minimal remaining room of SDL0 in RXD for lro aggregation */
2393 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2394
developer089e8852022-09-28 14:43:46 +08002395 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2396 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer77d03a72021-06-06 00:06:00 +08002397 val = mtk_r32(eth, MTK_PDMA_RX_CFG);
2398 mtk_w32(eth, val | (MTK_PDMA_LRO_SDL << MTK_RX_CFG_SDL_OFFSET),
2399 MTK_PDMA_RX_CFG);
2400
2401 lro_ctrl_dw0 |= MTK_PDMA_LRO_SDL << MTK_CTRL_DW0_SDL_OFFSET;
2402 } else {
2403 /* set HW LRO mode & the max aggregation count for rx packets */
2404 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2405 }
2406
developerfd40db22021-04-29 10:08:25 +08002407 /* enable HW LRO */
2408 lro_ctrl_dw0 |= MTK_LRO_EN;
2409
developer77d03a72021-06-06 00:06:00 +08002410 /* enable cpu reason black list */
2411 lro_ctrl_dw0 |= MTK_LRO_CRSN_BNW;
2412
developerfd40db22021-04-29 10:08:25 +08002413 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2414 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2415
developer77d03a72021-06-06 00:06:00 +08002416 /* no use PPE cpu reason */
2417 mtk_w32(eth, 0xffffffff, MTK_PDMA_LRO_CTRL_DW1);
2418
developerfd40db22021-04-29 10:08:25 +08002419 return 0;
2420}
2421
2422static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2423{
2424 int i;
2425 u32 val;
2426
2427 /* relinquish lro rings, flush aggregated packets */
developer77d03a72021-06-06 00:06:00 +08002428 mtk_w32(eth, MTK_LRO_RING_RELINGUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
developerfd40db22021-04-29 10:08:25 +08002429
2430 /* wait for relinquishments done */
2431 for (i = 0; i < 10; i++) {
2432 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
developer77d03a72021-06-06 00:06:00 +08002433 if (val & MTK_LRO_RING_RELINGUISH_DONE) {
developer8051e042022-04-08 13:26:36 +08002434 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08002435 continue;
2436 }
2437 break;
2438 }
2439
2440 /* invalidate lro rings */
developer77d03a72021-06-06 00:06:00 +08002441 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
developerfd40db22021-04-29 10:08:25 +08002442 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2443
2444 /* disable HW LRO */
2445 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2446}
2447
2448static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2449{
2450 u32 reg_val;
2451
developer089e8852022-09-28 14:43:46 +08002452 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2453 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer77d03a72021-06-06 00:06:00 +08002454 idx += 1;
2455
developerfd40db22021-04-29 10:08:25 +08002456 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2457
2458 /* invalidate the IP setting */
2459 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2460
2461 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2462
2463 /* validate the IP setting */
2464 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2465}
2466
2467static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2468{
2469 u32 reg_val;
2470
developer089e8852022-09-28 14:43:46 +08002471 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2472 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer77d03a72021-06-06 00:06:00 +08002473 idx += 1;
2474
developerfd40db22021-04-29 10:08:25 +08002475 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2476
2477 /* invalidate the IP setting */
2478 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2479
2480 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2481}
2482
2483static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2484{
2485 int cnt = 0;
2486 int i;
2487
2488 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2489 if (mac->hwlro_ip[i])
2490 cnt++;
2491 }
2492
2493 return cnt;
2494}
2495
2496static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2497 struct ethtool_rxnfc *cmd)
2498{
2499 struct ethtool_rx_flow_spec *fsp =
2500 (struct ethtool_rx_flow_spec *)&cmd->fs;
2501 struct mtk_mac *mac = netdev_priv(dev);
2502 struct mtk_eth *eth = mac->hw;
2503 int hwlro_idx;
2504
2505 if ((fsp->flow_type != TCP_V4_FLOW) ||
2506 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2507 (fsp->location > 1))
2508 return -EINVAL;
2509
2510 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2511 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2512
2513 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2514
2515 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2516
2517 return 0;
2518}
2519
2520static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2521 struct ethtool_rxnfc *cmd)
2522{
2523 struct ethtool_rx_flow_spec *fsp =
2524 (struct ethtool_rx_flow_spec *)&cmd->fs;
2525 struct mtk_mac *mac = netdev_priv(dev);
2526 struct mtk_eth *eth = mac->hw;
2527 int hwlro_idx;
2528
2529 if (fsp->location > 1)
2530 return -EINVAL;
2531
2532 mac->hwlro_ip[fsp->location] = 0;
2533 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2534
2535 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2536
2537 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2538
2539 return 0;
2540}
2541
2542static void mtk_hwlro_netdev_disable(struct net_device *dev)
2543{
2544 struct mtk_mac *mac = netdev_priv(dev);
2545 struct mtk_eth *eth = mac->hw;
2546 int i, hwlro_idx;
2547
2548 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2549 mac->hwlro_ip[i] = 0;
2550 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2551
2552 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2553 }
2554
2555 mac->hwlro_ip_cnt = 0;
2556}
2557
2558static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2559 struct ethtool_rxnfc *cmd)
2560{
2561 struct mtk_mac *mac = netdev_priv(dev);
2562 struct ethtool_rx_flow_spec *fsp =
2563 (struct ethtool_rx_flow_spec *)&cmd->fs;
2564
2565	/* only the TCP destination IPv4 address is meaningful; other fields are ignored */
2566 fsp->flow_type = TCP_V4_FLOW;
2567 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2568 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2569
2570 fsp->h_u.tcp_ip4_spec.ip4src = 0;
2571 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2572 fsp->h_u.tcp_ip4_spec.psrc = 0;
2573 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2574 fsp->h_u.tcp_ip4_spec.pdst = 0;
2575 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2576 fsp->h_u.tcp_ip4_spec.tos = 0;
2577 fsp->m_u.tcp_ip4_spec.tos = 0xff;
2578
2579 return 0;
2580}
2581
2582static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2583 struct ethtool_rxnfc *cmd,
2584 u32 *rule_locs)
2585{
2586 struct mtk_mac *mac = netdev_priv(dev);
2587 int cnt = 0;
2588 int i;
2589
2590 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2591 if (mac->hwlro_ip[i]) {
2592 rule_locs[cnt] = i;
2593 cnt++;
2594 }
2595 }
2596
2597 cmd->rule_cnt = cnt;
2598
2599 return 0;
2600}
2601
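/* Set up receive side scaling: select the IPv4/IPv6 static hash types,
 * program the indirection table and group interrupts, and toggle
 * MTK_RSS_CFG_REQ around the enable bit so the new configuration is
 * latched while the RSS engine is paused.
 */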
developer18f46a82021-07-20 21:08:21 +08002602static int mtk_rss_init(struct mtk_eth *eth)
2603{
2604 u32 val;
2605
developer089e8852022-09-28 14:43:46 +08002606 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1)) {
developer18f46a82021-07-20 21:08:21 +08002607 /* Set RSS rings to PSE modes */
2608 val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(1));
2609 val |= MTK_RING_PSE_MODE;
2610 mtk_w32(eth, val, MTK_LRO_CTRL_DW2_CFG(1));
2611
2612 /* Enable non-lro multiple rx */
2613 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2614 val |= MTK_NON_LRO_MULTI_EN;
2615 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2616
2617		/* Enable RSS delay interrupt support */
2618 val |= MTK_LRO_DLY_INT_EN;
2619 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2620
2621		/* Set the RSS delay interrupt config for ring 1 */
2622 mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX1_DLY_INT);
2623 }
2624
2625 /* Hash Type */
2626 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2627 val |= MTK_RSS_IPV4_STATIC_HASH;
2628 val |= MTK_RSS_IPV6_STATIC_HASH;
2629 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2630
2631 /* Select the size of indirection table */
2632 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW0);
2633 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW1);
2634 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW2);
2635 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW3);
2636 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW4);
2637 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW5);
2638 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW6);
2639 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW7);
2640
2641 /* Pause */
2642 val |= MTK_RSS_CFG_REQ;
2643 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2644
2645	/* Enable RSS */
2646 val |= MTK_RSS_EN;
2647 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2648
2649 /* Release pause */
2650 val &= ~(MTK_RSS_CFG_REQ);
2651 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2652
2653	/* Set the per-RSS GRP INT */
2654 mtk_w32(eth, MTK_RX_DONE_INT(MTK_RSS_RING1), MTK_PDMA_INT_GRP3);
2655
2656 /* Set GRP INT */
2657 mtk_w32(eth, 0x21021030, MTK_FE_INT_GRP);
2658
developer089e8852022-09-28 14:43:46 +08002659 /* Enable RSS delay interrupt */
2660 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_RSS_DELAY_INT);
2661
developer18f46a82021-07-20 21:08:21 +08002662 return 0;
2663}
2664
2665static void mtk_rss_uninit(struct mtk_eth *eth)
2666{
2667 u32 val;
2668
2669 /* Pause */
2670 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2671 val |= MTK_RSS_CFG_REQ;
2672 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2673
2674	/* Disable RSS */
2675 val &= ~(MTK_RSS_EN);
2676 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2677
2678 /* Release pause */
2679 val &= ~(MTK_RSS_CFG_REQ);
2680 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2681}
2682
developerfd40db22021-04-29 10:08:25 +08002683static netdev_features_t mtk_fix_features(struct net_device *dev,
2684 netdev_features_t features)
2685{
2686 if (!(features & NETIF_F_LRO)) {
2687 struct mtk_mac *mac = netdev_priv(dev);
2688 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2689
2690 if (ip_cnt) {
2691 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
2692
2693 features |= NETIF_F_LRO;
2694 }
2695 }
2696
2697 if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
2698 netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n");
2699
2700 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2701 }
2702
2703 return features;
2704}
2705
2706static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2707{
2708 struct mtk_mac *mac = netdev_priv(dev);
2709 struct mtk_eth *eth = mac->hw;
2710 int err = 0;
2711
2712 if (!((dev->features ^ features) & MTK_SET_FEATURES))
2713 return 0;
2714
2715 if (!(features & NETIF_F_LRO))
2716 mtk_hwlro_netdev_disable(dev);
2717
2718 if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
2719 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
2720 else
2721 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
2722
2723 return err;
2724}
2725
2726/* wait for DMA to finish whatever it is doing before we start using it again */
2727static int mtk_dma_busy_wait(struct mtk_eth *eth)
2728{
2729 unsigned long t_start = jiffies;
2730
2731 while (1) {
2732 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2733 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
2734 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2735 return 0;
2736 } else {
2737 if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
2738 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2739 return 0;
2740 }
2741
2742 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
2743 break;
2744 }
2745
2746 dev_err(eth->dev, "DMA init timeout\n");
2747 return -1;
2748}
2749
2750static int mtk_dma_init(struct mtk_eth *eth)
2751{
2752 int err;
2753 u32 i;
2754
2755 if (mtk_dma_busy_wait(eth))
2756 return -EBUSY;
2757
2758 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2759 /* QDMA needs scratch memory for internal reordering of the
2760 * descriptors
2761 */
2762 err = mtk_init_fq_dma(eth);
2763 if (err)
2764 return err;
2765 }
2766
2767 err = mtk_tx_alloc(eth);
2768 if (err)
2769 return err;
2770
2771 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2772 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2773 if (err)
2774 return err;
2775 }
2776
2777 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2778 if (err)
2779 return err;
2780
2781 if (eth->hwlro) {
developer089e8852022-09-28 14:43:46 +08002782 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1)) ? 1 : 4;
developer77d03a72021-06-06 00:06:00 +08002783 for (; i < MTK_MAX_RX_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002784 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2785 if (err)
2786 return err;
2787 }
2788 err = mtk_hwlro_rx_init(eth);
2789 if (err)
2790 return err;
2791 }
2792
developer18f46a82021-07-20 21:08:21 +08002793 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2794 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2795 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_NORMAL);
2796 if (err)
2797 return err;
2798 }
2799 err = mtk_rss_init(eth);
2800 if (err)
2801 return err;
2802 }
2803
developerfd40db22021-04-29 10:08:25 +08002804 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2805 /* Enable random early drop and set drop threshold
2806 * automatically
2807 */
2808 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2809 FC_THRES_MIN, MTK_QDMA_FC_THRES);
2810 mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
2811 }
2812
2813 return 0;
2814}
2815
2816static void mtk_dma_free(struct mtk_eth *eth)
2817{
developere9356982022-07-04 09:03:20 +08002818 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002819 int i;
2820
2821 for (i = 0; i < MTK_MAC_COUNT; i++)
2822 if (eth->netdev[i])
2823 netdev_reset_queue(eth->netdev[i]);
2824	if (!eth->soc->has_sram && eth->scratch_ring) {
2825 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002826 MTK_DMA_SIZE * soc->txrx.txd_size,
2827 eth->scratch_ring, eth->phy_scratch_ring);
developerfd40db22021-04-29 10:08:25 +08002828 eth->scratch_ring = NULL;
2829 eth->phy_scratch_ring = 0;
2830 }
2831 mtk_tx_clean(eth);
developerb3ce86f2022-06-30 13:31:47 +08002832	mtk_rx_clean(eth, &eth->rx_ring[0], eth->soc->has_sram);
developerfd40db22021-04-29 10:08:25 +08002833	mtk_rx_clean(eth, &eth->rx_ring_qdma, 0);
2834
2835 if (eth->hwlro) {
2836 mtk_hwlro_rx_uninit(eth);
developer77d03a72021-06-06 00:06:00 +08002837
developer089e8852022-09-28 14:43:46 +08002838 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1)) ? 1 : 4;
developer77d03a72021-06-06 00:06:00 +08002839 for (; i < MTK_MAX_RX_RING_NUM; i++)
2840 mtk_rx_clean(eth, &eth->rx_ring[i], 0);
developerfd40db22021-04-29 10:08:25 +08002841 }
2842
developer18f46a82021-07-20 21:08:21 +08002843 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2844 mtk_rss_uninit(eth);
2845
2846 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
2847 mtk_rx_clean(eth, &eth->rx_ring[i], 1);
2848 }
2849
developer94008d92021-09-23 09:47:41 +08002850 if (eth->scratch_head) {
2851 kfree(eth->scratch_head);
2852 eth->scratch_head = NULL;
2853 }
developerfd40db22021-04-29 10:08:25 +08002854}
2855
2856static void mtk_tx_timeout(struct net_device *dev)
2857{
2858 struct mtk_mac *mac = netdev_priv(dev);
2859 struct mtk_eth *eth = mac->hw;
2860
2861 eth->netdev[mac->id]->stats.tx_errors++;
2862 netif_err(eth, tx_err, dev,
2863 "transmit timed out\n");
developer8051e042022-04-08 13:26:36 +08002864
2865 if (atomic_read(&reset_lock) == 0)
2866 schedule_work(&eth->pending_work);
developerfd40db22021-04-29 10:08:25 +08002867}
2868
developer18f46a82021-07-20 21:08:21 +08002869static irqreturn_t mtk_handle_irq_rx(int irq, void *priv)
developerfd40db22021-04-29 10:08:25 +08002870{
developer18f46a82021-07-20 21:08:21 +08002871 struct mtk_napi *rx_napi = priv;
2872 struct mtk_eth *eth = rx_napi->eth;
2873 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08002874
developer18f46a82021-07-20 21:08:21 +08002875 if (likely(napi_schedule_prep(&rx_napi->napi))) {
developer18f46a82021-07-20 21:08:21 +08002876 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(ring->ring_no));
developer6bbe70d2021-08-06 09:34:55 +08002877 __napi_schedule(&rx_napi->napi);
developerfd40db22021-04-29 10:08:25 +08002878 }
2879
2880 return IRQ_HANDLED;
2881}
2882
2883static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
2884{
2885 struct mtk_eth *eth = _eth;
2886
2887 if (likely(napi_schedule_prep(&eth->tx_napi))) {
developerfd40db22021-04-29 10:08:25 +08002888 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer6bbe70d2021-08-06 09:34:55 +08002889 __napi_schedule(&eth->tx_napi);
developerfd40db22021-04-29 10:08:25 +08002890 }
2891
2892 return IRQ_HANDLED;
2893}
2894
2895static irqreturn_t mtk_handle_irq(int irq, void *_eth)
2896{
2897 struct mtk_eth *eth = _eth;
2898
developer18f46a82021-07-20 21:08:21 +08002899 if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT(0)) {
2900 if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT(0))
2901 mtk_handle_irq_rx(irq, &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08002902 }
2903 if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
2904 if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
2905 mtk_handle_irq_tx(irq, _eth);
2906 }
2907
2908 return IRQ_HANDLED;
2909}
2910
developera2613e62022-07-01 18:29:37 +08002911static irqreturn_t mtk_handle_irq_fixed_link(int irq, void *_mac)
2912{
2913 struct mtk_mac *mac = _mac;
2914 struct mtk_eth *eth = mac->hw;
2915 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
2916 struct net_device *dev = phylink_priv->dev;
2917 int link_old, link_new;
2918
2919 // clear interrupt status for gpy211
2920 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
2921
2922 link_old = phylink_priv->link;
2923 link_new = _mtk_mdio_read(eth, phylink_priv->phyaddr, MII_BMSR) & BMSR_LSTATUS;
2924
2925 if (link_old != link_new) {
2926 phylink_priv->link = link_new;
2927 if (link_new) {
2928 printk("phylink.%d %s: Link is Up\n", phylink_priv->id, dev->name);
2929 if (dev)
2930 netif_carrier_on(dev);
2931 } else {
2932 printk("phylink.%d %s: Link is Down\n", phylink_priv->id, dev->name);
2933 if (dev)
2934 netif_carrier_off(dev);
2935 }
2936 }
2937
2938 return IRQ_HANDLED;
2939}
2940
developerfd40db22021-04-29 10:08:25 +08002941#ifdef CONFIG_NET_POLL_CONTROLLER
2942static void mtk_poll_controller(struct net_device *dev)
2943{
2944 struct mtk_mac *mac = netdev_priv(dev);
2945 struct mtk_eth *eth = mac->hw;
2946
2947 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002948 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
2949 mtk_handle_irq_rx(eth->irq[2], &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08002950 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08002951 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08002952}
2953#endif
2954
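/* Bring up the DMA engines: allocate all rings through mtk_dma_init() and
 * then enable TX/RX in the QDMA and/or PDMA global configuration, including
 * the extra write-combine and multi-descriptor options on NETSYS v2/v3.
 */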
2955static int mtk_start_dma(struct mtk_eth *eth)
2956{
2957 u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
developer77d03a72021-06-06 00:06:00 +08002958 int val, err;
developerfd40db22021-04-29 10:08:25 +08002959
2960 err = mtk_dma_init(eth);
2961 if (err) {
2962 mtk_dma_free(eth);
2963 return err;
2964 }
2965
2966 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
developer15d0d282021-07-14 16:40:44 +08002967 val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
developer089e8852022-09-28 14:43:46 +08002968 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2969 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer19d84562022-04-21 17:01:06 +08002970 val &= ~MTK_RESV_BUF_MASK;
developerfd40db22021-04-29 10:08:25 +08002971 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002972 val | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08002973 MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
2974 MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
2975 MTK_RESV_BUF | MTK_WCOMP_EN |
2976 MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
developer1ac65932022-07-19 17:23:32 +08002977 MTK_RX_2B_OFFSET, MTK_QDMA_GLO_CFG);
developer19d84562022-04-21 17:01:06 +08002978 }
developerfd40db22021-04-29 10:08:25 +08002979 else
2980 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002981 val | MTK_TX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08002982 MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
2983 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
2984 MTK_RX_BT_32DWORDS,
2985 MTK_QDMA_GLO_CFG);
2986
developer15d0d282021-07-14 16:40:44 +08002987 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
developerfd40db22021-04-29 10:08:25 +08002988 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08002989 val | MTK_RX_DMA_EN | rx_2b_offset |
developerfd40db22021-04-29 10:08:25 +08002990 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
2991 MTK_PDMA_GLO_CFG);
2992 } else {
2993 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2994 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
2995 MTK_PDMA_GLO_CFG);
2996 }
2997
developer089e8852022-09-28 14:43:46 +08002998 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1) && eth->hwlro) {
developer77d03a72021-06-06 00:06:00 +08002999 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
3000 mtk_w32(eth, val | MTK_RX_DMA_LRO_EN, MTK_PDMA_GLO_CFG);
3001 }
3002
developerfd40db22021-04-29 10:08:25 +08003003 return 0;
3004}
3005
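/* Point every GDM forward port at the destination selected by @config
 * (e.g. PDMA or the drop port), keep hardware RX checksum enabled and, for
 * DSA-attached netdevs, preserve the special tag handling.
 */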
developer8051e042022-04-08 13:26:36 +08003006void mtk_gdm_config(struct mtk_eth *eth, u32 config)
developerfd40db22021-04-29 10:08:25 +08003007{
3008 int i;
3009
3010 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3011 return;
3012
3013 for (i = 0; i < MTK_MAC_COUNT; i++) {
3014 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
3015
3016 /* default setup the forward port to send frame to PDMA */
3017 val &= ~0xffff;
3018
3019 /* Enable RX checksum */
3020 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3021
3022 val |= config;
3023
3024 if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
3025 val |= MTK_GDMA_SPECIAL_TAG;
3026
3027 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
3028 }
developerfd40db22021-04-29 10:08:25 +08003029}
3030
3031static int mtk_open(struct net_device *dev)
3032{
3033 struct mtk_mac *mac = netdev_priv(dev);
3034 struct mtk_eth *eth = mac->hw;
developera2613e62022-07-01 18:29:37 +08003035 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
developer18f46a82021-07-20 21:08:21 +08003036 int err, i;
developer3a5969e2022-02-09 15:36:36 +08003037 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08003038
3039 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3040 if (err) {
3041 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3042 err);
3043 return err;
3044 }
3045
3046 /* we run 2 netdevs on the same dma ring so we only bring it up once */
3047 if (!refcount_read(&eth->dma_refcnt)) {
3048 int err = mtk_start_dma(eth);
3049
3050 if (err)
3051 return err;
3052
3053 mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
3054
3055		/* Instruct the CDM to parse the MTK special tag from the CPU */
3056 if (netdev_uses_dsa(dev)) {
3057 u32 val;
3058 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3059 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3060 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3061 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
3062 }
3063
3064 napi_enable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08003065 napi_enable(&eth->rx_napi[0].napi);
developerfd40db22021-04-29 10:08:25 +08003066 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08003067 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
3068
3069 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3070 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3071 napi_enable(&eth->rx_napi[i].napi);
3072 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(i));
3073 }
3074 }
3075
developerfd40db22021-04-29 10:08:25 +08003076 refcount_set(&eth->dma_refcnt, 1);
3077 }
3078 else
3079 refcount_inc(&eth->dma_refcnt);
3080
developera2613e62022-07-01 18:29:37 +08003081 if (phylink_priv->desc) {
3082		/* Notice: this programming sequence is only for the GPY211 single PHY chip.
3083		   If the single PHY chip is not a GPY211, do the following instead:
3084		   1. Contact your single PHY chip vendor and get the details of
3085		      - how to enable the link status change interrupt
3086		      - how to clear the interrupt source
3087 */
3088
3089 // clear interrupt source for gpy211
3090 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
3091
3092 // enable link status change interrupt for gpy211
3093 _mtk_mdio_write(eth, phylink_priv->phyaddr, 0x19, 0x0001);
3094
3095 phylink_priv->dev = dev;
3096
3097 // override dev pointer for single PHY chip 0
3098 if (phylink_priv->id == 0) {
3099 struct net_device *tmp;
3100
3101 tmp = __dev_get_by_name(&init_net, phylink_priv->label);
3102 if (tmp)
3103 phylink_priv->dev = tmp;
3104 else
3105 phylink_priv->dev = NULL;
3106 }
3107 }
3108
developerfd40db22021-04-29 10:08:25 +08003109 phylink_start(mac->phylink);
3110 netif_start_queue(dev);
developer3a5969e2022-02-09 15:36:36 +08003111 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer089e8852022-09-28 14:43:46 +08003112 if (!phy_node && eth->xgmii->regmap_sgmii[mac->id])
3113 regmap_write(eth->xgmii->regmap_sgmii[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
3114
developerfd40db22021-04-29 10:08:25 +08003115 return 0;
3116}
3117
3118static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3119{
3120 u32 val;
3121 int i;
3122
3123 /* stop the dma engine */
3124 spin_lock_bh(&eth->page_lock);
3125 val = mtk_r32(eth, glo_cfg);
3126 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3127 glo_cfg);
3128 spin_unlock_bh(&eth->page_lock);
3129
3130 /* wait for dma stop */
3131 for (i = 0; i < 10; i++) {
3132 val = mtk_r32(eth, glo_cfg);
3133 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
developer8051e042022-04-08 13:26:36 +08003134 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08003135 continue;
3136 }
3137 break;
3138 }
3139}
3140
3141static int mtk_stop(struct net_device *dev)
3142{
3143 struct mtk_mac *mac = netdev_priv(dev);
3144 struct mtk_eth *eth = mac->hw;
developer18f46a82021-07-20 21:08:21 +08003145 int i;
developer3a5969e2022-02-09 15:36:36 +08003146 u32 val = 0;
3147 struct device_node *phy_node;
developerfd40db22021-04-29 10:08:25 +08003148
3149 netif_tx_disable(dev);
3150
developer3a5969e2022-02-09 15:36:36 +08003151 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
3152 if (phy_node) {
3153 val = _mtk_mdio_read(eth, 0, 0);
3154 val |= BMCR_PDOWN;
3155 _mtk_mdio_write(eth, 0, 0, val);
developer089e8852022-09-28 14:43:46 +08003156 } else if (eth->xgmii->regmap_sgmii[mac->id]) {
3157 regmap_read(eth->xgmii->regmap_sgmii[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
developer3a5969e2022-02-09 15:36:36 +08003158 val |= SGMII_PHYA_PWD;
developer089e8852022-09-28 14:43:46 +08003159 regmap_write(eth->xgmii->regmap_sgmii[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
developer3a5969e2022-02-09 15:36:36 +08003160 }
3161
3162 //GMAC RX disable
3163 val = mtk_r32(eth, MTK_MAC_MCR(mac->id));
3164 mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(mac->id));
3165
3166 phylink_stop(mac->phylink);
3167
developerfd40db22021-04-29 10:08:25 +08003168 phylink_disconnect_phy(mac->phylink);
3169
3170 /* only shutdown DMA if this is the last user */
3171 if (!refcount_dec_and_test(&eth->dma_refcnt))
3172 return 0;
3173
3174 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
3175
3176 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08003177 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08003178 napi_disable(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08003179 napi_disable(&eth->rx_napi[0].napi);
3180
3181 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3182 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3183 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(i));
3184 napi_disable(&eth->rx_napi[i].napi);
3185 }
3186 }
developerfd40db22021-04-29 10:08:25 +08003187
3188 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3189 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
3190 mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
3191
3192 mtk_dma_free(eth);
3193
3194 return 0;
3195}
3196
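/* Assert the requested ETHSYS reset bits, poll for up to ~5 seconds until
 * the hardware reflects them, record a cold-reset event, then deassert the
 * bits and allow a short settling delay.
 */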
developer8051e042022-04-08 13:26:36 +08003197void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
developerfd40db22021-04-29 10:08:25 +08003198{
developer8051e042022-04-08 13:26:36 +08003199 u32 val = 0, i = 0;
developerfd40db22021-04-29 10:08:25 +08003200
developerfd40db22021-04-29 10:08:25 +08003201 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
developer8051e042022-04-08 13:26:36 +08003202 reset_bits, reset_bits);
3203
3204 while (i++ < 5000) {
3205 mdelay(1);
3206 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3207
3208 if ((val & reset_bits) == reset_bits) {
3209 mtk_reset_event_update(eth, MTK_EVENT_COLD_CNT);
3210 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3211 reset_bits, ~reset_bits);
3212 break;
3213 }
3214 }
3215
developerfd40db22021-04-29 10:08:25 +08003216 mdelay(10);
3217}
3218
3219static void mtk_clk_disable(struct mtk_eth *eth)
3220{
3221 int clk;
3222
3223 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3224 clk_disable_unprepare(eth->clks[clk]);
3225}
3226
3227static int mtk_clk_enable(struct mtk_eth *eth)
3228{
3229 int clk, ret;
3230
3231 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3232 ret = clk_prepare_enable(eth->clks[clk]);
3233 if (ret)
3234 goto err_disable_clks;
3235 }
3236
3237 return 0;
3238
3239err_disable_clks:
3240 while (--clk >= 0)
3241 clk_disable_unprepare(eth->clks[clk]);
3242
3243 return ret;
3244}
3245
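/* Bind each RX NAPI context to its RX ring and FE interrupt group (group 2
 * for ring 0, 2 + i for the extra rings when RSS is supported).
 */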
developer18f46a82021-07-20 21:08:21 +08003246static int mtk_napi_init(struct mtk_eth *eth)
3247{
3248 struct mtk_napi *rx_napi = &eth->rx_napi[0];
3249 int i;
3250
3251 rx_napi->eth = eth;
3252 rx_napi->rx_ring = &eth->rx_ring[0];
3253 rx_napi->irq_grp_no = 2;
3254
3255 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3256 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
3257 rx_napi = &eth->rx_napi[i];
3258 rx_napi->eth = eth;
3259 rx_napi->rx_ring = &eth->rx_ring[i];
3260 rx_napi->irq_grp_no = 2 + i;
3261 }
3262 }
3263
3264 return 0;
3265}
3266
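/* Bring up the frame engine. When no reset is in flight (reset_lock == 0)
 * this also enables runtime PM and the ethernet clocks; the warm or cold FE
 * reset is then performed and MAC defaults, PSE/GDM thresholds and interrupt
 * grouping are reprogrammed.
 */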
developer8051e042022-04-08 13:26:36 +08003267static int mtk_hw_init(struct mtk_eth *eth, u32 type)
developerfd40db22021-04-29 10:08:25 +08003268{
developer8051e042022-04-08 13:26:36 +08003269 int i, ret = 0;
developerfd40db22021-04-29 10:08:25 +08003270
developer8051e042022-04-08 13:26:36 +08003271 pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
3272 atomic_read(&reset_lock), atomic_read(&force));
developerfd40db22021-04-29 10:08:25 +08003273
developer8051e042022-04-08 13:26:36 +08003274 if (atomic_read(&reset_lock) == 0) {
3275 if (test_and_set_bit(MTK_HW_INIT, &eth->state))
3276 return 0;
developerfd40db22021-04-29 10:08:25 +08003277
developer8051e042022-04-08 13:26:36 +08003278 pm_runtime_enable(eth->dev);
3279 pm_runtime_get_sync(eth->dev);
3280
3281 ret = mtk_clk_enable(eth);
3282 if (ret)
3283 goto err_disable_pm;
3284 }
developerfd40db22021-04-29 10:08:25 +08003285
3286 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3287 ret = device_reset(eth->dev);
3288 if (ret) {
3289 dev_err(eth->dev, "MAC reset failed!\n");
3290 goto err_disable_pm;
3291 }
3292
3293 /* enable interrupt delay for RX */
3294 mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
3295
3296 /* disable delay and normal interrupt */
3297 mtk_tx_irq_disable(eth, ~0);
3298 mtk_rx_irq_disable(eth, ~0);
3299
3300 return 0;
3301 }
3302
developer8051e042022-04-08 13:26:36 +08003303 pr_info("[%s] execute fe %s reset\n", __func__,
3304 (type == MTK_TYPE_WARM_RESET) ? "warm" : "cold");
developer545abf02021-07-15 17:47:01 +08003305
developer8051e042022-04-08 13:26:36 +08003306 if (type == MTK_TYPE_WARM_RESET)
3307 mtk_eth_warm_reset(eth);
developer545abf02021-07-15 17:47:01 +08003308 else
developer8051e042022-04-08 13:26:36 +08003309 mtk_eth_cold_reset(eth);
developer545abf02021-07-15 17:47:01 +08003310
developer089e8852022-09-28 14:43:46 +08003311 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
3312 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer545abf02021-07-15 17:47:01 +08003313 /* Set FE to PDMAv2 if necessary */
developerfd40db22021-04-29 10:08:25 +08003314 mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
developer545abf02021-07-15 17:47:01 +08003315 }
developerfd40db22021-04-29 10:08:25 +08003316
3317 if (eth->pctl) {
3318 /* Set GE2 driving and slew rate */
3319 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3320
3321 /* set GE2 TDSEL */
3322 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3323
3324 /* set GE2 TUNE */
3325 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3326 }
3327
3328 /* Set link down as the default for each GMAC. Each MAC's own MCR is
3329 * set up with the appropriate value when mtk_mac_config() is
3330 * invoked.
3331 */
3332 for (i = 0; i < MTK_MAC_COUNT; i++)
3333 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3334
3335 /* Enable RX VLAN offloading when supported by the hardware */
developer41294e32021-05-07 16:11:23 +08003336 if (eth->soc->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
3337 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3338 else
3339 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
developerfd40db22021-04-29 10:08:25 +08003340
3341 /* enable interrupt delay for RX/TX */
3342 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
3343 mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);
3344
3345 mtk_tx_irq_disable(eth, ~0);
3346 mtk_rx_irq_disable(eth, ~0);
3347
3348 /* FE int grouping */
3349 mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
developer18f46a82021-07-20 21:08:21 +08003350 mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_PDMA_INT_GRP2);
developerfd40db22021-04-29 10:08:25 +08003351 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
developer18f46a82021-07-20 21:08:21 +08003352 mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_QDMA_INT_GRP2);
developer8051e042022-04-08 13:26:36 +08003353 mtk_w32(eth, 0x21021003, MTK_FE_INT_GRP);
developerbe971722022-05-23 13:51:05 +08003354 mtk_w32(eth, MTK_FE_INT_TSO_FAIL |
developer8051e042022-04-08 13:26:36 +08003355 MTK_FE_INT_TSO_ILLEGAL | MTK_FE_INT_TSO_ALIGN |
3356 MTK_FE_INT_RFIFO_OV | MTK_FE_INT_RFIFO_UF, MTK_FE_INT_ENABLE);
developerfd40db22021-04-29 10:08:25 +08003357
developer089e8852022-09-28 14:43:46 +08003358 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer15f760a2022-10-12 15:57:21 +08003359 /* PSE dummy page mechanism */
3360 mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) |
3361 PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ);
3362
developer089e8852022-09-28 14:43:46 +08003363 /* PSE should not drop port1, port8 and port9 packets */
3364 mtk_w32(eth, 0x00000302, PSE_NO_DROP_CFG);
3365
developer15f760a2022-10-12 15:57:21 +08003366 /* PSE should drop p8 and p9 packets when the WDMA Rx ring is full */
3367 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
3368
developer089e8852022-09-28 14:43:46 +08003369 /* GDM and CDM Threshold */
3370 mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
3371 mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
3372
3373 /* The PSE GDM3 MIB counters have incorrect hardware default values,
3374 * so the driver reads (and thereby clears) them beforehand to keep
3375 * ethtool from retrieving wrong MIB values.
3376 */
3377 for (i = 0; i < MTK_STAT_OFFSET; i += 0x4)
3378 mtk_r32(eth,
3379 MTK_GDM1_TX_GBCNT + MTK_STAT_OFFSET * 2 + i);
3380 } else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfef9efd2021-06-16 18:28:09 +08003381 /* PSE Free Queue Flow Control */
3382 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3383
developer459b78e2022-07-01 17:25:10 +08003384 /* PSE should not drop port8 and port9 packets from WDMA Tx */
3385 mtk_w32(eth, 0x00000300, PSE_NO_DROP_CFG);
3386
3387 /* PSE should drop p8 and p9 packets when the WDMA Rx ring is full */
3388 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
developer81bcad32021-07-15 14:14:38 +08003389
developerfef9efd2021-06-16 18:28:09 +08003390 /* PSE config input queue threshold */
developerfd40db22021-04-29 10:08:25 +08003391 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3392 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3393 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3394 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3395 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3396 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3397 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
developerfd5f9152022-01-05 16:29:42 +08003398 mtk_w32(eth, 0x002a000e, PSE_IQ_REV(8));
developerfd40db22021-04-29 10:08:25 +08003399
developerfef9efd2021-06-16 18:28:09 +08003400 /* PSE config output queue threshold */
developerfd40db22021-04-29 10:08:25 +08003401 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3402 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3403 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3404 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3405 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3406 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3407 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3408 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
developerfef9efd2021-06-16 18:28:09 +08003409
3410 /* GDM and CDM Threshold */
3411 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3412 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3413 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3414 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3415 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3416 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
developerfd40db22021-04-29 10:08:25 +08003417 }
3418
3419 return 0;
3420
3421err_disable_pm:
3422 pm_runtime_put_sync(eth->dev);
3423 pm_runtime_disable(eth->dev);
3424
3425 return ret;
3426}
3427
3428static int mtk_hw_deinit(struct mtk_eth *eth)
3429{
3430 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
3431 return 0;
3432
3433 mtk_clk_disable(eth);
3434
3435 pm_runtime_put_sync(eth->dev);
3436 pm_runtime_disable(eth->dev);
3437
3438 return 0;
3439}
3440
3441static int __init mtk_init(struct net_device *dev)
3442{
3443 struct mtk_mac *mac = netdev_priv(dev);
3444 struct mtk_eth *eth = mac->hw;
3445 const char *mac_addr;
3446
3447 mac_addr = of_get_mac_address(mac->of_node);
3448 if (!IS_ERR(mac_addr))
3449 ether_addr_copy(dev->dev_addr, mac_addr);
3450
3451 /* If the MAC address is invalid, use a random MAC address */
3452 if (!is_valid_ether_addr(dev->dev_addr)) {
3453 eth_hw_addr_random(dev);
3454 dev_err(eth->dev, "generated random MAC address %pM\n",
3455 dev->dev_addr);
3456 }
3457
3458 return 0;
3459}
3460
3461static void mtk_uninit(struct net_device *dev)
3462{
3463 struct mtk_mac *mac = netdev_priv(dev);
3464 struct mtk_eth *eth = mac->hw;
3465
3466 phylink_disconnect_phy(mac->phylink);
3467 mtk_tx_irq_disable(eth, ~0);
3468 mtk_rx_irq_disable(eth, ~0);
3469}
3470
3471static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3472{
3473 struct mtk_mac *mac = netdev_priv(dev);
3474
3475 switch (cmd) {
3476 case SIOCGMIIPHY:
3477 case SIOCGMIIREG:
3478 case SIOCSMIIREG:
3479 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
3480 default:
3481 /* default invoke the mtk_eth_dbg handler */
3482 return mtk_do_priv_ioctl(dev, ifr, cmd);
3483 break;
3484 }
3485
3486 return -EOPNOTSUPP;
3487}
3488
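/* FE reset worker: verifies that a reset event is pending, forces the FE
 * ports down, prepares the PPE/FE blocks, notifies the WiFi driver (SER),
 * stops every netdev, performs a warm hardware re-init, reopens the devices
 * and finally re-enables the ports and restarts the DMA monitor timer.
 */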
3489static void mtk_pending_work(struct work_struct *work)
3490{
3491 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
developer8051e042022-04-08 13:26:36 +08003492 struct device_node *phy_node = NULL;
3493 struct mtk_mac *mac = NULL;
3494 int err, i = 0;
developerfd40db22021-04-29 10:08:25 +08003495 unsigned long restart = 0;
developer8051e042022-04-08 13:26:36 +08003496 u32 val = 0;
3497
3498 atomic_inc(&reset_lock);
3499 val = mtk_r32(eth, MTK_FE_INT_STATUS);
3500 if (!mtk_check_reset_event(eth, val)) {
3501 atomic_dec(&reset_lock);
3502 pr_info("[%s] No need to do FE reset !\n", __func__);
3503 return;
3504 }
developerfd40db22021-04-29 10:08:25 +08003505
3506 rtnl_lock();
3507
developer8051e042022-04-08 13:26:36 +08003508 /* Disable FE P3 and P4 */
3509 val = mtk_r32(eth, MTK_FE_GLO_CFG);
3510 val |= MTK_FE_LINK_DOWN_P3;
3511 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3512 val |= MTK_FE_LINK_DOWN_P4;
3513 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3514
3515 /* Adjust PPE configurations to prepare for reset */
3516 mtk_prepare_reset_ppe(eth, 0);
3517 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3518 mtk_prepare_reset_ppe(eth, 1);
3519
3520 /* Adjust FE configurations to prepare for reset */
3521 mtk_prepare_reset_fe(eth);
3522
3523 /* Trigger WiFi SER reset */
3524 call_netdevice_notifiers(MTK_FE_START_RESET, eth->netdev[0]);
3525 rtnl_unlock();
3526 wait_for_completion_timeout(&wait_ser_done, 5000);
3527 rtnl_lock();
developerfd40db22021-04-29 10:08:25 +08003528
3529 while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
3530 cpu_relax();
3531
developer8051e042022-04-08 13:26:36 +08003532 del_timer_sync(&eth->mtk_dma_monitor_timer);
3533 pr_info("[%s] mtk_stop starts !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003534 /* stop all devices to make sure that dma is properly shut down */
3535 for (i = 0; i < MTK_MAC_COUNT; i++) {
3536 if (!eth->netdev[i])
3537 continue;
3538 mtk_stop(eth->netdev[i]);
3539 __set_bit(i, &restart);
3540 }
developer8051e042022-04-08 13:26:36 +08003541 pr_info("[%s] mtk_stop ends !\n", __func__);
3542 mdelay(15);
developerfd40db22021-04-29 10:08:25 +08003543
3544 if (eth->dev->pins)
3545 pinctrl_select_state(eth->dev->pins->p,
3546 eth->dev->pins->default_state);
developer8051e042022-04-08 13:26:36 +08003547
3548 pr_info("[%s] mtk_hw_init starts !\n", __func__);
3549 mtk_hw_init(eth, MTK_TYPE_WARM_RESET);
3550 pr_info("[%s] mtk_hw_init ends !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003551
3552 /* restart DMA and enable IRQs */
3553 for (i = 0; i < MTK_MAC_COUNT; i++) {
3554 if (!test_bit(i, &restart))
3555 continue;
3556 err = mtk_open(eth->netdev[i]);
3557 if (err) {
3558 netif_alert(eth, ifup, eth->netdev[i],
3559 "Driver up/down cycle failed, closing device.\n");
3560 dev_close(eth->netdev[i]);
3561 }
3562 }
3563
developer8051e042022-04-08 13:26:36 +08003564 /* Set the PPE keep-alive (KA) tick select */
3565 mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0, MTK_PPE_TB_CFG(0));
3566 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3567 mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0, MTK_PPE_TB_CFG(1));
3568
3569 /* Enable FE P3 and P4 */
3570 val = mtk_r32(eth, MTK_FE_GLO_CFG);
3571 val &= ~MTK_FE_LINK_DOWN_P3;
3572 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3573 val &= ~MTK_FE_LINK_DOWN_P4;
3574 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3575
3576 /* Power up sgmii */
3577 for (i = 0; i < MTK_MAC_COUNT; i++) {
3578 mac = netdev_priv(eth->netdev[i]);
3579 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer089e8852022-09-28 14:43:46 +08003580 if (!phy_node && eth->xgmii->regmap_sgmii[i]) {
developer8051e042022-04-08 13:26:36 +08003581 mtk_gmac_sgmii_path_setup(eth, i);
developer089e8852022-09-28 14:43:46 +08003582 regmap_write(eth->xgmii->regmap_sgmii[i], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
developer8051e042022-04-08 13:26:36 +08003583 }
3584 }
3585
3586 call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE, eth->netdev[0]);
3587 pr_info("[%s] HNAT reset done !\n", __func__);
developerfd40db22021-04-29 10:08:25 +08003588
developer8051e042022-04-08 13:26:36 +08003589 call_netdevice_notifiers(MTK_FE_RESET_DONE, eth->netdev[0]);
3590 pr_info("[%s] WiFi SER reset done !\n", __func__);
3591
3592 atomic_dec(&reset_lock);
3593 if (atomic_read(&force) > 0)
3594 atomic_dec(&force);
3595
3596 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
3597 eth->mtk_dma_monitor_timer.expires = jiffies;
3598 add_timer(&eth->mtk_dma_monitor_timer);
developerfd40db22021-04-29 10:08:25 +08003599 clear_bit_unlock(MTK_RESETTING, &eth->state);
3600
3601 rtnl_unlock();
3602}
3603
3604static int mtk_free_dev(struct mtk_eth *eth)
3605{
3606 int i;
3607
3608 for (i = 0; i < MTK_MAC_COUNT; i++) {
3609 if (!eth->netdev[i])
3610 continue;
3611 free_netdev(eth->netdev[i]);
3612 }
3613
3614 return 0;
3615}
3616
3617static int mtk_unreg_dev(struct mtk_eth *eth)
3618{
3619 int i;
3620
3621 for (i = 0; i < MTK_MAC_COUNT; i++) {
3622 if (!eth->netdev[i])
3623 continue;
3624 unregister_netdev(eth->netdev[i]);
3625 }
3626
3627 return 0;
3628}
3629
3630static int mtk_cleanup(struct mtk_eth *eth)
3631{
3632 mtk_unreg_dev(eth);
3633 mtk_free_dev(eth);
3634 cancel_work_sync(&eth->pending_work);
3635
3636 return 0;
3637}
3638
3639static int mtk_get_link_ksettings(struct net_device *ndev,
3640 struct ethtool_link_ksettings *cmd)
3641{
3642 struct mtk_mac *mac = netdev_priv(ndev);
3643
3644 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3645 return -EBUSY;
3646
3647 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
3648}
3649
3650static int mtk_set_link_ksettings(struct net_device *ndev,
3651 const struct ethtool_link_ksettings *cmd)
3652{
3653 struct mtk_mac *mac = netdev_priv(ndev);
3654
3655 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3656 return -EBUSY;
3657
3658 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
3659}
3660
3661static void mtk_get_drvinfo(struct net_device *dev,
3662 struct ethtool_drvinfo *info)
3663{
3664 struct mtk_mac *mac = netdev_priv(dev);
3665
3666 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
3667 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
3668 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
3669}
3670
3671static u32 mtk_get_msglevel(struct net_device *dev)
3672{
3673 struct mtk_mac *mac = netdev_priv(dev);
3674
3675 return mac->hw->msg_enable;
3676}
3677
3678static void mtk_set_msglevel(struct net_device *dev, u32 value)
3679{
3680 struct mtk_mac *mac = netdev_priv(dev);
3681
3682 mac->hw->msg_enable = value;
3683}
3684
3685static int mtk_nway_reset(struct net_device *dev)
3686{
3687 struct mtk_mac *mac = netdev_priv(dev);
3688
3689 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3690 return -EBUSY;
3691
3692 if (!mac->phylink)
3693 return -ENOTSUPP;
3694
3695 return phylink_ethtool_nway_reset(mac->phylink);
3696}
3697
3698static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3699{
3700 int i;
3701
3702 switch (stringset) {
3703 case ETH_SS_STATS:
3704 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
3705 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
3706 data += ETH_GSTRING_LEN;
3707 }
3708 break;
3709 }
3710}
3711
3712static int mtk_get_sset_count(struct net_device *dev, int sset)
3713{
3714 switch (sset) {
3715 case ETH_SS_STATS:
3716 return ARRAY_SIZE(mtk_ethtool_stats);
3717 default:
3718 return -EOPNOTSUPP;
3719 }
3720}
3721
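/* Snapshot the per-MAC hardware counters into the ethtool data array under
 * the u64_stats seqcount retry loop.
 */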
3722static void mtk_get_ethtool_stats(struct net_device *dev,
3723 struct ethtool_stats *stats, u64 *data)
3724{
3725 struct mtk_mac *mac = netdev_priv(dev);
3726 struct mtk_hw_stats *hwstats = mac->hw_stats;
3727 u64 *data_src, *data_dst;
3728 unsigned int start;
3729 int i;
3730
3731 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3732 return;
3733
3734 if (netif_running(dev) && netif_device_present(dev)) {
3735 if (spin_trylock_bh(&hwstats->stats_lock)) {
3736 mtk_stats_update_mac(mac);
3737 spin_unlock_bh(&hwstats->stats_lock);
3738 }
3739 }
3740
3741 data_src = (u64 *)hwstats;
3742
3743 do {
3744 data_dst = data;
3745 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
3746
3747 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
3748 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
3749 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
3750}
3751
3752static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
3753 u32 *rule_locs)
3754{
3755 int ret = -EOPNOTSUPP;
3756
3757 switch (cmd->cmd) {
3758 case ETHTOOL_GRXRINGS:
3759 if (dev->hw_features & NETIF_F_LRO) {
3760 cmd->data = MTK_MAX_RX_RING_NUM;
3761 ret = 0;
3762 }
3763 break;
3764 case ETHTOOL_GRXCLSRLCNT:
3765 if (dev->hw_features & NETIF_F_LRO) {
3766 struct mtk_mac *mac = netdev_priv(dev);
3767
3768 cmd->rule_cnt = mac->hwlro_ip_cnt;
3769 ret = 0;
3770 }
3771 break;
3772 case ETHTOOL_GRXCLSRULE:
3773 if (dev->hw_features & NETIF_F_LRO)
3774 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
3775 break;
3776 case ETHTOOL_GRXCLSRLALL:
3777 if (dev->hw_features & NETIF_F_LRO)
3778 ret = mtk_hwlro_get_fdir_all(dev, cmd,
3779 rule_locs);
3780 break;
3781 default:
3782 break;
3783 }
3784
3785 return ret;
3786}
3787
3788static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3789{
3790 int ret = -EOPNOTSUPP;
3791
3792 switch (cmd->cmd) {
3793 case ETHTOOL_SRXCLSRLINS:
3794 if (dev->hw_features & NETIF_F_LRO)
3795 ret = mtk_hwlro_add_ipaddr(dev, cmd);
3796 break;
3797 case ETHTOOL_SRXCLSRLDEL:
3798 if (dev->hw_features & NETIF_F_LRO)
3799 ret = mtk_hwlro_del_ipaddr(dev, cmd);
3800 break;
3801 default:
3802 break;
3803 }
3804
3805 return ret;
3806}
3807
developer6c5cbb52022-08-12 11:37:45 +08003808static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
3809{
3810 struct mtk_mac *mac = netdev_priv(dev);
3811
3812 phylink_ethtool_get_pauseparam(mac->phylink, pause);
3813}
3814
3815static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
3816{
3817 struct mtk_mac *mac = netdev_priv(dev);
3818
3819 return phylink_ethtool_set_pauseparam(mac->phylink, pause);
3820}
3821
developerfd40db22021-04-29 10:08:25 +08003822static const struct ethtool_ops mtk_ethtool_ops = {
3823 .get_link_ksettings = mtk_get_link_ksettings,
3824 .set_link_ksettings = mtk_set_link_ksettings,
3825 .get_drvinfo = mtk_get_drvinfo,
3826 .get_msglevel = mtk_get_msglevel,
3827 .set_msglevel = mtk_set_msglevel,
3828 .nway_reset = mtk_nway_reset,
3829 .get_link = ethtool_op_get_link,
3830 .get_strings = mtk_get_strings,
3831 .get_sset_count = mtk_get_sset_count,
3832 .get_ethtool_stats = mtk_get_ethtool_stats,
3833 .get_rxnfc = mtk_get_rxnfc,
3834 .set_rxnfc = mtk_set_rxnfc,
developer6c5cbb52022-08-12 11:37:45 +08003835 .get_pauseparam = mtk_get_pauseparam,
3836 .set_pauseparam = mtk_set_pauseparam,
developerfd40db22021-04-29 10:08:25 +08003837};
3838
3839static const struct net_device_ops mtk_netdev_ops = {
3840 .ndo_init = mtk_init,
3841 .ndo_uninit = mtk_uninit,
3842 .ndo_open = mtk_open,
3843 .ndo_stop = mtk_stop,
3844 .ndo_start_xmit = mtk_start_xmit,
3845 .ndo_set_mac_address = mtk_set_mac_address,
3846 .ndo_validate_addr = eth_validate_addr,
3847 .ndo_do_ioctl = mtk_do_ioctl,
3848 .ndo_tx_timeout = mtk_tx_timeout,
3849 .ndo_get_stats64 = mtk_get_stats64,
3850 .ndo_fix_features = mtk_fix_features,
3851 .ndo_set_features = mtk_set_features,
3852#ifdef CONFIG_NET_POLL_CONTROLLER
3853 .ndo_poll_controller = mtk_poll_controller,
3854#endif
3855};
3856
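/* Parse one "mediatek,eth-mac" DT node: allocate the netdev, read the MAC
 * id, phy-mode and optional mac-type/fixed-link properties, create the
 * phylink instance and hook up the netdev/ethtool operations.
 */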
3857static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
3858{
3859 const __be32 *_id = of_get_property(np, "reg", NULL);
developer30e13e72022-11-03 10:21:24 +08003860 const char *label;
developerfd40db22021-04-29 10:08:25 +08003861 struct phylink *phylink;
developer30e13e72022-11-03 10:21:24 +08003862 int mac_type, phy_mode, id, err;
developerfd40db22021-04-29 10:08:25 +08003863 struct mtk_mac *mac;
developera2613e62022-07-01 18:29:37 +08003864 struct mtk_phylink_priv *phylink_priv;
3865 struct fwnode_handle *fixed_node;
3866 struct gpio_desc *desc;
developerfd40db22021-04-29 10:08:25 +08003867
3868 if (!_id) {
3869 dev_err(eth->dev, "missing mac id\n");
3870 return -EINVAL;
3871 }
3872
3873 id = be32_to_cpup(_id);
developerfb556ca2021-10-13 10:52:09 +08003874 if (id < 0 || id >= MTK_MAC_COUNT) {
developerfd40db22021-04-29 10:08:25 +08003875 dev_err(eth->dev, "%d is not a valid mac id\n", id);
3876 return -EINVAL;
3877 }
3878
3879 if (eth->netdev[id]) {
3880 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
3881 return -EINVAL;
3882 }
3883
3884 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
3885 if (!eth->netdev[id]) {
3886 dev_err(eth->dev, "alloc_etherdev failed\n");
3887 return -ENOMEM;
3888 }
3889 mac = netdev_priv(eth->netdev[id]);
3890 eth->mac[id] = mac;
3891 mac->id = id;
3892 mac->hw = eth;
3893 mac->of_node = np;
3894
3895 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
3896 mac->hwlro_ip_cnt = 0;
3897
3898 mac->hw_stats = devm_kzalloc(eth->dev,
3899 sizeof(*mac->hw_stats),
3900 GFP_KERNEL);
3901 if (!mac->hw_stats) {
3902 dev_err(eth->dev, "failed to allocate counter memory\n");
3903 err = -ENOMEM;
3904 goto free_netdev;
3905 }
3906 spin_lock_init(&mac->hw_stats->stats_lock);
3907 u64_stats_init(&mac->hw_stats->syncp);
3908 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
3909
3910 /* phylink create */
3911 phy_mode = of_get_phy_mode(np);
3912 if (phy_mode < 0) {
3913 dev_err(eth->dev, "incorrect phy-mode\n");
3914 err = -EINVAL;
3915 goto free_netdev;
3916 }
3917
3918 /* mac config is not set */
3919 mac->interface = PHY_INTERFACE_MODE_NA;
3920 mac->mode = MLO_AN_PHY;
3921 mac->speed = SPEED_UNKNOWN;
3922
3923 mac->phylink_config.dev = &eth->netdev[id]->dev;
3924 mac->phylink_config.type = PHYLINK_NETDEV;
3925
developer30e13e72022-11-03 10:21:24 +08003926 mac->type = 0;
3927 if (!of_property_read_string(np, "mac-type", &label)) {
3928 for (mac_type = 0; mac_type < MTK_GDM_TYPE_MAX; mac_type++) {
3929 if (!strcasecmp(label, gdm_type(mac_type)))
3930 break;
3931 }
3932
3933 switch (mac_type) {
3934 case 0:
3935 mac->type = MTK_GDM_TYPE;
3936 break;
3937 case 1:
3938 mac->type = MTK_XGDM_TYPE;
3939 break;
3940 default:
3941 dev_warn(eth->dev, "incorrect mac-type\n");
3942 break;
3943 };
3944 }
developer089e8852022-09-28 14:43:46 +08003945
developerfd40db22021-04-29 10:08:25 +08003946 phylink = phylink_create(&mac->phylink_config,
3947 of_fwnode_handle(mac->of_node),
3948 phy_mode, &mtk_phylink_ops);
3949 if (IS_ERR(phylink)) {
3950 err = PTR_ERR(phylink);
3951 goto free_netdev;
3952 }
3953
3954 mac->phylink = phylink;
3955
developera2613e62022-07-01 18:29:37 +08003956 fixed_node = fwnode_get_named_child_node(of_fwnode_handle(mac->of_node),
3957 "fixed-link");
3958 if (fixed_node) {
3959 desc = fwnode_get_named_gpiod(fixed_node, "link-gpio",
3960 0, GPIOD_IN, "?");
3961 if (!IS_ERR(desc)) {
3962 struct device_node *phy_np;
3963 const char *label;
3964 int irq, phyaddr;
3965
3966 phylink_priv = &mac->phylink_priv;
3967
3968 phylink_priv->desc = desc;
3969 phylink_priv->id = id;
3970 phylink_priv->link = -1;
3971
3972 irq = gpiod_to_irq(desc);
3973 if (irq > 0) {
3974 devm_request_irq(eth->dev, irq, mtk_handle_irq_fixed_link,
3975 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
3976 "ethernet:fixed link", mac);
3977 }
3978
3979 if (!of_property_read_string(to_of_node(fixed_node), "label", &label))
3980 strcpy(phylink_priv->label, label);
3981
3982 phy_np = of_parse_phandle(to_of_node(fixed_node), "phy-handle", 0);
3983 if (phy_np) {
3984 if (!of_property_read_u32(phy_np, "reg", &phyaddr))
3985 phylink_priv->phyaddr = phyaddr;
3986 }
3987 }
3988 fwnode_handle_put(fixed_node);
3989 }
3990
developerfd40db22021-04-29 10:08:25 +08003991 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
3992 eth->netdev[id]->watchdog_timeo = 5 * HZ;
3993 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
3994 eth->netdev[id]->base_addr = (unsigned long)eth->base;
3995
3996 eth->netdev[id]->hw_features = eth->soc->hw_features;
3997 if (eth->hwlro)
3998 eth->netdev[id]->hw_features |= NETIF_F_LRO;
3999
4000 eth->netdev[id]->vlan_features = eth->soc->hw_features &
4001 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
4002 eth->netdev[id]->features |= eth->soc->hw_features;
4003 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4004
4005 eth->netdev[id]->irq = eth->irq[0];
4006 eth->netdev[id]->dev.of_node = np;
4007
4008 return 0;
4009
4010free_netdev:
4011 free_netdev(eth->netdev[id]);
4012 return err;
4013}
4014
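/* Platform probe: map the register (and optional SRAM) resources, look up
 * the syscon regmaps, IRQs and clocks, initialize the SGMII/USXGMII blocks,
 * perform a cold hardware init, register the per-MAC netdevs, NAPI contexts
 * and IRQ handlers, and set up debugfs and the DMA monitor timer.
 */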
4015static int mtk_probe(struct platform_device *pdev)
4016{
4017 struct device_node *mac_np;
4018 struct mtk_eth *eth;
4019 int err, i;
4020
4021 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4022 if (!eth)
4023 return -ENOMEM;
4024
4025 eth->soc = of_device_get_match_data(&pdev->dev);
4026
4027 eth->dev = &pdev->dev;
4028 eth->base = devm_platform_ioremap_resource(pdev, 0);
4029 if (IS_ERR(eth->base))
4030 return PTR_ERR(eth->base);
4031
developer089e8852022-09-28 14:43:46 +08004032 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
4033 eth->sram_base = devm_platform_ioremap_resource(pdev, 1);
4034 if (IS_ERR(eth->sram_base))
4035 return PTR_ERR(eth->sram_base);
4036 }
4037
developerfd40db22021-04-29 10:08:25 +08004038 if(eth->soc->has_sram) {
4039 struct resource *res;
4040 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
developer4c32b7a2021-11-13 16:46:43 +08004041 if (unlikely(!res))
4042 return -EINVAL;
developerfd40db22021-04-29 10:08:25 +08004043 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
4044 }
4045
4046 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4047 eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
4048 eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
4049 } else {
4050 eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
4051 eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
4052 }
4053
4054 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4055 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
4056 eth->ip_align = NET_IP_ALIGN;
4057 } else {
developer089e8852022-09-28 14:43:46 +08004058 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
4059 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developerfd40db22021-04-29 10:08:25 +08004060 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_V2;
4061 else
4062 eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
4063 }
4064
developer089e8852022-09-28 14:43:46 +08004065 if (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) {
4066 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
4067 if (!err) {
4068 err = dma_set_coherent_mask(&pdev->dev,
4069 DMA_BIT_MASK(36));
4070 if (err) {
4071 dev_err(&pdev->dev, "Wrong DMA config\n");
4072 return -EINVAL;
4073 }
4074 }
4075 }
4076
developerfd40db22021-04-29 10:08:25 +08004077 spin_lock_init(&eth->page_lock);
4078 spin_lock_init(&eth->tx_irq_lock);
4079 spin_lock_init(&eth->rx_irq_lock);
developerd82e8372022-02-09 15:00:09 +08004080 spin_lock_init(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +08004081
4082 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4083 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4084 "mediatek,ethsys");
4085 if (IS_ERR(eth->ethsys)) {
4086 dev_err(&pdev->dev, "no ethsys regmap found\n");
4087 return PTR_ERR(eth->ethsys);
4088 }
4089 }
4090
4091 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4092 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4093 "mediatek,infracfg");
4094 if (IS_ERR(eth->infra)) {
4095 dev_err(&pdev->dev, "no infracfg regmap found\n");
4096 return PTR_ERR(eth->infra);
4097 }
4098 }
4099
4100 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
developer089e8852022-09-28 14:43:46 +08004101 eth->xgmii = devm_kzalloc(eth->dev, sizeof(*eth->xgmii),
developerfd40db22021-04-29 10:08:25 +08004102 GFP_KERNEL);
developer089e8852022-09-28 14:43:46 +08004103 if (!eth->xgmii)
developerfd40db22021-04-29 10:08:25 +08004104 return -ENOMEM;
4105
developer089e8852022-09-28 14:43:46 +08004106 eth->xgmii->eth = eth;
4107 err = mtk_sgmii_init(eth->xgmii, pdev->dev.of_node,
developerfd40db22021-04-29 10:08:25 +08004108 eth->soc->ana_rgc3);
4109
developer089e8852022-09-28 14:43:46 +08004110 if (err)
4111 return err;
4112 }
4113
4114 if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
4115 err = mtk_usxgmii_init(eth->xgmii, pdev->dev.of_node);
4116 if (err)
4117 return err;
4118
4119 err = mtk_xfi_pextp_init(eth->xgmii, pdev->dev.of_node);
4120 if (err)
4121 return err;
4122
4123 err = mtk_xfi_pll_init(eth->xgmii, pdev->dev.of_node);
4124 if (err)
4125 return err;
4126
4127 err = mtk_toprgu_init(eth, pdev->dev.of_node);
developerfd40db22021-04-29 10:08:25 +08004128 if (err)
4129 return err;
4130 }
4131
4132 if (eth->soc->required_pctl) {
4133 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4134 "mediatek,pctl");
4135 if (IS_ERR(eth->pctl)) {
4136 dev_err(&pdev->dev, "no pctl regmap found\n");
4137 return PTR_ERR(eth->pctl);
4138 }
4139 }
4140
developer18f46a82021-07-20 21:08:21 +08004141 for (i = 0; i < MTK_MAX_IRQ_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08004142 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4143 eth->irq[i] = eth->irq[0];
4144 else
4145 eth->irq[i] = platform_get_irq(pdev, i);
4146 if (eth->irq[i] < 0) {
4147 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4148 return -ENXIO;
4149 }
4150 }
4151
4152 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4153 eth->clks[i] = devm_clk_get(eth->dev,
4154 mtk_clks_source_name[i]);
4155 if (IS_ERR(eth->clks[i])) {
4156 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
4157 return -EPROBE_DEFER;
4158 if (eth->soc->required_clks & BIT(i)) {
4159 dev_err(&pdev->dev, "clock %s not found\n",
4160 mtk_clks_source_name[i]);
4161 return -EINVAL;
4162 }
4163 eth->clks[i] = NULL;
4164 }
4165 }
4166
4167 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4168 INIT_WORK(&eth->pending_work, mtk_pending_work);
4169
developer8051e042022-04-08 13:26:36 +08004170 err = mtk_hw_init(eth, MTK_TYPE_COLD_RESET);
developerfd40db22021-04-29 10:08:25 +08004171 if (err)
4172 return err;
4173
4174 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4175
4176 for_each_child_of_node(pdev->dev.of_node, mac_np) {
4177 if (!of_device_is_compatible(mac_np,
4178 "mediatek,eth-mac"))
4179 continue;
4180
4181 if (!of_device_is_available(mac_np))
4182 continue;
4183
4184 err = mtk_add_mac(eth, mac_np);
4185 if (err) {
4186 of_node_put(mac_np);
4187 goto err_deinit_hw;
4188 }
4189 }
4190
developer18f46a82021-07-20 21:08:21 +08004191 err = mtk_napi_init(eth);
4192 if (err)
4193 goto err_free_dev;
4194
developerfd40db22021-04-29 10:08:25 +08004195 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4196 err = devm_request_irq(eth->dev, eth->irq[0],
4197 mtk_handle_irq, 0,
4198 dev_name(eth->dev), eth);
4199 } else {
4200 err = devm_request_irq(eth->dev, eth->irq[1],
4201 mtk_handle_irq_tx, 0,
4202 dev_name(eth->dev), eth);
4203 if (err)
4204 goto err_free_dev;
4205
4206 err = devm_request_irq(eth->dev, eth->irq[2],
4207 mtk_handle_irq_rx, 0,
developer18f46a82021-07-20 21:08:21 +08004208 dev_name(eth->dev), &eth->rx_napi[0]);
4209 if (err)
4210 goto err_free_dev;
4211
developer793f7b42022-05-20 13:54:51 +08004212 if (MTK_MAX_IRQ_NUM > 3) {
4213 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
4214 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
4215 err = devm_request_irq(eth->dev,
4216 eth->irq[2 + i],
4217 mtk_handle_irq_rx, 0,
4218 dev_name(eth->dev),
4219 &eth->rx_napi[i]);
4220 if (err)
4221 goto err_free_dev;
4222 }
4223 } else {
4224 err = devm_request_irq(eth->dev, eth->irq[3],
4225 mtk_handle_fe_irq, 0,
4226 dev_name(eth->dev), eth);
developer18f46a82021-07-20 21:08:21 +08004227 if (err)
4228 goto err_free_dev;
4229 }
4230 }
developerfd40db22021-04-29 10:08:25 +08004231 }
developer8051e042022-04-08 13:26:36 +08004232
developerfd40db22021-04-29 10:08:25 +08004233 if (err)
4234 goto err_free_dev;
4235
4236 /* No MT7628/88 support yet */
4237 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4238 err = mtk_mdio_init(eth);
4239 if (err)
4240 goto err_free_dev;
4241 }
4242
4243 for (i = 0; i < MTK_MAX_DEVS; i++) {
4244 if (!eth->netdev[i])
4245 continue;
4246
4247 err = register_netdev(eth->netdev[i]);
4248 if (err) {
4249 dev_err(eth->dev, "error bringing up device\n");
4250 goto err_deinit_mdio;
4251 } else
4252 netif_info(eth, probe, eth->netdev[i],
4253 "mediatek frame engine at 0x%08lx, irq %d\n",
4254 eth->netdev[i]->base_addr, eth->irq[0]);
4255 }
4256
4257 /* we run 2 devices on the same DMA ring so we need a dummy device
4258 * for NAPI to work
4259 */
4260 init_dummy_netdev(&eth->dummy_dev);
4261 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
4262 MTK_NAPI_WEIGHT);
developer18f46a82021-07-20 21:08:21 +08004263 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[0].napi, mtk_napi_rx,
developerfd40db22021-04-29 10:08:25 +08004264 MTK_NAPI_WEIGHT);
4265
developer18f46a82021-07-20 21:08:21 +08004266 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
4267 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
4268 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[i].napi,
4269 mtk_napi_rx, MTK_NAPI_WEIGHT);
4270 }
4271
developerfd40db22021-04-29 10:08:25 +08004272 mtketh_debugfs_init(eth);
4273 debug_proc_init(eth);
4274
4275 platform_set_drvdata(pdev, eth);
4276
developer8051e042022-04-08 13:26:36 +08004277 register_netdevice_notifier(&mtk_eth_netdevice_nb);
developer793f7b42022-05-20 13:54:51 +08004278#if defined(CONFIG_MEDIATEK_NETSYS_V2)
developer8051e042022-04-08 13:26:36 +08004279 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
4280 eth->mtk_dma_monitor_timer.expires = jiffies;
4281 add_timer(&eth->mtk_dma_monitor_timer);
developer793f7b42022-05-20 13:54:51 +08004282#endif
developer8051e042022-04-08 13:26:36 +08004283
developerfd40db22021-04-29 10:08:25 +08004284 return 0;
4285
4286err_deinit_mdio:
4287 mtk_mdio_cleanup(eth);
4288err_free_dev:
4289 mtk_free_dev(eth);
4290err_deinit_hw:
4291 mtk_hw_deinit(eth);
4292
4293 return err;
4294}
4295
4296static int mtk_remove(struct platform_device *pdev)
4297{
4298 struct mtk_eth *eth = platform_get_drvdata(pdev);
4299 struct mtk_mac *mac;
4300 int i;
4301
4302 /* stop all devices to make sure that dma is properly shut down */
4303 for (i = 0; i < MTK_MAC_COUNT; i++) {
4304 if (!eth->netdev[i])
4305 continue;
4306 mtk_stop(eth->netdev[i]);
4307 mac = netdev_priv(eth->netdev[i]);
4308 phylink_disconnect_phy(mac->phylink);
4309 }
4310
4311 mtk_hw_deinit(eth);
4312
4313 netif_napi_del(&eth->tx_napi);
developer18f46a82021-07-20 21:08:21 +08004314 netif_napi_del(&eth->rx_napi[0].napi);
4315
4316 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
4317 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
4318 netif_napi_del(&eth->rx_napi[i].napi);
4319 }
4320
developerfd40db22021-04-29 10:08:25 +08004321 mtk_cleanup(eth);
4322 mtk_mdio_cleanup(eth);
developer8051e042022-04-08 13:26:36 +08004323 unregister_netdevice_notifier(&mtk_eth_netdevice_nb);
4324 del_timer_sync(&eth->mtk_dma_monitor_timer);
developerfd40db22021-04-29 10:08:25 +08004325
4326 return 0;
4327}
4328
4329static const struct mtk_soc_data mt2701_data = {
4330 .caps = MT7623_CAPS | MTK_HWLRO,
4331 .hw_features = MTK_HW_FEATURES,
4332 .required_clks = MT7623_CLKS_BITMAP,
4333 .required_pctl = true,
4334 .has_sram = false,
developere9356982022-07-04 09:03:20 +08004335 .txrx = {
4336 .txd_size = sizeof(struct mtk_tx_dma),
4337 .rxd_size = sizeof(struct mtk_rx_dma),
4338 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4339 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
4340 },
developerfd40db22021-04-29 10:08:25 +08004341};
4342
4343static const struct mtk_soc_data mt7621_data = {
4344 .caps = MT7621_CAPS,
4345 .hw_features = MTK_HW_FEATURES,
4346 .required_clks = MT7621_CLKS_BITMAP,
4347 .required_pctl = false,
4348 .has_sram = false,
developere9356982022-07-04 09:03:20 +08004349 .txrx = {
4350 .txd_size = sizeof(struct mtk_tx_dma),
4351 .rxd_size = sizeof(struct mtk_rx_dma),
4352 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4353 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
4354 },
developerfd40db22021-04-29 10:08:25 +08004355};
4356
4357static const struct mtk_soc_data mt7622_data = {
4358 .ana_rgc3 = 0x2028,
4359 .caps = MT7622_CAPS | MTK_HWLRO,
4360 .hw_features = MTK_HW_FEATURES,
4361 .required_clks = MT7622_CLKS_BITMAP,
4362 .required_pctl = false,
4363 .has_sram = false,
developere9356982022-07-04 09:03:20 +08004364 .txrx = {
4365 .txd_size = sizeof(struct mtk_tx_dma),
4366 .rxd_size = sizeof(struct mtk_rx_dma),
4367 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4368 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
4369 },
developerfd40db22021-04-29 10:08:25 +08004370};
4371
4372static const struct mtk_soc_data mt7623_data = {
4373 .caps = MT7623_CAPS | MTK_HWLRO,
4374 .hw_features = MTK_HW_FEATURES,
4375 .required_clks = MT7623_CLKS_BITMAP,
4376 .required_pctl = true,
4377 .has_sram = false,
developere9356982022-07-04 09:03:20 +08004378 .txrx = {
4379 .txd_size = sizeof(struct mtk_tx_dma),
4380 .rxd_size = sizeof(struct mtk_rx_dma),
4381 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4382 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
4383 },
developerfd40db22021-04-29 10:08:25 +08004384};
4385
4386static const struct mtk_soc_data mt7629_data = {
4387 .ana_rgc3 = 0x128,
4388 .caps = MT7629_CAPS | MTK_HWLRO,
4389 .hw_features = MTK_HW_FEATURES,
4390 .required_clks = MT7629_CLKS_BITMAP,
4391 .required_pctl = false,
4392 .has_sram = false,
developere9356982022-07-04 09:03:20 +08004393 .txrx = {
4394 .txd_size = sizeof(struct mtk_tx_dma),
4395 .rxd_size = sizeof(struct mtk_rx_dma),
4396 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4397 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
4398 },
developerfd40db22021-04-29 10:08:25 +08004399};
4400
4401static const struct mtk_soc_data mt7986_data = {
4402 .ana_rgc3 = 0x128,
4403 .caps = MT7986_CAPS,
developercba5f4e2021-05-06 14:01:53 +08004404 .hw_features = MTK_HW_FEATURES,
developerfd40db22021-04-29 10:08:25 +08004405 .required_clks = MT7986_CLKS_BITMAP,
4406 .required_pctl = false,
4407 .has_sram = true,
developere9356982022-07-04 09:03:20 +08004408 .txrx = {
4409 .txd_size = sizeof(struct mtk_tx_dma_v2),
4410 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4411 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4412 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
4413 },
developerfd40db22021-04-29 10:08:25 +08004414};
4415
developer255bba22021-07-27 15:16:33 +08004416static const struct mtk_soc_data mt7981_data = {
4417 .ana_rgc3 = 0x128,
4418 .caps = MT7981_CAPS,
developer7377b0b2021-11-18 14:54:47 +08004419 .hw_features = MTK_HW_FEATURES,
developer255bba22021-07-27 15:16:33 +08004420 .required_clks = MT7981_CLKS_BITMAP,
4421 .required_pctl = false,
4422 .has_sram = true,
developere9356982022-07-04 09:03:20 +08004423 .txrx = {
4424 .txd_size = sizeof(struct mtk_tx_dma_v2),
4425 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4426 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4427 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
4428 },
developer255bba22021-07-27 15:16:33 +08004429};
4430
developer089e8852022-09-28 14:43:46 +08004431static const struct mtk_soc_data mt7988_data = {
4432 .ana_rgc3 = 0x128,
4433 .caps = MT7988_CAPS,
4434 .hw_features = MTK_HW_FEATURES,
4435 .required_clks = MT7988_CLKS_BITMAP,
4436 .required_pctl = false,
4437 .has_sram = true,
4438 .txrx = {
4439 .txd_size = sizeof(struct mtk_tx_dma_v2),
4440 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4441 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4442 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
4443 },
4444};
4445
developerfd40db22021-04-29 10:08:25 +08004446static const struct mtk_soc_data rt5350_data = {
4447 .caps = MT7628_CAPS,
4448 .hw_features = MTK_HW_FEATURES_MT7628,
4449 .required_clks = MT7628_CLKS_BITMAP,
4450 .required_pctl = false,
4451 .has_sram = false,
developere9356982022-07-04 09:03:20 +08004452 .txrx = {
4453 .txd_size = sizeof(struct mtk_tx_dma),
4454 .rxd_size = sizeof(struct mtk_rx_dma),
4455 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4456 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
4457 },
developerfd40db22021-04-29 10:08:25 +08004458};
4459
4460const struct of_device_id of_mtk_match[] = {
4461 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
4462 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
4463 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
4464 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
4465 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
4466 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
developer255bba22021-07-27 15:16:33 +08004467 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
developer089e8852022-09-28 14:43:46 +08004468 { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data},
developerfd40db22021-04-29 10:08:25 +08004469 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
4470 {},
4471};
4472MODULE_DEVICE_TABLE(of, of_mtk_match);
4473
4474static struct platform_driver mtk_driver = {
4475 .probe = mtk_probe,
4476 .remove = mtk_remove,
4477 .driver = {
4478 .name = "mtk_soc_eth",
4479 .of_match_table = of_mtk_match,
4480 },
4481};
4482
4483module_platform_driver(mtk_driver);
4484
4485MODULE_LICENSE("GPL");
4486MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
4487MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");