// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>

#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
#include "mtk_eth_reset.h"

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
#include "mtk_hnat/nf_hnat_mtk.h"
#endif

#if defined(CONFIG_XFRM_OFFLOAD)
#include <crypto/sha.h>
#include <net/xfrm.h>
#include "mtk_ipsec.h"
#endif

static int mtk_msg_level = -1;
atomic_t reset_lock = ATOMIC_INIT(0);
atomic_t force = ATOMIC_INIT(0);
atomic_t reset_pending = ATOMIC_INIT(0);

module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
DECLARE_COMPLETION(wait_ser_done);

#define MTK_ETHTOOL_STAT(x) { #x, \
			     offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "gp3",
	"xgp1", "xgp2", "xgp3", "crypto", "fe", "trgpll",
	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
	"sgmii_ck", "eth2pll", "wocpu0", "wocpu1",
	"ethwarp_wocpu2", "ethwarp_wocpu1", "ethwarp_wocpu0",
	"top_usxgmii0_sel", "top_usxgmii1_sel", "top_sgm0_sel", "top_sgm1_sel",
	"top_xfi_phy0_xtal_sel", "top_xfi_phy1_xtal_sel", "top_eth_gmii_sel",
	"top_eth_refck_50m_sel", "top_eth_sys_200m_sel", "top_eth_sys_sel",
	"top_eth_xgmii_sel", "top_eth_mii_sel", "top_netsys_sel",
	"top_netsys_500m_sel", "top_netsys_pao_2x_sel",
	"top_netsys_sync_250m_sel", "top_netsys_ppefb_250m_sel",
	"top_netsys_warp_sel",
};

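/* Raw register accessors: all frame engine registers are accessed as
 * 32-bit values relative to eth->base.
 */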
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
	u32 val;

	val = mtk_r32(eth, reg);
	val &= ~mask;
	val |= set;
	mtk_w32(eth, val, reg);
	return reg;
}

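/* Poll the PHY indirect access control register until the busy flag
 * clears, giving up after PHY_IAC_TIMEOUT jiffies.
 */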
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		cond_resched();
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

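/* Access a PHY register through the MDIO controller. Clause 45 accesses
 * (MII_ADDR_C45 set in phy_reg) need an extra address cycle before the
 * data cycle; Clause 22 accesses are a single transaction.
 */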
u32 _mtk_mdio_write(struct mtk_eth *eth, int phy_addr,
		    int phy_reg, u16 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return -1;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_WRITE |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return 0xffff;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_READ_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

static int mtk_mdio_reset(struct mii_bus *bus)
{
	/* mdiobus_register() triggers a reset pulse when bus reset is
	 * enabled; we just need to wait until the device is ready.
	 */
	mdelay(20);

	return 0;
}

static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
				     phy_interface_t interface)
{
	u32 val = 0;

	/* Check DDR memory type.
	 * Currently TRGMII mode with DDR2 memory is not supported.
	 */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
	if (interface == PHY_INTERFACE_MODE_TRGMII &&
	    val & SYSCFG_DRAM_TYPE_DDR2) {
		dev_err(eth->dev,
			"TRGMII mode with DDR2 memory is not supported!\n");
		return -EOPNOTSUPP;
	}

	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_MT7621_MASK, val);

	return 0;
}

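/* Program INTF_MODE and the TRGMII PLL/clock selection for GMAC0
 * according to the requested interface and speed.
 */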
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
				   phy_interface_t interface, int speed)
{
	u32 val;
	int ret;

	if (interface == PHY_INTERFACE_MODE_TRGMII) {
		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
		val = 500000000;
		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
		if (ret)
			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
		return;
	}

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

static void mtk_setup_bridge_switch(struct mtk_eth *eth)
{
	int val;

	/* Force Port1 XGMAC Link Up */
	val = mtk_r32(eth, MTK_XGMAC_STS(MTK_GMAC1_ID));
	mtk_w32(eth, val | MTK_XGMAC_FORCE_LINK,
		MTK_XGMAC_STS(MTK_GMAC1_ID));

	/* Adjust GSW bridge IPG to 11 */
	val = mtk_r32(eth, MTK_GSW_CFG);
	val &= ~(GSWTX_IPG_MASK | GSWRX_IPG_MASK);
	val |= (GSW_IPG_11 << GSWTX_IPG_SHIFT) |
	       (GSW_IPG_11 << GSWRX_IPG_SHIFT);
	mtk_w32(eth, val, MTK_GSW_CFG);
}

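/* Enable or disable EEE (LPI) on a MAC: program the wakeup times and
 * TX idle threshold, then force the EEE mode matching the link speed.
 */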
static void mtk_setup_eee(struct mtk_mac *mac, bool enable)
{
	struct mtk_eth *eth = mac->hw;
	u32 mcr, mcr_cur;
	u32 val;

	mcr = mcr_cur = mtk_r32(eth, MTK_MAC_MCR(mac->id));
	mcr &= ~(MAC_MCR_FORCE_EEE100 | MAC_MCR_FORCE_EEE1000);

	if (enable) {
		mac->tx_lpi_enabled = 1;

		val = FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 19) |
		      FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 33) |
		      FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD,
				 mac->tx_lpi_timer) |
		      FIELD_PREP(MAC_EEE_RESV0, 14);
		mtk_w32(eth, val, MTK_MAC_EEE(mac->id));

		switch (mac->speed) {
		case SPEED_1000:
			mcr |= MAC_MCR_FORCE_EEE1000;
			break;
		case SPEED_100:
			mcr |= MAC_MCR_FORCE_EEE100;
			break;
		}
	} else {
		mac->tx_lpi_enabled = 0;

		mtk_w32(eth, 0x00000002, MTK_MAC_EEE(mac->id));
	}

	/* Only update control register when needed! */
	if (mcr != mcr_cur)
		mtk_w32(eth, mcr, MTK_MAC_MCR(mac->id));
}

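/* phylink mac_config callback: select the SoC pin functions and clocks
 * for the requested interface, set the GE mode in ETHSYS and configure
 * the SGMII/USXGMII subsystem before programming the per-MAC registers.
 */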
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	u32 sid, i;
	int val = 0, ge_mode, err = 0;
	unsigned int mac_type = mac->type;

	/* MT76x8 has no hardware settings for the MAC */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
	    mac->interface != state->interface) {
		/* Setup soc pin functions */
		switch (state->interface) {
		case PHY_INTERFACE_MODE_TRGMII:
			if (mac->id)
				goto err_phy;
			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
					  MTK_GMAC1_TRGMII))
				goto err_phy;
			/* fall through */
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_REVMII:
		case PHY_INTERFACE_MODE_RMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
		case PHY_INTERFACE_MODE_2500BASEX:
		case PHY_INTERFACE_MODE_SGMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_GMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
				err = mtk_gmac_gephy_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_XGMII:
			mac->type = MTK_XGDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_XGMII)) {
				err = mtk_gmac_xgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_USXGMII:
		case PHY_INTERFACE_MODE_10GKR:
		case PHY_INTERFACE_MODE_5GBASER:
			mac->type = MTK_XGDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
				err = mtk_gmac_usxgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		default:
			goto err_phy;
		}

		/* Setup clock for 1st gmac */
		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
		    !phy_interface_mode_is_8023z(state->interface) &&
		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
			if (MTK_HAS_CAPS(mac->hw->soc->caps,
					 MTK_TRGMII_MT7621_CLK)) {
				if (mt7621_gmac0_rgmii_adjust(mac->hw,
							      state->interface))
					goto err_phy;
			} else {
				mtk_gmac0_rgmii_adjust(mac->hw,
						       state->interface,
						       state->speed);

				/* mt7623_pad_clk_setup */
				for (i = 0; i < NUM_TRGMII_CTRL; i++)
					mtk_w32(mac->hw,
						TD_DM_DRVP(8) | TD_DM_DRVN(8),
						TRGMII_TD_ODT(i));

				/* Assert/release MT7623 RXC reset */
				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
					TRGMII_RCK_CTRL);
				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
			}
		}

		ge_mode = 0;
		switch (state->interface) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_GMII:
			ge_mode = 1;
			break;
		case PHY_INTERFACE_MODE_REVMII:
			ge_mode = 2;
			break;
		case PHY_INTERFACE_MODE_RMII:
			if (mac->id)
				goto err_phy;
			ge_mode = 3;
			break;
		default:
			break;
		}

		/* put the gmac into the right mode */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
		spin_unlock(&eth->syscfg0_lock);

		mac->interface = state->interface;
	}

	/* SGMII */
	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path from GMAC to SGMII will be enabled once the
		 * SGMIISYS setup is done.
		 */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK,
				   ~(u32)SYSCFG0_SGMII_MASK);

		/* Decide how GMAC and SGMIISYS are mapped */
		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
		       0 : mac->id;

		/* Setup SGMIISYS with the determined property */
		if (state->interface != PHY_INTERFACE_MODE_SGMII)
			err = mtk_sgmii_setup_mode_force(eth->xgmii, sid,
							 state);
		else
			err = mtk_sgmii_setup_mode_an(eth->xgmii, sid);

		if (err) {
			spin_unlock(&eth->syscfg0_lock);
			goto init_err;
		}

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, val);
		spin_unlock(&eth->syscfg0_lock);
	} else if (state->interface == PHY_INTERFACE_MODE_USXGMII ||
		   state->interface == PHY_INTERFACE_MODE_10GKR ||
		   state->interface == PHY_INTERFACE_MODE_5GBASER) {
		sid = mac->id;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) &&
		    sid != MTK_GMAC1_ID) {
			if (phylink_autoneg_inband(mode))
				err = mtk_usxgmii_setup_mode_force(eth->xgmii, sid,
								   state);
			else
				err = mtk_usxgmii_setup_mode_an(eth->xgmii, sid,
								SPEED_10000);

			if (err)
				goto init_err;
		}
	} else if (phylink_autoneg_inband(mode)) {
		dev_err(eth->dev,
			"In-band mode not supported in non SGMII mode!\n");
		return;
	}

	/* Setup gmac */
	if (mac->type == MTK_XGDM_TYPE) {
		mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
		mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			switch (mac->id) {
			case MTK_GMAC1_ID:
				mtk_setup_bridge_switch(eth);
				break;
			case MTK_GMAC3_ID:
				val = mtk_r32(eth, MTK_XGMAC_STS(mac->id));
				mtk_w32(eth, val | MTK_XGMAC_FORCE_LINK,
					MTK_XGMAC_STS(mac->id));
				break;
			}
		}
	} else if (mac->type == MTK_GDM_TYPE) {
		val = mtk_r32(eth, MTK_GDMA_EG_CTRL(mac->id));
		mtk_w32(eth, val & ~MTK_GDMA_XGDM_SEL,
			MTK_GDMA_EG_CTRL(mac->id));

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			switch (mac->id) {
			case MTK_GMAC3_ID:
				val = mtk_r32(eth, MTK_XGMAC_STS(mac->id));
				mtk_w32(eth, val & ~MTK_XGMAC_FORCE_LINK,
					MTK_XGMAC_STS(mac->id));
				break;
			}
		}

		if (mac->type != mac_type) {
			if (atomic_read(&reset_pending) == 0) {
				atomic_inc(&force);
				schedule_work(&eth->pending_work);
				atomic_inc(&reset_pending);
			} else
				atomic_dec(&reset_pending);
		}
	}

	return;

err_phy:
	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
		mac->id, phy_modes(state->interface));
	return;

init_err:
	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
		mac->id, phy_modes(state->interface), err);
}

static int mtk_mac_pcs_get_state(struct phylink_config *config,
				 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	if (mac->type == MTK_XGDM_TYPE) {
		u32 sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));

		if (mac->id == MTK_GMAC2_ID)
			sts = sts >> 16;

		state->duplex = 1;

		switch (FIELD_GET(MTK_USXGMII_PCS_MODE, sts)) {
		case 0:
			state->speed = SPEED_10000;
			break;
		case 1:
			state->speed = SPEED_5000;
			break;
		case 2:
			state->speed = SPEED_2500;
			break;
		case 3:
			state->speed = SPEED_1000;
			break;
		}

		state->interface = mac->interface;
		state->link = FIELD_GET(MTK_USXGMII_PCS_LINK, sts);
	} else if (mac->type == MTK_GDM_TYPE) {
		struct mtk_eth *eth = mac->hw;
		struct mtk_xgmii *ss = eth->xgmii;
		u32 id = mtk_mac2xgmii_id(eth, mac->id);
		u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
		u32 val = 0;

		regmap_read(ss->regmap_sgmii[id], SGMSYS_PCS_CONTROL_1, &val);

		state->interface = mac->interface;
		state->link = FIELD_GET(SGMII_LINK_STATYS, val);

		if (FIELD_GET(SGMII_AN_ENABLE, val)) {
			regmap_read(ss->regmap_sgmii[id], SGMII_PCS_SPEED_ABILITY, &val);

			val = val >> 16;

			state->duplex = FIELD_GET(SGMII_PCS_SPEED_DUPLEX, val);

			switch (FIELD_GET(SGMII_PCS_SPEED_MASK, val)) {
			case 0:
				state->speed = SPEED_10;
				break;
			case 1:
				state->speed = SPEED_100;
				break;
			case 2:
				state->speed = SPEED_1000;
				break;
			}
		} else {
			regmap_read(ss->regmap_sgmii[id], SGMSYS_SGMII_MODE, &val);

			state->duplex = !FIELD_GET(SGMII_DUPLEX_FULL, val);

			switch (FIELD_GET(SGMII_SPEED_MASK, val)) {
			case 0:
				state->speed = SPEED_10;
				break;
			case 1:
				state->speed = SPEED_100;
				break;
			case 2:
				regmap_read(ss->regmap_sgmii[id], ss->ana_rgc3, &val);
				state->speed = (FIELD_GET(RG_PHY_SPEED_3_125G, val)) ? SPEED_2500 : SPEED_1000;
				break;
			}
		}

		state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
		if (pmsr & MAC_MSR_RX_FC)
			state->pause |= MLO_PAUSE_RX;
		if (pmsr & MAC_MSR_TX_FC)
			state->pause |= MLO_PAUSE_TX;
	}

	return 1;
}

static void mtk_mac_an_restart(struct phylink_config *config)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	if (mac->type != MTK_XGDM_TYPE)
		mtk_sgmii_restart_an(mac->hw, mac->id);
}

static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr;

	if (mac->type == MTK_GDM_TYPE) {
		mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
		mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
		mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
	} else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
		mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));

		mcr &= 0xfffffff0;
		mcr |= XMAC_MCR_TRX_DISABLE;
		mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
	}
}

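/* phylink mac_link_up callback: force the resolved speed, duplex and
 * pause settings into MAC_MCR (GDM) or XMAC_MCR (XGDM) and enable TX/RX.
 */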
static void mtk_mac_link_up(struct phylink_config *config,
			    struct phy_device *phy,
			    unsigned int mode, phy_interface_t interface,
			    int speed, int duplex, bool tx_pause, bool rx_pause)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr, mcr_cur;

	mac->speed = speed;

	if (mac->type == MTK_GDM_TYPE) {
		mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
		mcr = mcr_cur;
		mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
			 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
			 MAC_MCR_FORCE_RX_FC);
		mcr |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
		       MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;

		/* Configure speed */
		switch (speed) {
		case SPEED_2500:
		case SPEED_1000:
			mcr |= MAC_MCR_SPEED_1000;
			break;
		case SPEED_100:
			mcr |= MAC_MCR_SPEED_100;
			break;
		}

		/* Configure duplex */
		if (duplex == DUPLEX_FULL)
			mcr |= MAC_MCR_FORCE_DPX;

		/* Configure pause modes -
		 * phylink will avoid these for half duplex
		 */
		if (tx_pause)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (rx_pause)
			mcr |= MAC_MCR_FORCE_RX_FC;

		mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;

		/* Only update control register when needed! */
		if (mcr != mcr_cur)
			mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

		if (mode == MLO_AN_PHY && phy)
			mtk_setup_eee(mac, phy_init_eee(phy, false) >= 0);
	} else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
		mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));

		mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC);
		/* Configure pause modes -
		 * phylink will avoid these for half duplex
		 */
		if (tx_pause)
			mcr |= XMAC_MCR_FORCE_TX_FC;
		if (rx_pause)
			mcr |= XMAC_MCR_FORCE_RX_FC;

		mcr &= ~(XMAC_MCR_TRX_DISABLE);
		mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
	}
}

static void mtk_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
	      phy_interface_mode_is_rgmii(state->interface)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
	       phy_interface_mode_is_8023z(state->interface))) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_XGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_XGMII)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_USXGMII)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_10GKR))) {
		linkmode_zero(supported);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_USXGMII:
	case PHY_INTERFACE_MODE_10GKR:
		phylink_set(mask, 10000baseKR_Full);
		phylink_set(mask, 10000baseT_Full);
		phylink_set(mask, 10000baseCR_Full);
		phylink_set(mask, 10000baseSR_Full);
		phylink_set(mask, 10000baseLR_Full);
		phylink_set(mask, 10000baseLRM_Full);
		phylink_set(mask, 10000baseER_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		phylink_set(mask, 1000baseT_Half);
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		phylink_set(mask, 5000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_TRGMII:
		phylink_set(mask, 1000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_XGMII:
		/* fall through */
	case PHY_INTERFACE_MODE_1000BASEX:
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 2500baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseT_Half);
		/* fall through */
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_REVMII:
	case PHY_INTERFACE_MODE_NA:
	default:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		break;
	}

	if (state->interface == PHY_INTERFACE_MODE_NA) {
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII)) {
			phylink_set(mask, 10000baseKR_Full);
			phylink_set(mask, 10000baseT_Full);
			phylink_set(mask, 10000baseSR_Full);
			phylink_set(mask, 10000baseLR_Full);
			phylink_set(mask, 10000baseLRM_Full);
			phylink_set(mask, 10000baseER_Full);
			phylink_set(mask, 1000baseKX_Full);
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
			phylink_set(mask, 2500baseT_Full);
			phylink_set(mask, 5000baseT_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
			phylink_set(mask, 1000baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
		}
	}

	if (mac->type == MTK_XGDM_TYPE) {
		phylink_clear(mask, 10baseT_Half);
		phylink_clear(mask, 100baseT_Half);
		phylink_clear(mask, 1000baseT_Half);
	}

	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}

static const struct phylink_mac_ops mtk_phylink_ops = {
	.validate = mtk_validate,
	.mac_link_state = mtk_mac_pcs_get_state,
	.mac_an_restart = mtk_mac_an_restart,
	.mac_config = mtk_mac_config,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
};

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int clk = 25000000, max_clk = 2500000, divider = 1;
	int ret;
	u32 val;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->reset = mtk_mdio_reset;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	if (snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np) < 0) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	if (!of_property_read_u32(mii_np, "mdc-max-frequency", &val))
		max_clk = val;

	while (clk / divider > max_clk) {
		if (divider >= 63)
			break;

		divider++;
	}

	/* Configure MDC Turbo Mode */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		val = mtk_r32(eth, MTK_MAC_MISC);
		val |= MISC_MDC_TURBO;
		mtk_w32(eth, val, MTK_MAC_MISC);
	} else {
		val = mtk_r32(eth, MTK_PPSC);
		val |= PPSC_MDC_TURBO;
		mtk_w32(eth, val, MTK_PPSC);
	}

	/* Configure MDC Divider */
	val = mtk_r32(eth, MTK_PPSC);
	val &= ~PPSC_MDC_CFG;
	val |= FIELD_PREP(PPSC_MDC_CFG, divider);
	mtk_w32(eth, val, MTK_PPSC);

	dev_info(eth->dev, "MDC is running on %d Hz\n", clk / divider);

	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MT7628_SDM_MAC_ADRH);
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MT7628_SDM_MAC_ADRL);
	} else {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MTK_GDMA_MAC_ADRH(mac->id));
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MTK_GDMA_MAC_ADRL(mac->id));
	}
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

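/* Fold the hardware MIB counters at MTK_GDM1_TX_GBCNT + reg_offset into
 * the software mtk_hw_stats, under the u64_stats sequence lock. The TX
 * counter layout differs between NETSYS v3 and earlier SoCs.
 */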
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_eth *eth = mac->hw;
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats = mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
		mtk_r32(mac->hw, base + 0x24);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x50);
		hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x54);
		hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x40);
		stats = mtk_r32(mac->hw, base + 0x44);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x48);
		u64_stats_update_end(&hw_stats->syncp);
	} else {
		hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
		hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
		hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
		stats = mtk_r32(mac->hw, base + 0x34);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
		u64_stats_update_end(&hw_stats->syncp);
	}
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

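/* Snapshot a hardware RX descriptor into rxd; returns false while the
 * DMA engine still owns the descriptor (RX_DMA_DONE not yet set).
 */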
static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
			    struct mtk_rx_dma_v2 *dma_rxd)
{
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	if (!(rxd->rxd2 & RX_DMA_DONE))
		return false;

	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
		rxd->rxd7 = READ_ONCE(dma_rxd->rxd7);
	}

	return true;
}

/* the qdma core needs scratch memory to be set up */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	if (!eth->soc->has_sram) {
		eth->scratch_ring = dma_alloc_coherent(eth->dev,
						       cnt * soc->txrx.txd_size,
						       &eth->phy_scratch_ring,
						       GFP_KERNEL);
	} else {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
			eth->scratch_ring = eth->sram_base;
		else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
			eth->scratch_ring = eth->base + MTK_ETH_SRAM_OFFSET;
	}

	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	phy_ring_tail = eth->phy_scratch_ring +
			(dma_addr_t)soc->txrx.txd_size * (cnt - 1);

	for (i = 0; i < cnt; i++) {
		struct mtk_tx_dma_v2 *txd;

		txd = eth->scratch_ring + i * soc->txrx.txd_size;
		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
		if (i < cnt - 1)
			txd->txd2 = eth->phy_scratch_ring +
				    (i + 1) * soc->txrx.txd_size;

		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
		txd->txd4 = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			txd->txd5 = 0;
			txd->txd6 = 0;
			txd->txd7 = 0;
			txd->txd8 = 0;
		}
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}

static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	return ring->dma + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    void *txd, u32 txd_size)
{
	int idx = (txd - ring->dma) / txd_size;

	return &ring->buf[idx];
}

static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
				       void *dma)
{
	return ring->dma_pdma - ring->dma + dma;
}

static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
{
	return (dma - ring->dma) / txd_size;
}

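/* Undo the DMA mappings of a TX slot and free its skb. QDMA tracks one
 * mapping per slot; PDMA may carry two buffers per descriptor.
 */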
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 bool napi)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
			dma_unmap_single(eth->dev,
					 dma_unmap_addr(tx_buf, dma_addr0),
					 dma_unmap_len(tx_buf, dma_len0),
					 DMA_TO_DEVICE);
		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}
	} else {
		if (dma_unmap_len(tx_buf, dma_len0)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}

		if (dma_unmap_len(tx_buf, dma_len1)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr1),
				       dma_unmap_len(tx_buf, dma_len1),
				       DMA_TO_DEVICE);
		}
	}

	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
		if (napi)
			napi_consume_skb(tx_buf->skb, napi);
		else
			dev_kfree_skb_any(tx_buf->skb);
	}
	tx_buf->skb = NULL;
}

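/* Record a mapped buffer in the TX bookkeeping. On PDMA, two buffers
 * share one descriptor, so odd indices fill txd3/PLEN1 instead of
 * txd1/PLEN0.
 */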
static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
			 size_t size, int idx)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
		dma_unmap_len_set(tx_buf, dma_len0, size);
	} else {
		if (idx & 1) {
			txd->txd3 = mapped_addr;
			txd->txd2 |= TX_DMA_PLEN1(size);
			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len1, size);
		} else {
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			txd->txd1 = mapped_addr;
			txd->txd2 = TX_DMA_PLEN0(size);
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, size);
		}
	}
}

static void mtk_tx_set_dma_desc_v1(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *desc = txd;
	u32 data;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_SWC | QID_LOW_BITS(info->qid) | TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data);

	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
	data |= QID_HIGH_BITS(info->qid);
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM;
		/* vlan header offload */
		if (info->vlan)
			data |= TX_DMA_INS_VLAN | info->vlan_tci;
	}

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0x7 << TX_DMA_FPORT_SHIFT);
		data |= 0x4 << TX_DMA_FPORT_SHIFT;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);
}

static void mtk_tx_set_dma_desc_v2(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma_v2 *desc = txd;
	u32 data = 0;

	if (!info->qid && mac->id)
		info->qid = MTK_QDMA_GMAC2_QID;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data);

	data = ((mac->id == MTK_GMAC3_ID) ?
		PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
		data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);

	data = 0;
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO_V2;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM_V2;
	}
	WRITE_ONCE(desc->txd5, data);

	data = 0;
	if (info->first && info->vlan)
		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
	WRITE_ONCE(desc->txd6, data);

	WRITE_ONCE(desc->txd7, 0);
	WRITE_ONCE(desc->txd8, 0);
}

static void mtk_tx_set_dma_desc_v3(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma_v2 *desc = txd;
	u64 addr64 = 0;
	u32 data = 0;

	if (!info->qid && mac->id)
		info->qid = MTK_QDMA_GMAC2_QID;

	addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
		 TX_DMA_SDP1(info->addr) : 0;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data | addr64);

	data = ((mac->id == MTK_GMAC3_ID) ?
		PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
		data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);

	data = 0;
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO_V2;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM_V2;

		if (netdev_uses_dsa(dev))
			data |= TX_DMA_SPTAG_V3;
	}
	WRITE_ONCE(desc->txd5, data);

	data = 0;
	if (info->first && info->vlan)
		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
	WRITE_ONCE(desc->txd6, data);

	WRITE_ONCE(desc->txd7, 0);
	WRITE_ONCE(desc->txd8, 0);
}

static void mtk_tx_set_dma_desc(struct sk_buff *skb, struct net_device *dev, void *txd,
				struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
		mtk_tx_set_dma_desc_v3(skb, dev, txd, info);
	else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		mtk_tx_set_dma_desc_v2(skb, dev, txd, info);
	else
		mtk_tx_set_dma_desc_v1(skb, dev, txd, info);
}

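/* Map an skb (head plus fragments) onto the TX ring, fill the DMA
 * descriptors and kick the QDMA/PDMA engine. Returns -ENOMEM and unwinds
 * the mappings if the ring runs out of free descriptors.
 */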
developerfd40db22021-04-29 10:08:25 +08001512static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1513 int tx_num, struct mtk_tx_ring *ring, bool gso)
1514{
developere9356982022-07-04 09:03:20 +08001515 struct mtk_tx_dma_desc_info txd_info = {
1516 .size = skb_headlen(skb),
1517 .qid = skb->mark & MTK_QDMA_TX_MASK,
1518 .gso = gso,
1519 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
1520 .vlan = skb_vlan_tag_present(skb),
1521 .vlan_tci = skb_vlan_tag_get(skb),
1522 .first = true,
1523 .last = !skb_is_nonlinear(skb),
1524 };
developerfd40db22021-04-29 10:08:25 +08001525 struct mtk_mac *mac = netdev_priv(dev);
1526 struct mtk_eth *eth = mac->hw;
developere9356982022-07-04 09:03:20 +08001527 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001528 struct mtk_tx_dma *itxd, *txd;
1529 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1530 struct mtk_tx_buf *itx_buf, *tx_buf;
developerfd40db22021-04-29 10:08:25 +08001531 int i, n_desc = 1;
developerfd40db22021-04-29 10:08:25 +08001532 int k = 0;
1533
developerb3a9e7b2023-02-08 15:18:10 +08001534 if (skb->len < 32) {
1535 if (skb_put_padto(skb, MTK_MIN_TX_LENGTH))
1536 return -ENOMEM;
1537
1538 txd_info.size = skb_headlen(skb);
1539 }
1540
developerfd40db22021-04-29 10:08:25 +08001541 itxd = ring->next_free;
1542 itxd_pdma = qdma_to_pdma(ring, itxd);
1543 if (itxd == ring->last_free)
1544 return -ENOMEM;
1545
developere9356982022-07-04 09:03:20 +08001546 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001547 memset(itx_buf, 0, sizeof(*itx_buf));
1548
developere9356982022-07-04 09:03:20 +08001549 txd_info.addr = dma_map_single(eth->dev, skb->data, txd_info.size,
1550 DMA_TO_DEVICE);
1551 if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
developerfd40db22021-04-29 10:08:25 +08001552 return -ENOMEM;
1553
developere9356982022-07-04 09:03:20 +08001554 mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);
1555
developerfd40db22021-04-29 10:08:25 +08001556 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
developer089e8852022-09-28 14:43:46 +08001557 itx_buf->flags |= (mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
1558 (mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
1559 MTK_TX_FLAGS_FPORT2;
developere9356982022-07-04 09:03:20 +08001560 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
developerfd40db22021-04-29 10:08:25 +08001561 k++);
1562
developerfd40db22021-04-29 10:08:25 +08001563 /* TX SG offload */
1564 txd = itxd;
1565 txd_pdma = qdma_to_pdma(ring, txd);
1566
developere9356982022-07-04 09:03:20 +08001567 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
developerfd40db22021-04-29 10:08:25 +08001568 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1569 unsigned int offset = 0;
1570 int frag_size = skb_frag_size(frag);
1571
1572 while (frag_size) {
developerfd40db22021-04-29 10:08:25 +08001573 bool new_desc = true;
1574
developere9356982022-07-04 09:03:20 +08001575 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
developerfd40db22021-04-29 10:08:25 +08001576 (i & 0x1)) {
1577 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1578 txd_pdma = qdma_to_pdma(ring, txd);
1579 if (txd == ring->last_free)
1580 goto err_dma;
1581
1582 n_desc++;
1583 } else {
1584 new_desc = false;
1585 }
1586
developere9356982022-07-04 09:03:20 +08001587 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1588 txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
1589 txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
1590 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1591 !(frag_size - txd_info.size);
1592 txd_info.addr = skb_frag_dma_map(eth->dev, frag,
1593 offset, txd_info.size,
1594 DMA_TO_DEVICE);
1595 if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
1596 goto err_dma;
developerfd40db22021-04-29 10:08:25 +08001597
developere9356982022-07-04 09:03:20 +08001598 mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);
developerfd40db22021-04-29 10:08:25 +08001599
developere9356982022-07-04 09:03:20 +08001600 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001601 if (new_desc)
1602 memset(tx_buf, 0, sizeof(*tx_buf));
1603 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
1604 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
developer089e8852022-09-28 14:43:46 +08001605 tx_buf->flags |=
1606 (mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
1607 (mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
1608 MTK_TX_FLAGS_FPORT2;
developerfd40db22021-04-29 10:08:25 +08001609
developere9356982022-07-04 09:03:20 +08001610 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1611 txd_info.size, k++);
developerfd40db22021-04-29 10:08:25 +08001612
developere9356982022-07-04 09:03:20 +08001613 frag_size -= txd_info.size;
1614 offset += txd_info.size;
developerfd40db22021-04-29 10:08:25 +08001615 }
1616 }
1617
1618 	/* store skb for cleanup */
1619 itx_buf->skb = skb;
1620
developere9356982022-07-04 09:03:20 +08001621 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
developerfd40db22021-04-29 10:08:25 +08001622 if (k & 0x1)
1623 txd_pdma->txd2 |= TX_DMA_LS0;
1624 else
1625 txd_pdma->txd2 |= TX_DMA_LS1;
1626 }
1627
1628 netdev_sent_queue(dev, skb->len);
1629 skb_tx_timestamp(skb);
1630
1631 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1632 atomic_sub(n_desc, &ring->free_count);
1633
1634 /* make sure that all changes to the dma ring are flushed before we
1635 * continue
1636 */
1637 wmb();
1638
developere9356982022-07-04 09:03:20 +08001639 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
developerfd40db22021-04-29 10:08:25 +08001640 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
1641 !netdev_xmit_more())
1642 mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
1643 } else {
developere9356982022-07-04 09:03:20 +08001644 int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
developerfd40db22021-04-29 10:08:25 +08001645 ring->dma_size);
1646 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1647 }
1648
1649 return 0;
1650
1651err_dma:
1652 do {
developere9356982022-07-04 09:03:20 +08001653 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001654
1655 /* unmap dma */
developerc4671b22021-05-28 13:16:42 +08001656 mtk_tx_unmap(eth, tx_buf, false);
developerfd40db22021-04-29 10:08:25 +08001657
1658 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
developere9356982022-07-04 09:03:20 +08001659 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
developerfd40db22021-04-29 10:08:25 +08001660 itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1661
1662 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1663 itxd_pdma = qdma_to_pdma(ring, itxd);
1664 } while (itxd != txd);
1665
1666 return -ENOMEM;
1667}
1668
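/* Estimate how many TX descriptors @skb will consume. One descriptor
 * covers at most MTK_TX_DMA_BUF_LEN bytes, so GSO frags are counted in
 * MTK_TX_DMA_BUF_LEN-sized chunks. Worked example, assuming a
 * MTK_TX_DMA_BUF_LEN of 0x3fff (16383): a GSO skb with a single
 * 40000-byte frag needs 1 + DIV_ROUND_UP(40000, 16383) = 4 descriptors.
 */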
1669static inline int mtk_cal_txd_req(struct sk_buff *skb)
1670{
1671 int i, nfrags;
1672 skb_frag_t *frag;
1673
1674 nfrags = 1;
1675 if (skb_is_gso(skb)) {
1676 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1677 frag = &skb_shinfo(skb)->frags[i];
1678 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1679 MTK_TX_DMA_BUF_LEN);
1680 }
1681 } else {
1682 nfrags += skb_shinfo(skb)->nr_frags;
1683 }
1684
1685 return nfrags;
1686}
1687
1688static int mtk_queue_stopped(struct mtk_eth *eth)
1689{
1690 int i;
1691
1692 for (i = 0; i < MTK_MAC_COUNT; i++) {
1693 if (!eth->netdev[i])
1694 continue;
1695 if (netif_queue_stopped(eth->netdev[i]))
1696 return 1;
1697 }
1698
1699 return 0;
1700}
1701
1702static void mtk_wake_queue(struct mtk_eth *eth)
1703{
1704 int i;
1705
1706 for (i = 0; i < MTK_MAC_COUNT; i++) {
1707 if (!eth->netdev[i])
1708 continue;
1709 netif_wake_queue(eth->netdev[i]);
1710 }
1711}
1712
1713static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1714{
1715 struct mtk_mac *mac = netdev_priv(dev);
1716 struct mtk_eth *eth = mac->hw;
1717 struct mtk_tx_ring *ring = &eth->tx_ring;
1718 struct net_device_stats *stats = &dev->stats;
1719 bool gso = false;
1720 int tx_num;
1721
1722 	/* normally we can rely on the stack not calling this more than once;
1723 	 * however, we have two queues running on the same ring, so we need
1724 	 * to lock ring access
1725 */
1726 spin_lock(&eth->page_lock);
1727
1728 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1729 goto drop;
1730
1731 tx_num = mtk_cal_txd_req(skb);
1732 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1733 netif_stop_queue(dev);
1734 netif_err(eth, tx_queued, dev,
1735 "Tx Ring full when queue awake!\n");
1736 spin_unlock(&eth->page_lock);
1737 return NETDEV_TX_BUSY;
1738 }
1739
1740 /* TSO: fill MSS info in tcp checksum field */
1741 if (skb_is_gso(skb)) {
1742 if (skb_cow_head(skb, 0)) {
1743 netif_warn(eth, tx_err, dev,
1744 "GSO expand head fail.\n");
1745 goto drop;
1746 }
1747
1748 if (skb_shinfo(skb)->gso_type &
1749 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1750 gso = true;
1751 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1752 }
1753 }
1754
1755 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1756 goto drop;
1757
1758 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1759 netif_stop_queue(dev);
1760
1761 spin_unlock(&eth->page_lock);
1762
1763 return NETDEV_TX_OK;
1764
1765drop:
1766 spin_unlock(&eth->page_lock);
1767 stats->tx_dropped++;
1768 dev_kfree_skb_any(skb);
1769 return NETDEV_TX_OK;
1770}
1771
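/* When HW LRO is active, received traffic is spread over several RX
 * rings. Pick the first ring whose next descriptor has RX_DMA_DONE set
 * and flag it for a CPU index update; return NULL if nothing is pending.
 */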
1772static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1773{
1774 int i;
1775 struct mtk_rx_ring *ring;
1776 int idx;
1777
developerfd40db22021-04-29 10:08:25 +08001778 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
developere9356982022-07-04 09:03:20 +08001779 struct mtk_rx_dma *rxd;
1780
developer77d03a72021-06-06 00:06:00 +08001781 if (!IS_NORMAL_RING(i) && !IS_HW_LRO_RING(i))
1782 continue;
1783
developerfd40db22021-04-29 10:08:25 +08001784 ring = &eth->rx_ring[i];
1785 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
developere9356982022-07-04 09:03:20 +08001786 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1787 if (rxd->rxd2 & RX_DMA_DONE) {
developerfd40db22021-04-29 10:08:25 +08001788 ring->calc_idx_update = true;
1789 return ring;
1790 }
1791 }
1792
1793 return NULL;
1794}
1795
developer18f46a82021-07-20 21:08:21 +08001796static void mtk_update_rx_cpu_idx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
developerfd40db22021-04-29 10:08:25 +08001797{
developerfd40db22021-04-29 10:08:25 +08001798 int i;
1799
developerfb556ca2021-10-13 10:52:09 +08001800 if (!eth->hwlro)
developerfd40db22021-04-29 10:08:25 +08001801 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
developerfb556ca2021-10-13 10:52:09 +08001802 else {
developerfd40db22021-04-29 10:08:25 +08001803 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1804 ring = &eth->rx_ring[i];
1805 if (ring->calc_idx_update) {
1806 ring->calc_idx_update = false;
1807 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1808 }
1809 }
1810 }
1811}
1812
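/* NAPI RX poll. For each completed descriptor, a replacement fragment
 * is allocated and mapped before the old buffer is handed to the stack;
 * on allocation or mapping failure the descriptor is recycled and the
 * packet counted as dropped, so the ring never runs out of buffers.
 */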
1813static int mtk_poll_rx(struct napi_struct *napi, int budget,
1814 struct mtk_eth *eth)
1815{
developer18f46a82021-07-20 21:08:21 +08001816 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
1817 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08001818 int idx;
1819 struct sk_buff *skb;
developer089e8852022-09-28 14:43:46 +08001820 u64 addr64 = 0;
developerfd40db22021-04-29 10:08:25 +08001821 u8 *data, *new_data;
developere9356982022-07-04 09:03:20 +08001822 struct mtk_rx_dma_v2 *rxd, trxd;
developerfd40db22021-04-29 10:08:25 +08001823 int done = 0;
1824
developer18f46a82021-07-20 21:08:21 +08001825 if (unlikely(!ring))
1826 goto rx_done;
1827
developerfd40db22021-04-29 10:08:25 +08001828 while (done < budget) {
developer006325c2022-10-06 16:39:50 +08001829 struct net_device *netdev = NULL;
developerfd40db22021-04-29 10:08:25 +08001830 unsigned int pktlen;
developer8b6f2402022-11-28 13:42:34 +08001831 dma_addr_t dma_addr = 0;
developere9356982022-07-04 09:03:20 +08001832 int mac = 0;
developerfd40db22021-04-29 10:08:25 +08001833
developer18f46a82021-07-20 21:08:21 +08001834 if (eth->hwlro)
1835 ring = mtk_get_rx_ring(eth);
1836
developerfd40db22021-04-29 10:08:25 +08001837 if (unlikely(!ring))
1838 goto rx_done;
1839
1840 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
developere9356982022-07-04 09:03:20 +08001841 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
developerfd40db22021-04-29 10:08:25 +08001842 data = ring->data[idx];
1843
developere9356982022-07-04 09:03:20 +08001844 if (!mtk_rx_get_desc(eth, &trxd, rxd))
developerfd40db22021-04-29 10:08:25 +08001845 break;
1846
1847 		/* find out which mac the packet comes from. Values start at 1. */
1848 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1849 mac = 0;
1850 } else {
developer089e8852022-09-28 14:43:46 +08001851 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1852 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
1853 switch (RX_DMA_GET_SPORT_V2(trxd.rxd5)) {
1854 case PSE_GDM1_PORT:
1855 case PSE_GDM2_PORT:
1856 mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
1857 break;
1858 case PSE_GDM3_PORT:
1859 mac = MTK_GMAC3_ID;
1860 break;
1861 }
1862 } else
developerfd40db22021-04-29 10:08:25 +08001863 mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ?
1864 0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
1865 }
1866
1867 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1868 !eth->netdev[mac]))
1869 goto release_desc;
1870
1871 netdev = eth->netdev[mac];
1872
1873 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1874 goto release_desc;
1875
1876 /* alloc new buffer */
1877 new_data = napi_alloc_frag(ring->frag_size);
1878 if (unlikely(!new_data)) {
1879 netdev->stats.rx_dropped++;
1880 goto release_desc;
1881 }
1882 dma_addr = dma_map_single(eth->dev,
1883 new_data + NET_SKB_PAD +
1884 eth->ip_align,
1885 ring->buf_size,
1886 DMA_FROM_DEVICE);
1887 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
1888 skb_free_frag(new_data);
1889 netdev->stats.rx_dropped++;
1890 goto release_desc;
1891 }
1892
developer089e8852022-09-28 14:43:46 +08001893 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
1894 ((u64)(trxd.rxd2 & 0xf)) << 32 : 0;
1895
1896 dma_unmap_single(eth->dev,
1897 (u64)(trxd.rxd1 | addr64),
developerc4671b22021-05-28 13:16:42 +08001898 ring->buf_size, DMA_FROM_DEVICE);
1899
developerfd40db22021-04-29 10:08:25 +08001900 /* receive data */
1901 skb = build_skb(data, ring->frag_size);
1902 if (unlikely(!skb)) {
developerc4671b22021-05-28 13:16:42 +08001903 skb_free_frag(data);
developerfd40db22021-04-29 10:08:25 +08001904 netdev->stats.rx_dropped++;
developerc4671b22021-05-28 13:16:42 +08001905 goto skip_rx;
developerfd40db22021-04-29 10:08:25 +08001906 }
1907 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1908
developerfd40db22021-04-29 10:08:25 +08001909 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1910 skb->dev = netdev;
1911 skb_put(skb, pktlen);
1912
developer089e8852022-09-28 14:43:46 +08001913 if ((MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1) &&
developerfd40db22021-04-29 10:08:25 +08001914 (trxd.rxd4 & eth->rx_dma_l4_valid)) ||
developer089e8852022-09-28 14:43:46 +08001915 (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1) &&
developerfd40db22021-04-29 10:08:25 +08001916 (trxd.rxd3 & eth->rx_dma_l4_valid)))
1917 skb->ip_summed = CHECKSUM_UNNECESSARY;
1918 else
1919 skb_checksum_none_assert(skb);
1920 skb->protocol = eth_type_trans(skb, netdev);
1921
1922 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
developer089e8852022-09-28 14:43:46 +08001923 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1924 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer255bba22021-07-27 15:16:33 +08001925 if (trxd.rxd3 & RX_DMA_VTAG_V2)
developerfd40db22021-04-29 10:08:25 +08001926 __vlan_hwaccel_put_tag(skb,
developer255bba22021-07-27 15:16:33 +08001927 htons(RX_DMA_VPID_V2(trxd.rxd4)),
developerfd40db22021-04-29 10:08:25 +08001928 RX_DMA_VID_V2(trxd.rxd4));
1929 } else {
1930 if (trxd.rxd2 & RX_DMA_VTAG)
1931 __vlan_hwaccel_put_tag(skb,
1932 htons(RX_DMA_VPID(trxd.rxd3)),
1933 RX_DMA_VID(trxd.rxd3));
1934 }
1935
1936 			/* If the netdev is attached to a DSA switch, the special
1937 			 * tag inserted into the VLAN field by the switch hardware
1938 			 * can be offloaded by RX HW VLAN offload. Clear the VLAN
1939 			 * information from @skb to avoid unexpected 802.1d
1940 			 * handling before the packet enters the DSA framework.
1941 */
1942 if (netdev_uses_dsa(netdev))
1943 __vlan_hwaccel_clear_tag(skb);
1944 }
1945
1946#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
developer089e8852022-09-28 14:43:46 +08001947 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1948 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developerfd40db22021-04-29 10:08:25 +08001949 *(u32 *)(skb->head) = trxd.rxd5;
1950 else
developerfd40db22021-04-29 10:08:25 +08001951 *(u32 *)(skb->head) = trxd.rxd4;
1952
1953 skb_hnat_alg(skb) = 0;
developerfdfe1572021-09-13 16:56:33 +08001954 skb_hnat_filled(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001955 skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
1956
1957 if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
1958 trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
1959 __func__, skb_hnat_reason(skb));
1960 skb->pkt_type = PACKET_HOST;
1961 }
1962
1963 		trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x)\n",
1964 __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
1965 skb_hnat_reason(skb), skb_hnat_alg(skb));
1966#endif
developer77d03a72021-06-06 00:06:00 +08001967 if (mtk_hwlro_stats_ebl &&
1968 IS_HW_LRO_RING(ring->ring_no) && eth->hwlro) {
1969 hw_lro_stats_update(ring->ring_no, &trxd);
1970 hw_lro_flush_stats_update(ring->ring_no, &trxd);
1971 }
developerfd40db22021-04-29 10:08:25 +08001972
1973 skb_record_rx_queue(skb, 0);
1974 napi_gro_receive(napi, skb);
1975
developerc4671b22021-05-28 13:16:42 +08001976skip_rx:
developerfd40db22021-04-29 10:08:25 +08001977 ring->data[idx] = new_data;
1978 rxd->rxd1 = (unsigned int)dma_addr;
1979
1980release_desc:
developer089e8852022-09-28 14:43:46 +08001981 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
1982 RX_DMA_SDP1(dma_addr) : 0;
1983
developerfd40db22021-04-29 10:08:25 +08001984 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1985 rxd->rxd2 = RX_DMA_LSO;
1986 else
developer089e8852022-09-28 14:43:46 +08001987 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size) | addr64;
developerfd40db22021-04-29 10:08:25 +08001988
1989 ring->calc_idx = idx;
1990
1991 done++;
1992 }
1993
1994rx_done:
1995 if (done) {
1996 /* make sure that all changes to the dma ring are flushed before
1997 * we continue
1998 */
1999 wmb();
developer18f46a82021-07-20 21:08:21 +08002000 mtk_update_rx_cpu_idx(eth, ring);
developerfd40db22021-04-29 10:08:25 +08002001 }
2002
2003 return done;
2004}
2005
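/* Reclaim QDMA TX descriptors between the software release pointer
 * (ring->last_free_ptr) and the hardware DRX pointer: unmap each
 * buffer, credit the owning MAC's done/bytes counters, then publish
 * the new release pointer through MTK_QTX_CRX_PTR.
 */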
developerfb556ca2021-10-13 10:52:09 +08002006static void mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08002007 unsigned int *done, unsigned int *bytes)
2008{
developere9356982022-07-04 09:03:20 +08002009 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002010 struct mtk_tx_ring *ring = &eth->tx_ring;
2011 struct mtk_tx_dma *desc;
2012 struct sk_buff *skb;
2013 struct mtk_tx_buf *tx_buf;
2014 u32 cpu, dma;
2015
developerc4671b22021-05-28 13:16:42 +08002016 cpu = ring->last_free_ptr;
developerfd40db22021-04-29 10:08:25 +08002017 dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
2018
2019 desc = mtk_qdma_phys_to_virt(ring, cpu);
2020
2021 while ((cpu != dma) && budget) {
2022 u32 next_cpu = desc->txd2;
2023 int mac = 0;
2024
2025 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2026 break;
2027
2028 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2029
developere9356982022-07-04 09:03:20 +08002030 tx_buf = mtk_desc_to_tx_buf(ring, desc, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08002031 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
developer089e8852022-09-28 14:43:46 +08002032 mac = MTK_GMAC2_ID;
2033 else if (tx_buf->flags & MTK_TX_FLAGS_FPORT2)
2034 mac = MTK_GMAC3_ID;
developerfd40db22021-04-29 10:08:25 +08002035
2036 skb = tx_buf->skb;
2037 if (!skb)
2038 break;
2039
2040 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
2041 bytes[mac] += skb->len;
2042 done[mac]++;
2043 budget--;
2044 }
developerc4671b22021-05-28 13:16:42 +08002045 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08002046
2047 ring->last_free = desc;
2048 atomic_inc(&ring->free_count);
2049
2050 cpu = next_cpu;
2051 }
2052
developerc4671b22021-05-28 13:16:42 +08002053 ring->last_free_ptr = cpu;
developerfd40db22021-04-29 10:08:25 +08002054 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
developerfd40db22021-04-29 10:08:25 +08002055}
2056
developerfb556ca2021-10-13 10:52:09 +08002057static void mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08002058 unsigned int *done, unsigned int *bytes)
2059{
2060 struct mtk_tx_ring *ring = &eth->tx_ring;
2061 struct mtk_tx_dma *desc;
2062 struct sk_buff *skb;
2063 struct mtk_tx_buf *tx_buf;
2064 u32 cpu, dma;
2065
2066 cpu = ring->cpu_idx;
2067 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2068
2069 while ((cpu != dma) && budget) {
2070 tx_buf = &ring->buf[cpu];
2071 skb = tx_buf->skb;
2072 if (!skb)
2073 break;
2074
2075 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
2076 bytes[0] += skb->len;
2077 done[0]++;
2078 budget--;
2079 }
2080
developerc4671b22021-05-28 13:16:42 +08002081 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08002082
developere9356982022-07-04 09:03:20 +08002083 desc = ring->dma + cpu * eth->soc->txrx.txd_size;
developerfd40db22021-04-29 10:08:25 +08002084 ring->last_free = desc;
2085 atomic_inc(&ring->free_count);
2086
2087 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2088 }
2089
2090 ring->cpu_idx = cpu;
developerfd40db22021-04-29 10:08:25 +08002091}
2092
2093static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2094{
2095 struct mtk_tx_ring *ring = &eth->tx_ring;
2096 unsigned int done[MTK_MAX_DEVS];
2097 unsigned int bytes[MTK_MAX_DEVS];
2098 int total = 0, i;
2099
2100 memset(done, 0, sizeof(done));
2101 memset(bytes, 0, sizeof(bytes));
2102
2103 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
developerfb556ca2021-10-13 10:52:09 +08002104 mtk_poll_tx_qdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08002105 else
developerfb556ca2021-10-13 10:52:09 +08002106 mtk_poll_tx_pdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08002107
2108 for (i = 0; i < MTK_MAC_COUNT; i++) {
2109 if (!eth->netdev[i] || !done[i])
2110 continue;
2111 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
2112 total += done[i];
2113 }
2114
2115 if (mtk_queue_stopped(eth) &&
2116 (atomic_read(&ring->free_count) > ring->thresh))
2117 mtk_wake_queue(eth);
2118
2119 return total;
2120}
2121
2122static void mtk_handle_status_irq(struct mtk_eth *eth)
2123{
developer8051e042022-04-08 13:26:36 +08002124 u32 status2 = mtk_r32(eth, MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08002125
2126 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2127 mtk_stats_update(eth);
2128 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
developer8051e042022-04-08 13:26:36 +08002129 MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08002130 }
2131}
2132
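/* TX NAPI handler. The interrupt status is re-read after the budgeted
 * poll and the context stays scheduled if new completions arrived, so
 * work racing with napi_complete() is not lost.
 */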
2133static int mtk_napi_tx(struct napi_struct *napi, int budget)
2134{
2135 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2136 u32 status, mask;
2137 int tx_done = 0;
2138
2139 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2140 mtk_handle_status_irq(eth);
2141 mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
2142 tx_done = mtk_poll_tx(eth, budget);
2143
2144 if (unlikely(netif_msg_intr(eth))) {
2145 status = mtk_r32(eth, eth->tx_int_status_reg);
2146 mask = mtk_r32(eth, eth->tx_int_mask_reg);
2147 dev_info(eth->dev,
2148 "done tx %d, intr 0x%08x/0x%x\n",
2149 tx_done, status, mask);
2150 }
2151
2152 if (tx_done == budget)
2153 return budget;
2154
2155 status = mtk_r32(eth, eth->tx_int_status_reg);
2156 if (status & MTK_TX_DONE_INT)
2157 return budget;
2158
developerc4671b22021-05-28 13:16:42 +08002159 if (napi_complete(napi))
2160 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developerfd40db22021-04-29 10:08:25 +08002161
2162 return tx_done;
2163}
2164
2165static int mtk_napi_rx(struct napi_struct *napi, int budget)
2166{
developer18f46a82021-07-20 21:08:21 +08002167 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
2168 struct mtk_eth *eth = rx_napi->eth;
2169 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08002170 u32 status, mask;
2171 int rx_done = 0;
2172 int remain_budget = budget;
2173
2174 mtk_handle_status_irq(eth);
2175
2176poll_again:
developer18f46a82021-07-20 21:08:21 +08002177 mtk_w32(eth, MTK_RX_DONE_INT(ring->ring_no), MTK_PDMA_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08002178 rx_done = mtk_poll_rx(napi, remain_budget, eth);
2179
2180 if (unlikely(netif_msg_intr(eth))) {
2181 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
2182 mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
2183 dev_info(eth->dev,
2184 "done rx %d, intr 0x%08x/0x%x\n",
2185 rx_done, status, mask);
2186 }
2187 if (rx_done == remain_budget)
2188 return budget;
2189
2190 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
developer18f46a82021-07-20 21:08:21 +08002191 if (status & MTK_RX_DONE_INT(ring->ring_no)) {
developerfd40db22021-04-29 10:08:25 +08002192 remain_budget -= rx_done;
2193 goto poll_again;
2194 }
developerc4671b22021-05-28 13:16:42 +08002195
2196 if (napi_complete(napi))
developer18f46a82021-07-20 21:08:21 +08002197 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(ring->ring_no));
developerfd40db22021-04-29 10:08:25 +08002198
2199 return rx_done + budget - remain_budget;
2200}
2201
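/* Allocate the TX ring either from DRAM or from the SoC SRAM scratch
 * area. QDMA descriptors are chained into a circular free list through
 * txd2; on PDMA-only SoCs a parallel ring->dma_pdma array holds the
 * descriptors the hardware actually walks (see the comment below).
 */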
2202static int mtk_tx_alloc(struct mtk_eth *eth)
2203{
developere9356982022-07-04 09:03:20 +08002204 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002205 struct mtk_tx_ring *ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08002206 int i, sz = soc->txrx.txd_size;
2207 struct mtk_tx_dma_v2 *txd, *pdma_txd;
developerfd40db22021-04-29 10:08:25 +08002208
2209 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
2210 GFP_KERNEL);
2211 if (!ring->buf)
2212 goto no_tx_mem;
2213
2214 if (!eth->soc->has_sram)
2215 ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08002216 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08002217 else {
developere9356982022-07-04 09:03:20 +08002218 ring->dma = eth->scratch_ring + MTK_DMA_SIZE * sz;
developer8b6f2402022-11-28 13:42:34 +08002219 ring->phys = eth->phy_scratch_ring +
2220 MTK_DMA_SIZE * (dma_addr_t)sz;
developerfd40db22021-04-29 10:08:25 +08002221 }
2222
2223 if (!ring->dma)
2224 goto no_tx_mem;
2225
2226 for (i = 0; i < MTK_DMA_SIZE; i++) {
2227 int next = (i + 1) % MTK_DMA_SIZE;
2228 u32 next_ptr = ring->phys + next * sz;
2229
developere9356982022-07-04 09:03:20 +08002230 txd = ring->dma + i * sz;
2231 txd->txd2 = next_ptr;
2232 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2233 txd->txd4 = 0;
2234
developer089e8852022-09-28 14:43:46 +08002235 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2236 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developere9356982022-07-04 09:03:20 +08002237 txd->txd5 = 0;
2238 txd->txd6 = 0;
2239 txd->txd7 = 0;
2240 txd->txd8 = 0;
2241 }
developerfd40db22021-04-29 10:08:25 +08002242 }
2243
2244 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
2245 * only as the framework. The real HW descriptors are the PDMA
2246 * descriptors in ring->dma_pdma.
2247 */
2248 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2249 ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08002250 &ring->phys_pdma, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08002251 if (!ring->dma_pdma)
2252 goto no_tx_mem;
2253
2254 for (i = 0; i < MTK_DMA_SIZE; i++) {
developere9356982022-07-04 09:03:20 +08002255			pdma_txd = ring->dma_pdma + i * sz;
2256
2257 pdma_txd->txd2 = TX_DMA_DESP2_DEF;
2258 pdma_txd->txd4 = 0;
developerfd40db22021-04-29 10:08:25 +08002259 }
2260 }
2261
2262 ring->dma_size = MTK_DMA_SIZE;
2263 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
developere9356982022-07-04 09:03:20 +08002264 ring->next_free = ring->dma;
2265 ring->last_free = (void *)txd;
developerc4671b22021-05-28 13:16:42 +08002266 ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
developerfd40db22021-04-29 10:08:25 +08002267 ring->thresh = MAX_SKB_FRAGS;
2268
2269 /* make sure that all changes to the dma ring are flushed before we
2270 * continue
2271 */
2272 wmb();
2273
2274 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2275 mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
2276 mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
2277 mtk_w32(eth,
2278 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
2279 MTK_QTX_CRX_PTR);
developerc4671b22021-05-28 13:16:42 +08002280 mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
developerfd40db22021-04-29 10:08:25 +08002281 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
2282 MTK_QTX_CFG(0));
2283 } else {
2284 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2285 mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
2286 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2287 mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
2288 }
2289
2290 return 0;
2291
2292no_tx_mem:
2293 return -ENOMEM;
2294}
2295
2296static void mtk_tx_clean(struct mtk_eth *eth)
2297{
developere9356982022-07-04 09:03:20 +08002298 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002299 struct mtk_tx_ring *ring = &eth->tx_ring;
2300 int i;
2301
2302 if (ring->buf) {
2303 for (i = 0; i < MTK_DMA_SIZE; i++)
developerc4671b22021-05-28 13:16:42 +08002304 mtk_tx_unmap(eth, &ring->buf[i], false);
developerfd40db22021-04-29 10:08:25 +08002305 kfree(ring->buf);
2306 ring->buf = NULL;
2307 }
2308
2309 if (!eth->soc->has_sram && ring->dma) {
2310 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002311 MTK_DMA_SIZE * soc->txrx.txd_size,
2312 ring->dma, ring->phys);
developerfd40db22021-04-29 10:08:25 +08002313 ring->dma = NULL;
2314 }
2315
2316 if (ring->dma_pdma) {
2317 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002318 MTK_DMA_SIZE * soc->txrx.txd_size,
2319 ring->dma_pdma, ring->phys_pdma);
developerfd40db22021-04-29 10:08:25 +08002320 ring->dma_pdma = NULL;
2321 }
2322}
2323
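/* Allocate one RX ring. Normal rings may be carved out of SRAM right
 * behind the TX ring, while QDMA and HW LRO rings always live in DRAM.
 * With MTK_8GB_ADDRESSING, address bits 32..35 of each buffer travel
 * in the low bits of rxd2 via RX_DMA_SDP1().
 */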
2324static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2325{
2326 struct mtk_rx_ring *ring;
2327 int rx_data_len, rx_dma_size;
2328 int i;
developer089e8852022-09-28 14:43:46 +08002329 u64 addr64 = 0;
developerfd40db22021-04-29 10:08:25 +08002330
2331 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2332 if (ring_no)
2333 return -EINVAL;
2334 ring = &eth->rx_ring_qdma;
2335 } else {
2336 ring = &eth->rx_ring[ring_no];
2337 }
2338
2339 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2340 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2341 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2342 } else {
2343 rx_data_len = ETH_DATA_LEN;
2344 rx_dma_size = MTK_DMA_SIZE;
2345 }
2346
2347 ring->frag_size = mtk_max_frag_size(rx_data_len);
2348 ring->buf_size = mtk_max_buf_size(ring->frag_size);
2349 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2350 GFP_KERNEL);
2351 if (!ring->data)
2352 return -ENOMEM;
2353
2354 for (i = 0; i < rx_dma_size; i++) {
2355 ring->data[i] = netdev_alloc_frag(ring->frag_size);
2356 if (!ring->data[i])
2357 return -ENOMEM;
2358 }
2359
2360 	if (!eth->soc->has_sram || rx_flag != MTK_RX_FLAGS_NORMAL)
2362 ring->dma = dma_alloc_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002363 rx_dma_size * eth->soc->txrx.rxd_size,
2364 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08002365 else {
2366 struct mtk_tx_ring *tx_ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08002367 ring->dma = tx_ring->dma + MTK_DMA_SIZE *
2368 eth->soc->txrx.rxd_size * (ring_no + 1);
developer18f46a82021-07-20 21:08:21 +08002369 ring->phys = tx_ring->phys + MTK_DMA_SIZE *
developere9356982022-07-04 09:03:20 +08002370 eth->soc->txrx.rxd_size * (ring_no + 1);
developerfd40db22021-04-29 10:08:25 +08002371 }
2372
2373 if (!ring->dma)
2374 return -ENOMEM;
2375
2376 for (i = 0; i < rx_dma_size; i++) {
developere9356982022-07-04 09:03:20 +08002377 struct mtk_rx_dma_v2 *rxd;
2378
developerfd40db22021-04-29 10:08:25 +08002379 dma_addr_t dma_addr = dma_map_single(eth->dev,
2380 ring->data[i] + NET_SKB_PAD + eth->ip_align,
2381 ring->buf_size,
2382 DMA_FROM_DEVICE);
2383 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
2384 return -ENOMEM;
developere9356982022-07-04 09:03:20 +08002385
2386 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2387 rxd->rxd1 = (unsigned int)dma_addr;
developerfd40db22021-04-29 10:08:25 +08002388
developer089e8852022-09-28 14:43:46 +08002389 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
2390 RX_DMA_SDP1(dma_addr) : 0;
2391
developerfd40db22021-04-29 10:08:25 +08002392 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
developere9356982022-07-04 09:03:20 +08002393 rxd->rxd2 = RX_DMA_LSO;
developerfd40db22021-04-29 10:08:25 +08002394 else
developer089e8852022-09-28 14:43:46 +08002395 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size) | addr64;
developerfd40db22021-04-29 10:08:25 +08002396
developere9356982022-07-04 09:03:20 +08002397 rxd->rxd3 = 0;
2398 rxd->rxd4 = 0;
2399
developer089e8852022-09-28 14:43:46 +08002400 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2401 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developere9356982022-07-04 09:03:20 +08002402 rxd->rxd5 = 0;
2403 rxd->rxd6 = 0;
2404 rxd->rxd7 = 0;
2405 rxd->rxd8 = 0;
developerfd40db22021-04-29 10:08:25 +08002406 }
developerfd40db22021-04-29 10:08:25 +08002407 }
2408 ring->dma_size = rx_dma_size;
2409 ring->calc_idx_update = false;
2410 ring->calc_idx = rx_dma_size - 1;
2411 ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ?
2412 MTK_QRX_CRX_IDX_CFG(ring_no) :
2413 MTK_PRX_CRX_IDX_CFG(ring_no);
developer77d03a72021-06-06 00:06:00 +08002414 ring->ring_no = ring_no;
developerfd40db22021-04-29 10:08:25 +08002415 /* make sure that all changes to the dma ring are flushed before we
2416 * continue
2417 */
2418 wmb();
2419
2420 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2421 mtk_w32(eth, ring->phys, MTK_QRX_BASE_PTR_CFG(ring_no));
2422 mtk_w32(eth, rx_dma_size, MTK_QRX_MAX_CNT_CFG(ring_no));
2423 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2424 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_QDMA_RST_IDX);
2425 } else {
2426 mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
2427 mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
2428 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2429 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
2430 }
2431
2432 return 0;
2433}
2434
2435static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram)
2436{
2437 int i;
developer089e8852022-09-28 14:43:46 +08002438 u64 addr64 = 0;
developerfd40db22021-04-29 10:08:25 +08002439
2440 if (ring->data && ring->dma) {
2441 for (i = 0; i < ring->dma_size; i++) {
developere9356982022-07-04 09:03:20 +08002442 struct mtk_rx_dma *rxd;
2443
developerfd40db22021-04-29 10:08:25 +08002444 if (!ring->data[i])
2445 continue;
developere9356982022-07-04 09:03:20 +08002446
2447 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2448 if (!rxd->rxd1)
developerfd40db22021-04-29 10:08:25 +08002449 continue;
developere9356982022-07-04 09:03:20 +08002450
developer089e8852022-09-28 14:43:46 +08002451 addr64 = (MTK_HAS_CAPS(eth->soc->caps,
2452 MTK_8GB_ADDRESSING)) ?
2453 ((u64)(rxd->rxd2 & 0xf)) << 32 : 0;
2454
developerfd40db22021-04-29 10:08:25 +08002455 dma_unmap_single(eth->dev,
developer089e8852022-09-28 14:43:46 +08002456 (u64)(rxd->rxd1 | addr64),
developerfd40db22021-04-29 10:08:25 +08002457 ring->buf_size,
2458 DMA_FROM_DEVICE);
2459 skb_free_frag(ring->data[i]);
2460 }
2461 kfree(ring->data);
2462 ring->data = NULL;
2463 }
2464
2465 	if (in_sram)
2466 return;
2467
2468 if (ring->dma) {
2469 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002470 ring->dma_size * eth->soc->txrx.rxd_size,
developerfd40db22021-04-29 10:08:25 +08002471 ring->dma,
2472 ring->phys);
2473 ring->dma = NULL;
2474 }
2475}
2476
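/* Program the HW LRO rings in auto-learn mode. All aging/aggregation
 * timers use a 20us unit, so a 1 second interval corresponds to a
 * register count of 50000 (1s / 20us); see the refresh timer setup
 * below.
 */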
2477static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2478{
2479 int i;
developer77d03a72021-06-06 00:06:00 +08002480 u32 val;
developerfd40db22021-04-29 10:08:25 +08002481 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2482 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2483
2484 /* set LRO rings to auto-learn modes */
2485 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2486
2487 /* validate LRO ring */
2488 ring_ctrl_dw2 |= MTK_RING_VLD;
2489
2490 /* set AGE timer (unit: 20us) */
2491 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2492 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2493
2494 /* set max AGG timer (unit: 20us) */
2495 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2496
2497 /* set max LRO AGG count */
2498 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2499 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2500
developer77d03a72021-06-06 00:06:00 +08002501 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002502 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2503 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2504 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2505 }
2506
2507 /* IPv4 checksum update enable */
2508 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2509
2510 /* switch priority comparison to packet count mode */
2511 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2512
2513 /* bandwidth threshold setting */
2514 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2515
2516 /* auto-learn score delta setting */
developer77d03a72021-06-06 00:06:00 +08002517 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_LRO_ALT_SCORE_DELTA);
developerfd40db22021-04-29 10:08:25 +08002518
2519 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2520 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2521 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2522
developerfd40db22021-04-29 10:08:25 +08002523	/* the minimal remaining room in SDL0 of the RXD for LRO aggregation */
2524 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2525
developer089e8852022-09-28 14:43:46 +08002526 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2527 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer77d03a72021-06-06 00:06:00 +08002528 val = mtk_r32(eth, MTK_PDMA_RX_CFG);
2529 mtk_w32(eth, val | (MTK_PDMA_LRO_SDL << MTK_RX_CFG_SDL_OFFSET),
2530 MTK_PDMA_RX_CFG);
2531
2532 lro_ctrl_dw0 |= MTK_PDMA_LRO_SDL << MTK_CTRL_DW0_SDL_OFFSET;
2533 } else {
2534 /* set HW LRO mode & the max aggregation count for rx packets */
2535 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2536 }
2537
developerfd40db22021-04-29 10:08:25 +08002538 /* enable HW LRO */
2539 lro_ctrl_dw0 |= MTK_LRO_EN;
2540
developer77d03a72021-06-06 00:06:00 +08002541	/* enable the CPU reason blacklist */
2542 lro_ctrl_dw0 |= MTK_LRO_CRSN_BNW;
2543
developerfd40db22021-04-29 10:08:25 +08002544 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2545 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2546
developer77d03a72021-06-06 00:06:00 +08002547	/* do not use the PPE CPU reason */
2548 mtk_w32(eth, 0xffffffff, MTK_PDMA_LRO_CTRL_DW1);
2549
developerfd40db22021-04-29 10:08:25 +08002550 return 0;
2551}
2552
2553static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2554{
2555 int i;
2556 u32 val;
2557
2558 /* relinquish lro rings, flush aggregated packets */
developer77d03a72021-06-06 00:06:00 +08002559 mtk_w32(eth, MTK_LRO_RING_RELINGUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
developerfd40db22021-04-29 10:08:25 +08002560
2561 /* wait for relinquishments done */
2562 for (i = 0; i < 10; i++) {
2563 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
developer77d03a72021-06-06 00:06:00 +08002564 if (val & MTK_LRO_RING_RELINGUISH_DONE) {
developer8051e042022-04-08 13:26:36 +08002565 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08002566 continue;
2567 }
2568 break;
2569 }
2570
2571 /* invalidate lro rings */
developer77d03a72021-06-06 00:06:00 +08002572 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
developerfd40db22021-04-29 10:08:25 +08002573 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2574
2575 /* disable HW LRO */
2576 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2577}
2578
2579static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2580{
2581 u32 reg_val;
2582
developer089e8852022-09-28 14:43:46 +08002583 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2584 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer77d03a72021-06-06 00:06:00 +08002585 idx += 1;
2586
developerfd40db22021-04-29 10:08:25 +08002587 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2588
2589 /* invalidate the IP setting */
2590 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2591
2592 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2593
2594 /* validate the IP setting */
2595 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2596}
2597
2598static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2599{
2600 u32 reg_val;
2601
developer089e8852022-09-28 14:43:46 +08002602 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2603 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer77d03a72021-06-06 00:06:00 +08002604 idx += 1;
2605
developerfd40db22021-04-29 10:08:25 +08002606 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2607
2608 /* invalidate the IP setting */
2609 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2610
2611 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2612}
2613
2614static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2615{
2616 int cnt = 0;
2617 int i;
2618
2619 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2620 if (mac->hwlro_ip[i])
2621 cnt++;
2622 }
2623
2624 return cnt;
2625}
2626
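/* ethtool rxnfc hook that steers a flow to HW LRO. Only TCPv4 rules
 * keyed on the destination IP at location 0 or 1 are accepted. An
 * illustrative (not device-verified) userspace invocation:
 *
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.100 loc 0
 */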
2627static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2628 struct ethtool_rxnfc *cmd)
2629{
2630 struct ethtool_rx_flow_spec *fsp =
2631 (struct ethtool_rx_flow_spec *)&cmd->fs;
2632 struct mtk_mac *mac = netdev_priv(dev);
2633 struct mtk_eth *eth = mac->hw;
2634 int hwlro_idx;
2635
2636 if ((fsp->flow_type != TCP_V4_FLOW) ||
2637 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2638 (fsp->location > 1))
2639 return -EINVAL;
2640
2641 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2642 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2643
2644 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2645
2646 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2647
2648 return 0;
2649}
2650
2651static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2652 struct ethtool_rxnfc *cmd)
2653{
2654 struct ethtool_rx_flow_spec *fsp =
2655 (struct ethtool_rx_flow_spec *)&cmd->fs;
2656 struct mtk_mac *mac = netdev_priv(dev);
2657 struct mtk_eth *eth = mac->hw;
2658 int hwlro_idx;
2659
2660 if (fsp->location > 1)
2661 return -EINVAL;
2662
2663 mac->hwlro_ip[fsp->location] = 0;
2664 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2665
2666 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2667
2668 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2669
2670 return 0;
2671}
2672
2673static void mtk_hwlro_netdev_disable(struct net_device *dev)
2674{
2675 struct mtk_mac *mac = netdev_priv(dev);
2676 struct mtk_eth *eth = mac->hw;
2677 int i, hwlro_idx;
2678
2679 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2680 mac->hwlro_ip[i] = 0;
2681 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2682
2683 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2684 }
2685
2686 mac->hwlro_ip_cnt = 0;
2687}
2688
2689static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2690 struct ethtool_rxnfc *cmd)
2691{
2692 struct mtk_mac *mac = netdev_priv(dev);
2693 struct ethtool_rx_flow_spec *fsp =
2694 (struct ethtool_rx_flow_spec *)&cmd->fs;
2695
2696 	/* only the TCP IPv4 destination address is meaningful; the other fields are ignored */
2697 fsp->flow_type = TCP_V4_FLOW;
2698 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2699 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2700
2701 fsp->h_u.tcp_ip4_spec.ip4src = 0;
2702 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2703 fsp->h_u.tcp_ip4_spec.psrc = 0;
2704 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2705 fsp->h_u.tcp_ip4_spec.pdst = 0;
2706 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2707 fsp->h_u.tcp_ip4_spec.tos = 0;
2708 fsp->m_u.tcp_ip4_spec.tos = 0xff;
2709
2710 return 0;
2711}
2712
2713static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2714 struct ethtool_rxnfc *cmd,
2715 u32 *rule_locs)
2716{
2717 struct mtk_mac *mac = netdev_priv(dev);
2718 int cnt = 0;
2719 int i;
2720
2721 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2722 if (mac->hwlro_ip[i]) {
2723 rule_locs[cnt] = i;
2724 cnt++;
2725 }
2726 }
2727
2728 cmd->rule_cnt = cnt;
2729
2730 return 0;
2731}
2732
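/* Bring up RSS: enable IPv4/IPv6 static hashing, program the
 * indirection table, and wrap the enable in a MTK_RSS_CFG_REQ
 * pause/release handshake so the engine never sees a half-written
 * configuration.
 */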
developer18f46a82021-07-20 21:08:21 +08002733static int mtk_rss_init(struct mtk_eth *eth)
2734{
2735 u32 val;
2736
developer089e8852022-09-28 14:43:46 +08002737 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1)) {
developer18f46a82021-07-20 21:08:21 +08002738 /* Set RSS rings to PSE modes */
2739 val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(1));
2740 val |= MTK_RING_PSE_MODE;
2741 mtk_w32(eth, val, MTK_LRO_CTRL_DW2_CFG(1));
2742
2743 /* Enable non-lro multiple rx */
2744 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2745 val |= MTK_NON_LRO_MULTI_EN;
2746 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2747
2748 		/* Enable RSS delay interrupt support */
2749 val |= MTK_LRO_DLY_INT_EN;
2750 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2751
2752 		/* Set the RSS delay interrupt for ring 1 */
2753 mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX1_DLY_INT);
2754 }
2755
2756 /* Hash Type */
2757 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2758 val |= MTK_RSS_IPV4_STATIC_HASH;
2759 val |= MTK_RSS_IPV6_STATIC_HASH;
2760 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2761
2762 /* Select the size of indirection table */
2763 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW0);
2764 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW1);
2765 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW2);
2766 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW3);
2767 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW4);
2768 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW5);
2769 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW6);
2770 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW7);
2771
2772 /* Pause */
2773 val |= MTK_RSS_CFG_REQ;
2774 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2775
2776 	/* Enable RSS */
2777 val |= MTK_RSS_EN;
2778 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2779
2780 /* Release pause */
2781 val &= ~(MTK_RSS_CFG_REQ);
2782 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2783
2784 	/* Set the per-RSS group interrupt */
2785 mtk_w32(eth, MTK_RX_DONE_INT(MTK_RSS_RING1), MTK_PDMA_INT_GRP3);
2786
2787 /* Set GRP INT */
2788 mtk_w32(eth, 0x21021030, MTK_FE_INT_GRP);
2789
developer089e8852022-09-28 14:43:46 +08002790 /* Enable RSS delay interrupt */
2791 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_RSS_DELAY_INT);
2792
developer18f46a82021-07-20 21:08:21 +08002793 return 0;
2794}
2795
2796static void mtk_rss_uninit(struct mtk_eth *eth)
2797{
2798 u32 val;
2799
2800 /* Pause */
2801 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2802 val |= MTK_RSS_CFG_REQ;
2803 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2804
2805 	/* Disable RSS */
2806 val &= ~(MTK_RSS_EN);
2807 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2808
2809 /* Release pause */
2810 val &= ~(MTK_RSS_CFG_REQ);
2811 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2812}
2813
developerfd40db22021-04-29 10:08:25 +08002814static netdev_features_t mtk_fix_features(struct net_device *dev,
2815 netdev_features_t features)
2816{
2817 if (!(features & NETIF_F_LRO)) {
2818 struct mtk_mac *mac = netdev_priv(dev);
2819 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2820
2821 if (ip_cnt) {
2822 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
2823
2824 features |= NETIF_F_LRO;
2825 }
2826 }
2827
2828 if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
2829 netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n");
2830
2831 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2832 }
2833
2834 return features;
2835}
2836
2837static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2838{
2839 struct mtk_mac *mac = netdev_priv(dev);
2840 struct mtk_eth *eth = mac->hw;
2841 int err = 0;
2842
2843 if (!((dev->features ^ features) & MTK_SET_FEATURES))
2844 return 0;
2845
2846 if (!(features & NETIF_F_LRO))
2847 mtk_hwlro_netdev_disable(dev);
2848
2849 if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
2850 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
2851 else
2852 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
2853
2854 return err;
2855}
2856
2857/* wait for DMA to finish whatever it is doing before we start using it again */
2858static int mtk_dma_busy_wait(struct mtk_eth *eth)
2859{
2860 unsigned long t_start = jiffies;
2861
2862 while (1) {
2863 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2864 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
2865 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2866 return 0;
2867 } else {
2868 if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
2869 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2870 return 0;
2871 }
2872
2873 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
2874 break;
2875 }
2876
2877 dev_err(eth->dev, "DMA init timeout\n");
2878 return -1;
2879}
2880
2881static int mtk_dma_init(struct mtk_eth *eth)
2882{
2883 int err;
2884 u32 i;
2885
2886 if (mtk_dma_busy_wait(eth))
2887 return -EBUSY;
2888
2889 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2890 /* QDMA needs scratch memory for internal reordering of the
2891 * descriptors
2892 */
2893 err = mtk_init_fq_dma(eth);
2894 if (err)
2895 return err;
2896 }
2897
2898 err = mtk_tx_alloc(eth);
2899 if (err)
2900 return err;
2901
2902 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2903 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2904 if (err)
2905 return err;
2906 }
2907
2908 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2909 if (err)
2910 return err;
2911
2912 if (eth->hwlro) {
developer089e8852022-09-28 14:43:46 +08002913 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1)) ? 1 : 4;
developer77d03a72021-06-06 00:06:00 +08002914 for (; i < MTK_MAX_RX_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002915 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2916 if (err)
2917 return err;
2918 }
2919 err = mtk_hwlro_rx_init(eth);
2920 if (err)
2921 return err;
2922 }
2923
developer18f46a82021-07-20 21:08:21 +08002924 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2925 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
2926 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_NORMAL);
2927 if (err)
2928 return err;
2929 }
2930 err = mtk_rss_init(eth);
2931 if (err)
2932 return err;
2933 }
2934
developerfd40db22021-04-29 10:08:25 +08002935 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2936 /* Enable random early drop and set drop threshold
2937 * automatically
2938 */
2939 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2940 FC_THRES_MIN, MTK_QDMA_FC_THRES);
2941 mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
2942 }
2943
2944 return 0;
2945}
2946
2947static void mtk_dma_free(struct mtk_eth *eth)
2948{
developere9356982022-07-04 09:03:20 +08002949 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002950 int i;
2951
2952 for (i = 0; i < MTK_MAC_COUNT; i++)
2953 if (eth->netdev[i])
2954 netdev_reset_queue(eth->netdev[i]);
2955 	if (!eth->soc->has_sram && eth->scratch_ring) {
2956 dma_free_coherent(eth->dev,
developere9356982022-07-04 09:03:20 +08002957 MTK_DMA_SIZE * soc->txrx.txd_size,
2958 eth->scratch_ring, eth->phy_scratch_ring);
developerfd40db22021-04-29 10:08:25 +08002959 eth->scratch_ring = NULL;
2960 eth->phy_scratch_ring = 0;
2961 }
2962 mtk_tx_clean(eth);
developerb3ce86f2022-06-30 13:31:47 +08002963	mtk_rx_clean(eth, &eth->rx_ring[0], eth->soc->has_sram);
developerfd40db22021-04-29 10:08:25 +08002964	mtk_rx_clean(eth, &eth->rx_ring_qdma, 0);
2965
2966 if (eth->hwlro) {
2967 mtk_hwlro_rx_uninit(eth);
developer77d03a72021-06-06 00:06:00 +08002968
developer089e8852022-09-28 14:43:46 +08002969 i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1)) ? 1 : 4;
developer77d03a72021-06-06 00:06:00 +08002970 for (; i < MTK_MAX_RX_RING_NUM; i++)
2971 mtk_rx_clean(eth, &eth->rx_ring[i], 0);
developerfd40db22021-04-29 10:08:25 +08002972 }
2973
developer18f46a82021-07-20 21:08:21 +08002974 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
2975 mtk_rss_uninit(eth);
2976
2977 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
2978 mtk_rx_clean(eth, &eth->rx_ring[i], 1);
2979 }
2980
developer94008d92021-09-23 09:47:41 +08002981 if (eth->scratch_head) {
2982 kfree(eth->scratch_head);
2983 eth->scratch_head = NULL;
2984 }
developerfd40db22021-04-29 10:08:25 +08002985}
2986
2987static void mtk_tx_timeout(struct net_device *dev)
2988{
2989 struct mtk_mac *mac = netdev_priv(dev);
2990 struct mtk_eth *eth = mac->hw;
2991
2992 eth->netdev[mac->id]->stats.tx_errors++;
2993 netif_err(eth, tx_err, dev,
2994 "transmit timed out\n");
developer8051e042022-04-08 13:26:36 +08002995
2996 if (atomic_read(&reset_lock) == 0)
2997 schedule_work(&eth->pending_work);
developerfd40db22021-04-29 10:08:25 +08002998}
2999
developer18f46a82021-07-20 21:08:21 +08003000static irqreturn_t mtk_handle_irq_rx(int irq, void *priv)
developerfd40db22021-04-29 10:08:25 +08003001{
developer18f46a82021-07-20 21:08:21 +08003002 struct mtk_napi *rx_napi = priv;
3003 struct mtk_eth *eth = rx_napi->eth;
3004 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08003005
developer18f46a82021-07-20 21:08:21 +08003006 if (likely(napi_schedule_prep(&rx_napi->napi))) {
developer18f46a82021-07-20 21:08:21 +08003007 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(ring->ring_no));
developer6bbe70d2021-08-06 09:34:55 +08003008 __napi_schedule(&rx_napi->napi);
developerfd40db22021-04-29 10:08:25 +08003009 }
3010
3011 return IRQ_HANDLED;
3012}
3013
3014static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3015{
3016 struct mtk_eth *eth = _eth;
3017
3018 if (likely(napi_schedule_prep(&eth->tx_napi))) {
developerfd40db22021-04-29 10:08:25 +08003019 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer6bbe70d2021-08-06 09:34:55 +08003020 __napi_schedule(&eth->tx_napi);
developerfd40db22021-04-29 10:08:25 +08003021 }
3022
3023 return IRQ_HANDLED;
3024}
3025
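/* Shared top-half for SoCs with a single interrupt line: demux by
 * checking which of the RX ring 0 and TX done sources are both
 * unmasked and pending, then delegate to the dedicated handlers.
 */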
3026static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3027{
3028 struct mtk_eth *eth = _eth;
3029
developer18f46a82021-07-20 21:08:21 +08003030 if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT(0)) {
3031 if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT(0))
3032 mtk_handle_irq_rx(irq, &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08003033 }
3034 if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
3035 if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
3036 mtk_handle_irq_tx(irq, _eth);
3037 }
3038
3039 return IRQ_HANDLED;
3040}
3041
developera2613e62022-07-01 18:29:37 +08003042static irqreturn_t mtk_handle_irq_fixed_link(int irq, void *_mac)
3043{
3044 struct mtk_mac *mac = _mac;
3045 struct mtk_eth *eth = mac->hw;
3046 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
3047 struct net_device *dev = phylink_priv->dev;
3048 int link_old, link_new;
3049
3050 // clear interrupt status for gpy211
3051 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
3052
3053 link_old = phylink_priv->link;
3054 link_new = _mtk_mdio_read(eth, phylink_priv->phyaddr, MII_BMSR) & BMSR_LSTATUS;
3055
3056 if (link_old != link_new) {
3057 phylink_priv->link = link_new;
3058 if (link_new) {
3059 printk("phylink.%d %s: Link is Up\n", phylink_priv->id, dev->name);
3060 if (dev)
3061 netif_carrier_on(dev);
3062 } else {
3063 printk("phylink.%d %s: Link is Down\n", phylink_priv->id, dev->name);
3064 if (dev)
3065 netif_carrier_off(dev);
3066 }
3067 }
3068
3069 return IRQ_HANDLED;
3070}
3071
developerfd40db22021-04-29 10:08:25 +08003072#ifdef CONFIG_NET_POLL_CONTROLLER
3073static void mtk_poll_controller(struct net_device *dev)
3074{
3075 struct mtk_mac *mac = netdev_priv(dev);
3076 struct mtk_eth *eth = mac->hw;
3077
3078 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08003079 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
3080 mtk_handle_irq_rx(eth->irq[2], &eth->rx_napi[0]);
developerfd40db22021-04-29 10:08:25 +08003081 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developer18f46a82021-07-20 21:08:21 +08003082 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
developerfd40db22021-04-29 10:08:25 +08003083}
3084#endif
3085
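/* Bring up the DMA engines. The GLO_CFG bit sets differ by NETSYS
 * generation; for instance, V2/V3 additionally enable write-combining
 * and DDONE checking that V1 lacks.
 */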
3086static int mtk_start_dma(struct mtk_eth *eth)
3087{
3088 u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
developer77d03a72021-06-06 00:06:00 +08003089 int val, err;
developerfd40db22021-04-29 10:08:25 +08003090
3091 err = mtk_dma_init(eth);
3092 if (err) {
3093 mtk_dma_free(eth);
3094 return err;
3095 }
3096
3097 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
developer15d0d282021-07-14 16:40:44 +08003098 val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
developer089e8852022-09-28 14:43:46 +08003099 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
3100 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer19d84562022-04-21 17:01:06 +08003101 val &= ~MTK_RESV_BUF_MASK;
developerfd40db22021-04-29 10:08:25 +08003102 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08003103 val | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08003104 MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
3105 MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
3106 MTK_RESV_BUF | MTK_WCOMP_EN |
3107 MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
developer1ac65932022-07-19 17:23:32 +08003108 MTK_RX_2B_OFFSET, MTK_QDMA_GLO_CFG);
developer19d84562022-04-21 17:01:06 +08003109 }
developerfd40db22021-04-29 10:08:25 +08003110 else
3111 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08003112 val | MTK_TX_DMA_EN |
developerfd40db22021-04-29 10:08:25 +08003113 MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
3114 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
3115 MTK_RX_BT_32DWORDS,
3116 MTK_QDMA_GLO_CFG);
3117
developer15d0d282021-07-14 16:40:44 +08003118 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
developerfd40db22021-04-29 10:08:25 +08003119 mtk_w32(eth,
developer15d0d282021-07-14 16:40:44 +08003120 val | MTK_RX_DMA_EN | rx_2b_offset |
developerfd40db22021-04-29 10:08:25 +08003121 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3122 MTK_PDMA_GLO_CFG);
3123 } else {
3124 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3125 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3126 MTK_PDMA_GLO_CFG);
3127 }
3128
developer089e8852022-09-28 14:43:46 +08003129 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1) && eth->hwlro) {
developer77d03a72021-06-06 00:06:00 +08003130 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
3131 mtk_w32(eth, val | MTK_RX_DMA_LRO_EN, MTK_PDMA_GLO_CFG);
3132 }
3133
developerfd40db22021-04-29 10:08:25 +08003134 return 0;
3135}
3136
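/* Configure GDM forwarding for one MAC: clear the forward port field,
 * enable IP/TCP/UDP RX checksum offload, OR in the caller's @config
 * (typically a PSE destination port), and keep the MTK special tag
 * when the netdev sits under DSA.
 */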
void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
{
	u32 val;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		return;

	val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));

	/* By default, set up the forward port to send frames to PDMA */
	val &= ~0xffff;

	/* Enable RX checksum */
	val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

	val |= config;

	if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
		val |= MTK_GDMA_SPECIAL_TAG;

	mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
}

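/* Exported so that other modules (the hnat offload driver, for instance)
 * can retune the PSE drop behaviour at runtime. A hypothetical caller
 * would simply do:
 *
 *	mtk_set_pse_drop(0x300);
 *
 * which is a no-op until this driver has probed and set up g_eth.
 */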
void mtk_set_pse_drop(u32 config)
{
	struct mtk_eth *eth = g_eth;

	if (eth)
		mtk_w32(eth, config, PSE_PPE0_DROP);
}
EXPORT_SYMBOL(mtk_set_pse_drop);

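/* ndo_open callback: attach the PHY via phylink, bring the shared DMA
 * rings up on first use (tracked by dma_refcnt), enable NAPI and the
 * done interrupts, then point the GDM forward config at PDMA.
 */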
static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
	int err, i;
	struct device_node *phy_node;

	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
	if (err) {
		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
			   err);
		return err;
	}

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!refcount_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err)
			return err;

		/* Indicate to the CDM that it should parse the MTK special
		 * tag on frames coming from the CPU.
		 */
		if (netdev_uses_dsa(dev)) {
			u32 val;

			val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
			mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
			val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
			mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
		}

		napi_enable(&eth->tx_napi);
		napi_enable(&eth->rx_napi[0].napi);
		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
			for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
				napi_enable(&eth->rx_napi[i].napi);
				mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(i));
			}
		}

		refcount_set(&eth->dma_refcnt, 1);
	} else {
		refcount_inc(&eth->dma_refcnt);
	}

	if (phylink_priv->desc) {
		/* Notice: this programming sequence is only for the GPY211
		 * single-PHY chip. If your single-PHY chip is not a GPY211,
		 * contact the PHY vendor for the details of how to:
		 *  - enable the link status change interrupt
		 *  - clear the interrupt source
		 */

		/* clear interrupt source for gpy211 */
		_mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);

		/* enable link status change interrupt for gpy211 */
		_mtk_mdio_write(eth, phylink_priv->phyaddr, 0x19, 0x0001);

		phylink_priv->dev = dev;

		/* override the dev pointer for single-PHY chip 0 */
		if (phylink_priv->id == 0) {
			struct net_device *tmp;

			tmp = __dev_get_by_name(&init_net, phylink_priv->label);
			if (tmp)
				phylink_priv->dev = tmp;
			else
				phylink_priv->dev = NULL;
		}
	}

	phylink_start(mac->phylink);
	netif_start_queue(dev);
	phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (!phy_node && eth->xgmii->regmap_sgmii[mac->id])
		regmap_write(eth->xgmii->regmap_sgmii[mac->id],
			     SGMSYS_QPHY_PWR_STATE_CTRL, 0);

	mtk_gdm_config(eth, mac->id, MTK_GDMA_TO_PDMA);

	return 0;
}

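/* Clear the TX/RX enable bits in @glo_cfg and poll the busy flags for up
 * to ten 20 ms intervals so that in-flight descriptors can drain before
 * the rings are torn down.
 */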
static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_bh(&eth->page_lock);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_bh(&eth->page_lock);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			mdelay(20);
			continue;
		}
		break;
	}
}

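/* ndo_stop callback, the mirror of mtk_open(): set GDM to drop, power
 * down the PHY (or the SGMII QPHY on PHY-less links), and only shut the
 * shared DMA down once the last netdev user is gone.
 */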
static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int i;
	u32 val = 0;
	struct device_node *phy_node;

	mtk_gdm_config(eth, mac->id, MTK_GDMA_DROP_ALL);
	netif_tx_disable(dev);

	phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (phy_node) {
		val = _mtk_mdio_read(eth, 0, 0);
		val |= BMCR_PDOWN;
		_mtk_mdio_write(eth, 0, 0, val);
	} else if (eth->xgmii->regmap_sgmii[mac->id]) {
		regmap_read(eth->xgmii->regmap_sgmii[mac->id],
			    SGMSYS_QPHY_PWR_STATE_CTRL, &val);
		val |= SGMII_PHYA_PWD;
		regmap_write(eth->xgmii->regmap_sgmii[mac->id],
			     SGMSYS_QPHY_PWR_STATE_CTRL, val);
	}

	/* disable GMAC RX */
	val = mtk_r32(eth, MTK_MAC_MCR(mac->id));
	mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(mac->id));

	phylink_stop(mac->phylink);

	phylink_disconnect_phy(mac->phylink);

	/* only shut down the DMA if this is the last user */
	if (!refcount_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
	napi_disable(&eth->tx_napi);
	napi_disable(&eth->rx_napi[0].napi);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
		for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
			mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(i));
			napi_disable(&eth->rx_napi[i].napi);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);

	mtk_dma_free(eth);

	return 0;
}

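/* Assert @reset_bits in ETHSYS_RSTCTRL, poll up to ~5 s for the hardware
 * to latch them, then deassert and let the block settle for 10 ms.
 */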
void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
	u32 val = 0, i = 0;

	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits, reset_bits);

	while (i++ < 5000) {
		mdelay(1);
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);

		if ((val & reset_bits) == reset_bits) {
			mtk_reset_event_update(eth, MTK_EVENT_COLD_CNT);
			regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
					   reset_bits, ~reset_bits);
			break;
		}
	}

	mdelay(10);
}

static void mtk_clk_disable(struct mtk_eth *eth)
{
	int clk;

	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
		clk_disable_unprepare(eth->clks[clk]);
}

static int mtk_clk_enable(struct mtk_eth *eth)
{
	int clk, ret;

	for (clk = 0; clk < MTK_CLK_MAX; clk++) {
		ret = clk_prepare_enable(eth->clks[clk]);
		if (ret)
			goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	while (--clk >= 0)
		clk_disable_unprepare(eth->clks[clk]);

	return ret;
}

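/* Bind each RX NAPI context to its ring and FE interrupt group; ring i
 * is serviced by interrupt group 2 + i, with rings 1..n only used when
 * the SoC supports RSS.
 */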
static int mtk_napi_init(struct mtk_eth *eth)
{
	struct mtk_napi *rx_napi = &eth->rx_napi[0];
	int i;

	rx_napi->eth = eth;
	rx_napi->rx_ring = &eth->rx_ring[0];
	rx_napi->irq_grp_no = 2;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
		for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
			rx_napi = &eth->rx_napi[i];
			rx_napi->eth = eth;
			rx_napi->rx_ring = &eth->rx_ring[i];
			rx_napi->irq_grp_no = 2 + i;
		}
	}

	return 0;
}

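/* Cold reset at probe time, warm reset from the SER recovery path. When
 * entered with the reset lock held (warm reset), clock and runtime-PM
 * setup is skipped and only the FE, PSE thresholds and interrupt
 * grouping are re-programmed.
 */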
static int mtk_hw_init(struct mtk_eth *eth, u32 type)
{
	int i, ret = 0;
	u32 val;

	pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
		atomic_read(&reset_lock), atomic_read(&force));

	if (atomic_read(&reset_lock) == 0) {
		if (test_and_set_bit(MTK_HW_INIT, &eth->state))
			return 0;

		pm_runtime_enable(eth->dev);
		pm_runtime_get_sync(eth->dev);

		ret = mtk_clk_enable(eth);
		if (ret)
			goto err_disable_pm;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		ret = device_reset(eth->dev);
		if (ret) {
			dev_err(eth->dev, "MAC reset failed!\n");
			goto err_disable_pm;
		}

		/* enable interrupt delay for RX */
		mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);

		/* disable delay and normal interrupt */
		mtk_tx_irq_disable(eth, ~0);
		mtk_rx_irq_disable(eth, ~0);

		return 0;
	}

	pr_info("[%s] execute fe %s reset\n", __func__,
		(type == MTK_TYPE_WARM_RESET) ? "warm" : "cold");

	if (type == MTK_TYPE_WARM_RESET)
		mtk_eth_warm_reset(eth);
	else
		mtk_eth_cold_reset(eth);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		/* Set FE to PDMAv2 if necessary */
		mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
	}

	if (eth->pctl) {
		/* Set GE2 driving and slew rate */
		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

		/* set GE2 TDSEL */
		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

		/* set GE2 TUNE */
		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
	}

	/* Set linkdown as the default for each GMAC. Its own MCR would be set
	 * up with the more appropriate value when mtk_mac_config call is being
	 * invoked.
	 */
	for (i = 0; i < MTK_MAC_COUNT; i++)
		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));

	/* Enable RX VLAN offloading */
	if (eth->soc->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
	else
		mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);

	/* enable interrupt delay for RX/TX */
	mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
	mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);

	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);

	/* FE int grouping */
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_PDMA_INT_GRP2);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_QDMA_INT_GRP2);
	mtk_w32(eth, 0x21021003, MTK_FE_INT_GRP);
	mtk_w32(eth, MTK_FE_INT_TSO_FAIL |
		MTK_FE_INT_TSO_ILLEGAL | MTK_FE_INT_TSO_ALIGN |
		MTK_FE_INT_RFIFO_OV | MTK_FE_INT_RFIFO_UF, MTK_FE_INT_ENABLE);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		/* PSE should not drop port1, port8 and port9 packets */
		mtk_w32(eth, 0x00000302, PSE_NO_DROP_CFG);

		/* PSE should drop p8 and p9 packets when the WDMA RX ring is full */
		mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);

		/* PSE free buffer drop threshold */
		mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));

		/* GDM and CDM Threshold */
		mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
		mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);

		/* Disable GDM1 RX CRC stripping */
		val = mtk_r32(eth, MTK_GDMA_FWD_CFG(0));
		val &= ~MTK_GDMA_STRP_CRC;
		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(0));

		/* The PSE GDM3 MIB counters have incorrect hw default values,
		 * so the driver ought to read-clear them beforehand in case
		 * ethtool retrieves wrong MIB values.
		 */
		for (i = 0; i < MTK_STAT_OFFSET; i += 0x4)
			mtk_r32(eth,
				MTK_GDM1_TX_GBCNT + MTK_STAT_OFFSET * 2 + i);
	} else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		/* PSE Free Queue Flow Control */
		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);

		/* PSE should not drop port8 and port9 packets from WDMA Tx */
		mtk_w32(eth, 0x00000300, PSE_NO_DROP_CFG);

		/* PSE should drop p8 and p9 packets when the WDMA RX ring is full */
		mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);

		/* PSE config input queue threshold */
		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
		mtk_w32(eth, 0x002a000e, PSE_IQ_REV(8));

		/* PSE config output queue threshold */
		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));

		/* GDM and CDM Threshold */
		mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
	}

	return 0;

err_disable_pm:
	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return ret;
}

static int mtk_hw_deinit(struct mtk_eth *eth)
{
	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
		return 0;

	mtk_clk_disable(eth);

	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return 0;
}

static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *mac_addr;

	mac_addr = of_get_mac_address(mac->of_node);
	if (!IS_ERR(mac_addr))
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use a random mac address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	return 0;
}

static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phylink_disconnect_phy(mac->phylink);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);
}

static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
	default:
		/* by default, invoke the mtk_eth_dbg handler */
		return mtk_do_priv_ioctl(dev, ifr, cmd);
	}

	return -EOPNOTSUPP;
}

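/* Walk every PHY on the "mdio-bus" DT node and set or clear the power-
 * down bit through register 0 of MMD 0x1e, so all external PHYs go quiet
 * for the duration of a SER reset and wake up again afterwards.
 */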
int mtk_phy_config(struct mtk_eth *eth, int enable)
{
	struct device_node *mii_np = NULL;
	struct device_node *child = NULL;
	int addr = 0;
	u32 val = 0;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		dev_err(eth->dev, "device is not available\n");
		return -ENODEV;
	}

	for_each_available_child_of_node(mii_np, child) {
		addr = of_mdio_parse_addr(&eth->mii_bus->dev, child);
		if (addr < 0)
			continue;
		pr_info("%s %d addr:%d name:%s\n",
			__func__, __LINE__, addr, child->name);
		val = _mtk_mdio_read(eth, addr, mdiobus_c45_addr(0x1e, 0));
		if (enable)
			val &= ~BMCR_PDOWN;
		else
			val |= BMCR_PDOWN;
		_mtk_mdio_write(eth, addr, mdiobus_c45_addr(0x1e, 0), val);
	}

	return 0;
}

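/* FE system-error-recovery worker: quiesce the PHYs, PPE and FE, tell
 * the Wi-Fi driver about the reset through the MTK_FE_* netdevice
 * notifier events, warm-reset the hardware and bring every netdev that
 * was running back up again.
 */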
static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
	int err, i = 0;
	unsigned long restart = 0;
	u32 val = 0;

	atomic_inc(&reset_lock);
	val = mtk_r32(eth, MTK_FE_INT_STATUS);
	if (!mtk_check_reset_event(eth, val)) {
		atomic_dec(&reset_lock);
		pr_info("[%s] No need to do FE reset!\n", __func__);
		return;
	}

	rtnl_lock();

	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
		cpu_relax();

	mtk_phy_config(eth, 0);

	/* Adjust PPE configurations to prepare for reset */
	mtk_prepare_reset_ppe(eth, 0);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
		mtk_prepare_reset_ppe(eth, 1);

	/* Adjust FE configurations to prepare for reset */
	mtk_prepare_reset_fe(eth);

	/* Trigger the Wi-Fi SER reset */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (mtk_reset_flag == MTK_FE_STOP_TRAFFIC) {
			pr_info("send MTK_FE_STOP_TRAFFIC event\n");
			call_netdevice_notifiers(MTK_FE_STOP_TRAFFIC,
						 eth->netdev[i]);
		} else {
			pr_info("send MTK_FE_START_RESET event\n");
			call_netdevice_notifiers(MTK_FE_START_RESET,
						 eth->netdev[i]);
		}
		rtnl_unlock();
		if (!wait_for_completion_timeout(&wait_ser_done, 3000))
			pr_warn("wait for MTK_FE_START_RESET\n");
		rtnl_lock();
		break;
	}

	del_timer_sync(&eth->mtk_dma_monitor_timer);
	pr_info("[%s] mtk_stop starts!\n", __func__);
	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		__set_bit(i, &restart);
	}
	pr_info("[%s] mtk_stop ends!\n", __func__);
	mdelay(15);

	if (eth->dev->pins)
		pinctrl_select_state(eth->dev->pins->p,
				     eth->dev->pins->default_state);

	pr_info("[%s] mtk_hw_init starts!\n", __func__);
	mtk_hw_init(eth, MTK_TYPE_WARM_RESET);
	pr_info("[%s] mtk_hw_init ends!\n", __func__);

	/* restart DMA and enable IRQs */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!test_bit(i, &restart) || !eth->netdev[i])
			continue;
		err = mtk_open(eth->netdev[i]);
		if (err) {
			netif_alert(eth, ifup, eth->netdev[i],
				    "Driver up/down cycle failed, closing device.\n");
			dev_close(eth->netdev[i]);
		}
	}

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (mtk_reset_flag == MTK_FE_STOP_TRAFFIC) {
			pr_info("send MTK_FE_START_TRAFFIC event\n");
			call_netdevice_notifiers(MTK_FE_START_TRAFFIC,
						 eth->netdev[i]);
		} else {
			pr_info("send MTK_FE_RESET_DONE event\n");
			call_netdevice_notifiers(MTK_FE_RESET_DONE,
						 eth->netdev[i]);
		}
		call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE,
					 eth->netdev[i]);
		break;
	}

	atomic_dec(&reset_lock);

	timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
	eth->mtk_dma_monitor_timer.expires = jiffies;
	add_timer(&eth->mtk_dma_monitor_timer);

	mtk_phy_config(eth, 1);
	mtk_reset_flag = 0;
	clear_bit_unlock(MTK_RESETTING, &eth->state);

	rtnl_unlock();
}

static int mtk_free_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		free_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_unreg_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		unregister_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_cleanup(struct mtk_eth *eth)
{
	mtk_unreg_dev(eth);
	mtk_free_dev(eth);
	cancel_work_sync(&eth->pending_work);

	return 0;
}

static int mtk_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
}

static int mtk_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	if (!mac->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_nway_reset(mac->phylink);
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hwstats->stats_lock);
		}
	}

	data_src = (u64 *)hwstats;

	do {
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		if (dev->hw_features & NETIF_F_LRO) {
			cmd->data = MTK_MAX_RX_RING_NUM;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (dev->hw_features & NETIF_F_LRO) {
			struct mtk_mac *mac = netdev_priv(dev);

			cmd->rule_cnt = mac->hwlro_ip_cnt;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRULE:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_all(dev, cmd,
						     rule_locs);
		break;
	default:
		break;
	}

	return ret;
}

static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_add_ipaddr(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_del_ipaddr(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 val;

	pause->autoneg = 0;

	if (mac->type == MTK_GDM_TYPE) {
		val = mtk_r32(eth, MTK_MAC_MCR(mac->id));

		pause->rx_pause = !!(val & MAC_MCR_FORCE_RX_FC);
		pause->tx_pause = !!(val & MAC_MCR_FORCE_TX_FC);
	} else if (mac->type == MTK_XGDM_TYPE) {
		val = mtk_r32(eth, MTK_XMAC_MCR(mac->id));

		pause->rx_pause = !!(val & XMAC_MCR_FORCE_RX_FC);
		pause->tx_pause = !!(val & XMAC_MCR_FORCE_TX_FC);
	}
}

static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return phylink_ethtool_set_pauseparam(mac->phylink, pause);
}

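/* EEE is only wired up for GMAC (MTK_GDM_TYPE) ports: the TX LPI idle
 * threshold lives in the MTK_MAC_EEE register and is limited to 255 when
 * LPI is being enabled; everything else is delegated to phylink.
 */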
static int mtk_get_eee(struct net_device *dev, struct ethtool_eee *eee)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 val;

	if (mac->type == MTK_GDM_TYPE) {
		val = mtk_r32(eth, MTK_MAC_EEE(mac->id));

		eee->tx_lpi_enabled = mac->tx_lpi_enabled;
		eee->tx_lpi_timer = FIELD_GET(MAC_EEE_LPI_TXIDLE_THD, val);
	}

	return phylink_ethtool_get_eee(mac->phylink, eee);
}

static int mtk_set_eee(struct net_device *dev, struct ethtool_eee *eee)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (mac->type == MTK_GDM_TYPE) {
		if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
			return -EINVAL;

		mac->tx_lpi_timer = eee->tx_lpi_timer;

		mtk_setup_eee(mac, eee->eee_enabled && eee->tx_lpi_timer);
	}

	return phylink_ethtool_set_eee(mac->phylink, eee);
}

static const struct ethtool_ops mtk_ethtool_ops = {
	.get_link_ksettings = mtk_get_link_ksettings,
	.set_link_ksettings = mtk_set_link_ksettings,
	.get_drvinfo = mtk_get_drvinfo,
	.get_msglevel = mtk_get_msglevel,
	.set_msglevel = mtk_set_msglevel,
	.nway_reset = mtk_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_strings = mtk_get_strings,
	.get_sset_count = mtk_get_sset_count,
	.get_ethtool_stats = mtk_get_ethtool_stats,
	.get_rxnfc = mtk_get_rxnfc,
	.set_rxnfc = mtk_set_rxnfc,
	.get_pauseparam = mtk_get_pauseparam,
	.set_pauseparam = mtk_set_pauseparam,
	.get_eee = mtk_get_eee,
	.set_eee = mtk_set_eee,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init = mtk_init,
	.ndo_uninit = mtk_uninit,
	.ndo_open = mtk_open,
	.ndo_stop = mtk_stop,
	.ndo_start_xmit = mtk_start_xmit,
	.ndo_set_mac_address = mtk_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = mtk_do_ioctl,
	.ndo_tx_timeout = mtk_tx_timeout,
	.ndo_get_stats64 = mtk_get_stats64,
	.ndo_fix_features = mtk_fix_features,
	.ndo_set_features = mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = mtk_poll_controller,
#endif
};

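/* Parse one "mediatek,eth-mac" DT node: allocate the netdev and its
 * counters, create the phylink instance, and optionally hook up a
 * GPIO-based link monitor described by a "fixed-link" child node.
 */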
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	const __be32 *_id = of_get_property(np, "reg", NULL);
	const char *label;
	struct phylink *phylink;
	int mac_type, phy_mode, id, err;
	struct mtk_mac *mac;
	struct mtk_phylink_priv *phylink_priv;
	struct fwnode_handle *fixed_node;
	struct gpio_desc *desc;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id < 0 || id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
	mac->hwlro_ip_cnt = 0;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	/* phylink create */
	phy_mode = of_get_phy_mode(np);
	if (phy_mode < 0) {
		dev_err(eth->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto free_netdev;
	}

	/* mac config is not set */
	mac->interface = PHY_INTERFACE_MODE_NA;
	mac->mode = MLO_AN_PHY;
	mac->speed = SPEED_UNKNOWN;

	mac->tx_lpi_timer = 1;

	mac->phylink_config.dev = &eth->netdev[id]->dev;
	mac->phylink_config.type = PHYLINK_NETDEV;

	mac->type = 0;
	if (!of_property_read_string(np, "mac-type", &label)) {
		for (mac_type = 0; mac_type < MTK_GDM_TYPE_MAX; mac_type++) {
			if (!strcasecmp(label, gdm_type(mac_type)))
				break;
		}

		switch (mac_type) {
		case 0:
			mac->type = MTK_GDM_TYPE;
			break;
		case 1:
			mac->type = MTK_XGDM_TYPE;
			break;
		default:
			dev_warn(eth->dev, "incorrect mac-type\n");
			break;
		}
	}

	phylink = phylink_create(&mac->phylink_config,
				 of_fwnode_handle(mac->of_node),
				 phy_mode, &mtk_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto free_netdev;
	}

	mac->phylink = phylink;

	fixed_node = fwnode_get_named_child_node(of_fwnode_handle(mac->of_node),
						 "fixed-link");
	if (fixed_node) {
		desc = fwnode_get_named_gpiod(fixed_node, "link-gpio",
					      0, GPIOD_IN, "?");
		if (!IS_ERR(desc)) {
			struct device_node *phy_np;
			const char *label;
			int irq, phyaddr;

			phylink_priv = &mac->phylink_priv;

			phylink_priv->desc = desc;
			phylink_priv->id = id;
			phylink_priv->link = -1;

			irq = gpiod_to_irq(desc);
			if (irq > 0) {
				devm_request_irq(eth->dev, irq, mtk_handle_irq_fixed_link,
						 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
						 "ethernet:fixed link", mac);
			}

			if (!of_property_read_string(to_of_node(fixed_node),
						     "label", &label)) {
				if (strlen(label) < 16)
					strncpy(phylink_priv->label, label,
						strlen(label));
				else
					dev_err(eth->dev, "insufficient space for label!\n");
			}

			phy_np = of_parse_phandle(to_of_node(fixed_node), "phy-handle", 0);
			if (phy_np) {
				if (!of_property_read_u32(phy_np, "reg", &phyaddr))
					phylink_priv->phyaddr = phyaddr;
			}
		}
		fwnode_handle_put(fixed_node);
	}

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;

	eth->netdev[id]->hw_features = eth->soc->hw_features;
	if (eth->hwlro)
		eth->netdev[id]->hw_features |= NETIF_F_LRO;

	eth->netdev[id]->vlan_features = eth->soc->hw_features &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= eth->soc->hw_features;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	eth->netdev[id]->irq = eth->irq[0];
	eth->netdev[id]->dev.of_node = np;

	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}

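/* Platform probe: map the register (and, on NETSYS v3, SRAM) space, look
 * up the syscon regmaps, clocks and IRQs, cold-reset the hardware, then
 * register one netdev per available MAC node plus a dummy device that
 * hosts the shared NAPI contexts.
 */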
static int mtk_probe(struct platform_device *pdev)
{
	struct device_node *mac_np;
	struct mtk_eth *eth;
	int err, i;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->soc = of_device_get_match_data(&pdev->dev);

	eth->dev = &pdev->dev;
	eth->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		eth->sram_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(eth->sram_base))
			return PTR_ERR(eth->sram_base);
	}

	if (eth->soc->has_sram) {
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (unlikely(!res))
			return -EINVAL;
		eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
		eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
	} else {
		eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
		eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
		eth->ip_align = NET_IP_ALIGN;
	} else {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
			eth->rx_dma_l4_valid = RX_DMA_L4_VALID_V2;
		else
			eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
		if (!err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(36));
			if (err) {
				dev_err(&pdev->dev, "Wrong DMA config\n");
				return -EINVAL;
			}
		}
	}

	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->tx_irq_lock);
	spin_lock_init(&eth->rx_irq_lock);
	spin_lock_init(&eth->syscfg0_lock);

	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							      "mediatek,ethsys");
		if (IS_ERR(eth->ethsys)) {
			dev_err(&pdev->dev, "no ethsys regmap found\n");
			return PTR_ERR(eth->ethsys);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							     "mediatek,infracfg");
		if (IS_ERR(eth->infra)) {
			dev_err(&pdev->dev, "no infracfg regmap found\n");
			return PTR_ERR(eth->infra);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
		eth->xgmii = devm_kzalloc(eth->dev, sizeof(*eth->xgmii),
					  GFP_KERNEL);
		if (!eth->xgmii)
			return -ENOMEM;

		eth->xgmii->eth = eth;
		err = mtk_sgmii_init(eth->xgmii, pdev->dev.of_node,
				     eth->soc->ana_rgc3);

		if (err)
			return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
		err = mtk_usxgmii_init(eth->xgmii, pdev->dev.of_node);
		if (err)
			return err;

		err = mtk_xfi_pextp_init(eth->xgmii, pdev->dev.of_node);
		if (err)
			return err;

		err = mtk_xfi_pll_init(eth->xgmii, pdev->dev.of_node);
		if (err)
			return err;

		err = mtk_toprgu_init(eth, pdev->dev.of_node);
		if (err)
			return err;
	}

	if (eth->soc->required_pctl) {
		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "mediatek,pctl");
		if (IS_ERR(eth->pctl)) {
			dev_err(&pdev->dev, "no pctl regmap found\n");
			return PTR_ERR(eth->pctl);
		}
	}

	for (i = 0; i < MTK_MAX_IRQ_NUM; i++) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
			eth->irq[i] = eth->irq[0];
		else
			eth->irq[i] = platform_get_irq(pdev, i);
		if (eth->irq[i] < 0) {
			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
			return -ENXIO;
		}
	}

	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev,
					    mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			if (eth->soc->required_clks & BIT(i)) {
				dev_err(&pdev->dev, "clock %s not found\n",
					mtk_clks_source_name[i]);
				return -EINVAL;
			}
			eth->clks[i] = NULL;
		}
	}

	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth, MTK_TYPE_COLD_RESET);
	if (err)
		return err;

	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err) {
			of_node_put(mac_np);
			goto err_deinit_hw;
		}
	}

	err = mtk_napi_init(eth);
	if (err)
		goto err_free_dev;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
		err = devm_request_irq(eth->dev, eth->irq[0],
				       mtk_handle_irq, 0,
				       dev_name(eth->dev), eth);
	} else {
		err = devm_request_irq(eth->dev, eth->irq[1],
				       mtk_handle_irq_tx, 0,
				       dev_name(eth->dev), eth);
		if (err)
			goto err_free_dev;

		err = devm_request_irq(eth->dev, eth->irq[2],
				       mtk_handle_irq_rx, 0,
				       dev_name(eth->dev), &eth->rx_napi[0]);
		if (err)
			goto err_free_dev;

		if (MTK_MAX_IRQ_NUM > 3) {
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
				for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
					err = devm_request_irq(eth->dev,
							       eth->irq[2 + i],
							       mtk_handle_irq_rx, 0,
							       dev_name(eth->dev),
							       &eth->rx_napi[i]);
					if (err)
						goto err_free_dev;
				}
			} else {
				err = devm_request_irq(eth->dev, eth->irq[3],
						       mtk_handle_fe_irq, 0,
						       dev_name(eth->dev), eth);
				if (err)
					goto err_free_dev;
			}
		}
	}

	if (err)
		goto err_free_dev;

	/* No MT7628/88 support yet */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		err = mtk_mdio_init(eth);
		if (err)
			goto err_free_dev;
	}

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;

		err = register_netdev(eth->netdev[i]);
		if (err) {
			dev_err(eth->dev, "error bringing up device\n");
			goto err_deinit_mdio;
		} else {
			netif_info(eth, probe, eth->netdev[i],
				   "mediatek frame engine at 0x%08lx, irq %d\n",
				   eth->netdev[i]->base_addr, eth->irq[0]);
		}
	}

	/* we run 2 devices on the same DMA ring, so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
		       MTK_NAPI_WEIGHT);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi[0].napi, mtk_napi_rx,
		       MTK_NAPI_WEIGHT);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
		for (i = 1; i < MTK_RX_NAPI_NUM; i++)
			netif_napi_add(&eth->dummy_dev, &eth->rx_napi[i].napi,
				       mtk_napi_rx, MTK_NAPI_WEIGHT);
	}

#if defined(CONFIG_XFRM_OFFLOAD)
	mtk_ipsec_offload_init(eth);
#endif
	mtketh_debugfs_init(eth);
	debug_proc_init(eth);

	platform_set_drvdata(pdev, eth);

	register_netdevice_notifier(&mtk_eth_netdevice_nb);
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
	timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
	eth->mtk_dma_monitor_timer.expires = jiffies;
	add_timer(&eth->mtk_dma_monitor_timer);
#endif

	return 0;

err_deinit_mdio:
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);

	return err;
}

static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	struct mtk_mac *mac;
	int i;

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		mac = netdev_priv(eth->netdev[i]);
		phylink_disconnect_phy(mac->phylink);
	}

	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi[0].napi);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
		for (i = 1; i < MTK_RX_NAPI_NUM; i++)
			netif_napi_del(&eth->rx_napi[i].napi);
	}

	mtk_cleanup(eth);
	mtk_mdio_cleanup(eth);
	unregister_netdevice_notifier(&mtk_eth_netdevice_nb);
	del_timer_sync(&eth->mtk_dma_monitor_timer);

	return 0;
}

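/* Per-SoC capability tables. Note that the NETSYS v2/v3 generations
 * (MT7981/MT7986/MT7988) switch to the v2 descriptor layout and place
 * their DMA rings in SRAM (has_sram = true).
 */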
static const struct mtk_soc_data mt2701_data = {
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
	.has_sram = false,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

static const struct mtk_soc_data mt7621_data = {
	.caps = MT7621_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7621_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = false,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

static const struct mtk_soc_data mt7622_data = {
	.ana_rgc3 = 0x2028,
	.caps = MT7622_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7622_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = false,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

static const struct mtk_soc_data mt7623_data = {
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
	.has_sram = false,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

static const struct mtk_soc_data mt7629_data = {
	.ana_rgc3 = 0x128,
	.caps = MT7629_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7629_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = false,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

static const struct mtk_soc_data mt7986_data = {
	.ana_rgc3 = 0x128,
	.caps = MT7986_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7986_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = true,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma_v2),
		.rxd_size = sizeof(struct mtk_rx_dma_v2),
		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
	},
};

static const struct mtk_soc_data mt7981_data = {
	.ana_rgc3 = 0x128,
	.caps = MT7981_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7981_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = true,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma_v2),
		.rxd_size = sizeof(struct mtk_rx_dma_v2),
		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
	},
};

static const struct mtk_soc_data mt7988_data = {
	.ana_rgc3 = 0x128,
	.caps = MT7988_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7988_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = true,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma_v2),
		.rxd_size = sizeof(struct mtk_rx_dma_v2),
		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
	},
};

static const struct mtk_soc_data rt5350_data = {
	.caps = MT7628_CAPS,
	.hw_features = MTK_HW_FEATURES_MT7628,
	.required_clks = MT7628_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = false,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
	{ .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
	{ .compatible = "mediatek,mt7988-eth", .data = &mt7988_data},
	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");