// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>

#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
#include "mtk_eth_reset.h"

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
#include "mtk_hnat/nf_hnat_mtk.h"
#endif

#if defined(CONFIG_XFRM_OFFLOAD)
#include <crypto/sha.h>
#include <net/xfrm.h>
#include "mtk_ipsec.h"
#endif

static int mtk_msg_level = -1;
atomic_t reset_lock = ATOMIC_INIT(0);
atomic_t force = ATOMIC_INIT(0);
atomic_t reset_pending = ATOMIC_INIT(0);

module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
DECLARE_COMPLETION(wait_ser_done);

#define MTK_ETHTOOL_STAT(x) { #x, \
			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

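/* Illustrative expansion: MTK_ETHTOOL_STAT(tx_bytes) becomes
 * { "tx_bytes", offsetof(struct mtk_hw_stats, tx_bytes) / sizeof(u64) },
 * i.e. the string ethtool reports plus the u64 slot index of that
 * counter within struct mtk_hw_stats.
 */
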
static const struct mtk_reg_map mtk_reg_map = {
	.tx_irq_mask = 0x1a1c,
	.tx_irq_status = 0x1a18,
	.pdma = {
		.rx_ptr = 0x0900,
		.rx_cnt_cfg = 0x0904,
		.pcrx_ptr = 0x0908,
		.glo_cfg = 0x0a04,
		.rst_idx = 0x0a08,
		.delay_irq = 0x0a0c,
		.irq_status = 0x0a20,
		.irq_mask = 0x0a28,
		.int_grp = 0x0a50,
		.int_grp2 = 0x0a54,
	},
	.qdma = {
		.qtx_cfg = 0x1800,
		.qtx_sch = 0x1804,
		.rx_ptr = 0x1900,
		.rx_cnt_cfg = 0x1904,
		.qcrx_ptr = 0x1908,
		.glo_cfg = 0x1a04,
		.rst_idx = 0x1a08,
		.delay_irq = 0x1a0c,
		.fc_th = 0x1a10,
		.tx_sch_rate = 0x1a14,
		.int_grp = 0x1a20,
		.int_grp2 = 0x1a24,
		.hred2 = 0x1a44,
		.ctx_ptr = 0x1b00,
		.dtx_ptr = 0x1b04,
		.crx_ptr = 0x1b10,
		.drx_ptr = 0x1b14,
		.fq_head = 0x1b20,
		.fq_tail = 0x1b24,
		.fq_count = 0x1b28,
		.fq_blen = 0x1b2c,
	},
	.gdm1_cnt = 0x2400,
	.gdma_to_ppe0 = 0x4444,
	.ppe_base = {
		[0] = 0x0c00,
	},
	.wdma_base = {
		[0] = 0x2800,
		[1] = 0x2c00,
	},
};

static const struct mtk_reg_map mt7628_reg_map = {
	.tx_irq_mask = 0x0a28,
	.tx_irq_status = 0x0a20,
	.pdma = {
		.rx_ptr = 0x0900,
		.rx_cnt_cfg = 0x0904,
		.pcrx_ptr = 0x0908,
		.glo_cfg = 0x0a04,
		.rst_idx = 0x0a08,
		.delay_irq = 0x0a0c,
		.irq_status = 0x0a20,
		.irq_mask = 0x0a28,
		.int_grp = 0x0a50,
		.int_grp2 = 0x0a54,
	},
};

static const struct mtk_reg_map mt7986_reg_map = {
	.tx_irq_mask = 0x461c,
	.tx_irq_status = 0x4618,
	.pdma = {
		.rx_ptr = 0x6100,
		.rx_cnt_cfg = 0x6104,
		.pcrx_ptr = 0x6108,
		.glo_cfg = 0x6204,
		.rst_idx = 0x6208,
		.delay_irq = 0x620c,
		.irq_status = 0x6220,
		.irq_mask = 0x6228,
		.int_grp = 0x6250,
		.int_grp2 = 0x6254,
	},
	.qdma = {
		.qtx_cfg = 0x4400,
		.qtx_sch = 0x4404,
		.rx_ptr = 0x4500,
		.rx_cnt_cfg = 0x4504,
		.qcrx_ptr = 0x4508,
		.glo_cfg = 0x4604,
		.rst_idx = 0x4608,
		.delay_irq = 0x460c,
		.fc_th = 0x4610,
		.int_grp = 0x4620,
		.int_grp2 = 0x4624,
		.hred2 = 0x4644,
		.ctx_ptr = 0x4700,
		.dtx_ptr = 0x4704,
		.crx_ptr = 0x4710,
		.drx_ptr = 0x4714,
		.fq_head = 0x4720,
		.fq_tail = 0x4724,
		.fq_count = 0x4728,
		.fq_blen = 0x472c,
		.tx_sch_rate = 0x4798,
	},
	.gdm1_cnt = 0x1c00,
	.gdma_to_ppe0 = 0x3333,
	.ppe_base = {
		[0] = 0x2000,
		[1] = 0x2400,
	},
	.wdma_base = {
		[0] = 0x4800,
		[1] = 0x4c00,
	},
};

static const struct mtk_reg_map mt7988_reg_map = {
	.tx_irq_mask = 0x461c,
	.tx_irq_status = 0x4618,
	.pdma = {
		.rx_ptr = 0x6900,
		.rx_cnt_cfg = 0x6904,
		.pcrx_ptr = 0x6908,
		.glo_cfg = 0x6a04,
		.rst_idx = 0x6a08,
		.delay_irq = 0x6a0c,
		.irq_status = 0x6a20,
		.irq_mask = 0x6a28,
		.int_grp = 0x6a50,
		.int_grp2 = 0x6a54,
	},
	.qdma = {
		.qtx_cfg = 0x4400,
		.qtx_sch = 0x4404,
		.rx_ptr = 0x4500,
		.rx_cnt_cfg = 0x4504,
		.qcrx_ptr = 0x4508,
		.glo_cfg = 0x4604,
		.rst_idx = 0x4608,
		.delay_irq = 0x460c,
		.fc_th = 0x4610,
		.int_grp = 0x4620,
		.int_grp2 = 0x4624,
		.hred2 = 0x4644,
		.ctx_ptr = 0x4700,
		.dtx_ptr = 0x4704,
		.crx_ptr = 0x4710,
		.drx_ptr = 0x4714,
		.fq_head = 0x4720,
		.fq_tail = 0x4724,
		.fq_count = 0x4728,
		.fq_blen = 0x472c,
		.tx_sch_rate = 0x4798,
	},
	.gdm1_cnt = 0x1c00,
	.gdma_to_ppe0 = 0x3333,
	.ppe_base = {
		[0] = 0x2000,
		[1] = 0x2400,
		[2] = 0x2c00,
	},
	.wdma_base = {
		[0] = 0x4800,
		[1] = 0x4c00,
		[2] = 0x5000,
	},
};

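/* These per-SoC register maps are dereferenced through eth->soc->reg_map
 * at runtime (see e.g. the IRQ mask helpers further below), so one code
 * path can drive the legacy, MT7628, MT7986 and MT7988 register layouts.
 */
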
/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "gp3",
	"xgp1", "xgp2", "xgp3", "crypto", "fe", "trgpll",
	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
	"sgmii_ck", "eth2pll", "wocpu0", "wocpu1",
	"ethwarp_wocpu2", "ethwarp_wocpu1", "ethwarp_wocpu0",
	"top_usxgmii0_sel", "top_usxgmii1_sel", "top_sgm0_sel", "top_sgm1_sel",
	"top_xfi_phy0_xtal_sel", "top_xfi_phy1_xtal_sel", "top_eth_gmii_sel",
	"top_eth_refck_50m_sel", "top_eth_sys_200m_sel", "top_eth_sys_sel",
	"top_eth_xgmii_sel", "top_eth_mii_sel", "top_netsys_sel",
	"top_netsys_500m_sel", "top_netsys_pao_2x_sel",
	"top_netsys_sync_250m_sel", "top_netsys_ppefb_250m_sel",
	"top_netsys_warp_sel",
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
	u32 val;

	val = mtk_r32(eth, reg);
	val &= ~mask;
	val |= set;
	mtk_w32(eth, val, reg);
	return reg;
}

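/* Usage sketch for the read-modify-write helper above, as used later in
 * this file to pulse the MT7623 RXC reset:
 *
 *	mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL, TRGMII_RCK_CTRL);
 *	mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
 *
 * Note that mtk_m32() returns the register offset it was passed, not the
 * value written; all callers here ignore the return value.
 */
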
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		cond_resched();
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

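/* Background note: phy_reg carries a clause-45 register address when the
 * MII_ADDR_C45 flag is set; mdiobus_c45_devad() and mdiobus_c45_regad()
 * unpack the device and register fields from it, which is what the two
 * accessors below branch on.
 */
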
u32 _mtk_mdio_write(struct mtk_eth *eth, int phy_addr,
		    int phy_reg, u16 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return -1;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_WRITE |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return 0xffff;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_READ_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

static int mtk_mdio_reset(struct mii_bus *bus)
{
	/* mdiobus_register() will trigger a reset pulse when bus reset is
	 * enabled; we just need to wait until the device is ready.
	 */
	mdelay(20);

	return 0;
}

static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
				     phy_interface_t interface)
{
	u32 val = 0;

	/* Check DDR memory type.
	 * Currently TRGMII mode with DDR2 memory is not supported.
	 */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
	if (interface == PHY_INTERFACE_MODE_TRGMII &&
	    val & SYSCFG_DRAM_TYPE_DDR2) {
		dev_err(eth->dev,
			"TRGMII mode with DDR2 memory is not supported!\n");
		return -EOPNOTSUPP;
	}

	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
	       ETHSYS_TRGMII_MT7621_DDR_PLL : 0;

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_MT7621_MASK, val);

	return 0;
}

static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
				   phy_interface_t interface, int speed)
{
	u32 val;
	int ret;

	if (interface == PHY_INTERFACE_MODE_TRGMII) {
		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
		val = 500000000;
		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
		if (ret)
			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
		return;
	}

	val = (speed == SPEED_1000) ?
	       INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
	       RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
	       TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

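/* Clock summary for mtk_gmac0_rgmii_adjust() above (derived from the code,
 * not from the datasheet): TRGMII pins the TRGPLL at 500 MHz; RGMII picks
 * the 362.5 clock select, runs the TRGPLL at 250 MHz for 1000 Mbps links or
 * 500 MHz for 10/100 Mbps links, and programs matching RX/TX clock controls.
 */
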
static void mtk_setup_bridge_switch(struct mtk_eth *eth)
{
	int val;

	/* Force Port1 XGMAC Link Up */
	val = mtk_r32(eth, MTK_XGMAC_STS(MTK_GMAC1_ID));
	mtk_w32(eth, val | MTK_XGMAC_FORCE_LINK,
		MTK_XGMAC_STS(MTK_GMAC1_ID));

	/* Adjust GSW bridge IPG to 11 */
	val = mtk_r32(eth, MTK_GSW_CFG);
	val &= ~(GSWTX_IPG_MASK | GSWRX_IPG_MASK);
	val |= (GSW_IPG_11 << GSWTX_IPG_SHIFT) |
	       (GSW_IPG_11 << GSWRX_IPG_SHIFT);
	mtk_w32(eth, val, MTK_GSW_CFG);
}

static void mtk_setup_eee(struct mtk_mac *mac, bool enable)
{
	struct mtk_eth *eth = mac->hw;
	u32 mcr, mcr_cur;
	u32 val;

	mcr = mcr_cur = mtk_r32(eth, MTK_MAC_MCR(mac->id));
	mcr &= ~(MAC_MCR_FORCE_EEE100 | MAC_MCR_FORCE_EEE1000);

	if (enable) {
		mac->tx_lpi_enabled = 1;

		val = FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 19) |
		      FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 33) |
		      FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD,
				 mac->tx_lpi_timer) |
		      FIELD_PREP(MAC_EEE_RESV0, 14);
		mtk_w32(eth, val, MTK_MAC_EEE(mac->id));

		switch (mac->speed) {
		case SPEED_1000:
			mcr |= MAC_MCR_FORCE_EEE1000;
			break;
		case SPEED_100:
			mcr |= MAC_MCR_FORCE_EEE100;
			break;
		}
	} else {
		mac->tx_lpi_enabled = 0;

		mtk_w32(eth, 0x00000002, MTK_MAC_EEE(mac->id));
	}

	/* Only update control register when needed! */
	if (mcr != mcr_cur)
		mtk_w32(eth, mcr, MTK_MAC_MCR(mac->id));
}

static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	u32 sid, i;
	int val = 0, ge_mode, err = 0;
	unsigned int mac_type = mac->type;

	/* MT76x8 has no hardware settings for the MAC */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
	    mac->interface != state->interface) {
		/* Setup soc pin functions */
		switch (state->interface) {
		case PHY_INTERFACE_MODE_TRGMII:
			if (mac->id)
				goto err_phy;
			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
					  MTK_GMAC1_TRGMII))
				goto err_phy;
			/* fall through */
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_REVMII:
		case PHY_INTERFACE_MODE_RMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
		case PHY_INTERFACE_MODE_2500BASEX:
		case PHY_INTERFACE_MODE_SGMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_GMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
				err = mtk_gmac_gephy_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_XGMII:
			mac->type = MTK_XGDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_XGMII)) {
				err = mtk_gmac_xgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_USXGMII:
		case PHY_INTERFACE_MODE_10GKR:
		case PHY_INTERFACE_MODE_5GBASER:
			mac->type = MTK_XGDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
				err = mtk_gmac_usxgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		default:
			goto err_phy;
		}

		/* Setup clock for 1st gmac */
		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
		    !phy_interface_mode_is_8023z(state->interface) &&
		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
			if (MTK_HAS_CAPS(mac->hw->soc->caps,
					 MTK_TRGMII_MT7621_CLK)) {
				if (mt7621_gmac0_rgmii_adjust(mac->hw,
							      state->interface))
					goto err_phy;
			} else {
				mtk_gmac0_rgmii_adjust(mac->hw,
						       state->interface,
						       state->speed);

				/* mt7623_pad_clk_setup */
				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
					mtk_w32(mac->hw,
						TD_DM_DRVP(8) | TD_DM_DRVN(8),
						TRGMII_TD_ODT(i));

				/* Assert/release MT7623 RXC reset */
				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
					TRGMII_RCK_CTRL);
				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
			}
		}

		ge_mode = 0;
		switch (state->interface) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_GMII:
			ge_mode = 1;
			break;
		case PHY_INTERFACE_MODE_REVMII:
			ge_mode = 2;
			break;
		case PHY_INTERFACE_MODE_RMII:
			if (mac->id)
				goto err_phy;
			ge_mode = 3;
			break;
		default:
			break;
		}

		/* put the gmac into the right mode */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
		spin_unlock(&eth->syscfg0_lock);

		mac->interface = state->interface;
	}

	/* SGMII */
	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path from GMAC to SGMII will be enabled once the
		 * SGMIISYS setup is done.
		 */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK,
				   ~(u32)SYSCFG0_SGMII_MASK);

		/* Decide how GMAC and SGMIISYS are mapped */
		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
		       0 : mac->id;

		/* Setup SGMIISYS with the determined property */
		if (state->interface != PHY_INTERFACE_MODE_SGMII)
			err = mtk_sgmii_setup_mode_force(eth->xgmii, sid,
							 state);
		else
			err = mtk_sgmii_setup_mode_an(eth->xgmii, sid);

		if (err) {
			spin_unlock(&eth->syscfg0_lock);
			goto init_err;
		}

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, val);
		spin_unlock(&eth->syscfg0_lock);
	} else if (state->interface == PHY_INTERFACE_MODE_USXGMII ||
		   state->interface == PHY_INTERFACE_MODE_10GKR ||
		   state->interface == PHY_INTERFACE_MODE_5GBASER) {
		sid = mac->id;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) &&
		    sid != MTK_GMAC1_ID) {
			if (phylink_autoneg_inband(mode))
				err = mtk_usxgmii_setup_mode_force(eth->xgmii, sid,
								   state);
			else
				err = mtk_usxgmii_setup_mode_an(eth->xgmii, sid,
								SPEED_10000);

			if (err)
				goto init_err;
		}
	} else if (phylink_autoneg_inband(mode)) {
		dev_err(eth->dev,
			"In-band mode not supported in non-SGMII mode!\n");
		return;
	}

	/* Setup gmac */
	if (mac->type == MTK_XGDM_TYPE) {
		mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
		mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			switch (mac->id) {
			case MTK_GMAC1_ID:
				mtk_setup_bridge_switch(eth);
				break;
			case MTK_GMAC3_ID:
				val = mtk_r32(eth, MTK_XGMAC_STS(mac->id));
				mtk_w32(eth, val | MTK_XGMAC_FORCE_LINK,
					MTK_XGMAC_STS(mac->id));
				break;
			}
		}
	} else if (mac->type == MTK_GDM_TYPE) {
		val = mtk_r32(eth, MTK_GDMA_EG_CTRL(mac->id));
		mtk_w32(eth, val & ~MTK_GDMA_XGDM_SEL,
			MTK_GDMA_EG_CTRL(mac->id));

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			switch (mac->id) {
			case MTK_GMAC3_ID:
				val = mtk_r32(eth, MTK_XGMAC_STS(mac->id));
				mtk_w32(eth, val & ~MTK_XGMAC_FORCE_LINK,
					MTK_XGMAC_STS(mac->id));
				break;
			}
		}

		if (mac->type != mac_type) {
			if (atomic_read(&reset_pending) == 0) {
				atomic_inc(&force);
				schedule_work(&eth->pending_work);
				atomic_inc(&reset_pending);
			} else
				atomic_dec(&reset_pending);
		}
	}

	return;

err_phy:
	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
		mac->id, phy_modes(state->interface));
	return;

init_err:
	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
		mac->id, phy_modes(state->interface), err);
}

static int mtk_mac_pcs_get_state(struct phylink_config *config,
				 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	if (mac->type == MTK_XGDM_TYPE) {
		u32 sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));

		if (mac->id == MTK_GMAC2_ID)
			sts = sts >> 16;

		state->duplex = 1;

		switch (FIELD_GET(MTK_USXGMII_PCS_MODE, sts)) {
		case 0:
			state->speed = SPEED_10000;
			break;
		case 1:
			state->speed = SPEED_5000;
			break;
		case 2:
			state->speed = SPEED_2500;
			break;
		case 3:
			state->speed = SPEED_1000;
			break;
		}

		state->interface = mac->interface;
		state->link = FIELD_GET(MTK_USXGMII_PCS_LINK, sts);
	} else if (mac->type == MTK_GDM_TYPE) {
		struct mtk_eth *eth = mac->hw;
		struct mtk_xgmii *ss = eth->xgmii;
		u32 id = mtk_mac2xgmii_id(eth, mac->id);
		u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
		u32 val = 0;

		regmap_read(ss->regmap_sgmii[id], SGMSYS_PCS_CONTROL_1, &val);

		state->interface = mac->interface;
		state->link = FIELD_GET(SGMII_LINK_STATYS, val);

		if (FIELD_GET(SGMII_AN_ENABLE, val)) {
			regmap_read(ss->regmap_sgmii[id], SGMII_PCS_SPEED_ABILITY, &val);

			val = val >> 16;

			state->duplex = FIELD_GET(SGMII_PCS_SPEED_DUPLEX, val);

			switch (FIELD_GET(SGMII_PCS_SPEED_MASK, val)) {
			case 0:
				state->speed = SPEED_10;
				break;
			case 1:
				state->speed = SPEED_100;
				break;
			case 2:
				state->speed = SPEED_1000;
				break;
			}
		} else {
			regmap_read(ss->regmap_sgmii[id], SGMSYS_SGMII_MODE, &val);

			state->duplex = !FIELD_GET(SGMII_DUPLEX_FULL, val);

			switch (FIELD_GET(SGMII_SPEED_MASK, val)) {
			case 0:
				state->speed = SPEED_10;
				break;
			case 1:
				state->speed = SPEED_100;
				break;
			case 2:
				regmap_read(ss->regmap_sgmii[id], ss->ana_rgc3, &val);
				state->speed = (FIELD_GET(RG_PHY_SPEED_3_125G, val)) ? SPEED_2500 : SPEED_1000;
				break;
			}
		}

		state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
		if (pmsr & MAC_MSR_RX_FC)
			state->pause |= MLO_PAUSE_RX;
		if (pmsr & MAC_MSR_TX_FC)
			state->pause |= MLO_PAUSE_TX;
	}

	return 1;
}

static void mtk_mac_an_restart(struct phylink_config *config)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	if (mac->type != MTK_XGDM_TYPE)
		mtk_sgmii_restart_an(mac->hw, mac->id);
}

static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr;

	if (mac->type == MTK_GDM_TYPE) {
		mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
		mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
		mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
	} else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
		mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));

		mcr &= 0xfffffff0;
		mcr |= XMAC_MCR_TRX_DISABLE;
		mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
	}
}

static void mtk_mac_link_up(struct phylink_config *config,
			    struct phy_device *phy,
			    unsigned int mode, phy_interface_t interface,
			    int speed, int duplex, bool tx_pause, bool rx_pause)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr, mcr_cur;

	mac->speed = speed;

	if (mac->type == MTK_GDM_TYPE) {
		mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
		mcr = mcr_cur;
		mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
			 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
			 MAC_MCR_FORCE_RX_FC);
		mcr |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
		       MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;

		/* Configure speed */
		switch (speed) {
		case SPEED_2500:
		case SPEED_1000:
			mcr |= MAC_MCR_SPEED_1000;
			break;
		case SPEED_100:
			mcr |= MAC_MCR_SPEED_100;
			break;
		}

		/* Configure duplex */
		if (duplex == DUPLEX_FULL)
			mcr |= MAC_MCR_FORCE_DPX;

		/* Configure pause modes -
		 * phylink will avoid these for half duplex
		 */
		if (tx_pause)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (rx_pause)
			mcr |= MAC_MCR_FORCE_RX_FC;

		mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;

		/* Only update control register when needed! */
		if (mcr != mcr_cur)
			mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

		if (mode == MLO_AN_PHY && phy)
			mtk_setup_eee(mac, phy_init_eee(phy, false) >= 0);
	} else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
		mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));

		mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC);
		/* Configure pause modes -
		 * phylink will avoid these for half duplex
		 */
		if (tx_pause)
			mcr |= XMAC_MCR_FORCE_TX_FC;
		if (rx_pause)
			mcr |= XMAC_MCR_FORCE_RX_FC;

		mcr &= ~(XMAC_MCR_TRX_DISABLE);
		mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
	}
}

static void mtk_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
	      phy_interface_mode_is_rgmii(state->interface)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
	       phy_interface_mode_is_8023z(state->interface))) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_XGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_XGMII)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_USXGMII)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_10GKR))) {
		linkmode_zero(supported);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_USXGMII:
	case PHY_INTERFACE_MODE_10GKR:
		phylink_set(mask, 10000baseKR_Full);
		phylink_set(mask, 10000baseT_Full);
		phylink_set(mask, 10000baseCR_Full);
		phylink_set(mask, 10000baseSR_Full);
		phylink_set(mask, 10000baseLR_Full);
		phylink_set(mask, 10000baseLRM_Full);
		phylink_set(mask, 10000baseER_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		phylink_set(mask, 1000baseT_Half);
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		phylink_set(mask, 5000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_TRGMII:
		phylink_set(mask, 1000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_XGMII:
		/* fall through */
	case PHY_INTERFACE_MODE_1000BASEX:
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 2500baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseT_Half);
		/* fall through */
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_REVMII:
	case PHY_INTERFACE_MODE_NA:
	default:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		break;
	}

	if (state->interface == PHY_INTERFACE_MODE_NA) {
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII)) {
			phylink_set(mask, 10000baseKR_Full);
			phylink_set(mask, 10000baseT_Full);
			phylink_set(mask, 10000baseSR_Full);
			phylink_set(mask, 10000baseLR_Full);
			phylink_set(mask, 10000baseLRM_Full);
			phylink_set(mask, 10000baseER_Full);
			phylink_set(mask, 1000baseKX_Full);
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
			phylink_set(mask, 2500baseT_Full);
			phylink_set(mask, 5000baseT_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
			phylink_set(mask, 1000baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
		}
	}

	if (mac->type == MTK_XGDM_TYPE) {
		phylink_clear(mask, 10baseT_Half);
		phylink_clear(mask, 100baseT_Half);
		phylink_clear(mask, 1000baseT_Half);
	}

	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}

static const struct phylink_mac_ops mtk_phylink_ops = {
	.validate = mtk_validate,
	.mac_link_state = mtk_mac_pcs_get_state,
	.mac_an_restart = mtk_mac_an_restart,
	.mac_config = mtk_mac_config,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
};

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int clk = 25000000, max_clk = 2500000, divider = 1;
	int ret;
	u32 val;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->reset = mtk_mdio_reset;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	if (snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np) < 0) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	if (!of_property_read_u32(mii_np, "mdc-max-frequency", &val))
		max_clk = val;

	while (clk / divider > max_clk) {
		if (divider >= 63)
			break;

		divider++;
	}
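
	/* Worked example: with the 25 MHz MDC source clock and the default
	 * 2.5 MHz ceiling, the loop above settles on divider = 10, i.e. the
	 * MDC line runs at 25 MHz / 10 = 2.5 MHz (the divider is capped
	 * at 63).
	 */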

	/* Configure MDC Turbo Mode */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		val = mtk_r32(eth, MTK_MAC_MISC);
		val |= MISC_MDC_TURBO;
		mtk_w32(eth, val, MTK_MAC_MISC);
	} else {
		val = mtk_r32(eth, MTK_PPSC);
		val |= PPSC_MDC_TURBO;
		mtk_w32(eth, val, MTK_PPSC);
	}

	/* Configure MDC Divider */
	val = mtk_r32(eth, MTK_PPSC);
	val &= ~PPSC_MDC_CFG;
	val |= FIELD_PREP(PPSC_MDC_CFG, divider);
	mtk_w32(eth, val, MTK_PPSC);

	dev_info(eth->dev, "MDC is running at %d Hz\n", clk / divider);

	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MT7628_SDM_MAC_ADRH);
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MT7628_SDM_MAC_ADRL);
	} else {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MTK_GDMA_MAC_ADRH(mac->id));
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MTK_GDMA_MAC_ADRL(mac->id));
	}
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

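/* Illustrative packing for the writes above: a MAC address of
 * 00:11:22:33:44:55 is programmed as ADRH = 0x0011 (bytes 0-1) and
 * ADRL = 0x22334455 (bytes 2-5).
 */
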
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_eth *eth = mac->hw;
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int offs = hw_stats->reg_offset;
	u64 stats;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
	stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x08 + offs);
	hw_stats->rx_overflow +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
	hw_stats->rx_fcs_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
	hw_stats->rx_short_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
	hw_stats->rx_long_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
	hw_stats->rx_checksum_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
	hw_stats->rx_flow_control_packets +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		hw_stats->tx_skip +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
		hw_stats->tx_collisions +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
		hw_stats->tx_bytes +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
	} else {
		hw_stats->tx_skip +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
		hw_stats->tx_collisions +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
		hw_stats->tx_bytes +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
	}

	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

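/* Sizing note for the two helpers above: mtk_max_frag_size() rounds
 * (MTK_RX_HLEN + mtu) and the skb_shared_info footer up to SKB_DATA_ALIGN
 * boundaries, and mtk_max_buf_size() recovers the usable buffer length by
 * subtracting the NET_SKB_PAD + NET_IP_ALIGN headroom and the footer again;
 * the WARN_ON guards the invariant that the result never drops below
 * MTK_MAX_RX_LENGTH.
 */
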
static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
			    struct mtk_rx_dma_v2 *dma_rxd)
{
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	if (!(rxd->rxd2 & RX_DMA_DONE))
		return false;

	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
		rxd->rxd7 = READ_ONCE(dma_rxd->rxd7);
	}

	return true;
}

/* the qdma core needs scratch memory to be set up */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	if (!eth->soc->has_sram) {
		eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
						       cnt * soc->txrx.txd_size,
						       &eth->phy_scratch_ring,
						       GFP_KERNEL);
	} else {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
			eth->scratch_ring = eth->sram_base;
		else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
			eth->scratch_ring = eth->base + MTK_ETH_SRAM_OFFSET;
	}

	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dma_dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
		return -ENOMEM;

	phy_ring_tail = eth->phy_scratch_ring +
			(dma_addr_t)soc->txrx.txd_size * (cnt - 1);

	for (i = 0; i < cnt; i++) {
		struct mtk_tx_dma_v2 *txd;

		txd = eth->scratch_ring + i * soc->txrx.txd_size;
		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
		if (i < cnt - 1)
			txd->txd2 = eth->phy_scratch_ring +
				    (i + 1) * soc->txrx.txd_size;

		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
		txd->txd4 = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			txd->txd5 = 0;
			txd->txd6 = 0;
			txd->txd7 = 0;
			txd->txd8 = 0;
		}
	}

	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);

	return 0;
}

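/* Layout sketch of the scratch ("free queue") ring built above: cnt TX
 * descriptors are chained through txd2, and each txd1 points at one
 * MTK_QDMA_PAGE_SIZE slice of scratch_head:
 *
 *	fq_head -> txd[0] -> txd[1] -> ... -> txd[cnt - 1] (= fq_tail)
 *
 * fq_count and fq_blen tell the QDMA engine how many entries the chain
 * holds and how large each buffer is.
 */
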
static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	return ring->dma + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    void *txd, u32 txd_size)
{
	int idx = (txd - ring->dma) / txd_size;

	return &ring->buf[idx];
}

static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
				       void *dma)
{
	return ring->dma_pdma - ring->dma + dma;
}

static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
{
	return (dma - ring->dma) / txd_size;
}

static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 bool napi)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
			dma_unmap_single(eth->dma_dev,
					 dma_unmap_addr(tx_buf, dma_addr0),
					 dma_unmap_len(tx_buf, dma_len0),
					 DMA_TO_DEVICE);
		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}
	} else {
		if (dma_unmap_len(tx_buf, dma_len0)) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}

		if (dma_unmap_len(tx_buf, dma_len1)) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr1),
				       dma_unmap_len(tx_buf, dma_len1),
				       DMA_TO_DEVICE);
		}
	}

	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
		if (napi)
			napi_consume_skb(tx_buf->skb, napi);
		else
			dev_kfree_skb_any(tx_buf->skb);
	}
	tx_buf->skb = NULL;
}

static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
			 size_t size, int idx)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
		dma_unmap_len_set(tx_buf, dma_len0, size);
	} else {
		if (idx & 1) {
			txd->txd3 = mapped_addr;
			txd->txd2 |= TX_DMA_PLEN1(size);
			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len1, size);
		} else {
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			txd->txd1 = mapped_addr;
			txd->txd2 = TX_DMA_PLEN0(size);
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, size);
		}
	}
}

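/* Note on the "idx & 1" split in setup_tx_buf() above: in PDMA mode a
 * single descriptor carries two buffers, so even indices fill
 * txd1/TX_DMA_PLEN0 (and tag the slot with MTK_DMA_DUMMY_DESC) while odd
 * indices fill txd3/TX_DMA_PLEN1 of the same descriptor; in QDMA mode only
 * the unmap address/length bookkeeping is recorded here.
 */
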
developere9356982022-07-04 09:03:20 +08001531static void mtk_tx_set_dma_desc_v1(struct sk_buff *skb, struct net_device *dev, void *txd,
1532 struct mtk_tx_dma_desc_info *info)
1533{
1534 struct mtk_mac *mac = netdev_priv(dev);
1535 struct mtk_eth *eth = mac->hw;
1536 struct mtk_tx_dma *desc = txd;
1537 u32 data;
1538
1539 WRITE_ONCE(desc->txd1, info->addr);
1540
1541 data = TX_DMA_SWC | QID_LOW_BITS(info->qid) | TX_DMA_PLEN0(info->size);
1542 if (info->last)
1543 data |= TX_DMA_LS0;
1544 WRITE_ONCE(desc->txd3, data);
1545
1546 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1547 data |= QID_HIGH_BITS(info->qid);
1548 if (info->first) {
1549 if (info->gso)
1550 data |= TX_DMA_TSO;
1551 /* tx checksum offload */
1552 if (info->csum)
1553 data |= TX_DMA_CHKSUM;
1554 /* vlan header offload */
1555 if (info->vlan)
1556 data |= TX_DMA_INS_VLAN | info->vlan_tci;
1557 }
1558
1559#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
1560 if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
1561 data &= ~(0x7 << TX_DMA_FPORT_SHIFT);
1562 data |= 0x4 << TX_DMA_FPORT_SHIFT;
1563 }
1564
1565 trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
1566 __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
1567#endif
1568 WRITE_ONCE(desc->txd4, data);
1569}
1570
1571static void mtk_tx_set_dma_desc_v2(struct sk_buff *skb, struct net_device *dev, void *txd,
1572 struct mtk_tx_dma_desc_info *info)
1573{
1574 struct mtk_mac *mac = netdev_priv(dev);
1575 struct mtk_eth *eth = mac->hw;
1576 struct mtk_tx_dma_v2 *desc = txd;
developerce08bca2022-10-06 16:21:13 +08001577 u32 data = 0;
1578
1579 if (!info->qid && mac->id)
1580 info->qid = MTK_QDMA_GMAC2_QID;
1581
1582 WRITE_ONCE(desc->txd1, info->addr);
1583
1584 data = TX_DMA_PLEN0(info->size);
1585 if (info->last)
1586 data |= TX_DMA_LS0;
1587 WRITE_ONCE(desc->txd3, data);
1588
1589 data = ((mac->id == MTK_GMAC3_ID) ?
1590 PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
1591 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1592#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
1593 if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
1594 data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
1595 data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
1596 }
1597
1598 trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
1599 __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
1600#endif
1601 WRITE_ONCE(desc->txd4, data);
1602
1603 data = 0;
1604 if (info->first) {
1605 if (info->gso)
1606 data |= TX_DMA_TSO_V2;
1607 /* tx checksum offload */
1608 if (info->csum)
1609 data |= TX_DMA_CHKSUM_V2;
1610 }
1611 WRITE_ONCE(desc->txd5, data);
1612
1613 data = 0;
1614 if (info->first && info->vlan)
1615 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1616 WRITE_ONCE(desc->txd6, data);
1617
1618 WRITE_ONCE(desc->txd7, 0);
1619 WRITE_ONCE(desc->txd8, 0);
1620}
1621
1622static void mtk_tx_set_dma_desc_v3(struct sk_buff *skb, struct net_device *dev, void *txd,
1623 struct mtk_tx_dma_desc_info *info)
1624{
1625 struct mtk_mac *mac = netdev_priv(dev);
1626 struct mtk_eth *eth = mac->hw;
1627 struct mtk_tx_dma_v2 *desc = txd;
developer089e8852022-09-28 14:43:46 +08001628 u64 addr64 = 0;
developere9356982022-07-04 09:03:20 +08001629 u32 data = 0;
developere9356982022-07-04 09:03:20 +08001630
developerce08bca2022-10-06 16:21:13 +08001631 if (!info->qid && mac->id)
developerb9463012022-09-14 10:28:45 +08001632 info->qid = MTK_QDMA_GMAC2_QID;
developere9356982022-07-04 09:03:20 +08001633
developer089e8852022-09-28 14:43:46 +08001634 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
1635 TX_DMA_SDP1(info->addr) : 0;
1636
developere9356982022-07-04 09:03:20 +08001637 WRITE_ONCE(desc->txd1, info->addr);
1638
1639 data = TX_DMA_PLEN0(info->size);
1640 if (info->last)
1641 data |= TX_DMA_LS0;
developer089e8852022-09-28 14:43:46 +08001642 WRITE_ONCE(desc->txd3, data | addr64);
developere9356982022-07-04 09:03:20 +08001643
developer089e8852022-09-28 14:43:46 +08001644 data = ((mac->id == MTK_GMAC3_ID) ?
1645 PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
developerb9463012022-09-14 10:28:45 +08001646 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
developere9356982022-07-04 09:03:20 +08001647#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
1648 if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
1649 data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
1650 data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
1651 }
1652
1653 trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
1654 __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
1655#endif
1656 WRITE_ONCE(desc->txd4, data);
1657
1658 data = 0;
1659 if (info->first) {
1660 if (info->gso)
1661 data |= TX_DMA_TSO_V2;
1662 /* tx checksum offload */
1663 if (info->csum)
1664 data |= TX_DMA_CHKSUM_V2;
developerce08bca2022-10-06 16:21:13 +08001665
1666 if (netdev_uses_dsa(dev))
1667 data |= TX_DMA_SPTAG_V3;
developere9356982022-07-04 09:03:20 +08001668 }
1669 WRITE_ONCE(desc->txd5, data);
1670
1671 data = 0;
1672 if (info->first && info->vlan)
1673 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1674 WRITE_ONCE(desc->txd6, data);
1675
1676 WRITE_ONCE(desc->txd7, 0);
1677 WRITE_ONCE(desc->txd8, 0);
1678}
1679
1680static void mtk_tx_set_dma_desc(struct sk_buff *skb, struct net_device *dev, void *txd,
1681 struct mtk_tx_dma_desc_info *info)
1682{
1683 struct mtk_mac *mac = netdev_priv(dev);
1684 struct mtk_eth *eth = mac->hw;
1685
developerce08bca2022-10-06 16:21:13 +08001686 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
1687 mtk_tx_set_dma_desc_v3(skb, dev, txd, info);
1688 else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developere9356982022-07-04 09:03:20 +08001689 mtk_tx_set_dma_desc_v2(skb, dev, txd, info);
1690 else
1691 mtk_tx_set_dma_desc_v1(skb, dev, txd, info);
1692}
1693
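/* mtk_tx_map() maps the linear head through the first descriptor and
 * then walks the fragments, splitting any fragment larger than
 * MTK_TX_DMA_BUF_LEN over several descriptors. On a mapping failure it
 * jumps to err_dma, which walks the chain back from itxd, unmapping
 * and releasing every descriptor claimed so far.
 */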
developerfd40db22021-04-29 10:08:25 +08001694static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1695 int tx_num, struct mtk_tx_ring *ring, bool gso)
1696{
developere9356982022-07-04 09:03:20 +08001697 struct mtk_tx_dma_desc_info txd_info = {
1698 .size = skb_headlen(skb),
1699 .qid = skb->mark & MTK_QDMA_TX_MASK,
1700 .gso = gso,
1701 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
1702 .vlan = skb_vlan_tag_present(skb),
1703 .vlan_tci = skb_vlan_tag_get(skb),
1704 .first = true,
1705 .last = !skb_is_nonlinear(skb),
1706 };
developerfd40db22021-04-29 10:08:25 +08001707 struct mtk_mac *mac = netdev_priv(dev);
1708 struct mtk_eth *eth = mac->hw;
developere9356982022-07-04 09:03:20 +08001709 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001710 struct mtk_tx_dma *itxd, *txd;
1711 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1712 struct mtk_tx_buf *itx_buf, *tx_buf;
developerfd40db22021-04-29 10:08:25 +08001713 int i, n_desc = 1;
developerfd40db22021-04-29 10:08:25 +08001714 int k = 0;
1715
developerb3a9e7b2023-02-08 15:18:10 +08001716 if (skb->len < 32) {
1717 if (skb_put_padto(skb, MTK_MIN_TX_LENGTH))
1718 return -ENOMEM;
1719
1720 txd_info.size = skb_headlen(skb);
1721 }
1722
developerfd40db22021-04-29 10:08:25 +08001723 itxd = ring->next_free;
1724 itxd_pdma = qdma_to_pdma(ring, itxd);
1725 if (itxd == ring->last_free)
1726 return -ENOMEM;
1727
developere9356982022-07-04 09:03:20 +08001728 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001729 memset(itx_buf, 0, sizeof(*itx_buf));
1730
developer3f28d382023-03-07 16:06:30 +08001731 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
developere9356982022-07-04 09:03:20 +08001732 DMA_TO_DEVICE);
developer3f28d382023-03-07 16:06:30 +08001733 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
developerfd40db22021-04-29 10:08:25 +08001734 return -ENOMEM;
1735
developere9356982022-07-04 09:03:20 +08001736 mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);
1737
developerfd40db22021-04-29 10:08:25 +08001738 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
developer089e8852022-09-28 14:43:46 +08001739 itx_buf->flags |= (mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
1740 (mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
1741 MTK_TX_FLAGS_FPORT2;
developere9356982022-07-04 09:03:20 +08001742 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
developerfd40db22021-04-29 10:08:25 +08001743 k++);
1744
developerfd40db22021-04-29 10:08:25 +08001745	/* TX scatter-gather: map and chain the remaining fragments */
1746 txd = itxd;
1747 txd_pdma = qdma_to_pdma(ring, txd);
1748
developere9356982022-07-04 09:03:20 +08001749 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
developerfd40db22021-04-29 10:08:25 +08001750 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1751 unsigned int offset = 0;
1752 int frag_size = skb_frag_size(frag);
1753
1754 while (frag_size) {
developerfd40db22021-04-29 10:08:25 +08001755 bool new_desc = true;
1756
developere9356982022-07-04 09:03:20 +08001757 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
developerfd40db22021-04-29 10:08:25 +08001758 (i & 0x1)) {
1759 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1760 txd_pdma = qdma_to_pdma(ring, txd);
1761 if (txd == ring->last_free)
1762 goto err_dma;
1763
1764 n_desc++;
1765 } else {
1766 new_desc = false;
1767 }
1768
developere9356982022-07-04 09:03:20 +08001769 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1770 txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
1771 txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
1772 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1773 !(frag_size - txd_info.size);
developer3f28d382023-03-07 16:06:30 +08001774 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
developere9356982022-07-04 09:03:20 +08001775 offset, txd_info.size,
1776 DMA_TO_DEVICE);
developer3f28d382023-03-07 16:06:30 +08001777 if (unlikely(dma_mapping_error(eth->dma_dev,
1778 txd_info.addr)))
developere9356982022-07-04 09:03:20 +08001779 goto err_dma;
developerfd40db22021-04-29 10:08:25 +08001780
developere9356982022-07-04 09:03:20 +08001781 mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);
developerfd40db22021-04-29 10:08:25 +08001782
developere9356982022-07-04 09:03:20 +08001783 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001784 if (new_desc)
1785 memset(tx_buf, 0, sizeof(*tx_buf));
1786 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
1787 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
developer089e8852022-09-28 14:43:46 +08001788 tx_buf->flags |=
1789 (mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
1790 (mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
1791 MTK_TX_FLAGS_FPORT2;
developerfd40db22021-04-29 10:08:25 +08001792
developere9356982022-07-04 09:03:20 +08001793 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1794 txd_info.size, k++);
developerfd40db22021-04-29 10:08:25 +08001795
developere9356982022-07-04 09:03:20 +08001796 frag_size -= txd_info.size;
1797 offset += txd_info.size;
developerfd40db22021-04-29 10:08:25 +08001798 }
1799 }
1800
1801 /* store skb to cleanup */
1802 itx_buf->skb = skb;
1803
developere9356982022-07-04 09:03:20 +08001804 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
developerfd40db22021-04-29 10:08:25 +08001805 if (k & 0x1)
1806 txd_pdma->txd2 |= TX_DMA_LS0;
1807 else
1808 txd_pdma->txd2 |= TX_DMA_LS1;
1809 }
1810
1811 netdev_sent_queue(dev, skb->len);
1812 skb_tx_timestamp(skb);
1813
1814 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1815 atomic_sub(n_desc, &ring->free_count);
1816
1817 /* make sure that all changes to the dma ring are flushed before we
1818 * continue
1819 */
1820 wmb();
1821
developere9356982022-07-04 09:03:20 +08001822 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
developerfd40db22021-04-29 10:08:25 +08001823 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
1824 !netdev_xmit_more())
developer68ce74f2023-01-03 16:11:57 +08001825 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
developerfd40db22021-04-29 10:08:25 +08001826 } else {
developere9356982022-07-04 09:03:20 +08001827 int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
developerfd40db22021-04-29 10:08:25 +08001828 ring->dma_size);
1829 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1830 }
1831
1832 return 0;
1833
1834err_dma:
1835 do {
developere9356982022-07-04 09:03:20 +08001836 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001837
1838 /* unmap dma */
developerc4671b22021-05-28 13:16:42 +08001839 mtk_tx_unmap(eth, tx_buf, false);
developerfd40db22021-04-29 10:08:25 +08001840
1841 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
developere9356982022-07-04 09:03:20 +08001842 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
developerfd40db22021-04-29 10:08:25 +08001843 itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1844
1845 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1846 itxd_pdma = qdma_to_pdma(ring, itxd);
1847 } while (itxd != txd);
1848
1849 return -ENOMEM;
1850}
1851
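/* mtk_cal_txd_req() returns a worst-case descriptor count: one for the
 * linear head plus, for GSO skbs, DIV_ROUND_UP(frag_size,
 * MTK_TX_DMA_BUF_LEN) per fragment. For example, assuming the usual
 * MTK_TX_DMA_BUF_LEN of 0x3fff (16383 bytes), a GSO skb with a single
 * 16384-byte fragment reserves 1 + 2 = 3 descriptors.
 */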
1852static inline int mtk_cal_txd_req(struct sk_buff *skb)
1853{
1854 int i, nfrags;
1855 skb_frag_t *frag;
1856
1857 nfrags = 1;
1858 if (skb_is_gso(skb)) {
1859 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1860 frag = &skb_shinfo(skb)->frags[i];
1861 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1862 MTK_TX_DMA_BUF_LEN);
1863 }
1864 } else {
1865 nfrags += skb_shinfo(skb)->nr_frags;
1866 }
1867
1868 return nfrags;
1869}
1870
1871static int mtk_queue_stopped(struct mtk_eth *eth)
1872{
1873 int i;
1874
1875 for (i = 0; i < MTK_MAC_COUNT; i++) {
1876 if (!eth->netdev[i])
1877 continue;
1878 if (netif_queue_stopped(eth->netdev[i]))
1879 return 1;
1880 }
1881
1882 return 0;
1883}
1884
1885static void mtk_wake_queue(struct mtk_eth *eth)
1886{
1887 int i;
1888
1889 for (i = 0; i < MTK_MAC_COUNT; i++) {
1890 if (!eth->netdev[i])
1891 continue;
1892 netif_wake_queue(eth->netdev[i]);
1893 }
1894}
1895
1896static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1897{
1898 struct mtk_mac *mac = netdev_priv(dev);
1899 struct mtk_eth *eth = mac->hw;
1900 struct mtk_tx_ring *ring = &eth->tx_ring;
1901 struct net_device_stats *stats = &dev->stats;
1902 bool gso = false;
1903 int tx_num;
1904
1905 /* normally we can rely on the stack not calling this more than once,
1906 * however we have 2 queues running on the same ring so we need to lock
1907 * the ring access
1908 */
1909 spin_lock(&eth->page_lock);
1910
1911 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1912 goto drop;
1913
1914 tx_num = mtk_cal_txd_req(skb);
1915 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1916 netif_stop_queue(dev);
1917 netif_err(eth, tx_queued, dev,
1918 "Tx Ring full when queue awake!\n");
1919 spin_unlock(&eth->page_lock);
1920 return NETDEV_TX_BUSY;
1921 }
1922
1923 /* TSO: fill MSS info in tcp checksum field */
1924 if (skb_is_gso(skb)) {
1925 if (skb_cow_head(skb, 0)) {
1926 netif_warn(eth, tx_err, dev,
1927 "GSO expand head fail.\n");
1928 goto drop;
1929 }
1930
1931 if (skb_shinfo(skb)->gso_type &
1932 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1933 gso = true;
1934 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1935 }
1936 }
1937
1938 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1939 goto drop;
1940
1941 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1942 netif_stop_queue(dev);
1943
1944 spin_unlock(&eth->page_lock);
1945
1946 return NETDEV_TX_OK;
1947
1948drop:
1949 spin_unlock(&eth->page_lock);
1950 stats->tx_dropped++;
1951 dev_kfree_skb_any(skb);
1952 return NETDEV_TX_OK;
1953}
1954
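/* With HW LRO enabled, RX traffic is spread over several rings, so the
 * poller cannot assume a single source: mtk_get_rx_ring() scans the
 * normal and LRO rings for one whose next descriptor has RX_DMA_DONE
 * set and flags it for a CPU index update.
 */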
1955static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1956{
1957 int i;
1958 struct mtk_rx_ring *ring;
1959 int idx;
1960
developerfd40db22021-04-29 10:08:25 +08001961 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
developere9356982022-07-04 09:03:20 +08001962 struct mtk_rx_dma *rxd;
1963
developer77d03a72021-06-06 00:06:00 +08001964 if (!IS_NORMAL_RING(i) && !IS_HW_LRO_RING(i))
1965 continue;
1966
developerfd40db22021-04-29 10:08:25 +08001967 ring = &eth->rx_ring[i];
1968 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
developere9356982022-07-04 09:03:20 +08001969 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1970 if (rxd->rxd2 & RX_DMA_DONE) {
developerfd40db22021-04-29 10:08:25 +08001971 ring->calc_idx_update = true;
1972 return ring;
1973 }
1974 }
1975
1976 return NULL;
1977}
1978
developer18f46a82021-07-20 21:08:21 +08001979static void mtk_update_rx_cpu_idx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
developerfd40db22021-04-29 10:08:25 +08001980{
developerfd40db22021-04-29 10:08:25 +08001981 int i;
1982
developerfb556ca2021-10-13 10:52:09 +08001983 if (!eth->hwlro)
developerfd40db22021-04-29 10:08:25 +08001984 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
developerfb556ca2021-10-13 10:52:09 +08001985 else {
developerfd40db22021-04-29 10:08:25 +08001986 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1987 ring = &eth->rx_ring[i];
1988 if (ring->calc_idx_update) {
1989 ring->calc_idx_update = false;
1990 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1991 }
1992 }
1993 }
1994}
1995
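/* mtk_poll_rx() allocates and maps a replacement fragment before the
 * filled buffer is handed to the stack; on an allocation or mapping
 * failure the old buffer is recycled via release_desc, so the ring
 * cannot run out of buffers.
 */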
1996static int mtk_poll_rx(struct napi_struct *napi, int budget,
1997 struct mtk_eth *eth)
1998{
developer18f46a82021-07-20 21:08:21 +08001999 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
2000 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08002001 int idx;
2002 struct sk_buff *skb;
developer089e8852022-09-28 14:43:46 +08002003 u64 addr64 = 0;
developerfd40db22021-04-29 10:08:25 +08002004 u8 *data, *new_data;
developere9356982022-07-04 09:03:20 +08002005 struct mtk_rx_dma_v2 *rxd, trxd;
developerfd40db22021-04-29 10:08:25 +08002006 int done = 0;
2007
developer18f46a82021-07-20 21:08:21 +08002008 if (unlikely(!ring))
2009 goto rx_done;
2010
developerfd40db22021-04-29 10:08:25 +08002011 while (done < budget) {
developer68ce74f2023-01-03 16:11:57 +08002012 unsigned int pktlen, *rxdcsum;
developer006325c2022-10-06 16:39:50 +08002013 struct net_device *netdev = NULL;
developer8b6f2402022-11-28 13:42:34 +08002014 dma_addr_t dma_addr = 0;
developere9356982022-07-04 09:03:20 +08002015 int mac = 0;
developerfd40db22021-04-29 10:08:25 +08002016
developer18f46a82021-07-20 21:08:21 +08002017 if (eth->hwlro)
2018 ring = mtk_get_rx_ring(eth);
2019
developerfd40db22021-04-29 10:08:25 +08002020 if (unlikely(!ring))
2021 goto rx_done;
2022
2023 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
developere9356982022-07-04 09:03:20 +08002024 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
developerfd40db22021-04-29 10:08:25 +08002025 data = ring->data[idx];
2026
developere9356982022-07-04 09:03:20 +08002027 if (!mtk_rx_get_desc(eth, &trxd, rxd))
developerfd40db22021-04-29 10:08:25 +08002028 break;
2029
2030		/* find out which mac the packet comes from. values start at 1 */
2031 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2032 mac = 0;
2033 } else {
developer089e8852022-09-28 14:43:46 +08002034 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2035 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
2036 switch (RX_DMA_GET_SPORT_V2(trxd.rxd5)) {
2037 case PSE_GDM1_PORT:
2038 case PSE_GDM2_PORT:
2039 mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
2040 break;
2041 case PSE_GDM3_PORT:
2042 mac = MTK_GMAC3_ID;
2043 break;
2044 }
2045 } else
developerfd40db22021-04-29 10:08:25 +08002046 mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ?
2047 0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
2048 }
2049
2050 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
2051 !eth->netdev[mac]))
2052 goto release_desc;
2053
2054 netdev = eth->netdev[mac];
2055
2056 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
2057 goto release_desc;
2058
2059 /* alloc new buffer */
2060 new_data = napi_alloc_frag(ring->frag_size);
2061 if (unlikely(!new_data)) {
2062 netdev->stats.rx_dropped++;
2063 goto release_desc;
2064 }
developer3f28d382023-03-07 16:06:30 +08002065 dma_addr = dma_map_single(eth->dma_dev,
developerfd40db22021-04-29 10:08:25 +08002066 new_data + NET_SKB_PAD +
2067 eth->ip_align,
2068 ring->buf_size,
2069 DMA_FROM_DEVICE);
developer3f28d382023-03-07 16:06:30 +08002070 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
developerfd40db22021-04-29 10:08:25 +08002071 skb_free_frag(new_data);
2072 netdev->stats.rx_dropped++;
2073 goto release_desc;
2074 }
2075
developer089e8852022-09-28 14:43:46 +08002076 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
2077 ((u64)(trxd.rxd2 & 0xf)) << 32 : 0;
2078
developer3f28d382023-03-07 16:06:30 +08002079 dma_unmap_single(eth->dma_dev,
developer089e8852022-09-28 14:43:46 +08002080 (u64)(trxd.rxd1 | addr64),
developerc4671b22021-05-28 13:16:42 +08002081 ring->buf_size, DMA_FROM_DEVICE);
2082
developerfd40db22021-04-29 10:08:25 +08002083 /* receive data */
2084 skb = build_skb(data, ring->frag_size);
2085 if (unlikely(!skb)) {
developerc4671b22021-05-28 13:16:42 +08002086 skb_free_frag(data);
developerfd40db22021-04-29 10:08:25 +08002087 netdev->stats.rx_dropped++;
developerc4671b22021-05-28 13:16:42 +08002088 goto skip_rx;
developerfd40db22021-04-29 10:08:25 +08002089 }
2090 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2091
developerfd40db22021-04-29 10:08:25 +08002092 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
2093 skb->dev = netdev;
2094 skb_put(skb, pktlen);
2095
developer68ce74f2023-01-03 16:11:57 +08002096 if ((MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ||
2097 (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)))
2098 rxdcsum = &trxd.rxd3;
2099 else
2100 rxdcsum = &trxd.rxd4;
2101
2102 if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
developerfd40db22021-04-29 10:08:25 +08002103 skb->ip_summed = CHECKSUM_UNNECESSARY;
2104 else
2105 skb_checksum_none_assert(skb);
2106 skb->protocol = eth_type_trans(skb, netdev);
2107
2108 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
developer089e8852022-09-28 14:43:46 +08002109 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2110 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer255bba22021-07-27 15:16:33 +08002111 if (trxd.rxd3 & RX_DMA_VTAG_V2)
developerfd40db22021-04-29 10:08:25 +08002112 __vlan_hwaccel_put_tag(skb,
developer255bba22021-07-27 15:16:33 +08002113 htons(RX_DMA_VPID_V2(trxd.rxd4)),
developerfd40db22021-04-29 10:08:25 +08002114 RX_DMA_VID_V2(trxd.rxd4));
2115 } else {
2116 if (trxd.rxd2 & RX_DMA_VTAG)
2117 __vlan_hwaccel_put_tag(skb,
2118 htons(RX_DMA_VPID(trxd.rxd3)),
2119 RX_DMA_VID(trxd.rxd3));
2120 }
2121
2122			/* If the netdev is attached to a DSA switch, the special
2123			 * tag inserted into the VLAN field by the switch hardware
2124			 * can be offloaded by RX HW VLAN offload. Clear the VLAN
2125			 * information from @skb to avoid an unexpected 802.1d
2126			 * handler before the packet enters the DSA framework.
2127			 */
2128 if (netdev_uses_dsa(netdev))
2129 __vlan_hwaccel_clear_tag(skb);
2130 }
2131
2132#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
developer089e8852022-09-28 14:43:46 +08002133 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2134 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developerfd40db22021-04-29 10:08:25 +08002135 *(u32 *)(skb->head) = trxd.rxd5;
2136 else
developerfd40db22021-04-29 10:08:25 +08002137 *(u32 *)(skb->head) = trxd.rxd4;
2138
2139 skb_hnat_alg(skb) = 0;
developerfdfe1572021-09-13 16:56:33 +08002140 skb_hnat_filled(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08002141 skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
2142
2143 if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
2144 trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
2145 __func__, skb_hnat_reason(skb));
2146 skb->pkt_type = PACKET_HOST;
2147 }
2148
2149		trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x)\n",
2150 __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
2151 skb_hnat_reason(skb), skb_hnat_alg(skb));
2152#endif
developer77d03a72021-06-06 00:06:00 +08002153 if (mtk_hwlro_stats_ebl &&
2154 IS_HW_LRO_RING(ring->ring_no) && eth->hwlro) {
2155 hw_lro_stats_update(ring->ring_no, &trxd);
2156 hw_lro_flush_stats_update(ring->ring_no, &trxd);
2157 }
developerfd40db22021-04-29 10:08:25 +08002158
2159 skb_record_rx_queue(skb, 0);
2160 napi_gro_receive(napi, skb);
2161
developerc4671b22021-05-28 13:16:42 +08002162skip_rx:
developerfd40db22021-04-29 10:08:25 +08002163 ring->data[idx] = new_data;
2164 rxd->rxd1 = (unsigned int)dma_addr;
2165
2166release_desc:
developer089e8852022-09-28 14:43:46 +08002167 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
2168 RX_DMA_SDP1(dma_addr) : 0;
2169
developerfd40db22021-04-29 10:08:25 +08002170 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2171 rxd->rxd2 = RX_DMA_LSO;
2172 else
developer089e8852022-09-28 14:43:46 +08002173 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size) | addr64;
developerfd40db22021-04-29 10:08:25 +08002174
2175 ring->calc_idx = idx;
2176
2177 done++;
2178 }
2179
2180rx_done:
2181 if (done) {
2182 /* make sure that all changes to the dma ring are flushed before
2183 * we continue
2184 */
2185 wmb();
developer18f46a82021-07-20 21:08:21 +08002186 mtk_update_rx_cpu_idx(eth, ring);
developerfd40db22021-04-29 10:08:25 +08002187 }
2188
2189 return done;
2190}
2191
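/* TX completion: the QDMA variant walks the descriptor chain from the
 * driver's last release pointer (cpu) up to the position reported by
 * the hardware (dma), unmapping buffers and crediting free_count. The
 * PDMA variant below does the same with plain ring indices.
 */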
developerfb556ca2021-10-13 10:52:09 +08002192static void mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08002193 unsigned int *done, unsigned int *bytes)
2194{
developer68ce74f2023-01-03 16:11:57 +08002195 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developere9356982022-07-04 09:03:20 +08002196 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002197 struct mtk_tx_ring *ring = &eth->tx_ring;
2198 struct mtk_tx_dma *desc;
2199 struct sk_buff *skb;
2200 struct mtk_tx_buf *tx_buf;
2201 u32 cpu, dma;
2202
developerc4671b22021-05-28 13:16:42 +08002203 cpu = ring->last_free_ptr;
developer68ce74f2023-01-03 16:11:57 +08002204 dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
developerfd40db22021-04-29 10:08:25 +08002205
2206 desc = mtk_qdma_phys_to_virt(ring, cpu);
2207
2208 while ((cpu != dma) && budget) {
2209 u32 next_cpu = desc->txd2;
2210 int mac = 0;
2211
2212 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2213 break;
2214
2215 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2216
developere9356982022-07-04 09:03:20 +08002217 tx_buf = mtk_desc_to_tx_buf(ring, desc, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08002218 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
developer089e8852022-09-28 14:43:46 +08002219 mac = MTK_GMAC2_ID;
2220 else if (tx_buf->flags & MTK_TX_FLAGS_FPORT2)
2221 mac = MTK_GMAC3_ID;
developerfd40db22021-04-29 10:08:25 +08002222
2223 skb = tx_buf->skb;
2224 if (!skb)
2225 break;
2226
2227 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
2228 bytes[mac] += skb->len;
2229 done[mac]++;
2230 budget--;
2231 }
developerc4671b22021-05-28 13:16:42 +08002232 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08002233
2234 ring->last_free = desc;
2235 atomic_inc(&ring->free_count);
2236
2237 cpu = next_cpu;
2238 }
2239
developerc4671b22021-05-28 13:16:42 +08002240 ring->last_free_ptr = cpu;
developer68ce74f2023-01-03 16:11:57 +08002241 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
developerfd40db22021-04-29 10:08:25 +08002242}
2243
developerfb556ca2021-10-13 10:52:09 +08002244static void mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
developerfd40db22021-04-29 10:08:25 +08002245 unsigned int *done, unsigned int *bytes)
2246{
2247 struct mtk_tx_ring *ring = &eth->tx_ring;
2248 struct mtk_tx_dma *desc;
2249 struct sk_buff *skb;
2250 struct mtk_tx_buf *tx_buf;
2251 u32 cpu, dma;
2252
2253 cpu = ring->cpu_idx;
2254 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2255
2256 while ((cpu != dma) && budget) {
2257 tx_buf = &ring->buf[cpu];
2258 skb = tx_buf->skb;
2259 if (!skb)
2260 break;
2261
2262 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
2263 bytes[0] += skb->len;
2264 done[0]++;
2265 budget--;
2266 }
2267
developerc4671b22021-05-28 13:16:42 +08002268 mtk_tx_unmap(eth, tx_buf, true);
developerfd40db22021-04-29 10:08:25 +08002269
developere9356982022-07-04 09:03:20 +08002270 desc = ring->dma + cpu * eth->soc->txrx.txd_size;
developerfd40db22021-04-29 10:08:25 +08002271 ring->last_free = desc;
2272 atomic_inc(&ring->free_count);
2273
2274 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2275 }
2276
2277 ring->cpu_idx = cpu;
developerfd40db22021-04-29 10:08:25 +08002278}
2279
2280static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2281{
2282 struct mtk_tx_ring *ring = &eth->tx_ring;
2283 unsigned int done[MTK_MAX_DEVS];
2284 unsigned int bytes[MTK_MAX_DEVS];
2285 int total = 0, i;
2286
2287 memset(done, 0, sizeof(done));
2288 memset(bytes, 0, sizeof(bytes));
2289
2290 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
developerfb556ca2021-10-13 10:52:09 +08002291 mtk_poll_tx_qdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08002292 else
developerfb556ca2021-10-13 10:52:09 +08002293 mtk_poll_tx_pdma(eth, budget, done, bytes);
developerfd40db22021-04-29 10:08:25 +08002294
2295 for (i = 0; i < MTK_MAC_COUNT; i++) {
2296 if (!eth->netdev[i] || !done[i])
2297 continue;
2298 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
2299 total += done[i];
2300 }
2301
2302 if (mtk_queue_stopped(eth) &&
2303 (atomic_read(&ring->free_count) > ring->thresh))
2304 mtk_wake_queue(eth);
2305
2306 return total;
2307}
2308
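/* mtk_poll_tx() reports the completed packet/byte counts per device
 * through netdev_completed_queue(), pairing with the
 * netdev_sent_queue() call in mtk_tx_map() so BQL can bound the
 * in-flight TX backlog, and restarts stopped queues once enough
 * descriptors are free.
 */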
2309static void mtk_handle_status_irq(struct mtk_eth *eth)
2310{
developer8051e042022-04-08 13:26:36 +08002311 u32 status2 = mtk_r32(eth, MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08002312
2313 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2314 mtk_stats_update(eth);
2315 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
developer8051e042022-04-08 13:26:36 +08002316 MTK_FE_INT_STATUS);
developerfd40db22021-04-29 10:08:25 +08002317 }
2318}
2319
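/* The NAPI pollers return the full budget while work (or freshly
 * raised DONE status) remains so the core keeps polling; only once the
 * ring is drained do they napi_complete() and re-enable the DONE
 * interrupt.
 */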
2320static int mtk_napi_tx(struct napi_struct *napi, int budget)
2321{
2322 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
developer68ce74f2023-01-03 16:11:57 +08002323 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developerfd40db22021-04-29 10:08:25 +08002324 u32 status, mask;
2325 int tx_done = 0;
2326
2327 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2328 mtk_handle_status_irq(eth);
developer68ce74f2023-01-03 16:11:57 +08002329 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
developerfd40db22021-04-29 10:08:25 +08002330 tx_done = mtk_poll_tx(eth, budget);
2331
2332 if (unlikely(netif_msg_intr(eth))) {
developer68ce74f2023-01-03 16:11:57 +08002333 status = mtk_r32(eth, reg_map->tx_irq_status);
2334 mask = mtk_r32(eth, reg_map->tx_irq_mask);
developerfd40db22021-04-29 10:08:25 +08002335 dev_info(eth->dev,
2336 "done tx %d, intr 0x%08x/0x%x\n",
2337 tx_done, status, mask);
2338 }
2339
2340 if (tx_done == budget)
2341 return budget;
2342
developer68ce74f2023-01-03 16:11:57 +08002343 status = mtk_r32(eth, reg_map->tx_irq_status);
developerfd40db22021-04-29 10:08:25 +08002344 if (status & MTK_TX_DONE_INT)
2345 return budget;
2346
developerc4671b22021-05-28 13:16:42 +08002347 if (napi_complete(napi))
2348 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
developerfd40db22021-04-29 10:08:25 +08002349
2350 return tx_done;
2351}
2352
2353static int mtk_napi_rx(struct napi_struct *napi, int budget)
2354{
developer18f46a82021-07-20 21:08:21 +08002355 struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
2356 struct mtk_eth *eth = rx_napi->eth;
developer68ce74f2023-01-03 16:11:57 +08002357 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developer18f46a82021-07-20 21:08:21 +08002358 struct mtk_rx_ring *ring = rx_napi->rx_ring;
developerfd40db22021-04-29 10:08:25 +08002359 u32 status, mask;
2360 int rx_done = 0;
2361 int remain_budget = budget;
2362
2363 mtk_handle_status_irq(eth);
2364
2365poll_again:
developer68ce74f2023-01-03 16:11:57 +08002366 mtk_w32(eth, MTK_RX_DONE_INT(ring->ring_no), reg_map->pdma.irq_status);
developerfd40db22021-04-29 10:08:25 +08002367 rx_done = mtk_poll_rx(napi, remain_budget, eth);
2368
2369 if (unlikely(netif_msg_intr(eth))) {
developer68ce74f2023-01-03 16:11:57 +08002370 status = mtk_r32(eth, reg_map->pdma.irq_status);
2371 mask = mtk_r32(eth, reg_map->pdma.irq_mask);
developerfd40db22021-04-29 10:08:25 +08002372 dev_info(eth->dev,
2373 "done rx %d, intr 0x%08x/0x%x\n",
2374 rx_done, status, mask);
2375 }
2376 if (rx_done == remain_budget)
2377 return budget;
2378
developer68ce74f2023-01-03 16:11:57 +08002379 status = mtk_r32(eth, reg_map->pdma.irq_status);
developer18f46a82021-07-20 21:08:21 +08002380 if (status & MTK_RX_DONE_INT(ring->ring_no)) {
developerfd40db22021-04-29 10:08:25 +08002381 remain_budget -= rx_done;
2382 goto poll_again;
2383 }
developerc4671b22021-05-28 13:16:42 +08002384
2385 if (napi_complete(napi))
developer18f46a82021-07-20 21:08:21 +08002386 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(ring->ring_no));
developerfd40db22021-04-29 10:08:25 +08002387
2388 return rx_done + budget - remain_budget;
2389}
2390
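/* mtk_tx_alloc() links MTK_DMA_SIZE descriptors into a ring through
 * the txd2 next pointers. On SoCs with in-chip SRAM the descriptors
 * are carved out right behind the scratch ring; otherwise they come
 * from a coherent DMA allocation.
 */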
2391static int mtk_tx_alloc(struct mtk_eth *eth)
2392{
developere9356982022-07-04 09:03:20 +08002393 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002394 struct mtk_tx_ring *ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08002395 int i, sz = soc->txrx.txd_size;
2396 struct mtk_tx_dma_v2 *txd, *pdma_txd;
developerfd40db22021-04-29 10:08:25 +08002397
2398 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
2399 GFP_KERNEL);
2400 if (!ring->buf)
2401 goto no_tx_mem;
2402
2403 if (!eth->soc->has_sram)
developer3f28d382023-03-07 16:06:30 +08002404 ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08002405 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08002406 else {
developere9356982022-07-04 09:03:20 +08002407 ring->dma = eth->scratch_ring + MTK_DMA_SIZE * sz;
developer8b6f2402022-11-28 13:42:34 +08002408 ring->phys = eth->phy_scratch_ring +
2409 MTK_DMA_SIZE * (dma_addr_t)sz;
developerfd40db22021-04-29 10:08:25 +08002410 }
2411
2412 if (!ring->dma)
2413 goto no_tx_mem;
2414
2415 for (i = 0; i < MTK_DMA_SIZE; i++) {
2416 int next = (i + 1) % MTK_DMA_SIZE;
2417 u32 next_ptr = ring->phys + next * sz;
2418
developere9356982022-07-04 09:03:20 +08002419 txd = ring->dma + i * sz;
2420 txd->txd2 = next_ptr;
2421 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2422 txd->txd4 = 0;
2423
developer089e8852022-09-28 14:43:46 +08002424 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2425 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developere9356982022-07-04 09:03:20 +08002426 txd->txd5 = 0;
2427 txd->txd6 = 0;
2428 txd->txd7 = 0;
2429 txd->txd8 = 0;
2430 }
developerfd40db22021-04-29 10:08:25 +08002431 }
2432
2433 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
2434 * only as the framework. The real HW descriptors are the PDMA
2435 * descriptors in ring->dma_pdma.
2436 */
2437 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
developer3f28d382023-03-07 16:06:30 +08002438 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev,
2439 MTK_DMA_SIZE * sz,
developere9356982022-07-04 09:03:20 +08002440 &ring->phys_pdma, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08002441 if (!ring->dma_pdma)
2442 goto no_tx_mem;
2443
2444 for (i = 0; i < MTK_DMA_SIZE; i++) {
developere9356982022-07-04 09:03:20 +08002445			pdma_txd = ring->dma_pdma + i * sz;
2446
2447 pdma_txd->txd2 = TX_DMA_DESP2_DEF;
2448 pdma_txd->txd4 = 0;
developerfd40db22021-04-29 10:08:25 +08002449 }
2450 }
2451
2452 ring->dma_size = MTK_DMA_SIZE;
2453 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
developere9356982022-07-04 09:03:20 +08002454 ring->next_free = ring->dma;
2455 ring->last_free = (void *)txd;
developerc4671b22021-05-28 13:16:42 +08002456 ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
developerfd40db22021-04-29 10:08:25 +08002457 ring->thresh = MAX_SKB_FRAGS;
2458
2459 /* make sure that all changes to the dma ring are flushed before we
2460 * continue
2461 */
2462 wmb();
2463
2464 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
developer68ce74f2023-01-03 16:11:57 +08002465 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2466 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
developerfd40db22021-04-29 10:08:25 +08002467 mtk_w32(eth,
2468 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
developer68ce74f2023-01-03 16:11:57 +08002469 soc->reg_map->qdma.crx_ptr);
2470 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
developerfd40db22021-04-29 10:08:25 +08002471 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
developer68ce74f2023-01-03 16:11:57 +08002472 soc->reg_map->qdma.qtx_cfg);
developerfd40db22021-04-29 10:08:25 +08002473 } else {
2474 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2475 mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
2476 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
developer68ce74f2023-01-03 16:11:57 +08002477 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
developerfd40db22021-04-29 10:08:25 +08002478 }
2479
2480 return 0;
2481
2482no_tx_mem:
2483 return -ENOMEM;
2484}
2485
2486static void mtk_tx_clean(struct mtk_eth *eth)
2487{
developere9356982022-07-04 09:03:20 +08002488 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08002489 struct mtk_tx_ring *ring = &eth->tx_ring;
2490 int i;
2491
2492 if (ring->buf) {
2493 for (i = 0; i < MTK_DMA_SIZE; i++)
developerc4671b22021-05-28 13:16:42 +08002494 mtk_tx_unmap(eth, &ring->buf[i], false);
developerfd40db22021-04-29 10:08:25 +08002495 kfree(ring->buf);
2496 ring->buf = NULL;
2497 }
2498
2499 if (!eth->soc->has_sram && ring->dma) {
developer3f28d382023-03-07 16:06:30 +08002500 dma_free_coherent(eth->dma_dev,
developere9356982022-07-04 09:03:20 +08002501 MTK_DMA_SIZE * soc->txrx.txd_size,
2502 ring->dma, ring->phys);
developerfd40db22021-04-29 10:08:25 +08002503 ring->dma = NULL;
2504 }
2505
2506 if (ring->dma_pdma) {
developer3f28d382023-03-07 16:06:30 +08002507 dma_free_coherent(eth->dma_dev,
developere9356982022-07-04 09:03:20 +08002508 MTK_DMA_SIZE * soc->txrx.txd_size,
2509 ring->dma_pdma, ring->phys_pdma);
developerfd40db22021-04-29 10:08:25 +08002510 ring->dma_pdma = NULL;
2511 }
2512}
2513
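/* mtk_rx_alloc() sizes a ring by its use: HW LRO rings get the larger
 * MTK_MAX_LRO_RX_LENGTH buffers and MTK_HW_LRO_DMA_SIZE entries, all
 * other rings use ETH_DATA_LEN buffers and MTK_DMA_SIZE entries. The
 * QDMA RX ring only exists as ring 0.
 */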
2514static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2515{
developer68ce74f2023-01-03 16:11:57 +08002516 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developerfd40db22021-04-29 10:08:25 +08002517 struct mtk_rx_ring *ring;
2518 int rx_data_len, rx_dma_size;
2519 int i;
developer089e8852022-09-28 14:43:46 +08002520 u64 addr64 = 0;
developerfd40db22021-04-29 10:08:25 +08002521
2522 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2523 if (ring_no)
2524 return -EINVAL;
2525 ring = &eth->rx_ring_qdma;
2526 } else {
2527 ring = &eth->rx_ring[ring_no];
2528 }
2529
2530 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2531 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2532 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2533 } else {
2534 rx_data_len = ETH_DATA_LEN;
2535 rx_dma_size = MTK_DMA_SIZE;
2536 }
2537
2538 ring->frag_size = mtk_max_frag_size(rx_data_len);
2539 ring->buf_size = mtk_max_buf_size(ring->frag_size);
2540 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2541 GFP_KERNEL);
2542 if (!ring->data)
2543 return -ENOMEM;
2544
2545 for (i = 0; i < rx_dma_size; i++) {
2546 ring->data[i] = netdev_alloc_frag(ring->frag_size);
2547 if (!ring->data[i])
2548 return -ENOMEM;
2549 }
2550
2551	if (!eth->soc->has_sram ||
2552	    (rx_flag != MTK_RX_FLAGS_NORMAL))
developer3f28d382023-03-07 16:06:30 +08002553 ring->dma = dma_alloc_coherent(eth->dma_dev,
developere9356982022-07-04 09:03:20 +08002554 rx_dma_size * eth->soc->txrx.rxd_size,
2555 &ring->phys, GFP_KERNEL);
developerfd40db22021-04-29 10:08:25 +08002556 else {
2557 struct mtk_tx_ring *tx_ring = &eth->tx_ring;
developere9356982022-07-04 09:03:20 +08002558 ring->dma = tx_ring->dma + MTK_DMA_SIZE *
2559 eth->soc->txrx.rxd_size * (ring_no + 1);
developer18f46a82021-07-20 21:08:21 +08002560 ring->phys = tx_ring->phys + MTK_DMA_SIZE *
developere9356982022-07-04 09:03:20 +08002561 eth->soc->txrx.rxd_size * (ring_no + 1);
developerfd40db22021-04-29 10:08:25 +08002562 }
2563
2564 if (!ring->dma)
2565 return -ENOMEM;
2566
2567 for (i = 0; i < rx_dma_size; i++) {
developere9356982022-07-04 09:03:20 +08002568 struct mtk_rx_dma_v2 *rxd;
2569
developer3f28d382023-03-07 16:06:30 +08002570 dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
developerfd40db22021-04-29 10:08:25 +08002571 ring->data[i] + NET_SKB_PAD + eth->ip_align,
2572 ring->buf_size,
2573 DMA_FROM_DEVICE);
developer3f28d382023-03-07 16:06:30 +08002574 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
developerfd40db22021-04-29 10:08:25 +08002575 return -ENOMEM;
developere9356982022-07-04 09:03:20 +08002576
2577 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2578 rxd->rxd1 = (unsigned int)dma_addr;
developerfd40db22021-04-29 10:08:25 +08002579
developer089e8852022-09-28 14:43:46 +08002580 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
2581 RX_DMA_SDP1(dma_addr) : 0;
2582
developerfd40db22021-04-29 10:08:25 +08002583 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
developere9356982022-07-04 09:03:20 +08002584 rxd->rxd2 = RX_DMA_LSO;
developerfd40db22021-04-29 10:08:25 +08002585 else
developer089e8852022-09-28 14:43:46 +08002586 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size) | addr64;
developerfd40db22021-04-29 10:08:25 +08002587
developere9356982022-07-04 09:03:20 +08002588 rxd->rxd3 = 0;
2589 rxd->rxd4 = 0;
2590
developer089e8852022-09-28 14:43:46 +08002591 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2592 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developere9356982022-07-04 09:03:20 +08002593 rxd->rxd5 = 0;
2594 rxd->rxd6 = 0;
2595 rxd->rxd7 = 0;
2596 rxd->rxd8 = 0;
developerfd40db22021-04-29 10:08:25 +08002597 }
developerfd40db22021-04-29 10:08:25 +08002598 }
2599 ring->dma_size = rx_dma_size;
2600 ring->calc_idx_update = false;
2601 ring->calc_idx = rx_dma_size - 1;
2602 ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ?
2603 MTK_QRX_CRX_IDX_CFG(ring_no) :
2604 MTK_PRX_CRX_IDX_CFG(ring_no);
developer77d03a72021-06-06 00:06:00 +08002605 ring->ring_no = ring_no;
developerfd40db22021-04-29 10:08:25 +08002606 /* make sure that all changes to the dma ring are flushed before we
2607 * continue
2608 */
2609 wmb();
2610
2611 if (rx_flag == MTK_RX_FLAGS_QDMA) {
developer68ce74f2023-01-03 16:11:57 +08002612 mtk_w32(eth, ring->phys,
2613 reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2614 mtk_w32(eth, rx_dma_size,
2615 reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2616 mtk_w32(eth, ring->calc_idx,
2617 ring->crx_idx_reg);
2618 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2619 reg_map->qdma.rst_idx);
developerfd40db22021-04-29 10:08:25 +08002620 } else {
developer68ce74f2023-01-03 16:11:57 +08002621 mtk_w32(eth, ring->phys,
2622 reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2623 mtk_w32(eth, rx_dma_size,
2624 reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2625 mtk_w32(eth, ring->calc_idx,
2626 ring->crx_idx_reg);
2627 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2628 reg_map->pdma.rst_idx);
developerfd40db22021-04-29 10:08:25 +08002629 }
2630
2631 return 0;
2632}
2633
2634static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram)
2635{
2636 int i;
developer089e8852022-09-28 14:43:46 +08002637 u64 addr64 = 0;
developerfd40db22021-04-29 10:08:25 +08002638
2639 if (ring->data && ring->dma) {
2640 for (i = 0; i < ring->dma_size; i++) {
developere9356982022-07-04 09:03:20 +08002641 struct mtk_rx_dma *rxd;
2642
developerfd40db22021-04-29 10:08:25 +08002643 if (!ring->data[i])
2644 continue;
developere9356982022-07-04 09:03:20 +08002645
2646 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2647 if (!rxd->rxd1)
developerfd40db22021-04-29 10:08:25 +08002648 continue;
developere9356982022-07-04 09:03:20 +08002649
developer089e8852022-09-28 14:43:46 +08002650 addr64 = (MTK_HAS_CAPS(eth->soc->caps,
2651 MTK_8GB_ADDRESSING)) ?
2652 ((u64)(rxd->rxd2 & 0xf)) << 32 : 0;
2653
developer3f28d382023-03-07 16:06:30 +08002654 dma_unmap_single(eth->dma_dev,
developer089e8852022-09-28 14:43:46 +08002655 (u64)(rxd->rxd1 | addr64),
developerfd40db22021-04-29 10:08:25 +08002656 ring->buf_size,
2657 DMA_FROM_DEVICE);
2658 skb_free_frag(ring->data[i]);
2659 }
2660 kfree(ring->data);
2661 ring->data = NULL;
2662 }
2663
2664	if (in_sram)
2665 return;
2666
2667 if (ring->dma) {
developer3f28d382023-03-07 16:06:30 +08002668 dma_free_coherent(eth->dma_dev,
developere9356982022-07-04 09:03:20 +08002669 ring->dma_size * eth->soc->txrx.rxd_size,
developerfd40db22021-04-29 10:08:25 +08002670 ring->dma,
2671 ring->phys);
2672 ring->dma = NULL;
2673 }
2674}
2675
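/* HW LRO setup below: each LRO ring auto-learns its flow, and an
 * aggregated super-packet is flushed once the AGE timer, the max AGG
 * timer (both in 20us units) or the max aggregation count is hit.
 * Candidate flows are effectively ranked by packet count and the
 * auto-learn table is refreshed about once per second.
 */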
2676static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2677{
2678 int i;
developer77d03a72021-06-06 00:06:00 +08002679 u32 val;
developerfd40db22021-04-29 10:08:25 +08002680 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2681 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2682
2683 /* set LRO rings to auto-learn modes */
2684 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2685
2686 /* validate LRO ring */
2687 ring_ctrl_dw2 |= MTK_RING_VLD;
2688
2689 /* set AGE timer (unit: 20us) */
2690 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2691 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2692
2693 /* set max AGG timer (unit: 20us) */
2694 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2695
2696 /* set max LRO AGG count */
2697 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2698 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2699
developer77d03a72021-06-06 00:06:00 +08002700 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002701 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2702 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2703 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2704 }
2705
2706 /* IPv4 checksum update enable */
2707 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2708
2709 /* switch priority comparison to packet count mode */
2710 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2711
2712 /* bandwidth threshold setting */
2713 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2714
2715 /* auto-learn score delta setting */
developer77d03a72021-06-06 00:06:00 +08002716 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_LRO_ALT_SCORE_DELTA);
developerfd40db22021-04-29 10:08:25 +08002717
2718 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2719 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2720 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2721
developerfd40db22021-04-29 10:08:25 +08002722	/* the minimum remaining room of SDL0 in the RXD for LRO aggregation */
2723 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2724
developer089e8852022-09-28 14:43:46 +08002725 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2726 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer77d03a72021-06-06 00:06:00 +08002727 val = mtk_r32(eth, MTK_PDMA_RX_CFG);
2728 mtk_w32(eth, val | (MTK_PDMA_LRO_SDL << MTK_RX_CFG_SDL_OFFSET),
2729 MTK_PDMA_RX_CFG);
2730
2731 lro_ctrl_dw0 |= MTK_PDMA_LRO_SDL << MTK_CTRL_DW0_SDL_OFFSET;
2732 } else {
2733 /* set HW LRO mode & the max aggregation count for rx packets */
2734 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2735 }
2736
developerfd40db22021-04-29 10:08:25 +08002737 /* enable HW LRO */
2738 lro_ctrl_dw0 |= MTK_LRO_EN;
2739
developer77d03a72021-06-06 00:06:00 +08002740	/* enable the CPU reason blacklist */
2741 lro_ctrl_dw0 |= MTK_LRO_CRSN_BNW;
2742
developerfd40db22021-04-29 10:08:25 +08002743 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2744 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2745
developer77d03a72021-06-06 00:06:00 +08002746	/* do not use the PPE CPU reason */
2747 mtk_w32(eth, 0xffffffff, MTK_PDMA_LRO_CTRL_DW1);
2748
developerfd40db22021-04-29 10:08:25 +08002749 return 0;
2750}
2751
2752static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2753{
2754 int i;
2755 u32 val;
2756
2757 /* relinquish lro rings, flush aggregated packets */
developer77d03a72021-06-06 00:06:00 +08002758 mtk_w32(eth, MTK_LRO_RING_RELINGUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
developerfd40db22021-04-29 10:08:25 +08002759
2760	/* wait for the relinquishments to complete */
2761 for (i = 0; i < 10; i++) {
2762 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
developer77d03a72021-06-06 00:06:00 +08002763 if (val & MTK_LRO_RING_RELINGUISH_DONE) {
developer8051e042022-04-08 13:26:36 +08002764 mdelay(20);
developerfd40db22021-04-29 10:08:25 +08002765 continue;
2766 }
2767 break;
2768 }
2769
2770 /* invalidate lro rings */
developer77d03a72021-06-06 00:06:00 +08002771 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
developerfd40db22021-04-29 10:08:25 +08002772 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2773
2774 /* disable HW LRO */
2775 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2776}
2777
2778static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2779{
2780 u32 reg_val;
2781
developer089e8852022-09-28 14:43:46 +08002782 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2783 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer77d03a72021-06-06 00:06:00 +08002784 idx += 1;
2785
developerfd40db22021-04-29 10:08:25 +08002786 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2787
2788 /* invalidate the IP setting */
2789 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2790
2791 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2792
2793 /* validate the IP setting */
2794 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2795}
2796
2797static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2798{
2799 u32 reg_val;
2800
developer089e8852022-09-28 14:43:46 +08002801 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
2802 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer77d03a72021-06-06 00:06:00 +08002803 idx += 1;
2804
developerfd40db22021-04-29 10:08:25 +08002805 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2806
2807 /* invalidate the IP setting */
2808 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2809
2810 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2811}
2812
2813static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2814{
2815 int cnt = 0;
2816 int i;
2817
2818 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2819 if (mac->hwlro_ip[i])
2820 cnt++;
2821 }
2822
2823 return cnt;
2824}
2825
2826static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2827 struct ethtool_rxnfc *cmd)
2828{
2829 struct ethtool_rx_flow_spec *fsp =
2830 (struct ethtool_rx_flow_spec *)&cmd->fs;
2831 struct mtk_mac *mac = netdev_priv(dev);
2832 struct mtk_eth *eth = mac->hw;
2833 int hwlro_idx;
2834
2835 if ((fsp->flow_type != TCP_V4_FLOW) ||
2836 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2837 (fsp->location > 1))
2838 return -EINVAL;
2839
2840 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2841 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2842
2843 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2844
2845 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2846
2847 return 0;
2848}
2849
2850static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2851 struct ethtool_rxnfc *cmd)
2852{
2853 struct ethtool_rx_flow_spec *fsp =
2854 (struct ethtool_rx_flow_spec *)&cmd->fs;
2855 struct mtk_mac *mac = netdev_priv(dev);
2856 struct mtk_eth *eth = mac->hw;
2857 int hwlro_idx;
2858
2859 if (fsp->location > 1)
2860 return -EINVAL;
2861
2862 mac->hwlro_ip[fsp->location] = 0;
2863 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2864
2865 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2866
2867 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2868
2869 return 0;
2870}
2871
2872static void mtk_hwlro_netdev_disable(struct net_device *dev)
2873{
2874 struct mtk_mac *mac = netdev_priv(dev);
2875 struct mtk_eth *eth = mac->hw;
2876 int i, hwlro_idx;
2877
2878 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2879 mac->hwlro_ip[i] = 0;
2880 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2881
2882 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2883 }
2884
2885 mac->hwlro_ip_cnt = 0;
2886}
2887
2888static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2889 struct ethtool_rxnfc *cmd)
2890{
2891 struct mtk_mac *mac = netdev_priv(dev);
2892 struct ethtool_rx_flow_spec *fsp =
2893 (struct ethtool_rx_flow_spec *)&cmd->fs;
2894
2895	/* only the TCP destination IPv4 address is meaningful; other fields are ignored */
2896 fsp->flow_type = TCP_V4_FLOW;
2897 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2898 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2899
2900 fsp->h_u.tcp_ip4_spec.ip4src = 0;
2901 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2902 fsp->h_u.tcp_ip4_spec.psrc = 0;
2903 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2904 fsp->h_u.tcp_ip4_spec.pdst = 0;
2905 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2906 fsp->h_u.tcp_ip4_spec.tos = 0;
2907 fsp->m_u.tcp_ip4_spec.tos = 0xff;
2908
2909 return 0;
2910}
2911
2912static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2913 struct ethtool_rxnfc *cmd,
2914 u32 *rule_locs)
2915{
2916 struct mtk_mac *mac = netdev_priv(dev);
2917 int cnt = 0;
2918 int i;
2919
2920 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2921 if (mac->hwlro_ip[i]) {
2922 rule_locs[cnt] = i;
2923 cnt++;
2924 }
2925 }
2926
2927 cmd->rule_cnt = cnt;
2928
2929 return 0;
2930}
2931
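/* RSS bring-up: select static IPv4/IPv6 hashing, program the
 * indirection table, then gate the final enable with MTK_RSS_CFG_REQ
 * (assert the request, set MTK_RSS_EN, release) - presumably a
 * handshake so the hash engine does not apply a half-written
 * configuration. mtk_rss_uninit() uses the same handshake around the
 * disable.
 */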
developer18f46a82021-07-20 21:08:21 +08002932static int mtk_rss_init(struct mtk_eth *eth)
2933{
2934 u32 val;
2935
developer089e8852022-09-28 14:43:46 +08002936 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1)) {
developer18f46a82021-07-20 21:08:21 +08002937 /* Set RSS rings to PSE modes */
2938 val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(1));
2939 val |= MTK_RING_PSE_MODE;
2940 mtk_w32(eth, val, MTK_LRO_CTRL_DW2_CFG(1));
2941
2942 /* Enable non-lro multiple rx */
2943 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2944 val |= MTK_NON_LRO_MULTI_EN;
2945 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2946
2947		/* Enable RSS delay interrupt support */
2948 val |= MTK_LRO_DLY_INT_EN;
2949 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
2950
2951		/* Set the RSS delay interrupt config for ring1 */
2952 mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX1_DLY_INT);
2953 }
2954
2955 /* Hash Type */
2956 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
2957 val |= MTK_RSS_IPV4_STATIC_HASH;
2958 val |= MTK_RSS_IPV6_STATIC_HASH;
2959 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2960
2961	/* Select the size of the indirection table */
2962 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW0);
2963 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW1);
2964 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW2);
2965 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW3);
2966 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW4);
2967 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW5);
2968 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW6);
2969 mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW7);
2970
2971 /* Pause */
2972 val |= MTK_RSS_CFG_REQ;
2973 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2974
2975	/* Enable RSS */
2976 val |= MTK_RSS_EN;
2977 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2978
2979 /* Release pause */
2980 val &= ~(MTK_RSS_CFG_REQ);
2981 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
2982
2983	/* Set per-RSS GRP INT */
2984 mtk_w32(eth, MTK_RX_DONE_INT(MTK_RSS_RING1), MTK_PDMA_INT_GRP3);
2985
2986 /* Set GRP INT */
2987 mtk_w32(eth, 0x21021030, MTK_FE_INT_GRP);
2988
developer089e8852022-09-28 14:43:46 +08002989 /* Enable RSS delay interrupt */
2990 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_RSS_DELAY_INT);
2991
developer18f46a82021-07-20 21:08:21 +08002992 return 0;
2993}
2994
2995static void mtk_rss_uninit(struct mtk_eth *eth)
2996{
2997 u32 val;
2998
2999 /* Pause */
3000 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
3001 val |= MTK_RSS_CFG_REQ;
3002 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3003
3004	/* Disable RSS */
3005 val &= ~(MTK_RSS_EN);
3006 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3007
3008 /* Release pause */
3009 val &= ~(MTK_RSS_CFG_REQ);
3010 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3011}
3012
developerfd40db22021-04-29 10:08:25 +08003013static netdev_features_t mtk_fix_features(struct net_device *dev,
3014 netdev_features_t features)
3015{
3016 if (!(features & NETIF_F_LRO)) {
3017 struct mtk_mac *mac = netdev_priv(dev);
3018 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3019
3020 if (ip_cnt) {
3021 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
3022
3023 features |= NETIF_F_LRO;
3024 }
3025 }
3026
3027 if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
3028 netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n");
3029
3030 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
3031 }
3032
3033 return features;
3034}
3035
3036static int mtk_set_features(struct net_device *dev, netdev_features_t features)
3037{
3038 struct mtk_mac *mac = netdev_priv(dev);
3039 struct mtk_eth *eth = mac->hw;
3040 int err = 0;
3041
3042 if (!((dev->features ^ features) & MTK_SET_FEATURES))
3043 return 0;
3044
3045 if (!(features & NETIF_F_LRO))
3046 mtk_hwlro_netdev_disable(dev);
3047
3048 if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
3049 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
3050 else
3051 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3052
3053 return err;
3054}
3055
3056/* wait for DMA to finish whatever it is doing before we start using it again */
3057static int mtk_dma_busy_wait(struct mtk_eth *eth)
3058{
3059 unsigned long t_start = jiffies;
3060
3061 while (1) {
3062 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3063 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
3064 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
3065 return 0;
3066 } else {
3067 if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
3068 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
3069 return 0;
3070 }
3071
3072 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
3073 break;
3074 }
3075
3076 dev_err(eth->dev, "DMA init timeout\n");
3077 return -1;
3078}
3079
static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;
	u32 i;

	if (mtk_dma_busy_wait(eth))
		return -EBUSY;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* QDMA needs scratch memory for internal reordering of the
		 * descriptors
		 */
		err = mtk_init_fq_dma(eth);
		if (err)
			return err;
	}

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
		if (err)
			return err;
	}

	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
	if (err)
		return err;

	if (eth->hwlro) {
		i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1)) ? 1 : 4;
		for (; i < MTK_MAX_RX_RING_NUM; i++) {
			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
			if (err)
				return err;
		}
		err = mtk_hwlro_rx_init(eth);
		if (err)
			return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
		for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_NORMAL);
			if (err)
				return err;
		}
		err = mtk_rss_init(eth);
		if (err)
			return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* Enable random early drop and set drop threshold
		 * automatically
		 */
		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
			FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
		mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred2);
	}

	return 0;
}

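/* Inverse of mtk_dma_init(): reset the netdev queues, release the QDMA
 * scratch memory (unless it lives in SRAM) and tear down every TX/RX ring
 * that was allocated, including the HW LRO and RSS rings.
 */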
static void mtk_dma_free(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
	if (!eth->soc->has_sram && eth->scratch_ring) {
		dma_free_coherent(eth->dma_dev,
				  MTK_DMA_SIZE * soc->txrx.txd_size,
				  eth->scratch_ring, eth->phy_scratch_ring);
		eth->scratch_ring = NULL;
		eth->phy_scratch_ring = 0;
	}
	mtk_tx_clean(eth);
	mtk_rx_clean(eth, &eth->rx_ring[0], eth->soc->has_sram);
	mtk_rx_clean(eth, &eth->rx_ring_qdma, 0);

	if (eth->hwlro) {
		mtk_hwlro_rx_uninit(eth);

		i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1)) ? 1 : 4;
		for (; i < MTK_MAX_RX_RING_NUM; i++)
			mtk_rx_clean(eth, &eth->rx_ring[i], 0);
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
		mtk_rss_uninit(eth);

		for (i = 1; i < MTK_RX_NAPI_NUM; i++)
			mtk_rx_clean(eth, &eth->rx_ring[i], 1);
	}

	if (eth->scratch_head) {
		kfree(eth->scratch_head);
		eth->scratch_head = NULL;
	}
}

static void mtk_tx_timeout(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");

	if (atomic_read(&reset_lock) == 0)
		schedule_work(&eth->pending_work);
}

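/* RX interrupt handler: mask further RX-done interrupts for this ring and
 * hand the actual work to the per-ring NAPI context.
 */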
static irqreturn_t mtk_handle_irq_rx(int irq, void *priv)
{
	struct mtk_napi *rx_napi = priv;
	struct mtk_eth *eth = rx_napi->eth;
	struct mtk_rx_ring *ring = rx_napi->rx_ring;

	if (likely(napi_schedule_prep(&rx_napi->napi))) {
		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(ring->ring_no));
		__napi_schedule(&rx_napi->napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->tx_napi))) {
		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
		__napi_schedule(&eth->tx_napi);
	}

	return IRQ_HANDLED;
}

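/* Shared interrupt handler (MTK_SHARED_INT): demultiplex the line by
 * checking which unmasked TX/RX-done status bits are pending and dispatch
 * to the dedicated handlers.
 */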
static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;

	if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT(0)) {
		if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT(0))
			mtk_handle_irq_rx(irq, &eth->rx_napi[0]);
	}
	if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
		if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
			mtk_handle_irq_tx(irq, _eth);
	}

	return IRQ_HANDLED;
}

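/* GPIO link interrupt for fixed-link setups: read-clear the PHY interrupt
 * status, re-read BMSR and propagate a link state change to the carrier
 * state of the attached net device.
 */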
static irqreturn_t mtk_handle_irq_fixed_link(int irq, void *_mac)
{
	struct mtk_mac *mac = _mac;
	struct mtk_eth *eth = mac->hw;
	struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
	struct net_device *dev = phylink_priv->dev;
	int link_old, link_new;

	/* clear interrupt status for gpy211 */
	_mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);

	link_old = phylink_priv->link;
	link_new = _mtk_mdio_read(eth, phylink_priv->phyaddr, MII_BMSR) & BMSR_LSTATUS;

	if (link_old != link_new) {
		phylink_priv->link = link_new;
		if (link_new) {
			pr_info("phylink.%d %s: Link is Up\n",
				phylink_priv->id, dev ? dev->name : "");
			if (dev)
				netif_carrier_on(dev);
		} else {
			pr_info("phylink.%d %s: Link is Down\n",
				phylink_priv->id, dev ? dev->name : "");
			if (dev)
				netif_carrier_off(dev);
		}
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
	mtk_handle_irq_rx(eth->irq[2], &eth->rx_napi[0]);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
}
#endif

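/* Bring up the DMA engines: allocate the rings via mtk_dma_init(), then
 * program the QDMA and PDMA global configuration registers; the burst
 * size, write-back and 2-byte RX offset options differ between NETSYS
 * versions, and HW LRO additionally needs the PDMA LRO enable bit.
 */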
static int mtk_start_dma(struct mtk_eth *eth)
{
	u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	int val, err;

	err = mtk_dma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		val = mtk_r32(eth, reg_map->qdma.glo_cfg);
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			val &= ~MTK_RESV_BUF_MASK;
			mtk_w32(eth,
				val | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
				MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
				MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
				MTK_RESV_BUF | MTK_WCOMP_EN |
				MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
				MTK_RX_2B_OFFSET, reg_map->qdma.glo_cfg);
		} else {
			mtk_w32(eth,
				val | MTK_TX_DMA_EN |
				MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
				MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
				MTK_RX_BT_32DWORDS,
				reg_map->qdma.glo_cfg);
		}

		val = mtk_r32(eth, reg_map->pdma.glo_cfg);
		mtk_w32(eth,
			val | MTK_RX_DMA_EN | rx_2b_offset |
			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
			reg_map->pdma.glo_cfg);
	} else {
		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
			reg_map->pdma.glo_cfg);
	}

	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1) && eth->hwlro) {
		val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
		mtk_w32(eth, val | MTK_RX_DMA_LRO_EN, MTK_PDMA_GLO_CFG);
	}

	return 0;
}

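/* Program the GDM forwarding rule for one MAC: clear the low 16 forward
 * bits, enable RX checksum offload, apply the caller's target (e.g.
 * MTK_GDMA_TO_PDMA or MTK_GDMA_DROP_ALL) and tag DSA ports so the special
 * tag is parsed.
 */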
void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
{
	u32 val;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		return;

	val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));

	/* default setup the forward port to send frame to PDMA */
	val &= ~0xffff;

	/* Enable RX checksum */
	val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

	val |= config;

	if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
		val |= MTK_GDMA_SPECIAL_TAG;

	mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
}

void mtk_set_pse_drop(u32 config)
{
	struct mtk_eth *eth = g_eth;

	if (eth)
		mtk_w32(eth, config, PSE_PPE0_DROP);
}
EXPORT_SYMBOL(mtk_set_pse_drop);

static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
	int err, i;
	struct device_node *phy_node;

	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
	if (err) {
		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
			   err);
		return err;
	}

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!refcount_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err)
			return err;

		/* Indicates CDM to parse the MTK special tag from CPU */
		if (netdev_uses_dsa(dev)) {
			u32 val;

			val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
			mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
			val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
			mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
		}

		napi_enable(&eth->tx_napi);
		napi_enable(&eth->rx_napi[0].napi);
		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
			for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
				napi_enable(&eth->rx_napi[i].napi);
				mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(i));
			}
		}

		refcount_set(&eth->dma_refcnt, 1);
	} else {
		refcount_inc(&eth->dma_refcnt);
	}

	if (phylink_priv->desc) {
		/* Notice: this programming sequence is only for the GPY211
		 * single PHY chip. If the single PHY chip is not a GPY211,
		 * contact the PHY vendor for the details of how to:
		 * - enable the link status change interrupt
		 * - clear the interrupt source
		 */

		/* clear interrupt source for gpy211 */
		_mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);

		/* enable link status change interrupt for gpy211 */
		_mtk_mdio_write(eth, phylink_priv->phyaddr, 0x19, 0x0001);

		phylink_priv->dev = dev;

		/* override dev pointer for single PHY chip 0 */
		if (phylink_priv->id == 0) {
			struct net_device *tmp;

			tmp = __dev_get_by_name(&init_net, phylink_priv->label);
			if (tmp)
				phylink_priv->dev = tmp;
			else
				phylink_priv->dev = NULL;
		}
	}

	phylink_start(mac->phylink);
	netif_start_queue(dev);
	phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (!phy_node && eth->xgmii->regmap_sgmii[mac->id])
		regmap_write(eth->xgmii->regmap_sgmii[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, 0);

	mtk_gdm_config(eth, mac->id, MTK_GDMA_TO_PDMA);

	return 0;
}

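/* Clear the TX/RX enable bits in the given global config register and poll
 * up to ten times (20 ms apart) for the engine to go idle.
 */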
static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_bh(&eth->page_lock);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_bh(&eth->page_lock);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			mdelay(20);
			continue;
		}
		break;
	}
}

static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int i;
	u32 val = 0;
	struct device_node *phy_node;

	mtk_gdm_config(eth, mac->id, MTK_GDMA_DROP_ALL);
	netif_tx_disable(dev);

	phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (phy_node) {
		val = _mtk_mdio_read(eth, 0, 0);
		val |= BMCR_PDOWN;
		_mtk_mdio_write(eth, 0, 0, val);
	} else if (eth->xgmii->regmap_sgmii[mac->id]) {
		regmap_read(eth->xgmii->regmap_sgmii[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
		val |= SGMII_PHYA_PWD;
		regmap_write(eth->xgmii->regmap_sgmii[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
	}

	/* disable GMAC RX */
	val = mtk_r32(eth, MTK_MAC_MCR(mac->id));
	mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(mac->id));

	phylink_stop(mac->phylink);

	phylink_disconnect_phy(mac->phylink);

	/* only shutdown DMA if this is the last user */
	if (!refcount_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
	napi_disable(&eth->tx_napi);
	napi_disable(&eth->rx_napi[0].napi);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
		for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
			mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(i));
			napi_disable(&eth->rx_napi[i].napi);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
	mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);

	mtk_dma_free(eth);

	return 0;
}

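/* Assert the requested ETHSYS reset bits, poll up to 5 s for them to
 * latch, then deassert them and give the hardware time to settle.
 */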
void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
	u32 val = 0, i = 0;

	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits, reset_bits);

	while (i++ < 5000) {
		mdelay(1);
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);

		if ((val & reset_bits) == reset_bits) {
			mtk_reset_event_update(eth, MTK_EVENT_COLD_CNT);
			regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
					   reset_bits, ~reset_bits);
			break;
		}
	}

	mdelay(10);
}

static void mtk_clk_disable(struct mtk_eth *eth)
{
	int clk;

	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
		clk_disable_unprepare(eth->clks[clk]);
}

static int mtk_clk_enable(struct mtk_eth *eth)
{
	int clk, ret;

	for (clk = 0; clk < MTK_CLK_MAX; clk++) {
		ret = clk_prepare_enable(eth->clks[clk]);
		if (ret)
			goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	while (--clk >= 0)
		clk_disable_unprepare(eth->clks[clk]);

	return ret;
}

static int mtk_napi_init(struct mtk_eth *eth)
{
	struct mtk_napi *rx_napi = &eth->rx_napi[0];
	int i;

	rx_napi->eth = eth;
	rx_napi->rx_ring = &eth->rx_ring[0];
	rx_napi->irq_grp_no = 2;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
		for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
			rx_napi = &eth->rx_napi[i];
			rx_napi->eth = eth;
			rx_napi->rx_ring = &eth->rx_ring[i];
			rx_napi->irq_grp_no = 2 + i;
		}
	}

	return 0;
}

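/* One-time (or post-SER) hardware bring-up: enable clocks and runtime PM
 * on a cold path, perform a warm or cold frame engine reset, then program
 * the PSE thresholds, GDM/CDM thresholds, interrupt grouping and delay
 * registers according to the NETSYS version.
 */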
static int mtk_hw_init(struct mtk_eth *eth, u32 type)
{
	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
		       ETHSYS_DMA_AG_MAP_PPE;
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	int i, ret = 0;
	u32 val;

	pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
		atomic_read(&reset_lock), atomic_read(&force));

	if (atomic_read(&reset_lock) == 0) {
		if (test_and_set_bit(MTK_HW_INIT, &eth->state))
			return 0;

		pm_runtime_enable(eth->dev);
		pm_runtime_get_sync(eth->dev);

		ret = mtk_clk_enable(eth);
		if (ret)
			goto err_disable_pm;
	}

	if (eth->ethsys)
		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
				   of_dma_is_coherent(eth->dma_dev->of_node) *
				   dma_mask);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		ret = device_reset(eth->dev);
		if (ret) {
			dev_err(eth->dev, "MAC reset failed!\n");
			goto err_disable_pm;
		}

		/* enable interrupt delay for RX */
		mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);

		/* disable delay and normal interrupt */
		mtk_tx_irq_disable(eth, ~0);
		mtk_rx_irq_disable(eth, ~0);

		return 0;
	}

	pr_info("[%s] execute fe %s reset\n", __func__,
		(type == MTK_TYPE_WARM_RESET) ? "warm" : "cold");

	if (type == MTK_TYPE_WARM_RESET)
		mtk_eth_warm_reset(eth);
	else
		mtk_eth_cold_reset(eth);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		/* Set FE to PDMAv2 if necessary */
		mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
	}

	if (eth->pctl) {
		/* Set GE2 driving and slew rate */
		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

		/* set GE2 TDSEL */
		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

		/* set GE2 TUNE */
		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
	}

	/* Set linkdown as the default for each GMAC. Its own MCR would be set
	 * up with the more appropriate value when mtk_mac_config call is being
	 * invoked.
	 */
	for (i = 0; i < MTK_MAC_COUNT; i++)
		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));

	/* Enable RX VLAN offloading */
	if (eth->soc->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
	else
		mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);

	/* enable interrupt delay for RX/TX */
	mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
	mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);

	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);

	/* FE int grouping */
	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
	mtk_w32(eth, MTK_RX_DONE_INT(0), reg_map->pdma.int_grp2);
	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
	mtk_w32(eth, MTK_RX_DONE_INT(0), reg_map->qdma.int_grp2);
	mtk_w32(eth, 0x21021003, MTK_FE_INT_GRP);
	mtk_w32(eth, MTK_FE_INT_TSO_FAIL |
		MTK_FE_INT_TSO_ILLEGAL | MTK_FE_INT_TSO_ALIGN |
		MTK_FE_INT_RFIFO_OV | MTK_FE_INT_RFIFO_UF, MTK_FE_INT_ENABLE);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		/* PSE should not drop port1, port8 and port9 packets */
		mtk_w32(eth, 0x00000302, PSE_NO_DROP_CFG);

		/* PSE should drop p8 and p9 packets when WDMA Rx ring full */
		mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);

		/* PSE free buffer drop threshold */
		mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));

		/* GDM and CDM Threshold */
		mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
		mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);

		/* Disable GDM1 RX CRC stripping */
		val = mtk_r32(eth, MTK_GDMA_FWD_CFG(0));
		val &= ~MTK_GDMA_STRP_CRC;
		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(0));

		/* PSE GDM3 MIB counter has incorrect hw default values,
		 * so the driver ought to read-clear the values beforehand
		 * in case ethtool retrieves wrong MIB values.
		 */
		for (i = 0; i < MTK_STAT_OFFSET; i += 0x4)
			mtk_r32(eth,
				MTK_GDM1_TX_GBCNT + MTK_STAT_OFFSET * 2 + i);
	} else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		/* PSE Free Queue Flow Control */
		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);

		/* PSE should not drop port8 and port9 packets from WDMA Tx */
		mtk_w32(eth, 0x00000300, PSE_NO_DROP_CFG);

		/* PSE should drop p8 and p9 packets when WDMA Rx ring full */
		mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);

		/* PSE config input queue threshold */
		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
		mtk_w32(eth, 0x002a000e, PSE_IQ_REV(8));

		/* PSE config output queue threshold */
		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));

		/* GDM and CDM Threshold */
		mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
	}

	return 0;

err_disable_pm:
	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return ret;
}

static int mtk_hw_deinit(struct mtk_eth *eth)
{
	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
		return 0;

	mtk_clk_disable(eth);

	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return 0;
}

static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *mac_addr;

	mac_addr = of_get_mac_address(mac->of_node);
	if (!IS_ERR(mac_addr))
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use random mac address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	return 0;
}

static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phylink_disconnect_phy(mac->phylink);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);
}

static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
	default:
		/* by default invoke the mtk_eth_dbg handler */
		return mtk_do_priv_ioctl(dev, ifr, cmd);
	}
}

int mtk_phy_config(struct mtk_eth *eth, int enable)
{
	struct device_node *mii_np = NULL;
	struct device_node *child = NULL;
	int addr = 0;
	u32 val = 0;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		dev_err(eth->dev, "device is not available\n");
		of_node_put(mii_np);
		return -ENODEV;
	}

	for_each_available_child_of_node(mii_np, child) {
		addr = of_mdio_parse_addr(&eth->mii_bus->dev, child);
		if (addr < 0)
			continue;
		pr_info("%s %d addr:%d name:%s\n",
			__func__, __LINE__, addr, child->name);
		val = _mtk_mdio_read(eth, addr, mdiobus_c45_addr(0x1e, 0));
		if (enable)
			val &= ~BMCR_PDOWN;
		else
			val |= BMCR_PDOWN;
		_mtk_mdio_write(eth, addr, mdiobus_c45_addr(0x1e, 0), val);
	}

	of_node_put(mii_np);

	return 0;
}

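/* System error recovery worker: quiesce the PHYs, PPE and FE, notify the
 * WiFi offload drivers, stop every netdev so DMA shuts down cleanly, run a
 * warm mtk_hw_init() and finally bring the interfaces and the DMA monitor
 * timer back up.
 */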
static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
	int err, i = 0;
	unsigned long restart = 0;
	u32 val = 0;

	atomic_inc(&reset_lock);
	val = mtk_r32(eth, MTK_FE_INT_STATUS);
	if (!mtk_check_reset_event(eth, val)) {
		atomic_dec(&reset_lock);
		pr_info("[%s] No need to do FE reset!\n", __func__);
		return;
	}

	rtnl_lock();

	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
		cpu_relax();

	mtk_phy_config(eth, 0);

	/* Adjust PPE configurations to prepare for reset */
	mtk_prepare_reset_ppe(eth, 0);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
		mtk_prepare_reset_ppe(eth, 1);

	/* Adjust FE configurations to prepare for reset */
	mtk_prepare_reset_fe(eth);

	/* Trigger Wifi SER reset */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (mtk_reset_flag == MTK_FE_STOP_TRAFFIC) {
			pr_info("send MTK_FE_STOP_TRAFFIC event\n");
			call_netdevice_notifiers(MTK_FE_STOP_TRAFFIC,
						 eth->netdev[i]);
		} else {
			pr_info("send MTK_FE_START_RESET event\n");
			call_netdevice_notifiers(MTK_FE_START_RESET,
						 eth->netdev[i]);
		}
		rtnl_unlock();
		if (!wait_for_completion_timeout(&wait_ser_done, 3000))
			pr_warn("wait for MTK_FE_START_RESET timed out\n");
		rtnl_lock();
		break;
	}

	del_timer_sync(&eth->mtk_dma_monitor_timer);
	pr_info("[%s] mtk_stop starts!\n", __func__);
	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		__set_bit(i, &restart);
	}
	pr_info("[%s] mtk_stop ends!\n", __func__);
	mdelay(15);

	if (eth->dev->pins)
		pinctrl_select_state(eth->dev->pins->p,
				     eth->dev->pins->default_state);

	pr_info("[%s] mtk_hw_init starts!\n", __func__);
	mtk_hw_init(eth, MTK_TYPE_WARM_RESET);
	pr_info("[%s] mtk_hw_init ends!\n", __func__);

	/* restart DMA and enable IRQs */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!test_bit(i, &restart) || !eth->netdev[i])
			continue;
		err = mtk_open(eth->netdev[i]);
		if (err) {
			netif_alert(eth, ifup, eth->netdev[i],
				    "Driver up/down cycle failed, closing device.\n");
			dev_close(eth->netdev[i]);
		}
	}

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (mtk_reset_flag == MTK_FE_STOP_TRAFFIC) {
			pr_info("send MTK_FE_START_TRAFFIC event\n");
			call_netdevice_notifiers(MTK_FE_START_TRAFFIC,
						 eth->netdev[i]);
		} else {
			pr_info("send MTK_FE_RESET_DONE event\n");
			call_netdevice_notifiers(MTK_FE_RESET_DONE,
						 eth->netdev[i]);
		}
		call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE,
					 eth->netdev[i]);
		break;
	}

	atomic_dec(&reset_lock);

	timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
	eth->mtk_dma_monitor_timer.expires = jiffies;
	add_timer(&eth->mtk_dma_monitor_timer);

	mtk_phy_config(eth, 1);
	mtk_reset_flag = 0;
	clear_bit_unlock(MTK_RESETTING, &eth->state);

	rtnl_unlock();
}

static int mtk_free_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		free_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_unreg_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		unregister_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_cleanup(struct mtk_eth *eth)
{
	mtk_unreg_dev(eth);
	mtk_free_dev(eth);
	cancel_work_sync(&eth->pending_work);

	return 0;
}

static int mtk_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
}

static int mtk_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	if (!mac->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_nway_reset(mac->phylink);
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hwstats->stats_lock);
		}
	}

	data_src = (u64 *)hwstats;

	do {
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		if (dev->hw_features & NETIF_F_LRO) {
			cmd->data = MTK_MAX_RX_RING_NUM;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (dev->hw_features & NETIF_F_LRO) {
			struct mtk_mac *mac = netdev_priv(dev);

			cmd->rule_cnt = mac->hwlro_ip_cnt;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRULE:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_all(dev, cmd,
						     rule_locs);
		break;
	default:
		break;
	}

	return ret;
}

static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_add_ipaddr(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_del_ipaddr(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 val;

	pause->autoneg = 0;

	if (mac->type == MTK_GDM_TYPE) {
		val = mtk_r32(eth, MTK_MAC_MCR(mac->id));

		pause->rx_pause = !!(val & MAC_MCR_FORCE_RX_FC);
		pause->tx_pause = !!(val & MAC_MCR_FORCE_TX_FC);
	} else if (mac->type == MTK_XGDM_TYPE) {
		val = mtk_r32(eth, MTK_XMAC_MCR(mac->id));

		pause->rx_pause = !!(val & XMAC_MCR_FORCE_RX_FC);
		pause->tx_pause = !!(val & XMAC_MCR_FORCE_TX_FC);
	}
}

static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return phylink_ethtool_set_pauseparam(mac->phylink, pause);
}

static int mtk_get_eee(struct net_device *dev, struct ethtool_eee *eee)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 val;

	if (mac->type == MTK_GDM_TYPE) {
		val = mtk_r32(eth, MTK_MAC_EEE(mac->id));

		eee->tx_lpi_enabled = mac->tx_lpi_enabled;
		eee->tx_lpi_timer = FIELD_GET(MAC_EEE_LPI_TXIDLE_THD, val);
	}

	return phylink_ethtool_get_eee(mac->phylink, eee);
}

static int mtk_set_eee(struct net_device *dev, struct ethtool_eee *eee)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (mac->type == MTK_GDM_TYPE) {
		if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
			return -EINVAL;

		mac->tx_lpi_timer = eee->tx_lpi_timer;

		mtk_setup_eee(mac, eee->eee_enabled && eee->tx_lpi_timer);
	}

	return phylink_ethtool_set_eee(mac->phylink, eee);
}

static const struct ethtool_ops mtk_ethtool_ops = {
	.get_link_ksettings = mtk_get_link_ksettings,
	.set_link_ksettings = mtk_set_link_ksettings,
	.get_drvinfo = mtk_get_drvinfo,
	.get_msglevel = mtk_get_msglevel,
	.set_msglevel = mtk_set_msglevel,
	.nway_reset = mtk_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_strings = mtk_get_strings,
	.get_sset_count = mtk_get_sset_count,
	.get_ethtool_stats = mtk_get_ethtool_stats,
	.get_rxnfc = mtk_get_rxnfc,
	.set_rxnfc = mtk_set_rxnfc,
	.get_pauseparam = mtk_get_pauseparam,
	.set_pauseparam = mtk_set_pauseparam,
	.get_eee = mtk_get_eee,
	.set_eee = mtk_set_eee,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init = mtk_init,
	.ndo_uninit = mtk_uninit,
	.ndo_open = mtk_open,
	.ndo_stop = mtk_stop,
	.ndo_start_xmit = mtk_start_xmit,
	.ndo_set_mac_address = mtk_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = mtk_do_ioctl,
	.ndo_tx_timeout = mtk_tx_timeout,
	.ndo_get_stats64 = mtk_get_stats64,
	.ndo_fix_features = mtk_fix_features,
	.ndo_set_features = mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = mtk_poll_controller,
#endif
};

static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	const __be32 *_id = of_get_property(np, "reg", NULL);
	const char *label;
	struct phylink *phylink;
	int mac_type, phy_mode, id, err;
	struct mtk_mac *mac;
	struct mtk_phylink_priv *phylink_priv;
	struct fwnode_handle *fixed_node;
	struct gpio_desc *desc;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id < 0 || id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
	mac->hwlro_ip_cnt = 0;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	/* phylink create */
	phy_mode = of_get_phy_mode(np);
	if (phy_mode < 0) {
		dev_err(eth->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto free_netdev;
	}

	/* mac config is not set */
	mac->interface = PHY_INTERFACE_MODE_NA;
	mac->mode = MLO_AN_PHY;
	mac->speed = SPEED_UNKNOWN;

	mac->tx_lpi_timer = 1;

	mac->phylink_config.dev = &eth->netdev[id]->dev;
	mac->phylink_config.type = PHYLINK_NETDEV;

	mac->type = 0;
	if (!of_property_read_string(np, "mac-type", &label)) {
		for (mac_type = 0; mac_type < MTK_GDM_TYPE_MAX; mac_type++) {
			if (!strcasecmp(label, gdm_type(mac_type)))
				break;
		}

		switch (mac_type) {
		case 0:
			mac->type = MTK_GDM_TYPE;
			break;
		case 1:
			mac->type = MTK_XGDM_TYPE;
			break;
		default:
			dev_warn(eth->dev, "incorrect mac-type\n");
			break;
		}
	}

	phylink = phylink_create(&mac->phylink_config,
				 of_fwnode_handle(mac->of_node),
				 phy_mode, &mtk_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto free_netdev;
	}

	mac->phylink = phylink;

	fixed_node = fwnode_get_named_child_node(of_fwnode_handle(mac->of_node),
						 "fixed-link");
	if (fixed_node) {
		desc = fwnode_get_named_gpiod(fixed_node, "link-gpio",
					      0, GPIOD_IN, "?");
		if (!IS_ERR(desc)) {
			struct device_node *phy_np;
			const char *label;
			int irq, phyaddr;

			phylink_priv = &mac->phylink_priv;

			phylink_priv->desc = desc;
			phylink_priv->id = id;
			phylink_priv->link = -1;

			irq = gpiod_to_irq(desc);
			if (irq > 0) {
				devm_request_irq(eth->dev, irq, mtk_handle_irq_fixed_link,
						 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
						 "ethernet:fixed link", mac);
			}

			if (!of_property_read_string(to_of_node(fixed_node),
						     "label", &label)) {
				if (strlen(label) < 16) {
					strncpy(phylink_priv->label, label,
						strlen(label));
				} else {
					dev_err(eth->dev, "insufficient space for label!\n");
				}
			}

			phy_np = of_parse_phandle(to_of_node(fixed_node), "phy-handle", 0);
			if (phy_np) {
				if (!of_property_read_u32(phy_np, "reg", &phyaddr))
					phylink_priv->phyaddr = phyaddr;
			}
		}
		fwnode_handle_put(fixed_node);
	}

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;

	eth->netdev[id]->hw_features = eth->soc->hw_features;
	if (eth->hwlro)
		eth->netdev[id]->hw_features |= NETIF_F_LRO;

	eth->netdev[id]->vlan_features = eth->soc->hw_features &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= eth->soc->hw_features;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	eth->netdev[id]->irq = eth->irq[0];
	eth->netdev[id]->dev.of_node = np;

	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}

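/* Swap the struct device used for DMA mappings (e.g. when an offload
 * engine takes over the rings): close every running netdev, switch
 * eth->dma_dev, then reopen them so all rings are reallocated against the
 * new device.
 */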
4458
developer3f28d382023-03-07 16:06:30 +08004459void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4460{
4461 struct net_device *dev, *tmp;
4462 LIST_HEAD(dev_list);
4463 int i;
4464
4465 rtnl_lock();
4466
4467 for (i = 0; i < MTK_MAC_COUNT; i++) {
4468 dev = eth->netdev[i];
4469
4470 if (!dev || !(dev->flags & IFF_UP))
4471 continue;
4472
4473 list_add_tail(&dev->close_list, &dev_list);
4474 }
4475
4476 dev_close_many(&dev_list, false);
4477
4478 eth->dma_dev = dma_dev;
4479
4480 list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4481 list_del_init(&dev->close_list);
4482 dev_open(dev, NULL);
4483 }
4484
4485 rtnl_unlock();
4486}
4487
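/* Platform driver probe: map I/O (and SRAM) resources, look up syscon
 * regmaps, clocks and IRQs, run a cold mtk_hw_init(), create one netdev
 * per "mediatek,eth-mac" node and register the NAPI contexts, IRQ handlers
 * and debugfs/procfs hooks.
 */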
developerfd40db22021-04-29 10:08:25 +08004488static int mtk_probe(struct platform_device *pdev)
4489{
4490 struct device_node *mac_np;
4491 struct mtk_eth *eth;
4492 int err, i;
4493
4494 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4495 if (!eth)
4496 return -ENOMEM;
4497
4498 eth->soc = of_device_get_match_data(&pdev->dev);
4499
4500 eth->dev = &pdev->dev;
developer3f28d382023-03-07 16:06:30 +08004501 eth->dma_dev = &pdev->dev;
developerfd40db22021-04-29 10:08:25 +08004502 eth->base = devm_platform_ioremap_resource(pdev, 0);
4503 if (IS_ERR(eth->base))
4504 return PTR_ERR(eth->base);
4505
developer089e8852022-09-28 14:43:46 +08004506 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
4507 eth->sram_base = devm_platform_ioremap_resource(pdev, 1);
4508 if (IS_ERR(eth->sram_base))
4509 return PTR_ERR(eth->sram_base);
4510 }
4511
developerfd40db22021-04-29 10:08:25 +08004512 if(eth->soc->has_sram) {
4513 struct resource *res;
4514 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
developer4c32b7a2021-11-13 16:46:43 +08004515 if (unlikely(!res))
4516 return -EINVAL;
developerfd40db22021-04-29 10:08:25 +08004517 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
4518 }
4519
developer68ce74f2023-01-03 16:11:57 +08004520 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
developerfd40db22021-04-29 10:08:25 +08004521 eth->ip_align = NET_IP_ALIGN;
developerfd40db22021-04-29 10:08:25 +08004522
developer089e8852022-09-28 14:43:46 +08004523 if (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) {
4524 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
4525 if (!err) {
4526 err = dma_set_coherent_mask(&pdev->dev,
4527 DMA_BIT_MASK(36));
4528 if (err) {
4529 dev_err(&pdev->dev, "Wrong DMA config\n");
4530 return -EINVAL;
4531 }
4532 }
4533 }
4534
developerfd40db22021-04-29 10:08:25 +08004535 spin_lock_init(&eth->page_lock);
4536 spin_lock_init(&eth->tx_irq_lock);
4537 spin_lock_init(&eth->rx_irq_lock);
developerd82e8372022-02-09 15:00:09 +08004538 spin_lock_init(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +08004539
4540 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4541 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4542 "mediatek,ethsys");
4543 if (IS_ERR(eth->ethsys)) {
4544 dev_err(&pdev->dev, "no ethsys regmap found\n");
4545 return PTR_ERR(eth->ethsys);
4546 }
4547 }
4548
4549 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4550 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4551 "mediatek,infracfg");
4552 if (IS_ERR(eth->infra)) {
4553 dev_err(&pdev->dev, "no infracfg regmap found\n");
4554 return PTR_ERR(eth->infra);
4555 }
4556 }
4557
developer3f28d382023-03-07 16:06:30 +08004558 if (of_dma_is_coherent(pdev->dev.of_node)) {
4559 struct regmap *cci;
4560
4561 cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4562 "cci-control-port");
4563 /* enable CPU/bus coherency */
4564 if (!IS_ERR(cci))
4565 regmap_write(cci, 0, 3);
4566 }
4567
developerfd40db22021-04-29 10:08:25 +08004568 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
developer089e8852022-09-28 14:43:46 +08004569 eth->xgmii = devm_kzalloc(eth->dev, sizeof(*eth->xgmii),
developerfd40db22021-04-29 10:08:25 +08004570 GFP_KERNEL);
developer089e8852022-09-28 14:43:46 +08004571 if (!eth->xgmii)
developerfd40db22021-04-29 10:08:25 +08004572 return -ENOMEM;
4573
developer089e8852022-09-28 14:43:46 +08004574 eth->xgmii->eth = eth;
4575 err = mtk_sgmii_init(eth->xgmii, pdev->dev.of_node,
developerfd40db22021-04-29 10:08:25 +08004576 eth->soc->ana_rgc3);
4577
developer089e8852022-09-28 14:43:46 +08004578 if (err)
4579 return err;
4580 }
4581
4582 if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
4583 err = mtk_usxgmii_init(eth->xgmii, pdev->dev.of_node);
4584 if (err)
4585 return err;
4586
4587 err = mtk_xfi_pextp_init(eth->xgmii, pdev->dev.of_node);
4588 if (err)
4589 return err;
4590
4591 err = mtk_xfi_pll_init(eth->xgmii, pdev->dev.of_node);
4592 if (err)
4593 return err;
4594
4595 err = mtk_toprgu_init(eth, pdev->dev.of_node);
developerfd40db22021-04-29 10:08:25 +08004596 if (err)
4597 return err;
4598 }
4599
4600 if (eth->soc->required_pctl) {
4601 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4602 "mediatek,pctl");
4603 if (IS_ERR(eth->pctl)) {
4604 dev_err(&pdev->dev, "no pctl regmap found\n");
4605 return PTR_ERR(eth->pctl);
4606 }
4607 }
4608
developer18f46a82021-07-20 21:08:21 +08004609 for (i = 0; i < MTK_MAX_IRQ_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08004610 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4611 eth->irq[i] = eth->irq[0];
4612 else
4613 eth->irq[i] = platform_get_irq(pdev, i);
4614 if (eth->irq[i] < 0) {
4615 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4616 return -ENXIO;
4617 }
4618 }
4619
4620 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4621 eth->clks[i] = devm_clk_get(eth->dev,
4622 mtk_clks_source_name[i]);
4623 if (IS_ERR(eth->clks[i])) {
4624 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
4625 return -EPROBE_DEFER;
4626 if (eth->soc->required_clks & BIT(i)) {
4627 dev_err(&pdev->dev, "clock %s not found\n",
4628 mtk_clks_source_name[i]);
4629 return -EINVAL;
4630 }
4631 eth->clks[i] = NULL;
4632 }
4633 }
4634
4635 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4636 INIT_WORK(&eth->pending_work, mtk_pending_work);
4637
developer8051e042022-04-08 13:26:36 +08004638 err = mtk_hw_init(eth, MTK_TYPE_COLD_RESET);
developerfd40db22021-04-29 10:08:25 +08004639 if (err)
4640 return err;
4641
4642 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4643
4644 for_each_child_of_node(pdev->dev.of_node, mac_np) {
4645 if (!of_device_is_compatible(mac_np,
4646 "mediatek,eth-mac"))
4647 continue;
4648
4649 if (!of_device_is_available(mac_np))
4650 continue;
4651
4652 err = mtk_add_mac(eth, mac_np);
4653 if (err) {
4654 of_node_put(mac_np);
4655 goto err_deinit_hw;
4656 }
4657 }
4658
developer18f46a82021-07-20 21:08:21 +08004659 err = mtk_napi_init(eth);
4660 if (err)
4661 goto err_free_dev;
4662
developerfd40db22021-04-29 10:08:25 +08004663 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4664 err = devm_request_irq(eth->dev, eth->irq[0],
4665 mtk_handle_irq, 0,
4666 dev_name(eth->dev), eth);
4667 } else {
4668 err = devm_request_irq(eth->dev, eth->irq[1],
4669 mtk_handle_irq_tx, 0,
4670 dev_name(eth->dev), eth);
4671 if (err)
4672 goto err_free_dev;
4673
4674 err = devm_request_irq(eth->dev, eth->irq[2],
4675 mtk_handle_irq_rx, 0,
developer18f46a82021-07-20 21:08:21 +08004676 dev_name(eth->dev), &eth->rx_napi[0]);
4677 if (err)
4678 goto err_free_dev;
4679
developer793f7b42022-05-20 13:54:51 +08004680 if (MTK_MAX_IRQ_NUM > 3) {
4681 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
4682 for (i = 1; i < MTK_RX_NAPI_NUM; i++) {
4683 err = devm_request_irq(eth->dev,
4684 eth->irq[2 + i],
4685 mtk_handle_irq_rx, 0,
4686 dev_name(eth->dev),
4687 &eth->rx_napi[i]);
4688 if (err)
4689 goto err_free_dev;
4690 }
4691 } else {
4692 err = devm_request_irq(eth->dev, eth->irq[3],
4693 mtk_handle_fe_irq, 0,
4694 dev_name(eth->dev), eth);
developer18f46a82021-07-20 21:08:21 +08004695 if (err)
4696 goto err_free_dev;
4697 }
4698 }
developerfd40db22021-04-29 10:08:25 +08004699 }
developer8051e042022-04-08 13:26:36 +08004700
developerfd40db22021-04-29 10:08:25 +08004701 if (err)
4702 goto err_free_dev;
4703
4704 /* No MT7628/88 support yet */
4705 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4706 err = mtk_mdio_init(eth);
4707 if (err)
4708 goto err_free_dev;
4709 }
4710
4711 for (i = 0; i < MTK_MAX_DEVS; i++) {
4712 if (!eth->netdev[i])
4713 continue;
4714
4715 err = register_netdev(eth->netdev[i]);
4716 if (err) {
4717 dev_err(eth->dev, "error bringing up device\n");
4718 goto err_deinit_mdio;
4719 } else
4720 netif_info(eth, probe, eth->netdev[i],
4721 "mediatek frame engine at 0x%08lx, irq %d\n",
4722 eth->netdev[i]->base_addr, eth->irq[0]);
4723 }
4724
4725 /* we run 2 devices on the same DMA ring so we need a dummy device
4726 * for NAPI to work
4727 */
4728 init_dummy_netdev(&eth->dummy_dev);
4729 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
4730 MTK_NAPI_WEIGHT);
developer18f46a82021-07-20 21:08:21 +08004731 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[0].napi, mtk_napi_rx,
developerfd40db22021-04-29 10:08:25 +08004732 MTK_NAPI_WEIGHT);
4733
developer18f46a82021-07-20 21:08:21 +08004734 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
4735 for (i = 1; i < MTK_RX_NAPI_NUM; i++)
4736 netif_napi_add(&eth->dummy_dev, &eth->rx_napi[i].napi,
4737 mtk_napi_rx, MTK_NAPI_WEIGHT);
4738 }
4739
#if defined(CONFIG_XFRM_OFFLOAD)
	mtk_ipsec_offload_init(eth);
#endif
	mtketh_debugfs_init(eth);
	debug_proc_init(eth);

	platform_set_drvdata(pdev, eth);

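	/* Listen for netdev events; on NETSYS v2/v3 also arm the periodic
	 * DMA monitor timer.
	 */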
	register_netdevice_notifier(&mtk_eth_netdevice_nb);
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
	timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
	eth->mtk_dma_monitor_timer.expires = jiffies;
	add_timer(&eth->mtk_dma_monitor_timer);
#endif

	return 0;

err_deinit_mdio:
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);

	return err;
}

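/* Unwind probe: stop the MACs so DMA is quiesced, then release the
 * hardware, NAPI contexts, MDIO bus, notifier and monitor timer.
 */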
static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	struct mtk_mac *mac;
	int i;

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		mac = netdev_priv(eth->netdev[i]);
		phylink_disconnect_phy(mac->phylink);
	}

	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi[0].napi);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
		for (i = 1; i < MTK_RX_NAPI_NUM; i++)
			netif_napi_del(&eth->rx_napi[i].napi);
	}

	mtk_cleanup(eth);
	mtk_mdio_cleanup(eth);
	unregister_netdevice_notifier(&mtk_eth_netdevice_nb);
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
	/* Probe only arms the DMA monitor timer on NETSYS v2/v3, so only
	 * delete it there; on other SoCs the timer was never initialized.
	 */
	del_timer_sync(&eth->mtk_dma_monitor_timer);
#endif

	return 0;
}

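/* Per-SoC configuration: register layout, capability flags, required
 * clocks/pinctrl, whether ring memory may come from SRAM, and the TX/RX
 * descriptor geometry consumed by the DMA code.
 */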
static const struct mtk_soc_data mt2701_data = {
	.reg_map = &mtk_reg_map,
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
	.has_sram = false,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_dma_l4_valid = RX_DMA_L4_VALID,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

static const struct mtk_soc_data mt7621_data = {
	.reg_map = &mtk_reg_map,
	.caps = MT7621_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7621_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = false,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_dma_l4_valid = RX_DMA_L4_VALID,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

static const struct mtk_soc_data mt7622_data = {
	.reg_map = &mtk_reg_map,
	.ana_rgc3 = 0x2028,
	.caps = MT7622_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7622_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = false,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_dma_l4_valid = RX_DMA_L4_VALID,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

static const struct mtk_soc_data mt7623_data = {
	.reg_map = &mtk_reg_map,
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
	.has_sram = false,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_dma_l4_valid = RX_DMA_L4_VALID,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

static const struct mtk_soc_data mt7629_data = {
	.reg_map = &mtk_reg_map,
	.ana_rgc3 = 0x128,
	.caps = MT7629_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7629_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = false,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_dma_l4_valid = RX_DMA_L4_VALID,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

static const struct mtk_soc_data mt7986_data = {
	.reg_map = &mt7986_reg_map,
	.ana_rgc3 = 0x128,
	.caps = MT7986_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7986_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = true,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma_v2),
		.rxd_size = sizeof(struct mtk_rx_dma_v2),
		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
	},
};

static const struct mtk_soc_data mt7981_data = {
	.reg_map = &mt7986_reg_map,
	.ana_rgc3 = 0x128,
	.caps = MT7981_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7981_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = true,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma_v2),
		.rxd_size = sizeof(struct mtk_rx_dma_v2),
		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
	},
};

static const struct mtk_soc_data mt7988_data = {
	.reg_map = &mt7988_reg_map,
	.ana_rgc3 = 0x128,
	.caps = MT7988_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7988_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = true,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma_v2),
		.rxd_size = sizeof(struct mtk_rx_dma_v2),
		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
	},
};

static const struct mtk_soc_data rt5350_data = {
	.reg_map = &mt7628_reg_map,
	.caps = MT7628_CAPS,
	.hw_features = MTK_HW_FEATURES_MT7628,
	.required_clks = MT7628_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = false,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

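/* OF match table binding each SoC compatible to its mtk_soc_data.  A
 * minimal, illustrative device tree fragment (the node name, unit
 * address and phy-mode below are examples only, not copied from a real
 * dtsi):
 *
 *	eth: ethernet@15100000 {
 *		compatible = "mediatek,mt7986-eth";
 *
 *		gmac0: mac@0 {
 *			compatible = "mediatek,eth-mac";
 *			reg = <0>;
 *			phy-mode = "2500base-x";
 *		};
 *	};
 */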
const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
	{ .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
	{ .compatible = "mediatek,mt7988-eth", .data = &mt7988_data},
	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");