// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>

#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
#include "mtk_eth_reset.h"

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
#include "mtk_hnat/nf_hnat_mtk.h"
#endif

static int mtk_msg_level = -1;
atomic_t reset_lock = ATOMIC_INIT(0);
atomic_t force = ATOMIC_INIT(0);

module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
DECLARE_COMPLETION(wait_ser_done);

#define MTK_ETHTOOL_STAT(x) { #x, \
			     offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

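/* Illustrative note (not part of the original file): MTK_ETHTOOL_STAT()
 * pairs a counter name with its u64-slot index inside struct mtk_hw_stats,
 * so the ethtool stats code can treat the struct as a flat u64 array.
 * For example:
 *
 *	MTK_ETHTOOL_STAT(tx_bytes)
 * expands to
 *	{ "tx_bytes", offsetof(struct mtk_hw_stats, tx_bytes) / sizeof(u64) }
 *
 * and a counter can then be fetched as ((u64 *)hw_stats)[offset].
 */
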
static const struct mtk_reg_map mtk_reg_map = {
	.tx_irq_mask = 0x1a1c,
	.tx_irq_status = 0x1a18,
	.pdma = {
		.tx_ptr = 0x0800,
		.tx_cnt_cfg = 0x0804,
		.pctx_ptr = 0x0808,
		.pdtx_ptr = 0x080c,
		.rx_ptr = 0x0900,
		.rx_cnt_cfg = 0x0904,
		.pcrx_ptr = 0x0908,
		.glo_cfg = 0x0a04,
		.rst_idx = 0x0a08,
		.delay_irq = 0x0a0c,
		.irq_status = 0x0a20,
		.irq_mask = 0x0a28,
		.int_grp = 0x0a50,
		.int_grp2 = 0x0a54,
	},
	.qdma = {
		.qtx_cfg = 0x1800,
		.qtx_sch = 0x1804,
		.rx_ptr = 0x1900,
		.rx_cnt_cfg = 0x1904,
		.qcrx_ptr = 0x1908,
		.glo_cfg = 0x1a04,
		.rst_idx = 0x1a08,
		.delay_irq = 0x1a0c,
		.fc_th = 0x1a10,
		.tx_sch_rate = 0x1a14,
		.int_grp = 0x1a20,
		.int_grp2 = 0x1a24,
		.hred2 = 0x1a44,
		.ctx_ptr = 0x1b00,
		.dtx_ptr = 0x1b04,
		.crx_ptr = 0x1b10,
		.drx_ptr = 0x1b14,
		.fq_head = 0x1b20,
		.fq_tail = 0x1b24,
		.fq_count = 0x1b28,
		.fq_blen = 0x1b2c,
	},
	.gdm1_cnt = 0x2400,
	.gdma_to_ppe0 = 0x4444,
	.ppe_base = {
		[0] = 0x0c00,
	},
	.wdma_base = {
		[0] = 0x2800,
		[1] = 0x2c00,
	},
};

static const struct mtk_reg_map mt7628_reg_map = {
	.tx_irq_mask = 0x0a28,
	.tx_irq_status = 0x0a20,
	.pdma = {
		.tx_ptr = 0x0800,
		.tx_cnt_cfg = 0x0804,
		.pctx_ptr = 0x0808,
		.pdtx_ptr = 0x080c,
		.rx_ptr = 0x0900,
		.rx_cnt_cfg = 0x0904,
		.pcrx_ptr = 0x0908,
		.glo_cfg = 0x0a04,
		.rst_idx = 0x0a08,
		.delay_irq = 0x0a0c,
		.irq_status = 0x0a20,
		.irq_mask = 0x0a28,
		.int_grp = 0x0a50,
		.int_grp2 = 0x0a54,
	},
};

static const struct mtk_reg_map mt7986_reg_map = {
	.tx_irq_mask = 0x461c,
	.tx_irq_status = 0x4618,
	.pdma = {
		.tx_ptr = 0x4000,
		.tx_cnt_cfg = 0x4004,
		.pctx_ptr = 0x4008,
		.pdtx_ptr = 0x400c,
		.rx_ptr = 0x4100,
		.rx_cnt_cfg = 0x4104,
		.pcrx_ptr = 0x4108,
		.glo_cfg = 0x4204,
		.rst_idx = 0x4208,
		.delay_irq = 0x420c,
		.irq_status = 0x4220,
		.irq_mask = 0x4228,
		.int_grp = 0x4250,
		.int_grp2 = 0x4254,
	},
	.qdma = {
		.qtx_cfg = 0x4400,
		.qtx_sch = 0x4404,
		.rx_ptr = 0x4500,
		.rx_cnt_cfg = 0x4504,
		.qcrx_ptr = 0x4508,
		.glo_cfg = 0x4604,
		.rst_idx = 0x4608,
		.delay_irq = 0x460c,
		.fc_th = 0x4610,
		.int_grp = 0x4620,
		.int_grp2 = 0x4624,
		.hred2 = 0x4644,
		.ctx_ptr = 0x4700,
		.dtx_ptr = 0x4704,
		.crx_ptr = 0x4710,
		.drx_ptr = 0x4714,
		.fq_head = 0x4720,
		.fq_tail = 0x4724,
		.fq_count = 0x4728,
		.fq_blen = 0x472c,
		.tx_sch_rate = 0x4798,
	},
	.gdm1_cnt = 0x1c00,
	.gdma_to_ppe0 = 0x3333,
	.ppe_base = {
		[0] = 0x2000,
		[1] = 0x2400,
	},
	.wdma_base = {
		[0] = 0x4800,
		[1] = 0x4c00,
	},
};

static const struct mtk_reg_map mt7988_reg_map = {
	.tx_irq_mask = 0x461c,
	.tx_irq_status = 0x4618,
	.pdma = {
		.tx_ptr = 0x6800,
		.tx_cnt_cfg = 0x6804,
		.pctx_ptr = 0x6808,
		.pdtx_ptr = 0x680c,
		.rx_ptr = 0x6900,
		.rx_cnt_cfg = 0x6904,
		.pcrx_ptr = 0x6908,
		.glo_cfg = 0x6a04,
		.rst_idx = 0x6a08,
		.delay_irq = 0x6a0c,
		.irq_status = 0x6a20,
		.irq_mask = 0x6a28,
		.int_grp = 0x6a50,
		.int_grp2 = 0x6a54,
	},
	.qdma = {
		.qtx_cfg = 0x4400,
		.qtx_sch = 0x4404,
		.rx_ptr = 0x4500,
		.rx_cnt_cfg = 0x4504,
		.qcrx_ptr = 0x4508,
		.glo_cfg = 0x4604,
		.rst_idx = 0x4608,
		.delay_irq = 0x460c,
		.fc_th = 0x4610,
		.int_grp = 0x4620,
		.int_grp2 = 0x4624,
		.hred2 = 0x4644,
		.ctx_ptr = 0x4700,
		.dtx_ptr = 0x4704,
		.crx_ptr = 0x4710,
		.drx_ptr = 0x4714,
		.fq_head = 0x4720,
		.fq_tail = 0x4724,
		.fq_count = 0x4728,
		.fq_blen = 0x472c,
		.tx_sch_rate = 0x4798,
	},
	.gdm1_cnt = 0x1c00,
	.gdma_to_ppe0 = 0x3333,
	.ppe_base = {
		[0] = 0x2000,
		[1] = 0x2400,
		[2] = 0x2c00,
	},
	.wdma_base = {
		[0] = 0x4800,
		[1] = 0x4c00,
		[2] = 0x5000,
	},
};

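/* Illustrative sketch (not part of the original file): the per-SoC register
 * maps above keep common code offset-agnostic. A hypothetical helper that
 * reads the PDMA interrupt status would only touch the map, e.g.:
 */
#if 0
static u32 example_pdma_irq_status(struct mtk_eth *eth)
{
	/* eth->soc->reg_map points at mtk_reg_map, mt7628_reg_map,
	 * mt7986_reg_map or mt7988_reg_map, depending on the SoC.
	 */
	return mtk_r32(eth, eth->soc->reg_map->pdma.irq_status);
}
#endif
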
/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "gp3",
	"xgp1", "xgp2", "xgp3", "crypto", "fe", "trgpll",
	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
	"sgmii_ck", "eth2pll", "wocpu0", "wocpu1",
	"ethwarp_wocpu2", "ethwarp_wocpu1", "ethwarp_wocpu0",
	"top_usxgmii0_sel", "top_usxgmii1_sel", "top_sgm0_sel", "top_sgm1_sel",
	"top_xfi_phy0_xtal_sel", "top_xfi_phy1_xtal_sel", "top_eth_gmii_sel",
	"top_eth_refck_50m_sel", "top_eth_sys_200m_sel", "top_eth_sys_sel",
	"top_eth_xgmii_sel", "top_eth_mii_sel", "top_netsys_sel",
	"top_netsys_500m_sel", "top_netsys_pao_2x_sel",
	"top_netsys_sync_250m_sel", "top_netsys_ppefb_250m_sel",
	"top_netsys_warp_sel", "top_macsec_sel",
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
	u32 val;

	val = mtk_r32(eth, reg);
	val &= ~mask;
	val |= set;
	mtk_w32(eth, val, reg);
	return reg;
}

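/* Illustrative usage (not part of the original file): mtk_m32() is a
 * read-modify-write helper; the bits in 'mask' are cleared first, then the
 * bits in 'set' are OR-ed in. For example, to clear bit 1 and set bit 0:
 *
 *	mtk_m32(eth, BIT(1), BIT(0), reg);
 *
 * Note that the helper returns the register offset, not the written value.
 */
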
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		cond_resched();
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

u32 _mtk_mdio_write(struct mtk_eth *eth, int phy_addr,
		    int phy_reg, u16 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return -1;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_WRITE |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	if (phy_reg & MII_ADDR_C45) {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
			MTK_PHY_IAC);

		if (mtk_mdio_busy_wait(eth))
			return 0xffff;

		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_READ_C45 |
			((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	} else {
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
			((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
			((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
			MTK_PHY_IAC);
	}

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

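/* Illustrative sketch (not part of the original file): for clause-45
 * accesses, the caller encodes the MMD (device) address and register
 * address into 'phy_reg' together with the MII_ADDR_C45 flag; the helpers
 * above then split it again with mdiobus_c45_devad()/mdiobus_c45_regad().
 * A hypothetical read of MMD 7, register 0 on PHY address 0:
 */
#if 0
static u32 example_c45_read(struct mtk_eth *eth)
{
	int reg = MII_ADDR_C45 | (7 << MII_DEVADDR_C45_SHIFT) | 0;

	return _mtk_mdio_read(eth, 0, reg);
}
#endif
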
static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

static int mtk_mdio_reset(struct mii_bus *bus)
{
	/* mdiobus_register() already triggers a reset pulse when bus reset
	 * is enabled; we just need to wait until the device is ready.
	 */
	mdelay(20);

	return 0;
}

static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
				     phy_interface_t interface)
{
	u32 val = 0;

	/* Check DDR memory type.
	 * Currently TRGMII mode with DDR2 memory is not supported.
	 */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
	if (interface == PHY_INTERFACE_MODE_TRGMII &&
	    val & SYSCFG_DRAM_TYPE_DDR2) {
		dev_err(eth->dev,
			"TRGMII mode with DDR2 memory is not supported!\n");
		return -EOPNOTSUPP;
	}

	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_MT7621_MASK, val);

	return 0;
}

static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
				   phy_interface_t interface, int speed)
{
	u32 val;
	int ret;

	if (interface == PHY_INTERFACE_MODE_TRGMII) {
		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
		val = 500000000;
		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
		if (ret)
			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
		return;
	}

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

static void mtk_setup_bridge_switch(struct mtk_eth *eth)
{
	int val;

	/* Force Port1 XGMAC Link Up */
	val = mtk_r32(eth, MTK_XGMAC_STS(MTK_GMAC1_ID));
	mtk_w32(eth, val | MTK_XGMAC_FORCE_MODE(MTK_GMAC1_ID),
		MTK_XGMAC_STS(MTK_GMAC1_ID));

	/* Adjust GSW bridge IPG to 11 */
	val = mtk_r32(eth, MTK_GSW_CFG);
	val &= ~(GSWTX_IPG_MASK | GSWRX_IPG_MASK);
	val |= (GSW_IPG_11 << GSWTX_IPG_SHIFT) |
	       (GSW_IPG_11 << GSWRX_IPG_SHIFT);
	mtk_w32(eth, val, MTK_GSW_CFG);
}

static bool mtk_check_gmac23_idle(struct mtk_mac *mac)
{
	u32 mac_fsm, gdm_fsm;

	mac_fsm = mtk_r32(mac->hw, MTK_MAC_FSM(mac->id));

	switch (mac->id) {
	case MTK_GMAC2_ID:
		gdm_fsm = mtk_r32(mac->hw, MTK_FE_GDM2_FSM);
		break;
	case MTK_GMAC3_ID:
		gdm_fsm = mtk_r32(mac->hw, MTK_FE_GDM3_FSM);
		break;
	default:
		return true;
	}

	if ((mac_fsm & 0xFFFF0000) == 0x01010000 &&
	    (gdm_fsm & 0xFFFF0000) == 0x00000000)
		return true;

	return false;
}

static void mtk_setup_eee(struct mtk_mac *mac, bool enable)
{
	struct mtk_eth *eth = mac->hw;
	u32 mcr, mcr_cur;
	u32 val;

	mcr = mcr_cur = mtk_r32(eth, MTK_MAC_MCR(mac->id));
	mcr &= ~(MAC_MCR_FORCE_EEE100 | MAC_MCR_FORCE_EEE1000);

	if (enable) {
		mac->tx_lpi_enabled = 1;

		val = FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 19) |
		      FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 33) |
		      FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD,
				 mac->tx_lpi_timer) |
		      FIELD_PREP(MAC_EEE_RESV0, 14);
		mtk_w32(eth, val, MTK_MAC_EEE(mac->id));

		switch (mac->speed) {
		case SPEED_1000:
			mcr |= MAC_MCR_FORCE_EEE1000;
			break;
		case SPEED_100:
			mcr |= MAC_MCR_FORCE_EEE100;
			break;
		}
	} else {
		mac->tx_lpi_enabled = 0;

		mtk_w32(eth, 0x00000002, MTK_MAC_EEE(mac->id));
	}

	/* Only update control register when needed! */
	if (mcr != mcr_cur)
		mtk_w32(eth, mcr, MTK_MAC_MCR(mac->id));
}

static int mtk_get_hwver(struct mtk_eth *eth)
{
	struct device_node *np;
	struct regmap *hwver;
	u32 info = 0;

	eth->hwver = MTK_HWID_V1;

	np = of_parse_phandle(eth->dev->of_node, "mediatek,hwver", 0);
	if (!np)
		return -EINVAL;

	hwver = syscon_node_to_regmap(np);
	if (IS_ERR(hwver))
		return PTR_ERR(hwver);

	regmap_read(hwver, 0x8, &info);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
		eth->hwver = FIELD_GET(HWVER_BIT_NETSYS_3, info);
	else
		eth->hwver = FIELD_GET(HWVER_BIT_NETSYS_1_2, info);

	of_node_put(np);

	return 0;
}

static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
					      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	unsigned int sid;

	if (interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(interface)) {
		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
		       0 : mtk_mac2xgmii_id(eth, mac->id);

		return mtk_sgmii_select_pcs(eth->sgmii, sid);
	} else if (interface == PHY_INTERFACE_MODE_USXGMII ||
		   interface == PHY_INTERFACE_MODE_10GKR ||
		   interface == PHY_INTERFACE_MODE_5GBASER) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) &&
		    mac->id != MTK_GMAC1_ID) {
			sid = mtk_mac2xgmii_id(eth, mac->id);

			return mtk_usxgmii_select_pcs(eth->usxgmii, sid);
		}
	}

	return NULL;
}

static int mtk_mac_prepare(struct phylink_config *config, unsigned int mode,
			   phy_interface_t iface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 val;

	if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
		val = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
		val &= 0xfffffff0;
		val |= XMAC_MCR_TRX_DISABLE;
		mtk_w32(mac->hw, val, MTK_XMAC_MCR(mac->id));

		val = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));
		val |= MTK_XGMAC_FORCE_MODE(mac->id);
		val &= ~MTK_XGMAC_FORCE_LINK(mac->id);
		mtk_w32(mac->hw, val, MTK_XGMAC_STS(mac->id));
	}

	return 0;
}

static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	u32 i;
	int val = 0, ge_mode, err = 0;
	unsigned int mac_type = mac->type;

	/* MT76x8 has no hardware settings for the MAC */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
	    mac->interface != state->interface) {
		/* Setup soc pin functions */
		switch (state->interface) {
		case PHY_INTERFACE_MODE_TRGMII:
			if (mac->id)
				goto err_phy;
			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
					  MTK_GMAC1_TRGMII))
				goto err_phy;
			/* fall through */
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_REVMII:
		case PHY_INTERFACE_MODE_RMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
		case PHY_INTERFACE_MODE_2500BASEX:
		case PHY_INTERFACE_MODE_SGMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_GMII:
			mac->type = MTK_GDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
				err = mtk_gmac_gephy_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_XGMII:
			mac->type = MTK_XGDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_XGMII)) {
				err = mtk_gmac_xgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_USXGMII:
		case PHY_INTERFACE_MODE_10GKR:
		case PHY_INTERFACE_MODE_5GBASER:
			mac->type = MTK_XGDM_TYPE;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
				err = mtk_gmac_usxgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		default:
			goto err_phy;
		}

		/* Setup clock for 1st gmac */
		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
		    !phy_interface_mode_is_8023z(state->interface) &&
		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
			if (MTK_HAS_CAPS(mac->hw->soc->caps,
					 MTK_TRGMII_MT7621_CLK)) {
				if (mt7621_gmac0_rgmii_adjust(mac->hw,
							      state->interface))
					goto err_phy;
			} else {
				mtk_gmac0_rgmii_adjust(mac->hw,
						       state->interface,
						       state->speed);

				/* mt7623_pad_clk_setup */
				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
					mtk_w32(mac->hw,
						TD_DM_DRVP(8) | TD_DM_DRVN(8),
						TRGMII_TD_ODT(i));

				/* Assert/release MT7623 RXC reset */
				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
					TRGMII_RCK_CTRL);
				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
			}
		}

		ge_mode = 0;
		switch (state->interface) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_GMII:
			ge_mode = 1;
			break;
		case PHY_INTERFACE_MODE_REVMII:
			ge_mode = 2;
			break;
		case PHY_INTERFACE_MODE_RMII:
			if (mac->id)
				goto err_phy;
			ge_mode = 3;
			break;
		default:
			break;
		}

		/* put the gmac into the right mode */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
		spin_unlock(&eth->syscfg0_lock);

		mac->interface = state->interface;
	}

	/* SGMII */
	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path from GMAC to SGMII will be enabled once the
		 * SGMIISYS setup is done.
		 */
		spin_lock(&eth->syscfg0_lock);
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK,
				   ~(u32)SYSCFG0_SGMII_MASK);

		/* Save the syscfg0 value for mac_finish */
		mac->syscfg0 = val;
		spin_unlock(&eth->syscfg0_lock);
	} else if (state->interface == PHY_INTERFACE_MODE_USXGMII ||
		   state->interface == PHY_INTERFACE_MODE_10GKR ||
		   state->interface == PHY_INTERFACE_MODE_5GBASER) {
		/* Nothing to do */
	} else if (phylink_autoneg_inband(mode)) {
		dev_err(eth->dev,
			"In-band mode not supported in non SGMII mode!\n");
		return;
	}

	/* Setup gmac */
	if (mac->type == MTK_XGDM_TYPE) {
		mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
		mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			if (mac->id == MTK_GMAC1_ID)
				mtk_setup_bridge_switch(eth);
		}
	} else if (mac->type == MTK_GDM_TYPE) {
		val = mtk_r32(eth, MTK_GDMA_EG_CTRL(mac->id));
		mtk_w32(eth, val & ~MTK_GDMA_XGDM_SEL,
			MTK_GDMA_EG_CTRL(mac->id));

		/* FIXME: In current hardware design, we have to reset FE
		 * when switching XGDM to GDM. Therefore, here trigger an SER
		 * to let GDM go back to the initial state.
		 */
		if (mac->type != mac_type && !mtk_check_gmac23_idle(mac)) {
			if (!test_bit(MTK_RESETTING, &mac->hw->state)) {
				atomic_inc(&force);
				schedule_work(&eth->pending_work);
			}
		}
	}

	return;

err_phy:
	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
		mac->id, phy_modes(state->interface));
	return;

init_err:
	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
		mac->id, phy_modes(state->interface), err);
}

static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
			  phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;

	/* Enable SGMII */
	if (interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(interface))
		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, mac->syscfg0);

	return 0;
}

static int mtk_mac_pcs_get_state(struct phylink_config *config,
				 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	if (mac->type == MTK_XGDM_TYPE) {
		u32 sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));

		if (mac->id == MTK_GMAC2_ID)
			sts = sts >> 16;

		state->duplex = DUPLEX_FULL;

		switch (FIELD_GET(MTK_USXGMII_PCS_MODE, sts)) {
		case 0:
			state->speed = SPEED_10000;
			break;
		case 1:
			state->speed = SPEED_5000;
			break;
		case 2:
			state->speed = SPEED_2500;
			break;
		case 3:
			state->speed = SPEED_1000;
			break;
		}

		state->interface = mac->interface;
		state->link = FIELD_GET(MTK_USXGMII_PCS_LINK, sts);
	} else if (mac->type == MTK_GDM_TYPE) {
		struct mtk_eth *eth = mac->hw;
		struct mtk_sgmii *ss = eth->sgmii;
		u32 id = mtk_mac2xgmii_id(eth, mac->id);
		u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
		u32 bm, adv, rgc3, sgm_mode;

		state->interface = mac->interface;

		regmap_read(ss->pcs[id].regmap, SGMSYS_PCS_CONTROL_1, &bm);
		if (bm & SGMII_AN_ENABLE) {
			regmap_read(ss->pcs[id].regmap,
				    SGMSYS_PCS_ADVERTISE, &adv);

			phylink_mii_c22_pcs_decode_state(
				state,
				FIELD_GET(SGMII_BMSR, bm),
				FIELD_GET(SGMII_LPA, adv));
		} else {
			state->link = !!(bm & SGMII_LINK_STATYS);

			regmap_read(ss->pcs[id].regmap,
				    SGMSYS_SGMII_MODE, &sgm_mode);

			switch (sgm_mode & SGMII_SPEED_MASK) {
			case SGMII_SPEED_10:
				state->speed = SPEED_10;
				break;
			case SGMII_SPEED_100:
				state->speed = SPEED_100;
				break;
			case SGMII_SPEED_1000:
				regmap_read(ss->pcs[id].regmap,
					    ss->pcs[id].ana_rgc3, &rgc3);
				rgc3 = FIELD_GET(RG_PHY_SPEED_3_125G, rgc3);
				state->speed = rgc3 ? SPEED_2500 : SPEED_1000;
				break;
			}

			if (sgm_mode & SGMII_DUPLEX_HALF)
				state->duplex = DUPLEX_HALF;
			else
				state->duplex = DUPLEX_FULL;
		}

		state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
		if (pmsr & MAC_MSR_RX_FC)
			state->pause |= MLO_PAUSE_RX;
		if (pmsr & MAC_MSR_TX_FC)
			state->pause |= MLO_PAUSE_TX;
	}

	return 1;
}

static int mtk_gdm_fsm_get(struct mtk_mac *mac, u32 gdm)
{
	u32 fsm = mtk_r32(mac->hw, gdm);
	u32 ret = 0, val = 0;

	switch (mac->type) {
	case MTK_GDM_TYPE:
		ret = fsm == 0;
		break;
	case MTK_XGDM_TYPE:
		ret = fsm == 0x10000000;
		break;
	default:
		break;
	}

	if ((mac->type == MTK_XGDM_TYPE) && (mac->id != MTK_GMAC1_ID)) {
		val = mtk_r32(mac->hw, MTK_MAC_FSM(mac->id));
		if ((val == 0x02010100) || (val == 0x01010100))
			ret = (mac->interface == PHY_INTERFACE_MODE_XGMII) ?
			      ((fsm & 0x0fffffff) == 0) : ((fsm & 0x00ffffff) == 0);
		else
			ret = 0;
	}

	return ret;
}

static void mtk_gdm_fsm_poll(struct mtk_mac *mac)
{
	u32 gdm = 0, i = 0;

	switch (mac->id) {
	case MTK_GMAC1_ID:
		gdm = MTK_FE_GDM1_FSM;
		break;
	case MTK_GMAC2_ID:
		gdm = MTK_FE_GDM2_FSM;
		break;
	case MTK_GMAC3_ID:
		gdm = MTK_FE_GDM3_FSM;
		break;
	default:
		pr_info("%s mac id invalid", __func__);
		break;
	}

	while (i < 3) {
		if (mtk_gdm_fsm_get(mac, gdm))
			break;
		msleep(500);
		i++;
	}

	if (i == 3)
		pr_info("%s fsm invalid", __func__);
}

static void mtk_pse_port_link_set(struct mtk_mac *mac, bool up,
				  phy_interface_t interface)
{
	u32 fe_glo_cfg, val = 0;

	if (!up && interface == PHY_INTERFACE_MODE_XGMII) {
		void __iomem *base;

		base = ioremap(0x0F0CFB00, SZ_4K);
		if (base) {
			/* wait for internal 2.5G PHY to turn off */
			usleep_range(100, 1000);
			/* enable the XGMAC clock for 10 msecs to
			 * flush the packets.
			 */
			writel(readl(base) | BIT(9), base);
			usleep_range(10000, 11000);
			writel(readl(base) & ~BIT(9), base);
			iounmap(base);
		}
	}

	fe_glo_cfg = mtk_r32(mac->hw, MTK_FE_GLO_CFG(mac->id));
	switch (mac->id) {
	case MTK_GMAC1_ID:
		val = MTK_FE_LINK_DOWN_P1;
		break;
	case MTK_GMAC2_ID:
		val = MTK_FE_LINK_DOWN_P2;
		break;
	case MTK_GMAC3_ID:
		val = MTK_FE_LINK_DOWN_P15;
		break;
	}

	if (!up)
		fe_glo_cfg |= val;
	else
		fe_glo_cfg &= ~val;

	mtk_w32(mac->hw, fe_glo_cfg, MTK_FE_GLO_CFG(mac->id));
	mtk_gdm_fsm_poll(mac);
}

static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	unsigned int id;
	u32 mcr, sts;

	mtk_pse_port_link_set(mac, false, interface);
	if (mac->type == MTK_GDM_TYPE) {
		mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
		mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
		mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
	} else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
		struct mtk_usxgmii_pcs *mpcs;

		mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
		mcr &= 0xfffffff0;
		mcr |= XMAC_MCR_TRX_DISABLE;
		mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));

		sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));
		sts &= ~MTK_XGMAC_FORCE_LINK(mac->id);
		mtk_w32(mac->hw, sts, MTK_XGMAC_STS(mac->id));

		id = mtk_mac2xgmii_id(eth, mac->id);
		mpcs = &eth->usxgmii->pcs[id];
	}
}

static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
				int speed)
{
	const struct mtk_soc_data *soc = eth->soc;
	u32 val;

	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
		return;

	val = MTK_QTX_SCH_MIN_RATE_EN |
	      /* minimum: 10 Mbps */
	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
	      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1))
		val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;

	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
		switch (speed) {
		case SPEED_10:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
			break;
		case SPEED_100:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
			break;
		case SPEED_1000:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
			break;
		default:
			break;
		}
	} else {
		switch (speed) {
		case SPEED_10:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
			break;
		case SPEED_100:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
			break;
		case SPEED_1000:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
			break;
		case SPEED_2500:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 25) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
			break;
		case SPEED_10000:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 100) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
			break;
		default:
			break;
		}
	}

	mtk_w32(eth, (idx / MTK_QTX_PER_PAGE) & MTK_QTX_CFG_PAGE, MTK_QDMA_PAGE);
	mtk_w32(eth, val, MTK_QTX_SCH(idx));
}

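/* Illustrative note (not part of the original file): assuming the QDMA
 * shaper rate decodes as MAN * 10^EXP in kbit/s, which matches the values
 * programmed above, the non-MT7621 table reads:
 *
 *	MAN=1,   EXP=4  ->     10,000 kbit/s =  10 Mbit/s
 *	MAN=1,   EXP=5  ->    100,000 kbit/s = 100 Mbit/s
 *	MAN=10,  EXP=5  ->  1,000,000 kbit/s =   1 Gbit/s
 *	MAN=25,  EXP=5  ->  2,500,000 kbit/s = 2.5 Gbit/s
 *	MAN=100, EXP=5  -> 10,000,000 kbit/s =  10 Gbit/s
 */
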
static void mtk_mac_link_up(struct phylink_config *config,
			    struct phy_device *phy,
			    unsigned int mode, phy_interface_t interface,
			    int speed, int duplex, bool tx_pause, bool rx_pause)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr, mcr_cur, sts;

	mac->speed = speed;

	if (mac->type == MTK_GDM_TYPE) {
		mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
		mcr = mcr_cur;
		mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
			 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
			 MAC_MCR_FORCE_RX_FC);
		mcr |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
		       MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;

		/* Configure speed */
		switch (speed) {
		case SPEED_2500:
		case SPEED_1000:
			mcr |= MAC_MCR_SPEED_1000;
			break;
		case SPEED_100:
			mcr |= MAC_MCR_SPEED_100;
			break;
		}

		/* Configure duplex */
		if (duplex == DUPLEX_FULL ||
		    interface == PHY_INTERFACE_MODE_SGMII)
			mcr |= MAC_MCR_FORCE_DPX;
		else if (interface == PHY_INTERFACE_MODE_GMII)
			mcr |= MAC_MCR_PRMBL_LMT_EN;

		/* Configure pause modes -
		 * phylink will avoid these for half duplex
		 */
		if (tx_pause)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (rx_pause)
			mcr |= MAC_MCR_FORCE_RX_FC;

		mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;

		/* Only update control register when needed! */
		if (mcr != mcr_cur)
			mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

		if (mode == MLO_AN_PHY && phy)
			mtk_setup_eee(mac, phy_init_eee(phy, false) >= 0);
	} else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
		if (mode == MLO_AN_INBAND)
			mdelay(1000);

		/* Eliminate the interference (before link-up) caused by PHY noise */
		mtk_m32(mac->hw, XMAC_LOGIC_RST, 0x0, MTK_XMAC_LOGIC_RST(mac->id));
		mdelay(20);
		mtk_m32(mac->hw, XMAC_GLB_CNTCLR, 0x1, MTK_XMAC_CNT_CTRL(mac->id));

		sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));
		sts |= MTK_XGMAC_FORCE_LINK(mac->id);
		mtk_w32(mac->hw, sts, MTK_XGMAC_STS(mac->id));

		mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));

		mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC);
		/* Configure pause modes -
		 * phylink will avoid these for half duplex
		 */
		if (tx_pause)
			mcr |= XMAC_MCR_FORCE_TX_FC;
		if (rx_pause)
			mcr |= XMAC_MCR_FORCE_RX_FC;

		mcr &= ~(XMAC_MCR_TRX_DISABLE);
		mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
	}
	mtk_pse_port_link_set(mac, true, interface);
}

static void mtk_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
	      phy_interface_mode_is_rgmii(state->interface)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
	       phy_interface_mode_is_8023z(state->interface))) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_XGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_XGMII)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_USXGMII)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_10GKR))) {
		linkmode_zero(supported);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_USXGMII:
	case PHY_INTERFACE_MODE_10GKR:
		phylink_set(mask, 10000baseKR_Full);
		phylink_set(mask, 10000baseT_Full);
		phylink_set(mask, 10000baseCR_Full);
		phylink_set(mask, 10000baseSR_Full);
		phylink_set(mask, 10000baseLR_Full);
		phylink_set(mask, 10000baseLRM_Full);
		phylink_set(mask, 10000baseER_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		phylink_set(mask, 1000baseT_Half);
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		phylink_set(mask, 5000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_TRGMII:
		phylink_set(mask, 1000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_XGMII:
		/* fall through */
	case PHY_INTERFACE_MODE_1000BASEX:
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 2500baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseT_Half);
		/* fall through */
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_REVMII:
	case PHY_INTERFACE_MODE_NA:
	default:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		break;
	}

	if (state->interface == PHY_INTERFACE_MODE_NA) {
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII)) {
			phylink_set(mask, 10000baseKR_Full);
			phylink_set(mask, 10000baseT_Full);
			phylink_set(mask, 10000baseCR_Full);
			phylink_set(mask, 10000baseSR_Full);
			phylink_set(mask, 10000baseLR_Full);
			phylink_set(mask, 10000baseLRM_Full);
			phylink_set(mask, 10000baseER_Full);
			phylink_set(mask, 1000baseKX_Full);
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
			phylink_set(mask, 2500baseT_Full);
			phylink_set(mask, 5000baseT_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
			phylink_set(mask, 1000baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
		}
	}

	if (mac->type == MTK_XGDM_TYPE) {
		phylink_clear(mask, 10baseT_Half);
		phylink_clear(mask, 100baseT_Half);
		phylink_clear(mask, 1000baseT_Half);
	}

	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}

static const struct phylink_mac_ops mtk_phylink_ops = {
	.validate = mtk_validate,
	.mac_select_pcs = mtk_mac_select_pcs,
	.mac_link_state = mtk_mac_pcs_get_state,
	.mac_prepare = mtk_mac_prepare,
	.mac_config = mtk_mac_config,
	.mac_finish = mtk_mac_finish,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
};

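/* Illustrative note (not part of the original file): under the phylink API
 * assumed by this file, the core drives these callbacks roughly as
 *
 *	mac_prepare() -> mac_config() -> mac_finish()
 *
 * on an interface change, then mac_link_up()/mac_link_down() as the link
 * resolves, with mac_link_state() polled for in-band status and
 * mac_select_pcs() consulted to pick the SGMII/USXGMII PCS.
 */
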
static int mtk_mdc_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int max_clk = 2500000, divider;
	int ret = 0;
	u32 val;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
		if (val > MDC_MAX_FREQ ||
		    val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
			dev_err(eth->dev, "MDIO clock frequency out of range");
			ret = -EINVAL;
			goto err_put_node;
		}
		max_clk = val;
	}

	divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);

	/* Configure MDC Turbo Mode */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		val = mtk_r32(eth, MTK_MAC_MISC);
		val |= MISC_MDC_TURBO;
		mtk_w32(eth, val, MTK_MAC_MISC);
	} else {
		val = mtk_r32(eth, MTK_PPSC);
		val |= PPSC_MDC_TURBO;
		mtk_w32(eth, val, MTK_PPSC);
	}

	/* Configure MDC Divider */
	val = mtk_r32(eth, MTK_PPSC);
	val &= ~PPSC_MDC_CFG;
	val |= FIELD_PREP(PPSC_MDC_CFG, divider);
	mtk_w32(eth, val, MTK_PPSC);

	dev_info(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

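/* Worked example (not part of the original file; assumes MDC_MAX_FREQ is
 * 25 MHz, the usual upper bound for this IP): with no "clock-frequency"
 * property, max_clk stays at the 2.5 MHz default, so
 *
 *	divider = min(DIV_ROUND_UP(25000000, 2500000), 63) = 10
 *
 * and the bus runs at 25000000 / 10 = 2.5 MHz, as reported by the
 * dev_info() above.
 */
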
static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->reset = mtk_mdio_reset;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	if (snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np) < 0) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		spin_lock_irqsave(&eth->tx_irq_lock, flags);
		val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
		mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
		spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
	} else {
		spin_lock_irqsave(&eth->txrx_irq_lock, flags);
		val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
		mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
		spin_unlock_irqrestore(&eth->txrx_irq_lock, flags);
	}
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		spin_lock_irqsave(&eth->tx_irq_lock, flags);
		val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
		mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
		spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
	} else {
		spin_lock_irqsave(&eth->txrx_irq_lock, flags);
		val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
		mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
		spin_unlock_irqrestore(&eth->txrx_irq_lock, flags);
	}
}

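/* Illustrative usage (not part of the original file): these helpers mask or
 * unmask individual TX interrupt sources around NAPI polling. A hypothetical
 * TX interrupt handler could do:
 *
 *	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
 *	napi_schedule(&eth->tx_napi);
 *
 * and the poll routine would call mtk_tx_irq_enable() again once the ring
 * has been cleaned.
 */
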
static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;
	spinlock_t *irq_lock;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		irq_lock = &eth->rx_irq_lock;
	else
		irq_lock = &eth->txrx_irq_lock;

	spin_lock_irqsave(irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
	spin_unlock_irqrestore(irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;
	spinlock_t *irq_lock;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		irq_lock = &eth->rx_irq_lock;
	else
		irq_lock = &eth->txrx_irq_lock;

	spin_lock_irqsave(irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
	spin_unlock_irqrestore(irq_lock, flags);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MT7628_SDM_MAC_ADRH);
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MT7628_SDM_MAC_ADRL);
	} else {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MTK_GDMA_MAC_ADRH(mac->id));
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MTK_GDMA_MAC_ADRL(mac->id));
	}
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

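/* Illustrative note (not part of the original file): the station address is
 * split across two registers, ADRH = macaddr[0]:macaddr[1] and
 * ADRL = macaddr[2]:macaddr[3]:macaddr[4]:macaddr[5]. For the address
 * 00:11:22:33:44:55 this writes 0x00000011 to ADRH and 0x22334455 to ADRL.
 */
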
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_eth *eth = mac->hw;
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int offs = hw_stats->reg_offset;
	u64 stats;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
	stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x08 + offs);
	hw_stats->rx_overflow +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
	hw_stats->rx_fcs_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
	hw_stats->rx_short_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
	hw_stats->rx_long_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
	hw_stats->rx_checksum_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
	hw_stats->rx_flow_control_packets +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		hw_stats->tx_skip +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
		hw_stats->tx_collisions +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
		hw_stats->tx_bytes +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
	} else {
		hw_stats->tx_skip +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
		hw_stats->tx_collisions +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
		hw_stats->tx_bytes +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
	}

	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

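/* .ndo_get_stats64 handler. The fetch_begin/fetch_retry pair is the usual
 * u64_stats seqcount reader loop: if mtk_stats_update_mac() updates the
 * counters concurrently, the snapshot is simply re-read until it is
 * consistent, so readers never block the update path.
 */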
static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
			    struct mtk_rx_dma_v2 *dma_rxd)
{
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	if (!(rxd->rxd2 & RX_DMA_DONE))
		return false;

	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
		rxd->rxd7 = READ_ONCE(dma_rxd->rxd7);
	}

	return true;
}

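/* LRO rx buffers are larger than PAGE_SIZE, so napi_alloc_frag() cannot be
 * used for them; allocate a compound multi-page buffer instead. __GFP_COMP
 * keeps the allocation a single unit so that skb_free_frag() on the free
 * path releases it correctly.
 */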
static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
{
	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
	unsigned long data;

	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
				get_order(size));

	return (void *)data;
}

/* the qdma core needs scratch memory to be set up */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	u64 addr64 = 0;
	int i;

	if (!eth->soc->has_sram) {
		eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
						       cnt * soc->txrx.txd_size,
						       &eth->phy_scratch_ring,
						       GFP_KERNEL);
	} else {
		eth->scratch_ring = eth->sram_base;
	}

	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dma_dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
		return -ENOMEM;

	phy_ring_tail = eth->phy_scratch_ring +
			(dma_addr_t)soc->txrx.txd_size * (cnt - 1);

	for (i = 0; i < cnt; i++) {
		struct mtk_tx_dma_v2 *txd;

		txd = eth->scratch_ring + i * soc->txrx.txd_size;
		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
		if (i < cnt - 1)
			txd->txd2 = eth->phy_scratch_ring +
				    (i + 1) * soc->txrx.txd_size;

		addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
			 TX_DMA_SDP1(dma_addr + i * MTK_QDMA_PAGE_SIZE) : 0;

		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE) | addr64;
		txd->txd4 = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			txd->txd5 = 0;
			txd->txd6 = 0;
			txd->txd7 = 0;
			txd->txd8 = 0;
		}
	}

	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);

	return 0;
}

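/* Ring address helpers: both the QDMA descriptor ring and its PDMA shadow
 * are single contiguous allocations, so the bus address <-> CPU pointer <->
 * ring index conversions below are plain offset arithmetic against the
 * ring base.
 */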
static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	return ring->dma + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    void *txd, u32 txd_size)
{
	int idx = (txd - ring->dma) / txd_size;

	return &ring->buf[idx];
}

static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
				       void *dma)
{
	return ring->dma_pdma - ring->dma + dma;
}

static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
{
	return (dma - ring->dma) / txd_size;
}

static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 bool napi)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
			dma_unmap_single(eth->dma_dev,
					 dma_unmap_addr(tx_buf, dma_addr0),
					 dma_unmap_len(tx_buf, dma_len0),
					 DMA_TO_DEVICE);
		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}
	} else {
		if (dma_unmap_len(tx_buf, dma_len0)) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}

		if (dma_unmap_len(tx_buf, dma_len1)) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr1),
				       dma_unmap_len(tx_buf, dma_len1),
				       DMA_TO_DEVICE);
		}
	}

	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
		if (napi)
			napi_consume_skb(tx_buf->skb, napi);
		else
			dev_kfree_skb_any(tx_buf->skb);
	}
	tx_buf->skb = NULL;
}

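/* Record the mapping in the tx_buf unmap bookkeeping and, on PDMA, program
 * the descriptor words directly: one PDMA descriptor carries two buffers,
 * so even buffer indices fill txd1/PLEN0 and odd indices fill txd3/PLEN1
 * of the same descriptor.
 */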
static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
			 size_t size, int idx)
{
	u64 addr64 = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
		dma_unmap_len_set(tx_buf, dma_len0, size);
	} else {
		addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
			 TX_DMA_SDP1(mapped_addr) : 0;

		if (idx & 1) {
			txd->txd3 = mapped_addr;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
				txd->txd4 = TX_DMA_PLEN1(size) | addr64;
			else
				txd->txd2 |= TX_DMA_PLEN1(size);
			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len1, size);
		} else {
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			txd->txd1 = mapped_addr;
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
				txd->txd2 = TX_DMA_PLEN0(size) | addr64;
			else
				txd->txd2 = TX_DMA_PLEN0(size);
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, size);
		}
	}
}

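/* Per-generation TX descriptor writers. v1 packs the queue id and offload
 * flags into txd3/txd4, v2/v3 move them into txd4..txd6 of the larger
 * descriptor, and v3 additionally carries the high address bits and the
 * DSA special-tag request.
 */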
static void mtk_tx_set_dma_desc_v1(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *desc = txd;
	u32 data;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_SWC | QID_LOW_BITS(info->qid) | TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data);

	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
	data |= QID_HIGH_BITS(info->qid);
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM;
		/* vlan header offload */
		if (info->vlan)
			data |= TX_DMA_INS_VLAN | info->vlan_tci;
	}

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0x7 << TX_DMA_FPORT_SHIFT);
		data |= 0x4 << TX_DMA_FPORT_SHIFT;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);
}

static void mtk_tx_set_dma_desc_v2(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma_v2 *desc = txd;
	u32 data = 0;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data);

	data = ((mac->id == MTK_GMAC3_ID) ?
		PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
		data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);

	data = 0;
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO_V2;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM_V2;
	}
	WRITE_ONCE(desc->txd5, data);

	data = 0;
	if (info->first && info->vlan)
		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
	WRITE_ONCE(desc->txd6, data);

	WRITE_ONCE(desc->txd7, 0);
	WRITE_ONCE(desc->txd8, 0);
}

static void mtk_tx_set_dma_desc_v3(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma_v2 *desc = txd;
	u64 addr64 = 0;
	u32 data = 0;

	addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
		 TX_DMA_SDP1(info->addr) : 0;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data | addr64);

	data = ((mac->id == MTK_GMAC3_ID) ?
		PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
		data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);

	data = 0;
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO_V2;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM_V2;

		if (netdev_uses_dsa(dev))
			data |= TX_DMA_SPTAG_V3;
	}
	WRITE_ONCE(desc->txd5, data);

	data = 0;
	if (info->first && info->vlan)
		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
	WRITE_ONCE(desc->txd6, data);

	WRITE_ONCE(desc->txd7, 0);
	WRITE_ONCE(desc->txd8, 0);
}

static void mtk_tx_set_pdma_desc(struct sk_buff *skb, struct net_device *dev, void *txd,
				 struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_tx_dma_v2 *desc = txd;
	u32 data = 0;

	if (info->first) {
		data = ((mac->id == MTK_GMAC3_ID) ?
			PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_PDMA;
		if (info->gso)
			data |= TX_DMA_TSO_V2;
		if (info->csum)
			data |= TX_DMA_CHKSUM_V2;
		if (netdev_uses_dsa(dev))
			data |= TX_DMA_SPTAG_V3;
		WRITE_ONCE(desc->txd5, data);

		if (info->vlan) {
			WRITE_ONCE(desc->txd6, TX_DMA_INS_VLAN_V2);
			WRITE_ONCE(desc->txd7, info->vlan_tci);
		}

		WRITE_ONCE(desc->txd8, 0);
	}
}

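/* Dispatch to the descriptor writer matching the DMA engine in use. Note
 * that for PDMA only NETSYS_V3 needs extra descriptor words here; older
 * PDMA descriptors appear to be fully populated by setup_tx_buf().
 */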
static void mtk_tx_set_dma_desc(struct sk_buff *skb, struct net_device *dev, void *txd,
				struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
			mtk_tx_set_dma_desc_v3(skb, dev, txd, info);
		else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
			mtk_tx_set_dma_desc_v2(skb, dev, txd, info);
		else
			mtk_tx_set_dma_desc_v1(skb, dev, txd, info);
	} else {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
			mtk_tx_set_pdma_desc(skb, dev, txd, info);
	}
}

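/* Map an skb onto the TX ring: the linear head fills the first descriptor,
 * each page fragment is chopped into MTK_TX_DMA_BUF_LEN sized chunks, and
 * the hardware doorbell (QDMA ctx_ptr, or PDMA pctx_ptr) is only written
 * after a wmb() once the whole descriptor chain is consistent.
 */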
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_tx_dma_desc_info txd_info = {
		.size = skb_headlen(skb),
		.qid = skb_get_queue_mapping(skb),
		.gso = gso,
		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
		.vlan = skb_vlan_tag_present(skb),
		.vlan_tci = skb_vlan_tag_get(skb),
		.first = true,
		.last = !skb_is_nonlinear(skb),
	};
	struct netdev_queue *txq;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
	struct mtk_tx_buf *itx_buf, *tx_buf;
	int i, n_desc = 1;
	int queue = skb_get_queue_mapping(skb);
	int k = 0;

	if (skb->len < 32) {
		if (skb_put_padto(skb, MTK_MIN_TX_LENGTH))
			return -ENOMEM;

		txd_info.size = skb_headlen(skb);
	}

	txq = netdev_get_tx_queue(dev, txd_info.qid);
	itxd = ring->next_free;
	itxd_pdma = qdma_to_pdma(ring, itxd);
	if (itxd == ring->last_free)
		return -ENOMEM;

	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
	memset(itx_buf, 0, sizeof(*itx_buf));

	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
				       DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
		return -ENOMEM;

	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
		mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);
	else
		mtk_tx_set_dma_desc(skb, dev, itxd_pdma, &txd_info);

	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	itx_buf->flags |= (mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
			  (mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
						      MTK_TX_FLAGS_FPORT2;
	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
		     k++);

	/* TX SG offload */
	txd = itxd;
	txd_pdma = qdma_to_pdma(ring, txd);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool new_desc = true;

			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
			    (i & 0x1)) {
				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
				txd_pdma = qdma_to_pdma(ring, txd);
				if (txd == ring->last_free)
					goto err_dma;

				n_desc++;
			} else {
				new_desc = false;
			}

			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
			txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			txd_info.qid = queue;
			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
					!(frag_size - txd_info.size);
			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
							 offset, txd_info.size,
							 DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dma_dev,
						       txd_info.addr)))
				goto err_dma;

			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
				mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);
			else
				mtk_tx_set_dma_desc(skb, dev, txd_pdma, &txd_info);

			tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
			if (new_desc)
				memset(tx_buf, 0, sizeof(*tx_buf));
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			tx_buf->flags |=
				(mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
				(mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
							    MTK_TX_FLAGS_FPORT2;

			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
				     txd_info.size, k++);

			frag_size -= txd_info.size;
			offset += txd_info.size;
		}
	}

	/* store skb for cleanup */
	itx_buf->skb = skb;

	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			if (k & 0x1)
				txd_pdma->txd2 |= TX_DMA_LS0;
			else
				txd_pdma->txd4 |= TX_DMA_LS1_V2;
		} else {
			if (k & 0x1)
				txd_pdma->txd2 |= TX_DMA_LS0;
			else
				txd_pdma->txd2 |= TX_DMA_LS1;
		}
	}

	netdev_tx_sent_queue(txq, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		if (netif_xmit_stopped(txq) || !netdev_xmit_more())
			mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
	} else {
		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
					     ring->dma_size);
		mtk_w32(eth, next_idx, soc->reg_map->pdma.pctx_ptr);
	}

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf, false);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;

		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
		itxd_pdma = qdma_to_pdma(ring, itxd);
	} while (itxd != txd);

	return -ENOMEM;
}

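/* Worst-case descriptor count for an skb: one for the linear head, plus
 * one per MTK_TX_DMA_BUF_LEN chunk of every GSO fragment. Illustrative
 * numbers only: with a 16383-byte chunk limit, a GSO skb carrying two
 * 40960-byte frags needs 1 + 2 * DIV_ROUND_UP(40960, 16383) = 7 descriptors.
 */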
static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	skb_frag_t *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
					       MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_tx_wake_all_queues(eth->netdev[i]);
	}
}

static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		netif_tx_stop_all_queues(dev);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		netif_tx_stop_all_queues(dev);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void mtk_update_rx_cpu_idx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
}

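/* RX NAPI poll loop. For each completed descriptor: allocate a replacement
 * buffer first (the ring slot is never left empty), unmap and build_skb()
 * around the old buffer, fill in checksum/VLAN/HNAT metadata, then hand
 * the descriptor back to hardware before advancing calc_idx.
 */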
static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
	struct mtk_rx_ring *ring = rx_napi->rx_ring;
	int idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma_v2 *rxd, trxd;
	int done = 0;

	if (unlikely(!ring))
		goto rx_done;

	while (done < budget) {
		unsigned int pktlen, *rxdcsum;
		struct net_device *netdev = NULL;
		dma_addr_t dma_addr = DMA_MAPPING_ERROR;
		u64 addr64 = 0;
		int mac = 0;

		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
		data = ring->data[idx];

		if (!mtk_rx_get_desc(eth, &trxd, rxd))
			break;

		/* find out which mac the packet comes from. values start at 1 */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
			mac = 0;
		} else {
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
				switch (RX_DMA_GET_SPORT_V2(trxd.rxd5)) {
				case PSE_GDM1_PORT:
				case PSE_GDM2_PORT:
					mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
					break;
				case PSE_GDM3_PORT:
					mac = MTK_GMAC3_ID;
					break;
				}
			} else
				mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ?
				      0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
		}

		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
			     !eth->netdev[mac]))
			goto release_desc;

		netdev = eth->netdev[mac];

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

		/* alloc new buffer */
		if (ring->frag_size <= PAGE_SIZE)
			new_data = napi_alloc_frag(ring->frag_size);
		else
			new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dma_dev,
					  new_data + NET_SKB_PAD +
					  eth->ip_align,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
			 ((u64)(trxd.rxd2 & 0xf)) << 32 : 0;

		dma_unmap_single(eth->dma_dev,
				 ((u64)(trxd.rxd1) | addr64),
				 ring->buf_size, DMA_FROM_DEVICE);

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(data);
			netdev->stats.rx_dropped++;
			goto skip_rx;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
			rxdcsum = &trxd.rxd3;
		else
			rxdcsum = &trxd.rxd4;

		if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
				if (trxd.rxd3 & RX_DMA_VTAG_V2)
					__vlan_hwaccel_put_tag(skb,
						htons(RX_DMA_VPID_V2(trxd.rxd4)),
						RX_DMA_VID_V2(trxd.rxd4));
			} else {
				if (trxd.rxd2 & RX_DMA_VTAG)
					__vlan_hwaccel_put_tag(skb,
						htons(RX_DMA_VPID(trxd.rxd3)),
						RX_DMA_VID(trxd.rxd3));
			}

			/* If netdev is attached to a dsa switch, the special
			 * tag inserted in the VLAN field by switch hardware
			 * can be offloaded by RX HW VLAN offload. Clear the
			 * VLAN information from @skb to avoid an unexpected
			 * 802.1d handler before the packet enters the dsa
			 * framework.
			 */
			if (netdev_uses_dsa(netdev))
				__vlan_hwaccel_clear_tag(skb);
		}

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
			*(u32 *)(skb->head) = trxd.rxd5;
		else
			*(u32 *)(skb->head) = trxd.rxd4;

		skb_hnat_alg(skb) = 0;
		skb_hnat_filled(skb) = 0;
		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;

		if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
			trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
				     __func__, skb_hnat_reason(skb));
			skb->pkt_type = PACKET_HOST;
		}

		trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x\n",
			     __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
			     skb_hnat_reason(skb), skb_hnat_alg(skb));
#endif
		if (mtk_hwlro_stats_ebl &&
		    IS_HW_LRO_RING(ring->ring_no) && eth->hwlro) {
			hw_lro_stats_update(ring->ring_no, &trxd);
			hw_lro_flush_stats_update(ring->ring_no, &trxd);
		}

		skb_record_rx_queue(skb, 0);
		napi_gro_receive(napi, skb);

skip_rx:
		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) {
			if (unlikely(dma_addr == DMA_MAPPING_ERROR))
				addr64 = RX_DMA_GET_SDP1(rxd->rxd2);
			else
				addr64 = RX_DMA_SDP1(dma_addr);
		}

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			rxd->rxd2 = RX_DMA_LSO;
		else
			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size) | addr64;

		ring->calc_idx = idx;

		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth, ring);
	}

	return done;
}

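/* TX completion state for byte-queue-limit accounting: completions are
 * batched per netdev_queue, and netdev_tx_completed_queue() is flushed
 * whenever the queue changes, plus once more at the end of mtk_poll_tx().
 */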
struct mtk_poll_state {
	struct netdev_queue *txq;
	unsigned int total;
	unsigned int done;
	unsigned int bytes;
};

static void
mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
		 struct sk_buff *skb)
{
	struct netdev_queue *txq;
	struct net_device *dev;
	unsigned int bytes = skb->len;

	state->total++;

	dev = eth->netdev[mac];
	if (!dev)
		return;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	if (state->txq == txq) {
		state->done++;
		state->bytes += bytes;
		return;
	}

	if (state->txq)
		netdev_tx_completed_queue(state->txq, state->done, state->bytes);

	state->txq = txq;
	state->done = 1;
	state->bytes = bytes;
}

static void mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
			     struct mtk_poll_state *state)
{
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = ring->last_free_ptr;
	dma = mtk_r32(eth, reg_map->qdma.drx_ptr);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac = 0;

		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);

		tx_buf = mtk_desc_to_tx_buf(ring, desc, soc->txrx.txd_size);
		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
			mac = MTK_GMAC2_ID;
		else if (tx_buf->flags & MTK_TX_FLAGS_FPORT2)
			mac = MTK_GMAC3_ID;

		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			mtk_poll_tx_done(eth, state, mac, skb);
			budget--;
		}
		mtk_tx_unmap(eth, tx_buf, true);

		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	ring->last_free_ptr = cpu;
	mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
}

static void mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
			     struct mtk_poll_state *state)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = ring->cpu_idx;
	dma = mtk_r32(eth, soc->reg_map->pdma.pdtx_ptr);

	while ((cpu != dma) && budget) {
		int mac = 0;

		desc = ring->dma_pdma + cpu * eth->soc->txrx.txd_size;
		if ((desc->txd2 & TX_DMA_OWNER_CPU) == 0)
			break;

		tx_buf = &ring->buf[cpu];
		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
			mac = MTK_GMAC2_ID;
		else if (tx_buf->flags & MTK_TX_FLAGS_FPORT2)
			mac = MTK_GMAC3_ID;

		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			mtk_poll_tx_done(eth, state, mac, skb);
			budget--;
		}

		mtk_tx_unmap(eth, tx_buf, true);

		desc = ring->dma + cpu * eth->soc->txrx.txd_size;
		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
	}

	ring->cpu_idx = cpu;
}

static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_poll_state state = {};

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_poll_tx_qdma(eth, budget, &state);
	else
		mtk_poll_tx_pdma(eth, budget, &state);

	if (state.txq)
		netdev_tx_completed_queue(state.txq, state.done, state.bytes);

	if (mtk_queue_stopped(eth) &&
	    (atomic_read(&ring->free_count) > ring->thresh))
		mtk_wake_queue(eth);

	return state.total;
}

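/* The GDM1/GDM2 AF bits appear to flag the MIB counters running almost
 * full; fold them into the 64-bit software stats before the 32-bit
 * hardware counters can wrap, then ack the interrupt.
 */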
static void mtk_handle_status_irq(struct mtk_eth *eth)
{
	u32 status2 = mtk_r32(eth, MTK_FE_INT_STATUS);

	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
		mtk_stats_update(eth);
		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
			MTK_FE_INT_STATUS);
	}
}

static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	u32 status, mask;
	int tx_done = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_handle_status_irq(eth);
		mtk_w32(eth, MTK_TX_DONE_INT(0), reg_map->tx_irq_status);
	} else {
		mtk_w32(eth, MTK_TX_DONE_INT(0), reg_map->pdma.irq_status);
	}
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
			status = mtk_r32(eth, reg_map->tx_irq_status);
			mask = mtk_r32(eth, reg_map->tx_irq_mask);
		} else {
			status = mtk_r32(eth, reg_map->pdma.irq_status);
			mask = mtk_r32(eth, reg_map->pdma.irq_mask);
		}
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n",
			 tx_done, status, mask);
	}

	if (tx_done == budget)
		return budget;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		status = mtk_r32(eth, reg_map->tx_irq_status);
	else
		status = mtk_r32(eth, reg_map->pdma.irq_status);
	if (status & MTK_TX_DONE_INT(0))
		return budget;

	if (napi_complete(napi))
		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT(0));

	return tx_done;
}

static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
	struct mtk_eth *eth = rx_napi->eth;
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	struct mtk_rx_ring *ring = rx_napi->rx_ring;
	u32 status, mask;
	int rx_done = 0;
	int remain_budget = budget;

	mtk_handle_status_irq(eth);

poll_again:
	mtk_w32(eth, MTK_RX_DONE_INT(ring->ring_no), reg_map->pdma.irq_status);
	rx_done = mtk_poll_rx(napi, remain_budget, eth);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, reg_map->pdma.irq_status);
		mask = mtk_r32(eth, reg_map->pdma.irq_mask);
		dev_info(eth->dev,
			 "done rx %d, intr 0x%08x/0x%x\n",
			 rx_done, status, mask);
	}
	if (rx_done == remain_budget)
		return budget;

	status = mtk_r32(eth, reg_map->pdma.irq_status);
	if (status & MTK_RX_DONE_INT(ring->ring_no)) {
		remain_budget -= rx_done;
		goto poll_again;
	}

	if (napi_complete(napi))
		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(ring->ring_no));

	return rx_done + budget - remain_budget;
}

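/* Allocate and program the TX ring(s). On SoCs with SRAM the QDMA
 * descriptors live directly behind the FQ scratch ring in SRAM, otherwise
 * they come from coherent DMA memory; PDMA-only SoCs additionally get the
 * shadow dma_pdma ring that the hardware actually consumes.
 */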
static int mtk_tx_alloc(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = soc->txrx.txd_size;
	struct mtk_tx_dma_v2 *txd, *pdma_txd;

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	if (!eth->soc->has_sram)
		ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
					       &ring->phys, GFP_KERNEL);
	else {
		ring->dma = eth->sram_base + MTK_DMA_SIZE * sz;
		ring->phys = eth->phy_scratch_ring +
			     MTK_DMA_SIZE * (dma_addr_t)sz;
	}

	if (!ring->dma)
		goto no_tx_mem;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		txd = ring->dma + i * sz;
		txd->txd2 = next_ptr;
		txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		txd->txd4 = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			txd->txd5 = 0;
			txd->txd6 = 0;
			txd->txd7 = 0;
			txd->txd8 = 0;
		}
	}

	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
	 * only as the framework. The real HW descriptors are the PDMA
	 * descriptors in ring->dma_pdma.
	 */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev,
						    MTK_DMA_SIZE * sz,
						    &ring->phys_pdma, GFP_KERNEL);
		if (!ring->dma_pdma)
			goto no_tx_mem;

		for (i = 0; i < MTK_DMA_SIZE; i++) {
			pdma_txd = ring->dma_pdma + i * sz;

			pdma_txd->txd2 = TX_DMA_DESP2_DEF;
			pdma_txd->txd4 = 0;

			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
				pdma_txd->txd5 = 0;
				pdma_txd->txd6 = 0;
				pdma_txd->txd7 = 0;
				pdma_txd->txd8 = 0;
			}
		}
	}

	ring->dma_size = MTK_DMA_SIZE;
	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = ring->dma;
	ring->last_free = (void *)txd;
	ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
	ring->thresh = MAX_SKB_FRAGS;
	ring->cpu_idx = 0;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
		mtk_w32(eth,
			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
			soc->reg_map->qdma.crx_ptr);
		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
			soc->reg_map->qdma.qtx_cfg);
	} else {
		mtk_w32(eth, ring->phys_pdma, soc->reg_map->pdma.tx_ptr);
		mtk_w32(eth, MTK_DMA_SIZE, soc->reg_map->pdma.tx_cnt_cfg);
		mtk_w32(eth, ring->cpu_idx, soc->reg_map->pdma.pctx_ptr);
		mtk_w32(eth, MTK_PST_DTX_IDX_CFG(0), soc->reg_map->pdma.rst_idx);
	}

	return 0;

no_tx_mem:
	return -ENOMEM;
}

static void mtk_tx_clean(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < MTK_DMA_SIZE; i++)
			mtk_tx_unmap(eth, &ring->buf[i], false);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (!eth->soc->has_sram && ring->dma) {
		dma_free_coherent(eth->dma_dev,
				  MTK_DMA_SIZE * soc->txrx.txd_size,
				  ring->dma, ring->phys);
		ring->dma = NULL;
	}

	if (ring->dma_pdma) {
		dma_free_coherent(eth->dma_dev,
				  MTK_DMA_SIZE * soc->txrx.txd_size,
				  ring->dma_pdma, ring->phys_pdma);
		ring->dma_pdma = NULL;
	}
}

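/* Allocate one RX ring. Normal PDMA rings may be carved out of the same
 * SRAM block as the TX ring (ring_no + 1 ring-sized slots in), while LRO
 * and QDMA rings always use coherent DMA memory.
 */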
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	struct mtk_rx_ring *ring;
	int rx_data_len, rx_dma_size;
	int i;
	u64 addr64 = 0;

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		if (ring_no)
			return -EINVAL;
		ring = &eth->rx_ring_qdma;
	} else {
		ring = &eth->rx_ring[ring_no];
	}

	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
	} else {
		rx_data_len = ETH_DATA_LEN;
		rx_dma_size = MTK_DMA_SIZE;
	}

	ring->frag_size = mtk_max_frag_size(rx_data_len);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		if (ring->frag_size <= PAGE_SIZE)
			ring->data[i] = napi_alloc_frag(ring->frag_size);
		else
			ring->data[i] = mtk_max_lro_buf_alloc(GFP_ATOMIC);
		if (!ring->data[i])
			return -ENOMEM;
	}

	if ((!eth->soc->has_sram) || (eth->soc->has_sram
	    && (rx_flag != MTK_RX_FLAGS_NORMAL)))
		ring->dma = dma_alloc_coherent(eth->dma_dev,
					       rx_dma_size * eth->soc->txrx.rxd_size,
					       &ring->phys, GFP_KERNEL);
	else {
		struct mtk_tx_ring *tx_ring = &eth->tx_ring;

		ring->dma = tx_ring->dma + MTK_DMA_SIZE *
			    eth->soc->txrx.txd_size * (ring_no + 1);
		ring->phys = tx_ring->phys + MTK_DMA_SIZE *
			     eth->soc->txrx.txd_size * (ring_no + 1);
	}

	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		struct mtk_rx_dma_v2 *rxd;

		dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
				ring->data[i] + NET_SKB_PAD + eth->ip_align,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
			return -ENOMEM;

		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
		rxd->rxd1 = (unsigned int)dma_addr;

		addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
			 RX_DMA_SDP1(dma_addr) : 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			rxd->rxd2 = RX_DMA_LSO;
		else
			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size) | addr64;

		rxd->rxd3 = 0;
		rxd->rxd4 = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
			rxd->rxd5 = 0;
			rxd->rxd6 = 0;
			rxd->rxd7 = 0;
			rxd->rxd8 = 0;
		}
	}
	ring->dma_size = rx_dma_size;
	ring->calc_idx_update = false;
	ring->calc_idx = rx_dma_size - 1;
	ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ?
			    MTK_QRX_CRX_IDX_CFG(ring_no) :
			    MTK_PRX_CRX_IDX_CFG(ring_no);
	ring->ring_no = ring_no;
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		mtk_w32(eth, ring->phys,
			reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
		mtk_w32(eth, rx_dma_size,
			reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
		mtk_w32(eth, ring->calc_idx,
			ring->crx_idx_reg);
		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
			reg_map->qdma.rst_idx);
	} else {
		mtk_w32(eth, ring->phys,
			reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
		mtk_w32(eth, rx_dma_size,
			reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
		mtk_w32(eth, ring->calc_idx,
			ring->crx_idx_reg);
		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
			reg_map->pdma.rst_idx);
	}

	return 0;
}

3048static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram)
3049{
3050 int i;
developere86c3ec2022-10-11 10:29:18 +08003051 u64 addr64 = 0;
developerec4ebe42022-04-12 11:17:45 +08003052
3053 if (ring->data && ring->dma) {
3054 for (i = 0; i < ring->dma_size; i++) {
developer29f66b32022-07-12 15:23:20 +08003055 struct mtk_rx_dma *rxd;
3056
developerec4ebe42022-04-12 11:17:45 +08003057 if (!ring->data[i])
3058 continue;
developer29f66b32022-07-12 15:23:20 +08003059
3060 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
3061 if (!rxd->rxd1)
developerec4ebe42022-04-12 11:17:45 +08003062 continue;
developer29f66b32022-07-12 15:23:20 +08003063
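			/* On 8GB-capable SoCs, recover address bits 35:32
			 * from rxd2 so the full DMA address can be unmapped.
			 */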
developere86c3ec2022-10-11 10:29:18 +08003064 addr64 = (MTK_HAS_CAPS(eth->soc->caps,
3065 MTK_8GB_ADDRESSING)) ?
3066 ((u64)(rxd->rxd2 & 0xf)) << 32 : 0;
3067
developerb35f4fa2023-03-14 13:24:47 +08003068 dma_unmap_single(eth->dma_dev,
developerfce0d152024-01-11 13:37:13 +08003069 ((u64)(rxd->rxd1) | addr64),
developerec4ebe42022-04-12 11:17:45 +08003070 ring->buf_size,
3071 DMA_FROM_DEVICE);
3072 skb_free_frag(ring->data[i]);
3073 }
3074 kfree(ring->data);
3075 ring->data = NULL;
3076 }
3077
3078	if (in_sram)
3079 return;
3080
3081 if (ring->dma) {
developerb35f4fa2023-03-14 13:24:47 +08003082 dma_free_coherent(eth->dma_dev,
developer29f66b32022-07-12 15:23:20 +08003083 ring->dma_size * eth->soc->txrx.rxd_size,
developerec4ebe42022-04-12 11:17:45 +08003084 ring->dma,
3085 ring->phys);
3086 ring->dma = NULL;
3087 }
3088}
3089
3090static int mtk_hwlro_rx_init(struct mtk_eth *eth)
3091{
3092 int i;
3093 u32 val;
3094 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
3095 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
3096
3097 /* set LRO rings to auto-learn modes */
3098 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
3099
3100 /* validate LRO ring */
3101 ring_ctrl_dw2 |= MTK_RING_VLD;
3102
3103 /* set AGE timer (unit: 20us) */
3104 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
3105 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
3106
3107 /* set max AGG timer (unit: 20us) */
3108 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
3109
3110 /* set max LRO AGG count */
3111 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
3112 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
3113
3114 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
3115 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
3116 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
3117 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
3118 }
3119
3120 /* IPv4 checksum update enable */
3121 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
3122
3123 /* switch priority comparison to packet count mode */
3124 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
3125
3126 /* bandwidth threshold setting */
3127 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
3128
3129 /* auto-learn score delta setting */
3130 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_LRO_ALT_SCORE_DELTA);
3131
3132 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
3133 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
3134 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
3135
3136	/* the minimum remaining room of SDL0 in the RXD for LRO aggregation */
3137 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
3138
developerb35f4fa2023-03-14 13:24:47 +08003139 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
developerec4ebe42022-04-12 11:17:45 +08003140 val = mtk_r32(eth, MTK_PDMA_RX_CFG);
3141 mtk_w32(eth, val | (MTK_PDMA_LRO_SDL << MTK_RX_CFG_SDL_OFFSET),
3142 MTK_PDMA_RX_CFG);
3143
3144 lro_ctrl_dw0 |= MTK_PDMA_LRO_SDL << MTK_CTRL_DW0_SDL_OFFSET;
3145 } else {
3146 /* set HW LRO mode & the max aggregation count for rx packets */
3147 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
3148 }
3149
3150 /* enable HW LRO */
3151 lro_ctrl_dw0 |= MTK_LRO_EN;
3152
3153 /* enable cpu reason black list */
3154 lro_ctrl_dw0 |= MTK_LRO_CRSN_BNW;
3155
3156 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
3157 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
3158
3159	/* do not use the PPE CPU reason */
3160 mtk_w32(eth, 0xffffffff, MTK_PDMA_LRO_CTRL_DW1);
3161
developera05cf4c2023-10-27 14:35:41 +08003162	/* Set per-LRO-ring group interrupt */
3163 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2) ||
3164 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
3165 mtk_m32(eth, MTK_RX_DONE_INT(MTK_HW_LRO_RING(1)),
3166 MTK_RX_DONE_INT(MTK_HW_LRO_RING(1)), MTK_PDMA_INT_GRP1);
3167 mtk_m32(eth, MTK_RX_DONE_INT(MTK_HW_LRO_RING(2)),
3168 MTK_RX_DONE_INT(MTK_HW_LRO_RING(2)), MTK_PDMA_INT_GRP2);
3169 mtk_m32(eth, MTK_RX_DONE_INT(MTK_HW_LRO_RING(3)),
3170 MTK_RX_DONE_INT(MTK_HW_LRO_RING(3)), MTK_PDMA_INT_GRP3);
3171 }
3172
developerec4ebe42022-04-12 11:17:45 +08003173 return 0;
3174}
3175
3176static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
3177{
3178 int i;
3179 u32 val;
3180
3181 /* relinquish lro rings, flush aggregated packets */
3182 mtk_w32(eth, MTK_LRO_RING_RELINGUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
3183
3184 /* wait for relinquishments done */
3185 for (i = 0; i < 10; i++) {
3186 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
3187 if (val & MTK_LRO_RING_RELINGUISH_DONE) {
developer3d2dd692022-04-19 12:53:29 +08003188 mdelay(20);
developerec4ebe42022-04-12 11:17:45 +08003189 continue;
3190 }
3191 break;
3192 }
3193
3194 /* invalidate lro rings */
3195 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
3196 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
3197
3198 /* disable HW LRO */
3199 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
3200}
3201
3202static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
3203{
3204 u32 reg_val;
3205
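	/* NETSYS RX v2 numbers the LRO DIP slots from 1, so shift the
	 * logical index by one (the inverse of the adjustment made when
	 * the index was looked up).
	 */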
developerb35f4fa2023-03-14 13:24:47 +08003206 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
developerec4ebe42022-04-12 11:17:45 +08003207 idx += 1;
3208
3209 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
3210
3211 /* invalidate the IP setting */
3212 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
3213
3214 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
3215
3216 /* validate the IP setting */
3217 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
3218}
3219
3220static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
3221{
3222 u32 reg_val;
3223
developerb35f4fa2023-03-14 13:24:47 +08003224 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
developerec4ebe42022-04-12 11:17:45 +08003225 idx += 1;
3226
3227 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
3228
3229 /* invalidate the IP setting */
3230 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
3231
3232 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
3233}
3234
3235static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
3236{
3237 int cnt = 0;
3238 int i;
3239
3240 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3241 if (mac->hwlro_ip[i])
3242 cnt++;
3243 }
3244
3245 return cnt;
3246}
3247
developer7cf584b2023-12-21 13:04:36 +08003248static int mtk_hwlro_add_ipaddr_idx(struct net_device *dev, u32 ip4dst)
3249{
3250 struct mtk_mac *mac = netdev_priv(dev);
3251 struct mtk_eth *eth = mac->hw;
3252 u32 reg_val;
3253 int i;
3254
3255 /* check for duplicate IP address in the current DIP list */
3256 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
3257 reg_val = mtk_r32(eth, MTK_LRO_DIP_DW0_CFG(i));
3258 if (reg_val == ip4dst)
3259 break;
3260 }
3261
3262 if (i <= MTK_HW_LRO_RING_NUM) {
3263 netdev_warn(dev, "Duplicate IP address at DIP(%d)!\n", i);
3264 return -EEXIST;
3265 }
3266
3267 /* find out available DIP index */
3268 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
3269 reg_val = mtk_r32(eth, MTK_LRO_DIP_DW0_CFG(i));
3270 if (reg_val == 0UL)
3271 break;
3272 }
3273
3274 if (i > MTK_HW_LRO_RING_NUM) {
3275		netdev_warn(dev, "No available DIP index!\n");
3276 return -EBUSY;
3277 }
3278
3279 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
3280 i -= 1;
3281
3282 return i;
3283}
3284
3285static int mtk_hwlro_get_ipaddr_idx(struct net_device *dev, u32 ip4dst)
3286{
3287 struct mtk_mac *mac = netdev_priv(dev);
3288 struct mtk_eth *eth = mac->hw;
3289 u32 reg_val;
3290 int i;
3291
3292 /* find out DIP index that matches the given IP address */
3293 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
3294 reg_val = mtk_r32(eth, MTK_LRO_DIP_DW0_CFG(i));
3295 if (reg_val == ip4dst)
3296 break;
3297 }
3298
3299 if (i > MTK_HW_LRO_RING_NUM) {
3300		netdev_warn(dev, "DIP address does not exist!\n");
3301 return -ENOENT;
3302 }
3303
3304 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
3305 i -= 1;
3306
3307 return i;
3308}
3309
developerec4ebe42022-04-12 11:17:45 +08003310static int mtk_hwlro_add_ipaddr(struct net_device *dev,
3311 struct ethtool_rxnfc *cmd)
3312{
3313 struct ethtool_rx_flow_spec *fsp =
3314 (struct ethtool_rx_flow_spec *)&cmd->fs;
3315 struct mtk_mac *mac = netdev_priv(dev);
3316 struct mtk_eth *eth = mac->hw;
3317 int hwlro_idx;
developer7cf584b2023-12-21 13:04:36 +08003318 u32 ip4dst;
developerec4ebe42022-04-12 11:17:45 +08003319
3320 if ((fsp->flow_type != TCP_V4_FLOW) ||
3321 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
3322 (fsp->location > 1))
3323 return -EINVAL;
3324
developer7cf584b2023-12-21 13:04:36 +08003325 ip4dst = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
3326 hwlro_idx = mtk_hwlro_add_ipaddr_idx(dev, ip4dst);
3327 if (hwlro_idx < 0)
3328 return hwlro_idx;
developerec4ebe42022-04-12 11:17:45 +08003329
developer7cf584b2023-12-21 13:04:36 +08003330 mac->hwlro_ip[fsp->location] = ip4dst;
developerec4ebe42022-04-12 11:17:45 +08003331 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3332
3333 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
3334
3335 return 0;
3336}
3337
3338static int mtk_hwlro_del_ipaddr(struct net_device *dev,
3339 struct ethtool_rxnfc *cmd)
3340{
3341 struct ethtool_rx_flow_spec *fsp =
3342 (struct ethtool_rx_flow_spec *)&cmd->fs;
3343 struct mtk_mac *mac = netdev_priv(dev);
3344 struct mtk_eth *eth = mac->hw;
3345 int hwlro_idx;
developer7cf584b2023-12-21 13:04:36 +08003346 u32 ip4dst;
developerec4ebe42022-04-12 11:17:45 +08003347
3348 if (fsp->location > 1)
3349 return -EINVAL;
3350
developer7cf584b2023-12-21 13:04:36 +08003351 ip4dst = mac->hwlro_ip[fsp->location];
3352 hwlro_idx = mtk_hwlro_get_ipaddr_idx(dev, ip4dst);
3353 if (hwlro_idx < 0)
3354 return hwlro_idx;
developerec4ebe42022-04-12 11:17:45 +08003355
developer7cf584b2023-12-21 13:04:36 +08003356 mac->hwlro_ip[fsp->location] = 0;
developerec4ebe42022-04-12 11:17:45 +08003357 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3358
3359 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
3360
3361 return 0;
3362}
3363
developer7cf584b2023-12-21 13:04:36 +08003364static void mtk_hwlro_netdev_enable(struct net_device *dev)
3365{
3366 struct mtk_mac *mac = netdev_priv(dev);
3367 struct mtk_eth *eth = mac->hw;
3368 int i, hwlro_idx;
3369
3370 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3371 if (mac->hwlro_ip[i] == 0)
3372 continue;
3373
3374 hwlro_idx = mtk_hwlro_get_ipaddr_idx(dev, mac->hwlro_ip[i]);
3375 if (hwlro_idx < 0)
3376 continue;
3377
3378 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[i]);
3379 }
3380}
3381
developerec4ebe42022-04-12 11:17:45 +08003382static void mtk_hwlro_netdev_disable(struct net_device *dev)
3383{
3384 struct mtk_mac *mac = netdev_priv(dev);
3385 struct mtk_eth *eth = mac->hw;
3386 int i, hwlro_idx;
3387
3388 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
developer7cf584b2023-12-21 13:04:36 +08003389 if (mac->hwlro_ip[i] == 0)
3390 continue;
3391
3392 hwlro_idx = mtk_hwlro_get_ipaddr_idx(dev, mac->hwlro_ip[i]);
3393 if (hwlro_idx < 0)
3394 continue;
3395
developerec4ebe42022-04-12 11:17:45 +08003396 mac->hwlro_ip[i] = 0;
developerec4ebe42022-04-12 11:17:45 +08003397
3398 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
3399 }
3400
3401 mac->hwlro_ip_cnt = 0;
3402}
3403
3404static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
3405 struct ethtool_rxnfc *cmd)
3406{
3407 struct mtk_mac *mac = netdev_priv(dev);
3408 struct ethtool_rx_flow_spec *fsp =
3409 (struct ethtool_rx_flow_spec *)&cmd->fs;
3410
3411	/* only the TCP destination IPv4 address is meaningful; other fields are ignored */
3412 fsp->flow_type = TCP_V4_FLOW;
3413 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
3414 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
3415
3416 fsp->h_u.tcp_ip4_spec.ip4src = 0;
3417 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
3418 fsp->h_u.tcp_ip4_spec.psrc = 0;
3419 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
3420 fsp->h_u.tcp_ip4_spec.pdst = 0;
3421 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
3422 fsp->h_u.tcp_ip4_spec.tos = 0;
3423 fsp->m_u.tcp_ip4_spec.tos = 0xff;
3424
3425 return 0;
3426}
3427
3428static int mtk_hwlro_get_fdir_all(struct net_device *dev,
3429 struct ethtool_rxnfc *cmd,
3430 u32 *rule_locs)
3431{
3432 struct mtk_mac *mac = netdev_priv(dev);
3433 int cnt = 0;
3434 int i;
3435
3436 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3437 if (mac->hwlro_ip[i]) {
3438 rule_locs[cnt] = i;
3439 cnt++;
3440 }
3441 }
3442
3443 cmd->rule_cnt = cnt;
3444
3445 return 0;
3446}
3447
developer55392d12023-07-10 12:54:02 +08003448u32 mtk_rss_indr_table(struct mtk_rss_params *rss_params, int index)
developeredbe69e2023-06-08 11:08:46 +08003449{
developer55392d12023-07-10 12:54:02 +08003450 u32 val = 0;
3451 int i;
developeredbe69e2023-06-08 11:08:46 +08003452
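	/* Each indirection-table entry is 2 bits wide, so 16 entries pack
	 * into one 32-bit register.
	 */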
developer55392d12023-07-10 12:54:02 +08003453 for (i = 16 * index; i < 16 * index + 16; i++)
3454 val |= (rss_params->indirection_table[i] << (2 * (i % 16)));
developeredbe69e2023-06-08 11:08:46 +08003455
developer55392d12023-07-10 12:54:02 +08003456 return val;
developeredbe69e2023-06-08 11:08:46 +08003457}
3458
developerec4ebe42022-04-12 11:17:45 +08003459static int mtk_rss_init(struct mtk_eth *eth)
3460{
developer55392d12023-07-10 12:54:02 +08003461 struct mtk_rss_params *rss_params = &eth->rss_params;
3462 static u8 hash_key[MTK_RSS_HASH_KEYSIZE] = {
3463 0xfa, 0x01, 0xac, 0xbe, 0x3b, 0xb7, 0x42, 0x6a,
3464 0x0c, 0xf2, 0x30, 0x80, 0xa3, 0x2d, 0xcb, 0x77,
3465 0xb4, 0x30, 0x7b, 0xae, 0xcb, 0x2b, 0xca, 0xd0,
3466 0xb0, 0x8f, 0xa3, 0x43, 0x3d, 0x25, 0x67, 0x41,
3467 0xc2, 0x0e, 0x5b, 0x25, 0xda, 0x56, 0x5a, 0x6d};
developerec4ebe42022-04-12 11:17:45 +08003468 u32 val;
developer55392d12023-07-10 12:54:02 +08003469 int i;
3470
3471 memcpy(rss_params->hash_key, hash_key, MTK_RSS_HASH_KEYSIZE);
3472
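	/* Default mapping: spread the hash buckets round-robin across the
	 * SoC's RSS rings.
	 */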
3473 for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE; i++)
3474 rss_params->indirection_table[i] = i % eth->soc->rss_num;
developerec4ebe42022-04-12 11:17:45 +08003475
developerb35f4fa2023-03-14 13:24:47 +08003476 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
developerec4ebe42022-04-12 11:17:45 +08003477 /* Set RSS rings to PSE modes */
3478 val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(1));
3479 val |= MTK_RING_PSE_MODE;
3480 mtk_w32(eth, val, MTK_LRO_CTRL_DW2_CFG(1));
3481
3482 /* Enable non-lro multiple rx */
3483 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
3484 val |= MTK_NON_LRO_MULTI_EN;
3485 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
3486
3487		/* Enable RSS delay interrupt support */
3488 val |= MTK_LRO_DLY_INT_EN;
3489 mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0);
developerec4ebe42022-04-12 11:17:45 +08003490 }
3491
3492 /* Hash Type */
3493 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
3494 val |= MTK_RSS_IPV4_STATIC_HASH;
3495 val |= MTK_RSS_IPV6_STATIC_HASH;
3496 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3497
developer55392d12023-07-10 12:54:02 +08003498 /* Hash Key */
3499 for (i = 0; i < MTK_RSS_HASH_KEYSIZE / sizeof(u32); i++)
3500 mtk_w32(eth, rss_params->hash_key[i], MTK_RSS_HASH_KEY_DW(i));
3501
developerec4ebe42022-04-12 11:17:45 +08003502 /* Select the size of indirection table */
developer55392d12023-07-10 12:54:02 +08003503 for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE / 16; i++)
3504 mtk_w32(eth, mtk_rss_indr_table(rss_params, i),
3505 MTK_RSS_INDR_TABLE_DW(i));
developerec4ebe42022-04-12 11:17:45 +08003506
3507 /* Pause */
3508 val |= MTK_RSS_CFG_REQ;
3509 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3510
3511	/* Enable RSS */
3512 val |= MTK_RSS_EN;
3513 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3514
3515 /* Release pause */
3516 val &= ~(MTK_RSS_CFG_REQ);
3517 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3518
3519	/* Set per-RSS-ring group interrupt */
developera05cf4c2023-10-27 14:35:41 +08003520 mtk_m32(eth, MTK_RX_DONE_INT(MTK_RSS_RING(0)),
3521 MTK_RX_DONE_INT(MTK_RSS_RING(0)), MTK_PDMA_INT_GRP1);
3522 mtk_m32(eth, MTK_RX_DONE_INT(MTK_RSS_RING(1)),
3523 MTK_RX_DONE_INT(MTK_RSS_RING(1)), MTK_PDMA_INT_GRP2);
3524 mtk_m32(eth, MTK_RX_DONE_INT(MTK_RSS_RING(2)),
3525 MTK_RX_DONE_INT(MTK_RSS_RING(2)), MTK_PDMA_INT_GRP3);
developerec4ebe42022-04-12 11:17:45 +08003526
3527 /* Set GRP INT */
developer4e17c282023-05-30 10:57:24 +08003528 mtk_w32(eth, 0x210FFFF2, MTK_FE_INT_GRP);
developerec4ebe42022-04-12 11:17:45 +08003529
developere86c3ec2022-10-11 10:29:18 +08003530 /* Enable RSS delay interrupt */
developer740bee82023-10-16 10:58:43 +08003531 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
3532 mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX1_DLY_INT);
3533 mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX2_DLY_INT);
3534 mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX3_DLY_INT);
3535 } else
3536 mtk_w32(eth, MTK_MAX_DELAY_INT_V2, MTK_PDMA_RSS_DELAY_INT);
developere86c3ec2022-10-11 10:29:18 +08003537
developerec4ebe42022-04-12 11:17:45 +08003538 return 0;
3539}
3540
3541static void mtk_rss_uninit(struct mtk_eth *eth)
3542{
3543 u32 val;
3544
3545 /* Pause */
3546 val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG);
3547 val |= MTK_RSS_CFG_REQ;
3548 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3549
3550	/* Disable RSS */
3551 val &= ~(MTK_RSS_EN);
3552 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3553
3554 /* Release pause */
3555 val &= ~(MTK_RSS_CFG_REQ);
3556 mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG);
3557}
3558
3559static netdev_features_t mtk_fix_features(struct net_device *dev,
3560 netdev_features_t features)
3561{
3562 if (!(features & NETIF_F_LRO)) {
3563 struct mtk_mac *mac = netdev_priv(dev);
3564 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3565
3566 if (ip_cnt) {
3567			netdev_info(dev, "RX flow rules are programmed, LRO must stay enabled\n");
3568
3569 features |= NETIF_F_LRO;
3570 }
3571 }
3572
3573 if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
3574 netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n");
3575
3576 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
3577 }
3578
3579 return features;
3580}
3581
3582static int mtk_set_features(struct net_device *dev, netdev_features_t features)
3583{
3584 struct mtk_mac *mac = netdev_priv(dev);
3585 struct mtk_eth *eth = mac->hw;
developer7cf584b2023-12-21 13:04:36 +08003586 netdev_features_t lro;
developerec4ebe42022-04-12 11:17:45 +08003587 int err = 0;
3588
3589 if (!((dev->features ^ features) & MTK_SET_FEATURES))
3590 return 0;
3591
developer7cf584b2023-12-21 13:04:36 +08003592 lro = dev->features & NETIF_F_LRO;
3593 if (!(features & NETIF_F_LRO) && lro)
developerec4ebe42022-04-12 11:17:45 +08003594 mtk_hwlro_netdev_disable(dev);
developer7cf584b2023-12-21 13:04:36 +08003595 else if ((features & NETIF_F_LRO) && !lro)
3596 mtk_hwlro_netdev_enable(dev);
developerec4ebe42022-04-12 11:17:45 +08003597
3598 if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
3599 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
3600 else
3601 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3602
3603 return err;
3604}
3605
3606/* wait for DMA to finish whatever it is doing before we start using it again */
3607static int mtk_dma_busy_wait(struct mtk_eth *eth)
3608{
3609 unsigned long t_start = jiffies;
3610
3611 while (1) {
3612 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3613 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
3614 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
3615 return 0;
3616 } else {
3617 if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
3618 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
3619 return 0;
3620 }
3621
3622 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
3623 break;
3624 }
3625
3626 dev_err(eth->dev, "DMA init timeout\n");
3627 return -1;
3628}
3629
3630static int mtk_dma_init(struct mtk_eth *eth)
3631{
3632 int err;
3633 u32 i;
3634
3635 if (mtk_dma_busy_wait(eth))
3636 return -EBUSY;
3637
3638 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3639 /* QDMA needs scratch memory for internal reordering of the
3640 * descriptors
3641 */
3642 err = mtk_init_fq_dma(eth);
3643 if (err)
3644 return err;
3645 }
3646
3647 err = mtk_tx_alloc(eth);
3648 if (err)
3649 return err;
3650
3651 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3652 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
3653 if (err)
3654 return err;
3655 }
3656
3657 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
3658 if (err)
3659 return err;
3660
3661 if (eth->hwlro) {
developera05cf4c2023-10-27 14:35:41 +08003662 for (i = 0; i < MTK_HW_LRO_RING_NUM; i++) {
3663 err = mtk_rx_alloc(eth, MTK_HW_LRO_RING(i), MTK_RX_FLAGS_HWLRO);
developerec4ebe42022-04-12 11:17:45 +08003664 if (err)
3665 return err;
3666 }
3667 err = mtk_hwlro_rx_init(eth);
3668 if (err)
3669 return err;
3670 }
3671
3672 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
developera05cf4c2023-10-27 14:35:41 +08003673 for (i = 0; i < MTK_RX_RSS_NUM; i++) {
3674 err = mtk_rx_alloc(eth, MTK_RSS_RING(i), MTK_RX_FLAGS_NORMAL);
developerec4ebe42022-04-12 11:17:45 +08003675 if (err)
3676 return err;
3677 }
3678 err = mtk_rss_init(eth);
3679 if (err)
3680 return err;
3681 }
3682
3683 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3684 /* Enable random early drop and set drop threshold
3685 * automatically
3686 */
3687 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
developerb35f4fa2023-03-14 13:24:47 +08003688 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
3689 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred2);
developerec4ebe42022-04-12 11:17:45 +08003690 }
3691
3692 return 0;
3693}
3694
3695static void mtk_dma_free(struct mtk_eth *eth)
3696{
developer29f66b32022-07-12 15:23:20 +08003697 const struct mtk_soc_data *soc = eth->soc;
developerec4ebe42022-04-12 11:17:45 +08003698 int i;
3699
3700 for (i = 0; i < MTK_MAC_COUNT; i++)
3701 if (eth->netdev[i])
3702 netdev_reset_queue(eth->netdev[i]);
3703	if (!eth->soc->has_sram && eth->scratch_ring) {
developerb35f4fa2023-03-14 13:24:47 +08003704 dma_free_coherent(eth->dma_dev,
developer29f66b32022-07-12 15:23:20 +08003705 MTK_DMA_SIZE * soc->txrx.txd_size,
3706 eth->scratch_ring, eth->phy_scratch_ring);
developerec4ebe42022-04-12 11:17:45 +08003707 eth->scratch_ring = NULL;
3708 eth->phy_scratch_ring = 0;
3709 }
3710 mtk_tx_clean(eth);
developer9c038292022-07-06 15:03:09 +08003711	mtk_rx_clean(eth, &eth->rx_ring[0], eth->soc->has_sram);
developerec4ebe42022-04-12 11:17:45 +08003712	mtk_rx_clean(eth, &eth->rx_ring_qdma, 0);
3713
3714 if (eth->hwlro) {
3715 mtk_hwlro_rx_uninit(eth);
3716
developera05cf4c2023-10-27 14:35:41 +08003717 for (i = 0; i < MTK_HW_LRO_RING_NUM; i++)
3718 mtk_rx_clean(eth, &eth->rx_ring[MTK_HW_LRO_RING(i)], 0);
developerec4ebe42022-04-12 11:17:45 +08003719 }
3720
3721 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
3722 mtk_rss_uninit(eth);
3723
developera05cf4c2023-10-27 14:35:41 +08003724 for (i = 0; i < MTK_RX_RSS_NUM; i++)
3725 mtk_rx_clean(eth, &eth->rx_ring[MTK_RSS_RING(i)], 1);
developerec4ebe42022-04-12 11:17:45 +08003726 }
3727
3728 if (eth->scratch_head) {
3729 kfree(eth->scratch_head);
3730 eth->scratch_head = NULL;
3731 }
3732}
3733
3734static void mtk_tx_timeout(struct net_device *dev)
3735{
3736 struct mtk_mac *mac = netdev_priv(dev);
3737 struct mtk_eth *eth = mac->hw;
3738
3739 eth->netdev[mac->id]->stats.tx_errors++;
3740 netif_err(eth, tx_err, dev,
3741 "transmit timed out\n");
developer3d2dd692022-04-19 12:53:29 +08003742
3743 if (atomic_read(&reset_lock) == 0)
3744 schedule_work(&eth->pending_work);
developerec4ebe42022-04-12 11:17:45 +08003745}
3746
3747static irqreturn_t mtk_handle_irq_rx(int irq, void *priv)
3748{
3749 struct mtk_napi *rx_napi = priv;
3750 struct mtk_eth *eth = rx_napi->eth;
3751 struct mtk_rx_ring *ring = rx_napi->rx_ring;
3752
developera05cf4c2023-10-27 14:35:41 +08003753 if (unlikely(!(mtk_r32(eth, eth->soc->reg_map->pdma.irq_status) &
developerfce0d152024-01-11 13:37:13 +08003754 mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask) &
developera05cf4c2023-10-27 14:35:41 +08003755 MTK_RX_DONE_INT(ring->ring_no))))
3756 return IRQ_NONE;
3757
developerec4ebe42022-04-12 11:17:45 +08003758 if (likely(napi_schedule_prep(&rx_napi->napi))) {
3759 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(ring->ring_no));
3760 __napi_schedule(&rx_napi->napi);
3761 }
3762
3763 return IRQ_HANDLED;
3764}
3765
3766static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3767{
3768 struct mtk_eth *eth = _eth;
3769
3770 if (likely(napi_schedule_prep(&eth->tx_napi))) {
developer722ab5f2024-02-22 11:01:46 +08003771 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT(0));
developerec4ebe42022-04-12 11:17:45 +08003772 __napi_schedule(&eth->tx_napi);
3773 }
3774
3775 return IRQ_HANDLED;
3776}
3777
3778static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3779{
3780 struct mtk_eth *eth = _eth;
developerb35f4fa2023-03-14 13:24:47 +08003781 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developerec4ebe42022-04-12 11:17:45 +08003782
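	/* Shared FE interrupt: demux RX done from the PDMA block, and TX
	 * done from either QDMA or PDMA depending on the SoC.
	 */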
developerb35f4fa2023-03-14 13:24:47 +08003783 if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT(0)) {
3784 if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT(0))
developerec4ebe42022-04-12 11:17:45 +08003785 mtk_handle_irq_rx(irq, &eth->rx_napi[0]);
3786 }
developer722ab5f2024-02-22 11:01:46 +08003787
3788 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3789 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT(0)) {
3790 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT(0))
3791 mtk_handle_irq_tx(irq, _eth);
3792 }
3793 } else {
3794 if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_TX_DONE_INT(0)) {
3795 if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_TX_DONE_INT(0))
3796 mtk_handle_irq_tx(irq, _eth);
3797 }
developerec4ebe42022-04-12 11:17:45 +08003798 }
3799
3800 return IRQ_HANDLED;
3801}
3802
developer9c038292022-07-06 15:03:09 +08003803static irqreturn_t mtk_handle_irq_fixed_link(int irq, void *_mac)
3804{
3805 struct mtk_mac *mac = _mac;
3806 struct mtk_eth *eth = mac->hw;
3807 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
3808 struct net_device *dev = phylink_priv->dev;
3809 int link_old, link_new;
3810
3811 // clear interrupt status for gpy211
3812 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
3813
3814 link_old = phylink_priv->link;
3815 link_new = _mtk_mdio_read(eth, phylink_priv->phyaddr, MII_BMSR) & BMSR_LSTATUS;
3816
3817 if (link_old != link_new) {
3818 phylink_priv->link = link_new;
3819 if (link_new) {
3820 printk("phylink.%d %s: Link is Up\n", phylink_priv->id, dev->name);
3821 if (dev)
3822 netif_carrier_on(dev);
3823 } else {
3824 printk("phylink.%d %s: Link is Down\n", phylink_priv->id, dev->name);
3825 if (dev)
3826 netif_carrier_off(dev);
3827 }
3828 }
3829
3830 return IRQ_HANDLED;
3831}
3832
developerec4ebe42022-04-12 11:17:45 +08003833#ifdef CONFIG_NET_POLL_CONTROLLER
3834static void mtk_poll_controller(struct net_device *dev)
3835{
3836 struct mtk_mac *mac = netdev_priv(dev);
3837 struct mtk_eth *eth = mac->hw;
3838
developer722ab5f2024-02-22 11:01:46 +08003839 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT(0));
developerec4ebe42022-04-12 11:17:45 +08003840 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
developer4e17c282023-05-30 10:57:24 +08003841 mtk_handle_irq_rx(eth->irq_fe[2], &eth->rx_napi[0]);
developer722ab5f2024-02-22 11:01:46 +08003842 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT(0));
developerec4ebe42022-04-12 11:17:45 +08003843 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
3844}
3845#endif
3846
3847static int mtk_start_dma(struct mtk_eth *eth)
3848{
3849 u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
developerb35f4fa2023-03-14 13:24:47 +08003850 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developerec4ebe42022-04-12 11:17:45 +08003851 int val, err;
3852
3853 err = mtk_dma_init(eth);
3854 if (err) {
3855 mtk_dma_free(eth);
3856 return err;
3857 }
3858
3859 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
developerb35f4fa2023-03-14 13:24:47 +08003860 val = mtk_r32(eth, reg_map->qdma.glo_cfg);
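		/* NETSYS v2/v3 QDMA adds write-back and completion options
		 * (multi-count, WCOMP, DDONE checking) on top of the basic
		 * TX/RX DMA enables used on older SoCs.
		 */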
developere86c3ec2022-10-11 10:29:18 +08003861 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
3862 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developerd8fbe422022-05-03 13:42:22 +08003863 val &= ~MTK_RESV_BUF_MASK;
developerec4ebe42022-04-12 11:17:45 +08003864 mtk_w32(eth,
3865 val | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3866 MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
3867 MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
3868 MTK_RESV_BUF | MTK_WCOMP_EN |
3869 MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
developerb35f4fa2023-03-14 13:24:47 +08003870 MTK_RX_2B_OFFSET, reg_map->qdma.glo_cfg);
developerd8fbe422022-05-03 13:42:22 +08003871 }
developerec4ebe42022-04-12 11:17:45 +08003872 else
3873 mtk_w32(eth,
3874 val | MTK_TX_DMA_EN |
3875 MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
3876 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
3877 MTK_RX_BT_32DWORDS,
developerb35f4fa2023-03-14 13:24:47 +08003878 reg_map->qdma.glo_cfg);
developerec4ebe42022-04-12 11:17:45 +08003879
developerb35f4fa2023-03-14 13:24:47 +08003880 val = mtk_r32(eth, reg_map->pdma.glo_cfg);
developerec4ebe42022-04-12 11:17:45 +08003881 mtk_w32(eth,
3882 val | MTK_RX_DMA_EN | rx_2b_offset |
3883 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
developerb35f4fa2023-03-14 13:24:47 +08003884 reg_map->pdma.glo_cfg);
developerec4ebe42022-04-12 11:17:45 +08003885 } else {
3886 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3887 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
developerb35f4fa2023-03-14 13:24:47 +08003888 reg_map->pdma.glo_cfg);
developerec4ebe42022-04-12 11:17:45 +08003889 }
3890
developerb35f4fa2023-03-14 13:24:47 +08003891 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2) && eth->hwlro) {
developerec4ebe42022-04-12 11:17:45 +08003892 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
3893 mtk_w32(eth, val | MTK_RX_DMA_LRO_EN, MTK_PDMA_GLO_CFG);
3894 }
3895
3896 return 0;
3897}
3898
developer780b9152022-12-15 14:09:45 +08003899void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
developerec4ebe42022-04-12 11:17:45 +08003900{
developer780b9152022-12-15 14:09:45 +08003901 u32 val;
developerec4ebe42022-04-12 11:17:45 +08003902
3903 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3904 return;
3905
developer780b9152022-12-15 14:09:45 +08003906 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));
developerec4ebe42022-04-12 11:17:45 +08003907
developer780b9152022-12-15 14:09:45 +08003908 /* default setup the forward port to send frame to PDMA */
3909 val &= ~0xffff;
developerec4ebe42022-04-12 11:17:45 +08003910
developer780b9152022-12-15 14:09:45 +08003911 /* Enable RX checksum */
3912 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
developerec4ebe42022-04-12 11:17:45 +08003913
developer780b9152022-12-15 14:09:45 +08003914 val |= config;
developerec4ebe42022-04-12 11:17:45 +08003915
developer780b9152022-12-15 14:09:45 +08003916 if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
3917 val |= MTK_GDMA_SPECIAL_TAG;
developerec4ebe42022-04-12 11:17:45 +08003918
developer780b9152022-12-15 14:09:45 +08003919 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
developerec4ebe42022-04-12 11:17:45 +08003920}
3921
developer3d5faf22022-11-29 18:07:22 +08003922void mtk_set_pse_drop(u32 config)
3923{
3924 struct mtk_eth *eth = g_eth;
3925
3926 if (eth)
3927 mtk_w32(eth, config, PSE_PPE0_DROP);
3928}
3929EXPORT_SYMBOL(mtk_set_pse_drop);
3930
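/* With per-port-per-queue (pppq) mode enabled, track link-speed changes on
 * the attached ports and retune the corresponding QDMA TX queue shaper via
 * mtk_set_queue_speed().
 */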
developer722ab5f2024-02-22 11:01:46 +08003931static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3932{
3933 struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3934 struct mtk_eth *eth = mac->hw;
3935 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3936 struct ethtool_link_ksettings s;
3937 struct net_device *ldev;
3938 struct list_head *iter;
3939 struct dsa_port *dp;
3940 unsigned int queue = 0;
3941
3942 if (!eth->pppq_toggle)
3943 return NOTIFY_DONE;
3944
3945 if (event != NETDEV_CHANGE)
3946 return NOTIFY_DONE;
3947
3948 switch (mac->id) {
3949 case MTK_GMAC1_ID:
3950 netdev_for_each_lower_dev(dev, ldev, iter) {
3951 if (netdev_priv(ldev) == mac)
3952 goto dsa_set_speed;
3953 }
3954 break;
3955 case MTK_GMAC2_ID:
3956 if (strcmp(netdev_name(dev), "eth1"))
3957 break;
3958
3959 queue = MTK_QDMA_GMAC2_QID;
3960 goto set_speed;
3961 case MTK_GMAC3_ID:
3962 if (strcmp(netdev_name(dev), "eth2"))
3963 break;
3964
3965 queue = MTK_QDMA_GMAC3_QID;
3966 goto set_speed;
3967 default:
3968		pr_info("%s: invalid mac id\n", __func__);
3969 break;
3970 }
3971
3972 return NOTIFY_DONE;
3973
3974set_speed:
3975 if (__ethtool_get_link_ksettings(dev, &s))
3976 return NOTIFY_DONE;
3977
3978 if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3979 return NOTIFY_DONE;
3980
3981 if (queue >= MTK_QDMA_TX_NUM)
3982 return NOTIFY_DONE;
3983
3984 if (mac->speed > 0 && mac->speed < s.base.speed)
3985 s.base.speed = 0;
3986
3987 mtk_set_queue_speed(eth, queue, s.base.speed);
3988
3989 return NOTIFY_DONE;
3990
3991dsa_set_speed:
3992 if (!dsa_slave_dev_check(dev))
3993 return NOTIFY_DONE;
3994
3995 if (__ethtool_get_link_ksettings(dev, &s))
3996 return NOTIFY_DONE;
3997
3998 if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3999 return NOTIFY_DONE;
4000
4001 dp = dsa_port_from_netdev(dev);
4002 if (dp->index >= MTK_QDMA_TX_NUM)
4003 return NOTIFY_DONE;
4004
4005 if (mac->speed > 0 && mac->speed <= s.base.speed)
4006 s.base.speed = 0;
4007
4008 mtk_set_queue_speed(eth, dp->index, s.base.speed);
4009
4010 return NOTIFY_DONE;
4011}
4012
developerec4ebe42022-04-12 11:17:45 +08004013static int mtk_open(struct net_device *dev)
4014{
4015 struct mtk_mac *mac = netdev_priv(dev);
4016 struct mtk_eth *eth = mac->hw;
developer9c038292022-07-06 15:03:09 +08004017 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
developer4ef16e32023-04-17 14:33:01 +08004018 u32 id = mtk_mac2xgmii_id(eth, mac->id);
developerec4ebe42022-04-12 11:17:45 +08004019 int err, i;
4020 struct device_node *phy_node;
4021
4022 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
4023 if (err) {
4024 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
4025 err);
4026 return err;
4027 }
4028
4029 /* we run 2 netdevs on the same dma ring so we only bring it up once */
4030 if (!refcount_read(&eth->dma_refcnt)) {
4031 int err = mtk_start_dma(eth);
4032
4033 if (err)
4034 return err;
4035
developerec4ebe42022-04-12 11:17:45 +08004036
4037 /* Indicates CDM to parse the MTK special tag from CPU */
4038 if (netdev_uses_dsa(dev)) {
4039 u32 val;
4040 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
4041 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
4042 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
4043 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
4044 }
4045
4046 napi_enable(&eth->tx_napi);
4047 napi_enable(&eth->rx_napi[0].napi);
developer722ab5f2024-02-22 11:01:46 +08004048 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT(0));
developerec4ebe42022-04-12 11:17:45 +08004049 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0));
4050
4051 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
developera05cf4c2023-10-27 14:35:41 +08004052 for (i = 0; i < MTK_RX_RSS_NUM; i++) {
4053 napi_enable(&eth->rx_napi[MTK_RSS_RING(i)].napi);
4054 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(MTK_RSS_RING(i)));
developerec4ebe42022-04-12 11:17:45 +08004055 }
4056 }
4057
developera05cf4c2023-10-27 14:35:41 +08004058 if (MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO)) {
4059 for (i = 0; i < MTK_HW_LRO_RING_NUM; i++) {
4060 napi_enable(&eth->rx_napi[MTK_HW_LRO_RING(i)].napi);
4061 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(MTK_HW_LRO_RING(i)));
4062 }
4063 }
4064
developerec4ebe42022-04-12 11:17:45 +08004065 refcount_set(&eth->dma_refcnt, 1);
4066 }
4067 else
4068 refcount_inc(&eth->dma_refcnt);
4069
developer9c038292022-07-06 15:03:09 +08004070 if (phylink_priv->desc) {
4071		/* Notice: this programming sequence applies only to the GPY211
4072		   single-PHY chip. If your single-PHY chip is not a GPY211,
4073		   contact your PHY vendor for details of:
4074		   - how to enable the link status change interrupt
4075		   - how to clear the interrupt source
4076 */
4077
4078 // clear interrupt source for gpy211
4079 _mtk_mdio_read(eth, phylink_priv->phyaddr, 0x1A);
4080
4081 // enable link status change interrupt for gpy211
4082 _mtk_mdio_write(eth, phylink_priv->phyaddr, 0x19, 0x0001);
4083
4084 phylink_priv->dev = dev;
4085
4086 // override dev pointer for single PHY chip 0
4087 if (phylink_priv->id == 0) {
4088 struct net_device *tmp;
4089
4090 tmp = __dev_get_by_name(&init_net, phylink_priv->label);
4091 if (tmp)
4092 phylink_priv->dev = tmp;
4093 else
4094 phylink_priv->dev = NULL;
4095 }
4096 }
4097
developerec4ebe42022-04-12 11:17:45 +08004098 phylink_start(mac->phylink);
developer722ab5f2024-02-22 11:01:46 +08004099 netif_tx_start_all_queues(dev);
developerec4ebe42022-04-12 11:17:45 +08004100 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer4ef16e32023-04-17 14:33:01 +08004101 if (!phy_node && eth->sgmii->pcs[id].regmap)
4102 regmap_write(eth->sgmii->pcs[id].regmap,
4103 SGMSYS_QPHY_PWR_STATE_CTRL, 0);
developere86c3ec2022-10-11 10:29:18 +08004104
developer780b9152022-12-15 14:09:45 +08004105 mtk_gdm_config(eth, mac->id, MTK_GDMA_TO_PDMA);
4106
developerec4ebe42022-04-12 11:17:45 +08004107 return 0;
4108}
4109
4110static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
4111{
4112 u32 val;
4113 int i;
4114
4115 /* stop the dma engine */
4116 spin_lock_bh(&eth->page_lock);
4117 val = mtk_r32(eth, glo_cfg);
4118 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
4119 glo_cfg);
4120 spin_unlock_bh(&eth->page_lock);
4121
4122 /* wait for dma stop */
4123 for (i = 0; i < 10; i++) {
4124 val = mtk_r32(eth, glo_cfg);
4125 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
developer3d2dd692022-04-19 12:53:29 +08004126 mdelay(20);
developerec4ebe42022-04-12 11:17:45 +08004127 continue;
4128 }
4129 break;
4130 }
4131}
4132
4133static int mtk_stop(struct net_device *dev)
4134{
4135 struct mtk_mac *mac = netdev_priv(dev);
4136 struct mtk_eth *eth = mac->hw;
4137 int i;
developer4ef16e32023-04-17 14:33:01 +08004138 u32 id = mtk_mac2xgmii_id(eth, mac->id);
developerec4ebe42022-04-12 11:17:45 +08004139 u32 val = 0;
4140 struct device_node *phy_node;
4141
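	/* Drop all frames at the GDM first so no new packets reach the DMA
	 * rings while the device is being torn down.
	 */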
developer780b9152022-12-15 14:09:45 +08004142 mtk_gdm_config(eth, mac->id, MTK_GDMA_DROP_ALL);
developerec4ebe42022-04-12 11:17:45 +08004143 netif_tx_disable(dev);
4144
4145 phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
developer4ef16e32023-04-17 14:33:01 +08004146 if (!phy_node && eth->sgmii->pcs[id].regmap) {
4147 regmap_read(eth->sgmii->pcs[id].regmap,
4148 SGMSYS_QPHY_PWR_STATE_CTRL, &val);
developerec4ebe42022-04-12 11:17:45 +08004149 val |= SGMII_PHYA_PWD;
developer4ef16e32023-04-17 14:33:01 +08004150 regmap_write(eth->sgmii->pcs[id].regmap,
4151 SGMSYS_QPHY_PWR_STATE_CTRL, val);
developerec4ebe42022-04-12 11:17:45 +08004152 }
4153
4154	// disable GMAC RX
4155 val = mtk_r32(eth, MTK_MAC_MCR(mac->id));
4156 mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(mac->id));
4157
4158 phylink_stop(mac->phylink);
4159
4160 phylink_disconnect_phy(mac->phylink);
4161
4162 /* only shutdown DMA if this is the last user */
4163 if (!refcount_dec_and_test(&eth->dma_refcnt))
4164 return 0;
4165
developerec4ebe42022-04-12 11:17:45 +08004166
developer722ab5f2024-02-22 11:01:46 +08004167 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT(0));
developerec4ebe42022-04-12 11:17:45 +08004168 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0));
4169 napi_disable(&eth->tx_napi);
4170 napi_disable(&eth->rx_napi[0].napi);
4171
4172 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
developera05cf4c2023-10-27 14:35:41 +08004173 for (i = 0; i < MTK_RX_RSS_NUM; i++) {
4174 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(MTK_RSS_RING(i)));
4175 napi_disable(&eth->rx_napi[MTK_RSS_RING(i)].napi);
developerec4ebe42022-04-12 11:17:45 +08004176 }
4177 }
4178
developera05cf4c2023-10-27 14:35:41 +08004179 if (MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO)) {
4180 for (i = 0; i < MTK_HW_LRO_RING_NUM; i++) {
4181 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(MTK_HW_LRO_RING(i)));
4182 napi_disable(&eth->rx_napi[MTK_HW_LRO_RING(i)].napi);
4183 }
4184 }
4185
developerec4ebe42022-04-12 11:17:45 +08004186 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
developerb35f4fa2023-03-14 13:24:47 +08004187 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
4188 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
developerec4ebe42022-04-12 11:17:45 +08004189
4190 mtk_dma_free(eth);
4191
4192 return 0;
4193}
4194
developer3d2dd692022-04-19 12:53:29 +08004195void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
developerec4ebe42022-04-12 11:17:45 +08004196{
developer3d2dd692022-04-19 12:53:29 +08004197 u32 val = 0, i = 0;
developerec4ebe42022-04-12 11:17:45 +08004198
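	/* Assert the requested reset bits, poll (up to ~5s) until the
	 * hardware latches them, then deassert and let the blocks settle.
	 */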
developerec4ebe42022-04-12 11:17:45 +08004199 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
developer3d2dd692022-04-19 12:53:29 +08004200 reset_bits, reset_bits);
4201
4202 while (i++ < 5000) {
4203 mdelay(1);
4204 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
4205
4206 if ((val & reset_bits) == reset_bits) {
4207 mtk_reset_event_update(eth, MTK_EVENT_COLD_CNT);
4208 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
4209 reset_bits, ~reset_bits);
4210 break;
4211 }
4212 }
4213
developerec4ebe42022-04-12 11:17:45 +08004214 mdelay(10);
4215}
4216
4217static void mtk_clk_disable(struct mtk_eth *eth)
4218{
4219 int clk;
4220
4221 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
4222 clk_disable_unprepare(eth->clks[clk]);
4223}
4224
4225static int mtk_clk_enable(struct mtk_eth *eth)
4226{
4227 int clk, ret;
4228
4229 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
4230 ret = clk_prepare_enable(eth->clks[clk]);
4231 if (ret)
4232 goto err_disable_clks;
4233 }
4234
4235 return 0;
4236
4237err_disable_clks:
4238 while (--clk >= 0)
4239 clk_disable_unprepare(eth->clks[clk]);
4240
4241 return ret;
4242}
4243
4244static int mtk_napi_init(struct mtk_eth *eth)
4245{
4246 struct mtk_napi *rx_napi = &eth->rx_napi[0];
4247 int i;
4248
4249 rx_napi->eth = eth;
4250 rx_napi->rx_ring = &eth->rx_ring[0];
4251 rx_napi->irq_grp_no = 2;
4252
4253 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
developera05cf4c2023-10-27 14:35:41 +08004254 for (i = 0; i < MTK_RX_RSS_NUM; i++) {
4255 rx_napi = &eth->rx_napi[MTK_RSS_RING(i)];
developerec4ebe42022-04-12 11:17:45 +08004256 rx_napi->eth = eth;
developera05cf4c2023-10-27 14:35:41 +08004257 rx_napi->rx_ring = &eth->rx_ring[MTK_RSS_RING(i)];
developerec4ebe42022-04-12 11:17:45 +08004258 rx_napi->irq_grp_no = 2 + i;
4259 }
4260 }
4261
developera05cf4c2023-10-27 14:35:41 +08004262 if (MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO)) {
4263 for (i = 0; i < MTK_HW_LRO_RING_NUM; i++) {
4264 rx_napi = &eth->rx_napi[MTK_HW_LRO_RING(i)];
4265 rx_napi->eth = eth;
4266 rx_napi->rx_ring = &eth->rx_ring[MTK_HW_LRO_RING(i)];
4267 rx_napi->irq_grp_no = 2;
4268 }
4269 }
4270
developerec4ebe42022-04-12 11:17:45 +08004271 return 0;
4272}
4273
developer3d2dd692022-04-19 12:53:29 +08004274static int mtk_hw_init(struct mtk_eth *eth, u32 type)
developerec4ebe42022-04-12 11:17:45 +08004275{
developerb35f4fa2023-03-14 13:24:47 +08004276 u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
4277 ETHSYS_DMA_AG_MAP_PPE;
4278 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
developer3d2dd692022-04-19 12:53:29 +08004279 int i, ret = 0;
developer780b9152022-12-15 14:09:45 +08004280 u32 val;
developerec4ebe42022-04-12 11:17:45 +08004281
developer3d2dd692022-04-19 12:53:29 +08004282 pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
4283 atomic_read(&reset_lock), atomic_read(&force));
developerec4ebe42022-04-12 11:17:45 +08004284
developer3d2dd692022-04-19 12:53:29 +08004285 if (atomic_read(&reset_lock) == 0) {
4286 if (test_and_set_bit(MTK_HW_INIT, &eth->state))
4287 return 0;
developerec4ebe42022-04-12 11:17:45 +08004288
developer3d2dd692022-04-19 12:53:29 +08004289 pm_runtime_enable(eth->dev);
4290 pm_runtime_get_sync(eth->dev);
4291
4292 ret = mtk_clk_enable(eth);
4293 if (ret)
4294 goto err_disable_pm;
4295 }
developerec4ebe42022-04-12 11:17:45 +08004296
developerb35f4fa2023-03-14 13:24:47 +08004297 if (eth->ethsys)
4298 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
4299 of_dma_is_coherent(eth->dma_dev->of_node) *
4300 dma_mask);
4301
developerec4ebe42022-04-12 11:17:45 +08004302 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4303 ret = device_reset(eth->dev);
4304 if (ret) {
4305 dev_err(eth->dev, "MAC reset failed!\n");
4306 goto err_disable_pm;
4307 }
4308
4309 /* enable interrupt delay for RX */
4310 mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
4311
4312 /* disable delay and normal interrupt */
4313 mtk_tx_irq_disable(eth, ~0);
4314 mtk_rx_irq_disable(eth, ~0);
4315
4316 return 0;
4317 }
4318
developer3d2dd692022-04-19 12:53:29 +08004319 pr_info("[%s] execute fe %s reset\n", __func__,
4320 (type == MTK_TYPE_WARM_RESET) ? "warm" : "cold");
developerec4ebe42022-04-12 11:17:45 +08004321
developer3d2dd692022-04-19 12:53:29 +08004322 if (type == MTK_TYPE_WARM_RESET)
4323 mtk_eth_warm_reset(eth);
developerec4ebe42022-04-12 11:17:45 +08004324 else
developer3d2dd692022-04-19 12:53:29 +08004325 mtk_eth_cold_reset(eth);
developerec4ebe42022-04-12 11:17:45 +08004326
developer9faf1ef2023-03-21 16:49:51 +08004327 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4328 mtk_mdc_init(eth);
4329
developerb35f4fa2023-03-14 13:24:47 +08004330 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
developerec4ebe42022-04-12 11:17:45 +08004331 /* Set FE to PDMAv2 if necessary */
4332 mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
4333 }
4334
4335 if (eth->pctl) {
4336 /* Set GE2 driving and slew rate */
4337 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
4338
4339 /* set GE2 TDSEL */
4340 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
4341
4342 /* set GE2 TUNE */
4343 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
4344 }
4345
4346 /* Set linkdown as the default for each GMAC. Its own MCR would be set
4347 * up with the more appropriate value when mtk_mac_config call is being
4348 * invoked.
4349 */
4350 for (i = 0; i < MTK_MAC_COUNT; i++)
4351 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
4352
4353 /* Enable RX VLan Offloading */
4354 if (eth->soc->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
4355 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
4356 else
4357 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
4358
4359 /* enable interrupt delay for RX/TX */
4360 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
4361 mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);
developer722ab5f2024-02-22 11:01:46 +08004362 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
4363 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_TX_DELAY_INT0);
4364 mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_TX_DELAY_INT1);
4365 }
developerec4ebe42022-04-12 11:17:45 +08004366
4367 mtk_tx_irq_disable(eth, ~0);
4368 mtk_rx_irq_disable(eth, ~0);
4369
4370 /* FE int grouping */
developer722ab5f2024-02-22 11:01:46 +08004371 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4372 mtk_w32(eth, MTK_TX_DONE_INT(0), reg_map->qdma.int_grp);
4373 else
4374 mtk_w32(eth, MTK_TX_DONE_INT(0), reg_map->pdma.int_grp);
developerb35f4fa2023-03-14 13:24:47 +08004375 mtk_w32(eth, MTK_RX_DONE_INT(0), reg_map->qdma.int_grp2);
developer722ab5f2024-02-22 11:01:46 +08004376 if (MTK_HAS_CAPS(eth->soc->caps, MTK_PDMA_INT)) {
4377 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4378 mtk_w32(eth, 0x210FFFF2, MTK_FE_INT_GRP);
4379 else
4380 mtk_w32(eth, 0xFFFF1FF2, MTK_FE_INT_GRP);
4381 } else {
developer5ec55b62023-11-14 15:41:21 +08004382 mtk_w32(eth, MTK_RX_DONE_INT(0), reg_map->pdma.int_grp);
4383 mtk_w32(eth, 0x210F2FF3, MTK_FE_INT_GRP);
4384 }
developer0ccb3482022-06-01 10:56:51 +08004385 mtk_w32(eth, MTK_FE_INT_TSO_FAIL |
developer3d2dd692022-04-19 12:53:29 +08004386 MTK_FE_INT_TSO_ILLEGAL | MTK_FE_INT_TSO_ALIGN |
4387 MTK_FE_INT_RFIFO_OV | MTK_FE_INT_RFIFO_UF, MTK_FE_INT_ENABLE);
developerec4ebe42022-04-12 11:17:45 +08004388
developere86c3ec2022-10-11 10:29:18 +08004389 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developerbe718682023-05-12 18:09:06 +08004390 /* PSE dummy page mechanism */
4391 if (eth->soc->caps != MT7988_CAPS || eth->hwver != MTK_HWID_V1)
4392 mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) |
4393 PSE_DUMMY_WORK_GDM(2) | PSE_DUMMY_WORK_GDM(3) |
4394 DUMMY_PAGE_THR, PSE_DUMY_REQ);
4395
developere86c3ec2022-10-11 10:29:18 +08004396 /* PSE should not drop port1, port8 and port9 packets */
4397 mtk_w32(eth, 0x00000302, PSE_NO_DROP_CFG);
4398
developer29344f12022-10-17 12:01:44 +08004399 /* PSE should drop p8 and p9 packets when WDMA Rx ring full*/
4400 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
4401
developer3d5faf22022-11-29 18:07:22 +08004402 /* PSE free buffer drop threshold */
4403 mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));
4404
developere86c3ec2022-10-11 10:29:18 +08004405 /* GDM and CDM Threshold */
developer69bcd592024-03-25 14:26:39 +08004406 mtk_w32(eth, 0x00000004, MTK_CDM2_THRES);
developere86c3ec2022-10-11 10:29:18 +08004407 mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
4408 mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
4409
developer780b9152022-12-15 14:09:45 +08004410 /* Disable GDM1 RX CRC stripping */
4411 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(0));
4412 val &= ~MTK_GDMA_STRP_CRC;
4413 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(0));
4414
developere86c3ec2022-10-11 10:29:18 +08004415 /* PSE GDM3 MIB counter has incorrect hw default values,
4416 * so the driver ought to read clear the values beforehand
4417 * in case ethtool retrieve wrong mib values.
4418 */
4419 for (i = 0; i < MTK_STAT_OFFSET; i += 0x4)
4420 mtk_r32(eth,
4421 MTK_GDM1_TX_GBCNT + MTK_STAT_OFFSET * 2 + i);
4422 } else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerec4ebe42022-04-12 11:17:45 +08004423 /* PSE Free Queue Flow Control */
4424 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
4425
developer9c038292022-07-06 15:03:09 +08004426 /* PSE should not drop port8 and port9 packets from WDMA Tx */
4427 mtk_w32(eth, 0x00000300, PSE_NO_DROP_CFG);
4428
4429 /* PSE should drop p8 and p9 packets when WDMA Rx ring full*/
4430 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
developerec4ebe42022-04-12 11:17:45 +08004431
4432 /* PSE config input queue threshold */
4433 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
4434 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
4435 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
4436 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
4437 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
4438 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
4439 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
4440 mtk_w32(eth, 0x002a000e, PSE_IQ_REV(8));
4441
4442 /* PSE config output queue threshold */
4443 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
4444 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
4445 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
4446 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
4447 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
4448 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
4449 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
4450 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
4451
4452 /* GDM and CDM Threshold */
4453 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
4454 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
4455 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
4456 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
4457 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
4458 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
4459 }
4460
4461 return 0;
4462
4463err_disable_pm:
4464 pm_runtime_put_sync(eth->dev);
4465 pm_runtime_disable(eth->dev);
4466
4467 return ret;
4468}
4469
4470static int mtk_hw_deinit(struct mtk_eth *eth)
4471{
4472 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
4473 return 0;
4474
4475 mtk_clk_disable(eth);
4476
4477 pm_runtime_put_sync(eth->dev);
4478 pm_runtime_disable(eth->dev);
4479
4480 return 0;
4481}
4482
4483static int __init mtk_init(struct net_device *dev)
4484{
4485 struct mtk_mac *mac = netdev_priv(dev);
4486 struct mtk_eth *eth = mac->hw;
4487 const char *mac_addr;
4488
4489 mac_addr = of_get_mac_address(mac->of_node);
4490 if (!IS_ERR(mac_addr))
4491 ether_addr_copy(dev->dev_addr, mac_addr);
4492
4493 /* If the mac address is invalid, use random mac address */
4494 if (!is_valid_ether_addr(dev->dev_addr)) {
4495 eth_hw_addr_random(dev);
4496 dev_err(eth->dev, "generated random MAC address %pM\n",
4497 dev->dev_addr);
4498 }
4499
4500 return 0;
4501}
4502
4503static void mtk_uninit(struct net_device *dev)
4504{
4505 struct mtk_mac *mac = netdev_priv(dev);
4506 struct mtk_eth *eth = mac->hw;
4507
4508 phylink_disconnect_phy(mac->phylink);
4509 mtk_tx_irq_disable(eth, ~0);
4510 mtk_rx_irq_disable(eth, ~0);
4511}
4512
4513static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4514{
4515 struct mtk_mac *mac = netdev_priv(dev);
4516
4517 switch (cmd) {
4518 case SIOCGMIIPHY:
4519 case SIOCGMIIREG:
4520 case SIOCSMIIREG:
4521 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
4522 default:
4523 /* default invoke the mtk_eth_dbg handler */
4524 return mtk_do_priv_ioctl(dev, ifr, cmd);
4525 break;
4526 }
4527
4528 return -EOPNOTSUPP;
4529}
4530
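/* Power every PHY on the "mdio-bus" child node up (enable != 0) or down
 * (enable == 0) by toggling BMCR_PDOWN through the Clause 45 register at
 * devad 0x1e; used around the FE reset in mtk_pending_work().
 */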
developerafda3572022-12-28 16:28:30 +08004531int mtk_phy_config(struct mtk_eth *eth, int enable)
4532{
4533 struct device_node *mii_np = NULL;
4534 struct device_node *child = NULL;
4535 int addr = 0;
4536 u32 val = 0;
4537
4538 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
4539 if (!mii_np) {
4540		dev_err(eth->dev, "no %s child node found\n", "mdio-bus");
4541 return -ENODEV;
4542 }
4543
4544 if (!of_device_is_available(mii_np)) {
4545 dev_err(eth->dev, "device is not available\n");
4546 return -ENODEV;
4547 }
4548
4549 for_each_available_child_of_node(mii_np, child) {
4550 addr = of_mdio_parse_addr(&eth->mii_bus->dev, child);
4551 if (addr < 0)
4552 continue;
4553 pr_info("%s %d addr:%d name:%s\n",
4554 __func__, __LINE__, addr, child->name);
4555 val = _mtk_mdio_read(eth, addr, mdiobus_c45_addr(0x1e, 0));
4556 if (enable)
4557 val &= ~BMCR_PDOWN;
4558 else
4559 val |= BMCR_PDOWN;
4560 _mtk_mdio_write(eth, addr, mdiobus_c45_addr(0x1e, 0), val);
4561 }
4562
4563 return 0;
4564}
4565
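/* Serialized FE recovery path: notify WiFi/offload consumers, stop every
 * running netdev, warm-reset the frame engine via mtk_hw_init(), then
 * restart the devices and broadcast the reset-done events.
 */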
developerec4ebe42022-04-12 11:17:45 +08004566static void mtk_pending_work(struct work_struct *work)
4567{
4568 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
developer3d2dd692022-04-19 12:53:29 +08004569 int err, i = 0;
developerec4ebe42022-04-12 11:17:45 +08004570 unsigned long restart = 0;
developer3d2dd692022-04-19 12:53:29 +08004571 u32 val = 0;
4572
4573 atomic_inc(&reset_lock);
4574 val = mtk_r32(eth, MTK_FE_INT_STATUS);
4575 if (!mtk_check_reset_event(eth, val)) {
4576 atomic_dec(&reset_lock);
4577		pr_info("[%s] No need to do FE reset!\n", __func__);
4578 return;
4579 }
developerec4ebe42022-04-12 11:17:45 +08004580
4581 rtnl_lock();
4582
developerafda3572022-12-28 16:28:30 +08004583 while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
4584 cpu_relax();
4585
4586 mtk_phy_config(eth, 0);
developer3d2dd692022-04-19 12:53:29 +08004587
4588 /* Adjust PPE configurations to prepare for reset */
4589 mtk_prepare_reset_ppe(eth, 0);
4590 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4591 mtk_prepare_reset_ppe(eth, 1);
4592
4593 /* Adjust FE configurations to prepare for reset */
4594 mtk_prepare_reset_fe(eth);
4595
4596 /* Trigger Wifi SER reset */
developer1721ef62022-11-24 14:42:19 +08004597 for (i = 0; i < MTK_MAC_COUNT; i++) {
4598 if (!eth->netdev[i])
4599 continue;
developerafda3572022-12-28 16:28:30 +08004600 if (mtk_reset_flag == MTK_FE_STOP_TRAFFIC) {
4601 pr_info("send MTK_FE_STOP_TRAFFIC event\n");
4602 call_netdevice_notifiers(MTK_FE_STOP_TRAFFIC,
4603 eth->netdev[i]);
4604 } else {
4605 pr_info("send MTK_FE_START_RESET event\n");
4606 call_netdevice_notifiers(MTK_FE_START_RESET,
4607 eth->netdev[i]);
4608 }
developer1721ef62022-11-24 14:42:19 +08004609 rtnl_unlock();
developer8b8f87d2023-04-27 11:01:26 +08004610 if (!wait_for_completion_timeout(&wait_ser_done, 3000)) {
4611 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) &&
4612 (mtk_stop_fail)) {
4613 pr_info("send MTK_FE_START_RESET stop\n");
4614 rtnl_lock();
4615 call_netdevice_notifiers(MTK_FE_START_RESET,
4616 eth->netdev[i]);
4617 rtnl_unlock();
4618 if (!wait_for_completion_timeout(&wait_ser_done,
4619 3000))
4620 pr_warn("wait for MTK_FE_START_RESET\n");
4621 }
developere744f1b2023-02-07 15:43:21 +08004622 pr_warn("wait for MTK_FE_START_RESET\n");
developer8b8f87d2023-04-27 11:01:26 +08004623 }
developer1721ef62022-11-24 14:42:19 +08004624 rtnl_lock();
4625 break;
4626 }
developerec4ebe42022-04-12 11:17:45 +08004627
developer3d2dd692022-04-19 12:53:29 +08004628 del_timer_sync(&eth->mtk_dma_monitor_timer);
4629	pr_info("[%s] mtk_stop starts!\n", __func__);
developerec4ebe42022-04-12 11:17:45 +08004630 /* stop all devices to make sure that dma is properly shut down */
4631 for (i = 0; i < MTK_MAC_COUNT; i++) {
developer7cf584b2023-12-21 13:04:36 +08004632 if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
developerec4ebe42022-04-12 11:17:45 +08004633 continue;
4634 mtk_stop(eth->netdev[i]);
4635 __set_bit(i, &restart);
4636 }
developer3d2dd692022-04-19 12:53:29 +08004637	pr_info("[%s] mtk_stop ends!\n", __func__);
4638 mdelay(15);
developerec4ebe42022-04-12 11:17:45 +08004639
4640 if (eth->dev->pins)
4641 pinctrl_select_state(eth->dev->pins->p,
4642 eth->dev->pins->default_state);
developer3d2dd692022-04-19 12:53:29 +08004643
4644	pr_info("[%s] mtk_hw_init starts!\n", __func__);
4645	mtk_hw_init(eth, MTK_TYPE_WARM_RESET);
4646	pr_info("[%s] mtk_hw_init ends!\n", __func__);
developerec4ebe42022-04-12 11:17:45 +08004647
4648 /* restart DMA and enable IRQs */
4649 for (i = 0; i < MTK_MAC_COUNT; i++) {
developer1721ef62022-11-24 14:42:19 +08004650 if (!test_bit(i, &restart) || !eth->netdev[i])
developerec4ebe42022-04-12 11:17:45 +08004651 continue;
4652 err = mtk_open(eth->netdev[i]);
4653 if (err) {
4654 netif_alert(eth, ifup, eth->netdev[i],
4655 "Driver up/down cycle failed, closing device.\n");
4656 dev_close(eth->netdev[i]);
4657 }
4658 }
4659
developer3d2dd692022-04-19 12:53:29 +08004660 for (i = 0; i < MTK_MAC_COUNT; i++) {
developer1721ef62022-11-24 14:42:19 +08004661 if (!eth->netdev[i])
4662 continue;
developerafda3572022-12-28 16:28:30 +08004663 if (mtk_reset_flag == MTK_FE_STOP_TRAFFIC) {
4664 pr_info("send MTK_FE_START_TRAFFIC event\n");
4665 call_netdevice_notifiers(MTK_FE_START_TRAFFIC,
4666 eth->netdev[i]);
4667 } else {
4668 pr_info("send MTK_FE_RESET_DONE event\n");
4669 call_netdevice_notifiers(MTK_FE_RESET_DONE,
4670 eth->netdev[i]);
developer3d2dd692022-04-19 12:53:29 +08004671 }
developerafda3572022-12-28 16:28:30 +08004672 call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE,
4673 eth->netdev[i]);
developer1721ef62022-11-24 14:42:19 +08004674 break;
4675 }
developer3d2dd692022-04-19 12:53:29 +08004676
4677 atomic_dec(&reset_lock);
developer3d2dd692022-04-19 12:53:29 +08004678
4679 timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
4680 eth->mtk_dma_monitor_timer.expires = jiffies;
4681 add_timer(&eth->mtk_dma_monitor_timer);
developerafda3572022-12-28 16:28:30 +08004682
4683 mtk_phy_config(eth, 1);
4684 mtk_reset_flag = 0;
developerec4ebe42022-04-12 11:17:45 +08004685 clear_bit_unlock(MTK_RESETTING, &eth->state);
4686
4687 rtnl_unlock();
4688}

static int mtk_free_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		free_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_unreg_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		struct mtk_mac *mac;

		if (!eth->netdev[i])
			continue;
		mac = netdev_priv(eth->netdev[i]);
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
			unregister_netdevice_notifier(&mac->device_notifier);
		unregister_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_cleanup(struct mtk_eth *eth)
{
	mtk_unreg_dev(eth);
	mtk_free_dev(eth);
	cancel_work_sync(&eth->pending_work);

	return 0;
}
static int mtk_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
}

static int mtk_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	if (!mac->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_nway_reset(mac->phylink);
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}
static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hwstats->stats_lock);
		}
	}
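	/*
	 * Copy the counters out under the u64_stats retry loop so that a
	 * concurrent writer cannot hand us torn 64-bit values on 32-bit
	 * hosts.
	 */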
	data_src = (u64 *)hwstats;

	do {
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		if (dev->hw_features & NETIF_F_LRO) {
			cmd->data = MTK_MAX_RX_RING_NUM;
			ret = 0;
		} else if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
			cmd->data = eth->soc->rss_num;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (dev->hw_features & NETIF_F_LRO) {
			cmd->rule_cnt = mac->hwlro_ip_cnt;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRULE:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_all(dev, cmd,
						     rule_locs);
		break;
	default:
		break;
	}

	return ret;
}

static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_add_ipaddr(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_del_ipaddr(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}
static u32 mtk_get_rxfh_key_size(struct net_device *dev)
{
	return MTK_RSS_HASH_KEYSIZE;
}

static u32 mtk_get_rxfh_indir_size(struct net_device *dev)
{
	return MTK_RSS_MAX_INDIRECTION_TABLE;
}

static int mtk_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			u8 *hfunc)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_rss_params *rss_params = &eth->rss_params;
	int i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	if (key) {
		memcpy(key, rss_params->hash_key,
		       sizeof(rss_params->hash_key));
	}

	if (indir) {
		for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE; i++)
			indir[i] = rss_params->indirection_table[i];
	}

	return 0;
}
static int mtk_set_rxfh(struct net_device *dev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_rss_params *rss_params = &eth->rss_params;
	int i;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;
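	/*
	 * Update the shadow copy first, then flush the key and the
	 * indirection table to the RSS registers one 32-bit word at a
	 * time.
	 */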
	if (key) {
		memcpy(rss_params->hash_key, key,
		       sizeof(rss_params->hash_key));

		for (i = 0; i < MTK_RSS_HASH_KEYSIZE / sizeof(u32); i++)
			mtk_w32(eth, rss_params->hash_key[i],
				MTK_RSS_HASH_KEY_DW(i));
	}

	if (indir) {
		for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE; i++)
			rss_params->indirection_table[i] = indir[i];

		for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE / 16; i++)
			mtk_w32(eth, mtk_rss_indr_table(rss_params, i),
				MTK_RSS_INDR_TABLE_DW(i));
	}

	return 0;
}

static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 val;

	pause->autoneg = 0;

	if (mac->type == MTK_GDM_TYPE) {
		val = mtk_r32(eth, MTK_MAC_MCR(mac->id));

		pause->rx_pause = !!(val & MAC_MCR_FORCE_RX_FC);
		pause->tx_pause = !!(val & MAC_MCR_FORCE_TX_FC);
	} else if (mac->type == MTK_XGDM_TYPE) {
		val = mtk_r32(eth, MTK_XMAC_MCR(mac->id));

		pause->rx_pause = !!(val & XMAC_MCR_FORCE_RX_FC);
		pause->tx_pause = !!(val & XMAC_MCR_FORCE_TX_FC);
	}
}

static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return phylink_ethtool_set_pauseparam(mac->phylink, pause);
}

static int mtk_get_eee(struct net_device *dev, struct ethtool_eee *eee)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 val;

	if (mac->type == MTK_GDM_TYPE) {
		val = mtk_r32(eth, MTK_MAC_EEE(mac->id));

		eee->tx_lpi_enabled = mac->tx_lpi_enabled;
		eee->tx_lpi_timer = FIELD_GET(MAC_EEE_LPI_TXIDLE_THD, val);
	}

	return phylink_ethtool_get_eee(mac->phylink, eee);
}

static int mtk_set_eee(struct net_device *dev, struct ethtool_eee *eee)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (mac->type == MTK_GDM_TYPE) {
		if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
			return -EINVAL;

		mac->tx_lpi_timer = eee->tx_lpi_timer;

		mtk_setup_eee(mac, eee->eee_enabled && eee->tx_lpi_timer);
	}

	return phylink_ethtool_set_eee(mac->phylink, eee);
}
static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	unsigned int queue = 0;
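	/*
	 * A non-zero skb->mark below MTK_QDMA_TX_NUM selects a QDMA TX
	 * queue directly.  Otherwise, with per-port-per-queue (pppq)
	 * enabled, GMAC1 keeps the mapping chosen by the stack while
	 * GMAC2/GMAC3 are pinned to their dedicated queues; without it
	 * the queue is derived from the MAC id alone.
	 */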
	if (skb->mark > 0 && skb->mark < MTK_QDMA_TX_NUM)
		return skb->mark;

	if (eth->pppq_toggle) {
		switch (mac->id) {
		case MTK_GMAC1_ID:
			queue = skb_get_queue_mapping(skb);
			break;
		case MTK_GMAC2_ID:
			queue = MTK_QDMA_GMAC2_QID;
			break;
		case MTK_GMAC3_ID:
			queue = MTK_QDMA_GMAC3_QID;
			break;
		default:
			pr_info("%s: invalid mac id\n", __func__);
			break;
		}
	} else {
		queue = mac->id ? MTK_QDMA_GMAC2_QID : 0;
	}

	if (queue >= MTK_QDMA_TX_NUM)
		queue = 0;

	return queue;
}

static const struct ethtool_ops mtk_ethtool_ops = {
	.get_link_ksettings = mtk_get_link_ksettings,
	.set_link_ksettings = mtk_set_link_ksettings,
	.get_drvinfo = mtk_get_drvinfo,
	.get_msglevel = mtk_get_msglevel,
	.set_msglevel = mtk_set_msglevel,
	.nway_reset = mtk_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_strings = mtk_get_strings,
	.get_sset_count = mtk_get_sset_count,
	.get_ethtool_stats = mtk_get_ethtool_stats,
	.get_rxnfc = mtk_get_rxnfc,
	.set_rxnfc = mtk_set_rxnfc,
	.get_rxfh_key_size = mtk_get_rxfh_key_size,
	.get_rxfh_indir_size = mtk_get_rxfh_indir_size,
	.get_rxfh = mtk_get_rxfh,
	.set_rxfh = mtk_set_rxfh,
	.get_pauseparam = mtk_get_pauseparam,
	.set_pauseparam = mtk_set_pauseparam,
	.get_eee = mtk_get_eee,
	.set_eee = mtk_set_eee,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init = mtk_init,
	.ndo_uninit = mtk_uninit,
	.ndo_open = mtk_open,
	.ndo_stop = mtk_stop,
	.ndo_start_xmit = mtk_start_xmit,
	.ndo_select_queue = mtk_select_queue,
	.ndo_set_mac_address = mtk_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = mtk_do_ioctl,
	.ndo_tx_timeout = mtk_tx_timeout,
	.ndo_get_stats64 = mtk_get_stats64,
	.ndo_fix_features = mtk_fix_features,
	.ndo_set_features = mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = mtk_poll_controller,
#endif
};
static void mux_poll(struct work_struct *work)
{
	struct mtk_mux *mux = container_of(work, struct mtk_mux, poll.work);
	struct mtk_mac *mac = mux->mac;
	struct mtk_eth *eth = mac->hw;
	struct net_device *dev = eth->netdev[mac->id];
	unsigned int channel;

	if (IS_ERR(mux->gpio[0]) || IS_ERR(mux->gpio[1]))
		goto exit;

	channel = gpiod_get_value_cansleep(mux->gpio[0]);
	if (mux->channel == channel || !netif_running(dev))
		goto exit;
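	/*
	 * The mux channel changed while the interface is up: stop the
	 * MAC, repoint of_node/phylink at the newly selected channel,
	 * drive the select GPIO and bring the MAC back up.
	 */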
	rtnl_lock();

	mtk_stop(dev);

	if (channel == 0 || channel == 1) {
		mac->of_node = mux->data[channel]->of_node;
		mac->phylink = mux->data[channel]->phylink;
	}

	dev_info(eth->dev, "ethernet mux: switch to channel%d\n", channel);

	gpiod_set_value_cansleep(mux->gpio[1], channel);

	mtk_open(dev);

	rtnl_unlock();

	mux->channel = channel;

exit:
	mod_delayed_work(system_wq, &mux->poll, msecs_to_jiffies(100));
}

static int mtk_add_mux_channel(struct mtk_mux *mux, struct device_node *np)
{
	const __be32 *_id = of_get_property(np, "reg", NULL);
	struct mtk_mac *mac = mux->mac;
	struct mtk_eth *eth = mac->hw;
	struct mtk_mux_data *data;
	struct phylink *phylink;
	int phy_mode, id;

	if (!_id) {
		dev_err(eth->dev, "missing mux channel id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id < 0 || id > 1) {
		dev_err(eth->dev, "%d is not a valid mux channel id\n", id);
		return -EINVAL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (unlikely(!data)) {
		dev_err(eth->dev, "failed to create mux data structure\n");
		return -ENOMEM;
	}

	mux->data[id] = data;

	/* phylink create */
	phy_mode = of_get_phy_mode(np);
	if (phy_mode < 0) {
		dev_err(eth->dev, "incorrect phy-mode\n");
		return -EINVAL;
	}

	phylink = phylink_create(&mux->mac->phylink_config,
				 of_fwnode_handle(np),
				 phy_mode, &mtk_phylink_ops);
	if (IS_ERR(phylink)) {
		dev_err(eth->dev, "failed to create phylink structure\n");
		return PTR_ERR(phylink);
	}

	data->of_node = np;
	data->phylink = phylink;

	return 0;
}

static int mtk_add_mux(struct mtk_eth *eth, struct device_node *np)
{
	const __be32 *_id = of_get_property(np, "reg", NULL);
	struct device_node *child;
	struct mtk_mux *mux;
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing attach mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id < 0 || id >= MTK_MAX_DEVS) {
		dev_err(eth->dev, "%d is not a valid attach mac id\n", id);
		return -EINVAL;
	}

	mux = kmalloc(sizeof(*mux), GFP_KERNEL);
	if (unlikely(!mux)) {
		dev_err(eth->dev, "failed to create mux structure\n");
		return -ENOMEM;
	}

	eth->mux[id] = mux;

	mux->mac = eth->mac[id];
	mux->channel = 0;
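	/*
	 * gpio[0] (mod-def0) reports the currently plugged channel and
	 * gpio[1] (chan-sel) steers the mux; mux_poll() samples them
	 * every 100 ms after an initial 3 s settle delay.
	 */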
	mux->gpio[0] = fwnode_get_named_gpiod(of_fwnode_handle(np),
					      "mod-def0-gpios", 0,
					      GPIOD_IN, "?");
	if (IS_ERR(mux->gpio[0]))
		dev_err(eth->dev, "failed to request gpio for mod-def0-gpios\n");

	mux->gpio[1] = fwnode_get_named_gpiod(of_fwnode_handle(np),
					      "chan-sel-gpios", 0,
					      GPIOD_OUT_LOW, "?");
	if (IS_ERR(mux->gpio[1]))
		dev_err(eth->dev, "failed to request gpio for chan-sel-gpios\n");

	for_each_child_of_node(np, child) {
		err = mtk_add_mux_channel(mux, child);
		if (err) {
			dev_err(eth->dev, "failed to add mtk_mux\n");
			/* balance the iterator's reference on early exit */
			of_node_put(child);
			return -ECHILD;
		}
	}

	INIT_DELAYED_WORK(&mux->poll, mux_poll);
	mod_delayed_work(system_wq, &mux->poll, msecs_to_jiffies(3000));

	return 0;
}

static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	const __be32 *_id = of_get_property(np, "reg", NULL);
	const char *label;
	struct phylink *phylink;
	int mac_type, phy_mode, id, err;
	struct mtk_mac *mac;
	struct mtk_phylink_priv *phylink_priv;
	struct fwnode_handle *fixed_node;
	struct gpio_desc *desc;
	int txqs = 1;
	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id < 0 || id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		txqs = MTK_QDMA_TX_NUM;

	eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
	mac->hwlro_ip_cnt = 0;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	/* phylink create */
	phy_mode = of_get_phy_mode(np);
	if (phy_mode < 0) {
		dev_err(eth->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto free_netdev;
	}

	/* mac config is not set */
	mac->interface = PHY_INTERFACE_MODE_NA;
	mac->mode = MLO_AN_PHY;
	mac->speed = SPEED_UNKNOWN;

	mac->tx_lpi_timer = 1;

	mac->phylink_config.dev = &eth->netdev[id]->dev;
	mac->phylink_config.type = PHYLINK_NETDEV;

	mac->type = 0;
	if (!of_property_read_string(np, "mac-type", &label)) {
		for (mac_type = 0; mac_type < MTK_GDM_TYPE_MAX; mac_type++) {
			if (!strcasecmp(label, gdm_type(mac_type)))
				break;
		}

		switch (mac_type) {
		case 0:
			mac->type = MTK_GDM_TYPE;
			break;
		case 1:
			mac->type = MTK_XGDM_TYPE;
			break;
		default:
			dev_warn(eth->dev, "incorrect mac-type\n");
			break;
		}
	}
	phylink = phylink_create(&mac->phylink_config,
				 of_fwnode_handle(mac->of_node),
				 phy_mode, &mtk_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto free_netdev;
	}

	mac->phylink = phylink;

	fixed_node = fwnode_get_named_child_node(of_fwnode_handle(mac->of_node),
						 "fixed-link");
	if (fixed_node) {
		desc = fwnode_get_named_gpiod(fixed_node, "link-gpio",
					      0, GPIOD_IN, "?");
		if (!IS_ERR(desc)) {
			struct device_node *phy_np;
			const char *label;
			int irq, phyaddr;

			phylink_priv = &mac->phylink_priv;

			phylink_priv->desc = desc;
			phylink_priv->id = id;
			phylink_priv->link = -1;

			irq = gpiod_to_irq(desc);
			if (irq > 0) {
				devm_request_irq(eth->dev, irq, mtk_handle_irq_fixed_link,
						 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
						 "ethernet:fixed link", mac);
			}

			if (!of_property_read_string(to_of_node(fixed_node),
						     "label", &label)) {
				if (strlen(label) < 16) {
					strncpy(phylink_priv->label, label,
						strlen(label));
				} else {
					dev_err(eth->dev, "insufficient space for label!\n");
				}
			}

			phy_np = of_parse_phandle(to_of_node(fixed_node), "phy-handle", 0);
			if (phy_np) {
				if (!of_property_read_u32(phy_np, "reg", &phyaddr))
					phylink_priv->phyaddr = phyaddr;
			}
		}
		fwnode_handle_put(fixed_node);
	}

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;

	eth->netdev[id]->hw_features = eth->soc->hw_features;
	if (eth->hwlro)
		eth->netdev[id]->hw_features |= NETIF_F_LRO;

	eth->netdev[id]->vlan_features = eth->soc->hw_features &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= eth->soc->hw_features;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	eth->netdev[id]->irq = eth->irq_fe[0];
	eth->netdev[id]->dev.of_node = np;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mac->device_notifier.notifier_call = mtk_device_event;
		register_netdevice_notifier(&mac->device_notifier);
	}

	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}

void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(dev_list);
	int i;

	rtnl_lock();

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		dev = eth->netdev[i];

		if (!dev || !(dev->flags & IFF_UP))
			continue;

		list_add_tail(&dev->close_list, &dev_list);
	}

	dev_close_many(&dev_list, false);

	eth->dma_dev = dma_dev;

	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
		list_del_init(&dev->close_list);
		dev_open(dev, NULL);
	}

	rtnl_unlock();
}

static int mtk_probe(struct platform_device *pdev)
{
	struct device_node *mac_np, *mux_np;
	struct mtk_eth *eth;
	int err, i;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->soc = of_device_get_match_data(&pdev->dev);

	eth->dev = &pdev->dev;
	eth->dma_dev = &pdev->dev;
	eth->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(eth->sram_base))
			return PTR_ERR(eth->sram_base);
	} else {
		eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
	}

	if (eth->soc->has_sram) {
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (unlikely(!res))
			return -EINVAL;
		eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
	}

	mtk_get_hwver(eth);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		eth->ip_align = NET_IP_ALIGN;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
		if (!err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "Wrong DMA config\n");
				return -EINVAL;
			}
		}
	}
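	/*
	 * SoCs with 8 GB addressing use a 36-bit streaming DMA mask but
	 * keep coherent allocations (the descriptor rings) below the
	 * 4 GB boundary.
	 */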
	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->tx_irq_lock);
	spin_lock_init(&eth->rx_irq_lock);
	spin_lock_init(&eth->txrx_irq_lock);
	spin_lock_init(&eth->syscfg0_lock);

	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							      "mediatek,ethsys");
		if (IS_ERR(eth->ethsys)) {
			dev_err(&pdev->dev, "no ethsys regmap found\n");
			return PTR_ERR(eth->ethsys);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							     "mediatek,infracfg");
		if (IS_ERR(eth->infra)) {
			dev_err(&pdev->dev, "no infracfg regmap found\n");
			return PTR_ERR(eth->infra);
		}
	}

	if (of_dma_is_coherent(pdev->dev.of_node)) {
		struct regmap *cci;

		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						      "cci-control-port");
		/* enable CPU/bus coherency */
		if (!IS_ERR(cci))
			regmap_write(cci, 0, 3);
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
					  GFP_KERNEL);
		if (!eth->sgmii)
			return -ENOMEM;

		err = mtk_sgmii_init(eth, pdev->dev.of_node,
				     eth->soc->ana_rgc3);
		if (err)
			return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
		eth->usxgmii = devm_kzalloc(eth->dev, sizeof(*eth->usxgmii),
					    GFP_KERNEL);
		if (!eth->usxgmii)
			return -ENOMEM;

		err = mtk_usxgmii_init(eth, pdev->dev.of_node);
		if (err)
			return err;

		err = mtk_toprgu_init(eth, pdev->dev.of_node);
		if (err)
			return err;
	}

	if (eth->soc->required_pctl) {
		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "mediatek,pctl");
		if (IS_ERR(eth->pctl)) {
			dev_err(&pdev->dev, "no pctl regmap found\n");
			return PTR_ERR(eth->pctl);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_PDMA_INT)) {
		for (i = 0; i < MTK_PDMA_IRQ_NUM; i++)
			eth->irq_pdma[i] = platform_get_irq(pdev, i);
	}
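	/*
	 * IRQ layout: on SoCs with dedicated PDMA interrupts the first
	 * MTK_PDMA_IRQ_NUM platform IRQs belong to the PDMA and the FE
	 * IRQs follow them; on shared-interrupt SoCs every FE slot
	 * aliases IRQ 0.
	 */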
	for (i = 0; i < MTK_FE_IRQ_NUM; i++) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
			eth->irq_fe[i] = eth->irq_fe[0];
		else if (MTK_HAS_CAPS(eth->soc->caps, MTK_PDMA_INT))
			eth->irq_fe[i] =
				platform_get_irq(pdev, i + MTK_PDMA_IRQ_NUM);
		else
			eth->irq_fe[i] = platform_get_irq(pdev, i);

		if (eth->irq_fe[i] < 0) {
			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
			return -ENXIO;
		}
	}

	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev,
					    mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			if (eth->soc->required_clks & BIT(i)) {
				dev_err(&pdev->dev, "clock %s not found\n",
					mtk_clks_source_name[i]);
				return -EINVAL;
			}
			eth->clks[i] = NULL;
		}
	}

	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth, MTK_TYPE_COLD_RESET);
	if (err)
		return err;

	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err) {
			of_node_put(mac_np);
			goto err_deinit_hw;
		}
	}

	mux_np = of_get_child_by_name(eth->dev->of_node, "mux-bus");
	if (mux_np) {
		struct device_node *child;

		for_each_available_child_of_node(mux_np, child) {
			if (!of_device_is_compatible(child,
						     "mediatek,eth-mux"))
				continue;

			if (!of_device_is_available(child))
				continue;

			err = mtk_add_mux(eth, child);
			if (err)
				dev_err(&pdev->dev, "failed to add mux\n");
		}
		/* drop the mux-bus reference once, after the iteration */
		of_node_put(mux_np);
	}
	err = mtk_napi_init(eth);
	if (err)
		goto err_free_dev;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
		err = devm_request_irq(eth->dev, eth->irq_fe[0],
				       mtk_handle_irq, 0,
				       dev_name(eth->dev), eth);
	} else {
		err = devm_request_irq(eth->dev, eth->irq_fe[1],
				       mtk_handle_irq_tx, 0,
				       dev_name(eth->dev), eth);
		if (err)
			goto err_free_dev;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_PDMA_INT)) {
			err = devm_request_irq(eth->dev, eth->irq_fe[2],
					       mtk_handle_fe_irq, 0,
					       dev_name(eth->dev), eth);
			if (err)
				goto err_free_dev;

			err = devm_request_irq(eth->dev, eth->irq_pdma[0],
					       mtk_handle_irq_rx, IRQF_SHARED,
					       dev_name(eth->dev), &eth->rx_napi[0]);
			if (err)
				goto err_free_dev;

			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
				for (i = 0; i < MTK_RX_RSS_NUM; i++) {
					err = devm_request_irq(eth->dev,
							       eth->irq_pdma[MTK_RSS_RING(i)],
							       mtk_handle_irq_rx, IRQF_SHARED,
							       dev_name(eth->dev),
							       &eth->rx_napi[MTK_RSS_RING(i)]);
					if (err)
						goto err_free_dev;
				}
			}

			if (MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO)) {
				i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2) ||
				     MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) ? 0 : 1;
				for (; i < MTK_HW_LRO_RING_NUM; i++) {
					err = devm_request_irq(eth->dev,
							       eth->irq_pdma[i],
							       mtk_handle_irq_rx, IRQF_SHARED,
							       dev_name(eth->dev),
							       &eth->rx_napi[MTK_HW_LRO_RING(i)]);
					if (err)
						goto err_free_dev;
				}
			}
		} else {
			err = devm_request_irq(eth->dev, eth->irq_fe[2],
					       mtk_handle_irq_rx, 0,
					       dev_name(eth->dev), &eth->rx_napi[0]);
			if (err)
				goto err_free_dev;

			if (MTK_FE_IRQ_NUM > 3) {
				err = devm_request_irq(eth->dev, eth->irq_fe[3],
						       mtk_handle_fe_irq, 0,
						       dev_name(eth->dev), eth);
				if (err)
					goto err_free_dev;
			}
		}
	}

	if (err)
		goto err_free_dev;

	/* No MT7628/88 support yet */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		err = mtk_mdio_init(eth);
		if (err)
			goto err_free_dev;
	}

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;

		err = register_netdev(eth->netdev[i]);
		if (err) {
			dev_err(eth->dev, "error bringing up device\n");
			goto err_deinit_mdio;
		} else {
			netif_info(eth, probe, eth->netdev[i],
				   "mediatek frame engine at 0x%08lx, irq %d\n",
				   eth->netdev[i]->base_addr, eth->irq_fe[0]);
		}
	}

	/* we run 2 devices on the same DMA ring so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
		       MTK_NAPI_WEIGHT);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi[0].napi, mtk_napi_rx,
		       MTK_NAPI_WEIGHT);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
		for (i = 0; i < MTK_RX_RSS_NUM; i++)
			netif_napi_add(&eth->dummy_dev, &eth->rx_napi[MTK_RSS_RING(i)].napi,
				       mtk_napi_rx, MTK_NAPI_WEIGHT);
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO)) {
		for (i = 0; i < MTK_HW_LRO_RING_NUM; i++) {
			netif_napi_add(&eth->dummy_dev, &eth->rx_napi[MTK_HW_LRO_RING(i)].napi,
				       mtk_napi_rx, MTK_NAPI_WEIGHT);
		}
	}

	mtketh_debugfs_init(eth);
	debug_proc_init(eth);

	platform_set_drvdata(pdev, eth);

	register_netdevice_notifier(&mtk_eth_netdevice_nb);
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
	timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
	eth->mtk_dma_monitor_timer.expires = jiffies;
	add_timer(&eth->mtk_dma_monitor_timer);
#endif

	return 0;

err_deinit_mdio:
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);

	return err;
}

static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	struct mtk_mac *mac;
	int i;

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		mac = netdev_priv(eth->netdev[i]);
		phylink_disconnect_phy(mac->phylink);
	}

	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi[0].napi);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
		for (i = 1; i < MTK_RX_NAPI_NUM; i++)
			netif_napi_del(&eth->rx_napi[i].napi);
	}

	mtk_cleanup(eth);
	mtk_mdio_cleanup(eth);
	unregister_netdevice_notifier(&mtk_eth_netdevice_nb);
	del_timer_sync(&eth->mtk_dma_monitor_timer);

	return 0;
}

static const struct mtk_soc_data mt2701_data = {
	.reg_map = &mtk_reg_map,
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
	.has_sram = false,
	.rss_num = 0,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_dma_l4_valid = RX_DMA_L4_VALID,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

static const struct mtk_soc_data mt7621_data = {
	.reg_map = &mtk_reg_map,
	.caps = MT7621_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7621_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = false,
	.rss_num = 0,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_dma_l4_valid = RX_DMA_L4_VALID,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

static const struct mtk_soc_data mt7622_data = {
	.reg_map = &mtk_reg_map,
	.ana_rgc3 = 0x2028,
	.caps = MT7622_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7622_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = false,
	.rss_num = 0,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_dma_l4_valid = RX_DMA_L4_VALID,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

static const struct mtk_soc_data mt7623_data = {
	.reg_map = &mtk_reg_map,
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
	.has_sram = false,
	.rss_num = 0,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_dma_l4_valid = RX_DMA_L4_VALID,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

static const struct mtk_soc_data mt7629_data = {
	.reg_map = &mtk_reg_map,
	.ana_rgc3 = 0x128,
	.caps = MT7629_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7629_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = false,
	.rss_num = 0,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_dma_l4_valid = RX_DMA_L4_VALID,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

static const struct mtk_soc_data mt7986_data = {
	.reg_map = &mt7986_reg_map,
	.ana_rgc3 = 0x128,
	.caps = MT7986_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7986_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = false,
	.rss_num = 4,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma_v2),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
	},
};

static const struct mtk_soc_data mt7981_data = {
	.reg_map = &mt7986_reg_map,
	.ana_rgc3 = 0x128,
	.caps = MT7981_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7981_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = false,
	.rss_num = 4,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma_v2),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
	},
};

static const struct mtk_soc_data mt7988_data = {
	.reg_map = &mt7988_reg_map,
	.ana_rgc3 = 0x128,
	.caps = MT7988_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7988_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = true,
	.rss_num = 4,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma_v2),
		.rxd_size = sizeof(struct mtk_rx_dma_v2),
		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
	},
};

static const struct mtk_soc_data rt5350_data = {
	.reg_map = &mt7628_reg_map,
	.caps = MT7628_CAPS,
	.hw_features = MTK_HW_FEATURES_MT7628,
	.required_clks = MT7628_CLKS_BITMAP,
	.required_pctl = false,
	.has_sram = false,
	.rss_num = 0,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
	},
};

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
	{ .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
	{ .compatible = "mediatek,mt7988-eth", .data = &mt7988_data},
	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");