blob: 409b834514e20898ae490db7e81e2a2281d9981a [file] [log] [blame]
developerfd40db22021-04-29 10:08:25 +08001// SPDX-License-Identifier: GPL-2.0-only
2/*
3 *
4 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7 */
8
9#include <linux/of_device.h>
10#include <linux/of_mdio.h>
11#include <linux/of_net.h>
developer3f28d382023-03-07 16:06:30 +080012#include <linux/of_address.h>
developerfd40db22021-04-29 10:08:25 +080013#include <linux/mfd/syscon.h>
14#include <linux/regmap.h>
15#include <linux/clk.h>
16#include <linux/pm_runtime.h>
17#include <linux/if_vlan.h>
18#include <linux/reset.h>
19#include <linux/tcp.h>
20#include <linux/interrupt.h>
21#include <linux/pinctrl/devinfo.h>
22#include <linux/phylink.h>
developera2613e62022-07-01 18:29:37 +080023#include <linux/gpio/consumer.h>
developerfd40db22021-04-29 10:08:25 +080024#include <net/dsa.h>
25
26#include "mtk_eth_soc.h"
27#include "mtk_eth_dbg.h"
developer8051e042022-04-08 13:26:36 +080028#include "mtk_eth_reset.h"
developerfd40db22021-04-29 10:08:25 +080029
30#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
31#include "mtk_hnat/nf_hnat_mtk.h"
32#endif
33
/* Default message level; -1 lets netif_msg_init() pick the driver default. */
static int mtk_msg_level = -1;
/* Serializes SER (system error recovery) resets across contexts. */
atomic_t reset_lock = ATOMIC_INIT(0);
/* Non-zero when a forced FE reset has been requested (see mtk_mac_config). */
atomic_t force = ATOMIC_INIT(0);

module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
/* Completed when the SER handshake with the WiFi/warp firmware is done. */
DECLARE_COMPLETION(wait_ser_done);

/* Builds a {name, u64-slot-offset} pair for the ethtool stats table below. */
#define MTK_ETHTOOL_STAT(x) { #x, \
			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
44
developer68ce74f2023-01-03 16:11:57 +080045static const struct mtk_reg_map mtk_reg_map = {
46 .tx_irq_mask = 0x1a1c,
47 .tx_irq_status = 0x1a18,
48 .pdma = {
49 .rx_ptr = 0x0900,
50 .rx_cnt_cfg = 0x0904,
51 .pcrx_ptr = 0x0908,
52 .glo_cfg = 0x0a04,
53 .rst_idx = 0x0a08,
54 .delay_irq = 0x0a0c,
55 .irq_status = 0x0a20,
56 .irq_mask = 0x0a28,
57 .int_grp = 0x0a50,
58 .int_grp2 = 0x0a54,
59 },
60 .qdma = {
61 .qtx_cfg = 0x1800,
62 .qtx_sch = 0x1804,
63 .rx_ptr = 0x1900,
64 .rx_cnt_cfg = 0x1904,
65 .qcrx_ptr = 0x1908,
66 .glo_cfg = 0x1a04,
67 .rst_idx = 0x1a08,
68 .delay_irq = 0x1a0c,
69 .fc_th = 0x1a10,
70 .tx_sch_rate = 0x1a14,
71 .int_grp = 0x1a20,
72 .int_grp2 = 0x1a24,
73 .hred2 = 0x1a44,
74 .ctx_ptr = 0x1b00,
75 .dtx_ptr = 0x1b04,
76 .crx_ptr = 0x1b10,
77 .drx_ptr = 0x1b14,
78 .fq_head = 0x1b20,
79 .fq_tail = 0x1b24,
80 .fq_count = 0x1b28,
81 .fq_blen = 0x1b2c,
82 },
83 .gdm1_cnt = 0x2400,
84 .gdma_to_ppe0 = 0x4444,
85 .ppe_base = {
86 [0] = 0x0c00,
87 },
88 .wdma_base = {
89 [0] = 0x2800,
90 [1] = 0x2c00,
91 },
92};
93
/* MT7628/MT76x8 register layout: PDMA only — this SoC family has no QDMA,
 * so only the PDMA sub-map and TX IRQ registers are populated.
 */
static const struct mtk_reg_map mt7628_reg_map = {
	.tx_irq_mask = 0x0a28,
	.tx_irq_status = 0x0a20,
	.pdma = {
		.rx_ptr = 0x0900,
		.rx_cnt_cfg = 0x0904,
		.pcrx_ptr = 0x0908,
		.glo_cfg = 0x0a04,
		.rst_idx = 0x0a08,
		.delay_irq = 0x0a0c,
		.irq_status = 0x0a20,
		.irq_mask = 0x0a28,
		.int_grp = 0x0a50,
		.int_grp2 = 0x0a54,
	},
};
110
/* MT7986 (NETSYS v2) register layout: two PPEs and two WDMA instances. */
static const struct mtk_reg_map mt7986_reg_map = {
	.tx_irq_mask = 0x461c,
	.tx_irq_status = 0x4618,
	.pdma = {
		.rx_ptr = 0x4100,
		.rx_cnt_cfg = 0x4104,
		.pcrx_ptr = 0x4108,
		.glo_cfg = 0x4204,
		.rst_idx = 0x4208,
		.delay_irq = 0x420c,
		.irq_status = 0x4220,
		.irq_mask = 0x4228,
		.int_grp = 0x4250,
		.int_grp2 = 0x4254,
	},
	.qdma = {
		.qtx_cfg = 0x4400,
		.qtx_sch = 0x4404,
		.rx_ptr = 0x4500,
		.rx_cnt_cfg = 0x4504,
		.qcrx_ptr = 0x4508,
		.glo_cfg = 0x4604,
		.rst_idx = 0x4608,
		.delay_irq = 0x460c,
		.fc_th = 0x4610,
		.int_grp = 0x4620,
		.int_grp2 = 0x4624,
		.hred2 = 0x4644,
		.ctx_ptr = 0x4700,
		.dtx_ptr = 0x4704,
		.crx_ptr = 0x4710,
		.drx_ptr = 0x4714,
		.fq_head = 0x4720,
		.fq_tail = 0x4724,
		.fq_count = 0x4728,
		.fq_blen = 0x472c,
		.tx_sch_rate = 0x4798,
	},
	.gdm1_cnt = 0x1c00,
	.gdma_to_ppe0 = 0x3333,
	.ppe_base = {
		[0] = 0x2000,
		[1] = 0x2400,
	},
	.wdma_base = {
		[0] = 0x4800,
		[1] = 0x4c00,
	},
};
160
/* MT7988 (NETSYS v3) register layout: PDMA moved to 0x69xx/0x6axx, and a
 * third PPE/WDMA instance is present compared to MT7986.
 */
static const struct mtk_reg_map mt7988_reg_map = {
	.tx_irq_mask = 0x461c,
	.tx_irq_status = 0x4618,
	.pdma = {
		.rx_ptr = 0x6900,
		.rx_cnt_cfg = 0x6904,
		.pcrx_ptr = 0x6908,
		.glo_cfg = 0x6a04,
		.rst_idx = 0x6a08,
		.delay_irq = 0x6a0c,
		.irq_status = 0x6a20,
		.irq_mask = 0x6a28,
		.int_grp = 0x6a50,
		.int_grp2 = 0x6a54,
	},
	.qdma = {
		.qtx_cfg = 0x4400,
		.qtx_sch = 0x4404,
		.rx_ptr = 0x4500,
		.rx_cnt_cfg = 0x4504,
		.qcrx_ptr = 0x4508,
		.glo_cfg = 0x4604,
		.rst_idx = 0x4608,
		.delay_irq = 0x460c,
		.fc_th = 0x4610,
		.int_grp = 0x4620,
		.int_grp2 = 0x4624,
		.hred2 = 0x4644,
		.ctx_ptr = 0x4700,
		.dtx_ptr = 0x4704,
		.crx_ptr = 0x4710,
		.drx_ptr = 0x4714,
		.fq_head = 0x4720,
		.fq_tail = 0x4724,
		.fq_count = 0x4728,
		.fq_blen = 0x472c,
		.tx_sch_rate = 0x4798,
	},
	.gdm1_cnt = 0x1c00,
	.gdma_to_ppe0 = 0x3333,
	.ppe_base = {
		[0] = 0x2000,
		[1] = 0x2400,
		[2] = 0x2c00,
	},
	.wdma_base = {
		[0] = 0x4800,
		[1] = 0x4c00,
		[2] = 0x5000,
	},
};
212
developerfd40db22021-04-29 10:08:25 +0800213/* strings used by ethtool */
214static const struct mtk_ethtool_stats {
215 char str[ETH_GSTRING_LEN];
216 u32 offset;
217} mtk_ethtool_stats[] = {
218 MTK_ETHTOOL_STAT(tx_bytes),
219 MTK_ETHTOOL_STAT(tx_packets),
220 MTK_ETHTOOL_STAT(tx_skip),
221 MTK_ETHTOOL_STAT(tx_collisions),
222 MTK_ETHTOOL_STAT(rx_bytes),
223 MTK_ETHTOOL_STAT(rx_packets),
224 MTK_ETHTOOL_STAT(rx_overflow),
225 MTK_ETHTOOL_STAT(rx_fcs_errors),
226 MTK_ETHTOOL_STAT(rx_short_errors),
227 MTK_ETHTOOL_STAT(rx_long_errors),
228 MTK_ETHTOOL_STAT(rx_checksum_errors),
229 MTK_ETHTOOL_STAT(rx_flow_control_packets),
230};
231
/* Clock names looked up from the device tree; the index of each name is
 * expected to match the MTK_CLK_* enum used with eth->clks[] elsewhere in
 * this driver. Not every SoC provides every clock.
 */
static const char * const mtk_clks_source_name[] = {
	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "gp3",
	"xgp1", "xgp2", "xgp3", "crypto", "fe", "trgpll",
	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
	"sgmii_ck", "eth2pll", "wocpu0", "wocpu1",
	"ethwarp_wocpu2", "ethwarp_wocpu1", "ethwarp_wocpu0",
	"top_usxgmii0_sel", "top_usxgmii1_sel", "top_sgm0_sel", "top_sgm1_sel",
	"top_xfi_phy0_xtal_sel", "top_xfi_phy1_xtal_sel", "top_eth_gmii_sel",
	"top_eth_refck_50m_sel", "top_eth_sys_200m_sel", "top_eth_sys_sel",
	"top_eth_xgmii_sel", "top_eth_mii_sel", "top_netsys_sel",
	"top_netsys_500m_sel", "top_netsys_pao_2x_sel",
	"top_netsys_sync_250m_sel", "top_netsys_ppefb_250m_sel",
	"top_netsys_warp_sel",
};
247
/* Write a 32-bit value to a frame-engine register at offset @reg. */
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}
252
/* Read a 32-bit frame-engine register at offset @reg. */
u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}
257
/* Read-modify-write a frame-engine register: clear @mask, then OR in @set.
 *
 * NOTE(review): this returns the register OFFSET, not the value written —
 * this matches the upstream driver, but callers must not treat the return
 * as the new register contents.
 */
u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
	u32 val;

	val = mtk_r32(eth, reg);
	val &= ~mask;
	val |= set;
	mtk_w32(eth, val, reg);
	return reg;
}
268
269static int mtk_mdio_busy_wait(struct mtk_eth *eth)
270{
271 unsigned long t_start = jiffies;
272
273 while (1) {
274 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
275 return 0;
276 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
277 break;
developerc4671b22021-05-28 13:16:42 +0800278 cond_resched();
developerfd40db22021-04-29 10:08:25 +0800279 }
280
281 dev_err(eth->dev, "mdio: MDIO timeout\n");
282 return -1;
283}
284
developer599cda42022-05-24 15:13:31 +0800285u32 _mtk_mdio_write(struct mtk_eth *eth, int phy_addr,
286 int phy_reg, u16 write_data)
developerfd40db22021-04-29 10:08:25 +0800287{
288 if (mtk_mdio_busy_wait(eth))
289 return -1;
290
291 write_data &= 0xffff;
292
developer599cda42022-05-24 15:13:31 +0800293 if (phy_reg & MII_ADDR_C45) {
294 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
295 ((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
296 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
297 MTK_PHY_IAC);
298
299 if (mtk_mdio_busy_wait(eth))
300 return -1;
301
302 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_WRITE |
303 ((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
304 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
305 MTK_PHY_IAC);
306 } else {
307 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
308 ((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
309 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data,
310 MTK_PHY_IAC);
311 }
developerfd40db22021-04-29 10:08:25 +0800312
313 if (mtk_mdio_busy_wait(eth))
314 return -1;
315
316 return 0;
317}
318
developer599cda42022-05-24 15:13:31 +0800319u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
developerfd40db22021-04-29 10:08:25 +0800320{
321 u32 d;
322
323 if (mtk_mdio_busy_wait(eth))
324 return 0xffff;
325
developer599cda42022-05-24 15:13:31 +0800326 if (phy_reg & MII_ADDR_C45) {
327 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_ADDR_C45 |
328 ((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
329 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | mdiobus_c45_regad(phy_reg),
330 MTK_PHY_IAC);
331
332 if (mtk_mdio_busy_wait(eth))
333 return 0xffff;
334
335 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C45 | PHY_IAC_READ_C45 |
336 ((mdiobus_c45_devad(phy_reg) & 0x1f) << PHY_IAC_REG_SHIFT) |
337 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
338 MTK_PHY_IAC);
339 } else {
340 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
341 ((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) |
342 ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT),
343 MTK_PHY_IAC);
344 }
developerfd40db22021-04-29 10:08:25 +0800345
346 if (mtk_mdio_busy_wait(eth))
347 return 0xffff;
348
349 d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
350
351 return d;
352}
353
354static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
355 int phy_reg, u16 val)
356{
357 struct mtk_eth *eth = bus->priv;
358
359 return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
360}
361
362static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
363{
364 struct mtk_eth *eth = bus->priv;
365
366 return _mtk_mdio_read(eth, phy_addr, phy_reg);
367}
368
developerabeadd52022-08-15 11:26:44 +0800369static int mtk_mdio_reset(struct mii_bus *bus)
370{
371 /* The mdiobus_register will trigger a reset pulse when enabling Bus reset,
372 * we just need to wait until device ready.
373 */
374 mdelay(20);
375
376 return 0;
377}
378
developerfd40db22021-04-29 10:08:25 +0800379static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
380 phy_interface_t interface)
381{
developer543e7922022-12-01 11:24:47 +0800382 u32 val = 0;
developerfd40db22021-04-29 10:08:25 +0800383
384 /* Check DDR memory type.
385 * Currently TRGMII mode with DDR2 memory is not supported.
386 */
387 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
388 if (interface == PHY_INTERFACE_MODE_TRGMII &&
389 val & SYSCFG_DRAM_TYPE_DDR2) {
390 dev_err(eth->dev,
391 "TRGMII mode with DDR2 memory is not supported!\n");
392 return -EOPNOTSUPP;
393 }
394
395 val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
396 ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
397
398 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
399 ETHSYS_TRGMII_MT7621_MASK, val);
400
401 return 0;
402}
403
/* Configure GMAC0's interface mode and TRGMII PLL rate.
 *
 * For TRGMII the PLL runs at 500 MHz. For RGMII the interface mode,
 * clock mux, PLL rate, and RX/TX clock controls are all chosen from
 * @speed (1000 vs 10/100).
 */
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
				   phy_interface_t interface, int speed)
{
	u32 val;
	int ret;

	if (interface == PHY_INTERFACE_MODE_TRGMII) {
		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
		val = 500000000;
		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
		if (ret)
			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
		return;
	}

	/* RGMII: pick the interface timing for gigabit vs 10/100 */
	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	/* Select the 362.5 MHz TRGMII clock source */
	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	/* PLL rate differs per speed (250 MHz for gigabit) */
	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}
440
developer089e8852022-09-28 14:43:46 +0800441static void mtk_setup_bridge_switch(struct mtk_eth *eth)
442{
443 int val;
444
445 /* Force Port1 XGMAC Link Up */
446 val = mtk_r32(eth, MTK_XGMAC_STS(MTK_GMAC1_ID));
developer2b9bc722023-03-09 11:48:44 +0800447 mtk_w32(eth, val | MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
developer089e8852022-09-28 14:43:46 +0800448 MTK_XGMAC_STS(MTK_GMAC1_ID));
449
450 /* Adjust GSW bridge IPG to 11*/
451 val = mtk_r32(eth, MTK_GSW_CFG);
452 val &= ~(GSWTX_IPG_MASK | GSWRX_IPG_MASK);
453 val |= (GSW_IPG_11 << GSWTX_IPG_SHIFT) |
454 (GSW_IPG_11 << GSWRX_IPG_SHIFT);
455 mtk_w32(eth, val, MTK_GSW_CFG);
developer089e8852022-09-28 14:43:46 +0800456}
457
developera7570e72023-05-09 17:06:42 +0800458static bool mtk_check_gmac23_idle(struct mtk_mac *mac)
459{
460 u32 mac_fsm, gdm_fsm;
461
462 mac_fsm = mtk_r32(mac->hw, MTK_MAC_FSM(mac->id));
463
464 switch (mac->id) {
465 case MTK_GMAC2_ID:
466 gdm_fsm = mtk_r32(mac->hw, MTK_FE_GDM2_FSM);
467 break;
468 case MTK_GMAC3_ID:
469 gdm_fsm = mtk_r32(mac->hw, MTK_FE_GDM3_FSM);
470 break;
developer10b556b2023-05-15 09:49:08 +0800471 default:
472 return true;
developera7570e72023-05-09 17:06:42 +0800473 };
474
475 if ((mac_fsm & 0xFFFF0000) == 0x01010000 &&
476 (gdm_fsm & 0xFFFF0000) == 0x00000000)
477 return true;
478
479 return false;
480}
481
developer9b725932022-11-24 16:25:56 +0800482static void mtk_setup_eee(struct mtk_mac *mac, bool enable)
483{
484 struct mtk_eth *eth = mac->hw;
485 u32 mcr, mcr_cur;
486 u32 val;
487
488 mcr = mcr_cur = mtk_r32(eth, MTK_MAC_MCR(mac->id));
489 mcr &= ~(MAC_MCR_FORCE_EEE100 | MAC_MCR_FORCE_EEE1000);
490
491 if (enable) {
492 mac->tx_lpi_enabled = 1;
493
494 val = FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 19) |
495 FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 33) |
496 FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD,
497 mac->tx_lpi_timer) |
498 FIELD_PREP(MAC_EEE_RESV0, 14);
499 mtk_w32(eth, val, MTK_MAC_EEE(mac->id));
500
501 switch (mac->speed) {
502 case SPEED_1000:
503 mcr |= MAC_MCR_FORCE_EEE1000;
504 break;
505 case SPEED_100:
506 mcr |= MAC_MCR_FORCE_EEE100;
507 break;
508 };
509 } else {
510 mac->tx_lpi_enabled = 0;
511
512 mtk_w32(eth, 0x00000002, MTK_MAC_EEE(mac->id));
513 }
514
515 /* Only update control register when needed! */
516 if (mcr != mcr_cur)
517 mtk_w32(eth, mcr, MTK_MAC_MCR(mac->id));
518}
519
developer0fef5222023-04-26 14:48:31 +0800520static int mtk_get_hwver(struct mtk_eth *eth)
521{
522 struct device_node *np;
523 struct regmap *hwver;
524 u32 info = 0;
525
526 eth->hwver = MTK_HWID_V1;
527
528 np = of_parse_phandle(eth->dev->of_node, "mediatek,hwver", 0);
529 if (!np)
530 return -EINVAL;
531
532 hwver = syscon_node_to_regmap(np);
533 if (IS_ERR(hwver))
534 return PTR_ERR(hwver);
535
536 regmap_read(hwver, 0x8, &info);
537
538 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
539 eth->hwver = FIELD_GET(HWVER_BIT_NETSYS_3, info);
540 else
541 eth->hwver = FIELD_GET(HWVER_BIT_NETSYS_1_2, info);
542
543 of_node_put(np);
544
545 return 0;
546}
547
developer4e8a3fd2023-04-10 18:05:44 +0800548static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
549 phy_interface_t interface)
550{
551 struct mtk_mac *mac = container_of(config, struct mtk_mac,
552 phylink_config);
553 struct mtk_eth *eth = mac->hw;
554 unsigned int sid;
555
556 if (interface == PHY_INTERFACE_MODE_SGMII ||
557 phy_interface_mode_is_8023z(interface)) {
558 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
559 0 : mtk_mac2xgmii_id(eth, mac->id);
560
561 return mtk_sgmii_select_pcs(eth->sgmii, sid);
562 } else if (interface == PHY_INTERFACE_MODE_USXGMII ||
563 interface == PHY_INTERFACE_MODE_10GKR ||
564 interface == PHY_INTERFACE_MODE_5GBASER) {
565 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) &&
566 mac->id != MTK_GMAC1_ID) {
567 sid = mtk_mac2xgmii_id(eth, mac->id);
568
569 return mtk_usxgmii_select_pcs(eth->usxgmii, sid);
570 }
571 }
572
573 return NULL;
574}
575
developerfd40db22021-04-29 10:08:25 +0800576static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
577 const struct phylink_link_state *state)
578{
579 struct mtk_mac *mac = container_of(config, struct mtk_mac,
580 phylink_config);
581 struct mtk_eth *eth = mac->hw;
developer089e8852022-09-28 14:43:46 +0800582 u32 sid, i;
developerff5e5092023-07-25 15:55:28 +0800583 int val = 0, ge_mode, err = 0;
developer82eae452023-02-13 10:04:09 +0800584 unsigned int mac_type = mac->type;
developerfd40db22021-04-29 10:08:25 +0800585
586 /* MT76x8 has no hardware settings between for the MAC */
587 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
588 mac->interface != state->interface) {
589 /* Setup soc pin functions */
590 switch (state->interface) {
591 case PHY_INTERFACE_MODE_TRGMII:
592 if (mac->id)
593 goto err_phy;
594 if (!MTK_HAS_CAPS(mac->hw->soc->caps,
595 MTK_GMAC1_TRGMII))
596 goto err_phy;
597 /* fall through */
598 case PHY_INTERFACE_MODE_RGMII_TXID:
599 case PHY_INTERFACE_MODE_RGMII_RXID:
600 case PHY_INTERFACE_MODE_RGMII_ID:
601 case PHY_INTERFACE_MODE_RGMII:
602 case PHY_INTERFACE_MODE_MII:
603 case PHY_INTERFACE_MODE_REVMII:
604 case PHY_INTERFACE_MODE_RMII:
developer82eae452023-02-13 10:04:09 +0800605 mac->type = MTK_GDM_TYPE;
developerfd40db22021-04-29 10:08:25 +0800606 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
607 err = mtk_gmac_rgmii_path_setup(eth, mac->id);
608 if (err)
609 goto init_err;
610 }
611 break;
612 case PHY_INTERFACE_MODE_1000BASEX:
613 case PHY_INTERFACE_MODE_2500BASEX:
614 case PHY_INTERFACE_MODE_SGMII:
developer82eae452023-02-13 10:04:09 +0800615 mac->type = MTK_GDM_TYPE;
developerfd40db22021-04-29 10:08:25 +0800616 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
617 err = mtk_gmac_sgmii_path_setup(eth, mac->id);
618 if (err)
619 goto init_err;
620 }
621 break;
622 case PHY_INTERFACE_MODE_GMII:
developer82eae452023-02-13 10:04:09 +0800623 mac->type = MTK_GDM_TYPE;
developerfd40db22021-04-29 10:08:25 +0800624 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
625 err = mtk_gmac_gephy_path_setup(eth, mac->id);
626 if (err)
627 goto init_err;
628 }
629 break;
developer30e13e72022-11-03 10:21:24 +0800630 case PHY_INTERFACE_MODE_XGMII:
developer82eae452023-02-13 10:04:09 +0800631 mac->type = MTK_XGDM_TYPE;
developer30e13e72022-11-03 10:21:24 +0800632 if (MTK_HAS_CAPS(eth->soc->caps, MTK_XGMII)) {
633 err = mtk_gmac_xgmii_path_setup(eth, mac->id);
634 if (err)
635 goto init_err;
636 }
637 break;
developer089e8852022-09-28 14:43:46 +0800638 case PHY_INTERFACE_MODE_USXGMII:
639 case PHY_INTERFACE_MODE_10GKR:
developercfa104b2023-01-11 17:40:41 +0800640 case PHY_INTERFACE_MODE_5GBASER:
developer82eae452023-02-13 10:04:09 +0800641 mac->type = MTK_XGDM_TYPE;
developer089e8852022-09-28 14:43:46 +0800642 if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
643 err = mtk_gmac_usxgmii_path_setup(eth, mac->id);
644 if (err)
645 goto init_err;
646 }
647 break;
developerfd40db22021-04-29 10:08:25 +0800648 default:
649 goto err_phy;
650 }
651
652 /* Setup clock for 1st gmac */
653 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
654 !phy_interface_mode_is_8023z(state->interface) &&
655 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
656 if (MTK_HAS_CAPS(mac->hw->soc->caps,
657 MTK_TRGMII_MT7621_CLK)) {
658 if (mt7621_gmac0_rgmii_adjust(mac->hw,
659 state->interface))
660 goto err_phy;
661 } else {
662 mtk_gmac0_rgmii_adjust(mac->hw,
663 state->interface,
664 state->speed);
665
666 /* mt7623_pad_clk_setup */
667 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
668 mtk_w32(mac->hw,
669 TD_DM_DRVP(8) | TD_DM_DRVN(8),
670 TRGMII_TD_ODT(i));
671
672 /* Assert/release MT7623 RXC reset */
673 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
674 TRGMII_RCK_CTRL);
675 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
676 }
677 }
678
679 ge_mode = 0;
680 switch (state->interface) {
681 case PHY_INTERFACE_MODE_MII:
682 case PHY_INTERFACE_MODE_GMII:
683 ge_mode = 1;
684 break;
685 case PHY_INTERFACE_MODE_REVMII:
686 ge_mode = 2;
687 break;
688 case PHY_INTERFACE_MODE_RMII:
689 if (mac->id)
690 goto err_phy;
691 ge_mode = 3;
692 break;
693 default:
694 break;
695 }
696
697 /* put the gmac into the right mode */
developerd82e8372022-02-09 15:00:09 +0800698 spin_lock(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +0800699 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
700 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
701 val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
702 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
developerd82e8372022-02-09 15:00:09 +0800703 spin_unlock(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +0800704
705 mac->interface = state->interface;
706 }
707
708 /* SGMII */
709 if (state->interface == PHY_INTERFACE_MODE_SGMII ||
710 phy_interface_mode_is_8023z(state->interface)) {
711 /* The path GMAC to SGMII will be enabled once the SGMIISYS is
712 * being setup done.
713 */
developerd82e8372022-02-09 15:00:09 +0800714 spin_lock(&eth->syscfg0_lock);
developerfd40db22021-04-29 10:08:25 +0800715 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
716
717 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
718 SYSCFG0_SGMII_MASK,
719 ~(u32)SYSCFG0_SGMII_MASK);
720
721 /* Decide how GMAC and SGMIISYS be mapped */
722 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
723 0 : mac->id;
724
developer4e8a3fd2023-04-10 18:05:44 +0800725 /* Save the syscfg0 value for mac_finish */
726 mac->syscfg0 = val;
developerd82e8372022-02-09 15:00:09 +0800727 spin_unlock(&eth->syscfg0_lock);
developer089e8852022-09-28 14:43:46 +0800728 } else if (state->interface == PHY_INTERFACE_MODE_USXGMII ||
developercfa104b2023-01-11 17:40:41 +0800729 state->interface == PHY_INTERFACE_MODE_10GKR ||
730 state->interface == PHY_INTERFACE_MODE_5GBASER) {
developer4e8a3fd2023-04-10 18:05:44 +0800731 /* Nothing to do */
developerfd40db22021-04-29 10:08:25 +0800732 } else if (phylink_autoneg_inband(mode)) {
733 dev_err(eth->dev,
734 "In-band mode not supported in non SGMII mode!\n");
735 return;
736 }
737
738 /* Setup gmac */
developer30e13e72022-11-03 10:21:24 +0800739 if (mac->type == MTK_XGDM_TYPE) {
developer089e8852022-09-28 14:43:46 +0800740 mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
741 mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
developerfd40db22021-04-29 10:08:25 +0800742
developer089e8852022-09-28 14:43:46 +0800743 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developerff5e5092023-07-25 15:55:28 +0800744 if (mac->id == MTK_GMAC1_ID)
developer089e8852022-09-28 14:43:46 +0800745 mtk_setup_bridge_switch(eth);
developer089e8852022-09-28 14:43:46 +0800746 }
developer82eae452023-02-13 10:04:09 +0800747 } else if (mac->type == MTK_GDM_TYPE) {
748 val = mtk_r32(eth, MTK_GDMA_EG_CTRL(mac->id));
749 mtk_w32(eth, val & ~MTK_GDMA_XGDM_SEL,
750 MTK_GDMA_EG_CTRL(mac->id));
751
developer4e8a3fd2023-04-10 18:05:44 +0800752 /* FIXME: In current hardware design, we have to reset FE
753 * when swtiching XGDM to GDM. Therefore, here trigger an SER
754 * to let GDM go back to the initial state.
755 */
developera7570e72023-05-09 17:06:42 +0800756 if (mac->type != mac_type && !mtk_check_gmac23_idle(mac)) {
757 if (!test_bit(MTK_RESETTING, &mac->hw->state)) {
developer82eae452023-02-13 10:04:09 +0800758 atomic_inc(&force);
759 schedule_work(&eth->pending_work);
developera7570e72023-05-09 17:06:42 +0800760 }
developer82eae452023-02-13 10:04:09 +0800761 }
developerfd40db22021-04-29 10:08:25 +0800762 }
763
developerfd40db22021-04-29 10:08:25 +0800764 return;
765
766err_phy:
767 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
768 mac->id, phy_modes(state->interface));
769 return;
770
771init_err:
772 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
773 mac->id, phy_modes(state->interface), err);
774}
775
developer4e8a3fd2023-04-10 18:05:44 +0800776static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
777 phy_interface_t interface)
778{
779 struct mtk_mac *mac = container_of(config, struct mtk_mac,
780 phylink_config);
781 struct mtk_eth *eth = mac->hw;
782
783 /* Enable SGMII */
784 if (interface == PHY_INTERFACE_MODE_SGMII ||
785 phy_interface_mode_is_8023z(interface))
786 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
787 SYSCFG0_SGMII_MASK, mac->syscfg0);
788
789 return 0;
790}
791
developer089e8852022-09-28 14:43:46 +0800792static int mtk_mac_pcs_get_state(struct phylink_config *config,
793 struct phylink_link_state *state)
developerfd40db22021-04-29 10:08:25 +0800794{
795 struct mtk_mac *mac = container_of(config, struct mtk_mac,
796 phylink_config);
developerfd40db22021-04-29 10:08:25 +0800797
developer089e8852022-09-28 14:43:46 +0800798 if (mac->type == MTK_XGDM_TYPE) {
799 u32 sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));
developerfd40db22021-04-29 10:08:25 +0800800
developer089e8852022-09-28 14:43:46 +0800801 if (mac->id == MTK_GMAC2_ID)
802 sts = sts >> 16;
developerfd40db22021-04-29 10:08:25 +0800803
developer4e8a3fd2023-04-10 18:05:44 +0800804 state->duplex = DUPLEX_FULL;
developer089e8852022-09-28 14:43:46 +0800805
806 switch (FIELD_GET(MTK_USXGMII_PCS_MODE, sts)) {
807 case 0:
808 state->speed = SPEED_10000;
809 break;
810 case 1:
811 state->speed = SPEED_5000;
812 break;
813 case 2:
814 state->speed = SPEED_2500;
815 break;
816 case 3:
817 state->speed = SPEED_1000;
818 break;
819 }
820
developer82eae452023-02-13 10:04:09 +0800821 state->interface = mac->interface;
developer089e8852022-09-28 14:43:46 +0800822 state->link = FIELD_GET(MTK_USXGMII_PCS_LINK, sts);
823 } else if (mac->type == MTK_GDM_TYPE) {
824 struct mtk_eth *eth = mac->hw;
developer4e8a3fd2023-04-10 18:05:44 +0800825 struct mtk_sgmii *ss = eth->sgmii;
developer089e8852022-09-28 14:43:46 +0800826 u32 id = mtk_mac2xgmii_id(eth, mac->id);
827 u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
developer38afb1a2023-04-17 09:57:27 +0800828 u32 bm, adv, rgc3, sgm_mode;
developer089e8852022-09-28 14:43:46 +0800829
developer82eae452023-02-13 10:04:09 +0800830 state->interface = mac->interface;
developer089e8852022-09-28 14:43:46 +0800831
developer38afb1a2023-04-17 09:57:27 +0800832 regmap_read(ss->pcs[id].regmap, SGMSYS_PCS_CONTROL_1, &bm);
833 if (bm & SGMII_AN_ENABLE) {
developer4e8a3fd2023-04-10 18:05:44 +0800834 regmap_read(ss->pcs[id].regmap,
developer38afb1a2023-04-17 09:57:27 +0800835 SGMSYS_PCS_ADVERTISE, &adv);
developer089e8852022-09-28 14:43:46 +0800836
developer38afb1a2023-04-17 09:57:27 +0800837 phylink_mii_c22_pcs_decode_state(
838 state,
839 FIELD_GET(SGMII_BMSR, bm),
840 FIELD_GET(SGMII_LPA, adv));
developer089e8852022-09-28 14:43:46 +0800841 } else {
developer38afb1a2023-04-17 09:57:27 +0800842 state->link = !!(bm & SGMII_LINK_STATYS);
developer089e8852022-09-28 14:43:46 +0800843
developer38afb1a2023-04-17 09:57:27 +0800844 regmap_read(ss->pcs[id].regmap,
845 SGMSYS_SGMII_MODE, &sgm_mode);
developer089e8852022-09-28 14:43:46 +0800846
developer38afb1a2023-04-17 09:57:27 +0800847 switch (sgm_mode & SGMII_SPEED_MASK) {
848 case SGMII_SPEED_10:
developer089e8852022-09-28 14:43:46 +0800849 state->speed = SPEED_10;
850 break;
developer38afb1a2023-04-17 09:57:27 +0800851 case SGMII_SPEED_100:
developer089e8852022-09-28 14:43:46 +0800852 state->speed = SPEED_100;
853 break;
developer38afb1a2023-04-17 09:57:27 +0800854 case SGMII_SPEED_1000:
developer4e8a3fd2023-04-10 18:05:44 +0800855 regmap_read(ss->pcs[id].regmap,
developer38afb1a2023-04-17 09:57:27 +0800856 ss->pcs[id].ana_rgc3, &rgc3);
857 rgc3 = FIELD_GET(RG_PHY_SPEED_3_125G, rgc3);
developer4e8a3fd2023-04-10 18:05:44 +0800858 state->speed = rgc3 ? SPEED_2500 : SPEED_1000;
developer089e8852022-09-28 14:43:46 +0800859 break;
860 }
developer38afb1a2023-04-17 09:57:27 +0800861
862 if (sgm_mode & SGMII_DUPLEX_HALF)
863 state->duplex = DUPLEX_HALF;
864 else
865 state->duplex = DUPLEX_FULL;
developer089e8852022-09-28 14:43:46 +0800866 }
867
868 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
869 if (pmsr & MAC_MSR_RX_FC)
870 state->pause |= MLO_PAUSE_RX;
871 if (pmsr & MAC_MSR_TX_FC)
872 state->pause |= MLO_PAUSE_TX;
873 }
developerfd40db22021-04-29 10:08:25 +0800874
875 return 1;
876}
877
developer65f32592023-08-02 09:35:49 +0800878static int mtk_gdm_fsm_get(struct mtk_mac *mac, u32 gdm)
879{
880 u32 fsm = mtk_r32(mac->hw, gdm);
developer26d0a532023-08-28 16:24:58 +0800881 u32 ret = 0, val = 0;
developer65f32592023-08-02 09:35:49 +0800882
developer26d0a532023-08-28 16:24:58 +0800883 switch (mac->type) {
884 case MTK_GDM_TYPE:
developer65f32592023-08-02 09:35:49 +0800885 ret = fsm == 0;
developer26d0a532023-08-28 16:24:58 +0800886 break;
887 case MTK_XGDM_TYPE:
888 ret = fsm == 0x10000000;
889 break;
890 default:
891 break;
892 }
893
894 if ((mac->type == MTK_XGDM_TYPE) && (mac->id != MTK_GMAC1_ID)) {
895 val = mtk_r32(mac->hw, MTK_MAC_FSM(mac->id));
896 if ((val == 0x02010100) || (val == 0x01010100)) {
897 ret = (mac->interface == PHY_INTERFACE_MODE_XGMII) ?
898 ((fsm & 0x0fffffff) == 0) : ((fsm & 0x00ffffff) == 0);
developer65f32592023-08-02 09:35:49 +0800899 } else
developer26d0a532023-08-28 16:24:58 +0800900 ret = 0;
developer65f32592023-08-02 09:35:49 +0800901 }
902
903 return ret;
904}
905
906static void mtk_gdm_fsm_poll(struct mtk_mac *mac)
907{
908 u32 gdm = 0, i = 0;
909
910 switch (mac->id) {
911 case MTK_GMAC1_ID:
912 gdm = MTK_FE_GDM1_FSM;
913 break;
914 case MTK_GMAC2_ID:
915 gdm = MTK_FE_GDM2_FSM;
916 break;
917 case MTK_GMAC3_ID:
918 gdm = MTK_FE_GDM3_FSM;
919 break;
920 default:
921 pr_info("%s mac id invalid", __func__);
922 break;
923 }
developer26d0a532023-08-28 16:24:58 +0800924
developer65f32592023-08-02 09:35:49 +0800925 while (i < 3) {
926 if (mtk_gdm_fsm_get(mac, gdm))
927 break;
928 msleep(500);
929 i++;
930 }
931
932 if (i == 3)
933 pr_info("%s fsm invalid", __func__);
934}
935
936static void mtk_pse_port_link_set(struct mtk_mac *mac, bool up)
937{
developera7d382a2023-08-25 12:05:22 +0800938 u32 fe_glo_cfg, val = 0;
developer65f32592023-08-02 09:35:49 +0800939
940 fe_glo_cfg = mtk_r32(mac->hw, MTK_FE_GLO_CFG(mac->id));
941 switch (mac->id) {
942 case MTK_GMAC1_ID:
943 val = MTK_FE_LINK_DOWN_P1;
944 break;
945 case MTK_GMAC2_ID:
946 val = MTK_FE_LINK_DOWN_P2;
947 break;
948 case MTK_GMAC3_ID:
949 val = MTK_FE_LINK_DOWN_P15;
950 break;
951 }
952
953 if (!up)
954 fe_glo_cfg |= val;
955 else
956 fe_glo_cfg &= ~val;
957
958 mtk_w32(mac->hw, fe_glo_cfg, MTK_FE_GLO_CFG(mac->id));
959 mtk_gdm_fsm_poll(mac);
960}
961
developerfd40db22021-04-29 10:08:25 +0800962static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
963 phy_interface_t interface)
964{
965 struct mtk_mac *mac = container_of(config, struct mtk_mac,
966 phylink_config);
developer21260d02023-09-04 11:29:04 +0800967 struct mtk_eth *eth = mac->hw;
968 unsigned int id;
developerff5e5092023-07-25 15:55:28 +0800969 u32 mcr, sts;
developer089e8852022-09-28 14:43:46 +0800970
developer65f32592023-08-02 09:35:49 +0800971 mtk_pse_port_link_set(mac, false);
developer089e8852022-09-28 14:43:46 +0800972 if (mac->type == MTK_GDM_TYPE) {
973 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
developer65f32592023-08-02 09:35:49 +0800974 mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
developer089e8852022-09-28 14:43:46 +0800975 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
976 } else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
developer21260d02023-09-04 11:29:04 +0800977 struct mtk_usxgmii_pcs *mpcs;
developerfd40db22021-04-29 10:08:25 +0800978
developer21260d02023-09-04 11:29:04 +0800979 mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
developer089e8852022-09-28 14:43:46 +0800980 mcr &= 0xfffffff0;
981 mcr |= XMAC_MCR_TRX_DISABLE;
982 mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
developerff5e5092023-07-25 15:55:28 +0800983
984 sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));
985 sts &= ~MTK_XGMAC_FORCE_LINK(mac->id);
986 mtk_w32(mac->hw, sts, MTK_XGMAC_STS(mac->id));
developer21260d02023-09-04 11:29:04 +0800987
988 id = mtk_mac2xgmii_id(eth, mac->id);
989 mpcs = &eth->usxgmii->pcs[id];
990 cancel_delayed_work_sync(&mpcs->link_poll);
developer089e8852022-09-28 14:43:46 +0800991 }
developerfd40db22021-04-29 10:08:25 +0800992}
993
/* phylink .mac_link_up callback: program the MAC for the negotiated link.
 *
 * NOTE(review): this body reads speed/duplex/tx_pause/rx_pause which are
 * not visible in the parameter list shown here; they are presumably part
 * of the full .mac_link_up signature in this tree — confirm against the
 * phylink API version this kernel uses.
 *
 * GDM (1G) path: rebuild MAC_MCR with forced speed/duplex/pause settings,
 * enable TX/RX, and only write the register when it actually changed;
 * optionally set up EEE when driven by a PHY.
 * XGDM (10G) path: reset the XMAC logic to clear PHY-noise artifacts,
 * force link in XGMAC_STS where required, program pause bits, and clear
 * TRX_DISABLE.  Finally release the PSE port link-down.
 */
static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
			    phy_interface_t interface,
			    struct phy_device *phy)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr, mcr_cur, sts, force_link;

	mac->speed = speed;

	if (mac->type == MTK_GDM_TYPE) {
		mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
		mcr = mcr_cur;
		mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
			 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
			 MAC_MCR_FORCE_RX_FC);
		mcr |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
		       MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;

		/* Configure speed (2.5G uses the 1000 bit on this MAC) */
		switch (speed) {
		case SPEED_2500:
		case SPEED_1000:
			mcr |= MAC_MCR_SPEED_1000;
			break;
		case SPEED_100:
			mcr |= MAC_MCR_SPEED_100;
			break;
		}

		/* Configure duplex */
		if (duplex == DUPLEX_FULL)
			mcr |= MAC_MCR_FORCE_DPX;

		/* Configure pause modes -
		 * phylink will avoid these for half duplex
		 */
		if (tx_pause)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (rx_pause)
			mcr |= MAC_MCR_FORCE_RX_FC;

		mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;

		/* Only update control register when needed! */
		if (mcr != mcr_cur)
			mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

		if (mode == MLO_AN_PHY && phy)
			mtk_setup_eee(mac, phy_init_eee(phy, false) >= 0);
	} else if (mac->type == MTK_XGDM_TYPE && mac->id != MTK_GMAC1_ID) {
		/* Eliminate the interference(before link-up) caused by PHY noise */
		mtk_m32(mac->hw, XMAC_LOGIC_RST, 0x0, MTK_XMAC_LOGIC_RST(mac->id));
		mdelay(20);
		mtk_m32(mac->hw, XMAC_GLB_CNTCLR, 0x1, MTK_XMAC_CNT_CTRL(mac->id));

		switch (mac->id) {
		case MTK_GMAC2_ID:
			/* GMAC2 only forces link when running true XGMII */
			force_link = (mac->interface ==
				      PHY_INTERFACE_MODE_XGMII) ?
				     MTK_XGMAC_FORCE_LINK(mac->id) : 0;
			sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));
			mtk_w32(mac->hw, sts | force_link,
				MTK_XGMAC_STS(mac->id));
			break;
		case MTK_GMAC3_ID:
			sts = mtk_r32(mac->hw, MTK_XGMAC_STS(mac->id));
			mtk_w32(mac->hw,
				sts | MTK_XGMAC_FORCE_LINK(mac->id),
				MTK_XGMAC_STS(mac->id));
			break;
		}

		mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));

		mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC);
		/* Configure pause modes -
		 * phylink will avoid these for half duplex
		 */
		if (tx_pause)
			mcr |= XMAC_MCR_FORCE_TX_FC;
		if (rx_pause)
			mcr |= XMAC_MCR_FORCE_RX_FC;

		/* Re-enable TX/RX on the XMAC */
		mcr &= ~(XMAC_MCR_TRX_DISABLE);
		mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
	}
	mtk_pse_port_link_set(mac, true);
}
1083
/* phylink .validate callback: report which link modes this MAC supports
 * for @state->interface.
 *
 * First rejects interfaces the SoC lacks capability bits for (TRGMII is
 * additionally restricted to GMAC1, i.e. !mac->id).  Then builds @mask
 * via a deliberate fallthrough ladder: faster interfaces add their modes
 * and fall through to accumulate the slower ones.  For MODE_NA, the mask
 * is the union over all capabilities present on the SoC.  XGDM MACs
 * cannot do half duplex, so those bits are cleared at the end.
 */
static void mtk_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* Reject interfaces not backed by a SoC capability bit */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
	      phy_interface_mode_is_rgmii(state->interface)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
	       phy_interface_mode_is_8023z(state->interface))) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_XGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_XGMII)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_USXGMII)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_10GKR))) {
		linkmode_zero(supported);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_USXGMII:
	case PHY_INTERFACE_MODE_10GKR:
		phylink_set(mask, 10000baseKR_Full);
		phylink_set(mask, 10000baseT_Full);
		phylink_set(mask, 10000baseCR_Full);
		phylink_set(mask, 10000baseSR_Full);
		phylink_set(mask, 10000baseLR_Full);
		phylink_set(mask, 10000baseLRM_Full);
		phylink_set(mask, 10000baseER_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		phylink_set(mask, 1000baseT_Half);
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		phylink_set(mask, 5000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_TRGMII:
		phylink_set(mask, 1000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_XGMII:
		/* fall through */
	case PHY_INTERFACE_MODE_1000BASEX:
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 2500baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseT_Half);
		/* fall through */
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_REVMII:
	case PHY_INTERFACE_MODE_NA:
	default:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		break;
	}

	/* MODE_NA: advertise everything the SoC is capable of */
	if (state->interface == PHY_INTERFACE_MODE_NA) {

		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_USXGMII)) {
			phylink_set(mask, 10000baseKR_Full);
			phylink_set(mask, 10000baseT_Full);
			phylink_set(mask, 10000baseCR_Full);
			phylink_set(mask, 10000baseSR_Full);
			phylink_set(mask, 10000baseLR_Full);
			phylink_set(mask, 10000baseLRM_Full);
			phylink_set(mask, 10000baseER_Full);
			phylink_set(mask, 1000baseKX_Full);
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
			phylink_set(mask, 2500baseT_Full);
			phylink_set(mask, 5000baseT_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
			phylink_set(mask, 1000baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
		}
	}

	/* XGDM MACs have no half-duplex support */
	if (mac->type == MTK_XGDM_TYPE) {
		phylink_clear(mask, 10baseT_Half);
		phylink_clear(mask, 100baseT_Half);
		phylink_clear(mask, 1000baseT_Half);
	}

	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}
1218
/* phylink MAC operations wired up for every mtk_mac instance; the
 * callbacks above/below implement validation, PCS selection, link state
 * reporting, and link up/down handling for both GDM and XGDM MACs.
 */
static const struct phylink_mac_ops mtk_phylink_ops = {
	.validate = mtk_validate,
	.mac_select_pcs = mtk_mac_select_pcs,
	.mac_link_state = mtk_mac_pcs_get_state,
	.mac_config = mtk_mac_config,
	.mac_finish = mtk_mac_finish,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
};
1228
developerc4d8da72023-03-16 14:37:28 +08001229static int mtk_mdc_init(struct mtk_eth *eth)
developerfd40db22021-04-29 10:08:25 +08001230{
1231 struct device_node *mii_np;
developerc4d8da72023-03-16 14:37:28 +08001232 int max_clk = 2500000, divider;
developer778e4122023-04-20 16:09:32 +08001233 int ret = 0;
developerc8acd8d2022-11-10 09:07:10 +08001234 u32 val;
developerfd40db22021-04-29 10:08:25 +08001235
1236 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
1237 if (!mii_np) {
1238 dev_err(eth->dev, "no %s child node found", "mdio-bus");
1239 return -ENODEV;
1240 }
1241
1242 if (!of_device_is_available(mii_np)) {
1243 ret = -ENODEV;
1244 goto err_put_node;
1245 }
1246
developerc4d8da72023-03-16 14:37:28 +08001247 if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
1248 if (val > MDC_MAX_FREQ ||
1249 val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
1250 dev_err(eth->dev, "MDIO clock frequency out of range");
1251 ret = -EINVAL;
1252 goto err_put_node;
1253 }
developerc8acd8d2022-11-10 09:07:10 +08001254 max_clk = val;
developerc4d8da72023-03-16 14:37:28 +08001255 }
developerc8acd8d2022-11-10 09:07:10 +08001256
developerc4d8da72023-03-16 14:37:28 +08001257 divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
developerc8acd8d2022-11-10 09:07:10 +08001258
1259 /* Configure MDC Turbo Mode */
1260 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
1261 val = mtk_r32(eth, MTK_MAC_MISC);
1262 val |= MISC_MDC_TURBO;
1263 mtk_w32(eth, val, MTK_MAC_MISC);
1264 } else {
1265 val = mtk_r32(eth, MTK_PPSC);
1266 val |= PPSC_MDC_TURBO;
1267 mtk_w32(eth, val, MTK_PPSC);
1268 }
1269
1270 /* Configure MDC Divider */
1271 val = mtk_r32(eth, MTK_PPSC);
1272 val &= ~PPSC_MDC_CFG;
1273 val |= FIELD_PREP(PPSC_MDC_CFG, divider);
1274 mtk_w32(eth, val, MTK_PPSC);
1275
developerc4d8da72023-03-16 14:37:28 +08001276 dev_info(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
1277
1278err_put_node:
1279 of_node_put(mii_np);
1280 return ret;
1281}
1282
1283static int mtk_mdio_init(struct mtk_eth *eth)
1284{
1285 struct device_node *mii_np;
1286 int ret;
1287
1288 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
1289 if (!mii_np) {
1290 dev_err(eth->dev, "no %s child node found", "mdio-bus");
1291 return -ENODEV;
1292 }
1293
1294 if (!of_device_is_available(mii_np)) {
1295 ret = -ENODEV;
1296 goto err_put_node;
1297 }
1298
1299 eth->mii_bus = devm_mdiobus_alloc(eth->dev);
1300 if (!eth->mii_bus) {
1301 ret = -ENOMEM;
1302 goto err_put_node;
1303 }
1304
1305 eth->mii_bus->name = "mdio";
1306 eth->mii_bus->read = mtk_mdio_read;
1307 eth->mii_bus->write = mtk_mdio_write;
1308 eth->mii_bus->reset = mtk_mdio_reset;
1309 eth->mii_bus->priv = eth;
1310 eth->mii_bus->parent = eth->dev;
1311
1312 if (snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np) < 0) {
1313 ret = -ENOMEM;
1314 goto err_put_node;
1315 }
developerc8acd8d2022-11-10 09:07:10 +08001316
developerfd40db22021-04-29 10:08:25 +08001317 ret = of_mdiobus_register(eth->mii_bus, mii_np);
1318
1319err_put_node:
1320 of_node_put(mii_np);
1321 return ret;
1322}
1323
1324static void mtk_mdio_cleanup(struct mtk_eth *eth)
1325{
1326 if (!eth->mii_bus)
1327 return;
1328
1329 mdiobus_unregister(eth->mii_bus);
1330}
1331
1332static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
1333{
1334 unsigned long flags;
1335 u32 val;
1336
1337 spin_lock_irqsave(&eth->tx_irq_lock, flags);
developer68ce74f2023-01-03 16:11:57 +08001338 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
1339 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
developerfd40db22021-04-29 10:08:25 +08001340 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
1341}
1342
1343static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
1344{
1345 unsigned long flags;
1346 u32 val;
1347
1348 spin_lock_irqsave(&eth->tx_irq_lock, flags);
developer68ce74f2023-01-03 16:11:57 +08001349 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
1350 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
developerfd40db22021-04-29 10:08:25 +08001351 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
1352}
1353
1354static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
1355{
1356 unsigned long flags;
1357 u32 val;
1358
1359 spin_lock_irqsave(&eth->rx_irq_lock, flags);
developer68ce74f2023-01-03 16:11:57 +08001360 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
1361 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
developerfd40db22021-04-29 10:08:25 +08001362 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
1363}
1364
1365static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
1366{
1367 unsigned long flags;
1368 u32 val;
1369
1370 spin_lock_irqsave(&eth->rx_irq_lock, flags);
developer68ce74f2023-01-03 16:11:57 +08001371 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
1372 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
developerfd40db22021-04-29 10:08:25 +08001373 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
1374}
1375
1376static int mtk_set_mac_address(struct net_device *dev, void *p)
1377{
1378 int ret = eth_mac_addr(dev, p);
1379 struct mtk_mac *mac = netdev_priv(dev);
1380 struct mtk_eth *eth = mac->hw;
1381 const char *macaddr = dev->dev_addr;
1382
1383 if (ret)
1384 return ret;
1385
1386 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
1387 return -EBUSY;
1388
1389 spin_lock_bh(&mac->hw->page_lock);
1390 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1391 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
1392 MT7628_SDM_MAC_ADRH);
1393 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
1394 (macaddr[4] << 8) | macaddr[5],
1395 MT7628_SDM_MAC_ADRL);
1396 } else {
1397 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
1398 MTK_GDMA_MAC_ADRH(mac->id));
1399 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
1400 (macaddr[4] << 8) | macaddr[5],
1401 MTK_GDMA_MAC_ADRL(mac->id));
1402 }
1403 spin_unlock_bh(&mac->hw->page_lock);
1404
1405 return 0;
1406}
1407
/* Accumulate this MAC's hardware MIB counters into hw_stats.
 *
 * Counters live at fixed offsets from reg_map->gdm1_cnt plus the
 * per-MAC reg_offset.  Byte counters are 64-bit split across two
 * registers (low word first, high word only added when non-zero).
 * NETSYS_V3 places the TX counters at different offsets (0x40..0x54)
 * than older SoCs (0x28..0x38).  Readers are synchronized via the
 * u64_stats syncp; callers hold hw_stats->stats_lock.
 */
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_eth *eth = mac->hw;
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int offs = hw_stats->reg_offset;
	u64 stats;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
	stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x08 + offs);
	hw_stats->rx_overflow +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
	hw_stats->rx_fcs_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
	hw_stats->rx_short_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
	hw_stats->rx_long_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
	hw_stats->rx_checksum_errors +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
	hw_stats->rx_flow_control_packets +=
		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		/* V3 TX counter layout */
		hw_stats->tx_skip +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
		hw_stats->tx_collisions +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
		hw_stats->tx_bytes +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
	} else {
		/* Pre-V3 TX counter layout */
		hw_stats->tx_skip +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
		hw_stats->tx_collisions +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
		hw_stats->tx_bytes +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
	}

	u64_stats_update_end(&hw_stats->syncp);
}
1465
1466static void mtk_stats_update(struct mtk_eth *eth)
1467{
1468 int i;
1469
1470 for (i = 0; i < MTK_MAC_COUNT; i++) {
1471 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
1472 continue;
1473 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
1474 mtk_stats_update_mac(eth->mac[i]);
1475 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
1476 }
1477 }
1478}
1479
/* ndo_get_stats64: report accumulated stats for this netdev.
 *
 * Opportunistically refreshes the hardware counters first (trylock so a
 * concurrent updater is not blocked), then copies a consistent snapshot
 * out of hw_stats using the u64_stats fetch/retry sequence.  The
 * tx/rx_dropped and tx_errors fields come from the generic netdev
 * counters rather than the hardware MIBs.
 */
static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	/* Retry loop guarantees a torn-free read of the 64-bit counters */
	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
			hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}
1513
1514static inline int mtk_max_frag_size(int mtu)
1515{
1516 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
1517 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
1518 mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
1519
1520 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1521 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1522}
1523
1524static inline int mtk_max_buf_size(int frag_size)
1525{
1526 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1527 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1528
1529 WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
1530
1531 return buf_size;
1532}
1533
/* Snapshot a DMA RX descriptor into @rxd if hardware has completed it.
 *
 * rxd2 is read first and checked for RX_DMA_DONE before any other field
 * is touched: until DONE is set the remaining words may still be written
 * by hardware.  READ_ONCE prevents the compiler from tearing/reordering
 * the reads.  rxd5-rxd7 only exist on NETSYS_RX_V2-capable SoCs.
 *
 * Returns true when a complete descriptor was copied, false otherwise.
 */
static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
			    struct mtk_rx_dma_v2 *dma_rxd)
{
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	if (!(rxd->rxd2 & RX_DMA_DONE))
		return false;

	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
		rxd->rxd7 = READ_ONCE(dma_rxd->rxd7);
	}

	return true;
}
1553
/* the qdma core needs scratch memory to be setup */
/* Build the QDMA free-queue: a ring of MTK_DMA_SIZE descriptors, each
 * pointing at one MTK_QDMA_PAGE_SIZE slice of a single DMA-mapped
 * scratch buffer, chained via txd2 and terminated at the last entry.
 * The ring lives either in coherent DMA memory or in on-chip SRAM
 * (NETSYS_V2 uses an offset into the register space, V3 a dedicated
 * sram_base).  Finally the head/tail/count/buffer-length registers of
 * the QDMA FQ are programmed.
 *
 * Returns 0 on success, -ENOMEM on allocation/mapping failure.
 * NOTE(review): on the dma_map_single failure path scratch_head is not
 * freed — presumably tolerated because probe fails; confirm.
 */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	if (!eth->soc->has_sram) {
		eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
					       cnt * soc->txrx.txd_size,
					       &eth->phy_scratch_ring,
					       GFP_KERNEL);
	} else {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
			eth->scratch_ring = eth->sram_base;
		else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
			eth->scratch_ring = eth->base + MTK_ETH_SRAM_OFFSET;
	}

	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	/* One page-sized backing slice per FQ descriptor */
	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dma_dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
		return -ENOMEM;

	phy_ring_tail = eth->phy_scratch_ring +
			(dma_addr_t)soc->txrx.txd_size * (cnt - 1);

	for (i = 0; i < cnt; i++) {
		struct mtk_tx_dma_v2 *txd;

		txd = eth->scratch_ring + i * soc->txrx.txd_size;
		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
		if (i < cnt - 1)
			txd->txd2 = eth->phy_scratch_ring +
				    (i + 1) * soc->txrx.txd_size;

		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
		txd->txd4 = 0;

		/* Extended descriptor words only exist on V2/V3 */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			txd->txd5 = 0;
			txd->txd6 = 0;
			txd->txd7 = 0;
			txd->txd8 = 0;
		}
	}

	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);

	return 0;
}
1619
/* Translate a QDMA descriptor bus address @desc back into its CPU
 * virtual address within @ring (GNU void-pointer arithmetic on dma).
 */
static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	return ring->dma + (desc - ring->phys);
}
1624
1625static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
developere9356982022-07-04 09:03:20 +08001626 void *txd, u32 txd_size)
developerfd40db22021-04-29 10:08:25 +08001627{
developere9356982022-07-04 09:03:20 +08001628 int idx = (txd - ring->dma) / txd_size;
developerfd40db22021-04-29 10:08:25 +08001629
1630 return &ring->buf[idx];
1631}
1632
/* Convert a QDMA descriptor pointer into the corresponding entry of the
 * shadow PDMA ring: same offset from the ring base, different array.
 */
static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
				       void *dma)
{
	return ring->dma_pdma - ring->dma + dma;
}
1638
developere9356982022-07-04 09:03:20 +08001639static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
developerfd40db22021-04-29 10:08:25 +08001640{
developere9356982022-07-04 09:03:20 +08001641 return (dma - ring->dma) / txd_size;
developerfd40db22021-04-29 10:08:25 +08001642}
1643
/* Release the DMA mapping(s) and skb attached to @tx_buf.
 *
 * QDMA rings track one mapping per buffer, distinguished by the
 * SINGLE0/PAGE0 flag (single vs page mapping).  PDMA rings pack up to
 * two mappings per buffer (dma_addr0/1, see setup_tx_buf()), each freed
 * when its recorded length is non-zero.  The skb is freed unless it is
 * the MTK_DMA_DUMMY_DESC placeholder; @napi selects napi_consume_skb()
 * vs dev_kfree_skb_any() depending on the calling context.
 */
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 bool napi)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
			dma_unmap_single(eth->dma_dev,
					 dma_unmap_addr(tx_buf, dma_addr0),
					 dma_unmap_len(tx_buf, dma_len0),
					 DMA_TO_DEVICE);
		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}
	} else {
		if (dma_unmap_len(tx_buf, dma_len0)) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}

		if (dma_unmap_len(tx_buf, dma_len1)) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr1),
				       dma_unmap_len(tx_buf, dma_len1),
				       DMA_TO_DEVICE);
		}
	}

	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
		if (napi)
			napi_consume_skb(tx_buf->skb, napi);
		else
			dev_kfree_skb_any(tx_buf->skb);
	}
	tx_buf->skb = NULL;
}
1685
/* Record a DMA mapping in @tx_buf (and, for PDMA, in the descriptor).
 *
 * QDMA: simply store address/length in slot 0; the descriptor itself is
 * filled elsewhere.  PDMA packs two fragments per descriptor: even @idx
 * fills txd1/PLEN0 and slot 0 (marking the buffer with the dummy skb
 * placeholder), odd @idx fills txd3/PLEN1 and slot 1 of the same buffer.
 * mtk_tx_unmap() relies on exactly this layout.
 */
static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
			 size_t size, int idx)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
		dma_unmap_len_set(tx_buf, dma_len0, size);
	} else {
		if (idx & 1) {
			txd->txd3 = mapped_addr;
			txd->txd2 |= TX_DMA_PLEN1(size);
			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len1, size);
		} else {
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			txd->txd1 = mapped_addr;
			txd->txd2 = TX_DMA_PLEN0(size);
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, size);
		}
	}
}
1708
/* Fill a V1 (legacy) TX DMA descriptor from @info.
 *
 * txd1 = buffer address; txd3 = length/queue-id low bits/SWC, plus LS0
 * on the last fragment; txd4 = forward-port (mac id + 1), queue-id high
 * bits, and — on the first fragment only — TSO/checksum/VLAN-insert
 * flags.  With HNAT enabled, skbs tagged with the 0x78681415 magic are
 * redirected to forward port 4.
 */
static void mtk_tx_set_dma_desc_v1(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *desc = txd;
	u32 data;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_SWC | QID_LOW_BITS(info->qid) | TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data);

	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
	data |= QID_HIGH_BITS(info->qid);
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM;
		/* vlan header offload */
		if (info->vlan)
			data |= TX_DMA_INS_VLAN | info->vlan_tci;
	}

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0x7 << TX_DMA_FPORT_SHIFT);
		data |= 0x4 << TX_DMA_FPORT_SHIFT;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);
}
1748
/* Fill a V2 (NETSYS v2) TX DMA descriptor from @info.
 *
 * Differences from V1: the queue id defaults to MTK_QDMA_GMAC2_QID for
 * non-zero mac ids, forward-port/SWC/queue id move to txd4 (GMAC3 maps
 * to PSE_GDM3_PORT), TSO/checksum flags move to txd5, VLAN insertion to
 * txd6, and txd7/txd8 are zeroed.  The same HNAT magic redirects the
 * forward port to 4.
 */
static void mtk_tx_set_dma_desc_v2(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma_v2 *desc = txd;
	u32 data = 0;

	if (!info->qid && mac->id)
		info->qid = MTK_QDMA_GMAC2_QID;

	WRITE_ONCE(desc->txd1, info->addr);

	data = TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data);

	data = ((mac->id == MTK_GMAC3_ID) ?
		PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
		data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);

	data = 0;
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO_V2;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM_V2;
	}
	WRITE_ONCE(desc->txd5, data);

	data = 0;
	if (info->first && info->vlan)
		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
	WRITE_ONCE(desc->txd6, data);

	WRITE_ONCE(desc->txd7, 0);
	WRITE_ONCE(desc->txd8, 0);
}
1799
/* Populate a NETSYS v3 TX DMA descriptor for one segment.
 *
 * @skb:  packet being transmitted; only consulted for the HNAT
 *        control-block magic when HNAT support is compiled in
 * @dev:  transmitting net_device; selects PSE forward port and default qid
 * @txd:  hardware descriptor to fill (laid out as struct mtk_tx_dma_v2)
 * @info: software-side segment info (DMA address, size, qid, offload flags)
 */
static void mtk_tx_set_dma_desc_v3(struct sk_buff *skb, struct net_device *dev, void *txd,
				   struct mtk_tx_dma_desc_info *info)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma_v2 *desc = txd;
	u64 addr64 = 0;
	u32 data = 0;

	/* packets without an explicit qid on a non-GMAC1 port use the
	 * per-GMAC2 default queue
	 */
	if (!info->qid && mac->id)
		info->qid = MTK_QDMA_GMAC2_QID;

	/* On SoCs with >4GB DMA addressing, TX_DMA_SDP1() extracts the
	 * extra address bits of info->addr, which are merged into txd3
	 * below (presumably the upper address bits — confirm against the
	 * descriptor layout in mtk_eth_soc.h).
	 */
	addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
		 TX_DMA_SDP1(info->addr) : 0;

	/* txd1: low part of the segment buffer's DMA address */
	WRITE_ONCE(desc->txd1, info->addr);

	/* txd3: segment length, last-segment flag, extended address bits */
	data = TX_DMA_PLEN0(info->size);
	if (info->last)
		data |= TX_DMA_LS0;
	WRITE_ONCE(desc->txd3, data | addr64);

	data = ((mac->id == MTK_GMAC3_ID) ?
		PSE_GDM3_PORT : (mac->id + 1)) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	/* HNAT-tagged packets are steered to PSE port 4 instead of the GMAC */
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		data &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
		data |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
	}

	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
#endif
	WRITE_ONCE(desc->txd4, data);

	/* txd5: TSO / checksum offloads and DSA special tag, first segment only */
	data = 0;
	if (info->first) {
		if (info->gso)
			data |= TX_DMA_TSO_V2;
		/* tx checksum offload */
		if (info->csum)
			data |= TX_DMA_CHKSUM_V2;

		/* ports behind a DSA switch need the special-tag flag set */
		if (netdev_uses_dsa(dev))
			data |= TX_DMA_SPTAG_V3;
	}
	WRITE_ONCE(desc->txd5, data);

	/* txd6: VLAN tag insertion, first segment only */
	data = 0;
	if (info->first && info->vlan)
		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
	WRITE_ONCE(desc->txd6, data);

	WRITE_ONCE(desc->txd7, 0);
	WRITE_ONCE(desc->txd8, 0);
}
1857
1858static void mtk_tx_set_dma_desc(struct sk_buff *skb, struct net_device *dev, void *txd,
1859 struct mtk_tx_dma_desc_info *info)
1860{
1861 struct mtk_mac *mac = netdev_priv(dev);
1862 struct mtk_eth *eth = mac->hw;
1863
developerce08bca2022-10-06 16:21:13 +08001864 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
1865 mtk_tx_set_dma_desc_v3(skb, dev, txd, info);
1866 else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developere9356982022-07-04 09:03:20 +08001867 mtk_tx_set_dma_desc_v2(skb, dev, txd, info);
1868 else
1869 mtk_tx_set_dma_desc_v1(skb, dev, txd, info);
1870}
1871
developerfd40db22021-04-29 10:08:25 +08001872static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1873 int tx_num, struct mtk_tx_ring *ring, bool gso)
1874{
developere9356982022-07-04 09:03:20 +08001875 struct mtk_tx_dma_desc_info txd_info = {
1876 .size = skb_headlen(skb),
1877 .qid = skb->mark & MTK_QDMA_TX_MASK,
1878 .gso = gso,
1879 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
1880 .vlan = skb_vlan_tag_present(skb),
1881 .vlan_tci = skb_vlan_tag_get(skb),
1882 .first = true,
1883 .last = !skb_is_nonlinear(skb),
1884 };
developerfd40db22021-04-29 10:08:25 +08001885 struct mtk_mac *mac = netdev_priv(dev);
1886 struct mtk_eth *eth = mac->hw;
developere9356982022-07-04 09:03:20 +08001887 const struct mtk_soc_data *soc = eth->soc;
developerfd40db22021-04-29 10:08:25 +08001888 struct mtk_tx_dma *itxd, *txd;
1889 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1890 struct mtk_tx_buf *itx_buf, *tx_buf;
developerfd40db22021-04-29 10:08:25 +08001891 int i, n_desc = 1;
developerfd40db22021-04-29 10:08:25 +08001892 int k = 0;
1893
developerb3a9e7b2023-02-08 15:18:10 +08001894 if (skb->len < 32) {
1895 if (skb_put_padto(skb, MTK_MIN_TX_LENGTH))
1896 return -ENOMEM;
1897
1898 txd_info.size = skb_headlen(skb);
1899 }
1900
developerfd40db22021-04-29 10:08:25 +08001901 itxd = ring->next_free;
1902 itxd_pdma = qdma_to_pdma(ring, itxd);
1903 if (itxd == ring->last_free)
1904 return -ENOMEM;
1905
developere9356982022-07-04 09:03:20 +08001906 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001907 memset(itx_buf, 0, sizeof(*itx_buf));
1908
developer3f28d382023-03-07 16:06:30 +08001909 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
developere9356982022-07-04 09:03:20 +08001910 DMA_TO_DEVICE);
developer3f28d382023-03-07 16:06:30 +08001911 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
developerfd40db22021-04-29 10:08:25 +08001912 return -ENOMEM;
1913
developere9356982022-07-04 09:03:20 +08001914 mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);
1915
developerfd40db22021-04-29 10:08:25 +08001916 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
developer089e8852022-09-28 14:43:46 +08001917 itx_buf->flags |= (mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
1918 (mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
1919 MTK_TX_FLAGS_FPORT2;
developere9356982022-07-04 09:03:20 +08001920 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
developerfd40db22021-04-29 10:08:25 +08001921 k++);
1922
developerfd40db22021-04-29 10:08:25 +08001923 /* TX SG offload */
1924 txd = itxd;
1925 txd_pdma = qdma_to_pdma(ring, txd);
1926
developere9356982022-07-04 09:03:20 +08001927 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
developerfd40db22021-04-29 10:08:25 +08001928 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1929 unsigned int offset = 0;
1930 int frag_size = skb_frag_size(frag);
1931
1932 while (frag_size) {
developerfd40db22021-04-29 10:08:25 +08001933 bool new_desc = true;
1934
developere9356982022-07-04 09:03:20 +08001935 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
developerfd40db22021-04-29 10:08:25 +08001936 (i & 0x1)) {
1937 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1938 txd_pdma = qdma_to_pdma(ring, txd);
1939 if (txd == ring->last_free)
1940 goto err_dma;
1941
1942 n_desc++;
1943 } else {
1944 new_desc = false;
1945 }
1946
developere9356982022-07-04 09:03:20 +08001947 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1948 txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
1949 txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
1950 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1951 !(frag_size - txd_info.size);
developer3f28d382023-03-07 16:06:30 +08001952 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
developere9356982022-07-04 09:03:20 +08001953 offset, txd_info.size,
1954 DMA_TO_DEVICE);
developer3f28d382023-03-07 16:06:30 +08001955 if (unlikely(dma_mapping_error(eth->dma_dev,
1956 txd_info.addr)))
developere9356982022-07-04 09:03:20 +08001957 goto err_dma;
developerfd40db22021-04-29 10:08:25 +08001958
developere9356982022-07-04 09:03:20 +08001959 mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);
developerfd40db22021-04-29 10:08:25 +08001960
developere9356982022-07-04 09:03:20 +08001961 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
developerfd40db22021-04-29 10:08:25 +08001962 if (new_desc)
1963 memset(tx_buf, 0, sizeof(*tx_buf));
1964 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
1965 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
developer089e8852022-09-28 14:43:46 +08001966 tx_buf->flags |=
1967 (mac->id == MTK_GMAC1_ID) ? MTK_TX_FLAGS_FPORT0 :
1968 (mac->id == MTK_GMAC2_ID) ? MTK_TX_FLAGS_FPORT1 :
1969 MTK_TX_FLAGS_FPORT2;
developerfd40db22021-04-29 10:08:25 +08001970
developere9356982022-07-04 09:03:20 +08001971 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1972 txd_info.size, k++);
developerfd40db22021-04-29 10:08:25 +08001973
developere9356982022-07-04 09:03:20 +08001974 frag_size -= txd_info.size;
1975 offset += txd_info.size;
developerfd40db22021-04-29 10:08:25 +08001976 }
1977 }
1978
1979 /* store skb to cleanup */
1980 itx_buf->skb = skb;
1981
developere9356982022-07-04 09:03:20 +08001982 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
developerfd40db22021-04-29 10:08:25 +08001983 if (k & 0x1)
1984 txd_pdma->txd2 |= TX_DMA_LS0;
1985 else
1986 txd_pdma->txd2 |= TX_DMA_LS1;
1987 }
1988
1989 netdev_sent_queue(dev, skb->len);
1990 skb_tx_timestamp(skb);
1991
1992 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1993 atomic_sub(n_desc, &ring->free_count);
1994
1995