blob: e0955fc0a8fda70de78db8eb3182265f5f8fd4f1 [file] [log] [blame]
/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (c) 2022 MediaTek Inc.
 * Author: Henry Yen <henry.yen@mediatek.com>
 */
6
7#include <linux/regmap.h>
8#include "mtk_eth_soc.h"
9#include "mtk_eth_dbg.h"
10#include "mtk_eth_reset.h"
11
/* Human-readable labels for the reset-event counters; indexed by the
 * MTK_EVENT_* ids (unnamed slots stay NULL).
 */
char* mtk_reset_event_name[32] = {
	[MTK_EVENT_FORCE]	= "Force",
	[MTK_EVENT_WARM_CNT]	= "Warm",
	[MTK_EVENT_COLD_CNT]	= "Cold",
	[MTK_EVENT_TOTAL_CNT]	= "Total",
	[MTK_EVENT_FQ_EMPTY]	= "FQ Empty",
	[MTK_EVENT_TSO_FAIL]	= "TSO Fail",
	[MTK_EVENT_TSO_ILLEGAL]	= "TSO Illegal",
	[MTK_EVENT_TSO_ALIGN]	= "TSO Align",
	[MTK_EVENT_RFIFO_OV]	= "RFIFO OV",
	[MTK_EVENT_RFIFO_UF]	= "RFIFO UF",
};
24
/* Number of WiFi chips currently registered for coordinated SER. */
static int mtk_wifi_num = 0;
/* Peers still expected to acknowledge before the reset may proceed. */
static int mtk_rest_cnt = 0;
/* Type of the pending frame-engine recovery request (MTK_FE_*). */
u32 mtk_reset_flag = MTK_FE_START_RESET;
/* Signature of an XDMA hang-monitor hook; returns the requested action. */
typedef u32 (*mtk_monitor_xdma_func) (struct mtk_eth *eth);
developerbe971722022-05-23 13:51:05 +080029
developer8051e042022-04-08 13:26:36 +080030void mtk_reset_event_update(struct mtk_eth *eth, u32 id)
31{
32 struct mtk_reset_event *reset_event = &eth->reset_event;
33 reset_event->count[id]++;
34}
35
36int mtk_eth_cold_reset(struct mtk_eth *eth)
37{
developer0baa6962023-01-31 14:25:23 +080038 u32 reset_bits = 0;
developer089e8852022-09-28 14:43:46 +080039 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
40 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer8051e042022-04-08 13:26:36 +080041 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
42
developer0baa6962023-01-31 14:25:23 +080043 reset_bits = RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE0;
developer8051e042022-04-08 13:26:36 +080044 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
developer0baa6962023-01-31 14:25:23 +080045 reset_bits |= RSTCTRL_PPE1;
46#if defined(CONFIG_MEDIATEK_NETSYS_V3)
47 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
48 reset_bits |= RSTCTRL_PPE2;
49 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
50 reset_bits |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
51#endif
52 ethsys_reset(eth, reset_bits);
developer8051e042022-04-08 13:26:36 +080053
developer089e8852022-09-28 14:43:46 +080054 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
55 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer8051e042022-04-08 13:26:36 +080056 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0x3ffffff);
57
58 return 0;
59}
60
/*
 * mtk_eth_warm_reset - attempt a staged (warm) reset of the frame engine.
 *
 * Sequence: assert RSTCTRL_FE, wait for it to latch, assert the remaining
 * block resets (PPE/WDMA as applicable), then de-assert everything.  If the
 * FE bit never latches, fall back to mtk_eth_cold_reset().  Always returns 0.
 */
int mtk_eth_warm_reset(struct mtk_eth *eth)
{
	u32 reset_bits = 0, i = 0, done = 0;
	/* val1/2/3: RSTCTRL readbacks after each stage, for the log below. */
	u32 val1 = 0, val2 = 0, val3 = 0;

	/* Settling delay before touching the reset controller. */
	mdelay(100);

	/* Stage 1: assert the FE reset bit alone. */
	reset_bits |= RSTCTRL_FE;
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits, reset_bits);

	/* Poll up to ~1ms for the FE reset bit to read back as set. */
	while (i < 1000) {
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val1);
		if (val1 & RSTCTRL_FE)
			break;
		i++;
		udelay(1);
	}

	if (i < 1000) {
		/* Stage 2: assert the sibling blocks while FE is held. */
		reset_bits = RSTCTRL_ETH | RSTCTRL_PPE0;
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
			reset_bits |= RSTCTRL_PPE1;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
			reset_bits |= RSTCTRL_PPE2;
		/* WDMA is only reset for a full FE start-reset request. */
		if (mtk_reset_flag == MTK_FE_START_RESET)
			reset_bits |= RSTCTRL_WDMA0 |
				      RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
#endif

		regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
				   reset_bits, reset_bits);

		udelay(1);
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val2);
		if (!(val2 & reset_bits))
			pr_info("[%s] error val2=0x%x reset_bits=0x%x !\n",
				__func__, val2, reset_bits);
		/* Stage 3: de-assert everything, FE included. */
		reset_bits |= RSTCTRL_FE;
		regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
				   reset_bits, ~reset_bits);

		udelay(1);
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val3);
		if (val3 & reset_bits)
			pr_info("[%s] error val3=0x%x reset_bits=0x%x !\n",
				__func__, val3, reset_bits);
		done = 1;
		mtk_reset_event_update(eth, MTK_EVENT_WARM_CNT);
	}

	pr_info("[%s] reset record val1=0x%x, val2=0x%x, val3=0x%x i:%d done:%d\n",
		__func__, val1, val2, val3, i, done);

	/* FE reset never latched: escalate to a cold reset. */
	if (!done)
		mtk_eth_cold_reset(eth);

	return 0;
}
121
122u32 mtk_check_reset_event(struct mtk_eth *eth, u32 status)
123{
124 u32 ret = 0, val = 0;
125
126 if ((status & MTK_FE_INT_FQ_EMPTY) ||
127 (status & MTK_FE_INT_RFIFO_UF) ||
128 (status & MTK_FE_INT_RFIFO_OV) ||
129 (status & MTK_FE_INT_TSO_FAIL) ||
130 (status & MTK_FE_INT_TSO_ALIGN) ||
131 (status & MTK_FE_INT_TSO_ILLEGAL)) {
132 while (status) {
133 val = ffs((unsigned int)status) - 1;
134 mtk_reset_event_update(eth, val);
135 status &= ~(1 << val);
136 }
137 ret = 1;
138 }
139
140 if (atomic_read(&force)) {
141 mtk_reset_event_update(eth, MTK_EVENT_FORCE);
142 ret = 1;
143 }
144
145 if (ret) {
146 mtk_reset_event_update(eth, MTK_EVENT_TOTAL_CNT);
147 mtk_dump_netsys_info(eth);
148 }
149
150 return ret;
151}
152
153irqreturn_t mtk_handle_fe_irq(int irq, void *_eth)
154{
155 struct mtk_eth *eth = _eth;
156 u32 status = 0, val = 0;
157
158 status = mtk_r32(eth, MTK_FE_INT_STATUS);
159 pr_info("[%s] Trigger FE Misc ISR: 0x%x\n", __func__, status);
160
161 while (status) {
162 val = ffs((unsigned int)status) - 1;
163 status &= ~(1 << val);
developera7ee5fe2022-04-21 17:45:57 +0800164
developerbe971722022-05-23 13:51:05 +0800165 if ((val == MTK_EVENT_TSO_FAIL) ||
developera7ee5fe2022-04-21 17:45:57 +0800166 (val == MTK_EVENT_TSO_ILLEGAL) ||
167 (val == MTK_EVENT_TSO_ALIGN) ||
168 (val == MTK_EVENT_RFIFO_OV) ||
169 (val == MTK_EVENT_RFIFO_UF))
170 pr_info("[%s] Detect reset event: %s !\n", __func__,
171 mtk_reset_event_name[val]);
developer8051e042022-04-08 13:26:36 +0800172 }
developera7ee5fe2022-04-21 17:45:57 +0800173 mtk_w32(eth, 0xFFFFFFFF, MTK_FE_INT_STATUS);
developer8051e042022-04-08 13:26:36 +0800174
175 return IRQ_HANDLED;
176}
177
178static void mtk_dump_reg(void *_eth, char *name, u32 offset, u32 range)
179{
180 struct mtk_eth *eth = _eth;
181 u32 cur = offset;
182
183 pr_info("\n============ %s ============\n", name);
184 while(cur < offset + range) {
185 pr_info("0x%x: %08x %08x %08x %08x\n",
186 cur, mtk_r32(eth, cur), mtk_r32(eth, cur + 0x4),
187 mtk_r32(eth, cur + 0x8), mtk_r32(eth, cur + 0xc));
188 cur += 0x10;
189 }
190}
191
192void mtk_dump_netsys_info(void *_eth)
193{
194 struct mtk_eth *eth = _eth;
developer797e46c2022-07-29 12:05:32 +0800195 u32 id = 0;
developer8051e042022-04-08 13:26:36 +0800196
197 mtk_dump_reg(eth, "FE", 0x0, 0x500);
198 mtk_dump_reg(eth, "ADMA", PDMA_BASE, 0x300);
developer797e46c2022-07-29 12:05:32 +0800199 for (id = 0; id < MTK_QDMA_PAGE_NUM; id++){
200 mtk_w32(eth, id, MTK_QDMA_PAGE);
201 pr_info("\nQDMA PAGE:%x ",mtk_r32(eth, MTK_QDMA_PAGE));
202 mtk_dump_reg(eth, "QDMA", QDMA_BASE, 0x100);
203 mtk_w32(eth, 0, MTK_QDMA_PAGE);
204 }
205 mtk_dump_reg(eth, "QDMA", MTK_QRX_BASE_PTR0, 0x300);
developer8051e042022-04-08 13:26:36 +0800206 mtk_dump_reg(eth, "WDMA", WDMA_BASE(0), 0x600);
207 mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
208 mtk_dump_reg(eth, "GMAC", 0x10000, 0x300);
developer0baa6962023-01-31 14:25:23 +0800209 mtk_dump_reg(eth, "XGMAC0", 0x12000, 0x300);
210 mtk_dump_reg(eth, "XGMAC1", 0x13000, 0x300);
211 mtk_dump_usxgmii(eth->xgmii->regmap_usxgmii[0], "USXGMII0", 0, 0x1000);
212 mtk_dump_usxgmii(eth->xgmii->regmap_usxgmii[1], "USXGMII1", 0, 0x1000);
developer8051e042022-04-08 13:26:36 +0800213}
214
/*
 * mtk_monitor_wdma_tx - detect a stuck WDMA TX ring.
 *
 * A ring is considered hung when, for three consecutive polls, its DTX
 * pointer has not advanced while the DMA reports TX busy and the CDM TX
 * FIFO is not ready.  State is kept in function-local statics, so this
 * must only be called from the single monitor timer context.
 *
 * Returns MTK_FE_START_RESET when a hang is confirmed, 0 otherwise.
 */
u32 mtk_monitor_wdma_tx(struct mtk_eth *eth)
{
	static u32 pre_dtx[MTK_WDMA_CNT];	/* DTX pointer from last poll */
	static u32 err_cnt[MTK_WDMA_CNT];	/* consecutive stuck polls */
	u32 i = 0, cur_dtx = 0, tx_busy = 0, tx_rdy = 0, err_flag = 0;
	u32 dbg_mon = 0;

	for (i = 0; i < MTK_WDMA_CNT; i++) {
		cur_dtx = mtk_r32(eth, MTK_WDMA_DTX_PTR(i));
		tx_busy = mtk_r32(eth, MTK_WDMA_GLO_CFG(i)) & MTK_TX_DMA_BUSY;
		dbg_mon = mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(i));
		tx_rdy = !(dbg_mon & MTK_CDM_TXFIFO_RDY);
		if (cur_dtx == pre_dtx[i] && tx_busy && tx_rdy) {
			err_cnt[i]++;
			if (err_cnt[i] >= 3) {
				/* Hang confirmed: dump the ring state. */
				pr_info("WDMA %d Info\n", i);
				pr_info("err_cnt = %d", err_cnt[i]);
				pr_info("prev_dtx = 0x%x | cur_dtx = 0x%x\n",
					pre_dtx[i], cur_dtx);
				pr_info("WDMA_CTX_PTR = 0x%x\n",
					mtk_r32(eth, MTK_WDMA_CTX_PTR(i)));
				pr_info("WDMA_DTX_PTR = 0x%x\n",
					mtk_r32(eth, MTK_WDMA_DTX_PTR(i)));
				pr_info("WDMA_GLO_CFG = 0x%x\n",
					mtk_r32(eth, MTK_WDMA_GLO_CFG(i)));
				pr_info("WDMA_TX_DBG_MON0 = 0x%x\n",
					mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(i)));
				pr_info("==============================\n");
				err_flag = 1;
			}
		} else
			err_cnt[i] = 0;
		pre_dtx[i] = cur_dtx;
	}

	if (err_flag)
		return MTK_FE_START_RESET;
	else
		return 0;
}
developer8051e042022-04-08 13:26:36 +0800255
/*
 * mtk_monitor_wdma_rx - detect a stuck WDMA RX ring.
 *
 * A ring is considered hung when, for three consecutive polls, its DRX
 * pointer has not advanced while RX DMA reports busy and the matching PSE
 * output-queue occupancy is non-zero and unchanged (packets queued but not
 * drained).  Returns MTK_FE_START_RESET when a hang is confirmed, else 0.
 */
u32 mtk_monitor_wdma_rx(struct mtk_eth *eth)
{
	static u32 pre_drx[MTK_WDMA_CNT];	/* DRX pointer from last poll */
	static u32 pre_opq[MTK_WDMA_CNT];	/* PSE OQ level from last poll */
	static u32 err_cnt[MTK_WDMA_CNT];	/* consecutive stuck polls */
	u32 i = 0, cur_drx = 0, rx_busy = 0, err_flag = 0;
	u32 cur_opq = 0;

	for (i = 0; i < MTK_WDMA_CNT; i++) {
		cur_drx = mtk_r32(eth, MTK_WDMA_DRX_PTR(i));
		rx_busy = mtk_r32(eth, MTK_WDMA_GLO_CFG(i)) & MTK_RX_DMA_BUSY;
		/* Per-WDMA PSE queue occupancy field; presumably WDMA0/1
		 * live in the low/high halves of OQ_STA(5) and WDMA2 in
		 * OQ_STA(7) — per the SoC PSE queue map, verify there.
		 */
		if (i == 0)
			cur_opq = (mtk_r32(eth, MTK_PSE_OQ_STA(5)) & 0x1FF);
		else if (i == 1)
			cur_opq = (mtk_r32(eth, MTK_PSE_OQ_STA(5)) & 0x1FF0000);
		else
			cur_opq = (mtk_r32(eth, MTK_PSE_OQ_STA(7)) & 0x1FF0000);

		if (cur_drx == pre_drx[i] && rx_busy && cur_opq != 0 &&
		    cur_opq == pre_opq[i]) {
			err_cnt[i]++;
			if (err_cnt[i] >= 3) {
				/* Hang confirmed: dump the ring state. */
				pr_info("WDMA %d Info\n", i);
				pr_info("err_cnt = %d", err_cnt[i]);
				pr_info("prev_drx = 0x%x | cur_drx = 0x%x\n",
					pre_drx[i], cur_drx);
				pr_info("WDMA_CRX_PTR = 0x%x\n",
					mtk_r32(eth, MTK_WDMA_CRX_PTR(i)));
				pr_info("WDMA_DRX_PTR = 0x%x\n",
					mtk_r32(eth, MTK_WDMA_DRX_PTR(i)));
				pr_info("WDMA_GLO_CFG = 0x%x\n",
					mtk_r32(eth, MTK_WDMA_GLO_CFG(i)));
				pr_info("==============================\n");
				err_flag = 1;
			}
		} else
			err_cnt[i] = 0;
		pre_drx[i] = cur_drx;
		pre_opq[i] = cur_opq;
	}

	if (err_flag)
		return MTK_FE_START_RESET;
	else
		return 0;
}
302
303u32 mtk_monitor_rx_fc(struct mtk_eth *eth)
304{
305 u32 i = 0, mib_base = 0, gdm_fc = 0;
306
307 for (i = 0; i < MTK_MAC_COUNT; i++) {
308 mib_base = MTK_GDM1_TX_GBCNT + MTK_STAT_OFFSET*i;
309 gdm_fc = mtk_r32(eth, mib_base);
310 if (gdm_fc < 1)
311 return 1;
312 }
313 return 0;
314}
315
316u32 mtk_monitor_qdma_tx(struct mtk_eth *eth)
317{
318 static u32 err_cnt_qtx;
319 u32 err_flag = 0;
320 u32 i = 0, is_rx_fc = 0;
321
322 u32 is_qfsm_hang = (mtk_r32(eth, MTK_QDMA_FSM) & 0xF00) != 0;
323 u32 is_qfwd_hang = mtk_r32(eth, MTK_QDMA_FWD_CNT) == 0;
324
325 is_rx_fc = mtk_monitor_rx_fc(eth);
326 if (is_qfsm_hang && is_qfwd_hang && is_rx_fc) {
327 err_cnt_qtx++;
328 if (err_cnt_qtx >= 3) {
developer797e46c2022-07-29 12:05:32 +0800329 pr_info("QDMA Tx Info\n");
developer37482a42022-12-26 13:31:13 +0800330 pr_info("err_cnt = %d", err_cnt_qtx);
developer8051e042022-04-08 13:26:36 +0800331 pr_info("is_qfsm_hang = %d\n", is_qfsm_hang);
332 pr_info("is_qfwd_hang = %d\n", is_qfwd_hang);
developer8051e042022-04-08 13:26:36 +0800333 pr_info("-- -- -- -- -- -- --\n");
334 pr_info("MTK_QDMA_FSM = 0x%x\n",
335 mtk_r32(eth, MTK_QDMA_FSM));
336 pr_info("MTK_QDMA_FWD_CNT = 0x%x\n",
337 mtk_r32(eth, MTK_QDMA_FWD_CNT));
338 pr_info("MTK_QDMA_FQ_CNT = 0x%x\n",
339 mtk_r32(eth, MTK_QDMA_FQ_CNT));
340 pr_info("==============================\n");
developer37482a42022-12-26 13:31:13 +0800341 err_flag = 1;
developer8051e042022-04-08 13:26:36 +0800342 }
developer37482a42022-12-26 13:31:13 +0800343 } else
344 err_cnt_qtx = 0;
345
346 if (err_flag)
347 return MTK_FE_STOP_TRAFFIC;
348 else
349 return 0;
350}
351
352u32 mtk_monitor_qdma_rx(struct mtk_eth *eth)
353{
354 static u32 err_cnt_qrx;
355 static u32 pre_fq_head, pre_fq_tail;
356 u32 err_flag = 0;
357
358 u32 qrx_fsm = (mtk_r32(eth, MTK_QDMA_FSM) & 0x1F) == 9;
359 u32 fq_head = mtk_r32(eth, MTK_QDMA_FQ_HEAD);
360 u32 fq_tail = mtk_r32(eth, MTK_QDMA_FQ_TAIL);
361
362 if (qrx_fsm && fq_head == pre_fq_head &&
363 fq_tail == pre_fq_tail) {
364 err_cnt_qrx++;
365 if (err_cnt_qrx >= 3) {
366 pr_info("QDMA Rx Info\n");
367 pr_info("err_cnt = %d", err_cnt_qrx);
368 pr_info("MTK_QDMA_FSM = %d\n",
369 mtk_r32(eth, MTK_QDMA_FSM));
370 pr_info("FQ_HEAD = 0x%x\n",
371 mtk_r32(eth, MTK_QDMA_FQ_HEAD));
372 pr_info("FQ_TAIL = 0x%x\n",
373 mtk_r32(eth, MTK_QDMA_FQ_TAIL));
374 err_flag = 1;
375 } else
376 err_cnt_qrx = 0;
377 }
378 pre_fq_head = fq_head;
379 pre_fq_tail = fq_tail;
380
381 if (err_flag)
382 return MTK_FE_STOP_TRAFFIC;
383 else
384 return 0;
385}
386
387
/*
 * mtk_monitor_adma_rx - detect a stuck ADMA RX path.
 *
 * A hang is assumed when, for three consecutive polls, PSE output queue 0
 * holds data, the CDM1 FSM upper half is non-zero, and the ADMA RX debug
 * register shows an idle state with the FIFO-ready bit clear.
 * NOTE(review): MTK_ADMA_RX_DBG0 is read twice here, so cur_stat and
 * fifo_rdy may come from two different snapshots — confirm intended.
 *
 * Returns MTK_FE_STOP_TRAFFIC when a hang is confirmed, 0 otherwise.
 */
u32 mtk_monitor_adma_rx(struct mtk_eth *eth)
{
	static u32 err_cnt_arx;	/* consecutive polls that looked hung */
	u32 err_flag = 0;
	u32 opq0 = (mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x1FF) != 0;
	u32 cdm1_fsm = (mtk_r32(eth, MTK_FE_CDM1_FSM) & 0xFFFF0000) != 0;
	u32 cur_stat = ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x1F) == 0);
	u32 fifo_rdy = ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x40) == 0);

	if (opq0 && cdm1_fsm && cur_stat && fifo_rdy) {
		err_cnt_arx++;
		if (err_cnt_arx >= 3) {
			/* Hang confirmed: dump the ADMA RX state. */
			pr_info("ADMA Rx Info\n");
			pr_info("err_cnt = %d", err_cnt_arx);
			pr_info("CDM1_FSM = %d\n",
				mtk_r32(eth, MTK_FE_CDM1_FSM));
			pr_info("MTK_PSE_OQ_STA1 = 0x%x\n",
				mtk_r32(eth, MTK_PSE_OQ_STA(0)));
			pr_info("MTK_ADMA_RX_DBG0 = 0x%x\n",
				mtk_r32(eth, MTK_ADMA_RX_DBG0));
			pr_info("MTK_ADMA_RX_DBG1 = 0x%x\n",
				mtk_r32(eth, MTK_ADMA_RX_DBG1));
			pr_info("==============================\n");
			err_flag = 1;
		}
	} else
		err_cnt_arx = 0;

	if (err_flag)
		return MTK_FE_STOP_TRAFFIC;
	else
		return 0;
}
421
/*
 * mtk_monitor_tdma_tx - detect a stuck TDMA TX path (NETSYS v3 only).
 *
 * A hang is assumed when, for three consecutive polls, the CDM6 FSM
 * low-bits flag is unchanged and non-zero while the TDMA TX busy bit is
 * set.  Returns MTK_FE_STOP_TRAFFIC when a hang is confirmed, 0 otherwise
 * (and always 0 on SoCs without NETSYS v3).
 */
u32 mtk_monitor_tdma_tx(struct mtk_eth *eth)
{
	static u32 err_cnt_ttx;	/* consecutive polls that looked hung */
	static u32 pre_fsm;	/* CDM6 FSM non-idle flag from last poll */
	u32 err_flag = 0;
	u32 cur_fsm = 0;
	u32 tx_busy = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		cur_fsm = (mtk_r32(eth, MTK_FE_CDM6_FSM) & 0x1FFF) != 0;
		tx_busy = ((mtk_r32(eth, MTK_TDMA_GLO_CFG) & 0x2) != 0);

		if (cur_fsm == pre_fsm && cur_fsm != 0 && tx_busy) {
			err_cnt_ttx++;
			if (err_cnt_ttx >= 3) {
				/* Hang confirmed: dump the TDMA TX state. */
				pr_info("TDMA Tx Info\n");
				pr_info("err_cnt = %d", err_cnt_ttx);
				pr_info("CDM6_FSM = %d\n",
					mtk_r32(eth, MTK_FE_CDM6_FSM));
				pr_info("DMA CFG = 0x%x\n",
					mtk_r32(eth, MTK_TDMA_GLO_CFG));
				pr_info("==============================\n");
				err_flag = 1;
			}
		} else
			err_cnt_ttx = 0;

		pre_fsm = cur_fsm;
	}

	if (err_flag)
		return MTK_FE_STOP_TRAFFIC;
	else
		return 0;
}
457
/*
 * mtk_monitor_tdma_rx - detect a stuck TDMA RX path (NETSYS v3 only).
 *
 * Mirror of mtk_monitor_tdma_tx() for the RX direction: watches the CDM6
 * FSM high-bits flag together with the TDMA RX busy bit over three
 * consecutive polls.  Returns MTK_FE_STOP_TRAFFIC when a hang is
 * confirmed, 0 otherwise (and always 0 on SoCs without NETSYS v3).
 */
u32 mtk_monitor_tdma_rx(struct mtk_eth *eth)
{
	static u32 err_cnt_trx;	/* consecutive polls that looked hung */
	static u32 pre_fsm;	/* CDM6 FSM non-idle flag from last poll */
	u32 err_flag = 0;
	u32 cur_fsm = 0;
	u32 rx_busy = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		cur_fsm = (mtk_r32(eth, MTK_FE_CDM6_FSM) & 0xFFF0000) != 0;
		rx_busy = ((mtk_r32(eth, MTK_TDMA_GLO_CFG) & 0x8) != 0);

		if (cur_fsm == pre_fsm && cur_fsm != 0 && rx_busy) {
			err_cnt_trx++;
			if (err_cnt_trx >= 3) {
				/* Hang confirmed: dump the TDMA RX state. */
				pr_info("TDMA Rx Info\n");
				pr_info("err_cnt = %d", err_cnt_trx);
				pr_info("CDM6_FSM = %d\n",
					mtk_r32(eth, MTK_FE_CDM6_FSM));
				pr_info("DMA CFG = 0x%x\n",
					mtk_r32(eth, MTK_TDMA_GLO_CFG));
				pr_info("==============================\n");
				err_flag = 1;
			}
		} else
			err_cnt_trx = 0;

		pre_fsm = cur_fsm;
	}

	if (err_flag)
		return MTK_FE_STOP_TRAFFIC;
	else
		return 0;
}
493
/* Hang-monitor hooks polled in order by mtk_dma_monitor(); the table has
 * seven entries (indices 0-6).
 */
static const mtk_monitor_xdma_func mtk_reset_monitor_func[] = {
	[0] = mtk_monitor_wdma_tx,
	[1] = mtk_monitor_wdma_rx,
	[2] = mtk_monitor_qdma_tx,
	[3] = mtk_monitor_qdma_rx,
	[4] = mtk_monitor_adma_rx,
	[5] = mtk_monitor_tdma_tx,
	[6] = mtk_monitor_tdma_rx,
};
503
504void mtk_dma_monitor(struct timer_list *t)
505{
506 struct mtk_eth *eth = from_timer(eth, t, mtk_dma_monitor_timer);
507 u32 i = 0, ret = 0;
508
509 for (i = 0; i < 6; i++) {
510 ret = (*mtk_reset_monitor_func[i]) (eth);
511 if ((ret == MTK_FE_START_RESET) ||
512 (ret == MTK_FE_STOP_TRAFFIC)) {
developer8051e042022-04-08 13:26:36 +0800513 if ((atomic_read(&reset_lock) == 0) &&
developer0baa6962023-01-31 14:25:23 +0800514 (atomic_read(&force) == 1)) {
developer37482a42022-12-26 13:31:13 +0800515 mtk_reset_flag = ret;
developer8051e042022-04-08 13:26:36 +0800516 schedule_work(&eth->pending_work);
517 }
developer37482a42022-12-26 13:31:13 +0800518 break;
developer8051e042022-04-08 13:26:36 +0800519 }
developer8051e042022-04-08 13:26:36 +0800520 }
521
developer8051e042022-04-08 13:26:36 +0800522 mod_timer(&eth->mtk_dma_monitor_timer, jiffies + 1 * HZ);
523}
524
/*
 * mtk_prepare_reset_fe - quiesce the frame engine before a reset.
 *
 * Masks all NETSYS interrupts, stops the Linux TX queues, halts QDMA TX,
 * disables the per-port MACs (XGDM and GDM variants), switches the GDMs
 * to drop-all, and finally stops ADMA RX.  Ordering matters: software TX
 * is stopped before the DMA engines and MACs are disabled.
 */
void mtk_prepare_reset_fe(struct mtk_eth *eth)
{
	u32 i = 0, val = 0, mcr = 0;

	/* Disable NETSYS Interrupt */
	mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
	mtk_w32(eth, 0, MTK_PDMA_INT_MASK);
	mtk_w32(eth, 0, MTK_QDMA_INT_MASK);

	/* Disable Linux netif Tx path */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_tx_disable(eth->netdev[i]);
	}

	/* Disable QDMA Tx */
	val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
	mtk_w32(eth, val & ~(MTK_TX_DMA_EN), MTK_QDMA_GLO_CFG);

	/* Shut down each MAC; GMAC1 (XGDM) is deliberately skipped. */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		pr_info("[%s] i:%d type:%d id:%d\n",
			__func__, i, eth->mac[i]->type, eth->mac[i]->id);
		if (eth->mac[i]->type == MTK_XGDM_TYPE &&
		    eth->mac[i]->id != MTK_GMAC1_ID) {
			mcr = mtk_r32(eth, MTK_XMAC_MCR(eth->mac[i]->id));
			/* Clear the low control nibble, then force TRX off. */
			mcr &= 0xfffffff0;
			mcr |= XMAC_MCR_TRX_DISABLE;
			pr_info("disable XMAC TX/RX\n");
			mtk_w32(eth, mcr, MTK_XMAC_MCR(eth->mac[i]->id));
		}

		if (eth->mac[i]->type == MTK_GDM_TYPE) {
			mcr = mtk_r32(eth, MTK_MAC_MCR(eth->mac[i]->id));
			mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
			mtk_w32(eth, mcr, MTK_MAC_MCR(eth->mac[i]->id));
			pr_info("disable GMAC TX/RX\n");
		}
	}

	/* Enable GDM drop */
	for (i = 0; i < MTK_MAC_COUNT; i++)
		mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);

	/* Disable ADMA Rx */
	val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
	mtk_w32(eth, val & ~(MTK_RX_DMA_EN), MTK_PDMA_GLO_CFG);
}
573
/*
 * mtk_prepare_reset_ppe - quiesce one PPE instance before a reset.
 * @ppe_id: index of the PPE block to prepare.
 *
 * Disables keep-alive, parks the KA timers at their maximum, switches the
 * KA tick/config fields, turns off scan mode, and then polls (up to ~5s)
 * for the PPE busy bit to clear.  If the PPE never goes idle, logs the
 * fact and dumps the FE and PPE register windows for diagnosis.
 */
void mtk_prepare_reset_ppe(struct mtk_eth *eth, u32 ppe_id)
{
	u32 i = 0, poll_time = 5000, val;

	/* Disable KA */
	mtk_m32(eth, MTK_PPE_KA_CFG_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
	mtk_m32(eth, MTK_PPE_NTU_KA_MASK, 0, MTK_PPE_BIND_LMT_1(ppe_id));
	mtk_w32(eth, 0, MTK_PPE_KA(ppe_id));
	mdelay(10);

	/* Set KA timer to maximum */
	mtk_m32(eth, MTK_PPE_NTU_KA_MASK, (0xFF << 16), MTK_PPE_BIND_LMT_1(ppe_id));
	mtk_w32(eth, 0xFFFFFFFF, MTK_PPE_KA(ppe_id));

	/* Set KA tick select */
	mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, (0x1 << 24), MTK_PPE_TB_CFG(ppe_id));
	mtk_m32(eth, MTK_PPE_KA_CFG_MASK, (0x3 << 12), MTK_PPE_TB_CFG(ppe_id));
	mdelay(10);

	/* Disable scan mode */
	mtk_m32(eth, MTK_PPE_SCAN_MODE_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
	mdelay(10);

	/* Check PPE idle: poll busy bit with 1ms granularity. */
	while (i++ < poll_time) {
		val = mtk_r32(eth, MTK_PPE_GLO_CFG(ppe_id));
		if (!(val & MTK_PPE_BUSY))
			break;
		mdelay(1);
	}

	if (i >= poll_time) {
		pr_info("[%s] PPE keeps busy !\n", __func__);
		mtk_dump_reg(eth, "FE", 0x0, 0x500);
		mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
	}
}
611
612static int mtk_eth_netdevice_event(struct notifier_block *unused,
613 unsigned long event, void *ptr)
614{
615 switch (event) {
616 case MTK_WIFI_RESET_DONE:
developer37482a42022-12-26 13:31:13 +0800617 case MTK_FE_STOP_TRAFFIC_DONE:
developerbe971722022-05-23 13:51:05 +0800618 mtk_rest_cnt--;
619 if(!mtk_rest_cnt) {
620 complete(&wait_ser_done);
621 mtk_rest_cnt = mtk_wifi_num;
622 }
623 break;
624 case MTK_WIFI_CHIP_ONLINE:
625 mtk_wifi_num++;
626 mtk_rest_cnt = mtk_wifi_num;
627 break;
628 case MTK_WIFI_CHIP_OFFLINE:
629 mtk_wifi_num--;
630 mtk_rest_cnt = mtk_wifi_num;
developer8051e042022-04-08 13:26:36 +0800631 break;
632 default:
633 break;
634 }
635
636 return NOTIFY_DONE;
637}
638
/* Notifier receiving WiFi/FE SER coordination events from peer drivers. */
struct notifier_block mtk_eth_netdevice_nb __read_mostly = {
	.notifier_call = mtk_eth_netdevice_event,
};