blob: eebec742840c275643a4e13e1f66765755531c85 [file] [log] [blame]
developer8051e042022-04-08 13:26:36 +08001/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright (c) 2022 MediaTek Inc.
4 * Author: Henry Yen <henry.yen@mediatek.com>
5 */
6
7#include <linux/regmap.h>
8#include "mtk_eth_soc.h"
9#include "mtk_eth_dbg.h"
10#include "mtk_eth_reset.h"
11
/* Human-readable labels for the reset/monitor event counters, indexed by
 * the MTK_EVENT_* ids (see mtk_reset_event_update()); unlisted slots of
 * the 32-entry table stay NULL.
 */
char* mtk_reset_event_name[32] = {
	[MTK_EVENT_FORCE] = "Force",
	[MTK_EVENT_WARM_CNT] = "Warm",
	[MTK_EVENT_COLD_CNT] = "Cold",
	[MTK_EVENT_TOTAL_CNT] = "Total",
	[MTK_EVENT_FQ_EMPTY] = "FQ Empty",
	[MTK_EVENT_TSO_FAIL] = "TSO Fail",
	[MTK_EVENT_TSO_ILLEGAL] = "TSO Illegal",
	[MTK_EVENT_TSO_ALIGN] = "TSO Align",
	[MTK_EVENT_RFIFO_OV] = "RFIFO OV",
	[MTK_EVENT_RFIFO_UF] = "RFIFO UF",
};
24
/* Number of WiFi chips that reported MTK_WIFI_CHIP_ONLINE and have not
 * gone offline yet (updated from mtk_eth_netdevice_event()).
 */
static int mtk_wifi_num = 0;
/* Remaining DONE acks to collect before completing wait_ser_done;
 * reloaded from mtk_wifi_num after each full round.
 */
static int mtk_rest_cnt = 0;
/* Action requested of the pending reset work: MTK_FE_START_RESET or
 * MTK_FE_STOP_TRAFFIC (set by mtk_dma_monitor()).
 */
u32 mtk_reset_flag = MTK_FE_START_RESET;
/* Signature shared by the per-DMA-block stall monitors polled from
 * mtk_dma_monitor(); returns 0 (healthy) or an MTK_FE_* action code.
 */
typedef u32 (*mtk_monitor_xdma_func) (struct mtk_eth *eth);
developerbe971722022-05-23 13:51:05 +080029
developer8051e042022-04-08 13:26:36 +080030void mtk_reset_event_update(struct mtk_eth *eth, u32 id)
31{
32 struct mtk_reset_event *reset_event = &eth->reset_event;
33 reset_event->count[id]++;
34}
35
36int mtk_eth_cold_reset(struct mtk_eth *eth)
37{
developer0baa6962023-01-31 14:25:23 +080038 u32 reset_bits = 0;
developer089e8852022-09-28 14:43:46 +080039 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
40 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer8051e042022-04-08 13:26:36 +080041 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
42
developer0baa6962023-01-31 14:25:23 +080043 reset_bits = RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE0;
developer8051e042022-04-08 13:26:36 +080044 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
developer0baa6962023-01-31 14:25:23 +080045 reset_bits |= RSTCTRL_PPE1;
46#if defined(CONFIG_MEDIATEK_NETSYS_V3)
47 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
48 reset_bits |= RSTCTRL_PPE2;
49 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
50 reset_bits |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
51#endif
52 ethsys_reset(eth, reset_bits);
developer8051e042022-04-08 13:26:36 +080053
developer089e8852022-09-28 14:43:46 +080054 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
55 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer8051e042022-04-08 13:26:36 +080056 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0x3ffffff);
57
58 return 0;
59}
60
/* Warm reset of the frame engine: assert RSTCTRL_FE, wait for the bit to
 * latch, then pulse the remaining block resets and release everything.
 * Falls back to mtk_eth_cold_reset() if the FE reset bit never latched.
 * Always returns 0.
 */
int mtk_eth_warm_reset(struct mtk_eth *eth)
{
	u32 reset_bits = 0, i = 0, done = 0;
	u32 val1 = 0, val2 = 0, val3 = 0;

	/* Settle time before touching the reset controller. */
	mdelay(100);

	/* Step 1: assert the FE reset only. */
	reset_bits |= RSTCTRL_FE;
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits, reset_bits);

	/* Poll up to ~1ms for the FE reset bit to read back as set. */
	while (i < 1000) {
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val1);
		if (val1 & RSTCTRL_FE)
			break;
		i++;
		udelay(1);
	}

	if (i < 1000) {
		/* Step 2: assert ETH/PPE (and conditionally WDMA) resets. */
		reset_bits = RSTCTRL_ETH | RSTCTRL_PPE0;
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
			reset_bits |= RSTCTRL_PPE1;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
			reset_bits |= RSTCTRL_PPE2;
		/* WDMA is only reset for a full FE reset, not a traffic stop. */
		if (mtk_reset_flag == MTK_FE_START_RESET)
			reset_bits |= RSTCTRL_WDMA0 |
				      RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
#endif

		regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
				   reset_bits, reset_bits);

		/* Verify the asserted bits read back as set. */
		udelay(1);
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val2);
		if (!(val2 & reset_bits))
			pr_info("[%s] error val2=0x%x reset_bits=0x%x !\n",
				__func__, val2, reset_bits);

		/* Step 3: release all resets (FE included) and verify clear. */
		reset_bits |= RSTCTRL_FE;
		regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
				   reset_bits, ~reset_bits);

		udelay(1);
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val3);
		if (val3 & reset_bits)
			pr_info("[%s] error val3=0x%x reset_bits=0x%x !\n",
				__func__, val3, reset_bits);
		done = 1;
		mtk_reset_event_update(eth, MTK_EVENT_WARM_CNT);
	}

	pr_info("[%s] reset record val1=0x%x, val2=0x%x, val3=0x%x i:%d done:%d\n",
		__func__, val1, val2, val3, i, done);

	/* FE reset never latched: escalate to a cold reset. */
	if (!done)
		mtk_eth_cold_reset(eth);

	return 0;
}
121
122u32 mtk_check_reset_event(struct mtk_eth *eth, u32 status)
123{
124 u32 ret = 0, val = 0;
125
126 if ((status & MTK_FE_INT_FQ_EMPTY) ||
127 (status & MTK_FE_INT_RFIFO_UF) ||
128 (status & MTK_FE_INT_RFIFO_OV) ||
129 (status & MTK_FE_INT_TSO_FAIL) ||
130 (status & MTK_FE_INT_TSO_ALIGN) ||
131 (status & MTK_FE_INT_TSO_ILLEGAL)) {
132 while (status) {
133 val = ffs((unsigned int)status) - 1;
134 mtk_reset_event_update(eth, val);
135 status &= ~(1 << val);
136 }
137 ret = 1;
138 }
139
140 if (atomic_read(&force)) {
141 mtk_reset_event_update(eth, MTK_EVENT_FORCE);
142 ret = 1;
143 }
144
145 if (ret) {
146 mtk_reset_event_update(eth, MTK_EVENT_TOTAL_CNT);
147 mtk_dump_netsys_info(eth);
148 }
149
150 return ret;
151}
152
153irqreturn_t mtk_handle_fe_irq(int irq, void *_eth)
154{
155 struct mtk_eth *eth = _eth;
156 u32 status = 0, val = 0;
157
158 status = mtk_r32(eth, MTK_FE_INT_STATUS);
159 pr_info("[%s] Trigger FE Misc ISR: 0x%x\n", __func__, status);
160
161 while (status) {
162 val = ffs((unsigned int)status) - 1;
163 status &= ~(1 << val);
developera7ee5fe2022-04-21 17:45:57 +0800164
developerbe971722022-05-23 13:51:05 +0800165 if ((val == MTK_EVENT_TSO_FAIL) ||
developera7ee5fe2022-04-21 17:45:57 +0800166 (val == MTK_EVENT_TSO_ILLEGAL) ||
167 (val == MTK_EVENT_TSO_ALIGN) ||
168 (val == MTK_EVENT_RFIFO_OV) ||
169 (val == MTK_EVENT_RFIFO_UF))
170 pr_info("[%s] Detect reset event: %s !\n", __func__,
171 mtk_reset_event_name[val]);
developer8051e042022-04-08 13:26:36 +0800172 }
developera7ee5fe2022-04-21 17:45:57 +0800173 mtk_w32(eth, 0xFFFFFFFF, MTK_FE_INT_STATUS);
developer8051e042022-04-08 13:26:36 +0800174
175 return IRQ_HANDLED;
176}
177
178static void mtk_dump_reg(void *_eth, char *name, u32 offset, u32 range)
179{
180 struct mtk_eth *eth = _eth;
181 u32 cur = offset;
182
183 pr_info("\n============ %s ============\n", name);
184 while(cur < offset + range) {
185 pr_info("0x%x: %08x %08x %08x %08x\n",
186 cur, mtk_r32(eth, cur), mtk_r32(eth, cur + 0x4),
187 mtk_r32(eth, cur + 0x8), mtk_r32(eth, cur + 0xc));
188 cur += 0x10;
189 }
190}
191
/* Dump the main netsys register blocks (FE, ADMA, all QDMA pages, WDMA,
 * PPE, GMAC, and on NETSYS v3 the XGMAC/USXGMII blocks) for SER
 * diagnostics.  Called from mtk_check_reset_event() on a reset trigger.
 * NOTE(review): writes MTK_QDMA_PAGE while dumping — assumes no
 * concurrent QDMA page user; confirm against callers.
 */
void mtk_dump_netsys_info(void *_eth)
{
	struct mtk_eth *eth = _eth;
	u32 id = 0;

	mtk_dump_reg(eth, "FE", 0x0, 0x500);
	mtk_dump_reg(eth, "ADMA", PDMA_BASE, 0x300);
	/* QDMA registers are paged: select each page, dump, restore page 0. */
	for (id = 0; id < MTK_QDMA_PAGE_NUM; id++){
		mtk_w32(eth, id, MTK_QDMA_PAGE);
		pr_info("\nQDMA PAGE:%x ",mtk_r32(eth, MTK_QDMA_PAGE));
		mtk_dump_reg(eth, "QDMA", QDMA_BASE, 0x100);
		mtk_w32(eth, 0, MTK_QDMA_PAGE);
	}
	mtk_dump_reg(eth, "QDMA", MTK_QRX_BASE_PTR0, 0x300);
	mtk_dump_reg(eth, "WDMA", WDMA_BASE(0), 0x600);
	mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
	mtk_dump_reg(eth, "GMAC", 0x10000, 0x300);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		mtk_dump_reg(eth, "XGMAC0", 0x12000, 0x300);
		mtk_dump_reg(eth, "XGMAC1", 0x13000, 0x300);
		mtk_dump_usxgmii(eth->usxgmii->pcs[0].regmap,
				 "USXGMII0", 0, 0x1000);
		mtk_dump_usxgmii(eth->usxgmii->pcs[1].regmap,
				 "USXGMII1", 0, 0x1000);
	}
}
218
/* WDMA Tx stall monitor: a ring is suspect when its DTX pointer has not
 * moved since the previous poll while the Tx DMA reports busy and the
 * CDM Tx FIFO ready bit is deasserted.  Three consecutive suspect polls
 * dump the ring registers and request MTK_FE_START_RESET; returns 0
 * otherwise.  NOTE(review): per-ring history lives in static arrays, so
 * this assumes a single caller (the monitor timer) — confirm.
 */
u32 mtk_monitor_wdma_tx(struct mtk_eth *eth)
{
	static u32 pre_dtx[MTK_WDMA_CNT];	/* DTX pointer at last poll */
	static u32 err_cnt[MTK_WDMA_CNT];	/* consecutive suspect polls */
	u32 i = 0, cur_dtx = 0, tx_busy = 0, tx_rdy = 0, err_flag = 0;
	u32 dbg_mon = 0;

	for (i = 0; i < MTK_WDMA_CNT; i++) {
		cur_dtx = mtk_r32(eth, MTK_WDMA_DTX_PTR(i));
		tx_busy = mtk_r32(eth, MTK_WDMA_GLO_CFG(i)) & MTK_TX_DMA_BUSY;
		dbg_mon = mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(i));
		tx_rdy = !(dbg_mon & MTK_CDM_TXFIFO_RDY);
		if (cur_dtx == pre_dtx[i] && tx_busy && tx_rdy) {
			err_cnt[i]++;
			if (err_cnt[i] >= 3) {
				pr_info("WDMA %d Info\n", i);
				pr_info("err_cnt = %d", err_cnt[i]);
				pr_info("prev_dtx = 0x%x | cur_dtx = 0x%x\n",
					pre_dtx[i], cur_dtx);
				pr_info("WDMA_CTX_PTR = 0x%x\n",
					mtk_r32(eth, MTK_WDMA_CTX_PTR(i)));
				pr_info("WDMA_DTX_PTR = 0x%x\n",
					mtk_r32(eth, MTK_WDMA_DTX_PTR(i)));
				pr_info("WDMA_GLO_CFG = 0x%x\n",
					mtk_r32(eth, MTK_WDMA_GLO_CFG(i)));
				pr_info("WDMA_TX_DBG_MON0 = 0x%x\n",
					mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(i)));
				pr_info("==============================\n");
				err_flag = 1;
			}
		} else
			err_cnt[i] = 0;	/* ring made progress: reset streak */
		pre_dtx[i] = cur_dtx;
	}

	if (err_flag)
		return MTK_FE_START_RESET;
	else
		return 0;
}
developer8051e042022-04-08 13:26:36 +0800259
/* WDMA Rx stall monitor: a ring is suspect when its DRX pointer and the
 * matching PSE output-queue occupancy are both unchanged since the last
 * poll while the Rx DMA reports busy and the queue is non-empty.  Three
 * consecutive suspect polls dump the ring registers and request
 * MTK_FE_START_RESET; returns 0 otherwise.  NOTE(review): static
 * history assumes a single caller (the monitor timer) — confirm.
 */
u32 mtk_monitor_wdma_rx(struct mtk_eth *eth)
{
	static u32 pre_drx[MTK_WDMA_CNT];	/* DRX pointer at last poll */
	static u32 pre_opq[MTK_WDMA_CNT];	/* PSE OQ level at last poll */
	static u32 err_cnt[MTK_WDMA_CNT];	/* consecutive suspect polls */
	u32 i = 0, cur_drx = 0, rx_busy = 0, err_flag = 0;
	u32 cur_opq = 0;

	for (i = 0; i < MTK_WDMA_CNT; i++) {
		cur_drx = mtk_r32(eth, MTK_WDMA_DRX_PTR(i));
		rx_busy = mtk_r32(eth, MTK_WDMA_GLO_CFG(i)) & MTK_RX_DMA_BUSY;
		/* Each WDMA instance maps to a different PSE OQ field. */
		if (i == 0)
			cur_opq = (mtk_r32(eth, MTK_PSE_OQ_STA(5)) & 0x1FF);
		else if (i == 1)
			cur_opq = (mtk_r32(eth, MTK_PSE_OQ_STA(5)) & 0x1FF0000);
		else
			cur_opq = (mtk_r32(eth, MTK_PSE_OQ_STA(7)) & 0x1FF0000);

		if (cur_drx == pre_drx[i] && rx_busy && cur_opq != 0 &&
		    cur_opq == pre_opq[i]) {
			err_cnt[i]++;
			if (err_cnt[i] >= 3) {
				pr_info("WDMA %d Info\n", i);
				pr_info("err_cnt = %d", err_cnt[i]);
				pr_info("prev_drx = 0x%x | cur_drx = 0x%x\n",
					pre_drx[i], cur_drx);
				pr_info("WDMA_CRX_PTR = 0x%x\n",
					mtk_r32(eth, MTK_WDMA_CRX_PTR(i)));
				pr_info("WDMA_DRX_PTR = 0x%x\n",
					mtk_r32(eth, MTK_WDMA_DRX_PTR(i)));
				pr_info("WDMA_GLO_CFG = 0x%x\n",
					mtk_r32(eth, MTK_WDMA_GLO_CFG(i)));
				pr_info("==============================\n");
				err_flag = 1;
			}
		} else
			err_cnt[i] = 0;	/* ring made progress: reset streak */
		pre_drx[i] = cur_drx;
		pre_opq[i] = cur_opq;
	}

	if (err_flag)
		return MTK_FE_START_RESET;
	else
		return 0;
}
306
307u32 mtk_monitor_rx_fc(struct mtk_eth *eth)
308{
309 u32 i = 0, mib_base = 0, gdm_fc = 0;
310
311 for (i = 0; i < MTK_MAC_COUNT; i++) {
312 mib_base = MTK_GDM1_TX_GBCNT + MTK_STAT_OFFSET*i;
313 gdm_fc = mtk_r32(eth, mib_base);
314 if (gdm_fc < 1)
315 return 1;
316 }
317 return 0;
318}
319
320u32 mtk_monitor_qdma_tx(struct mtk_eth *eth)
321{
322 static u32 err_cnt_qtx;
323 u32 err_flag = 0;
324 u32 i = 0, is_rx_fc = 0;
325
326 u32 is_qfsm_hang = (mtk_r32(eth, MTK_QDMA_FSM) & 0xF00) != 0;
327 u32 is_qfwd_hang = mtk_r32(eth, MTK_QDMA_FWD_CNT) == 0;
328
329 is_rx_fc = mtk_monitor_rx_fc(eth);
330 if (is_qfsm_hang && is_qfwd_hang && is_rx_fc) {
331 err_cnt_qtx++;
332 if (err_cnt_qtx >= 3) {
developer797e46c2022-07-29 12:05:32 +0800333 pr_info("QDMA Tx Info\n");
developer37482a42022-12-26 13:31:13 +0800334 pr_info("err_cnt = %d", err_cnt_qtx);
developer8051e042022-04-08 13:26:36 +0800335 pr_info("is_qfsm_hang = %d\n", is_qfsm_hang);
336 pr_info("is_qfwd_hang = %d\n", is_qfwd_hang);
developer8051e042022-04-08 13:26:36 +0800337 pr_info("-- -- -- -- -- -- --\n");
338 pr_info("MTK_QDMA_FSM = 0x%x\n",
339 mtk_r32(eth, MTK_QDMA_FSM));
340 pr_info("MTK_QDMA_FWD_CNT = 0x%x\n",
341 mtk_r32(eth, MTK_QDMA_FWD_CNT));
342 pr_info("MTK_QDMA_FQ_CNT = 0x%x\n",
343 mtk_r32(eth, MTK_QDMA_FQ_CNT));
344 pr_info("==============================\n");
developer37482a42022-12-26 13:31:13 +0800345 err_flag = 1;
developer8051e042022-04-08 13:26:36 +0800346 }
developer37482a42022-12-26 13:31:13 +0800347 } else
348 err_cnt_qtx = 0;
349
350 if (err_flag)
351 return MTK_FE_STOP_TRAFFIC;
352 else
353 return 0;
354}
355
356u32 mtk_monitor_qdma_rx(struct mtk_eth *eth)
357{
358 static u32 err_cnt_qrx;
359 static u32 pre_fq_head, pre_fq_tail;
360 u32 err_flag = 0;
361
362 u32 qrx_fsm = (mtk_r32(eth, MTK_QDMA_FSM) & 0x1F) == 9;
363 u32 fq_head = mtk_r32(eth, MTK_QDMA_FQ_HEAD);
364 u32 fq_tail = mtk_r32(eth, MTK_QDMA_FQ_TAIL);
365
366 if (qrx_fsm && fq_head == pre_fq_head &&
367 fq_tail == pre_fq_tail) {
368 err_cnt_qrx++;
369 if (err_cnt_qrx >= 3) {
370 pr_info("QDMA Rx Info\n");
371 pr_info("err_cnt = %d", err_cnt_qrx);
372 pr_info("MTK_QDMA_FSM = %d\n",
373 mtk_r32(eth, MTK_QDMA_FSM));
374 pr_info("FQ_HEAD = 0x%x\n",
375 mtk_r32(eth, MTK_QDMA_FQ_HEAD));
376 pr_info("FQ_TAIL = 0x%x\n",
377 mtk_r32(eth, MTK_QDMA_FQ_TAIL));
378 err_flag = 1;
379 } else
380 err_cnt_qrx = 0;
381 }
382 pre_fq_head = fq_head;
383 pre_fq_tail = fq_tail;
384
385 if (err_flag)
386 return MTK_FE_STOP_TRAFFIC;
387 else
388 return 0;
389}
390
391
/* ADMA Rx stall monitor: suspect when PSE output queue 0 is non-empty,
 * the CDM1 FSM upper bits are non-zero, and the ADMA Rx debug register
 * shows state 0 with the FIFO-ready bit clear.  Three consecutive
 * suspect polls dump the debug registers and request
 * MTK_FE_STOP_TRAFFIC; returns 0 otherwise.
 */
u32 mtk_monitor_adma_rx(struct mtk_eth *eth)
{
	static u32 err_cnt_arx;		/* consecutive suspect polls */
	u32 err_flag = 0;
	u32 opq0 = (mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x1FF) != 0;
	u32 cdm1_fsm = (mtk_r32(eth, MTK_FE_CDM1_FSM) & 0xFFFF0000) != 0;
	u32 cur_stat = ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x1F) == 0);
	u32 fifo_rdy = ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x40) == 0);

	if (opq0 && cdm1_fsm && cur_stat && fifo_rdy) {
		err_cnt_arx++;
		if (err_cnt_arx >= 3) {
			pr_info("ADMA Rx Info\n");
			pr_info("err_cnt = %d", err_cnt_arx);
			pr_info("CDM1_FSM = %d\n",
				mtk_r32(eth, MTK_FE_CDM1_FSM));
			pr_info("MTK_PSE_OQ_STA1 = 0x%x\n",
				mtk_r32(eth, MTK_PSE_OQ_STA(0)));
			pr_info("MTK_ADMA_RX_DBG0 = 0x%x\n",
				mtk_r32(eth, MTK_ADMA_RX_DBG0));
			pr_info("MTK_ADMA_RX_DBG1 = 0x%x\n",
				mtk_r32(eth, MTK_ADMA_RX_DBG1));
			pr_info("==============================\n");
			err_flag = 1;
		}
	} else
		err_cnt_arx = 0;	/* condition cleared: reset streak */

	if (err_flag)
		return MTK_FE_STOP_TRAFFIC;
	else
		return 0;
}
425
/* TDMA Tx stall monitor (NETSYS v3 only): suspect when the CDM6 FSM
 * low bits are non-zero, unchanged since the last poll, and the TDMA Tx
 * busy bit is set.  Three consecutive suspect polls dump the registers
 * and request MTK_FE_STOP_TRAFFIC; returns 0 otherwise (and always 0 on
 * non-v3 SoCs).
 */
u32 mtk_monitor_tdma_tx(struct mtk_eth *eth)
{
	static u32 err_cnt_ttx;		/* consecutive suspect polls */
	static u32 pre_fsm;		/* FSM snapshot at last poll */
	u32 err_flag = 0;
	u32 cur_fsm = 0;
	u32 tx_busy = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		cur_fsm = (mtk_r32(eth, MTK_FE_CDM6_FSM) & 0x1FFF) != 0;
		tx_busy = ((mtk_r32(eth, MTK_TDMA_GLO_CFG) & 0x2) != 0);

		if (cur_fsm == pre_fsm && cur_fsm != 0 && tx_busy) {
			err_cnt_ttx++;
			if (err_cnt_ttx >= 3) {
				pr_info("TDMA Tx Info\n");
				pr_info("err_cnt = %d", err_cnt_ttx);
				pr_info("CDM6_FSM = %d\n",
					mtk_r32(eth, MTK_FE_CDM6_FSM));
				pr_info("DMA CFG = 0x%x\n",
					mtk_r32(eth, MTK_TDMA_GLO_CFG));
				pr_info("==============================\n");
				err_flag = 1;
			}
		} else
			err_cnt_ttx = 0;	/* condition cleared */

		pre_fsm = cur_fsm;
	}

	if (err_flag)
		return MTK_FE_STOP_TRAFFIC;
	else
		return 0;
}
461
/* TDMA Rx stall monitor (NETSYS v3 only): suspect when the CDM6 FSM
 * high bits are non-zero, unchanged since the last poll, and the TDMA
 * Rx busy bit is set.  Three consecutive suspect polls dump the
 * registers and request MTK_FE_STOP_TRAFFIC; returns 0 otherwise (and
 * always 0 on non-v3 SoCs).
 */
u32 mtk_monitor_tdma_rx(struct mtk_eth *eth)
{
	static u32 err_cnt_trx;		/* consecutive suspect polls */
	static u32 pre_fsm;		/* FSM snapshot at last poll */
	u32 err_flag = 0;
	u32 cur_fsm = 0;
	u32 rx_busy = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		cur_fsm = (mtk_r32(eth, MTK_FE_CDM6_FSM) & 0xFFF0000) != 0;
		rx_busy = ((mtk_r32(eth, MTK_TDMA_GLO_CFG) & 0x8) != 0);

		if (cur_fsm == pre_fsm && cur_fsm != 0 && rx_busy) {
			err_cnt_trx++;
			if (err_cnt_trx >= 3) {
				pr_info("TDMA Rx Info\n");
				pr_info("err_cnt = %d", err_cnt_trx);
				pr_info("CDM6_FSM = %d\n",
					mtk_r32(eth, MTK_FE_CDM6_FSM));
				pr_info("DMA CFG = 0x%x\n",
					mtk_r32(eth, MTK_TDMA_GLO_CFG));
				pr_info("==============================\n");
				err_flag = 1;
			}
		} else
			err_cnt_trx = 0;	/* condition cleared */

		pre_fsm = cur_fsm;
	}

	if (err_flag)
		return MTK_FE_STOP_TRAFFIC;
	else
		return 0;
}
497
/* Polling order for mtk_dma_monitor(); each hook returns 0 when its DMA
 * block looks healthy, or an MTK_FE_* action code on a detected stall.
 */
static const mtk_monitor_xdma_func mtk_reset_monitor_func[] = {
	[0] = mtk_monitor_wdma_tx,
	[1] = mtk_monitor_wdma_rx,
	[2] = mtk_monitor_qdma_tx,
	[3] = mtk_monitor_qdma_rx,
	[4] = mtk_monitor_adma_rx,
	[5] = mtk_monitor_tdma_tx,
	[6] = mtk_monitor_tdma_rx,
};
507
508void mtk_dma_monitor(struct timer_list *t)
509{
510 struct mtk_eth *eth = from_timer(eth, t, mtk_dma_monitor_timer);
511 u32 i = 0, ret = 0;
512
513 for (i = 0; i < 6; i++) {
514 ret = (*mtk_reset_monitor_func[i]) (eth);
515 if ((ret == MTK_FE_START_RESET) ||
516 (ret == MTK_FE_STOP_TRAFFIC)) {
developer8051e042022-04-08 13:26:36 +0800517 if ((atomic_read(&reset_lock) == 0) &&
developer0baa6962023-01-31 14:25:23 +0800518 (atomic_read(&force) == 1)) {
developer37482a42022-12-26 13:31:13 +0800519 mtk_reset_flag = ret;
developer8051e042022-04-08 13:26:36 +0800520 schedule_work(&eth->pending_work);
521 }
developer37482a42022-12-26 13:31:13 +0800522 break;
developer8051e042022-04-08 13:26:36 +0800523 }
developer8051e042022-04-08 13:26:36 +0800524 }
525
developer8051e042022-04-08 13:26:36 +0800526 mod_timer(&eth->mtk_dma_monitor_timer, jiffies + 1 * HZ);
527}
528
/* Quiesce the frame engine ahead of a SER reset: mask all NETSYS
 * interrupts, stop the kernel Tx queues, halt QDMA Tx, disable the
 * MAC Tx/Rx paths (XMAC and GMAC variants), switch every GDM to drop
 * mode, and finally stop ADMA Rx.
 */
void mtk_prepare_reset_fe(struct mtk_eth *eth)
{
	u32 i = 0, val = 0, mcr = 0;

	/* Disable NETSYS Interrupt */
	mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
	mtk_w32(eth, 0, MTK_PDMA_INT_MASK);
	mtk_w32(eth, 0, MTK_QDMA_INT_MASK);

	/* Disable Linux netif Tx path */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_tx_disable(eth->netdev[i]);
	}

	/* Disable QDMA Tx */
	val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
	mtk_w32(eth, val & ~(MTK_TX_DMA_EN), MTK_QDMA_GLO_CFG);

	/* Disable the per-MAC Tx/Rx datapaths. */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		pr_info("[%s] i:%d type:%d id:%d\n",
			__func__, i, eth->mac[i]->type, eth->mac[i]->id);
		/* XGDM MACs other than GMAC1 use the XMAC control register. */
		if (eth->mac[i]->type == MTK_XGDM_TYPE &&
		    eth->mac[i]->id != MTK_GMAC1_ID) {
			mcr = mtk_r32(eth, MTK_XMAC_MCR(eth->mac[i]->id));
			mcr &= 0xfffffff0;
			mcr |= XMAC_MCR_TRX_DISABLE;
			pr_info("disable XMAC TX/RX\n");
			mtk_w32(eth, mcr, MTK_XMAC_MCR(eth->mac[i]->id));
		}

		if (eth->mac[i]->type == MTK_GDM_TYPE) {
			mcr = mtk_r32(eth, MTK_MAC_MCR(eth->mac[i]->id));
			mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
			mtk_w32(eth, mcr, MTK_MAC_MCR(eth->mac[i]->id));
			pr_info("disable GMAC TX/RX\n");
		}
	}

	/* Enable GDM drop */
	for (i = 0; i < MTK_MAC_COUNT; i++)
		mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);

	/* Disable ADMA Rx */
	val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
	mtk_w32(eth, val & ~(MTK_RX_DMA_EN), MTK_PDMA_GLO_CFG);
}
577
/* Quiesce PPE @ppe_id ahead of a SER reset: stop keep-alive, stretch the
 * KA timers to their maximum, disable scan mode, then poll (up to ~5s)
 * for the PPE busy bit to clear.  If it never goes idle, log and dump
 * the FE/PPE register blocks for diagnosis.
 */
void mtk_prepare_reset_ppe(struct mtk_eth *eth, u32 ppe_id)
{
	u32 i = 0, poll_time = 5000, val;

	/* Disable KA */
	mtk_m32(eth, MTK_PPE_KA_CFG_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
	mtk_m32(eth, MTK_PPE_NTU_KA_MASK, 0, MTK_PPE_BIND_LMT_1(ppe_id));
	mtk_w32(eth, 0, MTK_PPE_KA(ppe_id));
	mdelay(10);

	/* Set KA timer to maximum */
	mtk_m32(eth, MTK_PPE_NTU_KA_MASK, (0xFF << 16), MTK_PPE_BIND_LMT_1(ppe_id));
	mtk_w32(eth, 0xFFFFFFFF, MTK_PPE_KA(ppe_id));

	/* Set KA tick select */
	mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, (0x1 << 24), MTK_PPE_TB_CFG(ppe_id));
	mtk_m32(eth, MTK_PPE_KA_CFG_MASK, (0x3 << 12), MTK_PPE_TB_CFG(ppe_id));
	mdelay(10);

	/* Disable scan mode */
	mtk_m32(eth, MTK_PPE_SCAN_MODE_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
	mdelay(10);

	/* Check PPE idle: poll the busy bit at 1ms intervals. */
	while (i++ < poll_time) {
		val = mtk_r32(eth, MTK_PPE_GLO_CFG(ppe_id));
		if (!(val & MTK_PPE_BUSY))
			break;
		mdelay(1);
	}

	if (i >= poll_time) {
		pr_info("[%s] PPE keeps busy !\n", __func__);
		mtk_dump_reg(eth, "FE", 0x0, 0x500);
		mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
	}
}
615
616static int mtk_eth_netdevice_event(struct notifier_block *unused,
617 unsigned long event, void *ptr)
618{
619 switch (event) {
620 case MTK_WIFI_RESET_DONE:
developer37482a42022-12-26 13:31:13 +0800621 case MTK_FE_STOP_TRAFFIC_DONE:
developerbe971722022-05-23 13:51:05 +0800622 mtk_rest_cnt--;
623 if(!mtk_rest_cnt) {
624 complete(&wait_ser_done);
625 mtk_rest_cnt = mtk_wifi_num;
626 }
627 break;
628 case MTK_WIFI_CHIP_ONLINE:
629 mtk_wifi_num++;
630 mtk_rest_cnt = mtk_wifi_num;
631 break;
632 case MTK_WIFI_CHIP_OFFLINE:
633 mtk_wifi_num--;
634 mtk_rest_cnt = mtk_wifi_num;
developer8051e042022-04-08 13:26:36 +0800635 break;
636 default:
637 break;
638 }
639
640 return NOTIFY_DONE;
641}
642
/* Notifier block registered by the driver core to receive the WiFi
 * online/offline and SER handshake events handled above.
 */
struct notifier_block mtk_eth_netdevice_nb __read_mostly = {
	.notifier_call = mtk_eth_netdevice_event,
};