/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (c) 2022 MediaTek Inc.
 * Author: Henry Yen <henry.yen@mediatek.com>
 */

#include <linux/regmap.h>
#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
#include "mtk_eth_reset.h"

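/* Human-readable names for the FE reset/error events, indexed by MTK_EVENT_*. */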
char *mtk_reset_event_name[32] = {
        [MTK_EVENT_FORCE] = "Force",
        [MTK_EVENT_WARM_CNT] = "Warm",
        [MTK_EVENT_COLD_CNT] = "Cold",
        [MTK_EVENT_TOTAL_CNT] = "Total",
        [MTK_EVENT_FQ_EMPTY] = "FQ Empty",
        [MTK_EVENT_TSO_FAIL] = "TSO Fail",
        [MTK_EVENT_TSO_ILLEGAL] = "TSO Illegal",
        [MTK_EVENT_TSO_ALIGN] = "TSO Align",
        [MTK_EVENT_RFIFO_OV] = "RFIFO OV",
        [MTK_EVENT_RFIFO_UF] = "RFIFO UF",
};

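/*
 * Shared SER state: the number of WiFi chips taking part in reset
 * coordination, the acknowledgements still outstanding from them, and the
 * action (MTK_FE_START_RESET or MTK_FE_STOP_TRAFFIC) most recently
 * requested by the DMA monitors.
 */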
static int mtk_wifi_num;
static int mtk_rest_cnt;
u32 mtk_reset_flag = MTK_FE_START_RESET;
typedef u32 (*mtk_monitor_xdma_func)(struct mtk_eth *eth);

void mtk_reset_event_update(struct mtk_eth *eth, u32 id)
{
        struct mtk_reset_event *reset_event = &eth->reset_event;

        reset_event->count[id]++;
}

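/*
 * Cold reset: reset the whole datapath (ETH, FE and the PPE blocks) through
 * the ethsys reset controller. On NETSYS v2/v3, FE idle checking is masked
 * via ETHSYS_FE_RST_CHK_IDLE_EN for the duration of the reset and
 * re-enabled afterwards.
 */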
int mtk_eth_cold_reset(struct mtk_eth *eth)
{
        if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
            MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
                regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);

        if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
                ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE |
                                  RSTCTRL_PPE0 | RSTCTRL_PPE1);
        else
                ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE0);

        if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
            MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
                regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0x3ffffff);

        return 0;
}

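/*
 * Warm reset: assert RSTCTRL_FE and poll until it reads back, then pulse
 * the per-block reset bits (ETH/PPE, plus the WDMA blocks on NETSYS v3
 * full resets), reading RSTCTRL back after each step to verify it took
 * effect. Falls back to a cold reset if the FE assert never completes.
 */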
int mtk_eth_warm_reset(struct mtk_eth *eth)
{
        u32 reset_bits = 0, i = 0, done = 0;
        u32 val1 = 0, val2 = 0, val3 = 0;

        mdelay(100);

        reset_bits |= RSTCTRL_FE;
        regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
                           reset_bits, reset_bits);

        while (i < 1000) {
                regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val1);
                if (val1 & RSTCTRL_FE)
                        break;
                i++;
                udelay(1);
        }

        if (i < 1000) {
                reset_bits = RSTCTRL_ETH | RSTCTRL_PPE0;
                if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
                        reset_bits |= RSTCTRL_PPE1;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
                if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
                        reset_bits |= RSTCTRL_PPE2;
                if (mtk_reset_flag == MTK_FE_START_RESET)
                        reset_bits |= RSTCTRL_WDMA0 |
                                      RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
#endif

                regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
                                   reset_bits, reset_bits);

                udelay(1);
                regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val2);
                if (!(val2 & reset_bits))
                        pr_info("[%s] error val2=0x%x reset_bits=0x%x !\n",
                                __func__, val2, reset_bits);
                reset_bits |= RSTCTRL_FE;
                regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
                                   reset_bits, ~reset_bits);

                udelay(1);
                regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val3);
                if (val3 & reset_bits)
                        pr_info("[%s] error val3=0x%x reset_bits=0x%x !\n",
                                __func__, val3, reset_bits);
                done = 1;
                mtk_reset_event_update(eth, MTK_EVENT_WARM_CNT);
        }

        pr_info("[%s] reset record val1=0x%x, val2=0x%x, val3=0x%x !\n",
                __func__, val1, val2, val3);

        if (!done)
                mtk_eth_cold_reset(eth);

        return 0;
}

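/*
 * Translate a raw MTK_FE_INT_STATUS snapshot (or a forced trigger) into
 * reset-event accounting. Returns 1 if a reset-worthy event was seen, in
 * which case the NETSYS register state is also dumped for post-mortem.
 */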
u32 mtk_check_reset_event(struct mtk_eth *eth, u32 status)
{
        u32 ret = 0, val = 0;

        if ((status & MTK_FE_INT_FQ_EMPTY) ||
            (status & MTK_FE_INT_RFIFO_UF) ||
            (status & MTK_FE_INT_RFIFO_OV) ||
            (status & MTK_FE_INT_TSO_FAIL) ||
            (status & MTK_FE_INT_TSO_ALIGN) ||
            (status & MTK_FE_INT_TSO_ILLEGAL)) {
                while (status) {
                        val = ffs((unsigned int)status) - 1;
                        mtk_reset_event_update(eth, val);
                        status &= ~(1 << val);
                }
                ret = 1;
        }

        if (atomic_read(&force)) {
                mtk_reset_event_update(eth, MTK_EVENT_FORCE);
                ret = 1;
        }

        if (ret) {
                mtk_reset_event_update(eth, MTK_EVENT_TOTAL_CNT);
                mtk_dump_netsys_info(eth);
        }

        return ret;
}

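/*
 * FE misc interrupt handler: logs any reset-worthy events (TSO faults,
 * RFIFO over/underflow) found in the status word, then acknowledges all
 * FE interrupt bits.
 */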
irqreturn_t mtk_handle_fe_irq(int irq, void *_eth)
{
        struct mtk_eth *eth = _eth;
        u32 status = 0, val = 0;

        status = mtk_r32(eth, MTK_FE_INT_STATUS);
        pr_info("[%s] Trigger FE Misc ISR: 0x%x\n", __func__, status);

        while (status) {
                val = ffs((unsigned int)status) - 1;
                status &= ~(1 << val);

                if ((val == MTK_EVENT_TSO_FAIL) ||
                    (val == MTK_EVENT_TSO_ILLEGAL) ||
                    (val == MTK_EVENT_TSO_ALIGN) ||
                    (val == MTK_EVENT_RFIFO_OV) ||
                    (val == MTK_EVENT_RFIFO_UF))
                        pr_info("[%s] Detect reset event: %s !\n", __func__,
                                mtk_reset_event_name[val]);
        }
        mtk_w32(eth, 0xFFFFFFFF, MTK_FE_INT_STATUS);

        return IRQ_HANDLED;
}

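/* Hex-dump @range bytes of register space starting at @offset, 16 per row. */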
static void mtk_dump_reg(void *_eth, char *name, u32 offset, u32 range)
{
        struct mtk_eth *eth = _eth;
        u32 cur = offset;

        pr_info("\n============ %s ============\n", name);
        while (cur < offset + range) {
                pr_info("0x%x: %08x %08x %08x %08x\n",
                        cur, mtk_r32(eth, cur), mtk_r32(eth, cur + 0x4),
                        mtk_r32(eth, cur + 0x8), mtk_r32(eth, cur + 0xc));
                cur += 0x10;
        }
}

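/*
 * Dump the main NETSYS register blocks (FE, ADMA, each QDMA page, WDMA,
 * PPE and GMAC) to aid offline analysis of a detected hang.
 */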
void mtk_dump_netsys_info(void *_eth)
{
        struct mtk_eth *eth = _eth;
        u32 id = 0;

        mtk_dump_reg(eth, "FE", 0x0, 0x500);
        mtk_dump_reg(eth, "ADMA", PDMA_BASE, 0x300);
        for (id = 0; id < MTK_QDMA_PAGE_NUM; id++) {
                mtk_w32(eth, id, MTK_QDMA_PAGE);
                pr_info("\nQDMA PAGE:%x ", mtk_r32(eth, MTK_QDMA_PAGE));
                mtk_dump_reg(eth, "QDMA", QDMA_BASE, 0x100);
                mtk_w32(eth, 0, MTK_QDMA_PAGE);
        }
        mtk_dump_reg(eth, "QDMA", MTK_QRX_BASE_PTR0, 0x300);
        mtk_dump_reg(eth, "WDMA", WDMA_BASE(0), 0x600);
        mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
        mtk_dump_reg(eth, "GMAC", 0x10000, 0x300);
}

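/*
 * WDMA Tx hang detector: a channel is considered stuck when its DTX
 * pointer stops moving while the Tx DMA reports busy and the CDM Tx FIFO
 * is not ready. Three consecutive hits request a full FE reset.
 */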
u32 mtk_monitor_wdma_tx(struct mtk_eth *eth)
{
        static u32 pre_dtx[MTK_WDMA_CNT];
        static u32 err_cnt[MTK_WDMA_CNT];
        u32 i = 0, cur_dtx = 0, tx_busy = 0, tx_rdy = 0, err_flag = 0;
        u32 dbg_mon = 0;

        for (i = 0; i < MTK_WDMA_CNT; i++) {
                cur_dtx = mtk_r32(eth, MTK_WDMA_DTX_PTR(i));
                tx_busy = mtk_r32(eth, MTK_WDMA_GLO_CFG(i)) & MTK_TX_DMA_BUSY;
                dbg_mon = mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(i));
                tx_rdy = !(dbg_mon & MTK_CDM_TXFIFO_RDY);
                if (cur_dtx == pre_dtx[i] && tx_busy && tx_rdy) {
                        err_cnt[i]++;
                        if (err_cnt[i] >= 3) {
                                pr_info("WDMA %d Info\n", i);
                                pr_info("err_cnt = %d\n", err_cnt[i]);
                                pr_info("prev_dtx = 0x%x | cur_dtx = 0x%x\n",
                                        pre_dtx[i], cur_dtx);
                                pr_info("WDMA_CTX_PTR = 0x%x\n",
                                        mtk_r32(eth, MTK_WDMA_CTX_PTR(i)));
                                pr_info("WDMA_DTX_PTR = 0x%x\n",
                                        mtk_r32(eth, MTK_WDMA_DTX_PTR(i)));
                                pr_info("WDMA_GLO_CFG = 0x%x\n",
                                        mtk_r32(eth, MTK_WDMA_GLO_CFG(i)));
                                pr_info("WDMA_TX_DBG_MON0 = 0x%x\n",
                                        mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(i)));
                                pr_info("==============================\n");
                                err_flag = 1;
                        }
                } else {
                        err_cnt[i] = 0;
                }
                pre_dtx[i] = cur_dtx;
        }

        if (err_flag)
                return MTK_FE_START_RESET;
        else
                return 0;
}

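/*
 * WDMA Rx hang detector: a channel is considered stuck when its DRX
 * pointer and the matching PSE output-queue occupancy both stop moving
 * while the Rx DMA is busy and the queue is non-empty. Three consecutive
 * hits request a full FE reset.
 */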
u32 mtk_monitor_wdma_rx(struct mtk_eth *eth)
{
        static u32 pre_drx[MTK_WDMA_CNT];
        static u32 pre_opq[MTK_WDMA_CNT];
        static u32 err_cnt[MTK_WDMA_CNT];
        u32 i = 0, cur_drx = 0, rx_busy = 0, err_flag = 0;
        u32 cur_opq = 0;

        for (i = 0; i < MTK_WDMA_CNT; i++) {
                cur_drx = mtk_r32(eth, MTK_WDMA_DRX_PTR(i));
                rx_busy = mtk_r32(eth, MTK_WDMA_GLO_CFG(i)) & MTK_RX_DMA_BUSY;
                if (i == 0)
                        cur_opq = (mtk_r32(eth, MTK_PSE_OQ_STA(5)) & 0x1FF);
                else if (i == 1)
                        cur_opq = (mtk_r32(eth, MTK_PSE_OQ_STA(5)) & 0x1FF0000);
                else
                        cur_opq = (mtk_r32(eth, MTK_PSE_OQ_STA(7)) & 0x1FF0000);

                if (cur_drx == pre_drx[i] && rx_busy && cur_opq != 0 &&
                    cur_opq == pre_opq[i]) {
                        err_cnt[i]++;
                        if (err_cnt[i] >= 3) {
                                pr_info("WDMA %d Info\n", i);
                                pr_info("err_cnt = %d\n", err_cnt[i]);
                                pr_info("prev_drx = 0x%x | cur_drx = 0x%x\n",
                                        pre_drx[i], cur_drx);
                                pr_info("WDMA_CRX_PTR = 0x%x\n",
                                        mtk_r32(eth, MTK_WDMA_CRX_PTR(i)));
                                pr_info("WDMA_DRX_PTR = 0x%x\n",
                                        mtk_r32(eth, MTK_WDMA_DRX_PTR(i)));
                                pr_info("WDMA_GLO_CFG = 0x%x\n",
                                        mtk_r32(eth, MTK_WDMA_GLO_CFG(i)));
                                pr_info("==============================\n");
                                err_flag = 1;
                        }
                } else {
                        err_cnt[i] = 0;
                }
                pre_drx[i] = cur_drx;
                pre_opq[i] = cur_opq;
        }

        if (err_flag)
                return MTK_FE_START_RESET;
        else
                return 0;
}

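/*
 * Helper for the QDMA Tx monitor: samples the first counter of each GDM
 * MIB block (MTK_GDM1_TX_GBCNT plus the per-MAC stride) and returns 1 if
 * any of them reads zero, which the caller treats as Rx flow control
 * being asserted.
 */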
u32 mtk_monitor_rx_fc(struct mtk_eth *eth)
{
        u32 i = 0, mib_base = 0, gdm_fc = 0;

        for (i = 0; i < MTK_MAC_COUNT; i++) {
                mib_base = MTK_GDM1_TX_GBCNT + MTK_STAT_OFFSET * i;
                gdm_fc = mtk_r32(eth, mib_base);
                if (gdm_fc < 1)
                        return 1;
        }
        return 0;
}

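/*
 * QDMA Tx hang detector: trips when the QDMA FSM reports a stuck Tx
 * state, no descriptors are being forwarded and Rx flow control is
 * asserted, for three consecutive polls. Requests a traffic stop rather
 * than a full FE reset.
 */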
u32 mtk_monitor_qdma_tx(struct mtk_eth *eth)
{
        static u32 err_cnt_qtx;
        u32 err_flag = 0;
        u32 is_rx_fc = 0;

        u32 is_qfsm_hang = (mtk_r32(eth, MTK_QDMA_FSM) & 0xF00) != 0;
        u32 is_qfwd_hang = mtk_r32(eth, MTK_QDMA_FWD_CNT) == 0;

        is_rx_fc = mtk_monitor_rx_fc(eth);
        if (is_qfsm_hang && is_qfwd_hang && is_rx_fc) {
                err_cnt_qtx++;
                if (err_cnt_qtx >= 3) {
                        pr_info("QDMA Tx Info\n");
                        pr_info("err_cnt = %d\n", err_cnt_qtx);
                        pr_info("is_qfsm_hang = %d\n", is_qfsm_hang);
                        pr_info("is_qfwd_hang = %d\n", is_qfwd_hang);
                        pr_info("-- -- -- -- -- -- --\n");
                        pr_info("MTK_QDMA_FSM = 0x%x\n",
                                mtk_r32(eth, MTK_QDMA_FSM));
                        pr_info("MTK_QDMA_FWD_CNT = 0x%x\n",
                                mtk_r32(eth, MTK_QDMA_FWD_CNT));
                        pr_info("MTK_QDMA_FQ_CNT = 0x%x\n",
                                mtk_r32(eth, MTK_QDMA_FQ_CNT));
                        pr_info("==============================\n");
                        err_flag = 1;
                }
        } else {
                err_cnt_qtx = 0;
        }

        if (err_flag)
                return MTK_FE_STOP_TRAFFIC;
        else
                return 0;
}

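/*
 * QDMA Rx hang detector: trips when the QDMA Rx FSM sits in state 9 while
 * the free-queue head and tail pointers stop moving, for three
 * consecutive polls. Requests a traffic stop.
 */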
u32 mtk_monitor_qdma_rx(struct mtk_eth *eth)
{
        static u32 err_cnt_qrx;
        static u32 pre_fq_head, pre_fq_tail;
        u32 err_flag = 0;

        u32 qrx_fsm = (mtk_r32(eth, MTK_QDMA_FSM) & 0x1F) == 9;
        u32 fq_head = mtk_r32(eth, MTK_QDMA_FQ_HEAD);
        u32 fq_tail = mtk_r32(eth, MTK_QDMA_FQ_TAIL);

        if (qrx_fsm && fq_head == pre_fq_head &&
            fq_tail == pre_fq_tail) {
                err_cnt_qrx++;
                if (err_cnt_qrx >= 3) {
                        pr_info("QDMA Rx Info\n");
                        pr_info("err_cnt = %d\n", err_cnt_qrx);
                        pr_info("MTK_QDMA_FSM = 0x%x\n",
                                mtk_r32(eth, MTK_QDMA_FSM));
                        pr_info("FQ_HEAD = 0x%x\n",
                                mtk_r32(eth, MTK_QDMA_FQ_HEAD));
                        pr_info("FQ_TAIL = 0x%x\n",
                                mtk_r32(eth, MTK_QDMA_FQ_TAIL));
                        err_flag = 1;
                }
        } else {
                err_cnt_qrx = 0;
        }
        pre_fq_head = fq_head;
        pre_fq_tail = fq_tail;

        if (err_flag)
                return MTK_FE_STOP_TRAFFIC;
        else
                return 0;
}

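/*
 * ADMA (PDMA) Rx hang detector: trips when PSE output queue 0 is backed
 * up, the CDM1 FSM is active, and the ADMA Rx debug status shows an idle
 * FSM with the FIFO not ready, for three consecutive polls.
 */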
u32 mtk_monitor_adma_rx(struct mtk_eth *eth)
{
        static u32 err_cnt_arx;
        u32 err_flag = 0;
        u32 opq0 = (mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x1FF) != 0;
        u32 cdm1_fsm = (mtk_r32(eth, MTK_FE_CDM1_FSM) & 0xFFFF0000) != 0;
        u32 cur_stat = ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x1F) == 0);
        u32 fifo_rdy = ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x40) == 0);

        if (opq0 && cdm1_fsm && cur_stat && fifo_rdy) {
                err_cnt_arx++;
                if (err_cnt_arx >= 3) {
                        pr_info("ADMA Rx Info\n");
                        pr_info("err_cnt = %d\n", err_cnt_arx);
                        pr_info("CDM1_FSM = 0x%x\n",
                                mtk_r32(eth, MTK_FE_CDM1_FSM));
                        pr_info("MTK_PSE_OQ_STA1 = 0x%x\n",
                                mtk_r32(eth, MTK_PSE_OQ_STA(0)));
                        pr_info("MTK_ADMA_RX_DBG0 = 0x%x\n",
                                mtk_r32(eth, MTK_ADMA_RX_DBG0));
                        pr_info("MTK_ADMA_RX_DBG1 = 0x%x\n",
                                mtk_r32(eth, MTK_ADMA_RX_DBG1));
                        pr_info("==============================\n");
                        err_flag = 1;
                }
        } else {
                err_cnt_arx = 0;
        }

        if (err_flag)
                return MTK_FE_STOP_TRAFFIC;
        else
                return 0;
}

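/*
 * TDMA Tx hang detector (NETSYS v3 only): trips when the CDM6 Tx FSM
 * stays non-zero across consecutive polls while the TDMA Tx DMA is busy.
 */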
u32 mtk_monitor_tdma_tx(struct mtk_eth *eth)
{
        static u32 err_cnt_ttx;
        static u32 pre_fsm;
        u32 err_flag = 0;
        u32 cur_fsm = 0;
        u32 tx_busy = 0;

        if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
                cur_fsm = (mtk_r32(eth, MTK_FE_CDM6_FSM) & 0x1FFF) != 0;
                tx_busy = ((mtk_r32(eth, MTK_TDMA_GLO_CFG) & 0x2) != 0);

                if (cur_fsm == pre_fsm && cur_fsm != 0 && tx_busy) {
                        err_cnt_ttx++;
                        if (err_cnt_ttx >= 3) {
                                pr_info("TDMA Tx Info\n");
                                pr_info("err_cnt = %d\n", err_cnt_ttx);
                                pr_info("CDM6_FSM = 0x%x\n",
                                        mtk_r32(eth, MTK_FE_CDM6_FSM));
                                pr_info("DMA CFG = 0x%x\n",
                                        mtk_r32(eth, MTK_TDMA_GLO_CFG));
                                pr_info("==============================\n");
                                err_flag = 1;
                        }
                } else {
                        err_cnt_ttx = 0;
                }

                pre_fsm = cur_fsm;
        }

        if (err_flag)
                return MTK_FE_STOP_TRAFFIC;
        else
                return 0;
}

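/*
 * TDMA Rx hang detector (NETSYS v3 only): trips when the CDM6 Rx FSM
 * stays non-zero across consecutive polls while the TDMA Rx DMA is busy.
 */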
u32 mtk_monitor_tdma_rx(struct mtk_eth *eth)
{
        static u32 err_cnt_trx;
        static u32 pre_fsm;
        u32 err_flag = 0;
        u32 cur_fsm = 0;
        u32 rx_busy = 0;

        if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
                cur_fsm = (mtk_r32(eth, MTK_FE_CDM6_FSM) & 0xFFF0000) != 0;
                rx_busy = ((mtk_r32(eth, MTK_TDMA_GLO_CFG) & 0x8) != 0);

                if (cur_fsm == pre_fsm && cur_fsm != 0 && rx_busy) {
                        err_cnt_trx++;
                        if (err_cnt_trx >= 3) {
                                pr_info("TDMA Rx Info\n");
                                pr_info("err_cnt = %d\n", err_cnt_trx);
                                pr_info("CDM6_FSM = 0x%x\n",
                                        mtk_r32(eth, MTK_FE_CDM6_FSM));
                                pr_info("DMA CFG = 0x%x\n",
                                        mtk_r32(eth, MTK_TDMA_GLO_CFG));
                                pr_info("==============================\n");
                                err_flag = 1;
                        }
                } else {
                        err_cnt_trx = 0;
                }

                pre_fsm = cur_fsm;
        }

        if (err_flag)
                return MTK_FE_STOP_TRAFFIC;
        else
                return 0;
}

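/*
 * Hang monitors in polling order; the first monitor to report an action
 * aborts the scan for this period.
 */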
static const mtk_monitor_xdma_func mtk_reset_monitor_func[] = {
        [0] = mtk_monitor_wdma_tx,
        [1] = mtk_monitor_wdma_rx,
        [2] = mtk_monitor_qdma_tx,
        [3] = mtk_monitor_qdma_rx,
        [4] = mtk_monitor_adma_rx,
        [5] = mtk_monitor_tdma_tx,
        [6] = mtk_monitor_tdma_rx,
};

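/*
 * Periodic (1 Hz) DMA watchdog: runs each hang monitor in turn and, on
 * the first reported action, latches it in mtk_reset_flag and schedules
 * the SER work, unless a reset is already in flight.
 */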
void mtk_dma_monitor(struct timer_list *t)
{
        struct mtk_eth *eth = from_timer(eth, t, mtk_dma_monitor_timer);
        u32 i = 0, ret = 0;

        for (i = 0; i < ARRAY_SIZE(mtk_reset_monitor_func); i++) {
                ret = (*mtk_reset_monitor_func[i])(eth);
                if ((ret == MTK_FE_START_RESET) ||
                    (ret == MTK_FE_STOP_TRAFFIC)) {
                        if ((atomic_read(&reset_lock) == 0) &&
                            (atomic_read(&force) == 0)) {
                                atomic_inc(&force);
                                mtk_reset_flag = ret;
                                schedule_work(&eth->pending_work);
                        }
                        break;
                }
        }

        mod_timer(&eth->mtk_dma_monitor_timer, jiffies + 1 * HZ);
}

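/*
 * Quiesce the frame engine ahead of a reset: mask NETSYS interrupts, stop
 * the Linux Tx queues, halt QDMA Tx, disable the per-MAC Tx/Rx paths,
 * drop all traffic at the GDM, and finally stop ADMA Rx.
 */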
void mtk_prepare_reset_fe(struct mtk_eth *eth)
{
        u32 i = 0, val = 0, mcr = 0;

        /* Disable NETSYS Interrupt */
        mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
        mtk_w32(eth, 0, MTK_PDMA_INT_MASK);
        mtk_w32(eth, 0, MTK_QDMA_INT_MASK);

        /* Disable Linux netif Tx path */
        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->netdev[i])
                        continue;
                netif_tx_disable(eth->netdev[i]);
        }

        /* Disable QDMA Tx */
        val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
        mtk_w32(eth, val & ~(MTK_TX_DMA_EN), MTK_QDMA_GLO_CFG);

        for (i = 0; i < MTK_MAC_COUNT; i++) {
                pr_info("[%s] i:%d type:%d id:%d\n",
                        __func__, i, eth->mac[i]->type, eth->mac[i]->id);
                if (eth->mac[i]->type == MTK_XGDM_TYPE &&
                    eth->mac[i]->id != MTK_GMAC1_ID) {
                        mcr = mtk_r32(eth, MTK_XMAC_MCR(eth->mac[i]->id));
                        mcr &= 0xfffffff0;
                        mcr |= XMAC_MCR_TRX_DISABLE;
                        pr_info("disable XMAC TX/RX\n");
                        mtk_w32(eth, mcr, MTK_XMAC_MCR(eth->mac[i]->id));
                }

                if (eth->mac[i]->type == MTK_GDM_TYPE) {
                        mcr = mtk_r32(eth, MTK_MAC_MCR(eth->mac[i]->id));
                        mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
                        mtk_w32(eth, mcr, MTK_MAC_MCR(eth->mac[i]->id));
                        pr_info("disable GMAC TX/RX\n");
                }
        }

        /* Enable GDM drop */
        for (i = 0; i < MTK_MAC_COUNT; i++)
                mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);

        /* Disable ADMA Rx */
        val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
        mtk_w32(eth, val & ~(MTK_RX_DMA_EN), MTK_PDMA_GLO_CFG);
}

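/*
 * Quiesce one PPE ahead of a reset: stop keep-alive, stretch the KA
 * timers to their maximum, disable scan mode, then poll up to ~5 seconds
 * for the PPE to go idle, dumping the FE/PPE registers if it never does.
 */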
void mtk_prepare_reset_ppe(struct mtk_eth *eth, u32 ppe_id)
{
        u32 i = 0, poll_time = 5000, val;

        /* Disable KA */
        mtk_m32(eth, MTK_PPE_KA_CFG_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
        mtk_m32(eth, MTK_PPE_NTU_KA_MASK, 0, MTK_PPE_BIND_LMT_1(ppe_id));
        mtk_w32(eth, 0, MTK_PPE_KA(ppe_id));
        mdelay(10);

        /* Set KA timer to maximum */
        mtk_m32(eth, MTK_PPE_NTU_KA_MASK, (0xFF << 16), MTK_PPE_BIND_LMT_1(ppe_id));
        mtk_w32(eth, 0xFFFFFFFF, MTK_PPE_KA(ppe_id));

        /* Set KA tick select */
        mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, (0x1 << 24), MTK_PPE_TB_CFG(ppe_id));
        mtk_m32(eth, MTK_PPE_KA_CFG_MASK, (0x3 << 12), MTK_PPE_TB_CFG(ppe_id));
        mdelay(10);

        /* Disable scan mode */
        mtk_m32(eth, MTK_PPE_SCAN_MODE_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
        mdelay(10);

        /* Check PPE idle */
        while (i++ < poll_time) {
                val = mtk_r32(eth, MTK_PPE_GLO_CFG(ppe_id));
                if (!(val & MTK_PPE_BUSY))
                        break;
                mdelay(1);
        }

        if (i >= poll_time) {
                pr_info("[%s] PPE keeps busy !\n", __func__);
                mtk_dump_reg(eth, "FE", 0x0, 0x500);
                mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
        }
}

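/*
 * Notifier that synchronizes SER with WiFi: chip online/offline events
 * track how many chips participate, and reset-done/stop-traffic-done
 * acknowledgements count down until every chip has answered, at which
 * point the waiting reset path is released via wait_ser_done.
 */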
static int mtk_eth_netdevice_event(struct notifier_block *unused,
                                   unsigned long event, void *ptr)
{
        switch (event) {
        case MTK_WIFI_RESET_DONE:
        case MTK_FE_STOP_TRAFFIC_DONE:
                mtk_rest_cnt--;
                if (!mtk_rest_cnt) {
                        complete(&wait_ser_done);
                        mtk_rest_cnt = mtk_wifi_num;
                }
                break;
        case MTK_WIFI_CHIP_ONLINE:
                mtk_wifi_num++;
                mtk_rest_cnt = mtk_wifi_num;
                break;
        case MTK_WIFI_CHIP_OFFLINE:
                mtk_wifi_num--;
                mtk_rest_cnt = mtk_wifi_num;
                break;
        default:
                break;
        }

        return NOTIFY_DONE;
}

struct notifier_block mtk_eth_netdevice_nb __read_mostly = {
        .notifier_call = mtk_eth_netdevice_event,
};