blob: 5ab74add9bd199b8eea5f007b343dcbfdebcdb6d [file] [log] [blame]
developer8051e042022-04-08 13:26:36 +08001/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright (c) 2022 MediaTek Inc.
4 * Author: Henry Yen <henry.yen@mediatek.com>
5 */
6
7#include <linux/regmap.h>
8#include "mtk_eth_soc.h"
9#include "mtk_eth_dbg.h"
10#include "mtk_eth_reset.h"
11
/* Human-readable names for the SER/reset event counters, indexed by the
 * MTK_EVENT_* IDs passed to mtk_reset_event_update().  Unlisted slots
 * remain NULL.
 */
char* mtk_reset_event_name[32] = {
	[MTK_EVENT_FORCE] = "Force",
	[MTK_EVENT_WARM_CNT] = "Warm",
	[MTK_EVENT_COLD_CNT] = "Cold",
	[MTK_EVENT_TOTAL_CNT] = "Total",
	[MTK_EVENT_FQ_EMPTY] = "FQ Empty",
	[MTK_EVENT_TSO_FAIL] = "TSO Fail",
	[MTK_EVENT_TSO_ILLEGAL] = "TSO Illegal",
	[MTK_EVENT_TSO_ALIGN] = "TSO Align",
	[MTK_EVENT_RFIFO_OV] = "RFIFO OV",
	[MTK_EVENT_RFIFO_UF] = "RFIFO UF",
};
24
developerbe971722022-05-23 13:51:05 +080025static int mtk_wifi_num = 0;
26static int mtk_rest_cnt = 0;
developer37482a42022-12-26 13:31:13 +080027u32 mtk_reset_flag = MTK_FE_START_RESET;
developer7979ddb2023-04-24 17:19:21 +080028bool mtk_stop_fail;
29
developer37482a42022-12-26 13:31:13 +080030typedef u32 (*mtk_monitor_xdma_func) (struct mtk_eth *eth);
developerbe971722022-05-23 13:51:05 +080031
developer8051e042022-04-08 13:26:36 +080032void mtk_reset_event_update(struct mtk_eth *eth, u32 id)
33{
34 struct mtk_reset_event *reset_event = &eth->reset_event;
35 reset_event->count[id]++;
36}
37
38int mtk_eth_cold_reset(struct mtk_eth *eth)
39{
developer0baa6962023-01-31 14:25:23 +080040 u32 reset_bits = 0;
developer089e8852022-09-28 14:43:46 +080041 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
42 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer8051e042022-04-08 13:26:36 +080043 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
44
developer0baa6962023-01-31 14:25:23 +080045 reset_bits = RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE0;
developer8051e042022-04-08 13:26:36 +080046 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
developer0baa6962023-01-31 14:25:23 +080047 reset_bits |= RSTCTRL_PPE1;
48#if defined(CONFIG_MEDIATEK_NETSYS_V3)
49 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
50 reset_bits |= RSTCTRL_PPE2;
51 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
52 reset_bits |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
53#endif
54 ethsys_reset(eth, reset_bits);
developer8051e042022-04-08 13:26:36 +080055
developer7979ddb2023-04-24 17:19:21 +080056 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developer8051e042022-04-08 13:26:36 +080057 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0x3ffffff);
58
developer7979ddb2023-04-24 17:19:21 +080059 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
60 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0x6F8FF);
61
developer8051e042022-04-08 13:26:36 +080062 return 0;
63}
64
/* Attempt a warm reset of the frame engine.
 *
 * Sequence: assert RSTCTRL_FE alone, poll (up to ~1ms) until the bit
 * reads back as set, then widen the assertion to ETH/PPE (and WDMA when
 * a full reset was requested), and finally de-assert everything.  If the
 * initial FE assertion never latches, fall back to a cold reset.
 * Always returns 0.
 */
int mtk_eth_warm_reset(struct mtk_eth *eth)
{
	u32 reset_bits = 0, i = 0, done = 0;
	u32 val1 = 0, val2 = 0, val3 = 0;

	/* Let in-flight traffic drain before touching RSTCTRL. */
	mdelay(100);

	reset_bits |= RSTCTRL_FE;
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits, reset_bits);

	/* Wait for the FE reset bit to latch (max 1000 * 1us). */
	while (i < 1000) {
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val1);
		if (val1 & RSTCTRL_FE)
			break;
		i++;
		udelay(1);
	}

	if (i < 1000) {
		/* FE is in reset; extend the scope to the other blocks. */
		reset_bits = RSTCTRL_ETH | RSTCTRL_PPE0;
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
			reset_bits |= RSTCTRL_PPE1;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
			reset_bits |= RSTCTRL_PPE2;
		/* WDMA blocks are only reset on a full FE restart. */
		if (mtk_reset_flag == MTK_FE_START_RESET)
			reset_bits |= RSTCTRL_WDMA0 |
				      RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
#endif

		regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
				   reset_bits, reset_bits);

		/* Sanity check: the bits should now read back as set. */
		udelay(1);
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val2);
		if (!(val2 & reset_bits))
			pr_info("[%s] error val2=0x%x reset_bits=0x%x !\n",
				__func__, val2, reset_bits);
		/* De-assert the entire mask, FE included. */
		reset_bits |= RSTCTRL_FE;
		regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
				   reset_bits, ~reset_bits);

		/* Sanity check: the bits should now read back as clear. */
		udelay(1);
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val3);
		if (val3 & reset_bits)
			pr_info("[%s] error val3=0x%x reset_bits=0x%x !\n",
				__func__, val3, reset_bits);
		done = 1;
		mtk_reset_event_update(eth, MTK_EVENT_WARM_CNT);
	}

	pr_info("[%s] reset record val1=0x%x, val2=0x%x, val3=0x%x i:%d done:%d\n",
		__func__, val1, val2, val3, i, done);

	/* Warm reset never latched - escalate to a cold reset. */
	if (!done)
		mtk_eth_cold_reset(eth);

	return 0;
}
125
126u32 mtk_check_reset_event(struct mtk_eth *eth, u32 status)
127{
128 u32 ret = 0, val = 0;
129
130 if ((status & MTK_FE_INT_FQ_EMPTY) ||
131 (status & MTK_FE_INT_RFIFO_UF) ||
132 (status & MTK_FE_INT_RFIFO_OV) ||
133 (status & MTK_FE_INT_TSO_FAIL) ||
134 (status & MTK_FE_INT_TSO_ALIGN) ||
135 (status & MTK_FE_INT_TSO_ILLEGAL)) {
136 while (status) {
137 val = ffs((unsigned int)status) - 1;
138 mtk_reset_event_update(eth, val);
139 status &= ~(1 << val);
140 }
141 ret = 1;
142 }
143
144 if (atomic_read(&force)) {
145 mtk_reset_event_update(eth, MTK_EVENT_FORCE);
146 ret = 1;
147 }
148
149 if (ret) {
150 mtk_reset_event_update(eth, MTK_EVENT_TOTAL_CNT);
developer7979ddb2023-04-24 17:19:21 +0800151 if (dbg_show_level)
152 mtk_dump_netsys_info(eth);
developer8051e042022-04-08 13:26:36 +0800153 }
154
155 return ret;
156}
157
158irqreturn_t mtk_handle_fe_irq(int irq, void *_eth)
159{
160 struct mtk_eth *eth = _eth;
161 u32 status = 0, val = 0;
162
163 status = mtk_r32(eth, MTK_FE_INT_STATUS);
164 pr_info("[%s] Trigger FE Misc ISR: 0x%x\n", __func__, status);
165
166 while (status) {
167 val = ffs((unsigned int)status) - 1;
168 status &= ~(1 << val);
developera7ee5fe2022-04-21 17:45:57 +0800169
developerbe971722022-05-23 13:51:05 +0800170 if ((val == MTK_EVENT_TSO_FAIL) ||
developera7ee5fe2022-04-21 17:45:57 +0800171 (val == MTK_EVENT_TSO_ILLEGAL) ||
172 (val == MTK_EVENT_TSO_ALIGN) ||
173 (val == MTK_EVENT_RFIFO_OV) ||
174 (val == MTK_EVENT_RFIFO_UF))
175 pr_info("[%s] Detect reset event: %s !\n", __func__,
176 mtk_reset_event_name[val]);
developer8051e042022-04-08 13:26:36 +0800177 }
developera7ee5fe2022-04-21 17:45:57 +0800178 mtk_w32(eth, 0xFFFFFFFF, MTK_FE_INT_STATUS);
developer8051e042022-04-08 13:26:36 +0800179
180 return IRQ_HANDLED;
181}
182
183static void mtk_dump_reg(void *_eth, char *name, u32 offset, u32 range)
184{
185 struct mtk_eth *eth = _eth;
186 u32 cur = offset;
187
188 pr_info("\n============ %s ============\n", name);
189 while(cur < offset + range) {
190 pr_info("0x%x: %08x %08x %08x %08x\n",
191 cur, mtk_r32(eth, cur), mtk_r32(eth, cur + 0x4),
192 mtk_r32(eth, cur + 0x8), mtk_r32(eth, cur + 0xc));
193 cur += 0x10;
194 }
195}
196
/* Dump the main NETSYS register regions (FE, DMA engines, PPE, MACs)
 * for post-mortem analysis.  On NETSYS v3 the XGMAC and USXGMII PCS
 * blocks are included as well.  Offsets/ranges are raw register map
 * addresses - keep in sync with the SoC register layout.
 */
void mtk_dump_netsys_info(void *_eth)
{
	struct mtk_eth *eth = _eth;
	u32 id = 0;

	mtk_dump_reg(eth, "FE", 0x0, 0x500);
	mtk_dump_reg(eth, "ADMA", PDMA_BASE, 0x300);
	/* QDMA registers are paged; dump each page, then restore page 0. */
	for (id = 0; id < MTK_QDMA_PAGE_NUM; id++){
		mtk_w32(eth, id, MTK_QDMA_PAGE);
		pr_info("\nQDMA PAGE:%x ",mtk_r32(eth, MTK_QDMA_PAGE));
		mtk_dump_reg(eth, "QDMA", QDMA_BASE, 0x100);
		mtk_w32(eth, 0, MTK_QDMA_PAGE);
	}
	mtk_dump_reg(eth, "QDMA", MTK_QRX_BASE_PTR0, 0x300);
	mtk_dump_reg(eth, "WDMA", WDMA_BASE(0), 0x600);
	mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
	mtk_dump_reg(eth, "GMAC", 0x10000, 0x300);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		mtk_dump_reg(eth, "XGMAC0", 0x12000, 0x300);
		mtk_dump_reg(eth, "XGMAC1", 0x13000, 0x300);
		mtk_dump_usxgmii(eth->usxgmii->pcs[0].regmap,
				 "USXGMII0", 0, 0x1000);
		mtk_dump_usxgmii(eth->usxgmii->pcs[1].regmap,
				 "USXGMII1", 0, 0x1000);
	}
}
223
developer37482a42022-12-26 13:31:13 +0800224u32 mtk_monitor_wdma_tx(struct mtk_eth *eth)
developer8051e042022-04-08 13:26:36 +0800225{
developer37482a42022-12-26 13:31:13 +0800226 static u32 pre_dtx[MTK_WDMA_CNT];
227 static u32 err_cnt[MTK_WDMA_CNT];
228 u32 i = 0, cur_dtx = 0, tx_busy = 0, tx_rdy = 0, err_flag = 0;
229 u32 dbg_mon = 0;
developer797e46c2022-07-29 12:05:32 +0800230
developer37482a42022-12-26 13:31:13 +0800231 for (i = 0; i < MTK_WDMA_CNT; i++) {
232 cur_dtx = mtk_r32(eth, MTK_WDMA_DTX_PTR(i));
233 tx_busy = mtk_r32(eth, MTK_WDMA_GLO_CFG(i)) & MTK_TX_DMA_BUSY;
234 dbg_mon = mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(i));
235 tx_rdy = !(dbg_mon & MTK_CDM_TXFIFO_RDY);
236 if (cur_dtx == pre_dtx[i] && tx_busy && tx_rdy) {
237 err_cnt[i]++;
238 if (err_cnt[i] >= 3) {
239 pr_info("WDMA %d Info\n", i);
240 pr_info("err_cnt = %d", err_cnt[i]);
241 pr_info("prev_dtx = 0x%x | cur_dtx = 0x%x\n",
242 pre_dtx[i], cur_dtx);
243 pr_info("WDMA_CTX_PTR = 0x%x\n",
244 mtk_r32(eth, MTK_WDMA_CTX_PTR(i)));
245 pr_info("WDMA_DTX_PTR = 0x%x\n",
246 mtk_r32(eth, MTK_WDMA_DTX_PTR(i)));
247 pr_info("WDMA_GLO_CFG = 0x%x\n",
248 mtk_r32(eth, MTK_WDMA_GLO_CFG(i)));
249 pr_info("WDMA_TX_DBG_MON0 = 0x%x\n",
250 mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(i)));
251 pr_info("==============================\n");
252 err_flag = 1;
253 }
254 } else
255 err_cnt[i] = 0;
256 pre_dtx[i] = cur_dtx;
257 }
developer8051e042022-04-08 13:26:36 +0800258
developer37482a42022-12-26 13:31:13 +0800259 if (err_flag)
260 return MTK_FE_START_RESET;
261 else
262 return 0;
263}
developer8051e042022-04-08 13:26:36 +0800264
developer37482a42022-12-26 13:31:13 +0800265u32 mtk_monitor_wdma_rx(struct mtk_eth *eth)
266{
267 static u32 pre_drx[MTK_WDMA_CNT];
268 static u32 pre_opq[MTK_WDMA_CNT];
269 static u32 err_cnt[MTK_WDMA_CNT];
270 u32 i = 0, cur_drx = 0, rx_busy = 0, err_flag = 0;
271 u32 cur_opq = 0;
272
273 for (i = 0; i < MTK_WDMA_CNT; i++) {
274 cur_drx = mtk_r32(eth, MTK_WDMA_DRX_PTR(i));
275 rx_busy = mtk_r32(eth, MTK_WDMA_GLO_CFG(i)) & MTK_RX_DMA_BUSY;
276 if (i == 0)
277 cur_opq = (mtk_r32(eth, MTK_PSE_OQ_STA(5)) & 0x1FF);
278 else if (i == 1)
279 cur_opq = (mtk_r32(eth, MTK_PSE_OQ_STA(5)) & 0x1FF0000);
280 else
281 cur_opq = (mtk_r32(eth, MTK_PSE_OQ_STA(7)) & 0x1FF0000);
282
283 if (cur_drx == pre_drx[i] && rx_busy && cur_opq != 0 &&
284 cur_opq == pre_opq[i]) {
285 err_cnt[i]++;
286 if (err_cnt[i] >= 3) {
287 pr_info("WDMA %d Info\n", i);
288 pr_info("err_cnt = %d", err_cnt[i]);
289 pr_info("prev_drx = 0x%x | cur_drx = 0x%x\n",
290 pre_drx[i], cur_drx);
291 pr_info("WDMA_CRX_PTR = 0x%x\n",
292 mtk_r32(eth, MTK_WDMA_CRX_PTR(i)));
293 pr_info("WDMA_DRX_PTR = 0x%x\n",
294 mtk_r32(eth, MTK_WDMA_DRX_PTR(i)));
295 pr_info("WDMA_GLO_CFG = 0x%x\n",
296 mtk_r32(eth, MTK_WDMA_GLO_CFG(i)));
297 pr_info("==============================\n");
298 err_flag = 1;
developer8051e042022-04-08 13:26:36 +0800299 }
developer37482a42022-12-26 13:31:13 +0800300 } else
301 err_cnt[i] = 0;
302 pre_drx[i] = cur_drx;
303 pre_opq[i] = cur_opq;
304 }
305
306 if (err_flag)
307 return MTK_FE_START_RESET;
308 else
309 return 0;
310}
311
312u32 mtk_monitor_rx_fc(struct mtk_eth *eth)
313{
314 u32 i = 0, mib_base = 0, gdm_fc = 0;
315
316 for (i = 0; i < MTK_MAC_COUNT; i++) {
317 mib_base = MTK_GDM1_TX_GBCNT + MTK_STAT_OFFSET*i;
318 gdm_fc = mtk_r32(eth, mib_base);
319 if (gdm_fc < 1)
320 return 1;
321 }
322 return 0;
323}
324
/* Detect a QDMA Tx hang.
 *
 * Suspect when the QDMA Tx FSM is stuck in a non-idle state, the forward
 * counter is zero and Rx flow control appears active.  Three consecutive
 * suspect polls dump state and ask for traffic to be stopped.
 *
 * Returns MTK_FE_STOP_TRAFFIC when a hang is confirmed, 0 otherwise.
 */
u32 mtk_monitor_qdma_tx(struct mtk_eth *eth)
{
	static u32 err_cnt_qtx;
	u32 err_flag = 0;
	/* NOTE(review): 'i' is declared but never used in this function. */
	u32 i = 0, is_rx_fc = 0;

	u32 is_qfsm_hang = (mtk_r32(eth, MTK_QDMA_FSM) & 0xF00) != 0;
	u32 is_qfwd_hang = mtk_r32(eth, MTK_QDMA_FWD_CNT) == 0;

	is_rx_fc = mtk_monitor_rx_fc(eth);
	if (is_qfsm_hang && is_qfwd_hang && is_rx_fc) {
		err_cnt_qtx++;
		if (err_cnt_qtx >= 3) {
			pr_info("QDMA Tx Info\n");
			pr_info("err_cnt = %d", err_cnt_qtx);
			pr_info("is_qfsm_hang = %d\n", is_qfsm_hang);
			pr_info("is_qfwd_hang = %d\n", is_qfwd_hang);
			pr_info("-- -- -- -- -- -- --\n");
			pr_info("MTK_QDMA_FSM = 0x%x\n",
				mtk_r32(eth, MTK_QDMA_FSM));
			pr_info("MTK_QDMA_FWD_CNT = 0x%x\n",
				mtk_r32(eth, MTK_QDMA_FWD_CNT));
			pr_info("MTK_QDMA_FQ_CNT = 0x%x\n",
				mtk_r32(eth, MTK_QDMA_FQ_CNT));
			pr_info("==============================\n");
			err_flag = 1;
		}
	} else
		err_cnt_qtx = 0;

	if (err_flag)
		return MTK_FE_STOP_TRAFFIC;
	else
		return 0;
}
360
361u32 mtk_monitor_qdma_rx(struct mtk_eth *eth)
362{
363 static u32 err_cnt_qrx;
364 static u32 pre_fq_head, pre_fq_tail;
365 u32 err_flag = 0;
366
367 u32 qrx_fsm = (mtk_r32(eth, MTK_QDMA_FSM) & 0x1F) == 9;
368 u32 fq_head = mtk_r32(eth, MTK_QDMA_FQ_HEAD);
369 u32 fq_tail = mtk_r32(eth, MTK_QDMA_FQ_TAIL);
370
371 if (qrx_fsm && fq_head == pre_fq_head &&
372 fq_tail == pre_fq_tail) {
373 err_cnt_qrx++;
374 if (err_cnt_qrx >= 3) {
375 pr_info("QDMA Rx Info\n");
376 pr_info("err_cnt = %d", err_cnt_qrx);
377 pr_info("MTK_QDMA_FSM = %d\n",
378 mtk_r32(eth, MTK_QDMA_FSM));
379 pr_info("FQ_HEAD = 0x%x\n",
380 mtk_r32(eth, MTK_QDMA_FQ_HEAD));
381 pr_info("FQ_TAIL = 0x%x\n",
382 mtk_r32(eth, MTK_QDMA_FQ_TAIL));
383 err_flag = 1;
384 } else
385 err_cnt_qrx = 0;
386 }
387 pre_fq_head = fq_head;
388 pre_fq_tail = fq_tail;
389
390 if (err_flag)
391 return MTK_FE_STOP_TRAFFIC;
392 else
393 return 0;
394}
395
396
/* Detect an ADMA (PDMA) Rx hang.
 *
 * Suspect when the PSE output queue 0 is non-empty, the CDM1 FSM is in a
 * non-idle state, and the ADMA Rx debug register shows an idle current
 * state with the FIFO not ready.  Three consecutive suspect polls dump
 * state and ask for traffic to be stopped.
 *
 * Returns MTK_FE_STOP_TRAFFIC when a hang is confirmed, 0 otherwise.
 */
u32 mtk_monitor_adma_rx(struct mtk_eth *eth)
{
	static u32 err_cnt_arx;
	u32 err_flag = 0;
	u32 opq0 = (mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x1FF) != 0;
	u32 cdm1_fsm = (mtk_r32(eth, MTK_FE_CDM1_FSM) & 0xFFFF0000) != 0;
	u32 cur_stat = ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x1F) == 0);
	u32 fifo_rdy = ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x40) == 0);

	if (opq0 && cdm1_fsm && cur_stat && fifo_rdy) {
		err_cnt_arx++;
		if (err_cnt_arx >= 3) {
			pr_info("ADMA Rx Info\n");
			pr_info("err_cnt = %d", err_cnt_arx);
			pr_info("CDM1_FSM = %d\n",
				mtk_r32(eth, MTK_FE_CDM1_FSM));
			pr_info("MTK_PSE_OQ_STA1 = 0x%x\n",
				mtk_r32(eth, MTK_PSE_OQ_STA(0)));
			pr_info("MTK_ADMA_RX_DBG0 = 0x%x\n",
				mtk_r32(eth, MTK_ADMA_RX_DBG0));
			pr_info("MTK_ADMA_RX_DBG1 = 0x%x\n",
				mtk_r32(eth, MTK_ADMA_RX_DBG1));
			pr_info("==============================\n");
			err_flag = 1;
		}
	} else
		err_cnt_arx = 0;

	if (err_flag)
		return MTK_FE_STOP_TRAFFIC;
	else
		return 0;
}
430
/* Detect a TDMA Tx hang (NETSYS v3 only).
 *
 * NOTE(review): cur_fsm is the boolean "FSM non-idle" flag, not the raw
 * FSM value, so 'cur_fsm == pre_fsm && cur_fsm != 0' reduces to "FSM was
 * non-idle on two consecutive polls" - confirm this matches the intent
 * (the sibling monitors compare raw pointers/counters).
 *
 * Returns MTK_FE_STOP_TRAFFIC when a hang is confirmed, 0 otherwise.
 */
u32 mtk_monitor_tdma_tx(struct mtk_eth *eth)
{
	static u32 err_cnt_ttx;
	static u32 pre_fsm;
	u32 err_flag = 0;
	u32 cur_fsm = 0;
	u32 tx_busy = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		cur_fsm = (mtk_r32(eth, MTK_FE_CDM6_FSM) & 0x1FFF) != 0;
		tx_busy = ((mtk_r32(eth, MTK_TDMA_GLO_CFG) & 0x2) != 0);

		if (cur_fsm == pre_fsm && cur_fsm != 0 && tx_busy) {
			err_cnt_ttx++;
			if (err_cnt_ttx >= 3) {
				pr_info("TDMA Tx Info\n");
				pr_info("err_cnt = %d", err_cnt_ttx);
				pr_info("CDM6_FSM = %d\n",
					mtk_r32(eth, MTK_FE_CDM6_FSM));
				pr_info("DMA CFG = 0x%x\n",
					mtk_r32(eth, MTK_TDMA_GLO_CFG));
				pr_info("==============================\n");
				err_flag = 1;
			}
		} else
			err_cnt_ttx = 0;

		pre_fsm = cur_fsm;
	}

	if (err_flag)
		return MTK_FE_STOP_TRAFFIC;
	else
		return 0;
}
466
/* Detect a TDMA Rx hang (NETSYS v3 only).
 *
 * Mirror of mtk_monitor_tdma_tx() using the Rx halves of the CDM6 FSM
 * field and the TDMA busy bit.  The same NOTE(review) about comparing a
 * boolean "non-idle" flag rather than the raw FSM value applies here.
 *
 * Returns MTK_FE_STOP_TRAFFIC when a hang is confirmed, 0 otherwise.
 */
u32 mtk_monitor_tdma_rx(struct mtk_eth *eth)
{
	static u32 err_cnt_trx;
	static u32 pre_fsm;
	u32 err_flag = 0;
	u32 cur_fsm = 0;
	u32 rx_busy = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		cur_fsm = (mtk_r32(eth, MTK_FE_CDM6_FSM) & 0xFFF0000) != 0;
		rx_busy = ((mtk_r32(eth, MTK_TDMA_GLO_CFG) & 0x8) != 0);

		if (cur_fsm == pre_fsm && cur_fsm != 0 && rx_busy) {
			err_cnt_trx++;
			if (err_cnt_trx >= 3) {
				pr_info("TDMA Rx Info\n");
				pr_info("err_cnt = %d", err_cnt_trx);
				pr_info("CDM6_FSM = %d\n",
					mtk_r32(eth, MTK_FE_CDM6_FSM));
				pr_info("DMA CFG = 0x%x\n",
					mtk_r32(eth, MTK_TDMA_GLO_CFG));
				pr_info("==============================\n");
				err_flag = 1;
			}
		} else
			err_cnt_trx = 0;

		pre_fsm = cur_fsm;
	}

	if (err_flag)
		return MTK_FE_STOP_TRAFFIC;
	else
		return 0;
}
502
/* Hang detectors polled once per second by mtk_dma_monitor(). */
static const mtk_monitor_xdma_func mtk_reset_monitor_func[] = {
	[0] = mtk_monitor_wdma_tx,
	[1] = mtk_monitor_wdma_rx,
	[2] = mtk_monitor_qdma_tx,
	[3] = mtk_monitor_qdma_rx,
	[4] = mtk_monitor_adma_rx,
	[5] = mtk_monitor_tdma_tx,
	[6] = mtk_monitor_tdma_rx,
};
512
513void mtk_dma_monitor(struct timer_list *t)
514{
515 struct mtk_eth *eth = from_timer(eth, t, mtk_dma_monitor_timer);
516 u32 i = 0, ret = 0;
517
518 for (i = 0; i < 6; i++) {
519 ret = (*mtk_reset_monitor_func[i]) (eth);
520 if ((ret == MTK_FE_START_RESET) ||
521 (ret == MTK_FE_STOP_TRAFFIC)) {
developer8051e042022-04-08 13:26:36 +0800522 if ((atomic_read(&reset_lock) == 0) &&
developer0baa6962023-01-31 14:25:23 +0800523 (atomic_read(&force) == 1)) {
developer37482a42022-12-26 13:31:13 +0800524 mtk_reset_flag = ret;
developer8051e042022-04-08 13:26:36 +0800525 schedule_work(&eth->pending_work);
526 }
developer37482a42022-12-26 13:31:13 +0800527 break;
developer8051e042022-04-08 13:26:36 +0800528 }
developer8051e042022-04-08 13:26:36 +0800529 }
530
developer8051e042022-04-08 13:26:36 +0800531 mod_timer(&eth->mtk_dma_monitor_timer, jiffies + 1 * HZ);
532}
533
/* Quiesce the frame engine before a reset: mask interrupts, stop the
 * Linux Tx queues, halt QDMA Tx, disable the per-MAC transmitters and
 * receivers, switch the GDMs to drop-all, and finally stop ADMA Rx.
 */
void mtk_prepare_reset_fe(struct mtk_eth *eth)
{
	u32 i = 0, val = 0, mcr = 0;

	/* Disable NETSYS Interrupt */
	mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
	mtk_w32(eth, 0, MTK_PDMA_INT_MASK);
	mtk_w32(eth, 0, MTK_QDMA_INT_MASK);

	/* Disable Linux netif Tx path */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_tx_disable(eth->netdev[i]);
	}

	/* Disable QDMA Tx */
	val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
	mtk_w32(eth, val & ~(MTK_TX_DMA_EN), MTK_QDMA_GLO_CFG);

	/* Disable Tx/Rx at the MAC level; XGDM (except GMAC1) and GDM
	 * use different MCR registers and bit layouts.
	 */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		pr_info("[%s] i:%d type:%d id:%d\n",
			__func__, i, eth->mac[i]->type, eth->mac[i]->id);
		if (eth->mac[i]->type == MTK_XGDM_TYPE &&
		    eth->mac[i]->id != MTK_GMAC1_ID) {
			mcr = mtk_r32(eth, MTK_XMAC_MCR(eth->mac[i]->id));
			mcr &= 0xfffffff0;
			mcr |= XMAC_MCR_TRX_DISABLE;
			pr_info("disable XMAC TX/RX\n");
			mtk_w32(eth, mcr, MTK_XMAC_MCR(eth->mac[i]->id));
		}

		if (eth->mac[i]->type == MTK_GDM_TYPE) {
			mcr = mtk_r32(eth, MTK_MAC_MCR(eth->mac[i]->id));
			mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
			mtk_w32(eth, mcr, MTK_MAC_MCR(eth->mac[i]->id));
			pr_info("disable GMAC TX/RX\n");
		}
	}

	/* Enable GDM drop */
	for (i = 0; i < MTK_MAC_COUNT; i++)
		mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);

	/* Disable ADMA Rx */
	val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
	mtk_w32(eth, val & ~(MTK_RX_DMA_EN), MTK_PDMA_GLO_CFG);
}
582
/* Quiesce PPE @ppe_id before a reset: stop keep-alive and scan mode,
 * then poll (up to ~5s) for the PPE busy bit to clear.  If it never
 * goes idle, dump the FE and PPE register regions for diagnosis.
 */
void mtk_prepare_reset_ppe(struct mtk_eth *eth, u32 ppe_id)
{
	u32 i = 0, poll_time = 5000, val;

	/* Disable KA */
	mtk_m32(eth, MTK_PPE_KA_CFG_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
	mtk_m32(eth, MTK_PPE_NTU_KA_MASK, 0, MTK_PPE_BIND_LMT_1(ppe_id));
	mtk_w32(eth, 0, MTK_PPE_KA(ppe_id));
	mdelay(10);

	/* Set KA timer to maximum */
	mtk_m32(eth, MTK_PPE_NTU_KA_MASK, (0xFF << 16), MTK_PPE_BIND_LMT_1(ppe_id));
	mtk_w32(eth, 0xFFFFFFFF, MTK_PPE_KA(ppe_id));

	/* Set KA tick select */
	mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, (0x1 << 24), MTK_PPE_TB_CFG(ppe_id));
	mtk_m32(eth, MTK_PPE_KA_CFG_MASK, (0x3 << 12), MTK_PPE_TB_CFG(ppe_id));
	mdelay(10);

	/* Disable scan mode */
	mtk_m32(eth, MTK_PPE_SCAN_MODE_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
	mdelay(10);

	/* Check PPE idle */
	while (i++ < poll_time) {
		val = mtk_r32(eth, MTK_PPE_GLO_CFG(ppe_id));
		if (!(val & MTK_PPE_BUSY))
			break;
		mdelay(1);
	}

	if (i >= poll_time) {
		pr_info("[%s] PPE keeps busy !\n", __func__);
		mtk_dump_reg(eth, "FE", 0x0, 0x500);
		mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
	}
}
620
621static int mtk_eth_netdevice_event(struct notifier_block *unused,
622 unsigned long event, void *ptr)
623{
624 switch (event) {
625 case MTK_WIFI_RESET_DONE:
developer37482a42022-12-26 13:31:13 +0800626 case MTK_FE_STOP_TRAFFIC_DONE:
developer7979ddb2023-04-24 17:19:21 +0800627 pr_info("%s rcv done event:%x\n", __func__, event);
developerbe971722022-05-23 13:51:05 +0800628 mtk_rest_cnt--;
629 if(!mtk_rest_cnt) {
630 complete(&wait_ser_done);
631 mtk_rest_cnt = mtk_wifi_num;
632 }
633 break;
634 case MTK_WIFI_CHIP_ONLINE:
635 mtk_wifi_num++;
636 mtk_rest_cnt = mtk_wifi_num;
637 break;
638 case MTK_WIFI_CHIP_OFFLINE:
639 mtk_wifi_num--;
640 mtk_rest_cnt = mtk_wifi_num;
developer8051e042022-04-08 13:26:36 +0800641 break;
developer7979ddb2023-04-24 17:19:21 +0800642 case MTK_FE_STOP_TRAFFIC_DONE_FAIL:
643 mtk_stop_fail = true;
644 mtk_reset_flag = MTK_FE_START_RESET;
645 pr_info("%s rcv done event:%x\n", __func__, event);
646 complete(&wait_ser_done);
647 mtk_rest_cnt = mtk_wifi_num;
648 break;
developer8051e042022-04-08 13:26:36 +0800649 default:
650 break;
651 }
652
653 return NOTIFY_DONE;
654}
655
/* Notifier block registered by the driver core to receive the SER
 * coordination events handled in mtk_eth_netdevice_event().
 */
struct notifier_block mtk_eth_netdevice_nb __read_mostly = {
	.notifier_call = mtk_eth_netdevice_event,
};