blob: a8aeccc1145890a7633240d0dfe376d203e1ba2e [file] [log] [blame]
developer8051e042022-04-08 13:26:36 +08001/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright (c) 2022 MediaTek Inc.
4 * Author: Henry Yen <henry.yen@mediatek.com>
5 */
6
7#include <linux/regmap.h>
8#include "mtk_eth_soc.h"
9#include "mtk_eth_dbg.h"
10#include "mtk_eth_reset.h"
11
12char* mtk_reset_event_name[32] = {
13 [MTK_EVENT_FORCE] = "Force",
14 [MTK_EVENT_WARM_CNT] = "Warm",
15 [MTK_EVENT_COLD_CNT] = "Cold",
16 [MTK_EVENT_TOTAL_CNT] = "Total",
17 [MTK_EVENT_FQ_EMPTY] = "FQ Empty",
18 [MTK_EVENT_TSO_FAIL] = "TSO Fail",
19 [MTK_EVENT_TSO_ILLEGAL] = "TSO Illegal",
20 [MTK_EVENT_TSO_ALIGN] = "TSO Align",
21 [MTK_EVENT_RFIFO_OV] = "RFIFO OV",
22 [MTK_EVENT_RFIFO_UF] = "RFIFO UF",
23};
24
25void mtk_reset_event_update(struct mtk_eth *eth, u32 id)
26{
27 struct mtk_reset_event *reset_event = &eth->reset_event;
28 reset_event->count[id]++;
29}
30
31int mtk_eth_cold_reset(struct mtk_eth *eth)
32{
33 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
34 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
35
36 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
37 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE0 | RSTCTRL_PPE1);
38 else
39 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE0);
40
41 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
42 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0x3ffffff);
43
44 return 0;
45}
46
/* Perform a warm reset of the frame engine.
 *
 * Sequence: assert the FE reset bit, wait for the hardware to latch it,
 * then assert ETH/PPE resets and finally deassert everything together.
 * Each step is verified by reading ETHSYS_RSTCTRL back; mismatches are
 * logged but do not abort the sequence. If the FE bit never latches,
 * the function falls back to a cold reset. Always returns 0.
 */
int mtk_eth_warm_reset(struct mtk_eth *eth)
{
	u32 reset_bits = 0, i = 0, done = 0;
	u32 val1 = 0, val2 = 0, val3 = 0;

	/* Settle time before touching the reset controller */
	mdelay(100);

	/* Step 1: assert the FE reset bit */
	reset_bits |= RSTCTRL_FE;
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits, reset_bits);

	/* Poll up to ~1ms for the FE reset bit to read back as set */
	while (i < 1000) {
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val1);
		if (val1 & RSTCTRL_FE)
			break;
		i++;
		udelay(1);
	}

	if (i < 1000) {
		/* Step 2: with FE held in reset, assert ETH/PPE resets */
		reset_bits = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
			reset_bits |= RSTCTRL_ETH | RSTCTRL_PPE0 | RSTCTRL_PPE1;
		else
			reset_bits |= RSTCTRL_ETH | RSTCTRL_PPE0;

		regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
				   reset_bits, reset_bits);

		/* Verify the new reset bits latched */
		udelay(1);
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val2);
		if (!(val2 & reset_bits))
			pr_info("[%s] error val2=0x%x reset_bits=0x%x !\n",
				__func__, val2, reset_bits);

		/* Step 3: deassert FE + ETH/PPE resets in one update
		 * (mask = reset_bits, value = ~reset_bits clears them)
		 */
		reset_bits |= RSTCTRL_FE;
		regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
				   reset_bits, ~reset_bits);

		/* Verify all reset bits cleared */
		udelay(1);
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val3);
		if (val3 & reset_bits)
			pr_info("[%s] error val3=0x%x reset_bits=0x%x !\n",
				__func__, val3, reset_bits);
		done = 1;
		mtk_reset_event_update(eth, MTK_EVENT_WARM_CNT);
	}

	pr_info("[%s] reset record val1=0x%x, val2=0x%x, val3=0x%x !\n",
		__func__, val1, val2, val3);

	/* FE reset bit never latched: warm reset impossible, go cold */
	if (!done)
		mtk_eth_cold_reset(eth);

	return 0;
}
103
104u32 mtk_check_reset_event(struct mtk_eth *eth, u32 status)
105{
106 u32 ret = 0, val = 0;
107
108 if ((status & MTK_FE_INT_FQ_EMPTY) ||
109 (status & MTK_FE_INT_RFIFO_UF) ||
110 (status & MTK_FE_INT_RFIFO_OV) ||
111 (status & MTK_FE_INT_TSO_FAIL) ||
112 (status & MTK_FE_INT_TSO_ALIGN) ||
113 (status & MTK_FE_INT_TSO_ILLEGAL)) {
114 while (status) {
115 val = ffs((unsigned int)status) - 1;
116 mtk_reset_event_update(eth, val);
117 status &= ~(1 << val);
118 }
119 ret = 1;
120 }
121
122 if (atomic_read(&force)) {
123 mtk_reset_event_update(eth, MTK_EVENT_FORCE);
124 ret = 1;
125 }
126
127 if (ret) {
128 mtk_reset_event_update(eth, MTK_EVENT_TOTAL_CNT);
129 mtk_dump_netsys_info(eth);
130 }
131
132 return ret;
133}
134
135irqreturn_t mtk_handle_fe_irq(int irq, void *_eth)
136{
137 struct mtk_eth *eth = _eth;
138 u32 status = 0, val = 0;
139
140 status = mtk_r32(eth, MTK_FE_INT_STATUS);
141 pr_info("[%s] Trigger FE Misc ISR: 0x%x\n", __func__, status);
142
143 while (status) {
144 val = ffs((unsigned int)status) - 1;
145 status &= ~(1 << val);
developera7ee5fe2022-04-21 17:45:57 +0800146
147 if ((val == MTK_EVENT_FQ_EMPTY) ||
148 (val == MTK_EVENT_TSO_FAIL) ||
149 (val == MTK_EVENT_TSO_ILLEGAL) ||
150 (val == MTK_EVENT_TSO_ALIGN) ||
151 (val == MTK_EVENT_RFIFO_OV) ||
152 (val == MTK_EVENT_RFIFO_UF))
153 pr_info("[%s] Detect reset event: %s !\n", __func__,
154 mtk_reset_event_name[val]);
developer8051e042022-04-08 13:26:36 +0800155 }
developera7ee5fe2022-04-21 17:45:57 +0800156 mtk_w32(eth, 0xFFFFFFFF, MTK_FE_INT_STATUS);
developer8051e042022-04-08 13:26:36 +0800157
158 return IRQ_HANDLED;
159}
160
161static void mtk_dump_reg(void *_eth, char *name, u32 offset, u32 range)
162{
163 struct mtk_eth *eth = _eth;
164 u32 cur = offset;
165
166 pr_info("\n============ %s ============\n", name);
167 while(cur < offset + range) {
168 pr_info("0x%x: %08x %08x %08x %08x\n",
169 cur, mtk_r32(eth, cur), mtk_r32(eth, cur + 0x4),
170 mtk_r32(eth, cur + 0x8), mtk_r32(eth, cur + 0xc));
171 cur += 0x10;
172 }
173}
174
175void mtk_dump_netsys_info(void *_eth)
176{
177 struct mtk_eth *eth = _eth;
178
179 mtk_dump_reg(eth, "FE", 0x0, 0x500);
180 mtk_dump_reg(eth, "ADMA", PDMA_BASE, 0x300);
181 mtk_dump_reg(eth, "QDMA", QDMA_BASE, 0x400);
182 mtk_dump_reg(eth, "WDMA", WDMA_BASE(0), 0x600);
183 mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
184 mtk_dump_reg(eth, "GMAC", 0x10000, 0x300);
185}
186
187void mtk_dma_monitor(struct timer_list *t)
188{
189 struct mtk_eth *eth = from_timer(eth, t, mtk_dma_monitor_timer);
190 static u32 timestamp = 0;
191 static u32 err_cnt1 = 0, err_cnt2 = 0, err_cnt3 = 0;
192 static u32 prev_wdidx = 0;
193 u32 cur_wdidx = mtk_r32(eth, MTK_WDMA_DTX_PTR(0));
194 u32 is_wtx_busy = mtk_r32(eth, MTK_WDMA_GLO_CFG(0)) & MTK_TX_DMA_BUSY;
195 u32 is_oq_free = ((mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x01FF0000) == 0) &&
196 ((mtk_r32(eth, MTK_PSE_OQ_STA(1)) & 0x000001FF) == 0) &&
197 ((mtk_r32(eth, MTK_PSE_OQ_STA(4)) & 0x01FF0000) == 0);
198 u32 is_cdm_full =
199 !(mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(0)) & MTK_CDM_TXFIFO_RDY);
200 u32 is_qfsm_hang = mtk_r32(eth, MTK_QDMA_FSM) != 0;
201 u32 is_qfwd_hang = mtk_r32(eth, MTK_QDMA_FWD_CNT) == 0;
202 u32 is_qfq_hang = mtk_r32(eth, MTK_QDMA_FQ_CNT) !=
203 ((MTK_DMA_SIZE << 16) | MTK_DMA_SIZE);
204 u32 is_oq0_stuck = (mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x1FF) != 0;
205 u32 is_cdm1_busy = (mtk_r32(eth, MTK_FE_CDM1_FSM) & 0xFFFF0000) != 0;
206 u32 is_adma_busy = ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x1F) == 0) &&
207 ((mtk_r32(eth, MTK_ADMA_RX_DBG1) & 0x3F0000) == 0) &&
developera1729cd2022-05-11 13:42:14 +0800208 ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x40) == 0);
developer8051e042022-04-08 13:26:36 +0800209
210 if (cur_wdidx == prev_wdidx && is_wtx_busy &&
211 is_oq_free && is_cdm_full) {
212 err_cnt1++;
213 if (err_cnt1 == 3) {
214 pr_info("WDMA CDM Hang !\n");
215 pr_info("============== Time: %d ================\n",
216 timestamp);
217 pr_info("err_cnt1 = %d", err_cnt1);
218 pr_info("prev_wdidx = 0x%x | cur_wdidx = 0x%x\n",
219 prev_wdidx, cur_wdidx);
220 pr_info("is_wtx_busy = %d | is_oq_free = %d | is_cdm_full = %d\n",
221 is_wtx_busy, is_oq_free, is_cdm_full);
222 pr_info("-- -- -- -- -- -- --\n");
223 pr_info("WDMA_CTX_PTR = 0x%x\n", mtk_r32(eth, 0x4808));
224 pr_info("WDMA_DTX_PTR = 0x%x\n",
225 mtk_r32(eth, MTK_WDMA_DTX_PTR(0)));
226 pr_info("WDMA_GLO_CFG = 0x%x\n",
227 mtk_r32(eth, MTK_WDMA_GLO_CFG(0)));
228 pr_info("WDMA_TX_DBG_MON0 = 0x%x\n",
229 mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(0)));
230 pr_info("PSE_OQ_STA1 = 0x%x\n",
231 mtk_r32(eth, MTK_PSE_OQ_STA(0)));
232 pr_info("PSE_OQ_STA2 = 0x%x\n",
233 mtk_r32(eth, MTK_PSE_OQ_STA(1)));
234 pr_info("PSE_OQ_STA5 = 0x%x\n",
235 mtk_r32(eth, MTK_PSE_OQ_STA(4)));
236 pr_info("==============================\n");
237
238 if ((atomic_read(&reset_lock) == 0) &&
239 (atomic_read(&force) == 0)){
240 atomic_inc(&force);
241 schedule_work(&eth->pending_work);
242 }
243 }
244 } else if (is_qfsm_hang && is_qfwd_hang) {
245 err_cnt2++;
246 if (err_cnt2 == 3) {
247 pr_info("QDMA Tx Hang !\n");
248 pr_info("============== Time: %d ================\n",
249 timestamp);
250 pr_info("err_cnt2 = %d", err_cnt2);
251 pr_info("is_qfsm_hang = %d\n", is_qfsm_hang);
252 pr_info("is_qfwd_hang = %d\n", is_qfwd_hang);
253 pr_info("is_qfq_hang = %d\n", is_qfq_hang);
254 pr_info("-- -- -- -- -- -- --\n");
255 pr_info("MTK_QDMA_FSM = 0x%x\n",
256 mtk_r32(eth, MTK_QDMA_FSM));
257 pr_info("MTK_QDMA_FWD_CNT = 0x%x\n",
258 mtk_r32(eth, MTK_QDMA_FWD_CNT));
259 pr_info("MTK_QDMA_FQ_CNT = 0x%x\n",
260 mtk_r32(eth, MTK_QDMA_FQ_CNT));
261 pr_info("==============================\n");
262
263 if ((atomic_read(&reset_lock) == 0) &&
264 (atomic_read(&force) == 0)){
265 atomic_inc(&force);
266 schedule_work(&eth->pending_work);
267 }
268 }
269 } else if (is_oq0_stuck && is_cdm1_busy && is_adma_busy) {
270 err_cnt3++;
271 if (err_cnt3 == 3) {
272 pr_info("ADMA Rx Hang !\n");
273 pr_info("============== Time: %d ================\n",
274 timestamp);
275 pr_info("err_cnt3 = %d", err_cnt3);
276 pr_info("is_oq0_stuck = %d\n", is_oq0_stuck);
277 pr_info("is_cdm1_busy = %d\n", is_cdm1_busy);
278 pr_info("is_adma_busy = %d\n", is_adma_busy);
279 pr_info("-- -- -- -- -- -- --\n");
280 pr_info("MTK_PSE_OQ_STA1 = 0x%x\n",
281 mtk_r32(eth, MTK_PSE_OQ_STA(0)));
282 pr_info("MTK_ADMA_RX_DBG0 = 0x%x\n",
283 mtk_r32(eth, MTK_ADMA_RX_DBG0));
284 pr_info("MTK_ADMA_RX_DBG1 = 0x%x\n",
285 mtk_r32(eth, MTK_ADMA_RX_DBG1));
286 pr_info("==============================\n");
287 if ((atomic_read(&reset_lock) == 0) &&
288 (atomic_read(&force) == 0)){
289 atomic_inc(&force);
290 schedule_work(&eth->pending_work);
291 }
292 }
293 } else {
294 err_cnt1 = 0;
295 err_cnt2 = 0;
296 err_cnt3 = 0;
297 }
298
299 prev_wdidx = cur_wdidx;
300 mod_timer(&eth->mtk_dma_monitor_timer, jiffies + 1 * HZ);
301}
302
303void mtk_prepare_reset_fe(struct mtk_eth *eth)
304{
305 u32 i = 0, val = 0;
306
307 /* Disable NETSYS Interrupt */
308 mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
309 mtk_w32(eth, 0, MTK_PDMA_INT_MASK);
310 mtk_w32(eth, 0, MTK_QDMA_INT_MASK);
311
312 /* Disable Linux netif Tx path */
313 for (i = 0; i < MTK_MAC_COUNT; i++) {
314 if (!eth->netdev[i])
315 continue;
316 netif_tx_disable(eth->netdev[i]);
317 }
318
319 /* Disable QDMA Tx */
320 val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
321 mtk_w32(eth, val & ~(MTK_TX_DMA_EN), MTK_QDMA_GLO_CFG);
322
323 /* Power down sgmii */
324 regmap_read(eth->sgmii->regmap[0], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
325 val |= SGMII_PHYA_PWD;
326 regmap_write(eth->sgmii->regmap[0], SGMSYS_QPHY_PWR_STATE_CTRL, val);
327 regmap_read(eth->sgmii->regmap[1], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
328 val |= SGMII_PHYA_PWD;
329 regmap_write(eth->sgmii->regmap[1], SGMSYS_QPHY_PWR_STATE_CTRL, val);
330
331 /* Force link down GMAC */
332 val = mtk_r32(eth, MTK_MAC_MCR(0));
333 mtk_w32(eth, val & ~(MAC_MCR_FORCE_LINK), MTK_MAC_MCR(0));
334 val = mtk_r32(eth, MTK_MAC_MCR(1));
335 mtk_w32(eth, val & ~(MAC_MCR_FORCE_LINK), MTK_MAC_MCR(1));
336
337 /* Disable GMAC Rx */
338 val = mtk_r32(eth, MTK_MAC_MCR(0));
339 mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(0));
340 val = mtk_r32(eth, MTK_MAC_MCR(1));
341 mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(1));
342
343 /* Enable GDM drop */
344 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
345
346 /* Disable ADMA Rx */
347 val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
348 mtk_w32(eth, val & ~(MTK_RX_DMA_EN), MTK_PDMA_GLO_CFG);
349}
350
351void mtk_prepare_reset_ppe(struct mtk_eth *eth, u32 ppe_id)
352{
353 u32 i = 0, poll_time = 5000, val;
354
355 /* Disable KA */
356 mtk_m32(eth, MTK_PPE_KA_CFG_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
357 mtk_m32(eth, MTK_PPE_NTU_KA_MASK, 0, MTK_PPE_BIND_LMT_1(ppe_id));
358 mtk_w32(eth, 0, MTK_PPE_KA(ppe_id));
359 mdelay(10);
360
361 /* Set KA timer to maximum */
362 mtk_m32(eth, MTK_PPE_NTU_KA_MASK, (0xFF << 16), MTK_PPE_BIND_LMT_1(ppe_id));
363 mtk_w32(eth, 0xFFFFFFFF, MTK_PPE_KA(ppe_id));
364
365 /* Set KA tick select */
366 mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, (0x1 << 24), MTK_PPE_TB_CFG(ppe_id));
367 mtk_m32(eth, MTK_PPE_KA_CFG_MASK, (0x3 << 12), MTK_PPE_TB_CFG(ppe_id));
368 mdelay(10);
369
370 /* Disable scan mode */
371 mtk_m32(eth, MTK_PPE_SCAN_MODE_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
372 mdelay(10);
373
374 /* Check PPE idle */
375 while (i++ < poll_time) {
376 val = mtk_r32(eth, MTK_PPE_GLO_CFG(ppe_id));
377 if (!(val & MTK_PPE_BUSY))
378 break;
379 mdelay(1);
380 }
381
382 if (i >= poll_time) {
383 pr_info("[%s] PPE keeps busy !\n", __func__);
384 mtk_dump_reg(eth, "FE", 0x0, 0x500);
385 mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
386 }
387}
388
389static int mtk_eth_netdevice_event(struct notifier_block *unused,
390 unsigned long event, void *ptr)
391{
392 switch (event) {
393 case MTK_WIFI_RESET_DONE:
394 complete(&wait_ser_done);
395 break;
396 default:
397 break;
398 }
399
400 return NOTIFY_DONE;
401}
402
/* Notifier block through which external (WiFi) reset-done events reach
 * mtk_eth_netdevice_event(); registered elsewhere in the driver.
 */
struct notifier_block mtk_eth_netdevice_nb __read_mostly = {
	.notifier_call = mtk_eth_netdevice_event,
};