blob: e983d6ece83af3b32893d1d119a6f8cbb027cc0f [file] [log] [blame]
developer8051e042022-04-08 13:26:36 +08001/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright (c) 2022 MediaTek Inc.
4 * Author: Henry Yen <henry.yen@mediatek.com>
5 */
6
7#include <linux/regmap.h>
8#include "mtk_eth_soc.h"
9#include "mtk_eth_dbg.h"
10#include "mtk_eth_reset.h"
11
12char* mtk_reset_event_name[32] = {
13 [MTK_EVENT_FORCE] = "Force",
14 [MTK_EVENT_WARM_CNT] = "Warm",
15 [MTK_EVENT_COLD_CNT] = "Cold",
16 [MTK_EVENT_TOTAL_CNT] = "Total",
17 [MTK_EVENT_FQ_EMPTY] = "FQ Empty",
18 [MTK_EVENT_TSO_FAIL] = "TSO Fail",
19 [MTK_EVENT_TSO_ILLEGAL] = "TSO Illegal",
20 [MTK_EVENT_TSO_ALIGN] = "TSO Align",
21 [MTK_EVENT_RFIFO_OV] = "RFIFO OV",
22 [MTK_EVENT_RFIFO_UF] = "RFIFO UF",
23};
24
/* Number of WiFi chips currently registered for reset coordination. */
static int mtk_wifi_num;
/* Reset-done acks still outstanding before wait_ser_done is completed. */
static int mtk_rest_cnt;
27
developer8051e042022-04-08 13:26:36 +080028void mtk_reset_event_update(struct mtk_eth *eth, u32 id)
29{
30 struct mtk_reset_event *reset_event = &eth->reset_event;
31 reset_event->count[id]++;
32}
33
34int mtk_eth_cold_reset(struct mtk_eth *eth)
35{
developer089e8852022-09-28 14:43:46 +080036 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
37 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer8051e042022-04-08 13:26:36 +080038 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
39
40 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
41 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE0 | RSTCTRL_PPE1);
42 else
43 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE0);
44
developer089e8852022-09-28 14:43:46 +080045 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
46 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer8051e042022-04-08 13:26:36 +080047 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0x3ffffff);
48
49 return 0;
50}
51
/* Warm (partial) reset of the frame engine.
 *
 * Sequence: assert RSTCTRL_FE, poll until the bit reads back set, then
 * assert the remaining block resets (ETH/PPE0, plus PPE1 when present),
 * and finally de-assert everything together.  If the FE bit never
 * latches, fall back to a full cold reset.  Always returns 0.
 */
int mtk_eth_warm_reset(struct mtk_eth *eth)
{
	u32 reset_bits = 0, i = 0, done = 0;
	u32 val1 = 0, val2 = 0, val3 = 0;

	/* Let in-flight traffic drain before touching RSTCTRL. */
	mdelay(100);

	reset_bits |= RSTCTRL_FE;
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits, reset_bits);

	/* Poll up to ~1ms for the FE reset bit to read back as set. */
	while (i < 1000) {
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val1);
		if (val1 & RSTCTRL_FE)
			break;
		i++;
		udelay(1);
	}

	if (i < 1000) {
		reset_bits = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
			reset_bits |= RSTCTRL_ETH | RSTCTRL_PPE0 | RSTCTRL_PPE1;
		else
			reset_bits |= RSTCTRL_ETH | RSTCTRL_PPE0;

		regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
				   reset_bits, reset_bits);

		udelay(1);
		/* Verify the block-reset bits actually asserted. */
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val2);
		if (!(val2 & reset_bits))
			pr_info("[%s] error val2=0x%x reset_bits=0x%x !\n",
				__func__, val2, reset_bits);
		/* De-assert FE together with the block resets set above. */
		reset_bits |= RSTCTRL_FE;
		regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
				   reset_bits, ~reset_bits);

		udelay(1);
		/* Verify everything de-asserted. */
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val3);
		if (val3 & reset_bits)
			pr_info("[%s] error val3=0x%x reset_bits=0x%x !\n",
				__func__, val3, reset_bits);
		done = 1;
		mtk_reset_event_update(eth, MTK_EVENT_WARM_CNT);
	}

	pr_info("[%s] reset record val1=0x%x, val2=0x%x, val3=0x%x !\n",
		__func__, val1, val2, val3);

	/* Warm reset did not take effect - escalate to a cold reset. */
	if (!done)
		mtk_eth_cold_reset(eth);

	return 0;
}
108
109u32 mtk_check_reset_event(struct mtk_eth *eth, u32 status)
110{
111 u32 ret = 0, val = 0;
112
113 if ((status & MTK_FE_INT_FQ_EMPTY) ||
114 (status & MTK_FE_INT_RFIFO_UF) ||
115 (status & MTK_FE_INT_RFIFO_OV) ||
116 (status & MTK_FE_INT_TSO_FAIL) ||
117 (status & MTK_FE_INT_TSO_ALIGN) ||
118 (status & MTK_FE_INT_TSO_ILLEGAL)) {
119 while (status) {
120 val = ffs((unsigned int)status) - 1;
121 mtk_reset_event_update(eth, val);
122 status &= ~(1 << val);
123 }
124 ret = 1;
125 }
126
127 if (atomic_read(&force)) {
128 mtk_reset_event_update(eth, MTK_EVENT_FORCE);
129 ret = 1;
130 }
131
132 if (ret) {
133 mtk_reset_event_update(eth, MTK_EVENT_TOTAL_CNT);
134 mtk_dump_netsys_info(eth);
135 }
136
137 return ret;
138}
139
140irqreturn_t mtk_handle_fe_irq(int irq, void *_eth)
141{
142 struct mtk_eth *eth = _eth;
143 u32 status = 0, val = 0;
144
145 status = mtk_r32(eth, MTK_FE_INT_STATUS);
146 pr_info("[%s] Trigger FE Misc ISR: 0x%x\n", __func__, status);
147
148 while (status) {
149 val = ffs((unsigned int)status) - 1;
150 status &= ~(1 << val);
developera7ee5fe2022-04-21 17:45:57 +0800151
developerbe971722022-05-23 13:51:05 +0800152 if ((val == MTK_EVENT_TSO_FAIL) ||
developera7ee5fe2022-04-21 17:45:57 +0800153 (val == MTK_EVENT_TSO_ILLEGAL) ||
154 (val == MTK_EVENT_TSO_ALIGN) ||
155 (val == MTK_EVENT_RFIFO_OV) ||
156 (val == MTK_EVENT_RFIFO_UF))
157 pr_info("[%s] Detect reset event: %s !\n", __func__,
158 mtk_reset_event_name[val]);
developer8051e042022-04-08 13:26:36 +0800159 }
developera7ee5fe2022-04-21 17:45:57 +0800160 mtk_w32(eth, 0xFFFFFFFF, MTK_FE_INT_STATUS);
developer8051e042022-04-08 13:26:36 +0800161
162 return IRQ_HANDLED;
163}
164
165static void mtk_dump_reg(void *_eth, char *name, u32 offset, u32 range)
166{
167 struct mtk_eth *eth = _eth;
168 u32 cur = offset;
169
170 pr_info("\n============ %s ============\n", name);
171 while(cur < offset + range) {
172 pr_info("0x%x: %08x %08x %08x %08x\n",
173 cur, mtk_r32(eth, cur), mtk_r32(eth, cur + 0x4),
174 mtk_r32(eth, cur + 0x8), mtk_r32(eth, cur + 0xc));
175 cur += 0x10;
176 }
177}
178
/* Dump the NETSYS register blocks (FE, ADMA, every QDMA page bank, WDMA,
 * PPE, GMAC) for debugging.  Called from the reset-event path.
 */
void mtk_dump_netsys_info(void *_eth)
{
	struct mtk_eth *eth = _eth;
	u32 id = 0;

	mtk_dump_reg(eth, "FE", 0x0, 0x500);
	mtk_dump_reg(eth, "ADMA", PDMA_BASE, 0x300);
	/* QDMA registers are banked: select each page, dump it, then
	 * restore page 0 so later accesses see the default bank.
	 */
	for (id = 0; id < MTK_QDMA_PAGE_NUM; id++){
		mtk_w32(eth, id, MTK_QDMA_PAGE);
		pr_info("\nQDMA PAGE:%x ",mtk_r32(eth, MTK_QDMA_PAGE));
		mtk_dump_reg(eth, "QDMA", QDMA_BASE, 0x100);
		mtk_w32(eth, 0, MTK_QDMA_PAGE);
	}
	mtk_dump_reg(eth, "QDMA", MTK_QRX_BASE_PTR0, 0x300);
	mtk_dump_reg(eth, "WDMA", WDMA_BASE(0), 0x600);
	mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
	mtk_dump_reg(eth, "GMAC", 0x10000, 0x300);
}
197
/* One-second watchdog timer: samples WDMA/QDMA/ADMA state machines and,
 * when the same hang signature persists for 3 consecutive ticks, raises
 * the 'force' flag and schedules eth->pending_work to run the reset.
 * Re-arms itself at the end.
 *
 * NOTE(review): 'timestamp' is printed in every report but never
 * advanced anywhere in this function, so it always reads 0 - confirm
 * whether that is intended.
 */
void mtk_dma_monitor(struct timer_list *t)
{
	struct mtk_eth *eth = from_timer(eth, t, mtk_dma_monitor_timer);
	static u32 timestamp = 0;
	/* Consecutive-hit counters, one per hang signature below. */
	static u32 err_cnt1 = 0, err_cnt2 = 0, err_cnt3 = 0;
	/* WDMA Tx descriptor index seen on the previous tick. */
	static u32 prev_wdidx = 0;
	unsigned int mib_base = MTK_GDM1_TX_GBCNT;

	/*wdma tx path*/
	u32 cur_wdidx = mtk_r32(eth, MTK_WDMA_DTX_PTR(0));
	u32 is_wtx_busy = mtk_r32(eth, MTK_WDMA_GLO_CFG(0)) & MTK_TX_DMA_BUSY;
	u32 is_oq_free = ((mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x01FF0000) == 0) &&
			 ((mtk_r32(eth, MTK_PSE_OQ_STA(1)) & 0x000001FF) == 0) &&
			 ((mtk_r32(eth, MTK_PSE_OQ_STA(4)) & 0x01FF0000) == 0);
	u32 is_cdm_full =
		!(mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(0)) & MTK_CDM_TXFIFO_RDY);
	/*qdma tx path*/
	u32 is_qfsm_hang = mtk_r32(eth, MTK_QDMA_FSM) != 0;
	u32 is_qfwd_hang = mtk_r32(eth, MTK_QDMA_FWD_CNT) == 0;
	u32 is_qfq_hang = mtk_r32(eth, MTK_QDMA_FQ_CNT) !=
			  ((MTK_DMA_SIZE << 16) | MTK_DMA_SIZE);
	u32 is_gdm1_tx = (mtk_r32(eth, MTK_FE_GDM1_FSM) & 0xFFFF0000) > 0;
	u32 is_gdm2_tx = (mtk_r32(eth, MTK_FE_GDM2_FSM) & 0xFFFF0000) > 0;
	u32 is_gmac1_tx = (mtk_r32(eth, MTK_MAC_FSM(0)) & 0xFF000000) != 0x1000000;
	u32 is_gmac2_tx = (mtk_r32(eth, MTK_MAC_FSM(1)) & 0xFF000000) != 0x1000000;
	u32 gdm1_fc = mtk_r32(eth, mib_base+0x24);
	u32 gdm2_fc = mtk_r32(eth, mib_base+0x64);
	/*adma rx path*/
	u32 is_oq0_stuck = (mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x1FF) != 0;
	u32 is_cdm1_busy = (mtk_r32(eth, MTK_FE_CDM1_FSM) & 0xFFFF0000) != 0;
	u32 is_adma_busy = ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x1F) == 0) &&
			   ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x40) == 0);

	/* Signature 1: WDMA Tx pointer frozen while DMA is busy. */
	if (cur_wdidx == prev_wdidx && is_wtx_busy &&
	    is_oq_free && is_cdm_full) {
		err_cnt1++;
		if (err_cnt1 >= 3) {
			pr_info("WDMA CDM Info\n");
			pr_info("============== Time: %d ================\n",
				timestamp);
			pr_info("err_cnt1 = %d", err_cnt1);
			pr_info("prev_wdidx = 0x%x | cur_wdidx = 0x%x\n",
				prev_wdidx, cur_wdidx);
			pr_info("is_wtx_busy = %d | is_oq_free = %d | is_cdm_full = %d\n",
				is_wtx_busy, is_oq_free, is_cdm_full);
			pr_info("-- -- -- -- -- -- --\n");
			pr_info("WDMA_CTX_PTR = 0x%x\n", mtk_r32(eth, 0x4808));
			pr_info("WDMA_DTX_PTR = 0x%x\n",
				mtk_r32(eth, MTK_WDMA_DTX_PTR(0)));
			pr_info("WDMA_GLO_CFG = 0x%x\n",
				mtk_r32(eth, MTK_WDMA_GLO_CFG(0)));
			pr_info("WDMA_TX_DBG_MON0 = 0x%x\n",
				mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(0)));
			pr_info("PSE_OQ_STA1 = 0x%x\n",
				mtk_r32(eth, MTK_PSE_OQ_STA(0)));
			pr_info("PSE_OQ_STA2 = 0x%x\n",
				mtk_r32(eth, MTK_PSE_OQ_STA(1)));
			pr_info("PSE_OQ_STA5 = 0x%x\n",
				mtk_r32(eth, MTK_PSE_OQ_STA(4)));
			pr_info("==============================\n");

			/* Trigger SER only if no reset is in flight. */
			if ((atomic_read(&reset_lock) == 0) &&
			    (atomic_read(&force) == 0)){
				atomic_inc(&force);
				schedule_work(&eth->pending_work);
			}
		}
	/* Signature 2: QDMA Tx forward path stalled. */
	} else if (is_qfsm_hang && is_qfwd_hang &&
		   ((is_gdm1_tx && is_gmac1_tx && (gdm1_fc < 1)) || (is_gdm2_tx && is_gmac2_tx && (gdm2_fc < 1)))) {
		err_cnt2++;
		if (err_cnt2 >= 3) {
			pr_info("QDMA Tx Info\n");
			pr_info("============== Time: %d ================\n",
				timestamp);
			pr_info("err_cnt2 = %d", err_cnt2);
			pr_info("is_qfsm_hang = %d\n", is_qfsm_hang);
			pr_info("is_qfwd_hang = %d\n", is_qfwd_hang);
			pr_info("is_qfq_hang = %d\n", is_qfq_hang);
			pr_info("-- -- -- -- -- -- --\n");
			pr_info("MTK_QDMA_FSM = 0x%x\n",
				mtk_r32(eth, MTK_QDMA_FSM));
			pr_info("MTK_QDMA_FWD_CNT = 0x%x\n",
				mtk_r32(eth, MTK_QDMA_FWD_CNT));
			pr_info("MTK_QDMA_FQ_CNT = 0x%x\n",
				mtk_r32(eth, MTK_QDMA_FQ_CNT));
			pr_info("GDM1 FC = 0x%x\n",gdm1_fc);
			pr_info("GDM2 FC = 0x%x\n",gdm2_fc);
			pr_info("==============================\n");

			/* Trigger SER only if no reset is in flight. */
			if ((atomic_read(&reset_lock) == 0) &&
			    (atomic_read(&force) == 0)){
				atomic_inc(&force);
				schedule_work(&eth->pending_work);
			}
		}
	/* Signature 3: ADMA Rx path stuck. */
	} else if (is_oq0_stuck && is_cdm1_busy && is_adma_busy) {
		err_cnt3++;
		if (err_cnt3 >= 3) {
			pr_info("ADMA Rx Info\n");
			pr_info("============== Time: %d ================\n",
				timestamp);
			pr_info("err_cnt3 = %d", err_cnt3);
			pr_info("is_oq0_stuck = %d\n", is_oq0_stuck);
			pr_info("is_cdm1_busy = %d\n", is_cdm1_busy);
			pr_info("is_adma_busy = %d\n", is_adma_busy);
			pr_info("-- -- -- -- -- -- --\n");
			pr_info("MTK_PSE_OQ_STA1 = 0x%x\n",
				mtk_r32(eth, MTK_PSE_OQ_STA(0)));
			pr_info("MTK_ADMA_RX_DBG0 = 0x%x\n",
				mtk_r32(eth, MTK_ADMA_RX_DBG0));
			pr_info("MTK_ADMA_RX_DBG1 = 0x%x\n",
				mtk_r32(eth, MTK_ADMA_RX_DBG1));
			pr_info("==============================\n");
			/* Trigger SER only if no reset is in flight. */
			if ((atomic_read(&reset_lock) == 0) &&
			    (atomic_read(&force) == 0)){
				atomic_inc(&force);
				schedule_work(&eth->pending_work);
			}
		}
	}else {
		/* Nothing matched this tick: restart all counters. */
		err_cnt1 = 0;
		err_cnt2 = 0;
		err_cnt3 = 0;
	}

	prev_wdidx = cur_wdidx;
	/* Re-arm for the next 1s sample. */
	mod_timer(&eth->mtk_dma_monitor_timer, jiffies + 1 * HZ);
}
326
/* Quiesce the frame engine ahead of a SER reset: mask all NETSYS
 * interrupts, stop netif and QDMA Tx, power down the SGMII PHYs, force
 * the GMAC links down, drop all incoming GDM traffic and stop ADMA Rx.
 * The step order follows the data path from software towards the wire.
 */
void mtk_prepare_reset_fe(struct mtk_eth *eth)
{
	u32 i = 0, val = 0;

	/* Disable NETSYS Interrupt */
	mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
	mtk_w32(eth, 0, MTK_PDMA_INT_MASK);
	mtk_w32(eth, 0, MTK_QDMA_INT_MASK);

	/* Disable Linux netif Tx path */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_tx_disable(eth->netdev[i]);
	}

	/* Disable QDMA Tx */
	val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
	mtk_w32(eth, val & ~(MTK_TX_DMA_EN), MTK_QDMA_GLO_CFG);

	/* Power down sgmii (skip units that were never mapped) */
	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->xgmii->regmap_sgmii[i])
			continue;

		regmap_read(eth->xgmii->regmap_sgmii[i], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
		val |= SGMII_PHYA_PWD;
		regmap_write(eth->xgmii->regmap_sgmii[i], SGMSYS_QPHY_PWR_STATE_CTRL, val);
	}

	/* Force link down GMAC */
	val = mtk_r32(eth, MTK_MAC_MCR(0));
	mtk_w32(eth, val & ~(MAC_MCR_FORCE_LINK), MTK_MAC_MCR(0));
	val = mtk_r32(eth, MTK_MAC_MCR(1));
	mtk_w32(eth, val & ~(MAC_MCR_FORCE_LINK), MTK_MAC_MCR(1));

	/* Disable GMAC Rx */
	val = mtk_r32(eth, MTK_MAC_MCR(0));
	mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(0));
	val = mtk_r32(eth, MTK_MAC_MCR(1));
	mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(1));

	/* Enable GDM drop */
	for (i = 0; i < MTK_MAC_COUNT; i++)
		mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);

	/* Disable ADMA Rx */
	val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
	mtk_w32(eth, val & ~(MTK_RX_DMA_EN), MTK_PDMA_GLO_CFG);
}
377
/* Quiesce PPE instance @ppe_id before reset: stop keep-alive and scan
 * activity, then poll (up to ~5s) for the engine to go idle.  If it
 * never does, log it and dump the FE and PPE register blocks.
 */
void mtk_prepare_reset_ppe(struct mtk_eth *eth, u32 ppe_id)
{
	u32 i = 0, poll_time = 5000, val;

	/* Disable KA */
	mtk_m32(eth, MTK_PPE_KA_CFG_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
	mtk_m32(eth, MTK_PPE_NTU_KA_MASK, 0, MTK_PPE_BIND_LMT_1(ppe_id));
	mtk_w32(eth, 0, MTK_PPE_KA(ppe_id));
	mdelay(10);

	/* Set KA timer to maximum */
	mtk_m32(eth, MTK_PPE_NTU_KA_MASK, (0xFF << 16), MTK_PPE_BIND_LMT_1(ppe_id));
	mtk_w32(eth, 0xFFFFFFFF, MTK_PPE_KA(ppe_id));

	/* Set KA tick select */
	mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, (0x1 << 24), MTK_PPE_TB_CFG(ppe_id));
	mtk_m32(eth, MTK_PPE_KA_CFG_MASK, (0x3 << 12), MTK_PPE_TB_CFG(ppe_id));
	mdelay(10);

	/* Disable scan mode */
	mtk_m32(eth, MTK_PPE_SCAN_MODE_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
	mdelay(10);

	/* Check PPE idle: poll the busy flag once per ms. */
	while (i++ < poll_time) {
		val = mtk_r32(eth, MTK_PPE_GLO_CFG(ppe_id));
		if (!(val & MTK_PPE_BUSY))
			break;
		mdelay(1);
	}

	if (i >= poll_time) {
		pr_info("[%s] PPE keeps busy !\n", __func__);
		mtk_dump_reg(eth, "FE", 0x0, 0x500);
		mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
	}
}
415
416static int mtk_eth_netdevice_event(struct notifier_block *unused,
417 unsigned long event, void *ptr)
418{
419 switch (event) {
420 case MTK_WIFI_RESET_DONE:
developerbe971722022-05-23 13:51:05 +0800421 mtk_rest_cnt--;
422 if(!mtk_rest_cnt) {
423 complete(&wait_ser_done);
424 mtk_rest_cnt = mtk_wifi_num;
425 }
426 break;
427 case MTK_WIFI_CHIP_ONLINE:
428 mtk_wifi_num++;
429 mtk_rest_cnt = mtk_wifi_num;
430 break;
431 case MTK_WIFI_CHIP_OFFLINE:
432 mtk_wifi_num--;
433 mtk_rest_cnt = mtk_wifi_num;
developer8051e042022-04-08 13:26:36 +0800434 break;
435 default:
436 break;
437 }
438
439 return NOTIFY_DONE;
440}
441
/* Notifier block through which WiFi drivers deliver the chip online/
 * offline and reset-done events handled above.
 */
struct notifier_block mtk_eth_netdevice_nb __read_mostly = {
	.notifier_call = mtk_eth_netdevice_event,
};