developer | 8051e04 | 2022-04-08 13:26:36 +0800 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 |
| 2 | * |
| 3 | * Copyright (c) 2022 MediaTek Inc. |
| 4 | * Author: Henry Yen <henry.yen@mediatek.com> |
| 5 | */ |
| 6 | |
| 7 | #include <linux/regmap.h> |
| 8 | #include "mtk_eth_soc.h" |
| 9 | #include "mtk_eth_dbg.h" |
| 10 | #include "mtk_eth_reset.h" |
| 11 | |
| 12 | char* mtk_reset_event_name[32] = { |
| 13 | [MTK_EVENT_FORCE] = "Force", |
| 14 | [MTK_EVENT_WARM_CNT] = "Warm", |
| 15 | [MTK_EVENT_COLD_CNT] = "Cold", |
| 16 | [MTK_EVENT_TOTAL_CNT] = "Total", |
| 17 | [MTK_EVENT_FQ_EMPTY] = "FQ Empty", |
| 18 | [MTK_EVENT_TSO_FAIL] = "TSO Fail", |
| 19 | [MTK_EVENT_TSO_ILLEGAL] = "TSO Illegal", |
| 20 | [MTK_EVENT_TSO_ALIGN] = "TSO Align", |
| 21 | [MTK_EVENT_RFIFO_OV] = "RFIFO OV", |
| 22 | [MTK_EVENT_RFIFO_UF] = "RFIFO UF", |
| 23 | }; |
| 24 | |
developer | be97172 | 2022-05-23 13:51:05 +0800 | [diff] [blame] | 25 | static int mtk_wifi_num = 0; |
| 26 | static int mtk_rest_cnt = 0; |
| 27 | |
developer | 8051e04 | 2022-04-08 13:26:36 +0800 | [diff] [blame] | 28 | void mtk_reset_event_update(struct mtk_eth *eth, u32 id) |
| 29 | { |
| 30 | struct mtk_reset_event *reset_event = ð->reset_event; |
| 31 | reset_event->count[id]++; |
| 32 | } |
| 33 | |
| 34 | int mtk_eth_cold_reset(struct mtk_eth *eth) |
| 35 | { |
| 36 | if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) |
| 37 | regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); |
| 38 | |
| 39 | if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) |
| 40 | ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE0 | RSTCTRL_PPE1); |
| 41 | else |
| 42 | ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE0); |
| 43 | |
| 44 | if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) |
| 45 | regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0x3ffffff); |
| 46 | |
| 47 | return 0; |
| 48 | } |
| 49 | |
/* Attempt a warm (partial) reset: assert RSTCTRL_FE first, then the
 * remaining blocks, then deassert everything together.  If the FE reset
 * bit never latches within ~1000us, fall back to mtk_eth_cold_reset().
 * Always returns 0.
 */
int mtk_eth_warm_reset(struct mtk_eth *eth)
{
	u32 reset_bits = 0, i = 0, done = 0;
	u32 val1 = 0, val2 = 0, val3 = 0;

	/* Settle time before touching RSTCTRL — NOTE(review): the exact
	 * 100ms requirement is not visible here; presumably a hardware
	 * quiesce delay.
	 */
	mdelay(100);

	/* Step 1: assert FE reset only. */
	reset_bits |= RSTCTRL_FE;
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits, reset_bits);

	/* Poll up to 1000 times (1us apart) for the FE bit to latch. */
	while (i < 1000) {
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val1);
		if (val1 & RSTCTRL_FE)
			break;
		i++;
		udelay(1);
	}

	if (i < 1000) {
		/* Step 2: assert ETH/PPE resets (PPE1 too when present). */
		reset_bits = 0;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
			reset_bits |= RSTCTRL_ETH | RSTCTRL_PPE0 | RSTCTRL_PPE1;
		else
			reset_bits |= RSTCTRL_ETH | RSTCTRL_PPE0;

		regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
				   reset_bits, reset_bits);

		/* Verify the bits actually asserted; log if not. */
		udelay(1);
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val2);
		if (!(val2 & reset_bits))
			pr_info("[%s] error val2=0x%x reset_bits=0x%x !\n",
				__func__, val2, reset_bits);
		/* Step 3: deassert FE + ETH + PPE in one write. */
		reset_bits |= RSTCTRL_FE;
		regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
				   reset_bits, ~reset_bits);

		/* Verify the bits actually cleared; log if not. */
		udelay(1);
		regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val3);
		if (val3 & reset_bits)
			pr_info("[%s] error val3=0x%x reset_bits=0x%x !\n",
				__func__, val3, reset_bits);
		done = 1;
		mtk_reset_event_update(eth, MTK_EVENT_WARM_CNT);
	}

	pr_info("[%s] reset record val1=0x%x, val2=0x%x, val3=0x%x !\n",
		__func__, val1, val2, val3);

	/* Warm reset never engaged — escalate to a cold reset. */
	if (!done)
		mtk_eth_cold_reset(eth);

	return 0;
}
| 106 | |
| 107 | u32 mtk_check_reset_event(struct mtk_eth *eth, u32 status) |
| 108 | { |
| 109 | u32 ret = 0, val = 0; |
| 110 | |
| 111 | if ((status & MTK_FE_INT_FQ_EMPTY) || |
| 112 | (status & MTK_FE_INT_RFIFO_UF) || |
| 113 | (status & MTK_FE_INT_RFIFO_OV) || |
| 114 | (status & MTK_FE_INT_TSO_FAIL) || |
| 115 | (status & MTK_FE_INT_TSO_ALIGN) || |
| 116 | (status & MTK_FE_INT_TSO_ILLEGAL)) { |
| 117 | while (status) { |
| 118 | val = ffs((unsigned int)status) - 1; |
| 119 | mtk_reset_event_update(eth, val); |
| 120 | status &= ~(1 << val); |
| 121 | } |
| 122 | ret = 1; |
| 123 | } |
| 124 | |
| 125 | if (atomic_read(&force)) { |
| 126 | mtk_reset_event_update(eth, MTK_EVENT_FORCE); |
| 127 | ret = 1; |
| 128 | } |
| 129 | |
| 130 | if (ret) { |
| 131 | mtk_reset_event_update(eth, MTK_EVENT_TOTAL_CNT); |
| 132 | mtk_dump_netsys_info(eth); |
| 133 | } |
| 134 | |
| 135 | return ret; |
| 136 | } |
| 137 | |
| 138 | irqreturn_t mtk_handle_fe_irq(int irq, void *_eth) |
| 139 | { |
| 140 | struct mtk_eth *eth = _eth; |
| 141 | u32 status = 0, val = 0; |
| 142 | |
| 143 | status = mtk_r32(eth, MTK_FE_INT_STATUS); |
| 144 | pr_info("[%s] Trigger FE Misc ISR: 0x%x\n", __func__, status); |
| 145 | |
| 146 | while (status) { |
| 147 | val = ffs((unsigned int)status) - 1; |
| 148 | status &= ~(1 << val); |
developer | a7ee5fe | 2022-04-21 17:45:57 +0800 | [diff] [blame] | 149 | |
developer | be97172 | 2022-05-23 13:51:05 +0800 | [diff] [blame] | 150 | if ((val == MTK_EVENT_TSO_FAIL) || |
developer | a7ee5fe | 2022-04-21 17:45:57 +0800 | [diff] [blame] | 151 | (val == MTK_EVENT_TSO_ILLEGAL) || |
| 152 | (val == MTK_EVENT_TSO_ALIGN) || |
| 153 | (val == MTK_EVENT_RFIFO_OV) || |
| 154 | (val == MTK_EVENT_RFIFO_UF)) |
| 155 | pr_info("[%s] Detect reset event: %s !\n", __func__, |
| 156 | mtk_reset_event_name[val]); |
developer | 8051e04 | 2022-04-08 13:26:36 +0800 | [diff] [blame] | 157 | } |
developer | a7ee5fe | 2022-04-21 17:45:57 +0800 | [diff] [blame] | 158 | mtk_w32(eth, 0xFFFFFFFF, MTK_FE_INT_STATUS); |
developer | 8051e04 | 2022-04-08 13:26:36 +0800 | [diff] [blame] | 159 | |
| 160 | return IRQ_HANDLED; |
| 161 | } |
| 162 | |
| 163 | static void mtk_dump_reg(void *_eth, char *name, u32 offset, u32 range) |
| 164 | { |
| 165 | struct mtk_eth *eth = _eth; |
| 166 | u32 cur = offset; |
| 167 | |
| 168 | pr_info("\n============ %s ============\n", name); |
| 169 | while(cur < offset + range) { |
| 170 | pr_info("0x%x: %08x %08x %08x %08x\n", |
| 171 | cur, mtk_r32(eth, cur), mtk_r32(eth, cur + 0x4), |
| 172 | mtk_r32(eth, cur + 0x8), mtk_r32(eth, cur + 0xc)); |
| 173 | cur += 0x10; |
| 174 | } |
| 175 | } |
| 176 | |
| 177 | void mtk_dump_netsys_info(void *_eth) |
| 178 | { |
| 179 | struct mtk_eth *eth = _eth; |
| 180 | |
| 181 | mtk_dump_reg(eth, "FE", 0x0, 0x500); |
| 182 | mtk_dump_reg(eth, "ADMA", PDMA_BASE, 0x300); |
| 183 | mtk_dump_reg(eth, "QDMA", QDMA_BASE, 0x400); |
| 184 | mtk_dump_reg(eth, "WDMA", WDMA_BASE(0), 0x600); |
| 185 | mtk_dump_reg(eth, "PPE", 0x2200, 0x200); |
| 186 | mtk_dump_reg(eth, "GMAC", 0x10000, 0x300); |
| 187 | } |
| 188 | |
/* Periodic (1s) watchdog timer callback that samples DMA/PSE state and
 * looks for three known hang signatures: WDMA/CDM stall, QDMA Tx stall
 * and ADMA Rx stall.  Each signature must persist for 3 consecutive
 * ticks before a dump is printed and a forced reset is scheduled via
 * eth->pending_work (guarded by the reset_lock/force atomics).
 * State between ticks lives in function-static counters, so there must
 * only ever be one instance of this timer.
 */
void mtk_dma_monitor(struct timer_list *t)
{
	struct mtk_eth *eth = from_timer(eth, t, mtk_dma_monitor_timer);
	static u32 timestamp = 0;
	/* Consecutive-hit counters for the three hang signatures. */
	static u32 err_cnt1 = 0, err_cnt2 = 0, err_cnt3 = 0;
	static u32 prev_wdidx = 0;
	u32 cur_wdidx = mtk_r32(eth, MTK_WDMA_DTX_PTR(0));
	u32 is_wtx_busy = mtk_r32(eth, MTK_WDMA_GLO_CFG(0)) & MTK_TX_DMA_BUSY;
	/* NOTE(review): the 0x01FF/0x01FF0000 masks presumably select
	 * per-port occupancy fields of the PSE output-queue status
	 * registers — confirm against the NETSYS datasheet.
	 */
	u32 is_oq_free = ((mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x01FF0000) == 0) &&
			 ((mtk_r32(eth, MTK_PSE_OQ_STA(1)) & 0x000001FF) == 0) &&
			 ((mtk_r32(eth, MTK_PSE_OQ_STA(4)) & 0x01FF0000) == 0);
	u32 is_cdm_full =
		!(mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(0)) & MTK_CDM_TXFIFO_RDY);
	u32 is_qfsm_hang = mtk_r32(eth, MTK_QDMA_FSM) != 0;
	u32 is_qfwd_hang = mtk_r32(eth, MTK_QDMA_FWD_CNT) == 0;
	/* FQ is healthy when both halves report the full ring size. */
	u32 is_qfq_hang = mtk_r32(eth, MTK_QDMA_FQ_CNT) !=
			  ((MTK_DMA_SIZE << 16) | MTK_DMA_SIZE);
	u32 is_oq0_stuck = (mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x1FF) != 0;
	u32 is_cdm1_busy = (mtk_r32(eth, MTK_FE_CDM1_FSM) & 0xFFFF0000) != 0;
	u32 is_adma_busy = ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x1F) == 0) &&
			   ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x40) == 0);

	/* Signature 1: WDMA descriptor pointer frozen while Tx is busy,
	 * output queues empty and the CDM Tx FIFO not ready.
	 */
	if (cur_wdidx == prev_wdidx && is_wtx_busy &&
	    is_oq_free && is_cdm_full) {
		err_cnt1++;
		if (err_cnt1 >= 3) {
			pr_info("WDMA CDM Hang !\n");
			pr_info("============== Time: %d ================\n",
				timestamp);
			pr_info("err_cnt1 = %d", err_cnt1);
			pr_info("prev_wdidx = 0x%x | cur_wdidx = 0x%x\n",
				prev_wdidx, cur_wdidx);
			pr_info("is_wtx_busy = %d | is_oq_free = %d | is_cdm_full = %d\n",
				is_wtx_busy, is_oq_free, is_cdm_full);
			pr_info("-- -- -- -- -- -- --\n");
			pr_info("WDMA_CTX_PTR = 0x%x\n", mtk_r32(eth, 0x4808));
			pr_info("WDMA_DTX_PTR = 0x%x\n",
				mtk_r32(eth, MTK_WDMA_DTX_PTR(0)));
			pr_info("WDMA_GLO_CFG = 0x%x\n",
				mtk_r32(eth, MTK_WDMA_GLO_CFG(0)));
			pr_info("WDMA_TX_DBG_MON0 = 0x%x\n",
				mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(0)));
			pr_info("PSE_OQ_STA1 = 0x%x\n",
				mtk_r32(eth, MTK_PSE_OQ_STA(0)));
			pr_info("PSE_OQ_STA2 = 0x%x\n",
				mtk_r32(eth, MTK_PSE_OQ_STA(1)));
			pr_info("PSE_OQ_STA5 = 0x%x\n",
				mtk_r32(eth, MTK_PSE_OQ_STA(4)));
			pr_info("==============================\n");

			/* Schedule a forced reset unless one is already
			 * in flight or pending.
			 */
			if ((atomic_read(&reset_lock) == 0) &&
			    (atomic_read(&force) == 0)){
				atomic_inc(&force);
				schedule_work(&eth->pending_work);
			}
		}
	/* Signature 2: QDMA forward state machine active but the forward
	 * counter reads zero.
	 */
	} else if (is_qfsm_hang && is_qfwd_hang) {
		err_cnt2++;
		if (err_cnt2 >= 3) {
			pr_info("QDMA Tx Hang !\n");
			pr_info("============== Time: %d ================\n",
				timestamp);
			pr_info("err_cnt2 = %d", err_cnt2);
			pr_info("is_qfsm_hang = %d\n", is_qfsm_hang);
			pr_info("is_qfwd_hang = %d\n", is_qfwd_hang);
			pr_info("is_qfq_hang = %d\n", is_qfq_hang);
			pr_info("-- -- -- -- -- -- --\n");
			pr_info("MTK_QDMA_FSM = 0x%x\n",
				mtk_r32(eth, MTK_QDMA_FSM));
			pr_info("MTK_QDMA_FWD_CNT = 0x%x\n",
				mtk_r32(eth, MTK_QDMA_FWD_CNT));
			pr_info("MTK_QDMA_FQ_CNT = 0x%x\n",
				mtk_r32(eth, MTK_QDMA_FQ_CNT));
			pr_info("==============================\n");

			if ((atomic_read(&reset_lock) == 0) &&
			    (atomic_read(&force) == 0)){
				atomic_inc(&force);
				schedule_work(&eth->pending_work);
			}
		}
	/* Signature 3: PSE output queue 0 stuck while CDM1 and ADMA Rx
	 * both report busy.
	 */
	} else if (is_oq0_stuck && is_cdm1_busy && is_adma_busy) {
		err_cnt3++;
		if (err_cnt3 >= 3) {
			pr_info("ADMA Rx Hang !\n");
			pr_info("============== Time: %d ================\n",
				timestamp);
			pr_info("err_cnt3 = %d", err_cnt3);
			pr_info("is_oq0_stuck = %d\n", is_oq0_stuck);
			pr_info("is_cdm1_busy = %d\n", is_cdm1_busy);
			pr_info("is_adma_busy = %d\n", is_adma_busy);
			pr_info("-- -- -- -- -- -- --\n");
			pr_info("MTK_PSE_OQ_STA1 = 0x%x\n",
				mtk_r32(eth, MTK_PSE_OQ_STA(0)));
			pr_info("MTK_ADMA_RX_DBG0 = 0x%x\n",
				mtk_r32(eth, MTK_ADMA_RX_DBG0));
			pr_info("MTK_ADMA_RX_DBG1 = 0x%x\n",
				mtk_r32(eth, MTK_ADMA_RX_DBG1));
			pr_info("==============================\n");
			if ((atomic_read(&reset_lock) == 0) &&
			    (atomic_read(&force) == 0)){
				atomic_inc(&force);
				schedule_work(&eth->pending_work);
			}
		}
	} else {
		/* No signature matched this tick — restart all streaks. */
		err_cnt1 = 0;
		err_cnt2 = 0;
		err_cnt3 = 0;
	}

	prev_wdidx = cur_wdidx;
	/* Re-arm for the next 1-second sample. */
	mod_timer(&eth->mtk_dma_monitor_timer, jiffies + 1 * HZ);
}
| 303 | |
/* Quiesce the frame engine before a reset: mask interrupts, stop the
 * Linux Tx path, halt QDMA Tx, power down the SGMII PHYs, force the
 * GMAC links down, stop GMAC Rx, drop all GDM traffic and finally halt
 * ADMA Rx.  The ordering matters — traffic sources are stopped before
 * the DMA engines that drain them.
 */
void mtk_prepare_reset_fe(struct mtk_eth *eth)
{
	u32 i = 0, val = 0;

	/* Disable NETSYS Interrupt */
	mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
	mtk_w32(eth, 0, MTK_PDMA_INT_MASK);
	mtk_w32(eth, 0, MTK_QDMA_INT_MASK);

	/* Disable Linux netif Tx path */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_tx_disable(eth->netdev[i]);
	}

	/* Disable QDMA Tx */
	val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
	mtk_w32(eth, val & ~(MTK_TX_DMA_EN), MTK_QDMA_GLO_CFG);

	/* Power down sgmii */
	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->sgmii->regmap[i])
			continue;

		regmap_read(eth->sgmii->regmap[i], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
		val |= SGMII_PHYA_PWD;
		regmap_write(eth->sgmii->regmap[i], SGMSYS_QPHY_PWR_STATE_CTRL, val);
	}

	/* Force link down GMAC */
	val = mtk_r32(eth, MTK_MAC_MCR(0));
	mtk_w32(eth, val & ~(MAC_MCR_FORCE_LINK), MTK_MAC_MCR(0));
	val = mtk_r32(eth, MTK_MAC_MCR(1));
	mtk_w32(eth, val & ~(MAC_MCR_FORCE_LINK), MTK_MAC_MCR(1));

	/* Disable GMAC Rx */
	val = mtk_r32(eth, MTK_MAC_MCR(0));
	mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(0));
	val = mtk_r32(eth, MTK_MAC_MCR(1));
	mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(1));

	/* Enable GDM drop */
	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);

	/* Disable ADMA Rx */
	val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
	mtk_w32(eth, val & ~(MTK_RX_DMA_EN), MTK_PDMA_GLO_CFG);
}
| 353 | |
/* Quiesce one PPE instance before a reset: stop keep-alive traffic,
 * stretch the KA timers to their maximum, disable scan mode, then poll
 * (up to ~5s) for the PPE to go idle.  If it never does, dump the FE
 * and PPE register windows for diagnosis.
 */
void mtk_prepare_reset_ppe(struct mtk_eth *eth, u32 ppe_id)
{
	u32 i = 0, poll_time = 5000, val;

	/* Disable KA */
	mtk_m32(eth, MTK_PPE_KA_CFG_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
	mtk_m32(eth, MTK_PPE_NTU_KA_MASK, 0, MTK_PPE_BIND_LMT_1(ppe_id));
	mtk_w32(eth, 0, MTK_PPE_KA(ppe_id));
	mdelay(10);

	/* Set KA timer to maximum */
	mtk_m32(eth, MTK_PPE_NTU_KA_MASK, (0xFF << 16), MTK_PPE_BIND_LMT_1(ppe_id));
	mtk_w32(eth, 0xFFFFFFFF, MTK_PPE_KA(ppe_id));

	/* Set KA tick select */
	mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, (0x1 << 24), MTK_PPE_TB_CFG(ppe_id));
	mtk_m32(eth, MTK_PPE_KA_CFG_MASK, (0x3 << 12), MTK_PPE_TB_CFG(ppe_id));
	mdelay(10);

	/* Disable scan mode */
	mtk_m32(eth, MTK_PPE_SCAN_MODE_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
	mdelay(10);

	/* Check PPE idle: poll BUSY up to poll_time times, 1ms apart. */
	while (i++ < poll_time) {
		val = mtk_r32(eth, MTK_PPE_GLO_CFG(ppe_id));
		if (!(val & MTK_PPE_BUSY))
			break;
		mdelay(1);
	}

	if (i >= poll_time) {
		pr_info("[%s] PPE keeps busy !\n", __func__);
		mtk_dump_reg(eth, "FE", 0x0, 0x500);
		mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
	}
}
| 391 | |
| 392 | static int mtk_eth_netdevice_event(struct notifier_block *unused, |
| 393 | unsigned long event, void *ptr) |
| 394 | { |
| 395 | switch (event) { |
| 396 | case MTK_WIFI_RESET_DONE: |
developer | be97172 | 2022-05-23 13:51:05 +0800 | [diff] [blame] | 397 | mtk_rest_cnt--; |
| 398 | if(!mtk_rest_cnt) { |
| 399 | complete(&wait_ser_done); |
| 400 | mtk_rest_cnt = mtk_wifi_num; |
| 401 | } |
| 402 | break; |
| 403 | case MTK_WIFI_CHIP_ONLINE: |
| 404 | mtk_wifi_num++; |
| 405 | mtk_rest_cnt = mtk_wifi_num; |
| 406 | break; |
| 407 | case MTK_WIFI_CHIP_OFFLINE: |
| 408 | mtk_wifi_num--; |
| 409 | mtk_rest_cnt = mtk_wifi_num; |
developer | 8051e04 | 2022-04-08 13:26:36 +0800 | [diff] [blame] | 410 | break; |
| 411 | default: |
| 412 | break; |
| 413 | } |
| 414 | |
| 415 | return NOTIFY_DONE; |
| 416 | } |
| 417 | |
/* Notifier block registered elsewhere to receive the MTK_WIFI_* events
 * handled by mtk_eth_netdevice_event().
 */
struct notifier_block mtk_eth_netdevice_nb __read_mostly = {
	.notifier_call = mtk_eth_netdevice_event,
};