// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/irq.h>

#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "trace.h"

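/*
 * Pre-TBTT tasklet: runs shortly before each beacon interval. It resyncs
 * the beacon timer, rewrites the per-VIF beacon templates while TX of the
 * affected slots is masked, and then queues buffered broadcast/multicast
 * frames on the PSD queue so they go out right after the beacon.
 */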
static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t)
{
	struct mt76x02_dev *dev = from_tasklet(dev, t, mt76.pre_tbtt_tasklet);
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_queue *q = dev->mphy.q_tx[MT_TXQ_PSD];
	struct beacon_bc_data data = {};
	struct sk_buff *skb;
	int i;

	if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
		return;

	mt76x02_resync_beacon_timer(dev);

	/* Prevent corrupt transmissions during update */
	mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff);
	dev->beacon_data_count = 0;

	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
		IEEE80211_IFACE_ITER_RESUME_ALL,
		mt76x02_update_beacon_iter, dev);

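	/*
	 * Re-enable TX for the beacon slots that now hold valid data;
	 * all other slots remain bypassed. One bypass bit per slot, with
	 * the bit-to-slot mapping implied by the hardware-specific
	 * constant below.
	 */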
	mt76_wr(dev, MT_BCN_BYPASS_MASK,
		0xff00 | ~(0xff00 >> dev->beacon_data_count));

	mt76_csa_check(mdev);

	if (mdev->csa_complete)
		return;

	mt76x02_enqueue_buffered_bc(dev, &data, 8);

	if (!skb_queue_len(&data.q))
		return;

	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
		if (!data.tail[i])
			continue;

		mt76_skb_set_moredata(data.tail[i], false);
	}

	spin_lock(&q->lock);
	while ((skb = __skb_dequeue(&data.q)) != NULL) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_vif *vif = info->control.vif;
		struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;

		mt76_tx_queue_skb(dev, q, MT_TXQ_PSD, skb, &mvif->group_wcid,
				  NULL);
	}
	spin_unlock(&q->lock);
}

static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
{
	if (en)
		tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
	else
		tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
}

static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en)
{
	mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
	if (en)
		mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
	else
		mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}

void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)
{
	static const struct mt76x02_beacon_ops beacon_ops = {
		.nslots = 8,
		.slot_size = 1024,
		.pre_tbtt_enable = mt76x02e_pre_tbtt_enable,
		.beacon_enable = mt76x02e_beacon_enable,
	};

	dev->beacon_ops = &beacon_ops;

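	/*
	 * The pre-TBTT lead time below is programmed as 8 << 4 = 128;
	 * assuming the timer field counts in 64 us units (inferred from
	 * the "8 ms" comment, not verified against a datasheet), that is
	 * 128 * 64 us ~= 8.2 ms before TBTT.
	 */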
	/* Fire a pre-TBTT interrupt 8 ms before TBTT */
	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
		       8 << 4);
	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
		       MT_DFS_GP_INTERVAL);
	mt76_wr(dev, MT_INT_TIMER_EN, 0);

	mt76x02_init_beacon_config(dev);
}
EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);

static int
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc, int bufsize)
{
	int err;

	err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
			       MT_RX_RING_BASE);
	if (err < 0)
		return err;

	mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));

	return 0;
}

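/*
 * Drain the TX status FIFO filled from the interrupt path and report
 * each entry to mac80211; runs from the TX worker, outside hard-IRQ
 * context.
 */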
static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
{
	struct mt76x02_tx_status stat;
	u8 update = 1;

	while (kfifo_get(&dev->txstatus_fifo, &stat))
		mt76x02_send_tx_status(dev, &stat, &update);
}

static void mt76x02_tx_worker(struct mt76_worker *w)
{
	struct mt76x02_dev *dev;

	dev = container_of(w, struct mt76x02_dev, mt76.tx_worker);

	mt76x02_mac_poll_tx_status(dev, false);
	mt76x02_process_tx_status_fifo(dev);

	mt76_txq_schedule_all(&dev->mphy);
}

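/*
 * TX NAPI poll. Note the deliberate double cleanup: the queues are
 * reaped once, then again after napi_complete_done() has re-enabled the
 * TX interrupt, which closes the window where a completion landing
 * between the first pass and the IRQ re-enable would go unnoticed.
 */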
static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev,
					       mt76.tx_napi);
	int i;

	mt76x02_mac_poll_tx_status(dev, false);

	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
	for (i = MT_TXQ_PSD; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);

	if (napi_complete_done(napi, 0))
		mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);

	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
	for (i = MT_TXQ_PSD; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	return 0;
}

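/*
 * One-time DMA setup: allocate the TX status FIFO, register the TX
 * worker and pre-TBTT tasklet, reset the WPDMA indices, and bring up
 * one TX ring per AC plus the PSD and MCU rings, two RX rings (MCU and
 * main), and the TX NAPI context.
 */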
int mt76x02_dma_init(struct mt76x02_dev *dev)
{
	struct mt76_txwi_cache __maybe_unused *t;
	int i, ret, fifo_size;
	struct mt76_queue *q;
	void *status_fifo;

	BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);

	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
	if (!status_fifo)
		return -ENOMEM;

	dev->mt76.tx_worker.fn = mt76x02_tx_worker;
	tasklet_setup(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet);

	spin_lock_init(&dev->txstatus_fifo_lock);
	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

	mt76_dma_attach(&dev->mt76);

	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i),
					 MT76x02_TX_RING_SIZE,
					 MT_TX_RING_BASE, 0);
		if (ret)
			return ret;
	}

	ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
				 MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
	if (ret)
		return ret;

	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT_TX_HW_QUEUE_MCU,
				  MT_MCU_RING_SIZE, MT_TX_RING_BASE);
	if (ret)
		return ret;

	mt76x02_irq_enable(dev,
			   MT_INT_TX_DONE(IEEE80211_AC_VO) |
			   MT_INT_TX_DONE(IEEE80211_AC_VI) |
			   MT_INT_TX_DONE(IEEE80211_AC_BE) |
			   MT_INT_TX_DONE(IEEE80211_AC_BK) |
			   MT_INT_TX_DONE(MT_TX_HW_QUEUE_MGMT) |
			   MT_INT_TX_DONE(MT_TX_HW_QUEUE_MCU));

	ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				    MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
	q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
	ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
				    MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret)
		return ret;

	netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt76x02_poll_tx, NAPI_POLL_WEIGHT);
	napi_enable(&dev->mt76.tx_napi);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init);

void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt76x02_dev *dev;

	dev = container_of(mdev, struct mt76x02_dev, mt76);
	mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
}
EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);

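/*
 * Top-half interrupt handler. Pattern: read and ack the pending bits,
 * mask the sources that are handled asynchronously (the NAPI contexts
 * and tasklets re-enable them once they are done), then dispatch to the
 * RX/TX NAPI contexts and the pre-TBTT / DFS tasklets.
 */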
irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
{
	struct mt76x02_dev *dev = dev_instance;
	u32 intr, mask;

	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
	intr &= dev->mt76.mmio.irqmask;
	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
		return IRQ_NONE;

	trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);

	mask = intr & (MT_INT_RX_DONE_ALL | MT_INT_GPTIMER);
	if (intr & (MT_INT_TX_DONE_ALL | MT_INT_TX_STAT))
		mask |= MT_INT_TX_DONE_ALL;

	mt76x02_irq_disable(dev, mask);

	if (intr & MT_INT_RX_DONE(0))
		napi_schedule(&dev->mt76.napi[0]);

	if (intr & MT_INT_RX_DONE(1))
		napi_schedule(&dev->mt76.napi[1]);

	if (intr & MT_INT_PRE_TBTT)
		tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);

	/* send buffered multicast frames now */
	if (intr & MT_INT_TBTT) {
		if (dev->mt76.csa_complete)
			mt76_csa_finish(&dev->mt76);
		else
			mt76_queue_kick(dev, dev->mphy.q_tx[MT_TXQ_PSD]);
	}

	if (intr & MT_INT_TX_STAT)
		mt76x02_mac_poll_tx_status(dev, true);

	if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL))
		napi_schedule(&dev->mt76.tx_napi);

	if (intr & MT_INT_GPTIMER)
		tasklet_schedule(&dev->dfs_pd.dfs_tasklet);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt76x02_irq_handler);

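/*
 * Enable WPDMA: wait for the engine to go idle, then turn on TX/RX DMA.
 * The burst size of 3 and the cleared TX writeback-done bit are
 * hardware-specific tuning; their exact semantics are not documented
 * here.
 */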
static void mt76x02_dma_enable(struct mt76x02_dev *dev)
{
	u32 val;

	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
	mt76x02_wait_for_wpdma(&dev->mt76, 1000);
	usleep_range(50, 100);

	val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
	      MT_WPDMA_GLO_CFG_TX_DMA_EN |
	      MT_WPDMA_GLO_CFG_RX_DMA_EN;
	mt76_set(dev, MT_WPDMA_GLO_CFG, val);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}

void mt76x02_dma_disable(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);

	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
	       MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
	val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_disable);

void mt76x02_mac_start(struct mt76x02_dev *dev)
{
	mt76x02_mac_reset_counters(dev);
	mt76x02_dma_enable(dev);
	mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX |
		MT_MAC_SYS_CTRL_ENABLE_RX);
	mt76x02_irq_enable(dev,
			   MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
			   MT_INT_TX_STAT);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_start);

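/*
 * Hang detection: a TX queue is considered stuck when it has frames
 * queued but its hardware DMA index has not moved for MT_TX_HANG_TH
 * consecutive watchdog polls. Only the four AC queues are checked.
 */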
static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
{
	u32 dma_idx, prev_dma_idx;
	struct mt76_queue *q;
	int i;

	for (i = 0; i < 4; i++) {
		q = dev->mphy.q_tx[i];

		prev_dma_idx = dev->mt76.tx_dma_idx[i];
		dma_idx = readl(&q->regs->dma_idx);
		dev->mt76.tx_dma_idx[i] = dma_idx;

		if (!q->queued || prev_dma_idx != dma_idx) {
			dev->tx_hang_check[i] = 0;
			continue;
		}

		if (++dev->tx_hang_check[i] >= MT_TX_HANG_TH)
			return true;
	}

	return false;
}

static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key, void *data)
{
	struct mt76x02_dev *dev = hw->priv;
	struct mt76_wcid *wcid;

	if (!sta)
		return;

	wcid = (struct mt76_wcid *)sta->drv_priv;

	if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
		return;

	mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
}

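/*
 * Tear down software state before a full firmware restart: sync the
 * packet-number state of hardware-offloaded keys back from the chip,
 * then remove every known station so that mac80211 can re-add them
 * after ieee80211_restart_hw().
 */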
static void mt76x02_reset_state(struct mt76x02_dev *dev)
{
	int i;

	lockdep_assert_held(&dev->mt76.mutex);

	clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);

	rcu_read_lock();
	ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
	rcu_read_unlock();

	for (i = 0; i < MT76x02_N_WCIDS; i++) {
		struct ieee80211_sta *sta;
		struct ieee80211_vif *vif;
		struct mt76x02_sta *msta;
		struct mt76_wcid *wcid;
		void *priv;

		wcid = rcu_dereference_protected(dev->mt76.wcid[i],
				lockdep_is_held(&dev->mt76.mutex));
		if (!wcid)
			continue;

		rcu_assign_pointer(dev->mt76.wcid[i], NULL);

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		sta = container_of(priv, struct ieee80211_sta, drv_priv);

		priv = msta->vif;
		vif = container_of(priv, struct ieee80211_vif, drv_priv);

		__mt76_sta_remove(&dev->mt76, vif, sta);
		memset(msta, 0, sizeof(*msta));
	}

	dev->mt76.vif_mask = 0;
	dev->mt76.beacon_mask = 0;
}

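/*
 * Full recovery path for a wedged device: quiesce all TX/RX contexts,
 * reset the MAC and DMA engines under the device mutex, optionally
 * restart the MCU firmware, flush every queue, and bring the device
 * back up. With a firmware restart, mac80211 is asked to reconfigure
 * the interfaces via ieee80211_restart_hw().
 */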
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
{
	u32 mask = dev->mt76.mmio.irqmask;
	bool restart = dev->mt76.mcu_ops->mcu_restart;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mphy.state);

	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.tx_napi);

	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_disable(&dev->mt76.napi[i]);
	}

	mutex_lock(&dev->mt76.mutex);

	dev->mcu_timeout = 0;
	if (restart)
		mt76x02_reset_state(dev);

	if (dev->mt76.beacon_mask)
		mt76_clear(dev, MT_BEACON_TIME_CFG,
			   MT_BEACON_TIME_CFG_BEACON_TX |
			   MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_disable(dev, mask);

	/* perform device reset */
	mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
	usleep_range(5000, 10000);
	mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);

	/* let fw reset DMA */
	mt76_set(dev, 0x734, 0x3);

	if (restart)
		mt76_mcu_restart(dev);

	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	mt76_for_each_q_rx(&dev->mt76, i) {
		mt76_queue_rx_reset(dev, i);
	}

	mt76_tx_status_check(&dev->mt76, true);

	mt76x02_mac_start(dev);

	if (dev->ed_monitor)
		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);

	if (dev->mt76.beacon_mask && !restart)
		mt76_set(dev, MT_BEACON_TIME_CFG,
			 MT_BEACON_TIME_CFG_BEACON_TX |
			 MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_enable(dev, mask);

	mutex_unlock(&dev->mt76.mutex);

	clear_bit(MT76_RESET, &dev->mphy.state);

	mt76_worker_enable(&dev->mt76.tx_worker);
	tasklet_enable(&dev->mt76.pre_tbtt_tasklet);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	if (restart) {
		set_bit(MT76_RESTART, &dev->mphy.state);
		mt76x02_mcu_function_select(dev, Q_SELECT, 1);
		ieee80211_restart_hw(dev->mt76.hw);
	} else {
		ieee80211_wake_queues(dev->mt76.hw);
		mt76_txq_schedule_all(&dev->mphy);
	}
}

void mt76x02_reconfig_complete(struct ieee80211_hw *hw,
			       enum ieee80211_reconfig_type reconfig_type)
{
	struct mt76x02_dev *dev = hw->priv;

	if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
		return;

	clear_bit(MT76_RESTART, &dev->mphy.state);
}
EXPORT_SYMBOL_GPL(mt76x02_reconfig_complete);

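/*
 * Trigger the watchdog reset when either the TX DMA is stuck or an MCU
 * command has timed out, then clear the hang-tracking state; 0xff
 * poisons the saved DMA indices so the first post-reset poll cannot
 * spuriously match.
 */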
static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
{
	if (test_bit(MT76_RESTART, &dev->mphy.state))
		return;

	if (!mt76x02_tx_hang(dev) && !dev->mcu_timeout)
		return;

	mt76x02_watchdog_reset(dev);

	dev->tx_hang_reset++;
	memset(dev->tx_hang_check, 0, sizeof(dev->tx_hang_check));
	memset(dev->mt76.tx_dma_idx, 0xff,
	       sizeof(dev->mt76.tx_dma_idx));
}

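/*
 * Periodic watchdog, rescheduled every MT_WATCHDOG_TIME on the mac80211
 * workqueue; this is what drives the hang check above.
 */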
void mt76x02_wdt_work(struct work_struct *work)
{
	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
					       wdt_work.work);

	mt76x02_check_tx_hang(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
				     MT_WATCHDOG_TIME);
}