1From bcc5b5f90672a1afef292f0cdf96cca62367e541 Mon Sep 17 00:00:00 2001
2From: Sujuan Chen <sujuan.chen@mediatek.com>
3Date: Wed, 15 Jun 2022 14:48:25 +0800
4Subject: [PATCH 2/3] mt76: add WED RX support
5
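Add WED RX offload support to the mt76 core and hook it up for
MT7915/MT7986:

- add an rxwi cache and an rx token idr so rx buffers handed to WED
  can be tracked by token
- introduce the MT76_WED_Q_RX queue type and attach the MT_RXQ_MAIN /
  MT_RXQ_EXT data rings to the WED rx rings
- pass the rx descriptor info word down to the per-driver rx_skb()
  handlers so mt7915 can run the WED PPE check on 802.3 frames
- register rx buffers with the WED buffer manager and forward STA_REC
  updates to the WED firmware
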
6Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
7---
8 dma.c | 219 +++++++++++++++++++++++++++++++++++++---------
9 dma.h | 10 +++
10 mac80211.c | 8 +-
11 mt76.h | 24 ++++-
12 mt7603/dma.c | 2 +-
13 mt7603/mt7603.h | 2 +-
14 mt7615/mac.c | 2 +-
15 mt7615/mt7615.h | 2 +-
16 mt76_connac_mcu.c | 7 ++
17 mt76x02.h | 2 +-
18 mt76x02_txrx.c | 2 +-
19 mt7915/dma.c | 8 ++
20 mt7915/mac.c | 89 ++++++++++++++++++-
21 mt7915/mcu.c | 2 +
22 mt7915/mmio.c | 22 +++++
23 mt7915/mt7915.h | 7 +-
24 mt7915/regs.h | 14 ++-
25 mt7921/mac.c | 2 +-
26 mt7921/mt7921.h | 4 +-
27 mt7921/pci_mac.c | 4 +-
28 tx.c | 34 +++++++
29 21 files changed, 404 insertions(+), 62 deletions(-)
30 mode change 100644 => 100755 dma.c
31 mode change 100644 => 100755 mt7603/dma.c
32 mode change 100644 => 100755 mt7603/mt7603.h
33 mode change 100644 => 100755 mt7615/mac.c
34 mode change 100644 => 100755 mt7615/mt7615.h
35 mode change 100644 => 100755 mt76x02.h
36 mode change 100644 => 100755 mt76x02_txrx.c
37 mode change 100644 => 100755 mt7921/mac.c
38 mode change 100644 => 100755 mt7921/mt7921.h
39 mode change 100644 => 100755 mt7921/pci_mac.c
40
41diff --git a/dma.c b/dma.c
42old mode 100644
43new mode 100755
44index f6f5f129..7ef4bcbc
45--- a/dma.c
46+++ b/dma.c
47@@ -98,6 +98,63 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
48 }
49 EXPORT_SYMBOL_GPL(mt76_put_txwi);
50
51+static struct mt76_txwi_cache *
52+mt76_alloc_rxwi(struct mt76_dev *dev)
53+{
54+ struct mt76_txwi_cache *r;
55+ int size;
56+
57+ size = L1_CACHE_ALIGN(sizeof(*r));
58+ r = kzalloc(size, GFP_ATOMIC);
59+ if (!r)
60+ return NULL;
61+
62+ r->buf = NULL;
63+
64+ return r;
65+}
66+
67+static struct mt76_txwi_cache *
68+__mt76_get_rxwi(struct mt76_dev *dev)
69+{
70+ struct mt76_txwi_cache *r = NULL;
71+
72+ spin_lock(&dev->wed_lock);
73+ if (!list_empty(&dev->rxwi_cache)) {
74+ r = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
75+ list);
76+ if (r)
77+ list_del(&r->list);
78+ }
79+ spin_unlock(&dev->wed_lock);
80+
81+ return r;
82+}
83+
84+struct mt76_txwi_cache *
85+mt76_get_rxwi(struct mt76_dev *dev)
86+{
87+ struct mt76_txwi_cache *r = __mt76_get_rxwi(dev);
88+
89+ if (r)
90+ return r;
91+
92+ return mt76_alloc_rxwi(dev);
93+}
94+EXPORT_SYMBOL_GPL(mt76_get_rxwi);
95+
96+void
97+mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *r)
98+{
99+ if (!r)
100+ return;
101+
102+ spin_lock(&dev->wed_lock);
103+ list_add(&r->list, &dev->rxwi_cache);
104+ spin_unlock(&dev->wed_lock);
105+}
106+EXPORT_SYMBOL_GPL(mt76_put_rxwi);
107+
108 static void
109 mt76_free_pending_txwi(struct mt76_dev *dev)
110 {
111@@ -141,12 +198,15 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
112 static int
113 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
114 struct mt76_queue_buf *buf, int nbufs, u32 info,
115- struct sk_buff *skb, void *txwi)
116+ struct sk_buff *skb, void *txwi, void *rxwi)
117 {
118+ struct mtk_wed_device *wed = &dev->mmio.wed;
119+
120 struct mt76_queue_entry *entry;
121 struct mt76_desc *desc;
122 u32 ctrl;
123 int i, idx = -1;
124+ int type;
125
126 if (txwi) {
127 q->entry[q->head].txwi = DMA_DUMMY_DATA;
128@@ -162,28 +222,42 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
129 desc = &q->desc[idx];
130 entry = &q->entry[idx];
131
132- if (buf[0].skip_unmap)
133- entry->skip_buf0 = true;
134- entry->skip_buf1 = i == nbufs - 1;
135-
136- entry->dma_addr[0] = buf[0].addr;
137- entry->dma_len[0] = buf[0].len;
138-
139- ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
140- if (i < nbufs - 1) {
141- entry->dma_addr[1] = buf[1].addr;
142- entry->dma_len[1] = buf[1].len;
143- buf1 = buf[1].addr;
144- ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
145- if (buf[1].skip_unmap)
146- entry->skip_buf1 = true;
147+ type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
148+ if (mtk_wed_device_active(wed) && type == MT76_WED_Q_RX) {
149+ struct mt76_txwi_cache *r = rxwi;
150+ int rx_token;
151+
152+ if (!r)
153+ return -ENOMEM;
154+
155+ rx_token = mt76_rx_token_consume(dev, (void *)skb, r, buf[0].addr);
156+
157+ buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
158+ ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, MTK_WED_RX_PKT_SIZE);
159+ ctrl |= MT_DMA_CTL_TO_HOST;
160+ } else {
161+ if (buf[0].skip_unmap)
162+ entry->skip_buf0 = true;
163+ entry->skip_buf1 = i == nbufs - 1;
164+
165+ entry->dma_addr[0] = buf[0].addr;
166+ entry->dma_len[0] = buf[0].len;
167+
168+ ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
169+ if (i < nbufs - 1) {
170+ entry->dma_addr[1] = buf[1].addr;
171+ entry->dma_len[1] = buf[1].len;
172+ buf1 = buf[1].addr;
173+ ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
174+ if (buf[1].skip_unmap)
175+ entry->skip_buf1 = true;
176+ }
177+ if (i == nbufs - 1)
178+ ctrl |= MT_DMA_CTL_LAST_SEC0;
179+ else if (i == nbufs - 2)
180+ ctrl |= MT_DMA_CTL_LAST_SEC1;
181 }
182
183- if (i == nbufs - 1)
184- ctrl |= MT_DMA_CTL_LAST_SEC0;
185- else if (i == nbufs - 2)
186- ctrl |= MT_DMA_CTL_LAST_SEC1;
187-
188 WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
189 WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
190 WRITE_ONCE(desc->info, cpu_to_le32(info));
191@@ -272,33 +346,63 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
192
193 static void *
194 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
195- int *len, u32 *info, bool *more)
196+ int *len, u32 *info, bool *more, bool *drop)
197 {
198 struct mt76_queue_entry *e = &q->entry[idx];
199 struct mt76_desc *desc = &q->desc[idx];
200 dma_addr_t buf_addr;
201 void *buf = e->buf;
202 int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
203+ struct mtk_wed_device *wed = &dev->mmio.wed;
204+ int type;
205
206- buf_addr = e->dma_addr[0];
207 if (len) {
208 u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
209 *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
210 *more = !(ctl & MT_DMA_CTL_LAST_SEC0);
211 }
212
213- if (info)
214- *info = le32_to_cpu(desc->info);
215+ type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
216+ if (mtk_wed_device_active(wed) && type == MT76_WED_Q_RX) {
217+ u32 token;
218+ struct mt76_txwi_cache *r;
219+
220+ token = FIELD_GET(MT_DMA_CTL_TOKEN, desc->buf1);
221+
222+ r = mt76_rx_token_release(dev, token);
223+ if (!r)
224+ return NULL;
225+
226+ buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
227+ if (!buf)
228+ return NULL;
229+
230+ memcpy(buf, r->buf, MTK_WED_RX_PKT_SIZE);
231+ buf_addr = r->dma_addr;
232+ buf_len = MTK_WED_RX_PKT_SIZE;
233+ r->dma_addr = 0;
234+ //r->buf = NULL;
235+
236+ mt76_put_rxwi(dev, r);
237+
238+ if (desc->ctrl & (MT_DMA_CTL_TO_HOST_A | MT_DMA_CTL_DROP))
239+ *drop = true;
240+ } else {
241+ buf_addr = e->dma_addr[0];
242+ e->buf = NULL;
243+ }
244
245 dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE);
246- e->buf = NULL;
247+
248+ if (info)
249+ *info = le32_to_cpu(desc->info);
250
251 return buf;
252 }
253
254 static void *
255 mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
256- int *len, u32 *info, bool *more)
257+ int *len, u32 *info, bool *more, bool *drop)
258 {
259 int idx = q->tail;
260
261@@ -314,7 +418,7 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
262 q->tail = (q->tail + 1) % q->ndesc;
263 q->queued--;
264
265- return mt76_dma_get_buf(dev, q, idx, len, info, more);
266+ return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
267 }
268
269 static int
270@@ -336,7 +440,7 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
271 buf.len = skb->len;
272
273 spin_lock_bh(&q->lock);
274- mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
275+ mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL, NULL);
276 mt76_dma_kick_queue(dev, q);
277 spin_unlock_bh(&q->lock);
278
279@@ -413,7 +517,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
280 goto unmap;
281
282 return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
283- tx_info.info, tx_info.skb, t);
284+ tx_info.info, tx_info.skb, t, NULL);
285
286 unmap:
287 for (n--; n > 0; n--)
288@@ -448,6 +552,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
289 int frames = 0;
290 int len = SKB_WITH_OVERHEAD(q->buf_size);
291 int offset = q->buf_offset;
292+ struct mtk_wed_device *wed = &dev->mmio.wed;
293
294 if (!q->ndesc)
295 return 0;
296@@ -456,10 +561,27 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
297
298 while (q->queued < q->ndesc - 1) {
299 struct mt76_queue_buf qbuf;
300+ int type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
301+ bool skip_alloc = false;
302+ struct mt76_txwi_cache *r = NULL;
303+
304+ if (mtk_wed_device_active(wed) && type == MT76_WED_Q_RX) {
305+ r = mt76_get_rxwi(dev);
306+ if (!r)
307+ return -ENOMEM;
308+
309+ if (r->buf) {
310+ skip_alloc = true;
311+ len = MTK_WED_RX_PKT_SIZE;
312+ buf = r->buf;
313+ }
314+ }
315
316- buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
317- if (!buf)
318- break;
319+ if (!skip_alloc) {
320+ buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
321+ if (!buf)
322+ break;
323+ }
324
325 addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
326 if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
327@@ -470,7 +592,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
328 qbuf.addr = addr + offset;
329 qbuf.len = len - offset;
330 qbuf.skip_unmap = false;
331- mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
332+ mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL, r);
333 frames++;
334 }
335
336@@ -516,6 +638,11 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
337 if (!ret)
338 q->wed_regs = wed->txfree_ring.reg_base;
339 break;
340+ case MT76_WED_Q_RX:
341+ ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs);
342+ if (!ret)
343+ q->wed_regs = wed->rx_ring[ring].reg_base;
344+ break;
345 default:
346 ret = -EINVAL;
347 }
348@@ -531,7 +658,8 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
349 int idx, int n_desc, int bufsize,
350 u32 ring_base)
351 {
352- int ret, size;
353+ int ret, size, type;
354+ struct mtk_wed_device *wed = &dev->mmio.wed;
355
356 spin_lock_init(&q->lock);
357 spin_lock_init(&q->cleanup_lock);
358@@ -541,6 +669,11 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
359 q->buf_size = bufsize;
360 q->hw_idx = idx;
361
362+ type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
363+ if (mtk_wed_device_active(wed) && type == MT76_WED_Q_RX)
364+ q->buf_size = SKB_DATA_ALIGN(NET_SKB_PAD + MTK_WED_RX_PKT_SIZE) +
365+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
366+
367 size = q->ndesc * sizeof(struct mt76_desc);
368 q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
369 if (!q->desc)
370@@ -573,7 +706,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
371
372 spin_lock_bh(&q->lock);
373 do {
374- buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
375+ buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
376 if (!buf)
377 break;
378
379@@ -614,7 +747,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
380
381 static void
382 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
383- int len, bool more)
384+ int len, bool more, u32 info)
385 {
386 struct sk_buff *skb = q->rx_head;
387 struct skb_shared_info *shinfo = skb_shinfo(skb);
388@@ -634,7 +767,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
389
390 q->rx_head = NULL;
391 if (nr_frags < ARRAY_SIZE(shinfo->frags))
392- dev->drv->rx_skb(dev, q - dev->q_rx, skb);
393+ dev->drv->rx_skb(dev, q - dev->q_rx, skb, info);
394 else
395 dev_kfree_skb(skb);
396 }
397@@ -655,6 +788,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
398 }
399
400 while (done < budget) {
401+ bool drop = false;
402 u32 info;
403
404 if (check_ddone) {
405@@ -665,10 +799,13 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
406 break;
407 }
408
409- data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
410+ data = mt76_dma_dequeue(dev, q, false, &len, &info, &more, &drop);
411 if (!data)
412 break;
413
414+ if (drop)
415+ goto free_frag;
416+
417 if (q->rx_head)
418 data_len = q->buf_size;
419 else
420@@ -681,7 +818,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
421 }
422
423 if (q->rx_head) {
424- mt76_add_fragment(dev, q, data, len, more);
425+ mt76_add_fragment(dev, q, data, len, more, info);
426 continue;
427 }
428
429@@ -708,7 +845,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
430 continue;
431 }
432
433- dev->drv->rx_skb(dev, q - dev->q_rx, skb);
434+ dev->drv->rx_skb(dev, q - dev->q_rx, skb, info);
435 continue;
436
437 free_frag:
438diff --git a/dma.h b/dma.h
439index fdf786f9..90370d12 100644
440--- a/dma.h
441+++ b/dma.h
442@@ -16,6 +16,16 @@
443 #define MT_DMA_CTL_LAST_SEC0 BIT(30)
444 #define MT_DMA_CTL_DMA_DONE BIT(31)
445
446+#define MT_DMA_CTL_TO_HOST BIT(8)
447+#define MT_DMA_CTL_TO_HOST_A BIT(12)
448+#define MT_DMA_CTL_DROP BIT(14)
449+
450+#define MT_DMA_CTL_TOKEN GENMASK(31, 16)
451+
452+#define MT_DMA_PPE_CPU_REASON GENMASK(15, 11)
453+#define MT_DMA_PPE_ENTRY GENMASK(30, 16)
454+#define MT_DMA_INFO_PPE_VLD BIT(31)
455+
456 #define MT_DMA_HDR_LEN 4
457 #define MT_RX_INFO_LEN 4
458 #define MT_FCE_INFO_LEN 4
459diff --git a/mac80211.c b/mac80211.c
460index af2c09ad..fa5ce6ec 100644
461--- a/mac80211.c
462+++ b/mac80211.c
463@@ -594,11 +594,14 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
464 BIT(NL80211_IFTYPE_ADHOC);
465
466 spin_lock_init(&dev->token_lock);
467+ spin_lock_init(&dev->rx_token_lock);
468 idr_init(&dev->token);
469+ idr_init(&dev->rx_token);
470
471 INIT_LIST_HEAD(&dev->wcid_list);
472
473 INIT_LIST_HEAD(&dev->txwi_cache);
474+ INIT_LIST_HEAD(&dev->rxwi_cache);
475 dev->token_size = dev->drv->token_size;
476
477 for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
478@@ -1296,7 +1299,10 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
479
480 while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
481 mt76_check_sta(dev, skb);
482- mt76_rx_aggr_reorder(skb, &frames);
483+ if (mtk_wed_device_active(&dev->mmio.wed))
484+ __skb_queue_tail(&frames, skb);
485+ else
486+ mt76_rx_aggr_reorder(skb, &frames);
487 }
488
489 mt76_rx_complete(dev, &frames, napi);
490diff --git a/mt76.h b/mt76.h
491index 062c5ce4..3ca480cc 100644
492--- a/mt76.h
493+++ b/mt76.h
494@@ -20,6 +20,8 @@
495
496 #define MT_MCU_RING_SIZE 32
497 #define MT_RX_BUF_SIZE 2048
498+#define MTK_WED_RX_PKT_SIZE 1700
499+
500 #define MT_SKB_HEAD_LEN 256
501
502 #define MT_MAX_NON_AQL_PKT 16
503@@ -35,6 +37,7 @@
504 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
505 FIELD_PREP(MT_QFLAG_WED_RING, _n))
506 #define MT_WED_Q_TX(_n) __MT_WED_Q(MT76_WED_Q_TX, _n)
507+#define MT_WED_Q_RX(_n) __MT_WED_Q(MT76_WED_Q_RX, _n)
508 #define MT_WED_Q_TXFREE __MT_WED_Q(MT76_WED_Q_TXFREE, 0)
509
510 struct mt76_dev;
511@@ -56,6 +59,7 @@ enum mt76_bus_type {
512 enum mt76_wed_type {
513 MT76_WED_Q_TX,
514 MT76_WED_Q_TXFREE,
515+ MT76_WED_Q_RX,
516 };
517
518 struct mt76_bus_ops {
519@@ -305,7 +309,10 @@ struct mt76_txwi_cache {
520 struct list_head list;
521 dma_addr_t dma_addr;
522
523- struct sk_buff *skb;
524+ union {
525+ void *buf;
526+ struct sk_buff *skb;
527+ };
528 };
529
530 struct mt76_rx_tid {
531@@ -403,7 +410,7 @@ struct mt76_driver_ops {
532 bool (*rx_check)(struct mt76_dev *dev, void *data, int len);
533
534 void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
535- struct sk_buff *skb);
536+ struct sk_buff *skb, u32 info);
537
538 void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
539
540@@ -747,6 +754,7 @@ struct mt76_dev {
541 struct ieee80211_hw *hw;
542
543 spinlock_t lock;
544+ spinlock_t wed_lock;
545 spinlock_t cc_lock;
546
547 u32 cur_cc_bss_rx;
548@@ -772,6 +780,7 @@ struct mt76_dev {
549 struct sk_buff_head rx_skb[__MT_RXQ_MAX];
550
551 struct list_head txwi_cache;
552+ struct list_head rxwi_cache;
553 struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
554 struct mt76_queue q_rx[__MT_RXQ_MAX];
555 const struct mt76_queue_ops *queue_ops;
556@@ -785,6 +794,9 @@ struct mt76_dev {
557 u16 wed_token_count;
558 u16 token_count;
559 u16 token_size;
560+ u16 rx_token_size;
561+ spinlock_t rx_token_lock;
562+ struct idr rx_token;
563
564 wait_queue_head_t tx_wait;
565 /* spinclock used to protect wcid pktid linked list */
566@@ -1351,6 +1363,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
567 }
568
569 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
570+void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
571+struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
572 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
573 struct napi_struct *napi);
574 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
575@@ -1495,6 +1509,12 @@ struct mt76_txwi_cache *
576 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
577 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
578 void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
579+int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
580+ struct mt76_txwi_cache *r, dma_addr_t phys);
581+void skb_trace(const struct sk_buff *skb, bool full_pkt);
582+
583+struct mt76_txwi_cache *
584+mt76_rx_token_release(struct mt76_dev *dev, int token);
585
586 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
587 {
588diff --git a/mt7603/dma.c b/mt7603/dma.c
589old mode 100644
590new mode 100755
591index 590cff9d..2ff71c53
592--- a/mt7603/dma.c
593+++ b/mt7603/dma.c
594@@ -69,7 +69,7 @@ free:
595 }
596
597 void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
598- struct sk_buff *skb)
599+ struct sk_buff *skb, u32 info)
600 {
601 struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
602 __le32 *rxd = (__le32 *)skb->data;
603diff --git a/mt7603/mt7603.h b/mt7603/mt7603.h
604old mode 100644
605new mode 100755
606index 0fd46d90..f2ce22ae
607--- a/mt7603/mt7603.h
608+++ b/mt7603/mt7603.h
609@@ -244,7 +244,7 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
610 void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
611
612 void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
613- struct sk_buff *skb);
614+ struct sk_buff *skb, u32 info);
615 void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
616 void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
617 int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
618diff --git a/mt7615/mac.c b/mt7615/mac.c
619old mode 100644
620new mode 100755
621index 038774b3..ed72245b
622--- a/mt7615/mac.c
623+++ b/mt7615/mac.c
624@@ -1647,7 +1647,7 @@ bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len)
625 EXPORT_SYMBOL_GPL(mt7615_rx_check);
626
627 void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
628- struct sk_buff *skb)
629+ struct sk_buff *skb, u32 info)
630 {
631 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
632 __le32 *rxd = (__le32 *)skb->data;
633diff --git a/mt7615/mt7615.h b/mt7615/mt7615.h
634old mode 100644
635new mode 100755
636index 93a9e8f4..06ccaa52
637--- a/mt7615/mt7615.h
638+++ b/mt7615/mt7615.h
639@@ -510,7 +510,7 @@ void mt7615_tx_worker(struct mt76_worker *w);
640 void mt7615_tx_token_put(struct mt7615_dev *dev);
641 bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len);
642 void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
643- struct sk_buff *skb);
644+ struct sk_buff *skb, u32 info);
645 void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
646 int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
647 struct ieee80211_sta *sta);
648diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
649index cd350689..e4d0a791 100644
650--- a/mt76_connac_mcu.c
651+++ b/mt76_connac_mcu.c
652@@ -1190,6 +1190,7 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
653 int cmd, bool enable, bool tx)
654 {
655 struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
656+ struct mtk_wed_device *wed = &dev->mmio.wed;
657 struct wtbl_req_hdr *wtbl_hdr;
658 struct tlv *sta_wtbl;
659 struct sk_buff *skb;
660@@ -1210,6 +1211,7 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
661 mt76_connac_mcu_wtbl_ba_tlv(dev, skb, params, enable, tx, sta_wtbl,
662 wtbl_hdr);
663
664+ mtk_wed_device_update_msg(wed, WED_WO_STA_REC, skb->data, skb->len);
665 ret = mt76_mcu_skb_send_msg(dev, skb, cmd, true);
666 if (ret)
667 return ret;
668@@ -1220,6 +1222,7 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
669
670 mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx);
671
672+ mtk_wed_device_update_msg(wed, WED_WO_STA_REC, skb->data, skb->len);
673 return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
674 }
675 EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba);
676@@ -2634,6 +2637,7 @@ int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
677 struct mt76_wcid *wcid, enum set_key_cmd cmd)
678 {
679 struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
680+ struct mtk_wed_device *wed = &dev->mmio.wed;
681 struct sk_buff *skb;
682 int ret;
683
684@@ -2645,6 +2649,9 @@ int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
685 if (ret)
686 return ret;
687
688+ if (mtk_wed_device_active(wed))
689+ mtk_wed_device_update_msg(wed, WED_WO_STA_REC, skb->data, skb->len);
690+
691 return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
692 }
693 EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_key);
694diff --git a/mt76x02.h b/mt76x02.h
695old mode 100644
696new mode 100755
697index f76fd22e..0b872af1
698--- a/mt76x02.h
699+++ b/mt76x02.h
700@@ -173,7 +173,7 @@ int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
701 void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
702 bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
703 void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
704- struct sk_buff *skb);
705+ struct sk_buff *skb, u32 info);
706 void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
707 irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
708 void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
709diff --git a/mt76x02_txrx.c b/mt76x02_txrx.c
710old mode 100644
711new mode 100755
712index 96fdf423..bf24d3e0
713--- a/mt76x02_txrx.c
714+++ b/mt76x02_txrx.c
715@@ -33,7 +33,7 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
716 EXPORT_SYMBOL_GPL(mt76x02_tx);
717
718 void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
719- struct sk_buff *skb)
720+ struct sk_buff *skb, u32 info)
721 {
722 struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
723 void *rxwi = skb->data;
724diff --git a/mt7915/dma.c b/mt7915/dma.c
725index 71223221..722727c4 100644
726--- a/mt7915/dma.c
727+++ b/mt7915/dma.c
728@@ -376,6 +376,8 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
729 FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
730 FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
731 FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1, 1));
732+ mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
733+ MT_WFDMA0_EXT0_RXWB_KEEP);
734 } else {
735 mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
736 FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
737@@ -451,6 +453,9 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
738
739 /* rx data queue for band0 */
740 if (!dev->phy.band_idx) {
741+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
742+ dev->mt76.q_rx[MT_RXQ_MAIN].flags = MT_WED_Q_RX(MT7915_RXQ_BAND0);
743+
744 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
745 MT_RXQ_ID(MT_RXQ_MAIN),
746 MT7915_RX_RING_SIZE,
747@@ -482,6 +487,9 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
748
749 if (dev->dbdc_support || dev->phy.band_idx) {
750 /* rx data queue for band1 */
751+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
752+ dev->mt76.q_rx[MT_RXQ_EXT].flags = MT_WED_Q_RX(MT7915_RXQ_BAND1);
753+
754 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT],
755 MT_RXQ_ID(MT_RXQ_EXT),
756 MT7915_RX_RING_SIZE,
757diff --git a/mt7915/mac.c b/mt7915/mac.c
758index 3f059bed..2d3b9d6a 100644
759--- a/mt7915/mac.c
760+++ b/mt7915/mac.c
761@@ -217,7 +217,7 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
762 }
763
764 static int
765-mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
766+mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb, enum mt76_rxq_id q, u32 info)
767 {
768 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
769 struct mt76_phy *mphy = &dev->mt76.phy;
770@@ -494,6 +494,27 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
771 #endif
772 } else {
773 status->flag |= RX_FLAG_8023;
774+ if (msta && msta->vif) {
775+ struct mtk_wed_device *wed;
776+ int type;
777+
778+ wed = &dev->mt76.mmio.wed;
779+ type = FIELD_GET(MT_QFLAG_WED_TYPE, dev->mt76.q_rx[q].flags);
780+ if ((mtk_wed_device_active(wed) && type == MT76_WED_Q_RX) &&
781+ (info & MT_DMA_INFO_PPE_VLD)) {
782+ struct ieee80211_vif *vif;
783+ u32 hash, reason;
784+
785+ vif = container_of((void *)msta->vif, struct ieee80211_vif,
786+ drv_priv);
787+
788+ skb->dev = ieee80211_vif_to_netdev(vif);
789+ reason = FIELD_GET(MT_DMA_PPE_CPU_REASON, info);
790+ hash = FIELD_GET(MT_DMA_PPE_ENTRY, info);
791+
792+ mtk_wed_device_ppe_check(wed, skb, reason, hash);
793+ }
794+ }
795 }
796
797 if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
798@@ -838,6 +859,68 @@ u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
799 return MT_TXD_TXP_BUF_SIZE;
800 }
801
802+u32
803+mt7915_wed_init_rx_buf(struct mtk_wed_device *wed, int pkt_num)
804+{
805+ struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
806+ struct mt7915_dev *dev;
807+ dma_addr_t buf_phys;
808+ void *buf;
809+ int i, token, buf_size;
810+
811+ buf_size = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_pkt_size) +
812+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
813+
814+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
815+ for (i = 0; i < pkt_num; i++) {
816+ struct mt76_txwi_cache *r = mt76_get_rxwi(&dev->mt76);
817+
818+ buf = page_frag_alloc(&wed->rx_page, buf_size, GFP_ATOMIC);
819+ if (!buf)
820+ return -ENOMEM;
821+
822+ buf_phys = dma_map_single(dev->mt76.dma_dev, buf, wed->wlan.rx_pkt_size,
823+ DMA_TO_DEVICE);
824+
825+ if (unlikely(dma_mapping_error(dev->mt76.dma_dev, buf_phys))) {
826+ skb_free_frag(buf);
827+ break;
828+ }
829+
830+ desc->buf0 = buf_phys;
831+
832+ token = mt76_rx_token_consume(&dev->mt76, buf, r, buf_phys);
833+
834+ desc->token |= FIELD_PREP(MT_DMA_CTL_TOKEN, token);
835+ desc++;
836+ }
837+
838+ return 0;
839+}
840+
841+void mt7915_wed_release_rx_buf(struct mtk_wed_device *wed)
842+{
843+ struct mt76_txwi_cache *rxwi;
844+ struct mt7915_dev *dev;
845+ int token;
846+
847+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
848+
849+ for (token = 0; token < dev->mt76.rx_token_size; token++) {
850+ rxwi = mt76_rx_token_release(&dev->mt76, token);
851+ if (!rxwi)
852+ continue;
853+
854+ dma_unmap_single(dev->mt76.dma_dev, rxwi->dma_addr,
855+ wed->wlan.rx_pkt_size, DMA_FROM_DEVICE);
856+ skb_free_frag(rxwi->buf);
857+ rxwi->buf = NULL;
858+
859+ mt76_put_rxwi(&dev->mt76, rxwi);
860+ }
861+ return;
862+}
863+
864 static void
865 mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
866 {
867@@ -1118,7 +1201,7 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
868 }
869
870 void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
871- struct sk_buff *skb)
872+ struct sk_buff *skb, u32 info)
873 {
874 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
875 __le32 *rxd = (__le32 *)skb->data;
876@@ -1152,7 +1235,7 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
877 dev_kfree_skb(skb);
878 break;
879 case PKT_TYPE_NORMAL:
880- if (!mt7915_mac_fill_rx(dev, skb)) {
881+ if (!mt7915_mac_fill_rx(dev, skb, q, info)) {
882 mt76_rx(&dev->mt76, q, skb);
883 return;
884 }
885diff --git a/mt7915/mcu.c b/mt7915/mcu.c
886index 9d2a7059..032eb1dd 100644
887--- a/mt7915/mcu.c
888+++ b/mt7915/mcu.c
889@@ -1704,6 +1704,7 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
890 struct ieee80211_sta *sta, bool enable)
891 {
892 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
893+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
894 struct mt7915_sta *msta;
895 struct sk_buff *skb;
896 int ret;
897@@ -1756,6 +1757,7 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
898 return ret;
899 }
900 out:
901+ mtk_wed_device_update_msg(wed, WED_WO_STA_REC, skb->data, skb->len);
902 return mt76_mcu_skb_send_msg(&dev->mt76, skb,
903 MCU_EXT_CMD(STA_REC_UPDATE), true);
904 }
905diff --git a/mt7915/mmio.c b/mt7915/mmio.c
906index b4a3120d..36b61a14 100755
907--- a/mt7915/mmio.c
908+++ b/mt7915/mmio.c
909@@ -28,6 +28,9 @@ static const u32 mt7915_reg[] = {
910 [FW_EXCEPTION_ADDR] = 0x219848,
911 [SWDEF_BASE_ADDR] = 0x41f200,
912 [EXCEPTION_BASE_ADDR] = 0x219848,
913+ [WED_TX_RING] = 0xd7300,
914+ [WED_RX_RING] = 0xd7410,
915+ [WED_RX_DATA_RING] = 0xd4500,
916 };
917
918 static const u32 mt7916_reg[] = {
919@@ -45,6 +48,9 @@ static const u32 mt7916_reg[] = {
920 [FW_EXCEPTION_ADDR] = 0x022050bc,
921 [SWDEF_BASE_ADDR] = 0x411400,
922 [EXCEPTION_BASE_ADDR] = 0x022050BC,
923+ [WED_TX_RING] = 0xd7300,
924+ [WED_RX_RING] = 0xd7410,
925+ [WED_RX_DATA_RING] = 0xd4540,
926 };
927
928 static const u32 mt7986_reg[] = {
929@@ -62,6 +68,9 @@ static const u32 mt7986_reg[] = {
930 [FW_EXCEPTION_ADDR] = 0x02204ffc,
931 [SWDEF_BASE_ADDR] = 0x411400,
932 [EXCEPTION_BASE_ADDR] = 0x02204FFC,
933+ [WED_TX_RING] = 0x24420,
934+ [WED_RX_RING] = 0x24520,
935+ [WED_RX_DATA_RING] = 0x24540,
936 };
937
938 static const u32 mt7915_offs[] = {
939@@ -722,12 +731,19 @@ mt7915_pci_wed_init(struct mt7915_dev *dev, struct device *pdev, int *irq)
940 wed->wlan.wpdma_int = base + MT_INT_SOURCE_CSR;
941 wed->wlan.wpdma_mask = base + MT_INT_MASK_CSR;
942 }
943+ wed->wlan.rx_pkt = MT7915_WED_RX_TOKEN_SIZE;
944+ wed->wlan.phy_base = base;
945 wed->wlan.wpdma_tx = base + MT_TXQ_WED_RING_BASE;
946 wed->wlan.wpdma_txfree = base + MT_RXQ_WED_RING_BASE;
947+ wed->wlan.wpdma_rx_glo = base + MT_WPDMA_GLO_CFG;
948+ wed->wlan.wpdma_rx = base + MT_RXQ_WED_DATA_RING_BASE;
949
950 wed->wlan.tx_tbit[0] = MT_WED_TX_DONE_BAND0;
951 wed->wlan.tx_tbit[1] = MT_WED_TX_DONE_BAND1;
952 wed->wlan.txfree_tbit = MT_WED_TX_FREE_DONE;
953+ wed->wlan.rx_tbit[0] = MT_WED_RX_DONE_BAND0;
954+ wed->wlan.rx_tbit[1] = MT_WED_RX_DONE_BAND1;
955+
956 wed->wlan.nbuf = 7168;
957 wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf;
958 wed->wlan.init_buf = mt7915_wed_init_buf;
959@@ -735,6 +751,12 @@ mt7915_pci_wed_init(struct mt7915_dev *dev, struct device *pdev, int *irq)
960 wed->wlan.offload_enable = mt7915_wed_offload_enable;
961 wed->wlan.offload_disable = mt7915_wed_offload_disable;
962
963+ wed->wlan.rx_nbuf = 65536;
964+ wed->wlan.rx_pkt_size = MTK_WED_RX_PKT_SIZE;
965+ wed->wlan.init_rx_buf = mt7915_wed_init_rx_buf;
966+ wed->wlan.release_rx_buf = mt7915_wed_release_rx_buf;
967+
968+ dev->mt76.rx_token_size = wed->wlan.rx_pkt + MT7915_RX_RING_SIZE * 2;
969 if (mtk_wed_device_attach(wed) != 0)
970 return 0;
971
972diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
973index d7a2e594..e29d0cb6 100644
974--- a/mt7915/mt7915.h
975+++ b/mt7915/mt7915.h
976@@ -68,6 +68,7 @@
977 #define MT7915_MAX_STA_TWT_AGRT 8
978 #define MT7915_MIN_TWT_DUR 64
979 #define MT7915_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 2)
980+#define MT7915_WED_RX_TOKEN_SIZE 12288
981
982 struct mt7915_vif;
983 struct mt7915_sta;
984@@ -530,7 +531,9 @@ void mt7915_wfsys_reset(struct mt7915_dev *dev);
985 irqreturn_t mt7915_irq_handler(int irq, void *dev_instance);
986 u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif);
987 u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
988-
989+u32 mt7915_wed_init_rx_buf(struct mtk_wed_device *wed,
990+ int pkt_num);
991+void mt7915_wed_release_rx_buf(struct mtk_wed_device *wed);
992 int mt7915_register_device(struct mt7915_dev *dev);
993 void mt7915_unregister_device(struct mt7915_dev *dev);
994 int mt7915_eeprom_init(struct mt7915_dev *dev);
995@@ -681,7 +684,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
996 struct mt76_tx_info *tx_info);
997 void mt7915_tx_token_put(struct mt7915_dev *dev);
998 void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
999- struct sk_buff *skb);
1000+ struct sk_buff *skb, u32 info);
1001 bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len);
1002 void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
1003 void mt7915_stats_work(struct work_struct *work);
1004diff --git a/mt7915/regs.h b/mt7915/regs.h
1005index ffda5f6b..08bf84ce 100644
1006--- a/mt7915/regs.h
1007+++ b/mt7915/regs.h
1008@@ -33,6 +33,9 @@ enum reg_rev {
1009 FW_EXCEPTION_ADDR,
1010 SWDEF_BASE_ADDR,
1011 EXCEPTION_BASE_ADDR,
1012+ WED_TX_RING,
1013+ WED_RX_RING,
1014+ WED_RX_DATA_RING,
1015 __MT_REG_MAX,
1016 };
1017
1018@@ -570,9 +573,13 @@ enum offs_rev {
1019 #define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
1020
1021 #define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
1022+#define MT_WFDMA0_EXT0_CFG MT_WFDMA0(0x2b0)
1023+#define MT_WFDMA0_EXT0_RXWB_KEEP BIT(10)
1024+
1025 #define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
1026 #define MT_WFDMA0_PRI_DLY_INT_CFG1 MT_WFDMA0(0x2f4)
1027 #define MT_WFDMA0_PRI_DLY_INT_CFG2 MT_WFDMA0(0x2f8)
1028+#define MT_WPDMA_GLO_CFG MT_WFDMA0(0x208)
1029
1030 #define MT_WFDMA0_MCU_HOST_INT_ENA MT_WFDMA0(0x1f4)
1031 #define MT_WFDMA0_MT_WA_WDT_INT BIT(31)
1032@@ -670,12 +677,15 @@ enum offs_rev {
1033 #define MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \
1034 MT_TXQ_ID(q)* 0x4)
1035
1036-#define MT_TXQ_WED_RING_BASE (!is_mt7986(mdev)? 0xd7300 : 0x24420)
1037-#define MT_RXQ_WED_RING_BASE (!is_mt7986(mdev)? 0xd7410 : 0x24520)
1038+#define MT_TXQ_WED_RING_BASE __REG(WED_TX_RING)
1039+#define MT_RXQ_WED_RING_BASE __REG(WED_RX_RING)
1040+#define MT_RXQ_WED_DATA_RING_BASE __REG(WED_RX_DATA_RING)
1041
1042 #define MT_WED_TX_DONE_BAND0 (is_mt7915(mdev)? 4 : 30)
1043 #define MT_WED_TX_DONE_BAND1 (is_mt7915(mdev)? 5 : 31)
1044 #define MT_WED_TX_FREE_DONE (is_mt7915(mdev)? 1 : 2)
1045+#define MT_WED_RX_DONE_BAND0 (is_mt7915(mdev)? 16 : 22)
1046+#define MT_WED_RX_DONE_BAND1 (is_mt7915(mdev)? 17 : 23)
1047
1048 #define MT_INT_SOURCE_CSR __REG(INT_SOURCE_CSR)
1049 #define MT_INT_MASK_CSR __REG(INT_MASK_CSR)
1050diff --git a/mt7921/mac.c b/mt7921/mac.c
1051old mode 100644
1052new mode 100755
1053index 247f5ebe..24cdd05f
1054--- a/mt7921/mac.c
1055+++ b/mt7921/mac.c
1056@@ -555,7 +555,7 @@ out:
1057 EXPORT_SYMBOL_GPL(mt7921_mac_add_txs);
1058
1059 void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1060- struct sk_buff *skb)
1061+ struct sk_buff *skb, u32 info)
1062 {
1063 struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
1064 __le32 *rxd = (__le32 *)skb->data;
1065diff --git a/mt7921/mt7921.h b/mt7921/mt7921.h
1066old mode 100644
1067new mode 100755
1068index efeb82cb..4b2e974b
1069--- a/mt7921/mt7921.h
1070+++ b/mt7921/mt7921.h
1071@@ -388,7 +388,7 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
1072 void mt7921_tx_worker(struct mt76_worker *w);
1073 void mt7921_tx_token_put(struct mt7921_dev *dev);
1074 void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1075- struct sk_buff *skb);
1076+ struct sk_buff *skb, u32 info);
1077 void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
1078 void mt7921_stats_work(struct work_struct *work);
1079 void mt7921_set_stream_he_caps(struct mt7921_phy *phy);
1080@@ -424,7 +424,7 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
1081
1082 bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len);
1083 void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1084- struct sk_buff *skb);
1085+ struct sk_buff *skb, u32 info);
1086 int mt7921e_driver_own(struct mt7921_dev *dev);
1087 int mt7921e_mac_reset(struct mt7921_dev *dev);
1088 int mt7921e_mcu_init(struct mt7921_dev *dev);
1089diff --git a/mt7921/pci_mac.c b/mt7921/pci_mac.c
1090old mode 100644
1091new mode 100755
1092index f6c605a5..71e21844
1093--- a/mt7921/pci_mac.c
1094+++ b/mt7921/pci_mac.c
1095@@ -182,7 +182,7 @@ bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len)
1096 }
1097
1098 void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1099- struct sk_buff *skb)
1100+ struct sk_buff *skb, u32 info)
1101 {
1102 struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
1103 __le32 *rxd = (__le32 *)skb->data;
1104@@ -196,7 +196,7 @@ void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1105 napi_consume_skb(skb, 1);
1106 break;
1107 default:
1108- mt7921_queue_rx_skb(mdev, q, skb);
1109+ mt7921_queue_rx_skb(mdev, q, skb, info);
1110 break;
1111 }
1112 }
1113diff --git a/tx.c b/tx.c
1114index 0457c3eb..9dae73b6 100644
1115--- a/tx.c
1116+++ b/tx.c
1117@@ -768,3 +768,37 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
1118 return txwi;
1119 }
1120 EXPORT_SYMBOL_GPL(mt76_token_release);
1121+
1122+int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
1123+ struct mt76_txwi_cache *r, dma_addr_t phys)
1124+{
1125+ int token;
1126+
1127+ spin_lock_bh(&dev->rx_token_lock);
1128+
1129+ token = idr_alloc(&dev->rx_token, r, 0, dev->rx_token_size, GFP_ATOMIC);
1130+
1131+ spin_unlock_bh(&dev->rx_token_lock);
1132+
1133+ r->buf = ptr;
1134+ r->dma_addr = phys;
1135+
1136+ return token;
1137+}
1138+EXPORT_SYMBOL_GPL(mt76_rx_token_consume);
1139+
1140+struct mt76_txwi_cache *
1141+mt76_rx_token_release(struct mt76_dev *dev, int token)
1142+{
1143+
1144+ struct mt76_txwi_cache *rxwi;
1145+
1146+ spin_lock_bh(&dev->rx_token_lock);
1147+
1148+ rxwi = idr_remove(&dev->rx_token, token);
1149+
1150+ spin_unlock_bh(&dev->rx_token_lock);
1151+
1152+ return rxwi;
1153+}
1154+EXPORT_SYMBOL_GPL(mt76_rx_token_release);
1155--
11562.18.0
1157