From ce4b2f430de33a73028d24acac09c0db3ce335e6 Mon Sep 17 00:00:00 2001
From: Bo Jiao <Bo.Jiao@mediatek.com>
Date: Mon, 6 Feb 2023 13:37:23 +0800
Subject: [PATCH 058/116] mtk: wifi: mt76: rework wed rx flow

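Rework the wed rx flow:

1. Split rx buffer tracking out of struct mt76_txwi_cache into a
   dedicated struct mt76_rxwi_cache, and protect the rxwi list with
   dev->lock instead of a separate wed_lock.
2. On wed rx completion, copy the payload into a new page fragment
   and recycle the original buffer: remap it and queue it straight
   back into the rx ring (mt76_dma_add_rx_buf() now accepts the rxwi
   to reuse), so wed-owned buffers never leave the ring.
3. Allocate wed rx buffers from a page_frag cache instead of full
   page allocations, and size the rx token space per driver via
   mt76_driver_ops::rx_token_size.
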
Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
Change-Id: Icd787345c811cb5ad30d9c7c1c5f9e5298bd3be6
---
 dma.c           | 125 +++++++++++++++++++++++++++++++-----------------
 mac80211.c      |   2 +-
 mt76.h          |  25 ++++++----
 mt7915/mmio.c   |   3 +-
 mt7915/mt7915.h |   1 +
 tx.c            |  16 +++----
 wed.c           |  57 ++++++++++++++--------
 7 files changed, 144 insertions(+), 85 deletions(-)

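Note for reviewers: the heart of the change is the non-flush completion
path in mt76_dma_get_buf(). A minimal sketch of that path follows, for
review only (the helper name wed_rx_copy_and_recycle() is illustrative
and does not exist in the tree; error unwinding is trimmed):

static void *wed_rx_copy_and_recycle(struct mt76_dev *dev,
				     struct mt76_queue *q,
				     struct mt76_rxwi_cache *r)
{
	struct mt76_queue_buf qbuf;
	void *buf;

	/* hand a private copy of the payload to the stack ... */
	buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
	if (!buf)
		return NULL;
	memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size));

	/* ... then remap the original wed buffer and queue it right
	 * back into the rx ring, reusing the same rxwi/token.
	 */
	r->dma_addr = dma_map_single(dev->dma_dev, r->ptr,
				     SKB_WITH_OVERHEAD(q->buf_size),
				     DMA_FROM_DEVICE);
	qbuf.addr = r->dma_addr;
	qbuf.len = SKB_WITH_OVERHEAD(q->buf_size);
	qbuf.skip_unmap = false;
	mt76_dma_add_rx_buf(dev, q, &qbuf, r->ptr, r);

	return buf;
}
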
diff --git a/dma.c b/dma.c
index 33a84f5fa..c54187bd6 100644
--- a/dma.c
+++ b/dma.c
@@ -64,17 +64,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 mt76_alloc_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
-	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
-	if (!t)
+	r = kzalloc(L1_CACHE_ALIGN(sizeof(*r)), GFP_ATOMIC);
+	if (!r)
 		return NULL;
 
-	t->ptr = NULL;
-	return t;
+	r->ptr = NULL;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -93,20 +93,20 @@ __mt76_get_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 __mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = NULL;
+	struct mt76_rxwi_cache *r = NULL;
 
-	spin_lock_bh(&dev->wed_lock);
+	spin_lock_bh(&dev->lock);
 	if (!list_empty(&dev->rxwi_cache)) {
-		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
+		r = list_first_entry(&dev->rxwi_cache, struct mt76_rxwi_cache,
 				     list);
-		list_del(&t->list);
+		list_del(&r->list);
 	}
-	spin_unlock_bh(&dev->wed_lock);
+	spin_unlock_bh(&dev->lock);
 
-	return t;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -120,13 +120,13 @@ mt76_get_txwi(struct mt76_dev *dev)
 	return mt76_alloc_txwi(dev);
 }
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
+	struct mt76_rxwi_cache *r = __mt76_get_rxwi(dev);
 
-	if (t)
-		return t;
+	if (r)
+		return r;
 
 	return mt76_alloc_rxwi(dev);
 }
@@ -145,14 +145,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
 EXPORT_SYMBOL_GPL(mt76_put_txwi);
 
 void
-mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r)
 {
-	if (!t)
+	if (!r)
 		return;
 
-	spin_lock_bh(&dev->wed_lock);
-	list_add(&t->list, &dev->rxwi_cache);
-	spin_unlock_bh(&dev->wed_lock);
+	spin_lock_bh(&dev->lock);
+	list_add(&r->list, &dev->rxwi_cache);
+	spin_unlock_bh(&dev->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_put_rxwi);
 
@@ -173,13 +173,13 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
 void
 mt76_free_pending_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	local_bh_disable();
-	while ((t = __mt76_get_rxwi(dev)) != NULL) {
-		if (t->ptr)
-			skb_free_frag(t->ptr);
-		kfree(t);
+	while ((r = __mt76_get_rxwi(dev)) != NULL) {
+		if (r->ptr)
+			skb_free_frag(r->ptr);
+		kfree(r);
 	}
 	local_bh_enable();
 }
@@ -225,10 +225,10 @@ void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
 
 static int
 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
-		    struct mt76_queue_buf *buf, void *data)
+		    struct mt76_queue_buf *buf, void *data,
+		    struct mt76_rxwi_cache *rxwi)
 {
 	struct mt76_queue_entry *entry = &q->entry[q->head];
-	struct mt76_txwi_cache *txwi = NULL;
 	struct mt76_desc *desc;
 	int idx = q->head;
 	u32 buf1 = 0, ctrl;
@@ -249,13 +249,15 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 #endif
 
 	if (mt76_queue_is_wed_rx(q)) {
-		txwi = mt76_get_rxwi(dev);
-		if (!txwi)
-			return -ENOMEM;
+		if (!rxwi) {
+			rxwi = mt76_get_rxwi(dev);
+			if (!rxwi)
+				return -ENOMEM;
+		}
 
-		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
+		rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
 		if (rx_token < 0) {
-			mt76_put_rxwi(dev, txwi);
+			mt76_put_rxwi(dev, rxwi);
 			return -ENOMEM;
 		}
 
@@ -271,7 +273,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 done:
 	entry->dma_addr[0] = buf->addr;
 	entry->dma_len[0] = buf->len;
-	entry->txwi = txwi;
+	entry->rxwi = rxwi;
 	entry->buf = data;
 	entry->wcid = 0xffff;
 	entry->skip_buf1 = true;
@@ -420,7 +422,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
 
 static void *
 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
-		 int *len, u32 *info, bool *more, bool *drop)
+		 int *len, u32 *info, bool *more, bool *drop, bool flush)
 {
 	struct mt76_queue_entry *e = &q->entry[idx];
 	struct mt76_desc *desc = &q->desc[idx];
@@ -445,20 +447,53 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 
 	if (mt76_queue_is_wed_rx(q)) {
 		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
-		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
+		struct mt76_rxwi_cache *r = mt76_rx_token_release(dev, token);
 
-		if (!t)
+		if (!r)
 			return NULL;
 
-		dma_unmap_single(dev->dma_dev, t->dma_addr,
+		dma_unmap_single(dev->dma_dev, r->dma_addr,
 				 SKB_WITH_OVERHEAD(q->buf_size),
 				 DMA_FROM_DEVICE);
 
-		buf = t->ptr;
-		t->dma_addr = 0;
-		t->ptr = NULL;
+		if (flush) {
+			buf = r->ptr;
+			r->dma_addr = 0;
+			r->ptr = NULL;
+
+			mt76_put_rxwi(dev, r);
+		} else {
+			struct mt76_queue_buf qbuf;
+
+			buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+			if (!buf)
+				return NULL;
+
+			memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size));
+
+			r->dma_addr = dma_map_single(dev->dma_dev, r->ptr,
+						     SKB_WITH_OVERHEAD(q->buf_size),
+						     DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(dev->dma_dev, r->dma_addr))) {
+				skb_free_frag(r->ptr);
+				mt76_put_rxwi(dev, r);
+				return NULL;
+			}
+
+			qbuf.addr = r->dma_addr;
+			qbuf.len = SKB_WITH_OVERHEAD(q->buf_size);
+			qbuf.skip_unmap = false;
+
+			if (mt76_dma_add_rx_buf(dev, q, &qbuf, r->ptr, r) < 0) {
+				dma_unmap_single(dev->dma_dev, r->dma_addr,
+						 SKB_WITH_OVERHEAD(q->buf_size),
+						 DMA_FROM_DEVICE);
+				skb_free_frag(r->ptr);
+				mt76_put_rxwi(dev, r);
+				return NULL;
+			}
+		}
 
-		mt76_put_rxwi(dev, t);
 		if (drop)
 			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
 	} else {
@@ -495,7 +530,7 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
 	q->tail = (q->tail + 1) % q->ndesc;
 	q->queued--;
 
-	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
+	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
 }
 
 static int
@@ -667,7 +702,7 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 done:
 		qbuf.len = len - offset;
 		qbuf.skip_unmap = false;
-		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
+		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf, NULL) < 0) {
 			dma_unmap_single(dev->dma_dev, addr, len,
 					 DMA_FROM_DEVICE);
 			skb_free_frag(buf);
diff --git a/mac80211.c b/mac80211.c
index f7b9ba6a0..c7b222837 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -595,7 +595,6 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	spin_lock_init(&dev->lock);
 	spin_lock_init(&dev->cc_lock);
 	spin_lock_init(&dev->status_lock);
-	spin_lock_init(&dev->wed_lock);
 	mutex_init(&dev->mutex);
 	init_waitqueue_head(&dev->tx_wait);
 
@@ -628,6 +627,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	INIT_LIST_HEAD(&dev->txwi_cache);
 	INIT_LIST_HEAD(&dev->rxwi_cache);
 	dev->token_size = dev->drv->token_size;
+	dev->rx_token_size = dev->drv->rx_token_size;
 
 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
 		skb_queue_head_init(&dev->rx_skb[i]);
diff --git a/mt76.h b/mt76.h
index e21c6537f..aecd0a7cd 100644
--- a/mt76.h
+++ b/mt76.h
@@ -205,6 +205,7 @@ struct mt76_queue_entry {
 	};
 	union {
 		struct mt76_txwi_cache *txwi;
+		struct mt76_rxwi_cache *rxwi;
 		struct urb *urb;
 		int buf_sz;
 	};
@@ -416,12 +417,16 @@ struct mt76_txwi_cache {
 	struct list_head list;
 	dma_addr_t dma_addr;
 
-	union {
-		struct sk_buff *skb;
-		void *ptr;
-	};
-
 	unsigned long jiffies;
+
+	struct sk_buff *skb;
+};
+
+struct mt76_rxwi_cache {
+	struct list_head list;
+	dma_addr_t dma_addr;
+
+	void *ptr;
 };
 
 struct mt76_rx_tid {
@@ -509,6 +514,7 @@ struct mt76_driver_ops {
 	u16 txwi_size;
 	u16 token_size;
 	u8 mcs_rates;
+	u16 rx_token_size;
 
 	void (*update_survey)(struct mt76_phy *phy);
 
@@ -886,7 +892,6 @@ struct mt76_dev {
 
 	struct ieee80211_hw *hw;
 
-	spinlock_t wed_lock;
 	spinlock_t lock;
 	spinlock_t cc_lock;
 
@@ -1568,8 +1573,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
 }
 
 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r);
+struct mt76_rxwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
 void mt76_free_pending_rxwi(struct mt76_dev *dev);
 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
 		      struct napi_struct *napi);
@@ -1748,9 +1753,9 @@ struct mt76_txwi_cache *
 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
 void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
-struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
+struct mt76_rxwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *r, dma_addr_t phys);
+			  struct mt76_rxwi_cache *r, dma_addr_t phys);
 
 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 6004d64f5..5938bd9f2 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -714,7 +714,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
 	wed->wlan.reset = mt7915_mmio_wed_reset;
 	wed->wlan.reset_complete = mt76_wed_reset_complete;
 
-	dev->mt76.rx_token_size = wed->wlan.rx_npkt;
+	dev->mt76.rx_token_size += wed->wlan.rx_npkt;
 
 	if (mtk_wed_device_attach(wed))
 		return 0;
@@ -921,6 +921,7 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
 				SURVEY_INFO_TIME_RX |
 				SURVEY_INFO_TIME_BSS_RX,
 		.token_size = MT7915_TOKEN_SIZE,
+		.rx_token_size = MT7915_RX_TOKEN_SIZE,
 		.tx_prepare_skb = mt7915_tx_prepare_skb,
 		.tx_complete_skb = mt76_connac_tx_complete_skb,
 		.rx_skb = mt7915_queue_rx_skb,
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
index a30d08eb0..f1e2c93a4 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -62,6 +62,7 @@
 #define MT7915_EEPROM_BLOCK_SIZE	16
 #define MT7915_HW_TOKEN_SIZE		4096
 #define MT7915_TOKEN_SIZE		8192
+#define MT7915_RX_TOKEN_SIZE		4096
 
 #define MT7915_CFEND_RATE_DEFAULT	0x49	/* OFDM 24M */
 #define MT7915_CFEND_RATE_11B		0x03	/* 11B LP, 11M */
diff --git a/tx.c b/tx.c
index ab42f69b8..46dae6e0a 100644
--- a/tx.c
+++ b/tx.c
@@ -851,16 +851,16 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
 EXPORT_SYMBOL_GPL(mt76_token_consume);
 
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *t, dma_addr_t phys)
+			  struct mt76_rxwi_cache *r, dma_addr_t phys)
 {
 	int token;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
+	token = idr_alloc(&dev->rx_token, r, 0, dev->rx_token_size,
 			  GFP_ATOMIC);
 	if (token >= 0) {
-		t->ptr = ptr;
-		t->dma_addr = phys;
+		r->ptr = ptr;
+		r->dma_addr = phys;
 	}
 	spin_unlock_bh(&dev->rx_token_lock);
 
@@ -897,15 +897,15 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
 }
 EXPORT_SYMBOL_GPL(mt76_token_release);
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_rx_token_release(struct mt76_dev *dev, int token)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	t = idr_remove(&dev->rx_token, token);
+	r = idr_remove(&dev->rx_token, token);
 	spin_unlock_bh(&dev->rx_token_lock);
 
-	return t;
+	return r;
 }
 EXPORT_SYMBOL_GPL(mt76_rx_token_release);
diff --git a/wed.c b/wed.c
index 8eca4d818..0a0b5c05c 100644
--- a/wed.c
+++ b/wed.c
@@ -9,28 +9,45 @@
 void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
 {
 	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
-	u32 length;
+	struct page *page;
 	int i;
 
-	length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
-				sizeof(struct skb_shared_info));
-
 	for (i = 0; i < dev->rx_token_size; i++) {
-		struct mt76_txwi_cache *t;
+		struct mt76_rxwi_cache *r;
 
-		t = mt76_rx_token_release(dev, i);
-		if (!t || !t->ptr)
+		r = mt76_rx_token_release(dev, i);
+		if (!r || !r->ptr)
 			continue;
 
-		dma_unmap_single(dev->dma_dev, t->dma_addr,
+		dma_unmap_single(dev->dma_dev, r->dma_addr,
 				 wed->wlan.rx_size, DMA_FROM_DEVICE);
-		__free_pages(virt_to_page(t->ptr), get_order(length));
-		t->ptr = NULL;
+		skb_free_frag(r->ptr);
+		r->ptr = NULL;
 
-		mt76_put_rxwi(dev, t);
+		mt76_put_rxwi(dev, r);
 	}
 
 	mt76_free_pending_rxwi(dev);
+
+	mt76_for_each_q_rx(dev, i) {
+		struct mt76_queue *q = &dev->q_rx[i];
+
+		if (mt76_queue_is_wed_rx(q)) {
+			if (!q->rx_page.va)
+				continue;
+
+			page = virt_to_page(q->rx_page.va);
+			__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+			memset(&q->rx_page, 0, sizeof(q->rx_page));
+		}
+	}
+
+	if (!wed->rx_buf_ring.rx_page.va)
+		return;
+
+	page = virt_to_page(wed->rx_buf_ring.rx_page.va);
+	__page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias);
+	memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page));
 }
 EXPORT_SYMBOL_GPL(mt76_wed_release_rx_buf);
 
@@ -46,18 +63,18 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 				sizeof(struct skb_shared_info));
 
 	for (i = 0; i < size; i++) {
-		struct mt76_txwi_cache *t = mt76_get_rxwi(dev);
+		struct mt76_rxwi_cache *r = mt76_get_rxwi(dev);
 		dma_addr_t addr;
 		struct page *page;
 		int token;
 		void *ptr;
 
-		if (!t)
+		if (!r)
 			goto unmap;
 
-		page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
-		if (!page) {
-			mt76_put_rxwi(dev, t);
+		ptr = page_frag_alloc(&wed->rx_buf_ring.rx_page, length, GFP_ATOMIC);
+		if (!ptr) {
+			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
 
@@ -67,17 +84,17 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 
 		if (unlikely(dma_mapping_error(dev->dev, addr))) {
 			skb_free_frag(ptr);
-			mt76_put_rxwi(dev, t);
+			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
 
 		desc->buf0 = cpu_to_le32(addr);
-		token = mt76_rx_token_consume(dev, ptr, t, addr);
+		token = mt76_rx_token_consume(dev, ptr, r, addr);
 		if (token < 0) {
 			dma_unmap_single(dev->dma_dev, addr,
 					 wed->wlan.rx_size, DMA_TO_DEVICE);
-			__free_pages(page, get_order(length));
-			mt76_put_rxwi(dev, t);
+			skb_free_frag(ptr);
+			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
 
-- 
2.39.2
