From 78b8b86fce85c8ff6fad935bd9bd8b2df0e151ab Mon Sep 17 00:00:00 2001
From: Bo Jiao <Bo.Jiao@mediatek.com>
Date: Mon, 6 Feb 2023 13:37:23 +0800
Subject: [PATCH 65/98] wifi: mt76: rework wed rx flow

Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
Change-Id: Icd787345c811cb5ad30d9c7c1c5f9e5298bd3be6
---
 dma.c           | 125 +++++++++++++++++++++++++++++++-----------------
 mac80211.c      |   2 +-
 mmio.c          |  58 ++++++++++++++--------
 mt76.h          |  23 +++++----
 mt7915/mmio.c   |   3 +-
 mt7915/mt7915.h |   1 +
 tx.c            |  16 +++----
 7 files changed, 143 insertions(+), 85 deletions(-)

diff --git a/dma.c b/dma.c
index 06b76ea..f48ec57 100644
--- a/dma.c
+++ b/dma.c
@@ -64,17 +64,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 mt76_alloc_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
-	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
-	if (!t)
+	r = kzalloc(L1_CACHE_ALIGN(sizeof(*r)), GFP_ATOMIC);
+	if (!r)
 		return NULL;
 
-	t->ptr = NULL;
-	return t;
+	r->ptr = NULL;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -93,20 +93,20 @@ __mt76_get_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 __mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = NULL;
+	struct mt76_rxwi_cache *r = NULL;
 
-	spin_lock(&dev->wed_lock);
+	spin_lock(&dev->lock);
 	if (!list_empty(&dev->rxwi_cache)) {
-		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
+		r = list_first_entry(&dev->rxwi_cache, struct mt76_rxwi_cache,
				     list);
-		list_del(&t->list);
+		list_del(&r->list);
 	}
-	spin_unlock(&dev->wed_lock);
+	spin_unlock(&dev->lock);
 
-	return t;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -120,13 +120,13 @@ mt76_get_txwi(struct mt76_dev *dev)
 	return mt76_alloc_txwi(dev);
 }
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
+	struct mt76_rxwi_cache *r = __mt76_get_rxwi(dev);
 
-	if (t)
-		return t;
+	if (r)
+		return r;
 
 	return mt76_alloc_rxwi(dev);
 }
@@ -145,14 +145,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
 EXPORT_SYMBOL_GPL(mt76_put_txwi);
 
 void
-mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r)
 {
-	if (!t)
+	if (!r)
 		return;
 
-	spin_lock(&dev->wed_lock);
-	list_add(&t->list, &dev->rxwi_cache);
-	spin_unlock(&dev->wed_lock);
+	spin_lock(&dev->lock);
+	list_add(&r->list, &dev->rxwi_cache);
+	spin_unlock(&dev->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_put_rxwi);
 
@@ -173,13 +173,13 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
 void
 mt76_free_pending_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	local_bh_disable();
-	while ((t = __mt76_get_rxwi(dev)) != NULL) {
-		if (t->ptr)
-			skb_free_frag(t->ptr);
-		kfree(t);
+	while ((r = __mt76_get_rxwi(dev)) != NULL) {
+		if (r->ptr)
+			skb_free_frag(r->ptr);
+		kfree(r);
 	}
 	local_bh_enable();
 }
@@ -227,10 +227,10 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
 
 static int
 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
-		    struct mt76_queue_buf *buf, void *data)
+		    struct mt76_queue_buf *buf, void *data,
+		    struct mt76_rxwi_cache *rxwi)
 {
 	struct mt76_queue_entry *entry = &q->entry[q->head];
-	struct mt76_txwi_cache *txwi = NULL;
 	struct mt76_desc *desc;
 	u32 buf1 = 0, ctrl;
 	int idx = q->head;
@@ -248,13 +248,15 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
 
 	if (mt76_queue_is_wed_rx(q)) {
-		txwi = mt76_get_rxwi(dev);
-		if (!txwi)
-			return -ENOMEM;
+		if (!rxwi) {
+			rxwi = mt76_get_rxwi(dev);
+			if (!rxwi)
+				return -ENOMEM;
+		}
 
-		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
+		rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
 		if (rx_token < 0) {
-			mt76_put_rxwi(dev, txwi);
+			mt76_put_rxwi(dev, rxwi);
 			return -ENOMEM;
 		}
 
@@ -270,7 +272,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 done:
 	entry->dma_addr[0] = buf->addr;
 	entry->dma_len[0] = buf->len;
-	entry->txwi = txwi;
+	entry->rxwi = rxwi;
 	entry->buf = data;
 	entry->wcid = 0xffff;
 	entry->skip_buf1 = true;
@@ -412,7 +414,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
 
 static void *
 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
-		 int *len, u32 *info, bool *more, bool *drop)
+		 int *len, u32 *info, bool *more, bool *drop, bool flush)
 {
 	struct mt76_queue_entry *e = &q->entry[idx];
 	struct mt76_desc *desc = &q->desc[idx];
@@ -440,20 +442,53 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	if (mt76_queue_is_wed_rx(q)) {
 		u32 buf1 = le32_to_cpu(desc->buf1);
 		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
-		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
+		struct mt76_rxwi_cache *r = mt76_rx_token_release(dev, token);
 
-		if (!t)
+		if (!r)
 			return NULL;
 
-		dma_unmap_single(dev->dma_dev, t->dma_addr,
+		dma_unmap_single(dev->dma_dev, r->dma_addr,
				 SKB_WITH_OVERHEAD(q->buf_size),
				 DMA_FROM_DEVICE);
 
-		buf = t->ptr;
-		t->dma_addr = 0;
-		t->ptr = NULL;
+		if (flush) {
+			buf = r->ptr;
+			r->dma_addr = 0;
+			r->ptr = NULL;
+
+			mt76_put_rxwi(dev, r);
+		} else {
+			struct mt76_queue_buf qbuf;
+
+			buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+			if (!buf)
+				return NULL;
+
+			memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size));
+
+			r->dma_addr = dma_map_single(dev->dma_dev, r->ptr,
+						     SKB_WITH_OVERHEAD(q->buf_size),
+						     DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(dev->dma_dev, r->dma_addr))) {
+				skb_free_frag(r->ptr);
+				mt76_put_rxwi(dev, r);
+				return NULL;
+			}
+
+			qbuf.addr = r->dma_addr;
+			qbuf.len = SKB_WITH_OVERHEAD(q->buf_size);
+			qbuf.skip_unmap = false;
+
+			if (mt76_dma_add_rx_buf(dev, q, &qbuf, r->ptr, r) < 0) {
+				dma_unmap_single(dev->dma_dev, r->dma_addr,
+						 SKB_WITH_OVERHEAD(q->buf_size),
+						 DMA_FROM_DEVICE);
+				skb_free_frag(r->ptr);
+				mt76_put_rxwi(dev, r);
+				return NULL;
+			}
+		}
 
-		mt76_put_rxwi(dev, t);
 		if (drop)
 			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
 	} else {
@@ -490,7 +525,7 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
 	q->tail = (q->tail + 1) % q->ndesc;
 	q->queued--;
 
-	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
+	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
 }
 
 static int
@@ -658,7 +693,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 done:
 		qbuf.len = len - offset;
 		qbuf.skip_unmap = false;
-		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
+		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf, NULL) < 0) {
 			dma_unmap_single(dev->dma_dev, addr, len,
					 DMA_FROM_DEVICE);
 			skb_free_frag(buf);
diff --git a/mac80211.c b/mac80211.c
index 3715c73..4552bc2 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -575,7 +575,6 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	spin_lock_init(&dev->lock);
 	spin_lock_init(&dev->cc_lock);
 	spin_lock_init(&dev->status_lock);
-	spin_lock_init(&dev->wed_lock);
 	mutex_init(&dev->mutex);
 	init_waitqueue_head(&dev->tx_wait);
 
@@ -608,6 +607,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	INIT_LIST_HEAD(&dev->txwi_cache);
 	INIT_LIST_HEAD(&dev->rxwi_cache);
 	dev->token_size = dev->drv->token_size;
+	dev->rx_token_size = dev->drv->rx_token_size;
 
 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
 		skb_queue_head_init(&dev->rx_skb[i]);
diff --git a/mmio.c b/mmio.c
index 5fb8392..f7495f6 100644
--- a/mmio.c
+++ b/mmio.c
@@ -89,28 +89,45 @@ EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
 void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 {
 	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
-	u32 length;
+	struct page *page;
 	int i;
 
-	length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
-				sizeof(struct skb_shared_info));
-
 	for (i = 0; i < dev->rx_token_size; i++) {
-		struct mt76_txwi_cache *t;
+		struct mt76_rxwi_cache *r;
 
-		t = mt76_rx_token_release(dev, i);
-		if (!t || !t->ptr)
+		r = mt76_rx_token_release(dev, i);
+		if (!r || !r->ptr)
 			continue;
 
-		dma_unmap_single(dev->dma_dev, t->dma_addr,
+		dma_unmap_single(dev->dma_dev, r->dma_addr,
				 wed->wlan.rx_size, DMA_FROM_DEVICE);
-		__free_pages(virt_to_page(t->ptr), get_order(length));
-		t->ptr = NULL;
+		skb_free_frag(r->ptr);
+		r->ptr = NULL;
 
-		mt76_put_rxwi(dev, t);
+		mt76_put_rxwi(dev, r);
 	}
 
 	mt76_free_pending_rxwi(dev);
+
+	mt76_for_each_q_rx(dev, i) {
+		struct mt76_queue *q = &dev->q_rx[i];
+
+		if (mt76_queue_is_wed_rx(q)) {
+			if (!q->rx_page.va)
+				continue;
+
+			page = virt_to_page(q->rx_page.va);
+			__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+			memset(&q->rx_page, 0, sizeof(q->rx_page));
+		}
+	}
+
+	if (!wed->rx_buf_ring.rx_page.va)
+		return;
+
+	page = virt_to_page(wed->rx_buf_ring.rx_page.va);
+	__page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias);
+	memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page));
 }
 EXPORT_SYMBOL_GPL(mt76_mmio_wed_release_rx_buf);
 
@@ -125,18 +142,17 @@ u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
				sizeof(struct skb_shared_info));
 
 	for (i = 0; i < size; i++) {
-		struct mt76_txwi_cache *t = mt76_get_rxwi(dev);
+		struct mt76_rxwi_cache *r = mt76_get_rxwi(dev);
 		dma_addr_t phy_addr;
-		struct page *page;
 		int token;
 		void *ptr;
 
-		if (!t)
+		if (!r)
 			goto unmap;
 
-		page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
-		if (!page) {
-			mt76_put_rxwi(dev, t);
+		ptr = page_frag_alloc(&wed->rx_buf_ring.rx_page, length, GFP_ATOMIC);
+		if (!ptr) {
+			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
 
@@ -146,17 +162,17 @@ u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 
 		if (unlikely(dma_mapping_error(dev->dev, phy_addr))) {
 			skb_free_frag(ptr);
-			mt76_put_rxwi(dev, t);
+			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
 
 		desc->buf0 = cpu_to_le32(phy_addr);
-		token = mt76_rx_token_consume(dev, ptr, t, phy_addr);
+		token = mt76_rx_token_consume(dev, ptr, r, phy_addr);
 		if (token < 0) {
 			dma_unmap_single(dev->dma_dev, phy_addr,
					 wed->wlan.rx_size, DMA_TO_DEVICE);
-			__free_pages(page, get_order(length));
-			mt76_put_rxwi(dev, t);
+			skb_free_frag(ptr);
+			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
 
diff --git a/mt76.h b/mt76.h
index 3af97e5..b960f3d 100644
--- a/mt76.h
+++ b/mt76.h
@@ -200,6 +200,7 @@ struct mt76_queue_entry {
 	};
 	union {
 		struct mt76_txwi_cache *txwi;
+		struct mt76_rxwi_cache *rxwi;
 		struct urb *urb;
 		int buf_sz;
 	};
@@ -410,10 +411,14 @@ struct mt76_txwi_cache {
 	struct list_head list;
 	dma_addr_t dma_addr;
 
-	union {
-		struct sk_buff *skb;
-		void *ptr;
-	};
+	struct sk_buff *skb;
+};
+
+struct mt76_rxwi_cache {
+	struct list_head list;
+	dma_addr_t dma_addr;
+
+	void *ptr;
 };
 
 struct mt76_rx_tid {
@@ -499,6 +504,7 @@ struct mt76_driver_ops {
 	u16 txwi_size;
 	u16 token_size;
 	u8 mcs_rates;
+	u16 rx_token_size;
 
 	void (*update_survey)(struct mt76_phy *phy);
 
@@ -858,7 +864,6 @@ struct mt76_dev {
 
 	struct ieee80211_hw *hw;
 
-	spinlock_t wed_lock;
 	spinlock_t lock;
 	spinlock_t cc_lock;
 
@@ -1521,8 +1526,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
 }
 
 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r);
+struct mt76_rxwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
 void mt76_free_pending_rxwi(struct mt76_dev *dev);
 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
@@ -1703,9 +1708,9 @@ struct mt76_txwi_cache *
 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
 void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
-struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
+struct mt76_rxwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *r, dma_addr_t phys);
+			  struct mt76_rxwi_cache *r, dma_addr_t phys);
 
 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 85cb3fe..690cac5 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -687,7 +687,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
 	wed->wlan.reset = mt7915_mmio_wed_reset;
 	wed->wlan.reset_complete = mt7915_mmio_wed_reset_complete;
 
-	dev->mt76.rx_token_size = wed->wlan.rx_npkt;
+	dev->mt76.rx_token_size += wed->wlan.rx_npkt;
 
 	if (mtk_wed_device_attach(wed))
		return 0;
@@ -893,6 +893,7 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
				SURVEY_INFO_TIME_RX |
				SURVEY_INFO_TIME_BSS_RX,
		.token_size = MT7915_TOKEN_SIZE,
+		.rx_token_size = MT7915_RX_TOKEN_SIZE,
		.tx_prepare_skb = mt7915_tx_prepare_skb,
		.tx_complete_skb = mt76_connac_tx_complete_skb,
		.rx_skb = mt7915_queue_rx_skb,
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
index d317c52..91eb5ad 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -62,6 +62,7 @@
 #define MT7915_EEPROM_BLOCK_SIZE	16
 #define MT7915_HW_TOKEN_SIZE		4096
 #define MT7915_TOKEN_SIZE		8192
+#define MT7915_RX_TOKEN_SIZE		4096
 
 #define MT7915_CFEND_RATE_DEFAULT	0x49	/* OFDM 24M */
 #define MT7915_CFEND_RATE_11B		0x03	/* 11B LP, 11M */
diff --git a/tx.c b/tx.c
index 1809b03..74bf0de 100644
--- a/tx.c
+++ b/tx.c
@@ -843,16 +843,16 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
 EXPORT_SYMBOL_GPL(mt76_token_consume);
 
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *t, dma_addr_t phys)
+			  struct mt76_rxwi_cache *r, dma_addr_t phys)
 {
 	int token;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
+	token = idr_alloc(&dev->rx_token, r, 0, dev->rx_token_size,
			  GFP_ATOMIC);
 	if (token >= 0) {
-		t->ptr = ptr;
-		t->dma_addr = phys;
+		r->ptr = ptr;
+		r->dma_addr = phys;
 	}
 	spin_unlock_bh(&dev->rx_token_lock);
 
@@ -889,15 +889,15 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
 }
 EXPORT_SYMBOL_GPL(mt76_token_release);
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_rx_token_release(struct mt76_dev *dev, int token)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	t = idr_remove(&dev->rx_token, token);
+	r = idr_remove(&dev->rx_token, token);
 	spin_unlock_bh(&dev->rx_token_lock);
 
-	return t;
+	return r;
 }
 EXPORT_SYMBOL_GPL(mt76_rx_token_release);
-- 
2.18.0
