From 06c6ad7b99c07aedc5403506c91cbb8e11cf95df Mon Sep 17 00:00:00 2001
From: Bo Jiao <Bo.Jiao@mediatek.com>
Date: Mon, 6 Feb 2023 13:37:23 +0800
Subject: [PATCH 2000/2008] wifi: mt76: rework wed rx flow

Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
Change-Id: Icd787345c811cb5ad30d9c7c1c5f9e5298bd3be6
---
 dma.c           | 89 +++++++++++++++++++++++++------------------------
 mac80211.c      |  2 +-
 mt76.h          | 23 ++++++++++++++-----
 mt7915/dma.c    |  2 --
 mt7915/mmio.c   | 27 +++++++++++----
 mt7915/mt7915.h |  1 +
 tx.c            | 16 ++++-----
 7 files changed, 90 insertions(+), 70 deletions(-)

diff --git a/dma.c b/dma.c
index e1e9062b..35db73b9 100644
--- a/dma.c
+++ b/dma.c
@@ -59,17 +59,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 mt76_alloc_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
-	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
-	if (!t)
+	r = kzalloc(L1_CACHE_ALIGN(sizeof(*r)), GFP_ATOMIC);
+	if (!r)
 		return NULL;
 
-	t->ptr = NULL;
-	return t;
+	r->ptr = NULL;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -88,20 +88,20 @@ __mt76_get_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 __mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = NULL;
+	struct mt76_rxwi_cache *r = NULL;
 
-	spin_lock(&dev->wed_lock);
+	spin_lock(&dev->lock);
 	if (!list_empty(&dev->rxwi_cache)) {
-		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
+		r = list_first_entry(&dev->rxwi_cache, struct mt76_rxwi_cache,
				     list);
-		list_del(&t->list);
+		list_del(&r->list);
 	}
-	spin_unlock(&dev->wed_lock);
+	spin_unlock(&dev->lock);
 
-	return t;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -115,13 +115,13 @@ mt76_get_txwi(struct mt76_dev *dev)
 	return mt76_alloc_txwi(dev);
 }
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
+	struct mt76_rxwi_cache *r = __mt76_get_rxwi(dev);
 
-	if (t)
-		return t;
+	if (r)
+		return r;
 
 	return mt76_alloc_rxwi(dev);
 }
@@ -140,14 +140,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
 EXPORT_SYMBOL_GPL(mt76_put_txwi);
 
 void
-mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r)
 {
-	if (!t)
+	if (!r)
 		return;
 
-	spin_lock(&dev->wed_lock);
-	list_add(&t->list, &dev->rxwi_cache);
-	spin_unlock(&dev->wed_lock);
+	spin_lock(&dev->lock);
+	list_add(&r->list, &dev->rxwi_cache);
+	spin_unlock(&dev->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_put_rxwi);
 
@@ -168,13 +168,13 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
 void
 mt76_free_pending_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	local_bh_disable();
-	while ((t = __mt76_get_rxwi(dev)) != NULL) {
-		if (t->ptr)
-			mt76_put_page_pool_buf(t->ptr, false);
-		kfree(t);
+	while ((r = __mt76_get_rxwi(dev)) != NULL) {
+		if (r->ptr)
+			mt76_put_page_pool_buf(r->ptr, false);
+		kfree(r);
 	}
 	local_bh_enable();
 }
@@ -212,7 +212,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 {
 	struct mt76_desc *desc = &q->desc[q->head];
 	struct mt76_queue_entry *entry = &q->entry[q->head];
-	struct mt76_txwi_cache *txwi = NULL;
+	struct mt76_rxwi_cache *rxwi = NULL;
 	u32 buf1 = 0, ctrl;
 	int idx = q->head;
 	int rx_token;
@@ -220,13 +220,13 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
 
 	if (mt76_queue_is_wed_rx(q)) {
-		txwi = mt76_get_rxwi(dev);
-		if (!txwi)
+		rxwi = mt76_get_rxwi(dev);
+		if (!rxwi)
 			return -ENOMEM;
 
-		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
+		rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
 		if (rx_token < 0) {
-			mt76_put_rxwi(dev, txwi);
+			mt76_put_rxwi(dev, rxwi);
 			return -ENOMEM;
 		}
 
@@ -241,7 +241,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 
 	entry->dma_addr[0] = buf->addr;
 	entry->dma_len[0] = buf->len;
-	entry->txwi = txwi;
+	entry->rxwi = rxwi;
 	entry->buf = data;
 	entry->wcid = 0xffff;
 	entry->skip_buf1 = true;
@@ -404,20 +404,20 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	if (mt76_queue_is_wed_rx(q)) {
 		u32 buf1 = le32_to_cpu(desc->buf1);
 		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
-		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
+		struct mt76_rxwi_cache *r = mt76_rx_token_release(dev, token);
 
-		if (!t)
+		if (!r)
 			return NULL;
 
-		dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
+		dma_sync_single_for_cpu(dev->dma_dev, r->dma_addr,
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));
 
-		buf = t->ptr;
-		t->dma_addr = 0;
-		t->ptr = NULL;
+		buf = r->ptr;
+		r->dma_addr = 0;
+		r->ptr = NULL;
 
-		mt76_put_rxwi(dev, t);
+		mt76_put_rxwi(dev, r);
 
 		if (drop) {
 			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
@@ -977,16 +977,19 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 	mt76_for_each_q_rx(dev, i) {
 		struct mt76_queue *q = &dev->q_rx[i];
 
+		if (mt76_queue_is_wed_rx(q))
+			continue;
+
 		netif_napi_del(&dev->napi[i]);
 		mt76_dma_rx_cleanup(dev, q);
 
 		page_pool_destroy(q->page_pool);
 	}
 
-	mt76_free_pending_txwi(dev);
-	mt76_free_pending_rxwi(dev);
-
 	if (mtk_wed_device_active(&dev->mmio.wed))
 		mtk_wed_device_detach(&dev->mmio.wed);
+
+	mt76_free_pending_txwi(dev);
+	mt76_free_pending_rxwi(dev);
 }
 EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
diff --git a/mac80211.c b/mac80211.c
index 6430e6ee..5a203d31 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -613,7 +613,6 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	spin_lock_init(&dev->lock);
 	spin_lock_init(&dev->cc_lock);
 	spin_lock_init(&dev->status_lock);
-	spin_lock_init(&dev->wed_lock);
 	mutex_init(&dev->mutex);
 	init_waitqueue_head(&dev->tx_wait);
 
@@ -644,6 +643,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	INIT_LIST_HEAD(&dev->txwi_cache);
 	INIT_LIST_HEAD(&dev->rxwi_cache);
 	dev->token_size = dev->drv->token_size;
+	dev->rx_token_size = dev->drv->rx_token_size;
 
 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
 		skb_queue_head_init(&dev->rx_skb[i]);
diff --git a/mt76.h b/mt76.h
index 8abb6f41..72c3eb8f 100644
--- a/mt76.h
+++ b/mt76.h
@@ -180,6 +180,7 @@ struct mt76_queue_entry {
 	};
 	union {
 		struct mt76_txwi_cache *txwi;
+		struct mt76_rxwi_cache *rxwi;
 		struct urb *urb;
 		int buf_sz;
 	};
@@ -371,10 +372,14 @@ struct mt76_txwi_cache {
 	struct list_head list;
 	dma_addr_t dma_addr;
 
-	union {
-		struct sk_buff *skb;
-		void *ptr;
-	};
+	struct sk_buff *skb;
+};
+
+struct mt76_rxwi_cache {
+	struct list_head list;
+	dma_addr_t dma_addr;
+
+	void *ptr;
 };
 
 struct mt76_rx_tid {
@@ -460,6 +465,7 @@ struct mt76_driver_ops {
 	u16 txwi_size;
 	u16 token_size;
 	u8 mcs_rates;
+	u16 rx_token_size;
 
 	void (*update_survey)(struct mt76_phy *phy);
 
@@ -810,7 +816,6 @@ struct mt76_dev {
 
 	struct ieee80211_hw *hw;
 
-	spinlock_t wed_lock;
 	spinlock_t lock;
 	spinlock_t cc_lock;
 
@@ -1360,8 +1365,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
 }
 
 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r);
+struct mt76_rxwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
 void mt76_free_pending_rxwi(struct mt76_dev *dev);
 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
@@ -1515,9 +1520,9 @@ struct mt76_txwi_cache *
 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
 void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
-struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
+struct mt76_rxwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *r, dma_addr_t phys);
+			  struct mt76_rxwi_cache *r, dma_addr_t phys);
 int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
 static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
 {
diff --git a/mt7915/dma.c b/mt7915/dma.c
index 86a93ded..848e9843 100644
--- a/mt7915/dma.c
+++ b/mt7915/dma.c
@@ -493,7 +493,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
	    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
 		dev->mt76.q_rx[MT_RXQ_MAIN].flags =
 			MT_WED_Q_RX(MT7915_RXQ_BAND0);
-		dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
 	}
 
 	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
@@ -530,7 +529,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
	    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
 		dev->mt76.q_rx[MT_RXQ_BAND1].flags =
 			MT_WED_Q_RX(MT7915_RXQ_BAND1);
-		dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
 	}
 
 	/* rx data queue for band1 */
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 984b5f60..46256842 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -600,16 +600,28 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 
 	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
 	for (i = 0; i < dev->mt76.rx_token_size; i++) {
-		struct mt76_txwi_cache *t;
+		struct mt76_rxwi_cache *r;
 
-		t = mt76_rx_token_release(&dev->mt76, i);
-		if (!t || !t->ptr)
+		r = mt76_rx_token_release(&dev->mt76, i);
+		if (!r || !r->ptr)
 			continue;
 
-		mt76_put_page_pool_buf(t->ptr, false);
-		t->ptr = NULL;
+		mt76_put_page_pool_buf(r->ptr, false);
+		r->ptr = NULL;
 
-		mt76_put_rxwi(&dev->mt76, t);
+		mt76_put_rxwi(&dev->mt76, r);
+	}
+
+	mt76_for_each_q_rx(dev, i) {
+		struct mt76_queue *q = &dev->q_rx[i];
+
+		if (!mt76_queue_is_wed_rx(q))
+			continue;
+
+		netif_napi_del(&dev->napi[i]);
+		mt76_dma_rx_cleanup(dev, q);
+
+		page_pool_destroy(q->page_pool);
 	}
 
 	mt76_free_pending_rxwi(&dev->mt76);
@@ -812,7 +824,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
 	wed->wlan.reset = mt7915_mmio_wed_reset;
 	wed->wlan.reset_complete = mt7915_mmio_wed_reset_complete;
 
-	dev->mt76.rx_token_size = wed->wlan.rx_npkt;
+	dev->mt76.rx_token_size += wed->wlan.rx_npkt;
 
 	if (mtk_wed_device_attach(wed))
 		return 0;
@@ -1018,6 +1030,7 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
				SURVEY_INFO_TIME_RX |
				SURVEY_INFO_TIME_BSS_RX,
 		.token_size = MT7915_TOKEN_SIZE,
+		.rx_token_size = MT7915_RX_TOKEN_SIZE,
 		.tx_prepare_skb = mt7915_tx_prepare_skb,
 		.tx_complete_skb = mt76_connac_tx_complete_skb,
 		.rx_skb = mt7915_queue_rx_skb,
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
index 103cd0d7..8ee62f63 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -62,6 +62,7 @@
 #define MT7915_EEPROM_BLOCK_SIZE	16
 #define MT7915_HW_TOKEN_SIZE		4096
 #define MT7915_TOKEN_SIZE		8192
+#define MT7915_RX_TOKEN_SIZE		4096
 
 #define MT7915_CFEND_RATE_DEFAULT	0x49	/* OFDM 24M */
 #define MT7915_CFEND_RATE_11B		0x03	/* 11B LP, 11M */
diff --git a/tx.c b/tx.c
index 72b3ec71..6cb71f34 100644
--- a/tx.c
+++ b/tx.c
@@ -761,16 +761,16 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
 EXPORT_SYMBOL_GPL(mt76_token_consume);
 
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *t, dma_addr_t phys)
+			  struct mt76_rxwi_cache *r, dma_addr_t phys)
 {
 	int token;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
+	token = idr_alloc(&dev->rx_token, r, 0, dev->rx_token_size,
			  GFP_ATOMIC);
 	if (token >= 0) {
-		t->ptr = ptr;
-		t->dma_addr = phys;
+		r->ptr = ptr;
+		r->dma_addr = phys;
 	}
 	spin_unlock_bh(&dev->rx_token_lock);
 
@@ -807,15 +807,15 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
 }
 EXPORT_SYMBOL_GPL(mt76_token_release);
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_rx_token_release(struct mt76_dev *dev, int token)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	t = idr_remove(&dev->rx_token, token);
+	r = idr_remove(&dev->rx_token, token);
 	spin_unlock_bh(&dev->rx_token_lock);
 
-	return t;
+	return r;
 }
 EXPORT_SYMBOL_GPL(mt76_rx_token_release);
-- 
2.39.2
