From 0d65df1371fce544fe40c7458ed572a9cb813a48 Mon Sep 17 00:00:00 2001
From: Bo Jiao <Bo.Jiao@mediatek.com>
Date: Mon, 6 Feb 2023 13:37:23 +0800
Subject: [PATCH 2000/2012] wifi: mt76: rework wed rx flow

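Track WED rx buffers in a dedicated struct mt76_rxwi_cache instead of
reusing struct mt76_txwi_cache, and serialize the rxwi list with
dev->lock so the separate wed_lock can be dropped. Let drivers declare
the size of their rx token pool through mt76_driver_ops::rx_token_size,
and defer cleanup of WED rx queues until the WED device is detached,
releasing them from mt7915_mmio_wed_release_rx_buf() instead of
mt76_dma_cleanup().
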
Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
Change-Id: Icd787345c811cb5ad30d9c7c1c5f9e5298bd3be6
---
 dma.c           | 89 +++++++++++++++++++++++++------------------------
 mac80211.c      |  2 +-
 mt76.h          | 23 ++++++++-----
 mt7915/dma.c    |  2 --
 mt7915/mmio.c   | 27 +++++++++++----
 mt7915/mt7915.h |  1 +
 tx.c            | 16 ++++-----
 7 files changed, 90 insertions(+), 70 deletions(-)

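[Reviewer note, ignored by git am: the hunks below split the old
dual-purpose txwi cache into a dedicated rx-side cache guarded by
dev->lock. The following condensed sketch models the resulting get/put
flow; "demo_dev", "demo_get_rxwi" and "demo_put_rxwi" are hypothetical
stand-ins for the relevant mt76_dev fields and helpers, everything
else mirrors the hunks below.]

    #include <linux/cache.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* rx-side buffer descriptor, as introduced in mt76.h below */
    struct mt76_rxwi_cache {
    	struct list_head list;
    	dma_addr_t dma_addr;
    	void *ptr;
    };

    /* hypothetical stand-in for the mt76_dev fields the cache uses */
    struct demo_dev {
    	spinlock_t lock;	/* dev->lock now guards rxwi_cache */
    	struct list_head rxwi_cache;
    };

    static struct mt76_rxwi_cache *demo_get_rxwi(struct demo_dev *dev)
    {
    	struct mt76_rxwi_cache *r = NULL;

    	spin_lock(&dev->lock);
    	if (!list_empty(&dev->rxwi_cache)) {
    		r = list_first_entry(&dev->rxwi_cache,
    				     struct mt76_rxwi_cache, list);
    		list_del(&r->list);
    	}
    	spin_unlock(&dev->lock);
    	if (r)
    		return r;

    	/* cache empty: allocate, as mt76_alloc_rxwi() does (ptr zeroed) */
    	return kzalloc(L1_CACHE_ALIGN(sizeof(*r)), GFP_ATOMIC);
    }

    static void demo_put_rxwi(struct demo_dev *dev, struct mt76_rxwi_cache *r)
    {
    	if (!r)
    		return;

    	spin_lock(&dev->lock);
    	list_add(&r->list, &dev->rxwi_cache);
    	spin_unlock(&dev->lock);
    }
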
diff --git a/dma.c b/dma.c
index f5091a35b..8182f6dc4 100644
--- a/dma.c
+++ b/dma.c
@@ -64,17 +64,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 mt76_alloc_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
-	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
-	if (!t)
+	r = kzalloc(L1_CACHE_ALIGN(sizeof(*r)), GFP_ATOMIC);
+	if (!r)
 		return NULL;
 
-	t->ptr = NULL;
-	return t;
+	r->ptr = NULL;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -93,20 +93,20 @@ __mt76_get_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 __mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = NULL;
+	struct mt76_rxwi_cache *r = NULL;
 
-	spin_lock(&dev->wed_lock);
+	spin_lock(&dev->lock);
 	if (!list_empty(&dev->rxwi_cache)) {
-		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
+		r = list_first_entry(&dev->rxwi_cache, struct mt76_rxwi_cache,
 				     list);
-		list_del(&t->list);
+		list_del(&r->list);
 	}
-	spin_unlock(&dev->wed_lock);
+	spin_unlock(&dev->lock);
 
-	return t;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -120,13 +120,13 @@ mt76_get_txwi(struct mt76_dev *dev)
 	return mt76_alloc_txwi(dev);
 }
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
+	struct mt76_rxwi_cache *r = __mt76_get_rxwi(dev);
 
-	if (t)
-		return t;
+	if (r)
+		return r;
 
 	return mt76_alloc_rxwi(dev);
 }
@@ -145,14 +145,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
 EXPORT_SYMBOL_GPL(mt76_put_txwi);
 
 void
-mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r)
 {
-	if (!t)
+	if (!r)
 		return;
 
-	spin_lock(&dev->wed_lock);
-	list_add(&t->list, &dev->rxwi_cache);
-	spin_unlock(&dev->wed_lock);
+	spin_lock(&dev->lock);
+	list_add(&r->list, &dev->rxwi_cache);
+	spin_unlock(&dev->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_put_rxwi);
 
@@ -173,13 +173,13 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
 void
 mt76_free_pending_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	local_bh_disable();
-	while ((t = __mt76_get_rxwi(dev)) != NULL) {
-		if (t->ptr)
-			mt76_put_page_pool_buf(t->ptr, false);
-		kfree(t);
+	while ((r = __mt76_get_rxwi(dev)) != NULL) {
+		if (r->ptr)
+			mt76_put_page_pool_buf(r->ptr, false);
+		kfree(r);
 	}
 	local_bh_enable();
 }
@@ -217,7 +217,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 {
 	struct mt76_desc *desc = &q->desc[q->head];
 	struct mt76_queue_entry *entry = &q->entry[q->head];
-	struct mt76_txwi_cache *txwi = NULL;
+	struct mt76_rxwi_cache *rxwi = NULL;
 	u32 buf1 = 0, ctrl;
 	int idx = q->head;
 	int rx_token;
@@ -225,13 +225,13 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
 
 	if (mt76_queue_is_wed_rx(q)) {
-		txwi = mt76_get_rxwi(dev);
-		if (!txwi)
+		rxwi = mt76_get_rxwi(dev);
+		if (!rxwi)
 			return -ENOMEM;
 
-		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
+		rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
 		if (rx_token < 0) {
-			mt76_put_rxwi(dev, txwi);
+			mt76_put_rxwi(dev, rxwi);
 			return -ENOMEM;
 		}
 
@@ -246,7 +246,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 
 	entry->dma_addr[0] = buf->addr;
 	entry->dma_len[0] = buf->len;
-	entry->txwi = txwi;
+	entry->rxwi = rxwi;
 	entry->buf = data;
 	entry->wcid = 0xffff;
 	entry->skip_buf1 = true;
@@ -406,20 +406,20 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	if (mt76_queue_is_wed_rx(q)) {
 		u32 buf1 = le32_to_cpu(desc->buf1);
 		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
-		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
+		struct mt76_rxwi_cache *r = mt76_rx_token_release(dev, token);
 
-		if (!t)
+		if (!r)
 			return NULL;
 
-		dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
+		dma_sync_single_for_cpu(dev->dma_dev, r->dma_addr,
 				SKB_WITH_OVERHEAD(q->buf_size),
 				page_pool_get_dma_dir(q->page_pool));
 
-		buf = t->ptr;
-		t->dma_addr = 0;
-		t->ptr = NULL;
+		buf = r->ptr;
+		r->dma_addr = 0;
+		r->ptr = NULL;
 
-		mt76_put_rxwi(dev, t);
+		mt76_put_rxwi(dev, r);
 
 		if (drop) {
 			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
@@ -979,16 +979,19 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 	mt76_for_each_q_rx(dev, i) {
 		struct mt76_queue *q = &dev->q_rx[i];
 
+		if (mt76_queue_is_wed_rx(q))
+			continue;
+
 		netif_napi_del(&dev->napi[i]);
 		mt76_dma_rx_cleanup(dev, q);
 
 		page_pool_destroy(q->page_pool);
 	}
 
-	mt76_free_pending_txwi(dev);
-	mt76_free_pending_rxwi(dev);
-
 	if (mtk_wed_device_active(&dev->mmio.wed))
 		mtk_wed_device_detach(&dev->mmio.wed);
+
+	mt76_free_pending_txwi(dev);
+	mt76_free_pending_rxwi(dev);
 }
 EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
diff --git a/mac80211.c b/mac80211.c
index ef4b83244..abad16f31 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -617,7 +617,6 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	spin_lock_init(&dev->lock);
 	spin_lock_init(&dev->cc_lock);
 	spin_lock_init(&dev->status_lock);
-	spin_lock_init(&dev->wed_lock);
 	mutex_init(&dev->mutex);
 	init_waitqueue_head(&dev->tx_wait);
 
@@ -650,6 +649,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	INIT_LIST_HEAD(&dev->txwi_cache);
 	INIT_LIST_HEAD(&dev->rxwi_cache);
 	dev->token_size = dev->drv->token_size;
+	dev->rx_token_size = dev->drv->rx_token_size;
 
 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
 		skb_queue_head_init(&dev->rx_skb[i]);
diff --git a/mt76.h b/mt76.h
index 9f84389b7..99756dce2 100644
--- a/mt76.h
+++ b/mt76.h
@@ -180,6 +180,7 @@ struct mt76_queue_entry {
 	};
 	union {
 		struct mt76_txwi_cache *txwi;
+		struct mt76_rxwi_cache *rxwi;
 		struct urb *urb;
 		int buf_sz;
 	};
@@ -377,10 +378,14 @@ struct mt76_txwi_cache {
 	struct list_head list;
 	dma_addr_t dma_addr;
 
-	union {
-		struct sk_buff *skb;
-		void *ptr;
-	};
+	struct sk_buff *skb;
+};
+
+struct mt76_rxwi_cache {
+	struct list_head list;
+	dma_addr_t dma_addr;
+
+	void *ptr;
 };
 
 struct mt76_rx_tid {
@@ -466,6 +471,7 @@ struct mt76_driver_ops {
 	u16 txwi_size;
 	u16 token_size;
 	u8 mcs_rates;
+	u16 rx_token_size;
 
 	void (*update_survey)(struct mt76_phy *phy);
 
@@ -824,7 +830,6 @@ struct mt76_dev {
 
 	struct ieee80211_hw *hw;
 
-	spinlock_t wed_lock;
 	spinlock_t lock;
 	spinlock_t cc_lock;
 
@@ -1473,8 +1478,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
 }
 
 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r);
+struct mt76_rxwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
 void mt76_free_pending_rxwi(struct mt76_dev *dev);
 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
 		      struct napi_struct *napi);
@@ -1628,9 +1633,9 @@ struct mt76_txwi_cache *
 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
 void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
-struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
+struct mt76_rxwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *r, dma_addr_t phys);
+			  struct mt76_rxwi_cache *r, dma_addr_t phys);
 int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
 static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
 {
diff --git a/mt7915/dma.c b/mt7915/dma.c
index 59a44d79a..326c8c8c1 100644
--- a/mt7915/dma.c
+++ b/mt7915/dma.c
@@ -509,7 +509,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
 	    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
 		dev->mt76.q_rx[MT_RXQ_MAIN].flags =
 			MT_WED_Q_RX(MT7915_RXQ_BAND0);
-		dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
 	}
 
 	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
@@ -546,7 +545,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
 	    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
 		dev->mt76.q_rx[MT_RXQ_BAND1].flags =
 			MT_WED_Q_RX(MT7915_RXQ_BAND1);
-		dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
 	}
 
 	/* rx data queue for band1 */
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index fc7ace638..a38109497 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -574,16 +574,28 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 
 	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
 	for (i = 0; i < dev->mt76.rx_token_size; i++) {
-		struct mt76_txwi_cache *t;
+		struct mt76_rxwi_cache *r;
 
-		t = mt76_rx_token_release(&dev->mt76, i);
-		if (!t || !t->ptr)
+		r = mt76_rx_token_release(&dev->mt76, i);
+		if (!r || !r->ptr)
 			continue;
 
-		mt76_put_page_pool_buf(t->ptr, false);
-		t->ptr = NULL;
+		mt76_put_page_pool_buf(r->ptr, false);
+		r->ptr = NULL;
 
-		mt76_put_rxwi(&dev->mt76, t);
+		mt76_put_rxwi(&dev->mt76, r);
+	}
+
+	mt76_for_each_q_rx(dev, i) {
+		struct mt76_queue *q = &dev->q_rx[i];
+
+		if (!mt76_queue_is_wed_rx(q))
+			continue;
+
+		netif_napi_del(&dev->napi[i]);
+		mt76_dma_rx_cleanup(dev, q);
+
+		page_pool_destroy(q->page_pool);
 	}
 
 	mt76_free_pending_rxwi(&dev->mt76);
@@ -786,7 +798,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
 	wed->wlan.reset = mt7915_mmio_wed_reset;
 	wed->wlan.reset_complete = mt7915_mmio_wed_reset_complete;
 
-	dev->mt76.rx_token_size = wed->wlan.rx_npkt;
+	dev->mt76.rx_token_size += wed->wlan.rx_npkt;
 
 	if (mtk_wed_device_attach(wed))
 		return 0;
@@ -992,6 +1004,7 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
 				     SURVEY_INFO_TIME_RX |
 				     SURVEY_INFO_TIME_BSS_RX,
 		.token_size = MT7915_TOKEN_SIZE,
+		.rx_token_size = MT7915_RX_TOKEN_SIZE,
 		.tx_prepare_skb = mt7915_tx_prepare_skb,
 		.tx_complete_skb = mt76_connac_tx_complete_skb,
 		.rx_skb = mt7915_queue_rx_skb,
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
index d317c523b..91eb5ad0f 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -62,6 +62,7 @@
 #define MT7915_EEPROM_BLOCK_SIZE	16
 #define MT7915_HW_TOKEN_SIZE		4096
 #define MT7915_TOKEN_SIZE		8192
+#define MT7915_RX_TOKEN_SIZE		4096
 
 #define MT7915_CFEND_RATE_DEFAULT	0x49	/* OFDM 24M */
 #define MT7915_CFEND_RATE_11B		0x03	/* 11B LP, 11M */
diff --git a/tx.c b/tx.c
index 1809b0329..74bf0de12 100644
--- a/tx.c
+++ b/tx.c
@@ -843,16 +843,16 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
 EXPORT_SYMBOL_GPL(mt76_token_consume);
 
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *t, dma_addr_t phys)
+			  struct mt76_rxwi_cache *r, dma_addr_t phys)
 {
 	int token;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
+	token = idr_alloc(&dev->rx_token, r, 0, dev->rx_token_size,
 			  GFP_ATOMIC);
 	if (token >= 0) {
-		t->ptr = ptr;
-		t->dma_addr = phys;
+		r->ptr = ptr;
+		r->dma_addr = phys;
 	}
 	spin_unlock_bh(&dev->rx_token_lock);
 
@@ -889,15 +889,15 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
 }
 EXPORT_SYMBOL_GPL(mt76_token_release);
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_rx_token_release(struct mt76_dev *dev, int token)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	t = idr_remove(&dev->rx_token, token);
+	r = idr_remove(&dev->rx_token, token);
 	spin_unlock_bh(&dev->rx_token_lock);
 
-	return t;
+	return r;
 }
 EXPORT_SYMBOL_GPL(mt76_rx_token_release);
-- 
2.39.2
