From c326d38bf0da40d6b0ccbd13de2bb267398598d0 Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Wed, 14 Dec 2022 17:19:00 +0800
Subject: [PATCH 3012/3013] mt76: mt7915: wed: add rxwi for further in-chip
 RRO development

Introduce a dedicated struct mt76_rxwi_cache for WED rx buffer tracking
instead of overloading struct mt76_txwi_cache, protect the rxwi list
with dev->lock instead of a separate wed_lock, and let drivers declare
their base rx token pool size through mt76_driver_ops so the WED packet
count can be added on top of it.

Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
 dma.c           | 98 +++++++++++++++++++++++++------------------------
 mac80211.c      |  2 +-
 mt76.h          | 24 +++++++-----
 mt7915/dma.c    |  2 -
 mt7915/mmio.c   | 21 ++++++-----
 mt7915/mt7915.h |  1 +
 tx.c            | 16 ++++----
 7 files changed, 86 insertions(+), 78 deletions(-)

diff --git a/dma.c b/dma.c
index 0914266a..7ef272e2 100644
--- a/dma.c
+++ b/dma.c
@@ -59,17 +59,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 mt76_alloc_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
-	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
-	if (!t)
+	r = kzalloc(L1_CACHE_ALIGN(sizeof(*r)), GFP_ATOMIC);
+	if (!r)
 		return NULL;
 
-	t->ptr = NULL;
-	return t;
+	r->ptr = NULL;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -88,20 +88,20 @@ __mt76_get_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 __mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = NULL;
+	struct mt76_rxwi_cache *r = NULL;
 
-	spin_lock(&dev->wed_lock);
+	spin_lock(&dev->lock);
 	if (!list_empty(&dev->rxwi_cache)) {
-		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
+		r = list_first_entry(&dev->rxwi_cache, struct mt76_rxwi_cache,
				     list);
-		list_del(&t->list);
+		list_del(&r->list);
 	}
-	spin_unlock(&dev->wed_lock);
+	spin_unlock(&dev->lock);
 
-	return t;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -115,13 +115,13 @@ mt76_get_txwi(struct mt76_dev *dev)
 	return mt76_alloc_txwi(dev);
 }
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
+	struct mt76_rxwi_cache *r = __mt76_get_rxwi(dev);
 
-	if (t)
-		return t;
+	if (r)
+		return r;
 
 	return mt76_alloc_rxwi(dev);
 }
@@ -140,14 +140,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
 EXPORT_SYMBOL_GPL(mt76_put_txwi);
 
 void
-mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r)
 {
-	if (!t)
+	if (!r)
 		return;
 
-	spin_lock(&dev->wed_lock);
-	list_add(&t->list, &dev->rxwi_cache);
-	spin_unlock(&dev->wed_lock);
+	spin_lock(&dev->lock);
+	list_add(&r->list, &dev->rxwi_cache);
+	spin_unlock(&dev->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_put_rxwi);
 
@@ -168,13 +168,13 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
 void
 mt76_free_pending_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	local_bh_disable();
-	while ((t = __mt76_get_rxwi(dev)) != NULL) {
-		if (t->ptr)
-			skb_free_frag(t->ptr);
-		kfree(t);
+	while ((r = __mt76_get_rxwi(dev)) != NULL) {
+		if (r->ptr)
+			skb_free_frag(r->ptr);
+		kfree(r);
 	}
 	local_bh_enable();
 }
@@ -209,7 +209,7 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
 static int
 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
-		 struct sk_buff *skb, void *txwi)
+		 struct sk_buff *skb, void *txwi, void *rxwi)
 {
 	struct mt76_queue_entry *entry;
 	struct mt76_desc *desc;
@@ -227,13 +227,13 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 
 	if ((q->flags & MT_QFLAG_WED) &&
	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
-		struct mt76_txwi_cache *t = txwi;
+		struct mt76_rxwi_cache *r = rxwi;
 		int rx_token;
 
-		if (!t)
+		if (!r)
 			return -ENOMEM;
 
-		rx_token = mt76_rx_token_consume(dev, (void *)skb, t,
+		rx_token = mt76_rx_token_consume(dev, (void *)skb, r,
						 buf[0].addr);
 		if (rx_token < 0)
 			return -ENOMEM;
@@ -280,6 +280,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	}
 
 	q->entry[idx].txwi = txwi;
+	q->entry[idx].rxwi = rxwi;
 	q->entry[idx].skb = skb;
 	q->entry[idx].wcid = 0xffff;
 
@@ -379,13 +380,13 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 		u32 id, find = 0;
 		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN,
				      le32_to_cpu(desc->buf1));
-		struct mt76_txwi_cache *t;
+		struct mt76_rxwi_cache *r;
 
 		if (*more) {
 			spin_lock_bh(&dev->rx_token_lock);
 
-			idr_for_each_entry(&dev->rx_token, t, id) {
-				if (t->dma_addr == le32_to_cpu(desc->buf0)) {
+			idr_for_each_entry(&dev->rx_token, r, id) {
+				if (r->dma_addr == le32_to_cpu(desc->buf0)) {
 					find = 1;
 					desc->buf1 = FIELD_PREP(MT_DMA_CTL_TOKEN, id);
 					token = id;
@@ -398,11 +399,11 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 			return NULL;
 		}
 
-		t = mt76_rx_token_release(dev, token);
-		if (!t)
+		r = mt76_rx_token_release(dev, token);
+		if (!r)
 			return NULL;
 
-		dma_unmap_single(dev->dma_dev, t->dma_addr,
+		dma_unmap_single(dev->dma_dev, r->dma_addr,
				 SKB_WITH_OVERHEAD(q->buf_size),
				 DMA_FROM_DEVICE);
 
@@ -410,10 +411,10 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 		if (!buf)
 			return NULL;
 
-		memcpy(buf, t->ptr, SKB_WITH_OVERHEAD(q->buf_size));
-		t->dma_addr = 0;
+		memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size));
+		r->dma_addr = 0;
 
-		mt76_put_rxwi(dev, t);
+		mt76_put_rxwi(dev, r);
 
 		if (drop) {
 			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
@@ -481,7 +482,7 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
 	buf.len = skb->len;
 
 	spin_lock_bh(&q->lock);
-	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL, NULL);
 	mt76_dma_kick_queue(dev, q);
 	spin_unlock_bh(&q->lock);
 
@@ -558,7 +559,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 		goto unmap;
 
 	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
-				tx_info.info, tx_info.skb, t);
+				tx_info.info, tx_info.skb, t, NULL);
 
 unmap:
 	for (n--; n > 0; n--)
@@ -598,20 +599,21 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 	spin_lock_bh(&q->lock);
 
 	while (q->queued < q->ndesc - 1) {
-		struct mt76_txwi_cache *t = NULL;
+		struct mt76_rxwi_cache *r = NULL;
 		struct mt76_queue_buf qbuf;
 		bool skip_alloc = false;
 		void *buf = NULL;
 
 		if ((q->flags & MT_QFLAG_WED) &&
		    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
-			t = mt76_get_rxwi(dev);
-			if (!t)
+			r = mt76_get_rxwi(dev);
+			if (!r)
 				break;
 
-			if (t->ptr) {
+			/* reuse skb buf for wed rx copy */
+			if (r->ptr) {
 				skip_alloc = true;
-				buf = t->ptr;
+				buf = r->ptr;
 			}
 		}
 
@@ -630,7 +632,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 		qbuf.addr = addr + offset;
 		qbuf.len = len - offset;
 		qbuf.skip_unmap = false;
-		if (mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t) < 0) {
+		if (mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL, r) < 0) {
 			dma_unmap_single(dev->dma_dev, addr, len,
					 DMA_FROM_DEVICE);
 			skb_free_frag(buf);
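
The dma.c changes above amount to a type-safe free-list cache: mt76_get_rxwi() pops a recycled entry or allocates a fresh one, mt76_put_rxwi() pushes it back, and both now serialize on dev->lock since wed_lock is dropped. A minimal userspace sketch of that recycling pattern, with a pthread mutex and a singly linked list standing in for the kernel's spinlock and list_head (illustrative only, not the driver code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct rxwi_cache {
	struct rxwi_cache *next;	/* stand-in for struct list_head */
	void *ptr;			/* rx buffer tracked by this entry */
};

static struct rxwi_cache *rxwi_free_list;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* models mt76_get_rxwi(): reuse a cached entry, else allocate */
static struct rxwi_cache *get_rxwi(void)
{
	struct rxwi_cache *r = NULL;

	pthread_mutex_lock(&cache_lock);
	if (rxwi_free_list) {
		r = rxwi_free_list;
		rxwi_free_list = r->next;
	}
	pthread_mutex_unlock(&cache_lock);

	return r ? r : calloc(1, sizeof(*r));	/* mt76_alloc_rxwi() path */
}

/* models mt76_put_rxwi(): recycle instead of freeing */
static void put_rxwi(struct rxwi_cache *r)
{
	if (!r)
		return;

	pthread_mutex_lock(&cache_lock);
	r->next = rxwi_free_list;
	rxwi_free_list = r;
	pthread_mutex_unlock(&cache_lock);
}

int main(void)
{
	struct rxwi_cache *r = get_rxwi();

	put_rxwi(r);	/* entry now sits on the free list for reuse */
	printf("cached entry: %p\n", (void *)rxwi_free_list);
	return 0;
}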
diff --git a/mac80211.c b/mac80211.c
index de9ef237..818f4f0c 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -597,7 +597,6 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	spin_lock_init(&dev->lock);
 	spin_lock_init(&dev->cc_lock);
 	spin_lock_init(&dev->status_lock);
-	spin_lock_init(&dev->wed_lock);
 	mutex_init(&dev->mutex);
 	init_waitqueue_head(&dev->tx_wait);
 
@@ -628,6 +627,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	INIT_LIST_HEAD(&dev->txwi_cache);
 	INIT_LIST_HEAD(&dev->rxwi_cache);
 	dev->token_size = dev->drv->token_size;
+	dev->rx_token_size = dev->drv->rx_token_size;
 
 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
 		skb_queue_head_init(&dev->rx_skb[i]);
diff --git a/mt76.h b/mt76.h
index f1795778..42364b81 100644
--- a/mt76.h
+++ b/mt76.h
@@ -166,6 +166,7 @@ struct mt76_queue_entry {
 	};
 	union {
 		struct mt76_txwi_cache *txwi;
+		struct mt76_rxwi_cache *rxwi;
 		struct urb *urb;
 		int buf_sz;
 	};
@@ -354,10 +355,15 @@ struct mt76_txwi_cache {
 	struct list_head list;
 	dma_addr_t dma_addr;
 
-	union {
-		struct sk_buff *skb;
-		void *ptr;
-	};
+	struct sk_buff *skb;
+};
+
+struct mt76_rxwi_cache {
+	struct list_head list;
+	dma_addr_t dma_addr;
+
+	void *ptr;
+	u32 token;
 };
 
 struct mt76_rx_tid {
@@ -441,6 +447,7 @@ struct mt76_driver_ops {
 	u16 txwi_size;
 	u16 token_size;
 	u8 mcs_rates;
+	u16 rx_token_size;
 
 	void (*update_survey)(struct mt76_phy *phy);
 
@@ -805,7 +812,6 @@ struct mt76_dev {
 
 	struct ieee80211_hw *hw;
 
-	spinlock_t wed_lock;
 	spinlock_t lock;
 	spinlock_t cc_lock;
 
@@ -1394,8 +1400,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
 }
 
 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r);
+struct mt76_rxwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
 void mt76_free_pending_rxwi(struct mt76_dev *dev);
 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
@@ -1541,9 +1547,9 @@ struct mt76_txwi_cache *
 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
 void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
-struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
+struct mt76_rxwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *r, dma_addr_t phys);
+			  struct mt76_rxwi_cache *r, dma_addr_t phys);
 
 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
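
With the union gone, tx and rx bookkeeping no longer share one structure, and the rx side gains a token field. For reference, a compilable rendering of the two types as defined in the hunk above, annotated; the typedefs are simplified stand-ins for the kernel's types, and only the field layout mirrors the patch:

#include <stdint.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
typedef uintptr_t dma_addr_t;
typedef uint32_t u32;
struct sk_buff;				/* opaque, pointer-only here */

struct mt76_txwi_cache {
	struct list_head list;		/* links into dev->txwi_cache */
	dma_addr_t dma_addr;

	struct sk_buff *skb;		/* tx only: the old skb/ptr union is gone */
};

struct mt76_rxwi_cache {
	struct list_head list;		/* links into dev->rxwi_cache */
	dma_addr_t dma_addr;

	void *ptr;			/* rx page-fragment buffer */
	u32 token;			/* rx token id, added for in-chip RRO */
};

int main(void)
{
	printf("txwi: %zu bytes, rxwi: %zu bytes\n",
	       sizeof(struct mt76_txwi_cache),
	       sizeof(struct mt76_rxwi_cache));
	return 0;
}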
diff --git a/mt7915/dma.c b/mt7915/dma.c
index 36260085..9cbd3625 100644
--- a/mt7915/dma.c
+++ b/mt7915/dma.c
@@ -492,7 +492,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
	    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
 		dev->mt76.q_rx[MT_RXQ_MAIN].flags =
 			MT_WED_Q_RX(MT7915_RXQ_BAND0);
-		dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
 	}
 
 	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
@@ -529,7 +528,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
		    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
 			dev->mt76.q_rx[MT_RXQ_BAND1].flags =
 				MT_WED_Q_RX(MT7915_RXQ_BAND1);
-			dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
 		}
 
 		/* rx data queue for band1 */
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 992beca3..ba728dd0 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -603,18 +603,18 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 
 	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
 	for (i = 0; i < dev->mt76.rx_token_size; i++) {
-		struct mt76_txwi_cache *t;
+		struct mt76_rxwi_cache *r;
 
-		t = mt76_rx_token_release(&dev->mt76, i);
-		if (!t || !t->ptr)
+		r = mt76_rx_token_release(&dev->mt76, i);
+		if (!r || !r->ptr)
 			continue;
 
-		dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
+		dma_unmap_single(dev->mt76.dma_dev, r->dma_addr,
				 wed->wlan.rx_size, DMA_FROM_DEVICE);
-		skb_free_frag(t->ptr);
-		t->ptr = NULL;
+		skb_free_frag(r->ptr);
+		r->ptr = NULL;
 
-		mt76_put_rxwi(&dev->mt76, t);
+		mt76_put_rxwi(&dev->mt76, r);
 	}
 
 	mt76_free_pending_rxwi(&dev->mt76);
@@ -639,7 +639,7 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 			sizeof(struct skb_shared_info));
 
 	for (i = 0; i < size; i++) {
-		struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76);
+		struct mt76_rxwi_cache *r = mt76_get_rxwi(&dev->mt76);
 		dma_addr_t phy_addr;
 		int token;
 		void *ptr;
@@ -658,7 +658,7 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 		}
 
 		desc->buf0 = cpu_to_le32(phy_addr);
-		token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
+		token = mt76_rx_token_consume(&dev->mt76, ptr, r, phy_addr);
 		if (token < 0) {
 			dma_unmap_single(dev->mt76.dma_dev, phy_addr,
					 wed->wlan.rx_size, DMA_TO_DEVICE);
@@ -786,7 +786,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
 	wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats;
 	wed->wlan.ser_trigger = mt7915_wed_trigger_ser;
 
-	dev->mt76.rx_token_size = wed->wlan.rx_npkt;
+	dev->mt76.rx_token_size += wed->wlan.rx_npkt;
 
 	if (mtk_wed_device_attach(wed))
 		return 0;
@@ -992,6 +992,7 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
			       SURVEY_INFO_TIME_RX |
			       SURVEY_INFO_TIME_BSS_RX,
 		.token_size = MT7915_TOKEN_SIZE,
+		.rx_token_size = MT7915_RX_TOKEN_SIZE,
 		.tx_prepare_skb = mt7915_tx_prepare_skb,
 		.tx_complete_skb = mt76_connac_tx_complete_skb,
 		.rx_skb = mt7915_queue_rx_skb,
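
Two sizing changes above work together: the driver now seeds dev->mt76.rx_token_size from .rx_token_size in its mt76_driver_ops (MT7915_RX_TOKEN_SIZE, defined in the next file), and mt7915_mmio_wed_init() adds wed->wlan.rx_npkt on top with "+=" rather than overwriting it, which is also why the per-ring "+= MT7915_RX_RING_SIZE" lines in mt7915/dma.c go away. A sketch of that accounting; the rx_npkt value here is made up purely for illustration:

#include <stdio.h>

#define MT7915_RX_TOKEN_SIZE 4096	/* from mt7915.h below */

int main(void)
{
	unsigned int rx_token_size = MT7915_RX_TOKEN_SIZE; /* drv->rx_token_size */
	unsigned int rx_npkt = 8192;	/* hypothetical WED packet count */

	rx_token_size += rx_npkt;	/* "+=" keeps the base pool; "=" would drop it */
	printf("rx tokens: %u\n", rx_token_size);
	return 0;
}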
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
index ed293e70..6def0596 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -65,6 +65,7 @@
 
 #define MT7915_EEPROM_BLOCK_SIZE	16
 #define MT7915_TOKEN_SIZE		8192
+#define MT7915_RX_TOKEN_SIZE		4096
 
 #define MT7915_CFEND_RATE_DEFAULT	0x49	/* OFDM 24M */
 #define MT7915_CFEND_RATE_11B		0x03	/* 11B LP, 11M */
diff --git a/tx.c b/tx.c
index 6d55566f..a72b7779 100644
--- a/tx.c
+++ b/tx.c
@@ -756,16 +756,16 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
 EXPORT_SYMBOL_GPL(mt76_token_consume);
 
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *t, dma_addr_t phys)
+			  struct mt76_rxwi_cache *r, dma_addr_t phys)
 {
 	int token;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
+	token = idr_alloc(&dev->rx_token, r, 0, dev->rx_token_size,
			  GFP_ATOMIC);
 	if (token >= 0) {
-		t->ptr = ptr;
-		t->dma_addr = phys;
+		r->ptr = ptr;
+		r->dma_addr = phys;
 	}
 	spin_unlock_bh(&dev->rx_token_lock);
 
@@ -802,15 +802,15 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
 }
 EXPORT_SYMBOL_GPL(mt76_token_release);
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_rx_token_release(struct mt76_dev *dev, int token)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	t = idr_remove(&dev->rx_token, token);
+	r = idr_remove(&dev->rx_token, token);
 	spin_unlock_bh(&dev->rx_token_lock);
 
-	return t;
+	return r;
 }
 EXPORT_SYMBOL_GPL(mt76_rx_token_release);
-- 
2.18.0

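The tx.c helpers above map integer tokens to rxwi entries through an idr bounded by dev->rx_token_size. A userspace analogue of consume/release, with a small fixed array standing in for the idr and the locking omitted (the kernel takes rx_token_lock around both paths); sizes and names here are illustrative:

#include <stdio.h>

#define RX_TOKEN_SIZE 8			/* tiny stand-in for dev->rx_token_size */

struct rxwi { void *ptr; };

static struct rxwi *rx_token[RX_TOKEN_SIZE];	/* stand-in for the dev->rx_token idr */

/* models mt76_rx_token_consume(): store r, hand back its token */
static int rx_token_consume(struct rxwi *r)
{
	int i;

	for (i = 0; i < RX_TOKEN_SIZE; i++) {
		if (!rx_token[i]) {
			rx_token[i] = r;
			return i;
		}
	}
	return -1;	/* pool exhausted, like idr_alloc() failing */
}

/* models mt76_rx_token_release(): remove and return the entry */
static struct rxwi *rx_token_release(int token)
{
	struct rxwi *r;

	if (token < 0 || token >= RX_TOKEN_SIZE)
		return NULL;

	r = rx_token[token];
	rx_token[token] = NULL;
	return r;
}

int main(void)
{
	struct rxwi r = { .ptr = "rx buffer" };
	int token = rx_token_consume(&r);

	printf("token %d -> %p\n", token, (void *)rx_token_release(token));
	return 0;
}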