From dbd501e7d4570588f1bae9cd53177e83d3f5f81e Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Fri, 6 Jan 2023 18:18:50 +0800
Subject: [PATCH 3005/3012] wifi: mt76: mt7915: wed: add rxwi for further in
 chip rro

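Introduce a dedicated struct mt76_rxwi_cache for rx token bookkeeping
instead of reusing struct mt76_txwi_cache, let drivers size the rx token
idr through the new mt76_driver_ops::rx_token_size field
(MT7915_RX_TOKEN_SIZE on mt7915), and protect the rxwi cache list with
dev->lock so the dedicated wed_lock can be dropped.

A minimal sketch of the rx-side token flow after this change (the function
names are the ones touched below; ptr and phy_addr stand for the
DMA-mapped rx buffer as in mt7915_mmio_wed_init_rx_buf(), error unwinding
elided):

	struct mt76_rxwi_cache *r;
	int token;

	/* reuse a cached rxwi or allocate a fresh one */
	r = mt76_get_rxwi(&dev->mt76);
	if (!r)
		return -ENOMEM;

	/* register the buffer so the rx token reported on completion can
	 * be resolved back to this rxwi via mt76_rx_token_release()
	 */
	token = mt76_rx_token_consume(&dev->mt76, ptr, r, phy_addr);
	if (token < 0)
		mt76_put_rxwi(&dev->mt76, r);
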
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
 dma.c           | 93 +++++++++++++++++++++++++------------------------
 mac80211.c      |  2 +-
 mt76.h          | 24 ++++++++-----
 mt7915/dma.c    |  2 --
 mt7915/mmio.c   | 29 +++++++--------
 mt7915/mt7915.h |  1 +
 tx.c            | 16 ++++-----
 7 files changed, 87 insertions(+), 80 deletions(-)

diff --git a/dma.c b/dma.c
index 3047f8b..b210e39 100644
--- a/dma.c
+++ b/dma.c
@@ -59,17 +59,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 mt76_alloc_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
-	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
-	if (!t)
+	r = kzalloc(L1_CACHE_ALIGN(sizeof(*r)), GFP_ATOMIC);
+	if (!r)
 		return NULL;
 
-	t->ptr = NULL;
-	return t;
+	r->ptr = NULL;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -88,20 +88,20 @@ __mt76_get_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 __mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = NULL;
+	struct mt76_rxwi_cache *r = NULL;
 
-	spin_lock(&dev->wed_lock);
+	spin_lock(&dev->lock);
 	if (!list_empty(&dev->rxwi_cache)) {
-		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
+		r = list_first_entry(&dev->rxwi_cache, struct mt76_rxwi_cache,
 				     list);
-		list_del(&t->list);
+		list_del(&r->list);
 	}
-	spin_unlock(&dev->wed_lock);
+	spin_unlock(&dev->lock);
 
-	return t;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -115,13 +115,13 @@ mt76_get_txwi(struct mt76_dev *dev)
 	return mt76_alloc_txwi(dev);
 }
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
+	struct mt76_rxwi_cache *r = __mt76_get_rxwi(dev);
 
-	if (t)
-		return t;
+	if (r)
+		return r;
 
 	return mt76_alloc_rxwi(dev);
 }
@@ -140,14 +140,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
 EXPORT_SYMBOL_GPL(mt76_put_txwi);
 
 void
-mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r)
 {
-	if (!t)
+	if (!r)
 		return;
 
-	spin_lock(&dev->wed_lock);
-	list_add(&t->list, &dev->rxwi_cache);
-	spin_unlock(&dev->wed_lock);
+	spin_lock(&dev->lock);
+	list_add(&r->list, &dev->rxwi_cache);
+	spin_unlock(&dev->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_put_rxwi);
 
@@ -168,13 +168,13 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
 void
 mt76_free_pending_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	local_bh_disable();
-	while ((t = __mt76_get_rxwi(dev)) != NULL) {
-		if (t->ptr)
-			skb_free_frag(t->ptr);
-		kfree(t);
+	while ((r = __mt76_get_rxwi(dev)) != NULL) {
+		if (r->ptr)
+			skb_free_frag(r->ptr);
+		kfree(r);
 	}
 	local_bh_enable();
 }
@@ -212,7 +212,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 {
 	struct mt76_desc *desc = &q->desc[q->head];
 	struct mt76_queue_entry *entry = &q->entry[q->head];
-	struct mt76_txwi_cache *txwi = NULL;
+	struct mt76_rxwi_cache *rxwi = NULL;
 	u32 buf1 = 0, ctrl;
 	int idx = q->head;
 	int rx_token;
@@ -220,13 +220,13 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
 
 	if (mt76_queue_is_wed_rx(q)) {
-		txwi = mt76_get_rxwi(dev);
-		if (!txwi)
+		rxwi = mt76_get_rxwi(dev);
+		if (!rxwi)
 			return -ENOMEM;
 
-		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
+		rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
 		if (rx_token < 0) {
-			mt76_put_rxwi(dev, txwi);
+			mt76_put_rxwi(dev, rxwi);
 			return -ENOMEM;
 		}
 
@@ -241,7 +241,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 
 	entry->dma_addr[0] = buf->addr;
 	entry->dma_len[0] = buf->len;
-	entry->txwi = txwi;
+	entry->rxwi = rxwi;
 	entry->buf = data;
 	entry->wcid = 0xffff;
 	entry->skip_buf1 = true;
@@ -254,7 +254,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 static int
 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 		 struct mt76_queue_buf *buf, int nbufs, u32 info,
-		 struct sk_buff *skb, void *txwi)
+		 struct sk_buff *skb, void *txwi, void *rxwi)
 {
 	struct mt76_queue_entry *entry;
 	struct mt76_desc *desc;
@@ -307,6 +307,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	}
 
 	q->entry[idx].txwi = txwi;
+	q->entry[idx].rxwi = rxwi;
 	q->entry[idx].skb = skb;
 	q->entry[idx].wcid = 0xffff;
 
@@ -405,13 +406,13 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	u32 buf1 = le32_to_cpu(desc->buf1);
 	u32 id, find = 0;
 	u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	if (*more) {
 		spin_lock_bh(&dev->rx_token_lock);
 
-		idr_for_each_entry(&dev->rx_token, t, id) {
-			if (t->dma_addr == le32_to_cpu(desc->buf0)) {
+		idr_for_each_entry(&dev->rx_token, r, id) {
+			if (r->dma_addr == le32_to_cpu(desc->buf0)) {
 				find = 1;
 				token = id;
 
@@ -428,19 +429,19 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 			return NULL;
 	}
 
-	t = mt76_rx_token_release(dev, token);
-	if (!t)
+	r = mt76_rx_token_release(dev, token);
+	if (!r)
 		return NULL;
 
-	dma_unmap_single(dev->dma_dev, t->dma_addr,
+	dma_unmap_single(dev->dma_dev, r->dma_addr,
 			 SKB_WITH_OVERHEAD(q->buf_size),
 			 DMA_FROM_DEVICE);
 
-	buf = t->ptr;
-	t->dma_addr = 0;
-	t->ptr = NULL;
+	buf = r->ptr;
+	r->dma_addr = 0;
+	r->ptr = NULL;
 
-	mt76_put_rxwi(dev, t);
+	mt76_put_rxwi(dev, r);
 
 	if (drop) {
 		u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
@@ -504,7 +505,7 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
 	buf.len = skb->len;
 
 	spin_lock_bh(&q->lock);
-	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL, NULL);
 	mt76_dma_kick_queue(dev, q);
 	spin_unlock_bh(&q->lock);
 
@@ -584,7 +585,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 		goto unmap;
 
 	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
-				tx_info.info, tx_info.skb, t);
+				tx_info.info, tx_info.skb, t, NULL);
 
 unmap:
 	for (n--; n > 0; n--)
diff --git a/mac80211.c b/mac80211.c
index 553d901..4a0f333 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -603,7 +603,6 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	spin_lock_init(&dev->lock);
 	spin_lock_init(&dev->cc_lock);
 	spin_lock_init(&dev->status_lock);
-	spin_lock_init(&dev->wed_lock);
 	mutex_init(&dev->mutex);
 	init_waitqueue_head(&dev->tx_wait);
 
@@ -634,6 +633,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	INIT_LIST_HEAD(&dev->txwi_cache);
 	INIT_LIST_HEAD(&dev->rxwi_cache);
 	dev->token_size = dev->drv->token_size;
+	dev->rx_token_size = dev->drv->rx_token_size;
 
 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
 		skb_queue_head_init(&dev->rx_skb[i]);
diff --git a/mt76.h b/mt76.h
index 3d36913..b178b95 100644
--- a/mt76.h
+++ b/mt76.h
@@ -165,6 +165,7 @@ struct mt76_queue_entry {
 	};
 	union {
 		struct mt76_txwi_cache *txwi;
+		struct mt76_rxwi_cache *rxwi;
 		struct urb *urb;
 		int buf_sz;
 	};
@@ -360,10 +361,15 @@ struct mt76_txwi_cache {
 	struct list_head list;
 	dma_addr_t dma_addr;
 
-	union {
-		struct sk_buff *skb;
-		void *ptr;
-	};
+	struct sk_buff *skb;
+};
+
+struct mt76_rxwi_cache {
+	struct list_head list;
+	dma_addr_t dma_addr;
+
+	void *ptr;
+	u32 token;
 };
 
 struct mt76_rx_tid {
@@ -449,6 +455,7 @@ struct mt76_driver_ops {
 	u16 txwi_size;
 	u16 token_size;
 	u8 mcs_rates;
+	u16 rx_token_size;
 
 	void (*update_survey)(struct mt76_phy *phy);
 
@@ -819,7 +826,6 @@ struct mt76_dev {
 
 	struct ieee80211_hw *hw;
 
-	spinlock_t wed_lock;
 	spinlock_t lock;
 	spinlock_t cc_lock;
 
@@ -1411,8 +1417,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
 }
 
 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r);
+struct mt76_rxwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
 void mt76_free_pending_rxwi(struct mt76_dev *dev);
 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
 		      struct napi_struct *napi);
@@ -1564,9 +1570,9 @@ struct mt76_txwi_cache *
 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
 void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
-struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
+struct mt76_rxwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *r, dma_addr_t phys);
+			  struct mt76_rxwi_cache *r, dma_addr_t phys);
 
 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
diff --git a/mt7915/dma.c b/mt7915/dma.c
index 4c8cf0c..3784b7b 100644
--- a/mt7915/dma.c
+++ b/mt7915/dma.c
@@ -509,7 +509,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
 	    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
 		dev->mt76.q_rx[MT_RXQ_MAIN].flags =
 			MT_WED_Q_RX(MT7915_RXQ_BAND0);
-		dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
 	}
 
 	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
@@ -546,7 +545,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
 	    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
 		dev->mt76.q_rx[MT_RXQ_BAND1].flags =
 			MT_WED_Q_RX(MT7915_RXQ_BAND1);
-		dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
 	}
 
 	/* rx data queue for band1 */
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 3f4749b..1c416bc 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -610,18 +610,18 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 				sizeof(struct skb_shared_info));
 
 	for (i = 0; i < dev->mt76.rx_token_size; i++) {
-		struct mt76_txwi_cache *t;
+		struct mt76_rxwi_cache *r;
 
-		t = mt76_rx_token_release(&dev->mt76, i);
-		if (!t || !t->ptr)
+		r = mt76_rx_token_release(&dev->mt76, i);
+		if (!r || !r->ptr)
 			continue;
 
-		dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
+		dma_unmap_single(dev->mt76.dma_dev, r->dma_addr,
 				 wed->wlan.rx_size, DMA_FROM_DEVICE);
-		__free_pages(virt_to_page(t->ptr), get_order(length));
-		t->ptr = NULL;
+		__free_pages(virt_to_page(r->ptr), get_order(length));
+		r->ptr = NULL;
 
-		mt76_put_rxwi(&dev->mt76, t);
+		mt76_put_rxwi(&dev->mt76, r);
 	}
 
 	mt76_free_pending_rxwi(&dev->mt76);
@@ -639,18 +639,18 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 				sizeof(struct skb_shared_info));
 
 	for (i = 0; i < size; i++) {
-		struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76);
+		struct mt76_rxwi_cache *r = mt76_get_rxwi(&dev->mt76);
 		dma_addr_t phy_addr;
 		struct page *page;
 		int token;
 		void *ptr;
 
-		if (!t)
+		if (!r)
 			goto unmap;
 
 		page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
 		if (!page) {
-			mt76_put_rxwi(&dev->mt76, t);
+			mt76_put_rxwi(&dev->mt76, r);
 			goto unmap;
 		}
 
@@ -660,17 +660,17 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 				   DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
 			__free_pages(page, get_order(length));
-			mt76_put_rxwi(&dev->mt76, t);
+			mt76_put_rxwi(&dev->mt76, r);
 			goto unmap;
 		}
 
 		desc->buf0 = cpu_to_le32(phy_addr);
-		token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
+		token = mt76_rx_token_consume(&dev->mt76, ptr, r, phy_addr);
 		if (token < 0) {
 			dma_unmap_single(dev->mt76.dma_dev, phy_addr,
 					 wed->wlan.rx_size, DMA_TO_DEVICE);
 			__free_pages(page, get_order(length));
-			mt76_put_rxwi(&dev->mt76, t);
+			mt76_put_rxwi(&dev->mt76, r);
 			goto unmap;
 		}
 
@@ -831,7 +831,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
 	wed->wlan.reset = mt7915_mmio_wed_reset;
 	wed->wlan.reset_complete = mt7915_mmio_wed_reset_complete;
 
-	dev->mt76.rx_token_size = wed->wlan.rx_npkt;
+	dev->mt76.rx_token_size += wed->wlan.rx_npkt;
 
 	if (mtk_wed_device_attach(wed))
 		return 0;
@@ -1038,6 +1038,7 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
 			       SURVEY_INFO_TIME_RX |
 			       SURVEY_INFO_TIME_BSS_RX,
 		.token_size = MT7915_TOKEN_SIZE,
+		.rx_token_size = MT7915_RX_TOKEN_SIZE,
 		.tx_prepare_skb = mt7915_tx_prepare_skb,
 		.tx_complete_skb = mt76_connac_tx_complete_skb,
 		.rx_skb = mt7915_queue_rx_skb,
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
index fddca24..646f3e8 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -64,6 +64,7 @@
 #define MT7915_EEPROM_BLOCK_SIZE	16
 #define MT7915_HW_TOKEN_SIZE		7168
 #define MT7915_TOKEN_SIZE		8192
+#define MT7915_RX_TOKEN_SIZE		4096
 
 #define MT7915_CFEND_RATE_DEFAULT	0x49 /* OFDM 24M */
 #define MT7915_CFEND_RATE_11B		0x03 /* 11B LP, 11M */
diff --git a/tx.c b/tx.c
index 94f0d82..a87e361 100644
--- a/tx.c
+++ b/tx.c
@@ -760,16 +760,16 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
 EXPORT_SYMBOL_GPL(mt76_token_consume);
 
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *t, dma_addr_t phys)
+			  struct mt76_rxwi_cache *r, dma_addr_t phys)
 {
 	int token;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
+	token = idr_alloc(&dev->rx_token, r, 0, dev->rx_token_size,
 			  GFP_ATOMIC);
 	if (token >= 0) {
-		t->ptr = ptr;
-		t->dma_addr = phys;
+		r->ptr = ptr;
+		r->dma_addr = phys;
 	}
 	spin_unlock_bh(&dev->rx_token_lock);
 
@@ -806,15 +806,15 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
 }
 EXPORT_SYMBOL_GPL(mt76_token_release);
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_rx_token_release(struct mt76_dev *dev, int token)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	t = idr_remove(&dev->rx_token, token);
+	r = idr_remove(&dev->rx_token, token);
 	spin_unlock_bh(&dev->rx_token_lock);
 
-	return t;
+	return r;
 }
 EXPORT_SYMBOL_GPL(mt76_rx_token_release);
-- 
2.18.0
