From afd516f8fd841fc7fc46667edc57f2f10d92de46 Mon Sep 17 00:00:00 2001
From: Evelyn Tsai <evelyn.tsai@mediatek.com>
Date: Wed, 21 Dec 2022 09:47:01 +0800
Subject: [PATCH 3011/3013] mt76: mt7915: wed: add rxwi for further in-chip
 RRO development

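Decouple the RX token bookkeeping from struct mt76_txwi_cache by
introducing a dedicated struct mt76_rxwi_cache (list head, DMA address,
buffer pointer and token). Protect the rxwi free list with dev->lock
instead of a separate wed_lock, and size the RX token IDR from the new
mt76_driver_ops::rx_token_size (MT7915_RX_TOKEN_SIZE for mt7915) plus
the WED rx_npkt budget, rather than growing it by MT7915_RX_RING_SIZE
per WED RX ring. A sketch of the resulting rxwi lifecycle follows the
diffstat below.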
---
 dma.c           | 98 +++++++++++++++++++++++++------------------------
 mac80211.c      |  2 +-
 mt76.h          | 24 +++++++-----
 mt7915/dma.c    |  2 --
 mt7915/mmio.c   | 21 ++++++-----
 mt7915/mt7915.h |  1 +
 tx.c            | 16 ++++----
 7 files changed, 86 insertions(+), 78 deletions(-)

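Reviewer note, not applied by git am: a minimal sketch of the rxwi token
lifecycle after this patch. The wrapper function below is hypothetical;
only struct mt76_rxwi_cache and the mt76_* helpers are taken from the
diff, and DMA (un)mapping plus the real error paths are elided.

#include "mt76.h"

static int rxwi_lifecycle_sketch(struct mt76_dev *dev, void *buf,
				 dma_addr_t phys)
{
	struct mt76_rxwi_cache *r;
	int token;

	/* pop a free rxwi from dev->rxwi_cache (now under dev->lock),
	 * or kzalloc a fresh one
	 */
	r = mt76_get_rxwi(dev);
	if (!r)
		return -ENOMEM;

	/* publish the buffer under a token from the rx_token IDR, which
	 * is bounded by dev->rx_token_size: drv->rx_token_size plus
	 * wed->wlan.rx_npkt added at WED init
	 */
	token = mt76_rx_token_consume(dev, buf, r, phys);
	if (token < 0) {
		mt76_put_rxwi(dev, r);
		return -ENOMEM;
	}

	/* hardware owns the buffer now; on completion, look it up again */
	r = mt76_rx_token_release(dev, token);
	if (r)
		mt76_put_rxwi(dev, r);	/* r->ptr stays set for rx_fill reuse */

	return 0;
}
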
diff --git a/dma.c b/dma.c
index 0914266..7ef272e 100644
--- a/dma.c
+++ b/dma.c
@@ -59,17 +59,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 mt76_alloc_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
-	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
-	if (!t)
+	r = kzalloc(L1_CACHE_ALIGN(sizeof(*r)), GFP_ATOMIC);
+	if (!r)
 		return NULL;
 
-	t->ptr = NULL;
-	return t;
+	r->ptr = NULL;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -88,20 +88,20 @@ __mt76_get_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 __mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = NULL;
+	struct mt76_rxwi_cache *r = NULL;
 
-	spin_lock(&dev->wed_lock);
+	spin_lock(&dev->lock);
 	if (!list_empty(&dev->rxwi_cache)) {
-		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
+		r = list_first_entry(&dev->rxwi_cache, struct mt76_rxwi_cache,
 				     list);
-		list_del(&t->list);
+		list_del(&r->list);
 	}
-	spin_unlock(&dev->wed_lock);
+	spin_unlock(&dev->lock);
 
-	return t;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -115,13 +115,13 @@ mt76_get_txwi(struct mt76_dev *dev)
 	return mt76_alloc_txwi(dev);
 }
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
+	struct mt76_rxwi_cache *r = __mt76_get_rxwi(dev);
 
-	if (t)
-		return t;
+	if (r)
+		return r;
 
 	return mt76_alloc_rxwi(dev);
 }
@@ -140,14 +140,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
 EXPORT_SYMBOL_GPL(mt76_put_txwi);
 
 void
-mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r)
 {
-	if (!t)
+	if (!r)
 		return;
 
-	spin_lock(&dev->wed_lock);
-	list_add(&t->list, &dev->rxwi_cache);
-	spin_unlock(&dev->wed_lock);
+	spin_lock(&dev->lock);
+	list_add(&r->list, &dev->rxwi_cache);
+	spin_unlock(&dev->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_put_rxwi);
 
@@ -168,13 +168,13 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
 void
 mt76_free_pending_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	local_bh_disable();
-	while ((t = __mt76_get_rxwi(dev)) != NULL) {
-		if (t->ptr)
-			skb_free_frag(t->ptr);
-		kfree(t);
+	while ((r = __mt76_get_rxwi(dev)) != NULL) {
+		if (r->ptr)
+			skb_free_frag(r->ptr);
+		kfree(r);
 	}
 	local_bh_enable();
 }
@@ -209,7 +209,7 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
 static int
 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 		 struct mt76_queue_buf *buf, int nbufs, u32 info,
-		 struct sk_buff *skb, void *txwi)
+		 struct sk_buff *skb, void *txwi, void *rxwi)
 {
 	struct mt76_queue_entry *entry;
 	struct mt76_desc *desc;
@@ -227,13 +227,13 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 
 	if ((q->flags & MT_QFLAG_WED) &&
 	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
-		struct mt76_txwi_cache *t = txwi;
+		struct mt76_rxwi_cache *r = rxwi;
 		int rx_token;
 
-		if (!t)
+		if (!r)
 			return -ENOMEM;
 
-		rx_token = mt76_rx_token_consume(dev, (void *)skb, t,
+		rx_token = mt76_rx_token_consume(dev, (void *)skb, r,
 						 buf[0].addr);
 		if (rx_token < 0)
 			return -ENOMEM;
@@ -280,6 +280,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	}
 
 	q->entry[idx].txwi = txwi;
+	q->entry[idx].rxwi = rxwi;
 	q->entry[idx].skb = skb;
 	q->entry[idx].wcid = 0xffff;
 
@@ -379,13 +380,13 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 		u32 id, find = 0;
 		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN,
 				      le32_to_cpu(desc->buf1));
-		struct mt76_txwi_cache *t;
+		struct mt76_rxwi_cache *r;
 
 		if (*more) {
 			spin_lock_bh(&dev->rx_token_lock);
 
-			idr_for_each_entry(&dev->rx_token, t, id) {
-				if (t->dma_addr == le32_to_cpu(desc->buf0)) {
+			idr_for_each_entry(&dev->rx_token, r, id) {
+				if (r->dma_addr == le32_to_cpu(desc->buf0)) {
 					find = 1;
 					desc->buf1 = FIELD_PREP(MT_DMA_CTL_TOKEN, id);
 					token = id;
@@ -398,11 +399,11 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 				return NULL;
 		}
 
-		t = mt76_rx_token_release(dev, token);
-		if (!t)
+		r = mt76_rx_token_release(dev, token);
+		if (!r)
 			return NULL;
 
-		dma_unmap_single(dev->dma_dev, t->dma_addr,
+		dma_unmap_single(dev->dma_dev, r->dma_addr,
 				 SKB_WITH_OVERHEAD(q->buf_size),
 				 DMA_FROM_DEVICE);
 
@@ -410,10 +411,10 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 		if (!buf)
 			return NULL;
 
-		memcpy(buf, t->ptr, SKB_WITH_OVERHEAD(q->buf_size));
-		t->dma_addr = 0;
+		memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size));
+		r->dma_addr = 0;
 
-		mt76_put_rxwi(dev, t);
+		mt76_put_rxwi(dev, r);
 
 		if (drop) {
 			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
@@ -481,7 +482,7 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
 	buf.len = skb->len;
 
 	spin_lock_bh(&q->lock);
-	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL, NULL);
 	mt76_dma_kick_queue(dev, q);
 	spin_unlock_bh(&q->lock);
 
@@ -558,7 +559,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 		goto unmap;
 
 	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
-				tx_info.info, tx_info.skb, t);
+				tx_info.info, tx_info.skb, t, NULL);
 
 unmap:
 	for (n--; n > 0; n--)
@@ -598,20 +599,21 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 	spin_lock_bh(&q->lock);
 
 	while (q->queued < q->ndesc - 1) {
-		struct mt76_txwi_cache *t = NULL;
+		struct mt76_rxwi_cache *r = NULL;
 		struct mt76_queue_buf qbuf;
 		bool skip_alloc = false;
 		void *buf = NULL;
 
 		if ((q->flags & MT_QFLAG_WED) &&
 		    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
-			t = mt76_get_rxwi(dev);
-			if (!t)
+			r = mt76_get_rxwi(dev);
+			if (!r)
 				break;
 
-			if (t->ptr) {
+			/* reuse skb buf for wed rx copy */
+			if (r->ptr) {
 				skip_alloc = true;
-				buf = t->ptr;
+				buf = r->ptr;
 			}
 		}
 
@@ -630,7 +632,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 		qbuf.addr = addr + offset;
 		qbuf.len = len - offset;
 		qbuf.skip_unmap = false;
-		if (mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t) < 0) {
+		if (mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL, r) < 0) {
 			dma_unmap_single(dev->dma_dev, addr, len,
 					 DMA_FROM_DEVICE);
 			skb_free_frag(buf);
diff --git a/mac80211.c b/mac80211.c
index de9ef23..818f4f0 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -597,7 +597,6 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	spin_lock_init(&dev->lock);
 	spin_lock_init(&dev->cc_lock);
 	spin_lock_init(&dev->status_lock);
-	spin_lock_init(&dev->wed_lock);
 	mutex_init(&dev->mutex);
 	init_waitqueue_head(&dev->tx_wait);
 
@@ -628,6 +627,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	INIT_LIST_HEAD(&dev->txwi_cache);
 	INIT_LIST_HEAD(&dev->rxwi_cache);
 	dev->token_size = dev->drv->token_size;
+	dev->rx_token_size = dev->drv->rx_token_size;
 
 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
 		skb_queue_head_init(&dev->rx_skb[i]);
diff --git a/mt76.h b/mt76.h
index b10a16f..631c4cc 100644
--- a/mt76.h
+++ b/mt76.h
@@ -166,6 +166,7 @@ struct mt76_queue_entry {
 	};
 	union {
 		struct mt76_txwi_cache *txwi;
+		struct mt76_rxwi_cache *rxwi;
 		struct urb *urb;
 		int buf_sz;
 	};
@@ -354,10 +355,15 @@ struct mt76_txwi_cache {
 	struct list_head list;
 	dma_addr_t dma_addr;
 
-	union {
-		struct sk_buff *skb;
-		void *ptr;
-	};
+	struct sk_buff *skb;
+};
+
+struct mt76_rxwi_cache {
+	struct list_head list;
+	dma_addr_t dma_addr;
+
+	void *ptr;
+	u32 token;
 };
 
 struct mt76_rx_tid {
@@ -441,6 +447,7 @@ struct mt76_driver_ops {
 	u16 txwi_size;
 	u16 token_size;
 	u8 mcs_rates;
+	u16 rx_token_size;
 
 	void (*update_survey)(struct mt76_phy *phy);
 
@@ -809,7 +816,6 @@ struct mt76_dev {
 
 	struct ieee80211_hw *hw;
 
-	spinlock_t wed_lock;
 	spinlock_t lock;
 	spinlock_t cc_lock;
 
@@ -1400,8 +1406,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
 }
 
 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r);
+struct mt76_rxwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
 void mt76_free_pending_rxwi(struct mt76_dev *dev);
 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
 		      struct napi_struct *napi);
@@ -1547,9 +1553,9 @@ struct mt76_txwi_cache *
 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
 void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
-struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
+struct mt76_rxwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *r, dma_addr_t phys);
+			  struct mt76_rxwi_cache *r, dma_addr_t phys);
 
 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
diff --git a/mt7915/dma.c b/mt7915/dma.c
index 3626008..9cbd362 100644
--- a/mt7915/dma.c
+++ b/mt7915/dma.c
@@ -492,7 +492,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
 	    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
 		dev->mt76.q_rx[MT_RXQ_MAIN].flags =
 			MT_WED_Q_RX(MT7915_RXQ_BAND0);
-		dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
 	}
 
 	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
@@ -529,7 +528,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
 	    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
 		dev->mt76.q_rx[MT_RXQ_BAND1].flags =
 			MT_WED_Q_RX(MT7915_RXQ_BAND1);
-		dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
 	}
 
 	/* rx data queue for band1 */
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 4bc8e8c..09b3973 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -605,18 +605,18 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 
 	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
 	for (i = 0; i < dev->mt76.rx_token_size; i++) {
-		struct mt76_txwi_cache *t;
+		struct mt76_rxwi_cache *r;
 
-		t = mt76_rx_token_release(&dev->mt76, i);
-		if (!t || !t->ptr)
+		r = mt76_rx_token_release(&dev->mt76, i);
+		if (!r || !r->ptr)
 			continue;
 
-		dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
+		dma_unmap_single(dev->mt76.dma_dev, r->dma_addr,
 				 wed->wlan.rx_size, DMA_FROM_DEVICE);
-		skb_free_frag(t->ptr);
-		t->ptr = NULL;
+		skb_free_frag(r->ptr);
+		r->ptr = NULL;
 
-		mt76_put_rxwi(&dev->mt76, t);
+		mt76_put_rxwi(&dev->mt76, r);
 	}
 
 	mt76_free_pending_rxwi(&dev->mt76);
@@ -641,7 +641,7 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 				sizeof(struct skb_shared_info));
 
 	for (i = 0; i < size; i++) {
-		struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76);
+		struct mt76_rxwi_cache *r = mt76_get_rxwi(&dev->mt76);
 		dma_addr_t phy_addr;
 		int token;
 		void *ptr;
@@ -660,7 +660,7 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 		}
 
 		desc->buf0 = cpu_to_le32(phy_addr);
-		token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
+		token = mt76_rx_token_consume(&dev->mt76, ptr, r, phy_addr);
 		if (token < 0) {
 			dma_unmap_single(dev->mt76.dma_dev, phy_addr,
 					 wed->wlan.rx_size, DMA_TO_DEVICE);
@@ -788,7 +788,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
 	wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats;
 	wed->wlan.ser_trigger = mt7915_wed_trigger_ser;
 
-	dev->mt76.rx_token_size = wed->wlan.rx_npkt;
+	dev->mt76.rx_token_size += wed->wlan.rx_npkt;
 
 	if (mtk_wed_device_attach(wed))
 		return 0;
@@ -994,6 +994,7 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
 				       SURVEY_INFO_TIME_RX |
 				       SURVEY_INFO_TIME_BSS_RX,
 		.token_size = MT7915_TOKEN_SIZE,
+		.rx_token_size = MT7915_RX_TOKEN_SIZE,
 		.tx_prepare_skb = mt7915_tx_prepare_skb,
 		.tx_complete_skb = mt76_connac_tx_complete_skb,
 		.rx_skb = mt7915_queue_rx_skb,
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
index c3a0b32..91b98ed 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -57,6 +57,7 @@
 #define MT7915_EEPROM_BLOCK_SIZE	16
 #define MT7915_TOKEN_SIZE		8192
 #define MT7915_HW_TOKEN_SIZE		7168
+#define MT7915_RX_TOKEN_SIZE		4096
 
 #define MT7915_CFEND_RATE_DEFAULT	0x49	/* OFDM 24M */
 #define MT7915_CFEND_RATE_11B		0x03	/* 11B LP, 11M */
diff --git a/tx.c b/tx.c
index 6d55566..a72b777 100644
--- a/tx.c
+++ b/tx.c
@@ -756,16 +756,16 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
 EXPORT_SYMBOL_GPL(mt76_token_consume);
 
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *t, dma_addr_t phys)
+			  struct mt76_rxwi_cache *r, dma_addr_t phys)
 {
 	int token;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
+	token = idr_alloc(&dev->rx_token, r, 0, dev->rx_token_size,
 			  GFP_ATOMIC);
 	if (token >= 0) {
-		t->ptr = ptr;
-		t->dma_addr = phys;
+		r->ptr = ptr;
+		r->dma_addr = phys;
 	}
 	spin_unlock_bh(&dev->rx_token_lock);
 
@@ -802,15 +802,15 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
 }
 EXPORT_SYMBOL_GPL(mt76_token_release);
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_rx_token_release(struct mt76_dev *dev, int token)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	t = idr_remove(&dev->rx_token, token);
+	r = idr_remove(&dev->rx_token, token);
 	spin_unlock_bh(&dev->rx_token_lock);
 
-	return t;
+	return r;
 }
 EXPORT_SYMBOL_GPL(mt76_rx_token_release);
-- 
2.18.0
