From 4faced46403673e8089b9c4cc89d55f7d9fd5e6d Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Fri, 6 Jan 2023 18:18:50 +0800
Subject: [PATCH 2005/2015] wifi: mt76: mt7915: wed: add rxwi for further
 in-chip RRO

Split the rx token bookkeeping out of struct mt76_txwi_cache into a
dedicated struct mt76_rxwi_cache, so rx tokens no longer reuse the tx
descriptor cache. The rxwi free list is now protected by dev->lock
instead of a separate wed_lock, drivers declare their rx token budget
through mt76_driver_ops::rx_token_size, and the WED rx_npkt count is
added on top of that base rather than replacing it. This prepares for
upcoming in-chip RRO (hardware rx reordering) support. An illustrative
sketch of the resulting rx token flow follows the diffstat below.

Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
 dma.c           | 93 +++++++++++++++++++++++++------------------------
 mac80211.c      |  2 +-
 mt76.h          | 24 ++++++++-----
 mt7915/dma.c    |  2 --
 mt7915/mmio.c   |  3 +-
 mt7915/mt7915.h |  1 +
 tx.c            | 16 ++++-----
 wed.c           | 26 +++++++-------
 8 files changed, 87 insertions(+), 80 deletions(-)

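Note (illustrative, not part of the patch): a minimal sketch of the rx
token flow after this change. mt76_fill_rx_desc() and
mt76_resolve_rx_token() are hypothetical helpers standing in for the
patched paths in mt76_dma_add_rx_buf() and mt76_dma_get_buf(); error
handling is trimmed, and only functions touched by this patch are used.

	/* Producer side: hand an rx buffer to the WED/DMA hardware. */
	static int mt76_fill_rx_desc(struct mt76_dev *dev, void *buf,
				     dma_addr_t phys)
	{
		struct mt76_rxwi_cache *r;
		int token;

		r = mt76_get_rxwi(dev);	/* free list (dev->lock) or kzalloc() */
		if (!r)
			return -ENOMEM;

		/* Stash buf/phys in r and allocate an IDR slot in
		 * [0, dev->rx_token_size); the token rides in the descriptor.
		 */
		token = mt76_rx_token_consume(dev, buf, r, phys);
		if (token < 0) {
			mt76_put_rxwi(dev, r);
			return -ENOMEM;
		}

		return token;
	}

	/* Consumer side: a completed descriptor carries the token back. */
	static void *mt76_resolve_rx_token(struct mt76_dev *dev, int token)
	{
		struct mt76_rxwi_cache *r;
		void *buf;

		r = mt76_rx_token_release(dev, token);	/* idr_remove() */
		if (!r)
			return NULL;

		buf = r->ptr;
		r->dma_addr = 0;
		r->ptr = NULL;
		mt76_put_rxwi(dev, r);	/* back onto dev->rxwi_cache */

		return buf;
	}
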
19diff --git a/dma.c b/dma.c
developerdc9eeae2024-04-08 14:36:46 +080020index 185c6f1..9cd97d2 100644
developer57c8f1a2022-12-15 14:09:45 +080021--- a/dma.c
22+++ b/dma.c
developerbd9fa1e2023-10-16 11:04:00 +080023@@ -64,17 +64,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
developer57c8f1a2022-12-15 14:09:45 +080024 return t;
25 }
26
27-static struct mt76_txwi_cache *
28+static struct mt76_rxwi_cache *
29 mt76_alloc_rxwi(struct mt76_dev *dev)
30 {
31- struct mt76_txwi_cache *t;
32+ struct mt76_rxwi_cache *r;
33
34- t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
35- if (!t)
36+ r = kzalloc(L1_CACHE_ALIGN(sizeof(*r)), GFP_ATOMIC);
37+ if (!r)
38 return NULL;
39
40- t->ptr = NULL;
41- return t;
42+ r->ptr = NULL;
43+ return r;
44 }
45
46 static struct mt76_txwi_cache *
developerbd9fa1e2023-10-16 11:04:00 +080047@@ -93,20 +93,20 @@ __mt76_get_txwi(struct mt76_dev *dev)
developer57c8f1a2022-12-15 14:09:45 +080048 return t;
49 }
50
51-static struct mt76_txwi_cache *
52+static struct mt76_rxwi_cache *
53 __mt76_get_rxwi(struct mt76_dev *dev)
54 {
55- struct mt76_txwi_cache *t = NULL;
56+ struct mt76_rxwi_cache *r = NULL;
57
developer1a173672023-12-21 14:49:33 +080058- spin_lock_bh(&dev->wed_lock);
59+ spin_lock_bh(&dev->lock);
developer57c8f1a2022-12-15 14:09:45 +080060 if (!list_empty(&dev->rxwi_cache)) {
61- t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
62+ r = list_first_entry(&dev->rxwi_cache, struct mt76_rxwi_cache,
63 list);
64- list_del(&t->list);
65+ list_del(&r->list);
66 }
developer1a173672023-12-21 14:49:33 +080067- spin_unlock_bh(&dev->wed_lock);
68+ spin_unlock_bh(&dev->lock);
developer57c8f1a2022-12-15 14:09:45 +080069
70- return t;
71+ return r;
72 }
73
74 static struct mt76_txwi_cache *
developerbd9fa1e2023-10-16 11:04:00 +080075@@ -120,13 +120,13 @@ mt76_get_txwi(struct mt76_dev *dev)
developer57c8f1a2022-12-15 14:09:45 +080076 return mt76_alloc_txwi(dev);
77 }
78
79-struct mt76_txwi_cache *
80+struct mt76_rxwi_cache *
81 mt76_get_rxwi(struct mt76_dev *dev)
82 {
83- struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
84+ struct mt76_rxwi_cache *r = __mt76_get_rxwi(dev);
85
86- if (t)
87- return t;
88+ if (r)
89+ return r;
90
91 return mt76_alloc_rxwi(dev);
92 }
developerbd9fa1e2023-10-16 11:04:00 +080093@@ -145,14 +145,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
developer57c8f1a2022-12-15 14:09:45 +080094 EXPORT_SYMBOL_GPL(mt76_put_txwi);
95
96 void
97-mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
98+mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r)
99 {
100- if (!t)
101+ if (!r)
102 return;
103
developer1a173672023-12-21 14:49:33 +0800104- spin_lock_bh(&dev->wed_lock);
developer57c8f1a2022-12-15 14:09:45 +0800105- list_add(&t->list, &dev->rxwi_cache);
developer1a173672023-12-21 14:49:33 +0800106- spin_unlock_bh(&dev->wed_lock);
107+ spin_lock_bh(&dev->lock);
developer57c8f1a2022-12-15 14:09:45 +0800108+ list_add(&r->list, &dev->rxwi_cache);
developer1a173672023-12-21 14:49:33 +0800109+ spin_unlock_bh(&dev->lock);
developer57c8f1a2022-12-15 14:09:45 +0800110 }
111 EXPORT_SYMBOL_GPL(mt76_put_rxwi);
112
developerbd9fa1e2023-10-16 11:04:00 +0800113@@ -173,13 +173,13 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
developer57c8f1a2022-12-15 14:09:45 +0800114 void
115 mt76_free_pending_rxwi(struct mt76_dev *dev)
116 {
117- struct mt76_txwi_cache *t;
118+ struct mt76_rxwi_cache *r;
119
120 local_bh_disable();
121- while ((t = __mt76_get_rxwi(dev)) != NULL) {
122- if (t->ptr)
developerda18a742023-04-06 13:44:00 +0800123- skb_free_frag(t->ptr);
developer57c8f1a2022-12-15 14:09:45 +0800124- kfree(t);
125+ while ((r = __mt76_get_rxwi(dev)) != NULL) {
126+ if (r->ptr)
developerda18a742023-04-06 13:44:00 +0800127+ skb_free_frag(r->ptr);
developer57c8f1a2022-12-15 14:09:45 +0800128+ kfree(r);
129 }
130 local_bh_enable();
131 }
developer753619c2024-02-22 13:42:45 +0800132@@ -228,7 +228,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
developer1a173672023-12-21 14:49:33 +0800133 struct mt76_queue_buf *buf, void *data)
developer13655da2023-01-10 19:53:25 +0800134 {
developer13655da2023-01-10 19:53:25 +0800135 struct mt76_queue_entry *entry = &q->entry[q->head];
developer60a3d662023-02-07 15:24:34 +0800136- struct mt76_txwi_cache *txwi = NULL;
137+ struct mt76_rxwi_cache *rxwi = NULL;
developer1a173672023-12-21 14:49:33 +0800138 struct mt76_desc *desc;
developer60a3d662023-02-07 15:24:34 +0800139 int idx = q->head;
developer1a173672023-12-21 14:49:33 +0800140 u32 buf1 = 0, ctrl;
developer753619c2024-02-22 13:42:45 +0800141@@ -249,13 +249,13 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
developer1a173672023-12-21 14:49:33 +0800142 #endif
developer13655da2023-01-10 19:53:25 +0800143
developer60a3d662023-02-07 15:24:34 +0800144 if (mt76_queue_is_wed_rx(q)) {
145- txwi = mt76_get_rxwi(dev);
146- if (!txwi)
147+ rxwi = mt76_get_rxwi(dev);
148+ if (!rxwi)
149 return -ENOMEM;
developer13655da2023-01-10 19:53:25 +0800150
151- rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
152+ rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
153 if (rx_token < 0) {
154- mt76_put_rxwi(dev, txwi);
155+ mt76_put_rxwi(dev, rxwi);
156 return -ENOMEM;
157 }
158
developer753619c2024-02-22 13:42:45 +0800159@@ -271,7 +271,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
developer1a173672023-12-21 14:49:33 +0800160 done:
developer13655da2023-01-10 19:53:25 +0800161 entry->dma_addr[0] = buf->addr;
162 entry->dma_len[0] = buf->len;
163- entry->txwi = txwi;
164+ entry->rxwi = rxwi;
165 entry->buf = data;
166 entry->wcid = 0xffff;
167 entry->skip_buf1 = true;
developer753619c2024-02-22 13:42:45 +0800168@@ -284,7 +284,7 @@ done:
developer13655da2023-01-10 19:53:25 +0800169 static int
developer57c8f1a2022-12-15 14:09:45 +0800170 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
171 struct mt76_queue_buf *buf, int nbufs, u32 info,
172- struct sk_buff *skb, void *txwi)
173+ struct sk_buff *skb, void *txwi, void *rxwi)
174 {
175 struct mt76_queue_entry *entry;
176 struct mt76_desc *desc;
developer753619c2024-02-22 13:42:45 +0800177@@ -344,6 +344,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
developer57c8f1a2022-12-15 14:09:45 +0800178 }
179
180 q->entry[idx].txwi = txwi;
181+ q->entry[idx].rxwi = rxwi;
182 q->entry[idx].skb = skb;
183 q->entry[idx].wcid = 0xffff;
184
developer753619c2024-02-22 13:42:45 +0800185@@ -446,13 +447,13 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
developer1a173672023-12-21 14:49:33 +0800186 if (mt76_queue_is_wed_rx(q)) {
developer57c8f1a2022-12-15 14:09:45 +0800187 u32 id, find = 0;
developerbb6ddff2023-03-08 17:22:32 +0800188 u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
developer57c8f1a2022-12-15 14:09:45 +0800189- struct mt76_txwi_cache *t;
190+ struct mt76_rxwi_cache *r;
191
192 if (*more) {
193 spin_lock_bh(&dev->rx_token_lock);
194
195- idr_for_each_entry(&dev->rx_token, t, id) {
196- if (t->dma_addr == le32_to_cpu(desc->buf0)) {
197+ idr_for_each_entry(&dev->rx_token, r, id) {
198+ if (r->dma_addr == le32_to_cpu(desc->buf0)) {
199 find = 1;
developer57c8f1a2022-12-15 14:09:45 +0800200 token = id;
developer60a3d662023-02-07 15:24:34 +0800201
developer753619c2024-02-22 13:42:45 +0800202@@ -469,19 +470,19 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
developer57c8f1a2022-12-15 14:09:45 +0800203 return NULL;
204 }
205
206- t = mt76_rx_token_release(dev, token);
207- if (!t)
208+ r = mt76_rx_token_release(dev, token);
209+ if (!r)
210 return NULL;
211
developerda18a742023-04-06 13:44:00 +0800212- dma_unmap_single(dev->dma_dev, t->dma_addr,
213+ dma_unmap_single(dev->dma_dev, r->dma_addr,
214 SKB_WITH_OVERHEAD(q->buf_size),
215 DMA_FROM_DEVICE);
developer57c8f1a2022-12-15 14:09:45 +0800216
developer60a3d662023-02-07 15:24:34 +0800217- buf = t->ptr;
218- t->dma_addr = 0;
219- t->ptr = NULL;
220+ buf = r->ptr;
221+ r->dma_addr = 0;
222+ r->ptr = NULL;
developer765f1892023-01-30 14:02:51 +0800223
developer60a3d662023-02-07 15:24:34 +0800224- mt76_put_rxwi(dev, t);
225+ mt76_put_rxwi(dev, r);
developer1a173672023-12-21 14:49:33 +0800226 if (drop)
227 *drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
228 } else {
developera46f6132024-03-26 14:09:54 +0800229@@ -547,7 +548,7 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
developer57c8f1a2022-12-15 14:09:45 +0800230 buf.len = skb->len;
231
232 spin_lock_bh(&q->lock);
233- mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
234+ mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL, NULL);
235 mt76_dma_kick_queue(dev, q);
236 spin_unlock_bh(&q->lock);
237
developera46f6132024-03-26 14:09:54 +0800238@@ -628,7 +629,7 @@ mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
developer57c8f1a2022-12-15 14:09:45 +0800239 goto unmap;
240
241 return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
242- tx_info.info, tx_info.skb, t);
243+ tx_info.info, tx_info.skb, t, NULL);
244
245 unmap:
246 for (n--; n > 0; n--)
diff --git a/mac80211.c b/mac80211.c
index f9dfdf8..225b290 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -618,7 +618,6 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	spin_lock_init(&dev->lock);
 	spin_lock_init(&dev->cc_lock);
 	spin_lock_init(&dev->status_lock);
-	spin_lock_init(&dev->wed_lock);
 	mutex_init(&dev->mutex);
 	init_waitqueue_head(&dev->tx_wait);
 
@@ -651,6 +650,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	INIT_LIST_HEAD(&dev->txwi_cache);
 	INIT_LIST_HEAD(&dev->rxwi_cache);
 	dev->token_size = dev->drv->token_size;
+	dev->rx_token_size = dev->drv->rx_token_size;
 
 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
 		skb_queue_head_init(&dev->rx_skb[i]);
267diff --git a/mt76.h b/mt76.h
developera20cdc22024-05-31 18:57:31 +0800268index 6168758..5e71267 100644
developer57c8f1a2022-12-15 14:09:45 +0800269--- a/mt76.h
270+++ b/mt76.h
developer1a173672023-12-21 14:49:33 +0800271@@ -193,6 +193,7 @@ struct mt76_queue_entry {
developer57c8f1a2022-12-15 14:09:45 +0800272 };
273 union {
274 struct mt76_txwi_cache *txwi;
275+ struct mt76_rxwi_cache *rxwi;
276 struct urb *urb;
277 int buf_sz;
278 };
developera46f6132024-03-26 14:09:54 +0800279@@ -413,10 +414,15 @@ struct mt76_txwi_cache {
280 u8 phy_idx;
developerbddc9db2023-09-11 13:34:36 +0800281 unsigned long jiffies;
developer57c8f1a2022-12-15 14:09:45 +0800282
283- union {
284- struct sk_buff *skb;
285- void *ptr;
286- };
287+ struct sk_buff *skb;
288+};
289+
290+struct mt76_rxwi_cache {
291+ struct list_head list;
292+ dma_addr_t dma_addr;
293+
294+ void *ptr;
295+ u32 token;
296 };
297
298 struct mt76_rx_tid {
developera46f6132024-03-26 14:09:54 +0800299@@ -511,6 +517,7 @@ struct mt76_driver_ops {
developer57c8f1a2022-12-15 14:09:45 +0800300 u16 txwi_size;
301 u16 token_size;
302 u8 mcs_rates;
303+ u16 rx_token_size;
304
305 void (*update_survey)(struct mt76_phy *phy);
306
developera46f6132024-03-26 14:09:54 +0800307@@ -903,7 +910,6 @@ struct mt76_dev {
developer57c8f1a2022-12-15 14:09:45 +0800308
309 struct ieee80211_hw *hw;
310
311- spinlock_t wed_lock;
312 spinlock_t lock;
313 spinlock_t cc_lock;
314
developera20cdc22024-05-31 18:57:31 +0800315@@ -1639,8 +1645,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
developer57c8f1a2022-12-15 14:09:45 +0800316 }
317
318 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
319-void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
320-struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
321+void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r);
322+struct mt76_rxwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
323 void mt76_free_pending_rxwi(struct mt76_dev *dev);
324 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
325 struct napi_struct *napi);
developera20cdc22024-05-31 18:57:31 +0800326@@ -1819,9 +1825,9 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
developera46f6132024-03-26 14:09:54 +0800327 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi,
328 u8 phy_idx);
developer57c8f1a2022-12-15 14:09:45 +0800329 void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
330-struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
331+struct mt76_rxwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
332 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
333- struct mt76_txwi_cache *r, dma_addr_t phys);
334+ struct mt76_rxwi_cache *r, dma_addr_t phys);
developerda18a742023-04-06 13:44:00 +0800335
336 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
developer57c8f1a2022-12-15 14:09:45 +0800337 {
338diff --git a/mt7915/dma.c b/mt7915/dma.c
developerdc9eeae2024-04-08 14:36:46 +0800339index 0baa82c..552410a 100644
developer57c8f1a2022-12-15 14:09:45 +0800340--- a/mt7915/dma.c
341+++ b/mt7915/dma.c
developer1a173672023-12-21 14:49:33 +0800342@@ -512,7 +512,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
developer57c8f1a2022-12-15 14:09:45 +0800343 mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
developer1a173672023-12-21 14:49:33 +0800344 mdev->q_rx[MT_RXQ_MAIN].flags =
developer57c8f1a2022-12-15 14:09:45 +0800345 MT_WED_Q_RX(MT7915_RXQ_BAND0);
346- dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
developer1a173672023-12-21 14:49:33 +0800347 mdev->q_rx[MT_RXQ_MAIN].wed = &mdev->mmio.wed;
developer57c8f1a2022-12-15 14:09:45 +0800348 }
349
developer1a173672023-12-21 14:49:33 +0800350@@ -551,7 +550,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
developer57c8f1a2022-12-15 14:09:45 +0800351 mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
developer1a173672023-12-21 14:49:33 +0800352 mdev->q_rx[MT_RXQ_BAND1].flags =
developer57c8f1a2022-12-15 14:09:45 +0800353 MT_WED_Q_RX(MT7915_RXQ_BAND1);
354- dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
developer1a173672023-12-21 14:49:33 +0800355 mdev->q_rx[MT_RXQ_BAND1].wed = &mdev->mmio.wed;
developer57c8f1a2022-12-15 14:09:45 +0800356 }
357
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 91100f1..3391a94 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -725,7 +725,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
 	wed->wlan.reset = mt7915_mmio_wed_reset;
 	wed->wlan.reset_complete = mt76_wed_reset_complete;
 
-	dev->mt76.rx_token_size = wed->wlan.rx_npkt;
+	dev->mt76.rx_token_size += wed->wlan.rx_npkt;
 
 	if (mtk_wed_device_attach(wed))
 		return 0;
@@ -933,6 +933,7 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
 			       SURVEY_INFO_TIME_RX |
 			       SURVEY_INFO_TIME_BSS_RX,
 		.token_size = MT7915_TOKEN_SIZE,
+		.rx_token_size = MT7915_RX_TOKEN_SIZE,
 		.tx_prepare_skb = mt7915_tx_prepare_skb,
 		.tx_complete_skb = mt76_connac_tx_complete_skb,
 		.rx_skb = mt7915_queue_rx_skb,
379diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
developera20cdc22024-05-31 18:57:31 +0800380index 1d0cfa1..f5a7e1e 100644
developer57c8f1a2022-12-15 14:09:45 +0800381--- a/mt7915/mt7915.h
382+++ b/mt7915/mt7915.h
developer7af0f762023-05-22 15:16:16 +0800383@@ -64,6 +64,7 @@
developer57c8f1a2022-12-15 14:09:45 +0800384 #define MT7915_EEPROM_BLOCK_SIZE 16
developer7a520b52023-03-14 14:09:34 +0800385 #define MT7915_HW_TOKEN_SIZE 7168
developer57c8f1a2022-12-15 14:09:45 +0800386 #define MT7915_TOKEN_SIZE 8192
387+#define MT7915_RX_TOKEN_SIZE 4096
388
389 #define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
390 #define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
391diff --git a/tx.c b/tx.c
developerdc9eeae2024-04-08 14:36:46 +0800392index db0d4df..92afbf5 100644
developer57c8f1a2022-12-15 14:09:45 +0800393--- a/tx.c
394+++ b/tx.c
developera46f6132024-03-26 14:09:54 +0800395@@ -864,16 +864,16 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi,
developer57c8f1a2022-12-15 14:09:45 +0800396 EXPORT_SYMBOL_GPL(mt76_token_consume);
397
398 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
399- struct mt76_txwi_cache *t, dma_addr_t phys)
400+ struct mt76_rxwi_cache *r, dma_addr_t phys)
401 {
402 int token;
403
404 spin_lock_bh(&dev->rx_token_lock);
405- token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
406+ token = idr_alloc(&dev->rx_token, r, 0, dev->rx_token_size,
407 GFP_ATOMIC);
408 if (token >= 0) {
409- t->ptr = ptr;
410- t->dma_addr = phys;
411+ r->ptr = ptr;
412+ r->dma_addr = phys;
413 }
414 spin_unlock_bh(&dev->rx_token_lock);
415
developera46f6132024-03-26 14:09:54 +0800416@@ -912,15 +912,15 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
developer57c8f1a2022-12-15 14:09:45 +0800417 }
418 EXPORT_SYMBOL_GPL(mt76_token_release);
419
420-struct mt76_txwi_cache *
421+struct mt76_rxwi_cache *
422 mt76_rx_token_release(struct mt76_dev *dev, int token)
423 {
424- struct mt76_txwi_cache *t;
425+ struct mt76_rxwi_cache *r;
426
427 spin_lock_bh(&dev->rx_token_lock);
428- t = idr_remove(&dev->rx_token, token);
429+ r = idr_remove(&dev->rx_token, token);
430 spin_unlock_bh(&dev->rx_token_lock);
431
432- return t;
433+ return r;
434 }
435 EXPORT_SYMBOL_GPL(mt76_rx_token_release);
diff --git a/wed.c b/wed.c
index 47c81a2..c03b52f 100644
--- a/wed.c
+++ b/wed.c
@@ -16,18 +16,18 @@ void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
 			sizeof(struct skb_shared_info));
 
 	for (i = 0; i < dev->rx_token_size; i++) {
-		struct mt76_txwi_cache *t;
+		struct mt76_rxwi_cache *r;
 
-		t = mt76_rx_token_release(dev, i);
-		if (!t || !t->ptr)
+		r = mt76_rx_token_release(dev, i);
+		if (!r || !r->ptr)
 			continue;
 
-		dma_unmap_single(dev->dma_dev, t->dma_addr,
+		dma_unmap_single(dev->dma_dev, r->dma_addr,
 				 wed->wlan.rx_size, DMA_FROM_DEVICE);
-		__free_pages(virt_to_page(t->ptr), get_order(length));
-		t->ptr = NULL;
+		__free_pages(virt_to_page(r->ptr), get_order(length));
+		r->ptr = NULL;
 
-		mt76_put_rxwi(dev, t);
+		mt76_put_rxwi(dev, r);
 	}
 
 	mt76_free_pending_rxwi(dev);
@@ -46,18 +46,18 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 			sizeof(struct skb_shared_info));
 
 	for (i = 0; i < size; i++) {
-		struct mt76_txwi_cache *t = mt76_get_rxwi(dev);
+		struct mt76_rxwi_cache *r = mt76_get_rxwi(dev);
 		dma_addr_t phy_addr;
 		struct page *page;
 		int token;
 		void *ptr;
 
-		if (!t)
+		if (!r)
 			goto unmap;
 
 		page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
 		if (!page) {
-			mt76_put_rxwi(dev, t);
+			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
 
@@ -67,17 +67,17 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 			  DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(dev->dev, phy_addr))) {
 			__free_pages(page, get_order(length));
-			mt76_put_rxwi(dev, t);
+			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
 
 		desc->buf0 = cpu_to_le32(phy_addr);
-		token = mt76_rx_token_consume(dev, ptr, t, phy_addr);
+		token = mt76_rx_token_consume(dev, ptr, r, phy_addr);
 		if (token < 0) {
 			dma_unmap_single(dev->dma_dev, phy_addr,
 					 wed->wlan.rx_size, DMA_TO_DEVICE);
 			__free_pages(page, get_order(length));
-			mt76_put_rxwi(dev, t);
+			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
 
-- 
2.18.0
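Note on the token accounting (an illustrative reading of the mt7915/mmio.c
and mt7915/dma.c hunks, not an authoritative statement of intent):
previously mt7915_mmio_wed_init() overwrote dev->mt76.rx_token_size with
wed->wlan.rx_npkt, and mt7915_dma_init() added MT7915_RX_RING_SIZE for
each WED rx queue. With this patch the base budget comes from
mt76_driver_ops::rx_token_size (MT7915_RX_TOKEN_SIZE, 4096) at
mt76_alloc_device() time, and WED attach only adds rx_npkt on top, so the
IDR range used by mt76_rx_token_consume() appears to keep headroom for
non-WED rings as well.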