From 7caf2dd34d38bd98b5b1087c0f00ccdb009a461a Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Mon, 6 Jun 2022 20:22:35 +0800
Subject: [PATCH] mt76: remove WED support patch to fix build errors

---
 dma.c           | 160 ++++++++++--------------------------------------
 mac80211.c      |   4 +-
 mmio.c          |   9 +--
 mt76.h          |  25 ++------
 mt7603/dma.c    |   8 +--
 mt7615/dma.c    |   6 +-
 mt76x02_mmio.c  |   4 +-
 mt7915/dma.c    |  43 ++-----------
 mt7915/mac.c    | 129 ++++++--------------------------------
 mt7915/mac.h    |   2 -
 mt7915/main.c   |  36 -----------
 mt7915/mcu.c    |   3 -
 mt7915/mmio.c   |  29 +++------
 mt7915/mt7915.h |   2 -
 mt7915/pci.c    |  96 +++--------------------------
 mt7915/regs.h   |  17 +----
 mt7921/dma.c    |   2 +-
 tx.c            |  16 +----
 18 files changed, 94 insertions(+), 497 deletions(-)
 mode change 100644 => 100755 mt7915/mcu.c

diff --git a/dma.c b/dma.c
index f6f5f129..3f7456b1 100644
--- a/dma.c
+++ b/dma.c
@@ -7,36 +7,9 @@
 #include "mt76.h"
 #include "dma.h"
 
-#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)
-
-#define Q_READ(_dev, _q, _field) ({ \
- u32 _offset = offsetof(struct mt76_queue_regs, _field); \
- u32 _val; \
- if ((_q)->flags & MT_QFLAG_WED) \
- _val = mtk_wed_device_reg_read(&(_dev)->mmio.wed, \
- ((_q)->wed_regs + \
- _offset)); \
- else \
- _val = readl(&(_q)->regs->_field); \
- _val; \
-})
-
-#define Q_WRITE(_dev, _q, _field, _val) do { \
- u32 _offset = offsetof(struct mt76_queue_regs, _field); \
- if ((_q)->flags & MT_QFLAG_WED) \
- mtk_wed_device_reg_write(&(_dev)->mmio.wed, \
- ((_q)->wed_regs + _offset), \
- _val); \
- else \
- writel(_val, &(_q)->regs->_field); \
-} while (0)
-
-#else
-
-#define Q_READ(_dev, _q, _field) readl(&(_q)->regs->_field)
-#define Q_WRITE(_dev, _q, _field, _val) writel(_val, &(_q)->regs->_field)
+#define Q_READ(_dev, _q, _field) readl(&(_q)->regs->_field)
+#define Q_WRITE(_dev, _q, _field, _val) writel(_val, &(_q)->regs->_field)
 
-#endif
 
 static struct mt76_txwi_cache *
 mt76_alloc_txwi(struct mt76_dev *dev)
@@ -138,6 +111,36 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
 mt76_dma_sync_idx(dev, q);
 }
 
+static int
+mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize,
+ u32 ring_base)
+{
+ int size;
+
+ spin_lock_init(&q->lock);
+ spin_lock_init(&q->cleanup_lock);
+
+ q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
+ q->ndesc = n_desc;
+ q->buf_size = bufsize;
+ q->hw_idx = idx;
+
+ size = q->ndesc * sizeof(struct mt76_desc);
+ q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
+ size = q->ndesc * sizeof(*q->entry);
+ q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ mt76_dma_queue_reset(dev, q);
+
+ return 0;
+}
+
 static int
 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 struct mt76_queue_buf *buf, int nbufs, u32 info,
@@ -482,85 +485,6 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 return frames;
 }
 
-static int
-mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
-{
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
- struct mtk_wed_device *wed = &dev->mmio.wed;
- int ret, type, ring;
- u8 flags = q->flags;
-
- if (!mtk_wed_device_active(wed))
- q->flags &= ~MT_QFLAG_WED;
-
- if (!(q->flags & MT_QFLAG_WED))
- return 0;
-
- type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
- ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
-
- switch (type) {
- case MT76_WED_Q_TX:
- ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs);
- if (!ret)
- q->wed_regs = wed->tx_ring[ring].reg_base;
- break;
- case MT76_WED_Q_TXFREE:
- /* WED txfree queue needs ring to be initialized before setup */
- q->flags = 0;
- mt76_dma_queue_reset(dev, q);
- mt76_dma_rx_fill(dev, q);
- q->flags = flags;
-
- ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
- if (!ret)
- q->wed_regs = wed->txfree_ring.reg_base;
- break;
- default:
- ret = -EINVAL;
- }
-
- return ret;
-#else
- return 0;
-#endif
-}
-
-static int
-mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
- int idx, int n_desc, int bufsize,
- u32 ring_base)
-{
- int ret, size;
-
- spin_lock_init(&q->lock);
- spin_lock_init(&q->cleanup_lock);
-
- q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
- q->ndesc = n_desc;
- q->buf_size = bufsize;
- q->hw_idx = idx;
-
- size = q->ndesc * sizeof(struct mt76_desc);
- q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
- if (!q->desc)
- return -ENOMEM;
-
- size = q->ndesc * sizeof(*q->entry);
- q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
- if (!q->entry)
- return -ENOMEM;
-
- ret = mt76_dma_wed_setup(dev, q);
- if (ret)
- return ret;
-
- if (q->flags != MT_WED_Q_TXFREE)
- mt76_dma_queue_reset(dev, q);
-
- return 0;
-}
-
 static void
 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 {
@@ -642,29 +566,14 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
 static int
 mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 {
- int len, data_len, done = 0, dma_idx;
+ int len, data_len, done = 0;
 struct sk_buff *skb;
 unsigned char *data;
- bool check_ddone = false;
 bool more;
 
- if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
- q->flags == MT_WED_Q_TXFREE) {
- dma_idx = Q_READ(dev, q, dma_idx);
- check_ddone = true;
- }
-
 while (done < budget) {
 u32 info;
 
- if (check_ddone) {
- if (q->tail == dma_idx)
- dma_idx = Q_READ(dev, q, dma_idx);
-
- if (q->tail == dma_idx)
- break;
- }
-
 data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
 if (!data)
 break;
@@ -805,8 +714,5 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 }
 
 mt76_free_pending_txwi(dev);
-
- if (mtk_wed_device_active(&dev->mmio.wed))
- mtk_wed_device_detach(&dev->mmio.wed);
 }
 EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
diff --git a/mac80211.c b/mac80211.c
index 4b4d8b99..19a2b849 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -1600,7 +1600,7 @@ EXPORT_SYMBOL_GPL(mt76_get_antenna);
 
 struct mt76_queue *
 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
- int ring_base, u32 flags)
+ int ring_base)
 {
 struct mt76_queue *hwq;
 int err;
@@ -1609,8 +1609,6 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
 if (!hwq)
 return ERR_PTR(-ENOMEM);
 
- hwq->flags = flags;
-
 err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
 if (err < 0)
 return ERR_PTR(err);
diff --git a/mmio.c b/mmio.c
index 86e3d2ac..26353b6b 100644
--- a/mmio.c
+++ b/mmio.c
@@ -73,13 +73,8 @@ void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
 spin_lock_irqsave(&dev->mmio.irq_lock, flags);
 dev->mmio.irqmask &= ~clear;
 dev->mmio.irqmask |= set;
- if (addr) {
- if (mtk_wed_device_active(&dev->mmio.wed))
- mtk_wed_device_irq_set_mask(&dev->mmio.wed,
- dev->mmio.irqmask);
- else
- mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
- }
+ if (addr)
+ mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
 spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
 }
 EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
diff --git a/mt76.h b/mt76.h
index 062c5ce4..ed1924c1 100644
--- a/mt76.h
+++ b/mt76.h
@@ -13,7 +13,6 @@
 #include <linux/leds.h>
 #include <linux/usb.h>
 #include <linux/average.h>
-#include <linux/soc/mediatek/mtk_wed.h>
 #include <net/mac80211.h>
 #include "util.h"
 #include "testmode.h"
@@ -27,16 +26,6 @@
 
 #define MT76_TOKEN_FREE_THR 64
 
-#define MT_QFLAG_WED_RING GENMASK(1, 0)
-#define MT_QFLAG_WED_TYPE GENMASK(3, 2)
-#define MT_QFLAG_WED BIT(4)
-
-#define __MT_WED_Q(_type, _n) (MT_QFLAG_WED | \
- FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
- FIELD_PREP(MT_QFLAG_WED_RING, _n))
-#define MT_WED_Q_TX(_n) __MT_WED_Q(MT76_WED_Q_TX, _n)
-#define MT_WED_Q_TXFREE __MT_WED_Q(MT76_WED_Q_TXFREE, 0)
-
 struct mt76_dev;
 struct mt76_phy;
 struct mt76_wcid;
@@ -186,9 +175,6 @@ struct mt76_queue {
 u8 buf_offset;
 u8 hw_idx;
 u8 qid;
- u8 flags;
-
- u32 wed_regs;
 
 dma_addr_t desc_dma;
 struct sk_buff *rx_head;
@@ -556,8 +542,6 @@ struct mt76_mmio {
 void __iomem *regs;
 spinlock_t irq_lock;
 u32 irqmask;
-
- struct mtk_wed_device wed;
 };
 
 struct mt76_rx_status {
@@ -782,7 +766,6 @@ struct mt76_dev {
 
 spinlock_t token_lock;
 struct idr token;
- u16 wed_token_count;
 u16 token_count;
 u16 token_size;
 
@@ -1008,14 +991,14 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
 
 struct mt76_queue *
 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
- int ring_base, u32 flags);
+ int ring_base);
 u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx);
 static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
- int n_desc, int ring_base, u32 flags)
+ int n_desc, int ring_base)
 {
 struct mt76_queue *q;
 
- q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, flags);
+ q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base);
 if (IS_ERR(q))
 return PTR_ERR(q);
 
@@ -1030,7 +1013,7 @@ static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
 {
 struct mt76_queue *q;
 
- q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, 0);
+ q = mt76_init_queue(dev, qid, idx, n_desc, ring_base);
 if (IS_ERR(q))
 return PTR_ERR(q);
 
diff --git a/mt7603/dma.c b/mt7603/dma.c
index 590cff9d..37b092e3 100644
--- a/mt7603/dma.c
+++ b/mt7603/dma.c
@@ -173,13 +173,13 @@ int mt7603_dma_init(struct mt7603_dev *dev)
 
 for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
 ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
- MT7603_TX_RING_SIZE, MT_TX_RING_BASE, 0);
+ MT7603_TX_RING_SIZE, MT_TX_RING_BASE);
 if (ret)
 return ret;
 }
 
 ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
- MT7603_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
+ MT7603_PSD_RING_SIZE, MT_TX_RING_BASE);
 if (ret)
 return ret;
 
@@ -189,12 +189,12 @@ int mt7603_dma_init(struct mt7603_dev *dev)
 return ret;
 
 ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_BEACON, MT_TX_HW_QUEUE_BCN,
- MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
+ MT_MCU_RING_SIZE, MT_TX_RING_BASE);
 if (ret)
 return ret;
 
 ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_CAB, MT_TX_HW_QUEUE_BMC,
- MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
+ MT_MCU_RING_SIZE, MT_TX_RING_BASE);
 if (ret)
 return ret;
 
diff --git a/mt7615/dma.c b/mt7615/dma.c
index 3a79a2d4..00aefea1 100644
--- a/mt7615/dma.c
+++ b/mt7615/dma.c
@@ -26,14 +26,14 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
 for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
 ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
 MT7615_TX_RING_SIZE / 2,
- MT_TX_RING_BASE, 0);
+ MT_TX_RING_BASE);
 if (ret)
 return ret;
 }
 
 ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT7622_TXQ_MGMT,
 MT7615_TX_MGMT_RING_SIZE,
- MT_TX_RING_BASE, 0);
+ MT_TX_RING_BASE);
 if (ret)
 return ret;
 
@@ -55,7 +55,7 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
 return mt7622_init_tx_queues_multi(dev);
 
 ret = mt76_init_tx_queue(&dev->mphy, 0, 0, MT7615_TX_RING_SIZE,
- MT_TX_RING_BASE, 0);
+ MT_TX_RING_BASE);
 if (ret)
 return ret;
 
diff --git a/mt76x02_mmio.c b/mt76x02_mmio.c
index 0fa3c7c3..8bcd8afa 100644
--- a/mt76x02_mmio.c
+++ b/mt76x02_mmio.c
@@ -191,13 +191,13 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i),
 MT76x02_TX_RING_SIZE,
- MT_TX_RING_BASE, 0);
+ MT_TX_RING_BASE);
 if (ret)
 return ret;
 }
 
 ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
- MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
+ MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE);
 if (ret)
 return ret;
 
diff --git a/mt7915/dma.c b/mt7915/dma.c
index 9e3d14db..4358e9bf 100644
--- a/mt7915/dma.c
+++ b/mt7915/dma.c
@@ -8,16 +8,9 @@
 static int
 mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
 {
- struct mt7915_dev *dev = phy->dev;
 int i, err;
 
- if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
- ring_base = MT_WED_TX_RING_BASE;
- idx -= MT_TXQ_ID(0);
- }
-
- err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base,
- MT_WED_Q_TX(idx));
+ err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base);
 if (err < 0)
 return err;
 
@@ -326,14 +319,6 @@ static int mt7915_dma_enable(struct mt7915_dev *dev)
 if (dev->dbdc_support || dev->phy.band_idx)
 irq_mask |= MT_INT_BAND1_RX_DONE;
 
- if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
- u32 wed_irq_mask = irq_mask;
-
- wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
- mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
- mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
- }
-
 mt7915_irq_enable(dev, irq_mask);
 
 return 0;
@@ -342,7 +327,6 @@ static int mt7915_dma_enable(struct mt7915_dev *dev)
 int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
 {
 struct mt76_dev *mdev = &dev->mt76;
- u32 wa_rx_base, wa_rx_idx;
 u32 hif1_ofs = 0;
 int ret;
 
@@ -355,17 +339,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
 
 mt7915_dma_disable(dev, true);
 
- if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
- mt76_set(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
-
- mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
- FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
- FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
- FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1, 1));
- } else {
- mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
- }
-
 /* init tx queue */
 ret = mt7915_init_tx_queues(&dev->phy,
 MT_TXQ_ID(dev->phy.band_idx),
@@ -417,17 +390,11 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
 return ret;
 
 /* event from WA */
- if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
- wa_rx_base = MT_WED_RX_RING_BASE;
- wa_rx_idx = MT7915_RXQ_MCU_WA;
- dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
- } else {
- wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA);
- wa_rx_idx = MT_RXQ_ID(MT_RXQ_MCU_WA);
- }
 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
- wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE, wa_rx_base);
+ MT_RXQ_ID(MT_RXQ_MCU_WA),
+ MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
 if (ret)
 return ret;
 
diff --git a/mt7915/mac.c b/mt7915/mac.c
index bae700eb..094d10a5 100644
--- a/mt7915/mac.c
+++ b/mt7915/mac.c
@@ -1413,29 +1413,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 return 0;
 }
 
-u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
-{
- struct mt7915_txp *txp = ptr + MT_TXD_SIZE;
- __le32 *txwi = ptr;
- u32 val;
-
- memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));
-
- val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
- FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
- txwi[0] = cpu_to_le32(val);
-
- val = MT_TXD1_LONG_FORMAT |
- FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
- txwi[1] = cpu_to_le32(val);
-
- txp->token = cpu_to_le16(token_id);
- txp->nbuf = 1;
- txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
-
- return MT_TXD_SIZE + sizeof(*txp);
-}
-
 static void
 mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
 {
@@ -1469,7 +1446,7 @@ mt7915_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
 
 txp = mt7915_txwi_to_txp(dev, t);
 for (i = 0; i < txp->nbuf; i++)
- dma_unmap_single(dev->dma_dev, le32_to_cpu(txp->buf[i]),
+ dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
 }
 
@@ -1478,7 +1455,6 @@ mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
 struct ieee80211_sta *sta, struct list_head *free_list)
 {
 struct mt76_dev *mdev = &dev->mt76;
- struct mt7915_sta *msta;
 struct mt76_wcid *wcid;
 __le32 *txwi;
 u16 wcid_idx;
@@ -1491,24 +1467,13 @@ mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
 if (sta) {
 wcid = (struct mt76_wcid *)sta->drv_priv;
 wcid_idx = wcid->idx;
+
+ if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7915_tx_check_aggr(sta, txwi);
 } else {
 wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
- wcid = rcu_dereference(dev->mt76.wcid[wcid_idx]);
-
- if (wcid && wcid->sta) {
- msta = container_of(wcid, struct mt7915_sta, wcid);
- sta = container_of((void *)msta, struct ieee80211_sta,
- drv_priv);
- spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&msta->poll_list))
- list_add_tail(&msta->poll_list, &dev->sta_poll_list);
- spin_unlock_bh(&dev->sta_poll_lock);
- }
 }
 
- if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7915_tx_check_aggr(sta, txwi);
-
 __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
 
 out:
@@ -1516,56 +1481,30 @@ out:
 mt76_put_txwi(mdev, t);
 }
 
-static void
-mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
-{
- struct mt76_dev *mdev = &dev->mt76;
- struct mt76_phy *mphy_ext = mdev->phy2;
-
- /* clean DMA queues and unmap buffers first */
- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
- if (mphy_ext) {
- mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
- mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
- }
-}
-
-static void
-mt7915_mac_tx_free_done(struct mt7915_dev *dev,
- struct list_head *free_list, bool wake)
-{
- struct sk_buff *skb, *tmp;
-
- mt7915_mac_sta_poll(dev);
-
- if (wake)
- mt76_set_tx_blocked(&dev->mt76, false);
-
- mt76_worker_schedule(&dev->mt76.tx_worker);
-
- list_for_each_entry_safe(skb, tmp, free_list, list) {
- skb_list_del_init(skb);
- napi_consume_skb(skb, 1);
- }
-}
-
 static void
 mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
 {
 struct mt7915_tx_free *free = (struct mt7915_tx_free *)data;
 struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_phy *mphy_ext = mdev->phy2;
 struct mt76_txwi_cache *txwi;
 struct ieee80211_sta *sta = NULL;
 struct mt7915_sta *msta = NULL;
 LIST_HEAD(free_list);
+ struct sk_buff *skb, *tmp;
 void *end = data + len;
 bool v3, wake = false;
 u16 total, count = 0;
 u32 txd = le32_to_cpu(free->txd);
 __le32 *cur_info;
 
- mt7915_mac_tx_free_prepare(dev);
+ /* clean DMA queues and unmap buffers first */
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
+ if (mphy_ext) {
+ mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
+ mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
+ }
 
 total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
 v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
@@ -1622,38 +1561,17 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
 }
 }
 
- mt7915_mac_tx_free_done(dev, &free_list, wake);
-}
-
-static void
-mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
-{
- struct mt7915_tx_free *free = (struct mt7915_tx_free *)data;
- struct mt76_dev *mdev = &dev->mt76;
- __le16 *info = (__le16 *)free->info;
- void *end = data + len;
- LIST_HEAD(free_list);
- bool wake = false;
- u8 i, count;
-
- mt7915_mac_tx_free_prepare(dev);
-
- count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl));
- if (WARN_ON_ONCE((void *)&info[count] > end))
- return;
+ mt7915_mac_sta_poll(dev);
 
- for (i = 0; i < count; i++) {
- struct mt76_txwi_cache *txwi;
- u16 msdu = le16_to_cpu(info[i]);
+ if (wake)
+ mt76_set_tx_blocked(&dev->mt76, false);
 
- txwi = mt76_token_release(mdev, msdu, &wake);
- if (!txwi)
- continue;
+ mt76_worker_schedule(&dev->mt76.tx_worker);
 
- mt7915_txwi_free(dev, txwi, NULL, &free_list);
+ list_for_each_entry_safe(skb, tmp, &free_list, list) {
+ skb_list_del_init(skb);
+ napi_consume_skb(skb, 1);
 }
-
- mt7915_mac_tx_free_done(dev, &free_list, wake);
 }
 
 static bool
@@ -1833,9 +1751,6 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
 case PKT_TYPE_TXRX_NOTIFY:
 mt7915_mac_tx_free(dev, data, len);
 return false;
- case PKT_TYPE_TXRX_NOTIFY_V0:
- mt7915_mac_tx_free_v0(dev, data, len);
- return false;
 case PKT_TYPE_TXS:
 for (rxd += 2; rxd + 8 <= end; rxd += 8)
 mt7915_mac_add_txs(dev, rxd);
@@ -1863,10 +1778,6 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
 mt7915_mac_tx_free(dev, skb->data, skb->len);
 napi_consume_skb(skb, 1);
 break;
- case PKT_TYPE_TXRX_NOTIFY_V0:
- mt7915_mac_tx_free_v0(dev, skb->data, skb->len);
- napi_consume_skb(skb, 1);
- break;
 case PKT_TYPE_RX_EVENT:
 mt7915_mcu_rx_event(dev, skb);
 break;
diff --git a/mt7915/mac.h b/mt7915/mac.h
index c5fd1a61..5add1dd3 100644
--- a/mt7915/mac.h
+++ b/mt7915/mac.h
@@ -24,7 +24,6 @@ enum rx_pkt_type {
 PKT_TYPE_TXRX_NOTIFY,
 PKT_TYPE_RX_EVENT,
 PKT_TYPE_RX_FW_MONITOR = 0x0c,
- PKT_TYPE_TXRX_NOTIFY_V0 = 0x18,
 };
 
 /* RXD DW1 */
@@ -312,7 +311,6 @@ struct mt7915_tx_free {
 
 #define MT_TX_FREE_VER GENMASK(18, 16)
 #define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
-#define MT_TX_FREE_MSDU_CNT_V0 GENMASK(6, 0)
 #define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
 #define MT_TX_FREE_LATENCY GENMASK(12, 0)
 /* 0: success, others: dropped */
diff --git a/mt7915/main.c b/mt7915/main.c
index 2d5a46d3..7da3eada 100644
--- a/mt7915/main.c
+++ b/mt7915/main.c
@@ -1421,39 +1421,6 @@ out:
 return ret;
 }
 
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-static int
-mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct net_device_path_ctx *ctx,
- struct net_device_path *path)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct mt7915_dev *dev = mt7915_hw_dev(hw);
- struct mt7915_phy *phy = mt7915_hw_phy(hw);
- struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
-
- if (!mtk_wed_device_active(wed))
- return -ENODEV;
-
- if (msta->wcid.idx > 0xff)
- return -EIO;
-
- path->type = DEV_PATH_MTK_WDMA;
- path->dev = ctx->dev;
- path->mtk_wdma.wdma_idx = wed->wdma_idx;
- path->mtk_wdma.bss = mvif->mt76.idx;
- path->mtk_wdma.wcid = msta->wcid.idx;
- path->mtk_wdma.queue = phy != &dev->phy;
-
- ctx->dev = NULL;
-
- return 0;
-}
-#endif
-
 const struct ieee80211_ops mt7915_ops = {
 .tx = mt7915_tx,
 .start = mt7915_start,
@@ -1501,7 +1468,4 @@ const struct ieee80211_ops mt7915_ops = {
 .sta_add_debugfs = mt7915_sta_add_debugfs,
 #endif
 .set_radar_background = mt7915_set_radar_background,
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
- .net_fill_forward_path = mt7915_net_fill_forward_path,
-#endif
 };
diff --git a/mt7915/mcu.c b/mt7915/mcu.c
old mode 100644
new mode 100755
index 2b9797c8..9da3e85c
--- a/mt7915/mcu.c
+++ b/mt7915/mcu.c
@@ -2656,9 +2656,6 @@ int mt7915_run_firmware(struct mt7915_dev *dev)
 if (ret)
 return ret;
 
- if (mtk_wed_device_active(&dev->mt76.mmio.wed))
- mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(CAPABILITY), 0, 0, 0);
-
 ret = mt7915_mcu_set_mwds(dev, 1);
 if (ret)
 return ret;
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 995c9ee0..4d196d74 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -560,21 +560,15 @@ static void mt7915_rx_poll_complete(struct mt76_dev *mdev,
 static void mt7915_irq_tasklet(struct tasklet_struct *t)
 {
 struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
- struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
 u32 intr, intr1, mask;
 
- if (mtk_wed_device_active(wed)) {
- mtk_wed_device_irq_set_mask(wed, 0);
- intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
- } else {
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
- if (dev->hif2)
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
 
- intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
- intr &= dev->mt76.mmio.irqmask;
- mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
- }
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
 
 if (dev->hif2) {
 intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
@@ -628,15 +622,10 @@ static void mt7915_irq_tasklet(struct tasklet_struct *t)
 irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
 {
 struct mt7915_dev *dev = dev_instance;
- struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
 
- if (mtk_wed_device_active(wed)) {
- mtk_wed_device_irq_set_mask(wed, 0);
- } else {
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
- if (dev->hif2)
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
- }
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
 
 if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
 return IRQ_NONE;
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
index 90391a07..02a8c424 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -541,8 +541,6 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
 void mt7915_wfsys_reset(struct mt7915_dev *dev);
 irqreturn_t mt7915_irq_handler(int irq, void *dev_instance);
 u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif);
-u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
-
 int mt7915_register_device(struct mt7915_dev *dev);
 void mt7915_unregister_device(struct mt7915_dev *dev);
 int mt7915_eeprom_init(struct mt7915_dev *dev);
diff --git a/mt7915/pci.c b/mt7915/pci.c
index d74f6097..7cea49f2 100644
--- a/mt7915/pci.c
+++ b/mt7915/pci.c
@@ -12,9 +12,6 @@
 #include "mac.h"
 #include "../trace.h"
 
-static bool wed_enable = false;
-module_param(wed_enable, bool, 0644);
-
 static LIST_HEAD(hif_list);
 static DEFINE_SPINLOCK(hif_lock);
 static u32 hif_idx;
@@ -95,79 +92,12 @@ static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
 return 0;
 }
 
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-static int mt7915_wed_offload_enable(struct mtk_wed_device *wed)
-{
- struct mt7915_dev *dev;
- int ret;
-
- dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
-
- spin_lock_bh(&dev->mt76.token_lock);
- dev->mt76.token_size = wed->wlan.token_start;
- spin_unlock_bh(&dev->mt76.token_lock);
-
- ret = wait_event_timeout(dev->mt76.tx_wait,
- !dev->mt76.wed_token_count, HZ);
- if (!ret)
- return -EAGAIN;
-
- return 0;
-}
-
-static void mt7915_wed_offload_disable(struct mtk_wed_device *wed)
-{
- struct mt7915_dev *dev;
-
- dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
-
- spin_lock_bh(&dev->mt76.token_lock);
- dev->mt76.token_size = MT7915_TOKEN_SIZE;
- spin_unlock_bh(&dev->mt76.token_lock);
-}
-#endif
-
-static int
-mt7915_pci_wed_init(struct mt7915_dev *dev, struct pci_dev *pdev, int *irq)
-{
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
- struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
- int ret;
-
- if (!wed_enable)
- return 0;
-
- wed->wlan.pci_dev = pdev;
- wed->wlan.wpdma_phys = pci_resource_start(pdev, 0) +
- MT_WFDMA_EXT_CSR_BASE;
- wed->wlan.nbuf = 4096;
- wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf;
- wed->wlan.init_buf = mt7915_wed_init_buf;
- wed->wlan.offload_enable = mt7915_wed_offload_enable;
- wed->wlan.offload_disable = mt7915_wed_offload_disable;
-
- if (mtk_wed_device_attach(wed) != 0)
- return 0;
-
- *irq = wed->irq;
- dev->mt76.dma_dev = wed->dev;
-
- ret = dma_set_mask(wed->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
- return 1;
-#else
- return 0;
-#endif
-}
-
 static int mt7915_pci_probe(struct pci_dev *pdev,
 const struct pci_device_id *id)
 {
- struct mt7915_hif *hif2 = NULL;
 struct mt7915_dev *dev;
 struct mt76_dev *mdev;
+ struct mt7915_hif *hif2;
 int irq;
 int ret;
 
@@ -199,24 +129,15 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
 mt7915_wfsys_reset(dev);
 hif2 = mt7915_pci_init_hif2(pdev);
 
- ret = mt7915_pci_wed_init(dev, pdev, &irq);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
 if (ret < 0)
- goto free_wed_or_irq_vector;
-
- if (!ret) {
- hif2 = mt7915_pci_init_hif2(pdev);
-
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
- if (ret < 0)
- goto free_device;
-
- irq = pdev->irq;
- }
+ goto free_device;
 
+ irq = pdev->irq;
 ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
 IRQF_SHARED, KBUILD_MODNAME, dev);
 if (ret)
- goto free_wed_or_irq_vector;
+ goto free_irq_vector;
 
 /* master switch of PCIe tnterrupt enable */
 mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
@@ -251,11 +172,8 @@ free_hif2:
 if (dev->hif2)
 put_device(dev->hif2->dev);
 devm_free_irq(mdev->dev, irq, dev);
-free_wed_or_irq_vector:
- if (mtk_wed_device_active(&mdev->mmio.wed))
- mtk_wed_device_detach(&mdev->mmio.wed);
- else
- pci_free_irq_vectors(pdev);
+free_irq_vector:
+ pci_free_irq_vectors(pdev);
 free_device:
 mt76_free_device(&dev->mt76);
 
diff --git a/mt7915/regs.h b/mt7915/regs.h
index 444440e1..1e7fbcee 100644
--- a/mt7915/regs.h
+++ b/mt7915/regs.h
@@ -603,31 +603,18 @@ enum offs_rev {
 
 /* WFDMA CSR */
 #define MT_WFDMA_EXT_CSR_BASE __REG(WFDMA_EXT_CSR_ADDR)
-#define MT_WFDMA_EXT_CSR_PHYS_BASE 0x18027000
 #define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs))
-#define MT_WFDMA_EXT_CSR_PHYS(ofs) (MT_WFDMA_EXT_CSR_PHYS_BASE + (ofs))
 
-#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR_PHYS(0x30)
+#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR(0x30)
 #define MT_WFDMA_HOST_CONFIG_PDMA_BAND BIT(0)
-#define MT_WFDMA_HOST_CONFIG_WED BIT(1)
 
-#define MT_WFDMA_WED_RING_CONTROL MT_WFDMA_EXT_CSR_PHYS(0x34)
-#define MT_WFDMA_WED_RING_CONTROL_TX0 GENMASK(4, 0)
-#define MT_WFDMA_WED_RING_CONTROL_TX1 GENMASK(12, 8)
-#define MT_WFDMA_WED_RING_CONTROL_RX1 GENMASK(20, 16)
-
-#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR_PHYS(0x44)
+#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
 #define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
 
 #define MT_PCIE_RECOG_ID 0xd7090
 #define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
 #define MT_PCIE_RECOG_ID_SEM BIT(31)
 
-#define MT_INT_WED_MASK_CSR MT_WFDMA_EXT_CSR(0x204)
-
-#define MT_WED_TX_RING_BASE MT_WFDMA_EXT_CSR(0x300)
-#define MT_WED_RX_RING_BASE MT_WFDMA_EXT_CSR(0x400)
-
 /* WFDMA0 PCIE1 */
 #define MT_WFDMA0_PCIE1_BASE __REG(WFDMA0_PCIE1_ADDR)
 #define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
diff --git a/mt7921/dma.c b/mt7921/dma.c
index 2939cf9b..ca7e20fb 100644
--- a/mt7921/dma.c
+++ b/mt7921/dma.c
@@ -9,7 +9,7 @@ static int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
 {
 int i, err;
 
- err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE, 0);
+ err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
 if (err < 0)
 return err;
 
diff --git a/tx.c b/tx.c
index 0457c3eb..656b7090 100644
--- a/tx.c
+++ b/tx.c
@@ -725,12 +725,6 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
 if (token >= 0)
 dev->token_count++;
 
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
- if (mtk_wed_device_active(&dev->mmio.wed) &&
- token >= dev->mmio.wed.wlan.token_start)
- dev->wed_token_count++;
-#endif
-
 if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
 __mt76_set_tx_blocked(dev, true);
 
@@ -748,17 +742,9 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
 spin_lock_bh(&dev->token_lock);
 
 txwi = idr_remove(&dev->token, token);
- if (txwi) {
+ if (txwi)
 dev->token_count--;
 
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
- if (mtk_wed_device_active(&dev->mmio.wed) &&
- token >= dev->mmio.wed.wlan.token_start &&
- --dev->wed_token_count == 0)
- wake_up(&dev->tx_wait);
-#endif
- }
-
 if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
 dev->phy.q_tx[0]->blocked)
 *wake = true;
-- 
2.18.0
