From fce79d49d2edfd81d8db74e0093b993cb2aff1ca Mon Sep 17 00:00:00 2001
From: "sujuan.chen" <sujuan.chen@mediatek.com>
Date: Mon, 7 Aug 2023 20:05:49 +0800
Subject: [PATCH 2003/2012] wifi: mt76: mt7996: wed: add wed3.0 tx support

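Add WED 3.0 TX offload support for mt7996. A second WED instance
(mmio.wed_ext) serves the hif2/band2 data path: its rings are tagged
with the new MT_QFLAG_WED_EXT queue flag so that register access
(Q_READ/Q_WRITE), queue setup, IRQ dispatch and the TX free path all
select the WED instance that owns the ring. WED attach is factored
into mt7996_mmio_wed_init(), called from the PCI probe path for both
PCIe interfaces, and net_fill_forward_path() is implemented so the
flow offload core can bind flows to WDMA. Offload can be disabled at
module load time through the new wed_enable module parameter.
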
Signed-off-by: sujuan.chen <sujuan.chen@mediatek.com>
---
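Note (kept below the "---" marker, so git-am will not record it): the
dual-WED dispatch is open-coded at every access site via the
MT_QFLAG_WED_EXT bit. A minimal sketch of the idiom, using the flag and
fields introduced by this patch; the helper itself is illustrative and
does not exist in the tree:

	/* pick the WED instance that owns a queue: band2 (hif2) rings
	 * carry MT_QFLAG_WED_EXT and belong to mmio.wed_ext
	 */
	static inline struct mtk_wed_device *
	mt7996_queue_wed(struct mt76_dev *dev, struct mt76_queue *q)
	{
		if (q->flags & MT_QFLAG_WED_EXT)
			return &dev->mmio.wed_ext;
		return &dev->mmio.wed;
	}
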
 dma.c           |  17 ++-
 mt76.h          |   7 ++
 mt7996/dma.c    | 126 ++++++++++++++++++---
 mt7996/init.c   |  21 +++-
 mt7996/mac.c    |  29 ++++-
 mt7996/main.c   |  46 ++++++++
 mt7996/mmio.c   | 295 +++++++++++++++++++++++++++++++++++++++++++++---
 mt7996/mt7996.h |   9 +-
 mt7996/pci.c    |  46 ++++++--
 mt7996/regs.h   |   5 +
 10 files changed, 546 insertions(+), 55 deletions(-)

diff --git a/dma.c b/dma.c
index 3785425b4..c2dbe6f6b 100644
--- a/dma.c
+++ b/dma.c
@@ -13,6 +13,11 @@
 	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
 	u32 _val;							\
 	if ((_q)->flags & MT_QFLAG_WED)					\
+		if ((_q)->flags & MT_QFLAG_WED_EXT)			\
+			_val = mtk_wed_device_reg_read(&(_dev)->mmio.wed_ext,	\
+						       ((_q)->wed_regs +	\
+							_offset));	\
+		else							\
 		_val = mtk_wed_device_reg_read(&(_dev)->mmio.wed,	\
 					       ((_q)->wed_regs +	\
 					        _offset));		\
@@ -24,6 +29,11 @@
 #define Q_WRITE(_dev, _q, _field, _val) do {				\
 	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
 	if ((_q)->flags & MT_QFLAG_WED)					\
+		if ((_q)->flags & MT_QFLAG_WED_EXT)			\
+			mtk_wed_device_reg_write(&(_dev)->mmio.wed_ext,	\
+						 ((_q)->wed_regs + _offset),	\
+						 _val);			\
+		else							\
 		mtk_wed_device_reg_write(&(_dev)->mmio.wed,		\
 					 ((_q)->wed_regs + _offset),	\
 					 _val);				\
@@ -656,6 +666,9 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 	if (!(q->flags & MT_QFLAG_WED))
 		return 0;
 
+	if (q->flags & MT_QFLAG_WED_EXT)
+		wed = &dev->mmio.wed_ext;
+
 	type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
 	ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
 
@@ -721,7 +734,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 	if (ret)
 		return ret;
 
-	if (q->flags != MT_WED_Q_TXFREE)
+	if (!mt76_queue_is_txfree(q))
 		mt76_dma_queue_reset(dev, q);
 
 	return 0;
@@ -1001,6 +1014,8 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 	if (mtk_wed_device_active(&dev->mmio.wed))
 		mtk_wed_device_detach(&dev->mmio.wed);
 
+	if (mtk_wed_device_active(&dev->mmio.wed_ext))
+		mtk_wed_device_detach(&dev->mmio.wed_ext);
 	mt76_free_pending_txwi(dev);
 	mt76_free_pending_rxwi(dev);
 }
diff --git a/mt76.h b/mt76.h
index 5243741b5..3b2a658db 100644
--- a/mt76.h
+++ b/mt76.h
@@ -51,6 +51,7 @@
 #define MT_QFLAG_WED_RING	GENMASK(1, 0)
 #define MT_QFLAG_WED_TYPE	GENMASK(3, 2)
 #define MT_QFLAG_WED		BIT(4)
+#define MT_QFLAG_WED_EXT	BIT(11)
 
 #define __MT_WED_Q(_type, _n)	(MT_QFLAG_WED | \
 				 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
@@ -629,6 +630,7 @@ struct mt76_mmio {
 	u32 irqmask;
 
 	struct mtk_wed_device wed;
+	struct mtk_wed_device wed_ext;
 	struct completion wed_reset;
 	struct completion wed_reset_complete;
 };
@@ -1627,6 +1629,11 @@ static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
 	return (q->flags & MT_QFLAG_WED) &&
 	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
 }
+static inline bool mt76_queue_is_txfree(struct mt76_queue *q)
+{
+	return (q->flags & MT_QFLAG_WED) &&
+	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE;
+}
 
 struct mt76_txwi_cache *
 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
diff --git a/mt7996/dma.c b/mt7996/dma.c
index 2e75d2794..3c8f617e0 100644
--- a/mt7996/dma.c
+++ b/mt7996/dma.c
@@ -7,6 +7,25 @@
 #include "../dma.h"
 #include "mac.h"
 
+int
+mt7996_init_tx_queues(struct mt7996_phy *phy, int idx, int n_desc,
+		      int ring_base, struct mtk_wed_device *wed)
+{
+	struct mt7996_dev *dev = phy->dev;
+	u32 flags = 0;
+
+	if (mtk_wed_device_active(wed)) {
+		ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
+		idx -= MT_TXQ_ID(0);
+		flags = MT_WED_Q_TX(idx);
+		if (phy->mt76->band_idx == MT_BAND2)
+			flags = MT_QFLAG_WED_EXT | MT_WED_Q_TX(0);
+	}
+
+	return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc,
+					  ring_base, flags);
+}
+
 static int mt7996_poll_tx(struct napi_struct *napi, int budget)
 {
 	struct mt7996_dev *dev;
@@ -140,7 +159,7 @@ static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
 	}
 }
 
-void mt7996_dma_start(struct mt7996_dev *dev, bool reset)
+void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
 {
 	u32 hif1_ofs = 0;
 	u32 irq_mask;
@@ -165,11 +184,7 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset)
 	}
 
 	/* enable interrupts for TX/RX rings */
-	irq_mask = MT_INT_MCU_CMD;
-	if (reset)
-		goto done;
-
-	irq_mask = MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU;
+	irq_mask = MT_INT_MCU_CMD | MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU;
 
 	if (mt7996_band_valid(dev, MT_BAND0))
 		irq_mask |= MT_INT_BAND0_RX_DONE;
@@ -180,7 +195,18 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset)
 	if (mt7996_band_valid(dev, MT_BAND2))
 		irq_mask |= MT_INT_BAND2_RX_DONE;
 
-done:
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wed_reset) {
+		u32 wed_irq_mask = irq_mask;
+
+		wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
+
+		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+
+		mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
+	}
+
+	irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;
+
 	mt7996_irq_enable(dev, irq_mask);
 	mt7996_irq_disable(dev, 0);
 }
@@ -270,17 +296,22 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
 		/* fix hardware limitation, pcie1's rx ring3 is not available
 		 * so, redirect pcie0 rx ring3 interrupt to pcie1
 		 */
-		mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
-			 MT_WFDMA0_RX_INT_SEL_RING3);
-
-		/* TODO: redirect rx ring6 interrupt to pcie0 for wed function */
+		if (mtk_wed_device_active(&dev->mt76.mmio.wed) && dev->rro_support)
+			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
+				 MT_WFDMA0_RX_INT_SEL_RING6);
+		else
+			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
+				 MT_WFDMA0_RX_INT_SEL_RING3);
 	}
 
-	mt7996_dma_start(dev, reset);
+	mt7996_dma_start(dev, reset, true);
 }
 
 int mt7996_dma_init(struct mt7996_dev *dev)
 {
+	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+	struct mtk_wed_device *wed_ext = &dev->mt76.mmio.wed_ext;
+	u32 rx_base;
 	u32 hif1_ofs = 0;
 	int ret;
 
@@ -294,10 +325,11 @@ int mt7996_dma_init(struct mt7996_dev *dev)
 	mt7996_dma_disable(dev, true);
 
 	/* init tx queue */
-	ret = mt76_connac_init_tx_queues(dev->phy.mt76,
-					 MT_TXQ_ID(dev->mphy.band_idx),
-					 MT7996_TX_RING_SIZE,
-					 MT_TXQ_RING_BASE(0), 0);
+	ret = mt7996_init_tx_queues(&dev->phy,
+				    MT_TXQ_ID(dev->mphy.band_idx),
+				    MT7996_TX_RING_SIZE,
+				    MT_TXQ_RING_BASE(0),
+				    wed);
 	if (ret)
 		return ret;
 
@@ -353,6 +385,9 @@ int mt7996_dma_init(struct mt7996_dev *dev)
 		return ret;
 
 	/* tx free notify event from WA for band0 */
+	if (mtk_wed_device_active(wed) && !dev->rro_support)
+		dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
+
 	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
 			       MT_RXQ_ID(MT_RXQ_MAIN_WA),
 			       MT7996_RX_MCU_RING_SIZE,
@@ -363,17 +398,24 @@ int mt7996_dma_init(struct mt7996_dev *dev)
 
 	if (mt7996_band_valid(dev, MT_BAND2)) {
 		/* rx data queue for band2 */
+		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
+		if (mtk_wed_device_active(wed))
+			rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2);
+
 		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
 				       MT_RXQ_ID(MT_RXQ_BAND2),
 				       MT7996_RX_RING_SIZE,
 				       MT_RX_BUF_SIZE,
-				       MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs);
+				       rx_base);
 		if (ret)
 			return ret;
 
 		/* tx free notify event from WA for band2
 		 * use pcie0's rx ring3, but, redirect pcie0 rx ring3 interrupt to pcie1
 		 */
+		if (mtk_wed_device_active(wed_ext) && !dev->rro_support)
+			dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE |
+								MT_QFLAG_WED_EXT;
 		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2_WA],
 				       MT_RXQ_ID(MT_RXQ_BAND2_WA),
 				       MT7996_RX_MCU_RING_SIZE,
@@ -383,6 +425,56 @@ int mt7996_dma_init(struct mt7996_dev *dev)
 		return ret;
 	}
 
+
+	if (dev->rro_support) {
+		/* rx rro data queue for band0 */
+		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags = MT_RRO_Q_DATA(0);
+		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags |= MT_QFLAG_MAGIC;
+		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
+				       MT_RXQ_ID(MT_RXQ_RRO_BAND0),
+				       MT7996_RX_RING_SIZE,
+				       MT7996_RX_BUF_SIZE,
+				       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND0));
+		if (ret)
+			return ret;
+
+		/* tx free notify event from WA for band0 */
+		if (mtk_wed_device_active(wed))
+			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
+		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
+				       MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
+				       MT7996_RX_MCU_RING_SIZE,
+				       MT7996_RX_BUF_SIZE,
+				       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
+		if (ret)
+			return ret;
+
+		if (mt7996_band_valid(dev, MT_BAND2)) {
+			/* rx rro data queue for band2 */
+			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags = MT_RRO_Q_DATA(1);
+			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags |= MT_QFLAG_MAGIC;
+			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
+					       MT_RXQ_ID(MT_RXQ_RRO_BAND2),
+					       MT7996_RX_RING_SIZE,
+					       MT7996_RX_BUF_SIZE,
+					       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND2) + hif1_ofs);
+			if (ret)
+				return ret;
+
+			/* tx free notify event from MAC for band2 */
+			if (mtk_wed_device_active(wed_ext))
+				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].flags = MT_WED_Q_TXFREE |
+									    MT_QFLAG_WED_EXT;
+			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2],
+					       MT_RXQ_ID(MT_RXQ_TXFREE_BAND2),
+					       MT7996_RX_MCU_RING_SIZE,
+					       MT7996_RX_BUF_SIZE,
+					       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND2) + hif1_ofs);
+			if (ret)
+				return ret;
+		}
+	}
+
 	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
 	if (ret < 0)
 		return ret;
diff --git a/mt7996/init.c b/mt7996/init.c
index 5d8ecf038..f2d43d3dc 100644
--- a/mt7996/init.c
+++ b/mt7996/init.c
@@ -540,6 +540,7 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
 	struct mt76_phy *mphy;
 	u32 mac_ofs, hif1_ofs = 0;
 	int ret;
+	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
 
 	if (!mt7996_band_valid(dev, band) || band == MT_BAND0)
 		return 0;
@@ -547,8 +548,10 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
 	if (phy)
 		return 0;
 
-	if (band == MT_BAND2 && dev->hif2)
+	if (band == MT_BAND2 && dev->hif2) {
 		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+		wed = &dev->mt76.mmio.wed_ext;
+	}
 
 	mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7996_ops, band);
 	if (!mphy)
@@ -582,10 +585,11 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
 
 	/* init wiphy according to mphy and phy */
 	mt7996_init_wiphy(mphy->hw);
-	ret = mt76_connac_init_tx_queues(phy->mt76,
-					 MT_TXQ_ID(band),
-					 MT7996_TX_RING_SIZE,
-					 MT_TXQ_RING_BASE(band) + hif1_ofs, 0);
+	ret = mt7996_init_tx_queues(mphy->priv,
+				    MT_TXQ_ID(band),
+				    MT7996_TX_RING_SIZE,
+				    MT_TXQ_RING_BASE(band) + hif1_ofs,
+				    wed);
 	if (ret)
 		goto error;
 
@@ -1126,6 +1130,13 @@ int mt7996_register_device(struct mt7996_dev *dev)
 
 	ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
 
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed_ext)) {
+		mt76_wr(dev, MT_INT1_MASK_CSR,
+			dev->mt76.mmio.irqmask | MT_INT_TX_DONE_BAND2);
+		mtk_wed_device_start(&dev->mt76.mmio.wed_ext,
+				     dev->mt76.mmio.irqmask | MT_INT_TX_DONE_BAND2);
+	}
+
 	dev->recovery.hw_init_done = true;
 
 	ret = mt7996_init_debugfs(&dev->phy);
diff --git a/mt7996/mac.c b/mt7996/mac.c
index 04e14fa30..e57bdee21 100644
--- a/mt7996/mac.c
+++ b/mt7996/mac.c
@@ -1019,6 +1019,29 @@ out:
 	mt76_put_txwi(mdev, t);
 }
 
+u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
+{
+	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
+	__le32 *txwi = ptr;
+	u32 val;
+
+	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));
+
+	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
+	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
+	txwi[0] = cpu_to_le32(val);
+
+	val = BIT(31) |
+	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
+	txwi[1] = cpu_to_le32(val);
+
+	txp->token = cpu_to_le16(token_id);
+	txp->nbuf = 1;
+	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
+
+	return MT_TXD_SIZE + sizeof(*txp);
+}
+
 static void
 mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
 {
@@ -1363,6 +1386,10 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
 
 	switch (type) {
 	case PKT_TYPE_TXRX_NOTIFY:
+		if (mtk_wed_device_active(&dev->mt76.mmio.wed_ext) &&
+		    q == MT_RXQ_TXFREE_BAND2)
+			return;
+
 		mt7996_mac_tx_free(dev, skb->data, skb->len);
 		napi_consume_skb(skb, 1);
 		break;
@@ -1837,7 +1864,7 @@ void mt7996_mac_reset_work(struct work_struct *work)
 	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
 
 	/* enable DMA Tx/Rx and interrupt */
-	mt7996_dma_start(dev, false);
+	mt7996_dma_start(dev, false, false);
 
 	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
 	clear_bit(MT76_RESET, &dev->mphy.state);
diff --git a/mt7996/main.c b/mt7996/main.c
index a00ebf9e6..e6be05656 100644
--- a/mt7996/main.c
+++ b/mt7996/main.c
@@ -1508,6 +1508,49 @@ out:
 	return ret;
 }
 
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static int
+mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif,
+			     struct ieee80211_sta *sta,
+			     struct net_device_path_ctx *ctx,
+			     struct net_device_path *path)
+{
+	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+	struct mt7996_dev *dev = mt7996_hw_dev(hw);
+	struct mt7996_phy *phy = mt7996_hw_phy(hw);
+	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+
+	if (phy != &dev->phy && phy->mt76->band_idx == MT_BAND2)
+		wed = &dev->mt76.mmio.wed_ext;
+
+	if (!mtk_wed_device_active(wed))
+		return -ENODEV;
+
+	if (msta->wcid.idx > MT7996_WTBL_STA)
+		return -EIO;
+
+	path->type = DEV_PATH_MTK_WDMA;
+	path->dev = ctx->dev;
+	path->mtk_wdma.wdma_idx = wed->wdma_idx;
+	path->mtk_wdma.bss = mvif->mt76.idx;
+	path->mtk_wdma.queue = 0;
+	path->mtk_wdma.wcid = msta->wcid.idx;
+
+	/* pao info */
+	if (mtk_wed_device_support_pao(wed)) {
+		path->mtk_wdma.amsdu_en = 1;
+		path->mtk_wdma.is_sp = 0;
+		path->mtk_wdma.is_fixedrate = 0;
+	}
+	ctx->dev = NULL;
+
+	return 0;
+}
+
+#endif
+
 const struct ieee80211_ops mt7996_ops = {
 	.tx = mt7996_tx,
 	.start = mt7996_start,
@@ -1554,4 +1597,7 @@ const struct ieee80211_ops mt7996_ops = {
 	.sta_add_debugfs = mt7996_sta_add_debugfs,
 #endif
 	.set_radar_background = mt7996_set_radar_background,
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+	.net_fill_forward_path = mt7996_net_fill_forward_path,
+#endif
 };
diff --git a/mt7996/mmio.c b/mt7996/mmio.c
index d5eaa1bcf..ad2482ef2 100644
--- a/mt7996/mmio.c
+++ b/mt7996/mmio.c
@@ -10,6 +10,11 @@
 #include "mt7996.h"
 #include "mac.h"
 #include "../trace.h"
+#include "../dma.h"
+
+
+static bool wed_enable = true;
+module_param(wed_enable, bool, 0644);
 
 static const struct __base mt7996_reg_base[] = {
 	[WF_AGG_BASE] = { { 0x820e2000, 0x820f2000, 0x830e2000 } },
@@ -214,6 +219,228 @@ static u32 mt7996_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
 	return val;
 }
 
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static void mt7996_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
+{
+	struct mt7996_dev *dev;
+	struct page *page;
+	int i;
+
+	dev = container_of(wed, struct mt7996_dev, mt76.mmio.wed);
+	for (i = 0; i < dev->mt76.rx_token_size; i++) {
+		struct mt76_rxwi_cache *r;
+
+		r = mt76_rx_token_release(&dev->mt76, i);
+		if (!r || !r->ptr)
+			continue;
+
+		dma_unmap_single(dev->mt76.dma_dev, r->dma_addr,
+				 wed->wlan.rx_size, DMA_FROM_DEVICE);
+		skb_free_frag(r->ptr);
+		r->ptr = NULL;
+
+		mt76_put_rxwi(&dev->mt76, r);
+	}
+
+	mt76_free_pending_rxwi(&dev->mt76);
+
+	mt76_for_each_q_rx(&dev->mt76, i) {
+		struct mt76_queue *q = &dev->mt76.q_rx[i];
+
+		if (mt76_queue_is_wed_rx(q)) {
+			if (!q->rx_page.va)
+				continue;
+
+			page = virt_to_page(q->rx_page.va);
+			__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+			memset(&q->rx_page, 0, sizeof(q->rx_page));
+		}
+	}
+
+	if (!wed->rx_buf_ring.rx_page.va)
+		return;
+
+	page = virt_to_page(wed->rx_buf_ring.rx_page.va);
+	__page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias);
+	memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page));
+
+}
+
+static u32 mt7996_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+{
+	struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
+	struct mt7996_dev *dev;
+	u32 length;
+	int i;
+
+	dev = container_of(wed, struct mt7996_dev, mt76.mmio.wed);
+	length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+				sizeof(struct skb_shared_info));
+
+	for (i = 0; i < size; i++) {
+		struct mt76_rxwi_cache *r = mt76_get_rxwi(&dev->mt76);
+		dma_addr_t phy_addr;
+		int token;
+		void *ptr;
+
+		ptr = page_frag_alloc(&wed->rx_buf_ring.rx_page, length,
+				      GFP_KERNEL);
+		if (!ptr) {
+			mt76_put_rxwi(&dev->mt76, r);
+			goto unmap;
+		}
+
+		phy_addr = dma_map_single(dev->mt76.dma_dev, ptr,
+					  wed->wlan.rx_size,
+					  DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
+			skb_free_frag(ptr);
+			mt76_put_rxwi(&dev->mt76, r);
+			goto unmap;
+		}
+
+		desc->buf0 = cpu_to_le32(phy_addr);
+		token = mt76_rx_token_consume(&dev->mt76, ptr, r, phy_addr);
+		if (token < 0) {
+			dma_unmap_single(dev->mt76.dma_dev, phy_addr,
+					 wed->wlan.rx_size, DMA_TO_DEVICE);
+			skb_free_frag(ptr);
+			mt76_put_rxwi(&dev->mt76, r);
+			goto unmap;
+		}
+
+		desc->token |= cpu_to_le32(FIELD_PREP(MT_DMA_CTL_TOKEN,
+						      token));
+		desc++;
+	}
+
+	return 0;
+
+unmap:
+	mt7996_mmio_wed_release_rx_buf(wed);
+	return -ENOMEM;
+}
+#endif
+
+int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
+			 bool hif2, int *irq)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+	struct pci_dev *pci_dev = pdev_ptr;
+	u32 hif1_ofs = 0;
+	int ret;
+
+	if (!wed_enable)
+		return 0;
+
+	dev->rro_support = true;
+
+	hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+	if (hif2)
+		wed = &dev->mt76.mmio.wed_ext;
+
+	wed->wlan.pci_dev = pci_dev;
+	wed->wlan.bus_type = MTK_WED_BUS_PCIE;
+
+	wed->wlan.base = devm_ioremap(dev->mt76.dev,
+				      pci_resource_start(pci_dev, 0),
+				      pci_resource_len(pci_dev, 0));
+	wed->wlan.phy_base = pci_resource_start(pci_dev, 0);
+
+	if (hif2) {
+		wed->wlan.wpdma_int = wed->wlan.phy_base +
+				      MT_INT_PCIE1_SOURCE_CSR_EXT;
+		wed->wlan.wpdma_mask = wed->wlan.phy_base +
+				       MT_INT_PCIE1_MASK_CSR;
+		wed->wlan.wpdma_tx = wed->wlan.phy_base + hif1_ofs +
+				     MT_TXQ_RING_BASE(0) +
+				     MT7996_TXQ_BAND2 * MT_RING_SIZE;
+		if (dev->rro_support) {
+			wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
+						 MT_RXQ_RING_BASE(0) +
+						 MT7996_RXQ_TXFREE2 * MT_RING_SIZE;
+			wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_EXT) - 1;
+		} else {
+			wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
+						 MT_RXQ_RING_BASE(0) +
+						 MT7996_RXQ_MCU_WA_TRI * MT_RING_SIZE;
+			wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_TRI) - 1;
+		}
+
+		wed->wlan.chip_id = 0x7991;
+		wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND2) - 1;
+	} else {
+		wed->wlan.wpdma_int = wed->wlan.phy_base + MT_INT_SOURCE_CSR;
+		wed->wlan.wpdma_mask = wed->wlan.phy_base + MT_INT_MASK_CSR;
+		wed->wlan.wpdma_tx = wed->wlan.phy_base + MT_TXQ_RING_BASE(0) +
+				     MT7996_TXQ_BAND0 * MT_RING_SIZE;
+
+		wed->wlan.wpdma_rx_glo = wed->wlan.phy_base + MT_WFDMA0_GLO_CFG;
+
+		wed->wlan.wpdma_rx[0] = wed->wlan.phy_base +
+					MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
+					MT7996_RXQ_BAND0 * MT_RING_SIZE;
+
+		wed->wlan.rx_nbuf = 65536;
+		wed->wlan.rx_npkt = 24576;
+		wed->wlan.rx_size = SKB_WITH_OVERHEAD(MT_RX_BUF_SIZE);
+
+		wed->wlan.rx_tbit[0] = ffs(MT_INT_RX_DONE_BAND0) - 1;
+		wed->wlan.rx_tbit[1] = ffs(MT_INT_RX_DONE_BAND2) - 1;
+
+		wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND0) - 1;
+		wed->wlan.tx_tbit[1] = ffs(MT_INT_TX_DONE_BAND1) - 1;
+		if (dev->rro_support) {
+			wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) +
+						 MT7996_RXQ_TXFREE0 * MT_RING_SIZE;
+			wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_MAIN) - 1;
+		} else {
+			wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_MAIN) - 1;
+			wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) +
+						 MT7996_RXQ_MCU_WA_MAIN * MT_RING_SIZE;
+		}
+	}
+
+	wed->wlan.nbuf = MT7996_TOKEN_SIZE;
+
+	wed->wlan.token_start = 0;
+
+	wed->wlan.max_amsdu_nums = 8;
+	wed->wlan.max_amsdu_len = 1536;
+
+	wed->wlan.init_buf = mt7996_wed_init_buf;
+	wed->wlan.offload_enable = NULL;
+	wed->wlan.offload_disable = NULL;
+	wed->wlan.init_rx_buf = mt7996_mmio_wed_init_rx_buf;
+	wed->wlan.release_rx_buf = mt7996_mmio_wed_release_rx_buf;
+	wed->wlan.update_wo_rx_stats = NULL;
+
+	dev->mt76.rx_token_size += wed->wlan.rx_npkt;
+
+	if (mtk_wed_device_attach(wed))
+		return 0;
+
+	*irq = wed->irq;
+	dev->mt76.dma_dev = wed->dev;
+
+	dev->mt76.token_size = MT7996_SW_TOKEN_SIZE;
+
+	ret = dma_set_mask(wed->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	ret = dma_set_coherent_mask(wed->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	return 1;
+#else
+	return 0;
+#endif
+}
+
 static int mt7996_mmio_init(struct mt76_dev *mdev,
 			    void __iomem *mem_base,
 			    u32 device_id)
@@ -265,8 +492,17 @@ void mt7996_dual_hif_set_irq_mask(struct mt7996_dev *dev, bool write_reg,
 	mdev->mmio.irqmask |= set;
 
 	if (write_reg) {
-		mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
-		mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
+		if (mtk_wed_device_active(&mdev->mmio.wed)) {
+			mtk_wed_device_irq_set_mask(&mdev->mmio.wed,
+						    mdev->mmio.irqmask);
+			if (mtk_wed_device_active(&mdev->mmio.wed_ext)) {
+				mtk_wed_device_irq_set_mask(&mdev->mmio.wed_ext,
+							    mdev->mmio.irqmask);
+			}
+		} else {
+			mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
+			mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
+		}
 	}
 
 	spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags);
@@ -284,22 +520,36 @@ static void mt7996_rx_poll_complete(struct mt76_dev *mdev,
 static void mt7996_irq_tasklet(struct tasklet_struct *t)
 {
 	struct mt7996_dev *dev = from_tasklet(dev, t, mt76.irq_tasklet);
+	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+	struct mtk_wed_device *wed_ext = &dev->mt76.mmio.wed_ext;
 	u32 i, intr, mask, intr1;
 
-	mt76_wr(dev, MT_INT_MASK_CSR, 0);
-	if (dev->hif2)
-		mt76_wr(dev, MT_INT1_MASK_CSR, 0);
-
-	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
-	intr &= dev->mt76.mmio.irqmask;
-	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
-
-	if (dev->hif2) {
-		intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
-		intr1 &= dev->mt76.mmio.irqmask;
-		mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
+	if (dev->hif2 && mtk_wed_device_active(wed_ext)) {
+		mtk_wed_device_irq_set_mask(wed_ext, 0);
+		intr1 = mtk_wed_device_irq_get(wed_ext,
+					       dev->mt76.mmio.irqmask);
+		if (intr1 & MT_INT_RX_TXFREE_EXT)
+			napi_schedule(&dev->mt76.napi[MT_RXQ_TXFREE_BAND2]);
+	}
 
-		intr |= intr1;
+	if (mtk_wed_device_active(wed)) {
+		mtk_wed_device_irq_set_mask(wed, 0);
+		intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
+		intr |= (intr1 & ~MT_INT_RX_TXFREE_EXT);
+	} else {
+		mt76_wr(dev, MT_INT_MASK_CSR, 0);
+		if (dev->hif2)
+			mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
+		intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+		intr &= dev->mt76.mmio.irqmask;
+		mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+		if (dev->hif2) {
+			intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
+			intr1 &= dev->mt76.mmio.irqmask;
+			mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
+			intr |= intr1;
+		}
 	}
 
 	trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
@@ -331,10 +581,19 @@ static void mt7996_irq_tasklet(struct tasklet_struct *t)
 irqreturn_t mt7996_irq_handler(int irq, void *dev_instance)
 {
 	struct mt7996_dev *dev = dev_instance;
+	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
 
-	mt76_wr(dev, MT_INT_MASK_CSR, 0);
-	if (dev->hif2)
-		mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+	if (mtk_wed_device_active(wed))
+		mtk_wed_device_irq_set_mask(wed, 0);
+	else
+		mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+	if (dev->hif2) {
+		if (mtk_wed_device_active(&dev->mt76.mmio.wed_ext))
+			mtk_wed_device_irq_set_mask(&dev->mt76.mmio.wed_ext, 0);
+		else
+			mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+	}
 
 	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
 		return IRQ_NONE;
diff --git a/mt7996/mt7996.h b/mt7996/mt7996.h
index 6447b2c90..d09358305 100644
--- a/mt7996/mt7996.h
+++ b/mt7996/mt7996.h
@@ -40,6 +40,7 @@
 #define MT7996_EEPROM_SIZE		7680
 #define MT7996_EEPROM_BLOCK_SIZE	16
 #define MT7996_TOKEN_SIZE		16384
+#define MT7996_SW_TOKEN_SIZE		1024
 
 #define MT7996_CFEND_RATE_DEFAULT	0x49 /* OFDM 24M */
 #define MT7996_CFEND_RATE_11B		0x03 /* 11B LP, 11M */
@@ -493,7 +494,9 @@ int mt7996_dma_init(struct mt7996_dev *dev);
 void mt7996_dma_reset(struct mt7996_dev *dev, bool force);
 void mt7996_dma_prefetch(struct mt7996_dev *dev);
 void mt7996_dma_cleanup(struct mt7996_dev *dev);
-void mt7996_dma_start(struct mt7996_dev *dev, bool reset);
+int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx,
+			  int n_desc, int ring_base, struct mtk_wed_device *wed);
+void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset);
 void mt7996_init_txpower(struct mt7996_dev *dev,
 			 struct ieee80211_supported_band *sband);
 int mt7996_txbf_init(struct mt7996_dev *dev);
@@ -683,7 +686,9 @@ int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev,
 void mt7996_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			    struct ieee80211_sta *sta, struct dentry *dir);
 #endif
-
+int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
+			 bool hif2, int *irq);
+u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
 #ifdef CONFIG_MTK_VENDOR
 void mt7996_set_wireless_vif(void *data, u8 *mac, struct ieee80211_vif *vif);
 void mt7996_vendor_register(struct mt7996_phy *phy);
diff --git a/mt7996/pci.c b/mt7996/pci.c
index c5301050f..085408571 100644
--- a/mt7996/pci.c
+++ b/mt7996/pci.c
@@ -125,15 +125,22 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
 	mt7996_wfsys_reset(dev);
 	hif2 = mt7996_pci_init_hif2(pdev);
 
-	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+	ret = mt7996_mmio_wed_init(dev, pdev, false, &irq);
 	if (ret < 0)
-		goto free_device;
+		goto free_wed_or_irq_vector;
+
+	if (!ret) {
+		ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+		if (ret < 0)
+			goto free_device;
+
+		irq = pdev->irq;
+	}
 
-	irq = pdev->irq;
 	ret = devm_request_irq(mdev->dev, irq, mt7996_irq_handler,
 			       IRQF_SHARED, KBUILD_MODNAME, dev);
 	if (ret)
-		goto free_irq_vector;
+		goto free_wed_or_irq_vector;
 
 	mt76_wr(dev, MT_INT_MASK_CSR, 0);
 	/* master switch of PCIe interrupt enable */
@@ -143,11 +150,20 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
 		hif2_dev = container_of(hif2->dev, struct pci_dev, dev);
 		dev->hif2 = hif2;
 
-		ret = pci_alloc_irq_vectors(hif2_dev, 1, 1, PCI_IRQ_ALL_TYPES);
+		ret = mt7996_mmio_wed_init(dev, hif2_dev, true, &irq);
 		if (ret < 0)
-			goto free_hif2;
+			goto free_wed_or_irq_vector;
+
+		if (!ret) {
+			ret = pci_alloc_irq_vectors(hif2_dev, 1, 1, PCI_IRQ_ALL_TYPES);
+			if (ret < 0)
+				goto free_hif2;
+
+			dev->hif2->irq = hif2_dev->irq;
+		} else {
+			dev->hif2->irq = irq;
+		}
 
-		dev->hif2->irq = hif2_dev->irq;
 		ret = devm_request_irq(mdev->dev, dev->hif2->irq,
 				       mt7996_irq_handler, IRQF_SHARED,
 				       KBUILD_MODNAME "-hif", dev);
@@ -169,14 +185,22 @@ free_hif2_irq:
 	if (dev->hif2)
 		devm_free_irq(mdev->dev, dev->hif2->irq, dev);
 free_hif2_irq_vector:
-	if (dev->hif2)
-		pci_free_irq_vectors(hif2_dev);
+	if (dev->hif2) {
+		if (mtk_wed_device_active(&dev->mt76.mmio.wed_ext))
+			mtk_wed_device_detach(&dev->mt76.mmio.wed_ext);
+		else
+			pci_free_irq_vectors(hif2_dev);
+	}
 free_hif2:
 	if (dev->hif2)
 		put_device(dev->hif2->dev);
 	devm_free_irq(mdev->dev, irq, dev);
-free_irq_vector:
-	pci_free_irq_vectors(pdev);
+free_wed_or_irq_vector:
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+		mtk_wed_device_detach(&dev->mt76.mmio.wed);
+	else
+		pci_free_irq_vectors(pdev);
+
 free_device:
 	mt76_free_device(&dev->mt76);
 
diff --git a/mt7996/regs.h b/mt7996/regs.h
index e0b51b5df..ca7c2a811 100644
--- a/mt7996/regs.h
+++ b/mt7996/regs.h
@@ -330,6 +330,7 @@ enum base_rev {
 
 #define MT_WFDMA0_RX_INT_PCIE_SEL	MT_WFDMA0(0x154)
 #define MT_WFDMA0_RX_INT_SEL_RING3	BIT(3)
+#define MT_WFDMA0_RX_INT_SEL_RING6	BIT(6)
 
 #define MT_WFDMA0_MCU_HOST_INT_ENA	MT_WFDMA0(0x1f4)
 
@@ -383,6 +384,9 @@ enum base_rev {
 #define MT_WFDMA0_PCIE1_BASE		0xd8000
 #define MT_WFDMA0_PCIE1(ofs)		(MT_WFDMA0_PCIE1_BASE + (ofs))
 
+#define MT_INT_PCIE1_SOURCE_CSR_EXT	MT_WFDMA0_PCIE1(0x118)
+#define MT_INT_PCIE1_MASK_CSR		MT_WFDMA0_PCIE1(0x11c)
+
 #define MT_WFDMA0_PCIE1_BUSY_ENA	MT_WFDMA0_PCIE1(0x13c)
 #define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0	BIT(0)
 #define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1	BIT(1)
@@ -428,6 +432,7 @@ enum base_rev {
 #define MT_INT_RX_TXFREE_MAIN		BIT(17)
 #define MT_INT_RX_TXFREE_TRI		BIT(15)
 #define MT_INT_MCU_CMD			BIT(29)
+#define MT_INT_RX_TXFREE_EXT		BIT(26)
 
 #define MT_INT_RX(q)			(dev->q_int_mask[__RXQ(q)])
 #define MT_INT_TX_MCU(q)		(dev->q_int_mask[(q)])
-- 
2.39.2
