From d9167faacb2a8466e2d19993f29b2c0770c5164e Mon Sep 17 00:00:00 2001
2From: "sujuan.chen" <sujuan.chen@mediatek.com>
3Date: Wed, 26 Apr 2023 16:44:57 +0800
4Subject: [PATCH 2003/2008] wifi: mt76: mt7996: wed: add wed3.0 tx support
5
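Add WED 3.0 TX offload support for mt7996. A second WED instance
(mmio.wed_ext) is attached to the hif2 PCIe interface, and a new
MT_QFLAG_WED_EXT queue flag routes band2 TX rings and TX-free notify
events through it. WED interrupts are dispatched from the IRQ handler
and tasklet, and net_fill_forward_path is implemented so that mac80211
can set up hardware flow-offload paths. WED support can be toggled via
the new wed_enable module parameter.
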
Signed-off-by: sujuan.chen <sujuan.chen@mediatek.com>
---
 dma.c           |  17 ++-
 mt76.h          |   7 ++
 mt7996/dma.c    | 128 ++++++++++++++++++---
 mt7996/init.c   |  21 +++-
 mt7996/mac.c    |  29 ++++-
 mt7996/main.c   |  46 ++++++++
 mt7996/mmio.c   | 295 +++++++++++++++++++++++++++++++++++++++++++++---
 mt7996/mt7996.h |   8 +-
 mt7996/pci.c    |  72 +++++++++---
 mt7996/regs.h   |   5 +
 10 files changed, 567 insertions(+), 61 deletions(-)

diff --git a/dma.c b/dma.c
index 7153be47..930ec768 100644
--- a/dma.c
+++ b/dma.c
@@ -13,6 +13,11 @@
 u32 _offset = offsetof(struct mt76_queue_regs, _field); \
 u32 _val; \
 if ((_q)->flags & MT_QFLAG_WED) \
+ if ((_q)->flags & MT_QFLAG_WED_EXT) \
+ _val = mtk_wed_device_reg_read(&(_dev)->mmio.wed_ext, \
+ ((_q)->wed_regs + \
+ _offset)); \
+ else \
 _val = mtk_wed_device_reg_read(&(_dev)->mmio.wed, \
 ((_q)->wed_regs + \
 _offset)); \
@@ -24,6 +29,11 @@
 #define Q_WRITE(_dev, _q, _field, _val) do { \
 u32 _offset = offsetof(struct mt76_queue_regs, _field); \
 if ((_q)->flags & MT_QFLAG_WED) \
+ if ((_q)->flags & MT_QFLAG_WED_EXT) \
+ mtk_wed_device_reg_write(&(_dev)->mmio.wed_ext, \
+ ((_q)->wed_regs + _offset), \
+ _val); \
+ else \
 mtk_wed_device_reg_write(&(_dev)->mmio.wed, \
 ((_q)->wed_regs + _offset), \
 _val); \
@@ -654,6 +664,9 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 if (!(q->flags & MT_QFLAG_WED))
 return 0;
 
+ if (q->flags & MT_QFLAG_WED_EXT)
+ wed = &dev->mmio.wed_ext;
+
 type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
 ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
 
@@ -719,7 +732,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 if (ret)
 return ret;
 
- if (q->flags != MT_WED_Q_TXFREE)
+ if (!mt76_queue_is_txfree(q))
 mt76_dma_queue_reset(dev, q);
 
 return 0;
@@ -999,6 +1012,8 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 if (mtk_wed_device_active(&dev->mmio.wed))
 mtk_wed_device_detach(&dev->mmio.wed);
 
+ if (mtk_wed_device_active(&dev->mmio.wed_ext))
+ mtk_wed_device_detach(&dev->mmio.wed_ext);
 mt76_free_pending_txwi(dev);
 mt76_free_pending_rxwi(dev);
 }
diff --git a/mt76.h b/mt76.h
index a0c20d36..ee0dbdd7 100644
--- a/mt76.h
+++ b/mt76.h
@@ -51,6 +51,7 @@
 #define MT_QFLAG_WED_RING GENMASK(1, 0)
 #define MT_QFLAG_WED_TYPE GENMASK(3, 2)
 #define MT_QFLAG_WED BIT(4)
+#define MT_QFLAG_WED_EXT BIT(11)
 
 #define __MT_WED_Q(_type, _n) (MT_QFLAG_WED | \
 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
@@ -623,6 +624,7 @@ struct mt76_mmio {
 u32 irqmask;
 
 struct mtk_wed_device wed;
+ struct mtk_wed_device wed_ext;
 struct completion wed_reset;
 struct completion wed_reset_complete;
 };
@@ -1514,6 +1516,11 @@ static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
 return (q->flags & MT_QFLAG_WED) &&
 FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
 }
+static inline bool mt76_queue_is_txfree(struct mt76_queue *q)
+{
+ return (q->flags & MT_QFLAG_WED) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE;
+}
 
 struct mt76_txwi_cache *
 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
diff --git a/mt7996/dma.c b/mt7996/dma.c
index b8f253d0..673b08bb 100644
--- a/mt7996/dma.c
+++ b/mt7996/dma.c
@@ -7,6 +7,25 @@
 #include "../dma.h"
 #include "mac.h"
 
+int
+mt7996_init_tx_queues(struct mt7996_phy *phy, int idx, int n_desc,
+ int ring_base, struct mtk_wed_device *wed)
+{
+ struct mt7996_dev *dev = phy->dev;
+ u32 flags = 0;
+
+ if (mtk_wed_device_active(wed)) {
+ ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
+ idx -= MT_TXQ_ID(0);
+ flags = MT_WED_Q_TX(idx);
+ if (phy->mt76->band_idx == MT_BAND2)
+ flags = MT_QFLAG_WED_EXT | MT_WED_Q_TX(0);
+ }
+
+ return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc,
+ ring_base, flags);
+}
+
 static int mt7996_poll_tx(struct napi_struct *napi, int budget)
 {
 struct mt7996_dev *dev;
@@ -128,7 +147,7 @@ static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
 }
 }
 
-void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
+void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset, bool wed_reset)
 {
 u32 hif1_ofs = 0;
 u32 irq_mask;
@@ -153,11 +172,9 @@ void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
 }
 
 /* enable interrupts for TX/RX rings */
- irq_mask = MT_INT_MCU_CMD;
- if (reset)
- goto done;
-
- irq_mask |= (MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU);
+ irq_mask = MT_INT_MCU_CMD |
+ MT_INT_RX_DONE_MCU |
+ MT_INT_TX_DONE_MCU;
 
 if (mt7996_band_valid(dev, MT_BAND0))
 irq_mask |= MT_INT_BAND0_RX_DONE;
@@ -168,7 +185,18 @@ void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
 if (mt7996_band_valid(dev, MT_BAND2))
 irq_mask |= MT_INT_BAND2_RX_DONE;
 
-done:
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wed_reset) {
+ u32 wed_irq_mask = irq_mask;
+
+ wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+
+ mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
+ }
+
+ irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;
+
 mt7996_irq_enable(dev, irq_mask);
 mt7996_irq_disable(dev, 0);
 }
@@ -241,19 +269,24 @@ static int mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
 /* fix hardware limitation, pcie1's rx ring3 is not available
 * so, redirect pcie0 rx ring3 interrupt to pcie1
 */
- mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
- MT_WFDMA0_RX_INT_SEL_RING3);
-
- /* TODO: redirect rx ring6 interrupt to pcie0 for wed function */
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) && dev->rro_support)
+ mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
+ MT_WFDMA0_RX_INT_SEL_RING6);
+ else
+ mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
+ MT_WFDMA0_RX_INT_SEL_RING3);
 }
 
- __mt7996_dma_enable(dev, reset);
+ __mt7996_dma_enable(dev, reset, true);
 
 return 0;
 }
 
 int mt7996_dma_init(struct mt7996_dev *dev)
 {
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ struct mtk_wed_device *wed_ext = &dev->mt76.mmio.wed_ext;
+ u32 rx_base;
 u32 hif1_ofs = 0;
 int ret;
 
@@ -267,10 +300,11 @@ int mt7996_dma_init(struct mt7996_dev *dev)
 mt7996_dma_disable(dev, true);
 
 /* init tx queue */
- ret = mt76_connac_init_tx_queues(dev->phy.mt76,
- MT_TXQ_ID(dev->mphy.band_idx),
- MT7996_TX_RING_SIZE,
- MT_TXQ_RING_BASE(0), 0);
+ ret = mt7996_init_tx_queues(&dev->phy,
+ MT_TXQ_ID(dev->mphy.band_idx),
+ MT7996_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(0),
+ wed);
 if (ret)
 return ret;
 
@@ -326,6 +360,9 @@ int mt7996_dma_init(struct mt7996_dev *dev)
 return ret;
 
 /* tx free notify event from WA for band0 */
+ if (mtk_wed_device_active(wed) && !dev->rro_support)
+ dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
+
 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
 MT_RXQ_ID(MT_RXQ_MAIN_WA),
 MT7996_RX_MCU_RING_SIZE,
@@ -336,17 +373,24 @@ int mt7996_dma_init(struct mt7996_dev *dev)
 
 if (mt7996_band_valid(dev, MT_BAND2)) {
 /* rx data queue for band2 */
+ rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
+ if (mtk_wed_device_active(wed))
+ rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2);
+
 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
 MT_RXQ_ID(MT_RXQ_BAND2),
 MT7996_RX_RING_SIZE,
 MT_RX_BUF_SIZE,
- MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs);
+ rx_base);
 if (ret)
 return ret;
 
 /* tx free notify event from WA for band2
 * use pcie0's rx ring3, but, redirect pcie0 rx ring3 interrupt to pcie1
 */
+ if (mtk_wed_device_active(wed_ext) && !dev->rro_support)
+ dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE |
+ MT_QFLAG_WED_EXT;
 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2_WA],
 MT_RXQ_ID(MT_RXQ_BAND2_WA),
 MT7996_RX_MCU_RING_SIZE,
@@ -356,6 +400,56 @@ int mt7996_dma_init(struct mt7996_dev *dev)
 return ret;
 }
 
+
+ if (dev->rro_support) {
+ /* rx rro data queue for band0 */
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags = MT_RRO_Q_DATA(0);
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags |= MT_QFLAG_MAGIC;
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
+ MT_RXQ_ID(MT_RXQ_RRO_BAND0),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND0));
+ if (ret)
+ return ret;
+
+ /* tx free notify event from WA for band0 */
+ if (mtk_wed_device_active(wed))
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
+ MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
+ MT7996_RX_MCU_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
+ if (ret)
+ return ret;
+
+ if (mt7996_band_valid(dev, MT_BAND2)) {
+ /* rx rro data queue for band2 */
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags = MT_RRO_Q_DATA(1);
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags |= MT_QFLAG_MAGIC;
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
+ MT_RXQ_ID(MT_RXQ_RRO_BAND2),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND2) + hif1_ofs);
+ if (ret)
+ return ret;
+
+ /* tx free notify event from MAC for band2 */
+ if (mtk_wed_device_active(wed_ext))
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].flags = MT_WED_Q_TXFREE |
+ MT_QFLAG_WED_EXT;
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2],
+ MT_RXQ_ID(MT_RXQ_TXFREE_BAND2),
+ MT7996_RX_MCU_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND2) + hif1_ofs);
+ if (ret)
+ return ret;
+ }
+ }
+
 ret = mt76_init_queues(dev, mt76_dma_rx_poll);
 if (ret < 0)
 return ret;
diff --git a/mt7996/init.c b/mt7996/init.c
index a6caf4f1..6cfbc50d 100644
--- a/mt7996/init.c
+++ b/mt7996/init.c
@@ -534,6 +534,7 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
 struct mt76_phy *mphy;
 u32 mac_ofs, hif1_ofs = 0;
 int ret;
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
 
 if (!mt7996_band_valid(dev, band) || band == MT_BAND0)
 return 0;
@@ -541,8 +542,10 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
 if (phy)
 return 0;
 
- if (band == MT_BAND2 && dev->hif2)
+ if (band == MT_BAND2 && dev->hif2) {
 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+ wed = &dev->mt76.mmio.wed_ext;
+ }
 
 mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7996_ops, band);
 if (!mphy)
@@ -576,10 +579,11 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
 
 /* init wiphy according to mphy and phy */
 mt7996_init_wiphy(mphy->hw);
- ret = mt76_connac_init_tx_queues(phy->mt76,
- MT_TXQ_ID(band),
- MT7996_TX_RING_SIZE,
- MT_TXQ_RING_BASE(band) + hif1_ofs, 0);
+ ret = mt7996_init_tx_queues(mphy->priv,
+ MT_TXQ_ID(band),
+ MT7996_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(band) + hif1_ofs,
+ wed);
 if (ret)
 goto error;
 
@@ -1119,6 +1123,13 @@ int mt7996_register_device(struct mt7996_dev *dev)
 
 ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
 
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_ext)) {
+ mt76_wr(dev, MT_INT1_MASK_CSR,
+ dev->mt76.mmio.irqmask | MT_INT_TX_DONE_BAND2);
+ mtk_wed_device_start(&dev->mt76.mmio.wed_ext,
+ dev->mt76.mmio.irqmask | MT_INT_TX_DONE_BAND2);
+ }
+
 dev->recovery.hw_init_done = true;
 
 ret = mt7996_init_debugfs(&dev->phy);
diff --git a/mt7996/mac.c b/mt7996/mac.c
index 993b43ce..fc2d9269 100644
--- a/mt7996/mac.c
+++ b/mt7996/mac.c
@@ -1175,6 +1175,29 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 return 0;
 }
 
+u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
+{
+ struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
+ __le32 *txwi = ptr;
+ u32 val;
+
+ memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
+ txwi[0] = cpu_to_le32(val);
+
+ val = BIT(31) |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
+ txwi[1] = cpu_to_le32(val);
+
+ txp->token = cpu_to_le16(token_id);
+ txp->nbuf = 1;
+ txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
+
+ return MT_TXD_SIZE + sizeof(*txp);
+}
+
 static void
 mt7996_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
 {
@@ -1561,6 +1584,10 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
 
 switch (type) {
 case PKT_TYPE_TXRX_NOTIFY:
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_ext) &&
+ q == MT_RXQ_TXFREE_BAND2)
+ return;
+
 mt7996_mac_tx_free(dev, skb->data, skb->len);
 napi_consume_skb(skb, 1);
 break;
@@ -2035,7 +2062,7 @@ void mt7996_mac_reset_work(struct work_struct *work)
 mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
 
 /* enable dma tx/rx and interrupt */
- __mt7996_dma_enable(dev, false);
+ __mt7996_dma_enable(dev, false, false);
 
 clear_bit(MT76_MCU_RESET, &dev->mphy.state);
 clear_bit(MT76_RESET, &dev->mphy.state);
diff --git a/mt7996/main.c b/mt7996/main.c
index f0bdec6b..50fa6523 100644
--- a/mt7996/main.c
+++ b/mt7996/main.c
@@ -1405,6 +1405,49 @@ out:
 return ret;
 }
 
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static int
+mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct net_device_path_ctx *ctx,
+ struct net_device_path *path)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+
+ if (phy != &dev->phy && phy->mt76->band_idx == MT_BAND2)
+ wed = &dev->mt76.mmio.wed_ext;
+
+ if (!mtk_wed_device_active(wed))
+ return -ENODEV;
+
+ if (msta->wcid.idx > MT7996_WTBL_STA)
+ return -EIO;
+
+ path->type = DEV_PATH_MTK_WDMA;
+ path->dev = ctx->dev;
+ path->mtk_wdma.wdma_idx = wed->wdma_idx;
+ path->mtk_wdma.bss = mvif->mt76.idx;
+ path->mtk_wdma.queue = 0;
+ path->mtk_wdma.wcid = msta->wcid.idx;
+
+ /* pao info */
+ if (mtk_wed_device_support_pao(wed)) {
+ path->mtk_wdma.amsdu_en = 1;
+ path->mtk_wdma.is_sp = 0;
+ path->mtk_wdma.is_fixedrate = 0;
+ }
+ ctx->dev = NULL;
+
+ return 0;
+}
+
+#endif
+
 const struct ieee80211_ops mt7996_ops = {
 .tx = mt7996_tx,
 .start = mt7996_start,
@@ -1451,4 +1494,7 @@ const struct ieee80211_ops mt7996_ops = {
 .sta_add_debugfs = mt7996_sta_add_debugfs,
 #endif
 .set_radar_background = mt7996_set_radar_background,
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ .net_fill_forward_path = mt7996_net_fill_forward_path,
+#endif
 };
diff --git a/mt7996/mmio.c b/mt7996/mmio.c
index 3a591a7b..b9e47e73 100644
--- a/mt7996/mmio.c
+++ b/mt7996/mmio.c
@@ -10,6 +10,11 @@
 #include "mt7996.h"
 #include "mac.h"
 #include "../trace.h"
+#include "../dma.h"
+
+
+static bool wed_enable = true;
+module_param(wed_enable, bool, 0644);
 
 static const struct __base mt7996_reg_base[] = {
 [WF_AGG_BASE] = { { 0x820e2000, 0x820f2000, 0x830e2000 } },
@@ -191,6 +196,228 @@ static u32 mt7996_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
 return dev->bus_ops->rmw(mdev, __mt7996_reg_addr(dev, offset), mask, val);
 }
 
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static void mt7996_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
+{
+ struct mt7996_dev *dev;
+ struct page *page;
+ int i;
+
+ dev = container_of(wed, struct mt7996_dev, mt76.mmio.wed);
+ for (i = 0; i < dev->mt76.rx_token_size; i++) {
+ struct mt76_rxwi_cache *r;
+
+ r = mt76_rx_token_release(&dev->mt76, i);
+ if (!r || !r->ptr)
+ continue;
+
+ dma_unmap_single(dev->mt76.dma_dev, r->dma_addr,
+ wed->wlan.rx_size, DMA_FROM_DEVICE);
+ skb_free_frag(r->ptr);
+ r->ptr = NULL;
+
+ mt76_put_rxwi(&dev->mt76, r);
+ }
+
+ mt76_free_pending_rxwi(&dev->mt76);
+
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ struct mt76_queue *q = &dev->mt76.q_rx[i];
+
+ if (mt76_queue_is_wed_rx(q)) {
+ if (!q->rx_page.va)
+ continue;
+
+ page = virt_to_page(q->rx_page.va);
+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+ memset(&q->rx_page, 0, sizeof(q->rx_page));
+ }
+ }
+
+ if (!wed->rx_buf_ring.rx_page.va)
+ return;
+
+ page = virt_to_page(wed->rx_buf_ring.rx_page.va);
+ __page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias);
+ memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page));
+
+}
+
+static u32 mt7996_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+{
+ struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
+ struct mt7996_dev *dev;
+ u32 length;
+ int i;
+
+ dev = container_of(wed, struct mt7996_dev, mt76.mmio.wed);
+ length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+ sizeof(struct skb_shared_info));
+
+ for (i = 0; i < size; i++) {
+ struct mt76_rxwi_cache *r = mt76_get_rxwi(&dev->mt76);
+ dma_addr_t phy_addr;
+ int token;
+ void *ptr;
+
+ ptr = page_frag_alloc(&wed->rx_buf_ring.rx_page, length,
+ GFP_KERNEL);
+ if (!ptr) {
+ mt76_put_rxwi(&dev->mt76, r);
+ goto unmap;
+ }
+
+ phy_addr = dma_map_single(dev->mt76.dma_dev, ptr,
+ wed->wlan.rx_size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->mt76.dma_dev, phy_addr))) {
+ skb_free_frag(ptr);
+ mt76_put_rxwi(&dev->mt76, r);
+ goto unmap;
+ }
+
+ desc->buf0 = cpu_to_le32(phy_addr);
+ token = mt76_rx_token_consume(&dev->mt76, ptr, r, phy_addr);
+ if (token < 0) {
+ dma_unmap_single(dev->mt76.dma_dev, phy_addr,
+ wed->wlan.rx_size, DMA_TO_DEVICE);
+ skb_free_frag(ptr);
+ mt76_put_rxwi(&dev->mt76, r);
+ goto unmap;
+ }
+
+ desc->token |= cpu_to_le32(FIELD_PREP(MT_DMA_CTL_TOKEN,
+ token));
+ desc++;
+ }
+
+ return 0;
+
+unmap:
+ mt7996_mmio_wed_release_rx_buf(wed);
+ return -ENOMEM;
+}
+#endif
+
+int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
+ bool hif2, int *irq)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ struct pci_dev *pci_dev = pdev_ptr;
+ u32 hif1_ofs = 0;
+ int ret;
+
+ if (!wed_enable)
+ return 0;
+
+ dev->rro_support = true;
+
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ if (hif2)
+ wed = &dev->mt76.mmio.wed_ext;
+
+ wed->wlan.pci_dev = pci_dev;
+ wed->wlan.bus_type = MTK_WED_BUS_PCIE;
+
+ wed->wlan.base = devm_ioremap(dev->mt76.dev,
+ pci_resource_start(pci_dev, 0),
+ pci_resource_len(pci_dev, 0));
+ wed->wlan.phy_base = pci_resource_start(pci_dev, 0);
+
+ if (hif2) {
+ wed->wlan.wpdma_int = wed->wlan.phy_base +
+ MT_INT_PCIE1_SOURCE_CSR_EXT;
+ wed->wlan.wpdma_mask = wed->wlan.phy_base +
+ MT_INT_PCIE1_MASK_CSR;
+ wed->wlan.wpdma_tx = wed->wlan.phy_base + hif1_ofs +
+ MT_TXQ_RING_BASE(0) +
+ MT7996_TXQ_BAND2 * MT_RING_SIZE;
+ if (dev->rro_support) {
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_TXFREE2 * MT_RING_SIZE;
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_EXT) - 1;
+ } else {
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_MCU_WA_TRI * MT_RING_SIZE;
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_TRI) - 1;
+ }
+
+ wed->wlan.chip_id = 0x7991;
+ wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND2) - 1;
+ } else {
+ wed->wlan.wpdma_int = wed->wlan.phy_base + MT_INT_SOURCE_CSR;
+ wed->wlan.wpdma_mask = wed->wlan.phy_base + MT_INT_MASK_CSR;
+ wed->wlan.wpdma_tx = wed->wlan.phy_base + MT_TXQ_RING_BASE(0) +
+ MT7996_TXQ_BAND0 * MT_RING_SIZE;
+
+ wed->wlan.wpdma_rx_glo = wed->wlan.phy_base + MT_WFDMA0_GLO_CFG;
+
+ wed->wlan.wpdma_rx = wed->wlan.phy_base +
+ MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
+ MT7996_RXQ_BAND0 * MT_RING_SIZE;
+
+ wed->wlan.rx_nbuf = 65536;
+ wed->wlan.rx_npkt = 24576;
+ wed->wlan.rx_size = SKB_WITH_OVERHEAD(MT_RX_BUF_SIZE);
+
+ wed->wlan.rx_tbit[0] = ffs(MT_INT_RX_DONE_BAND0) - 1;
+ wed->wlan.rx_tbit[1] = ffs(MT_INT_RX_DONE_BAND2) - 1;
+
+ wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND0) - 1;
+ wed->wlan.tx_tbit[1] = ffs(MT_INT_TX_DONE_BAND1) - 1;
+ if (dev->rro_support) {
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_TXFREE0 * MT_RING_SIZE;
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_MAIN) - 1;
+ } else {
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_MAIN) - 1;
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_MCU_WA_MAIN * MT_RING_SIZE;
+ }
+ }
+
+ wed->wlan.nbuf = 16384;
+
+ wed->wlan.token_start = 0;
+
+ wed->wlan.max_amsdu_nums = 8;
+ wed->wlan.max_amsdu_len = 1536;
+
+ wed->wlan.init_buf = mt7996_wed_init_buf;
+ wed->wlan.offload_enable = NULL;
+ wed->wlan.offload_disable = NULL;
+ wed->wlan.init_rx_buf = mt7996_mmio_wed_init_rx_buf;
+ wed->wlan.release_rx_buf = mt7996_mmio_wed_release_rx_buf;
+ wed->wlan.update_wo_rx_stats = NULL;
+
+ dev->mt76.rx_token_size += wed->wlan.rx_npkt;
+
+ if (mtk_wed_device_attach(wed))
+ return 0;
+
+ *irq = wed->irq;
+ dev->mt76.dma_dev = wed->dev;
+
+ dev->mt76.token_size = 1024;
+
+ ret = dma_set_mask(wed->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ ret = dma_set_coherent_mask(wed->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ return 1;
+#else
+ return 0;
+#endif
+}
+
 static int mt7996_mmio_init(struct mt76_dev *mdev,
 void __iomem *mem_base,
 u32 device_id)
@@ -241,8 +468,17 @@ void mt7996_dual_hif_set_irq_mask(struct mt7996_dev *dev, bool write_reg,
 mdev->mmio.irqmask |= set;
 
 if (write_reg) {
- mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
- mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
+ if (mtk_wed_device_active(&mdev->mmio.wed)) {
+ mtk_wed_device_irq_set_mask(&mdev->mmio.wed,
+ mdev->mmio.irqmask);
+ if (mtk_wed_device_active(&mdev->mmio.wed_ext)) {
+ mtk_wed_device_irq_set_mask(&mdev->mmio.wed_ext,
+ mdev->mmio.irqmask);
+ }
+ } else {
+ mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
+ mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
+ }
 }
 
 spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags);
@@ -260,22 +496,36 @@ static void mt7996_rx_poll_complete(struct mt76_dev *mdev,
 static void mt7996_irq_tasklet(struct tasklet_struct *t)
 {
 struct mt7996_dev *dev = from_tasklet(dev, t, mt76.irq_tasklet);
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ struct mtk_wed_device *wed_ext = &dev->mt76.mmio.wed_ext;
 u32 i, intr, mask, intr1;
 
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
- if (dev->hif2)
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
-
- intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
- intr &= dev->mt76.mmio.irqmask;
- mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
-
- if (dev->hif2) {
- intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
- intr1 &= dev->mt76.mmio.irqmask;
- mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
+ if (dev->hif2 && mtk_wed_device_active(wed_ext)) {
+ mtk_wed_device_irq_set_mask(wed_ext, 0);
+ intr1 = mtk_wed_device_irq_get(wed_ext,
+ dev->mt76.mmio.irqmask);
+ if (intr1 & MT_INT_RX_TXFREE_EXT)
+ napi_schedule(&dev->mt76.napi[MT_RXQ_TXFREE_BAND2]);
+ }
 
- intr |= intr1;
+ if (mtk_wed_device_active(wed)) {
+ mtk_wed_device_irq_set_mask(wed, 0);
+ intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
+ intr |= mtk_wed_device_active(wed_ext) ? (intr1 & ~MT_INT_RX_TXFREE_EXT) : 0;
+ } else {
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+ if (dev->hif2) {
+ intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
+ intr1 &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
+ intr |= intr1;
+ }
 }
 
 trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
@@ -307,10 +557,19 @@ static void mt7996_irq_tasklet(struct tasklet_struct *t)
 irqreturn_t mt7996_irq_handler(int irq, void *dev_instance)
 {
 struct mt7996_dev *dev = dev_instance;
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
 
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
- if (dev->hif2)
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ if (mtk_wed_device_active(wed))
+ mtk_wed_device_irq_set_mask(wed, 0);
+ else
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ if (dev->hif2) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_ext))
+ mtk_wed_device_irq_set_mask(&dev->mt76.mmio.wed_ext, 0);
+ else
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ }
 
 if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
 return IRQ_NONE;
diff --git a/mt7996/mt7996.h b/mt7996/mt7996.h
index e371964b..43f20da4 100644
--- a/mt7996/mt7996.h
+++ b/mt7996/mt7996.h
@@ -544,7 +544,9 @@ int mt7996_dma_init(struct mt7996_dev *dev);
 void mt7996_dma_reset(struct mt7996_dev *dev, bool force);
 void mt7996_dma_prefetch(struct mt7996_dev *dev);
 void mt7996_dma_cleanup(struct mt7996_dev *dev);
-void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset);
+int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx,
+ int n_desc, int ring_base, struct mtk_wed_device *wed);
+void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset, bool wed_reset);
 void mt7996_init_txpower(struct mt7996_dev *dev,
 struct ieee80211_supported_band *sband);
 int mt7996_txbf_init(struct mt7996_dev *dev);
@@ -732,7 +734,9 @@ int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev,
 void mt7996_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 struct ieee80211_sta *sta, struct dentry *dir);
 #endif
-
+int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
+ bool hif2, int *irq);
+u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
 #ifdef CONFIG_MTK_VENDOR
 void mt7996_set_wireless_vif(void *data, u8 *mac, struct ieee80211_vif *vif);
 void mt7996_vendor_register(struct mt7996_phy *phy);
diff --git a/mt7996/pci.c b/mt7996/pci.c
index c5301050..869f32ac 100644
--- a/mt7996/pci.c
+++ b/mt7996/pci.c
@@ -125,15 +125,26 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
 mt7996_wfsys_reset(dev);
 hif2 = mt7996_pci_init_hif2(pdev);
 
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ ret = mt7996_mmio_wed_init(dev, pdev, false, &irq);
 if (ret < 0)
- goto free_device;
+ goto free_wed_or_irq_vector;
 
- irq = pdev->irq;
- ret = devm_request_irq(mdev->dev, irq, mt7996_irq_handler,
+ if (!ret) {
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ goto free_device;
+ }
+ ret = devm_request_irq(mdev->dev, pdev->irq, mt7996_irq_handler,
 IRQF_SHARED, KBUILD_MODNAME, dev);
 if (ret)
- goto free_irq_vector;
+ goto free_wed_or_irq_vector;
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ ret = devm_request_irq(mdev->dev, irq, mt7996_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME "-wed", dev);
+ if (ret)
+ goto free_irq;
+ }
 
 mt76_wr(dev, MT_INT_MASK_CSR, 0);
 /* master switch of PCIe tnterrupt enable */
@@ -143,16 +154,30 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
 hif2_dev = container_of(hif2->dev, struct pci_dev, dev);
 dev->hif2 = hif2;
 
- ret = pci_alloc_irq_vectors(hif2_dev, 1, 1, PCI_IRQ_ALL_TYPES);
+ ret = mt7996_mmio_wed_init(dev, hif2_dev, true, &irq);
 if (ret < 0)
- goto free_hif2;
+ goto free_irq;
+
+ if (!ret) {
+ ret = pci_alloc_irq_vectors(hif2_dev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ goto free_hif2;
 
- dev->hif2->irq = hif2_dev->irq;
- ret = devm_request_irq(mdev->dev, dev->hif2->irq,
- mt7996_irq_handler, IRQF_SHARED,
- KBUILD_MODNAME "-hif", dev);
+ dev->hif2->irq = hif2_dev->irq;
+ }
+
+ ret = devm_request_irq(mdev->dev, hif2_dev->irq, mt7996_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME "-hif", dev);
 if (ret)
- goto free_hif2_irq_vector;
+ goto free_hif2;
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_ext)) {
+ ret = devm_request_irq(mdev->dev, irq,
+ mt7996_irq_handler, IRQF_SHARED,
+ KBUILD_MODNAME "-wed-hif", dev);
+ if (ret)
+ goto free_hif2_irq_vector;
+ }
 
 mt76_wr(dev, MT_INT1_MASK_CSR, 0);
 /* master switch of PCIe tnterrupt enable */
@@ -168,15 +193,28 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
 free_hif2_irq:
 if (dev->hif2)
 devm_free_irq(mdev->dev, dev->hif2->irq, dev);
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_ext))
+ devm_free_irq(mdev->dev, dev->mt76.mmio.wed_ext.irq, dev);
 free_hif2_irq_vector:
- if (dev->hif2)
- pci_free_irq_vectors(hif2_dev);
+ if (dev->hif2) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_ext))
+ mtk_wed_device_detach(&dev->mt76.mmio.wed_ext);
+ else
+ pci_free_irq_vectors(hif2_dev);
+ }
 free_hif2:
 if (dev->hif2)
 put_device(dev->hif2->dev);
- devm_free_irq(mdev->dev, irq, dev);
-free_irq_vector:
- pci_free_irq_vectors(pdev);
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ devm_free_irq(mdev->dev, dev->mt76.mmio.wed.irq, dev);
+free_irq:
+ devm_free_irq(mdev->dev, pdev->irq, dev);
+free_wed_or_irq_vector:
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mtk_wed_device_detach(&dev->mt76.mmio.wed);
+ else
+ pci_free_irq_vectors(pdev);
+
 free_device:
 mt76_free_device(&dev->mt76);
 
diff --git a/mt7996/regs.h b/mt7996/regs.h
index 6ef905a9..04658639 100644
--- a/mt7996/regs.h
+++ b/mt7996/regs.h
@@ -323,6 +323,7 @@ enum base_rev {
 
 #define MT_WFDMA0_RX_INT_PCIE_SEL MT_WFDMA0(0x154)
 #define MT_WFDMA0_RX_INT_SEL_RING3 BIT(3)
+#define MT_WFDMA0_RX_INT_SEL_RING6 BIT(6)
 
 #define MT_WFDMA0_MCU_HOST_INT_ENA MT_WFDMA0(0x1f4)
 
@@ -367,6 +368,9 @@ enum base_rev {
 #define MT_WFDMA0_PCIE1_BASE 0xd8000
 #define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
 
+#define MT_INT_PCIE1_SOURCE_CSR_EXT MT_WFDMA0_PCIE1(0x118)
+#define MT_INT_PCIE1_MASK_CSR MT_WFDMA0_PCIE1(0x11c)
+
 #define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c)
 #define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
 #define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
@@ -412,6 +416,7 @@ enum base_rev {
 #define MT_INT_RX_TXFREE_MAIN BIT(17)
 #define MT_INT_RX_TXFREE_TRI BIT(15)
 #define MT_INT_MCU_CMD BIT(29)
+#define MT_INT_RX_TXFREE_EXT BIT(26)
 
 #define MT_INT_RX(q) (dev->q_int_mask[__RXQ(q)])
 #define MT_INT_TX_MCU(q) (dev->q_int_mask[(q)])
-- 
2.39.2
