From 5df084a32eac68dd66a3b833cf5f718118850b08 Mon Sep 17 00:00:00 2001
From: mtk27745 <rex.lu@mediatek.com>
Date: Tue, 23 May 2023 12:06:29 +0800
Subject: [PATCH 2008/2008] wifi: mt76: add SER support for wed3.0

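When WED 3.0 is active, L1 SER recovery has to be coordinated with WED.
Stop the WED device(s) before recovery and issue a WED DMA reset, skip
the RX queues owned by WED (hardware RRO and TXFREE rings) when
disabling NAPI and when resetting and refilling the host rings, and
pass the reset state down to mtk_wed_device_rx_ring_setup(). Hook up
wlan.reset/wlan.reset_complete on the primary WED device (the hif2
instance leaves them NULL) so the WED driver can trigger an L1 SER via
mt7996_mcu_set_ser() and synchronize with the driver through the
wed_reset/wed_reset_complete completions. Once recovery completes,
restore the WED interrupt mask and restart hardware RRO.
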
Change-Id: I2711b9dc336fca9a1ae32a8fbf27810a7e27b1e3
---
 dma.c         |  7 +++++--
 mt7996/dma.c  | 48 +++++++++++++++++++++++++++++++++++++++++++++---
 mt7996/mac.c  | 49 ++++++++++++++++++++++++++++++++++++++++++++++++-
 mt7996/mmio.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 145 insertions(+), 6 deletions(-)

diff --git a/dma.c b/dma.c
index e5b4d898..e31f6390 100644
--- a/dma.c
+++ b/dma.c
@@ -770,8 +770,9 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 			q->head = q->ndesc - 1;
 			q->queued = q->ndesc - 1;
 		}
+		q->flags = flags;
 	} else {
-		ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, 0);
+		ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
 		if (!ret)
 			q->wed_regs = wed->rx_ring[ring].reg_base;
 	}
@@ -902,7 +903,9 @@ done:
 
 	/* reset WED rx queues */
 	mt76_dma_wed_setup(dev, q, true);
-	if (q->flags != MT_WED_Q_TXFREE) {
+	if (q->flags != MT_WED_Q_TXFREE &&
+	    !((q->flags & MT_QFLAG_RRO) &&
+	      mtk_wed_device_active(&dev->mmio.wed))) {
 		mt76_dma_sync_idx(dev, q);
 		mt76_dma_rx_fill(dev, q);
 	}
diff --git a/mt7996/dma.c b/mt7996/dma.c
index c5c7f160..471ae81c 100644
--- a/mt7996/dma.c
+++ b/mt7996/dma.c
@@ -495,6 +495,12 @@ int mt7996_dma_init(struct mt7996_dev *dev)
 	if (mt7996_band_valid(dev, MT_BAND2)) {
 		/* rx data queue for band2 */
 		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
+		if (mtk_wed_device_active(wed)) {
+			rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2);
+			if (mtk_wed_get_rx_capa(wed))
+				dev->mt76.q_rx[MT_RXQ_BAND2].flags = MT_WED_Q_RX(1);
+		}
+
 		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
 				       MT_RXQ_ID(MT_RXQ_BAND2),
 				       MT7996_RX_RING_SIZE,
@@ -582,11 +588,35 @@ int mt7996_dma_init(struct mt7996_dev *dev)
 	return 0;
 }
 
+static void mt7996_dma_wed_reset(struct mt7996_dev *dev)
+{
+	struct mt76_dev *mdev = &dev->mt76;
+
+	if (!test_bit(MT76_STATE_WED_RESET, &dev->mphy.state))
+		return;
+
+	complete(&mdev->mmio.wed_reset);
+
+	if (!wait_for_completion_timeout(&dev->mt76.mmio.wed_reset_complete,
+					 3 * HZ))
+		dev_err(dev->mt76.dev, "wed reset complete timeout\n");
+}
+
+static void
+mt7996_dma_reset_tx_queue(struct mt7996_dev *dev, struct mt76_queue *q)
+{
+	mt76_queue_reset(dev, q, false);
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+		mt76_dma_wed_setup(&dev->mt76, q, true);
+}
+
 void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
 {
 	struct mt76_phy *phy2 = dev->mt76.phys[MT_BAND1];
 	struct mt76_phy *phy3 = dev->mt76.phys[MT_BAND2];
 	u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+	struct mtk_wed_device *wed_ext = &dev->mt76.mmio.wed_ext;
 	int i;
 
 	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
@@ -620,21 +650,33 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
 	if (force)
 		mt7996_wfsys_reset(dev);
 
+	if (dev->hif2 && mtk_wed_device_active(wed_ext))
+		mtk_wed_device_dma_reset(wed_ext);
+
+	if (mtk_wed_device_active(wed))
+		mtk_wed_device_dma_reset(wed);
+
 	mt7996_dma_disable(dev, force);
+	mt7996_dma_wed_reset(dev);
 
 	/* reset hw queues */
 	for (i = 0; i < __MT_TXQ_MAX; i++) {
-		mt76_queue_reset(dev, dev->mphy.q_tx[i], false);
+		mt7996_dma_reset_tx_queue(dev, dev->mphy.q_tx[i]);
 		if (phy2)
-			mt76_queue_reset(dev, phy2->q_tx[i], false);
+			mt7996_dma_reset_tx_queue(dev, phy2->q_tx[i]);
 		if (phy3)
-			mt76_queue_reset(dev, phy3->q_tx[i], false);
+			mt7996_dma_reset_tx_queue(dev, phy3->q_tx[i]);
 	}
 
 	for (i = 0; i < __MT_MCUQ_MAX; i++)
 		mt76_queue_reset(dev, dev->mt76.q_mcu[i], false);
 
 	mt76_for_each_q_rx(&dev->mt76, i) {
+		if (mtk_wed_device_active(wed) &&
+		    ((dev->mt76.q_rx[i].flags & MT_QFLAG_RRO) ||
+		     dev->mt76.q_rx[i].flags == MT_WED_Q_TXFREE))
+			continue;
+
 		mt76_queue_reset(dev, &dev->mt76.q_rx[i], false);
 	}
 
diff --git a/mt7996/mac.c b/mt7996/mac.c
index 3a89013c..d1082e89 100644
--- a/mt7996/mac.c
+++ b/mt7996/mac.c
@@ -2002,6 +2002,10 @@ mt7996_mac_restart(struct mt7996_dev *dev)
 	/* disable all tx/rx napi */
 	mt76_worker_disable(&dev->mt76.tx_worker);
 	mt76_for_each_q_rx(mdev, i) {
+		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+		    (mdev->q_rx[i].flags & MT_QFLAG_RRO))
+			continue;
+
 		if (mdev->q_rx[i].ndesc)
 			napi_disable(&dev->mt76.napi[i]);
 	}
@@ -2015,6 +2019,10 @@ mt7996_mac_restart(struct mt7996_dev *dev)
 
 	local_bh_disable();
 	mt76_for_each_q_rx(mdev, i) {
+		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+		    (mdev->q_rx[i].flags & MT_QFLAG_RRO))
+			continue;
+
 		if (mdev->q_rx[i].ndesc) {
 			napi_enable(&dev->mt76.napi[i]);
 			napi_schedule(&dev->mt76.napi[i]);
@@ -2189,6 +2197,13 @@ void mt7996_mac_reset_work(struct work_struct *work)
 
 	dev_info(dev->mt76.dev,"\n%s L1 SER recovery start.",
 		 wiphy_name(dev->mt76.hw->wiphy));
+
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed_ext))
+		mtk_wed_device_stop(&dev->mt76.mmio.wed_ext);
+
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+		mtk_wed_device_stop(&dev->mt76.mmio.wed);
+
 	ieee80211_stop_queues(mt76_hw(dev));
 	if (phy2)
 		ieee80211_stop_queues(phy2->mt76->hw);
@@ -2212,8 +2227,13 @@ void mt7996_mac_reset_work(struct work_struct *work)
 		cancel_delayed_work_sync(&phy3->mt76->mac_work);
 	}
 	mt76_worker_disable(&dev->mt76.tx_worker);
-	mt76_for_each_q_rx(&dev->mt76, i)
+	mt76_for_each_q_rx(&dev->mt76, i) {
+		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+		    (dev->mt76.q_rx[i].flags & MT_QFLAG_RRO))
+			continue;
+
 		napi_disable(&dev->mt76.napi[i]);
+	}
 	napi_disable(&dev->mt76.tx_napi);
 
 	mutex_lock(&dev->mt76.mutex);
@@ -2236,6 +2256,29 @@ void mt7996_mac_reset_work(struct work_struct *work)
 	/* enable dma tx/rx and interrupt */
 	__mt7996_dma_enable(dev, false, false);
 
+
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+		u32 wed_irq_mask = dev->mt76.mmio.irqmask |
+				   MT_INT_RRO_RX_DONE |
+				   MT_INT_TX_DONE_BAND2;
+
+		if (mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
+			wed_irq_mask &= ~MT_INT_RX_DONE_RRO_IND;
+
+		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+
+		mtk_wed_device_start_hwrro(&dev->mt76.mmio.wed, wed_irq_mask, true);
+		mt7996_irq_enable(dev, wed_irq_mask);
+		mt7996_irq_disable(dev, 0);
+	}
+
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed_ext)) {
+		mt76_wr(dev, MT_INT1_MASK_CSR,
+			dev->mt76.mmio.irqmask | MT_INT_TX_DONE_BAND2);
+		mtk_wed_device_start(&dev->mt76.mmio.wed_ext,
+				     dev->mt76.mmio.irqmask | MT_INT_TX_DONE_BAND2);
+	}
+
 	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
 	clear_bit(MT76_RESET, &dev->mphy.state);
 	if (phy2)
@@ -2245,6 +2288,10 @@ void mt7996_mac_reset_work(struct work_struct *work)
 
 	local_bh_disable();
 	mt76_for_each_q_rx(&dev->mt76, i) {
+		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+		    ((dev->mt76.q_rx[i].flags & MT_QFLAG_RRO)))
+			continue;
+
 		napi_enable(&dev->mt76.napi[i]);
 		napi_schedule(&dev->mt76.napi[i]);
 	}
diff --git a/mt7996/mmio.c b/mt7996/mmio.c
index 9960dca7..fe34bb7d 100644
--- a/mt7996/mmio.c
+++ b/mt7996/mmio.c
@@ -6,9 +6,11 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/rtnetlink.h>
 
 #include "mt7996.h"
 #include "mac.h"
+#include "mcu.h"
 #include "../trace.h"
 #include "../dma.h"
 
@@ -297,6 +299,43 @@ unmap:
 	mt7996_mmio_wed_release_rx_buf(wed);
 	return -ENOMEM;
 }
+
+static int mt7996_mmio_wed_reset(struct mtk_wed_device *wed)
+{
+	struct mt76_dev *mdev = container_of(wed, struct mt76_dev, mmio.wed);
+	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+	struct mt76_phy *mphy = &dev->mphy;
+	int ret;
+
+	ASSERT_RTNL();
+
+	if (test_and_set_bit(MT76_STATE_WED_RESET, &mphy->state))
+		return -EBUSY;
+
+	ret = mt7996_mcu_set_ser(dev, UNI_CMD_SER_TRIGGER, UNI_CMD_SER_SET_RECOVER_L1,
+				 mphy->band_idx);
+	if (ret)
+		goto out;
+
+	rtnl_unlock();
+	if (!wait_for_completion_timeout(&mdev->mmio.wed_reset, 20 * HZ)) {
+		dev_err(mdev->dev, "wed reset timeout\n");
+		ret = -ETIMEDOUT;
+	}
+	rtnl_lock();
+out:
+	clear_bit(MT76_STATE_WED_RESET, &mphy->state);
+
+	return ret;
+}
+
+static void mt7996_mmio_wed_reset_complete(struct mtk_wed_device *wed)
+{
+	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+	complete(&dev->mmio.wed_reset_complete);
+}
+
 #endif
 
 int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
@@ -421,6 +460,14 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
 	wed->wlan.init_rx_buf = mt7996_mmio_wed_init_rx_buf;
 	wed->wlan.release_rx_buf = mt7996_mmio_wed_release_rx_buf;
 	wed->wlan.update_wo_rx_stats = NULL;
+	if (hif2) {
+		wed->wlan.reset = NULL;
+		wed->wlan.reset_complete = NULL;
+	} else {
+		wed->wlan.reset = mt7996_mmio_wed_reset;
+		wed->wlan.reset_complete = mt7996_mmio_wed_reset_complete;
+	}
+
 
 	if (mtk_wed_device_attach(wed))
 		return 0;
-- 
2.39.2