From 37113fa0b112098fc65ace049e984438629a6c4f Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Fri, 25 Nov 2022 14:07:46 +0800
Subject: [PATCH 3005/3010] mt76: mt7915: wed: add ser support when wed on

Export mt76_dma_wed_setup() and mt76_free_pending_rxwi(), pass a reset
flag down to the WED tx/rx ring setup, and register a ser_trigger
callback so that the WED rings are torn down and re-attached when SER
(system error recovery) runs while WED offload is active.

Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
 dma.c           | 29 ++++++++++++++++++++---------
 dma.h           |  1 +
 mt76.h          |  1 +
 mt7915/dma.c    | 36 +++++++++++++++++++++++++++++++-----
 mt7915/mac.c    | 18 ++++++++++++++++++
 mt7915/mmio.c   |  3 +++
 mt7915/mt7915.h |  1 +
 7 files changed, 75 insertions(+), 14 deletions(-)

diff --git a/dma.c b/dma.c
index 40885754..87ce79cb 100644
--- a/dma.c
+++ b/dma.c
@@ -165,7 +165,7 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
 	local_bh_enable();
 }
 
-static void
+void
 mt76_free_pending_rxwi(struct mt76_dev *dev)
 {
 	struct mt76_txwi_cache *t;
@@ -178,6 +178,7 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
 	}
 	local_bh_enable();
 }
+EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);
 
 static void
 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
@@ -623,14 +624,18 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 	return frames;
 }
 
-static int
-mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
+int
+mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 {
 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
 	struct mtk_wed_device *wed = &dev->mmio.wed;
 	int ret, type, ring;
-	u8 flags = q->flags;
+	u8 flags;
 
+	if (!q || !q->ndesc)
+		return -EINVAL;
+
+	flags = q->flags;
 	if (!mtk_wed_device_active(wed))
 		q->flags &= ~MT_QFLAG_WED;
 
@@ -642,7 +647,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
 
 	switch (type) {
 	case MT76_WED_Q_TX:
-		ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs);
+		ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, reset);
 		if (!ret)
 			q->wed_regs = wed->tx_ring[ring].reg_base;
 		break;
@@ -658,7 +663,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
 		q->wed_regs = wed->txfree_ring.reg_base;
 		break;
 	case MT76_WED_Q_RX:
-		ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs);
+		ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
 		if (!ret)
 			q->wed_regs = wed->rx_ring[ring].reg_base;
 		break;
@@ -671,6 +676,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
 	return 0;
 #endif
 }
+EXPORT_SYMBOL_GPL(mt76_dma_wed_setup);
 
 static int
 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
@@ -697,7 +703,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 	if (!q->entry)
 		return -ENOMEM;
 
-	ret = mt76_dma_wed_setup(dev, q);
+	ret = mt76_dma_wed_setup(dev, q, false);
 	if (ret)
 		return ret;
 
@@ -748,8 +754,13 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
 
 	mt76_dma_rx_cleanup(dev, q);
-	mt76_dma_sync_idx(dev, q);
-	mt76_dma_rx_fill(dev, q);
+
+	mt76_dma_wed_setup(dev, q, true);
+
+	if (q->flags != MT_WED_Q_TXFREE) {
+		mt76_dma_sync_idx(dev, q);
+		mt76_dma_rx_fill(dev, q);
+	}
 
 	if (!q->rx_head)
 		return;
diff --git a/dma.h b/dma.h
index 53c6ce25..4b9bc7f4 100644
--- a/dma.h
+++ b/dma.h
@@ -56,5 +56,6 @@ enum mt76_mcu_evt_type {
 int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
 void mt76_dma_attach(struct mt76_dev *dev);
 void mt76_dma_cleanup(struct mt76_dev *dev);
+int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
 
 #endif
diff --git a/mt76.h b/mt76.h
index bb0433b2..cca8986f 100644
--- a/mt76.h
+++ b/mt76.h
@@ -1380,6 +1380,7 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
 void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
 struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+void mt76_free_pending_rxwi(struct mt76_dev *dev);
 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
 		      struct napi_struct *napi);
 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
diff --git a/mt7915/dma.c b/mt7915/dma.c
index 27b67800..03563919 100644
--- a/mt7915/dma.c
+++ b/mt7915/dma.c
@@ -562,6 +562,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
 int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
 {
 	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
+	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
 	int i;
 
 	/* clean up hw queues */
@@ -581,28 +582,53 @@ int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
 	if (force)
 		mt7915_wfsys_reset(dev);
 
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);
 	mt7915_dma_disable(dev, force);
 
+	/* set wifi reset done, wait FE reset */
+	if (mtk_wed_device_active(wed) && atomic_read(&wed->fe_reset)) {
+		atomic_set(&wed->fe_reset, 0);
+		rtnl_lock();
+		complete(&wed->wlan_reset_done);
+		rtnl_unlock();
+		wait_for_completion(&wed->fe_reset_done);
+	}
+
 	/* reset hw queues */
 	for (i = 0; i < __MT_TXQ_MAX; i++) {
 		mt76_queue_reset(dev, dev->mphy.q_tx[i]);
-		if (mphy_ext)
+		if (mphy_ext) {
 			mt76_queue_reset(dev, mphy_ext->q_tx[i]);
+			if (mtk_wed_device_active(wed))
+				mt76_dma_wed_setup(&dev->mt76,
+						   mphy_ext->q_tx[i],
+						   true);
+		}
+		if (mtk_wed_device_active(wed))
+			mt76_dma_wed_setup(&dev->mt76, dev->mphy.q_tx[i],
+					   true);
 	}
 
 	for (i = 0; i < __MT_MCUQ_MAX; i++)
 		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
 
-	mt76_for_each_q_rx(&dev->mt76, i)
-		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+	mt76_for_each_q_rx(&dev->mt76, i) {
+		if (dev->mt76.q_rx[i].flags != MT_WED_Q_TXFREE)
+			mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+	}
 
 	mt76_tx_status_check(&dev->mt76, true);
 
-	mt7915_dma_enable(dev);
-
 	mt76_for_each_q_rx(&dev->mt76, i)
 		mt76_queue_rx_reset(dev, i);
 
+	if (mtk_wed_device_active(wed) && is_mt7915(&dev->mt76))
+		mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
+			 MT_WFDMA0_EXT0_RXWB_KEEP);
+
+	mt7915_dma_enable(dev);
+
 	return 0;
 }
 
diff --git a/mt7915/mac.c b/mt7915/mac.c
index d07bf790..f72e2bc2 100644
--- a/mt7915/mac.c
+++ b/mt7915/mac.c
@@ -895,6 +895,18 @@ u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
 	return MT_TXD_TXP_BUF_SIZE;
 }
 
+void mt7915_wed_trigger_ser(struct mtk_wed_device *wed)
+{
+	struct mt7915_dev *dev;
+	u8 band_idx;
+	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+	band_idx = dev->phy.band_idx;
+
+	mt7915_mcu_set_ser(dev, SER_RECOVER, 1, band_idx);
+
+	return;
+}
+
 static void
 mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
 {
@@ -1633,6 +1645,12 @@ void mt7915_mac_reset_work(struct work_struct *work)
 	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
 		return;
 
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+		mtk_wed_device_stop(&dev->mt76.mmio.wed, true);
+		if (!is_mt7986(&dev->mt76))
+			mt76_wr(dev, MT_INT_WED_MASK_CSR, 0);
+	}
+
 	ieee80211_stop_queues(mt76_hw(dev));
 	if (ext_phy)
 		ieee80211_stop_queues(ext_phy->hw);
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index f348a779..f5dfee37 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -617,6 +617,8 @@ static void mt7915_wed_release_rx_buf(struct mtk_wed_device *wed)
 		mt76_put_rxwi(&dev->mt76, t);
 	}
 
+	mt76_free_pending_rxwi(&dev->mt76);
+
 	if (!wed->rx_buf_ring.rx_page.va)
 		return;
 
@@ -775,6 +777,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
 	wed->wlan.init_rx_buf = mt7915_wed_init_rx_buf;
 	wed->wlan.release_rx_buf = mt7915_wed_release_rx_buf;
 	wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats;
+	wed->wlan.ser_trigger = mt7915_wed_trigger_ser;
 
 	dev->mt76.rx_token_size = wed->wlan.rx_npkt;
 
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
index 6bc33f21..ec61941e 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -562,6 +562,7 @@ void mt7915_wfsys_reset(struct mt7915_dev *dev);
 irqreturn_t mt7915_irq_handler(int irq, void *dev_instance);
 u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif);
 u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
+void mt7915_wed_trigger_ser(struct mtk_wed_device *wed);
 
 int mt7915_register_device(struct mt7915_dev *dev);
 void mt7915_unregister_device(struct mt7915_dev *dev);
-- 
2.36.1
