blob: 3fc96e652b8da86a3f36f1bc0e8e4c4a49462674 [file] [log] [blame]
developer047bc182022-11-16 12:20:48 +08001From cfa2b02b3cbc63ccf936d0620c36ed5a5c841cb7 Mon Sep 17 00:00:00 2001
developer63541d22022-08-02 13:00:04 +08002From: Sujuan Chen <sujuan.chen@mediatek.com>
3Date: Thu, 28 Jul 2022 11:16:15 +0800
developer047bc182022-11-16 12:20:48 +08004Subject: [PATCH 3005/3011] mt76 add ser support when wed on
developer63541d22022-08-02 13:00:04 +08005
6Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
7---
developerf2548b02022-08-16 15:37:38 +08008 dma.c | 29 ++++++++++++++++++++---------
9 dma.h | 2 +-
10 mt76.h | 1 +
11 mt7915/dma.c | 36 +++++++++++++++++++++++++++++++-----
12 mt7915/mac.c | 20 ++++++++++++++++++++
13 mt7915/mmio.c | 2 ++
14 mt7915/mt7915.h | 1 +
15 7 files changed, 76 insertions(+), 15 deletions(-)
developer63541d22022-08-02 13:00:04 +080016
17diff --git a/dma.c b/dma.c
developer047bc182022-11-16 12:20:48 +080018index a8739eb4..d63b02f5 100644
developer63541d22022-08-02 13:00:04 +080019--- a/dma.c
20+++ b/dma.c
developerf2548b02022-08-16 15:37:38 +080021@@ -169,7 +169,7 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
22 local_bh_enable();
23 }
24
25-static void
26+void
27 mt76_free_pending_rxwi(struct mt76_dev *dev)
28 {
29 struct mt76_txwi_cache *r;
30@@ -183,6 +183,7 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
31 }
32 local_bh_enable();
33 }
34+EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);
35
36 static void
37 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
38@@ -624,14 +625,18 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
developer63541d22022-08-02 13:00:04 +080039 return frames;
40 }
41
42-static int
43-mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
44+int
45+mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
46 {
47 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
48 struct mtk_wed_device *wed = &dev->mmio.wed;
49 int ret, type, ring;
50- u8 flags = q->flags;
51+ u8 flags;
developerf2548b02022-08-16 15:37:38 +080052
developer63541d22022-08-02 13:00:04 +080053+ if (!q || !q->ndesc)
54+ return -EINVAL;
developerf2548b02022-08-16 15:37:38 +080055+
developer63541d22022-08-02 13:00:04 +080056+ flags = q->flags;
57 if (!mtk_wed_device_active(wed))
58 q->flags &= ~MT_QFLAG_WED;
59
developerf2548b02022-08-16 15:37:38 +080060@@ -643,7 +648,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
developer63541d22022-08-02 13:00:04 +080061
62 switch (type) {
63 case MT76_WED_Q_TX:
64- ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs);
65+ ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, reset);
66 if (!ret)
67 q->wed_regs = wed->tx_ring[ring].reg_base;
68 break;
developerf2548b02022-08-16 15:37:38 +080069@@ -659,7 +664,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
developer63541d22022-08-02 13:00:04 +080070 q->wed_regs = wed->txfree_ring.reg_base;
71 break;
72 case MT76_WED_Q_RX:
73- ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs);
74+ ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
75 if (!ret)
76 q->wed_regs = wed->rx_ring[ring].reg_base;
77 break;
developerf2548b02022-08-16 15:37:38 +080078@@ -672,6 +677,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
developer63541d22022-08-02 13:00:04 +080079 return 0;
80 #endif
81 }
82+EXPORT_SYMBOL_GPL(mt76_dma_wed_setup);
83
84 static int
85 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
developerf2548b02022-08-16 15:37:38 +080086@@ -704,7 +710,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
developer63541d22022-08-02 13:00:04 +080087 if (!q->entry)
88 return -ENOMEM;
89
90- ret = mt76_dma_wed_setup(dev, q);
91+ ret = mt76_dma_wed_setup(dev, q, false);
92 if (ret)
93 return ret;
94
developerf2548b02022-08-16 15:37:38 +080095@@ -755,8 +761,13 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
developer63541d22022-08-02 13:00:04 +080096 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
97
98 mt76_dma_rx_cleanup(dev, q);
99- mt76_dma_sync_idx(dev, q);
100- mt76_dma_rx_fill(dev, q);
101+
102+ mt76_dma_wed_setup(dev, q, true);
103+
104+ if (q->flags != MT_WED_Q_TXFREE) {
105+ mt76_dma_sync_idx(dev, q);
106+ mt76_dma_rx_fill(dev, q);
107+ }
108
109 if (!q->rx_head)
110 return;
111diff --git a/dma.h b/dma.h
developer047bc182022-11-16 12:20:48 +0800112index 90370d12..083cbca4 100644
developer63541d22022-08-02 13:00:04 +0800113--- a/dma.h
114+++ b/dma.h
115@@ -58,5 +58,5 @@ enum mt76_mcu_evt_type {
116 int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
117 void mt76_dma_attach(struct mt76_dev *dev);
118 void mt76_dma_cleanup(struct mt76_dev *dev);
119-
120+int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
121 #endif
developerf2548b02022-08-16 15:37:38 +0800122diff --git a/mt76.h b/mt76.h
developer047bc182022-11-16 12:20:48 +0800123index 627bcbf9..f22e96e0 100644
developerf2548b02022-08-16 15:37:38 +0800124--- a/mt76.h
125+++ b/mt76.h
developer047bc182022-11-16 12:20:48 +0800126@@ -1375,6 +1375,7 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
developerf2548b02022-08-16 15:37:38 +0800127 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
128 void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
129 struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
130+void mt76_free_pending_rxwi(struct mt76_dev *dev);
131 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
132 struct napi_struct *napi);
133 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
developer63541d22022-08-02 13:00:04 +0800134diff --git a/mt7915/dma.c b/mt7915/dma.c
developer047bc182022-11-16 12:20:48 +0800135index 6f6550f5..8edfa465 100644
developer63541d22022-08-02 13:00:04 +0800136--- a/mt7915/dma.c
137+++ b/mt7915/dma.c
developer047bc182022-11-16 12:20:48 +0800138@@ -553,6 +553,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
developerf2548b02022-08-16 15:37:38 +0800139 int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
140 {
developerf7a3ca32022-09-01 14:44:55 +0800141 struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
developerf2548b02022-08-16 15:37:38 +0800142+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
143 int i;
144
145 /* clean up hw queues */
developer047bc182022-11-16 12:20:48 +0800146@@ -572,28 +573,53 @@ int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
147 if (force)
developer63541d22022-08-02 13:00:04 +0800148 mt7915_wfsys_reset(dev);
149
developer63541d22022-08-02 13:00:04 +0800150+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
151+ mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);
152 mt7915_dma_disable(dev, force);
153
developerf2548b02022-08-16 15:37:38 +0800154+ /* set wifi reset done, wait FE reset */
155+ if (mtk_wed_device_active(wed) && atomic_read(&wed->fe_reset)) {
156+ atomic_set(&wed->fe_reset, 0);
157+ rtnl_lock();
158+ complete(&wed->wlan_reset_done);
159+ rtnl_unlock();
160+ wait_for_completion(&wed->fe_reset_done);
161+ }
162+
developer63541d22022-08-02 13:00:04 +0800163 /* reset hw queues */
164 for (i = 0; i < __MT_TXQ_MAX; i++) {
165 mt76_queue_reset(dev, dev->mphy.q_tx[i]);
166- if (mphy_ext)
167+ if (mphy_ext) {
168 mt76_queue_reset(dev, mphy_ext->q_tx[i]);
developerf2548b02022-08-16 15:37:38 +0800169+ if (mtk_wed_device_active(wed))
developer63541d22022-08-02 13:00:04 +0800170+ mt76_dma_wed_setup(&dev->mt76,
171+ mphy_ext->q_tx[i],
172+ true);
173+ }
developerf2548b02022-08-16 15:37:38 +0800174+ if (mtk_wed_device_active(wed))
developer63541d22022-08-02 13:00:04 +0800175+ mt76_dma_wed_setup(&dev->mt76, dev->mphy.q_tx[i],
176+ true);
177 }
178
179 for (i = 0; i < __MT_MCUQ_MAX; i++)
180 mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
181
182- mt76_for_each_q_rx(&dev->mt76, i)
183- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
184+ mt76_for_each_q_rx(&dev->mt76, i) {
185+ if (dev->mt76.q_rx[i].flags != MT_WED_Q_TXFREE)
186+ mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
187+ }
188
189 mt76_tx_status_check(&dev->mt76, true);
190
191- mt7915_dma_enable(dev);
192-
193 mt76_for_each_q_rx(&dev->mt76, i)
194 mt76_queue_rx_reset(dev, i);
195
developerf2548b02022-08-16 15:37:38 +0800196+ if(mtk_wed_device_active(wed) && is_mt7915(&dev->mt76))
developer63541d22022-08-02 13:00:04 +0800197+ mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
198+ MT_WFDMA0_EXT0_RXWB_KEEP);
199+
200+ mt7915_dma_enable(dev);
201+
202 return 0;
203 }
204
205diff --git a/mt7915/mac.c b/mt7915/mac.c
developer047bc182022-11-16 12:20:48 +0800206index eac49465..cbdabea0 100644
developer63541d22022-08-02 13:00:04 +0800207--- a/mt7915/mac.c
208+++ b/mt7915/mac.c
developer047bc182022-11-16 12:20:48 +0800209@@ -948,6 +948,8 @@ void mt7915_wed_release_rx_buf(struct mtk_wed_device *wed)
developerf2548b02022-08-16 15:37:38 +0800210 mt76_put_rxwi(&dev->mt76, rxwi);
211 }
212
213+ mt76_free_pending_rxwi(&dev->mt76);
214+
215 if (wed->rx_page.va)
216 return;
217
developer047bc182022-11-16 12:20:48 +0800218@@ -958,6 +960,18 @@ void mt7915_wed_release_rx_buf(struct mtk_wed_device *wed)
developerf2548b02022-08-16 15:37:38 +0800219 return;
220 }
221
222+void mt7915_wed_trigger_ser(struct mtk_wed_device *wed)
223+{
224+ struct mt7915_dev *dev;
225+ u8 band_idx;
226+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
227+ band_idx = dev->phy.band_idx;
228+
229+ mt7915_mcu_set_ser(dev, SER_RECOVER, 1, band_idx);
230+
231+ return;
232+}
233+
234 static void
235 mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
236 {
developer047bc182022-11-16 12:20:48 +0800237@@ -1697,6 +1711,12 @@ void mt7915_mac_reset_work(struct work_struct *work)
238 if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
developer63541d22022-08-02 13:00:04 +0800239 return;
240
241+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
242+ mtk_wed_device_stop(&dev->mt76.mmio.wed, true);
243+ if (!is_mt7986(&dev->mt76))
244+ mt76_wr(dev, MT_INT_WED_MASK_CSR, 0);
245+ }
246+
247 ieee80211_stop_queues(mt76_hw(dev));
248 if (ext_phy)
249 ieee80211_stop_queues(ext_phy->hw);
developerf2548b02022-08-16 15:37:38 +0800250diff --git a/mt7915/mmio.c b/mt7915/mmio.c
developer047bc182022-11-16 12:20:48 +0800251index 1e22ec98..60b7886c 100644
developerf2548b02022-08-16 15:37:38 +0800252--- a/mt7915/mmio.c
253+++ b/mt7915/mmio.c
developer047bc182022-11-16 12:20:48 +0800254@@ -666,6 +666,8 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
developerf2548b02022-08-16 15:37:38 +0800255 wed->wlan.init_rx_buf = mt7915_wed_init_rx_buf;
256 wed->wlan.release_rx_buf = mt7915_wed_release_rx_buf;
257
258+ wed->wlan.ser_trigger = mt7915_wed_trigger_ser;
259+
260 dev->mt76.rx_token_size = wed->wlan.rx_pkt;
developerec567112022-10-11 11:02:55 +0800261 if (mtk_wed_device_attach(wed))
developerf2548b02022-08-16 15:37:38 +0800262 return 0;
263diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
developer047bc182022-11-16 12:20:48 +0800264index acc345a2..55fe6343 100644
developerf2548b02022-08-16 15:37:38 +0800265--- a/mt7915/mt7915.h
266+++ b/mt7915/mt7915.h
developer047bc182022-11-16 12:20:48 +0800267@@ -563,6 +563,7 @@ u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
developerf2548b02022-08-16 15:37:38 +0800268 u32 mt7915_wed_init_rx_buf(struct mtk_wed_device *wed,
269 int pkt_num);
270 void mt7915_wed_release_rx_buf(struct mtk_wed_device *wed);
271+void mt7915_wed_trigger_ser(struct mtk_wed_device *wed);
272 int mt7915_register_device(struct mt7915_dev *dev);
273 void mt7915_unregister_device(struct mt7915_dev *dev);
developerf10a8982022-10-17 12:01:44 +0800274 void mt7915_eeprom_rebonding(struct mt7915_dev *dev);
developer63541d22022-08-02 13:00:04 +0800275--
developerb403ad02022-11-08 10:16:29 +08002762.18.0
developer63541d22022-08-02 13:00:04 +0800277