From 4bae67631956c6878e1f055e1cb0e3dd2154f7b7 Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Thu, 28 Jul 2022 11:16:15 +0800
Subject: [PATCH] mt76: add SER support when WED is on

Export mt76_free_pending_rxwi() and pass a reset flag through
mt76_dma_wed_setup() to the WED TX/RX ring setup helpers so the WED
rings can be re-attached during recovery. On mt7915, reset the WED
device as part of the full DMA reset, hand-shake with the WED driver
when the FE requests a reset, stop WED before the MAC reset work
proceeds, and register a ser_trigger callback so WED can trigger SER
recovery.

Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
 dma.c           | 29 ++++++++++++++++++++---------
 dma.h           |  2 +-
 mt76.h          |  1 +
 mt7915/dma.c    | 36 +++++++++++++++++++++++++++++++-----
 mt7915/mac.c    | 20 ++++++++++++++++++++
 mt7915/mmio.c   |  2 ++
 mt7915/mt7915.h |  1 +
 7 files changed, 76 insertions(+), 15 deletions(-)

diff --git a/dma.c b/dma.c
index 3317d2b9..fa56ccfb 100644
--- a/dma.c
+++ b/dma.c
@@ -169,7 +169,7 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
 	local_bh_enable();
 }
 
-static void
+void
 mt76_free_pending_rxwi(struct mt76_dev *dev)
 {
 	struct mt76_txwi_cache *r;
@@ -183,6 +183,7 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
 	}
 	local_bh_enable();
 }
+EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);
 
 static void
 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
@@ -624,14 +625,18 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 	return frames;
 }
 
-static int
-mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
+int
+mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 {
 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
 	struct mtk_wed_device *wed = &dev->mmio.wed;
 	int ret, type, ring;
-	u8 flags = q->flags;
+	u8 flags;
 
+	if (!q || !q->ndesc)
+		return -EINVAL;
+
+	flags = q->flags;
 	if (!mtk_wed_device_active(wed))
 		q->flags &= ~MT_QFLAG_WED;
 
@@ -643,7 +648,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
 
 	switch (type) {
 	case MT76_WED_Q_TX:
-		ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs);
+		ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, reset);
 		if (!ret)
 			q->wed_regs = wed->tx_ring[ring].reg_base;
 		break;
@@ -659,7 +664,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
 		q->wed_regs = wed->txfree_ring.reg_base;
 		break;
 	case MT76_WED_Q_RX:
-		ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs);
+		ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
 		if (!ret)
 			q->wed_regs = wed->rx_ring[ring].reg_base;
 		break;
@@ -672,6 +677,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
 	return 0;
 #endif
 }
+EXPORT_SYMBOL_GPL(mt76_dma_wed_setup);
 
 static int
 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
@@ -704,7 +710,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 	if (!q->entry)
 		return -ENOMEM;
 
-	ret = mt76_dma_wed_setup(dev, q);
+	ret = mt76_dma_wed_setup(dev, q, false);
 	if (ret)
 		return ret;
 
@@ -755,8 +761,13 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
 
 	mt76_dma_rx_cleanup(dev, q);
-	mt76_dma_sync_idx(dev, q);
-	mt76_dma_rx_fill(dev, q);
+
+	mt76_dma_wed_setup(dev, q, true);
+
+	if (q->flags != MT_WED_Q_TXFREE) {
+		mt76_dma_sync_idx(dev, q);
+		mt76_dma_rx_fill(dev, q);
+	}
 
 	if (!q->rx_head)
 		return;
diff --git a/dma.h b/dma.h
index 90370d12..083cbca4 100644
--- a/dma.h
+++ b/dma.h
@@ -58,5 +58,5 @@ enum mt76_mcu_evt_type {
 int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
 void mt76_dma_attach(struct mt76_dev *dev);
 void mt76_dma_cleanup(struct mt76_dev *dev);
-
+int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
 #endif
diff --git a/mt76.h b/mt76.h
index 2903b625..831a47a9 100644
--- a/mt76.h
+++ b/mt76.h
@@ -1371,6 +1371,7 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
 void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
 struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+void mt76_free_pending_rxwi(struct mt76_dev *dev);
 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
 		      struct napi_struct *napi);
 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
diff --git a/mt7915/dma.c b/mt7915/dma.c
index 197a0169..e0a51316 100644
--- a/mt7915/dma.c
+++ b/mt7915/dma.c
@@ -522,6 +522,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
 int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
 {
 	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
+	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
 	int i;
 
 	/* clean up hw queues */
@@ -542,28 +543,53 @@ int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
 		mt7915_wfsys_reset(dev);
 
 	/* disable wfdma */
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);
 	mt7915_dma_disable(dev, force);
 
+	/* set wifi reset done, wait FE reset */
+	if (mtk_wed_device_active(wed) && atomic_read(&wed->fe_reset)) {
+		atomic_set(&wed->fe_reset, 0);
+		rtnl_lock();
+		complete(&wed->wlan_reset_done);
+		rtnl_unlock();
+		wait_for_completion(&wed->fe_reset_done);
+	}
+
 	/* reset hw queues */
 	for (i = 0; i < __MT_TXQ_MAX; i++) {
 		mt76_queue_reset(dev, dev->mphy.q_tx[i]);
-		if (mphy_ext)
+		if (mphy_ext) {
 			mt76_queue_reset(dev, mphy_ext->q_tx[i]);
+			if (mtk_wed_device_active(wed))
+				mt76_dma_wed_setup(&dev->mt76,
+						   mphy_ext->q_tx[i],
+						   true);
+		}
+		if (mtk_wed_device_active(wed))
+			mt76_dma_wed_setup(&dev->mt76, dev->mphy.q_tx[i],
+					   true);
 	}
 
 	for (i = 0; i < __MT_MCUQ_MAX; i++)
 		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
 
-	mt76_for_each_q_rx(&dev->mt76, i)
-		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+	mt76_for_each_q_rx(&dev->mt76, i) {
+		if (dev->mt76.q_rx[i].flags != MT_WED_Q_TXFREE)
+			mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+	}
 
 	mt76_tx_status_check(&dev->mt76, true);
 
-	mt7915_dma_enable(dev);
-
 	mt76_for_each_q_rx(&dev->mt76, i)
 		mt76_queue_rx_reset(dev, i);
 
+	if (mtk_wed_device_active(wed) && is_mt7915(&dev->mt76))
+		mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
+			 MT_WFDMA0_EXT0_RXWB_KEEP);
+
+	mt7915_dma_enable(dev);
+
 	return 0;
 }
 
diff --git a/mt7915/mac.c b/mt7915/mac.c
index 1a2cadc5..11b73c8f 100644
--- a/mt7915/mac.c
+++ b/mt7915/mac.c
@@ -916,6 +916,8 @@ void mt7915_wed_release_rx_buf(struct mtk_wed_device *wed)
 		mt76_put_rxwi(&dev->mt76, rxwi);
 	}
 
+	mt76_free_pending_rxwi(&dev->mt76);
+
 	if (wed->rx_page.va)
 		return;
 
@@ -926,6 +928,18 @@ void mt7915_wed_release_rx_buf(struct mtk_wed_device *wed)
 	return;
 }
 
+void mt7915_wed_trigger_ser(struct mtk_wed_device *wed)
+{
+	struct mt7915_dev *dev;
+	u8 band_idx;
+	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+	band_idx = dev->phy.band_idx;
+
+	mt7915_mcu_set_ser(dev, SER_RECOVER, 1, band_idx);
+
+	return;
+}
+
 static void
 mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
 {
@@ -1662,6 +1676,12 @@ void mt7915_mac_reset_work(struct work_struct *work)
 	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
 		return;
 
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+		mtk_wed_device_stop(&dev->mt76.mmio.wed, true);
+		if (!is_mt7986(&dev->mt76))
+			mt76_wr(dev, MT_INT_WED_MASK_CSR, 0);
+	}
+
 	ieee80211_stop_queues(mt76_hw(dev));
 	if (ext_phy)
 		ieee80211_stop_queues(ext_phy->hw);
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 111444d7..1ccec14f 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -757,6 +757,8 @@ mt7915_pci_wed_init(struct mt7915_dev *dev, struct device *pdev, int *irq)
 	wed->wlan.init_rx_buf = mt7915_wed_init_rx_buf;
 	wed->wlan.release_rx_buf = mt7915_wed_release_rx_buf;
 
+	wed->wlan.ser_trigger = mt7915_wed_trigger_ser;
+
 	dev->mt76.rx_token_size = wed->wlan.rx_pkt;
 	if (mtk_wed_device_attach(wed) != 0)
 		return 0;
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
index d23416ca..01b2f681 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -547,6 +547,7 @@ u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
 u32 mt7915_wed_init_rx_buf(struct mtk_wed_device *wed,
 			   int pkt_num);
 void mt7915_wed_release_rx_buf(struct mtk_wed_device *wed);
+void mt7915_wed_trigger_ser(struct mtk_wed_device *wed);
 int mt7915_register_device(struct mt7915_dev *dev);
 void mt7915_unregister_device(struct mt7915_dev *dev);
 int mt7915_eeprom_init(struct mt7915_dev *dev);
-- 
2.18.0
