// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 *         Roy Luo <royluo@google.com>
 *         Felix Fietkau <nbd@nbd.name>
 *         Lorenzo Bianconi <lorenzo@kernel.org>
 */
9
10#include <linux/etherdevice.h>
11#include <linux/timekeeping.h>
12
13#include "mt7615.h"
14#include "../dma.h"
15#include "mac.h"
16
17void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
18{
19 if (!e->txwi) {
20 dev_kfree_skb_any(e->skb);
21 return;
22 }
23
24 /* error path */
25 if (e->skb == DMA_DUMMY_DATA) {
26 struct mt76_txwi_cache *t;
27 struct mt7615_dev *dev;
28 struct mt7615_txp_common *txp;
29 u16 token;
30
31 dev = container_of(mdev, struct mt7615_dev, mt76);
32 txp = mt7615_txwi_to_txp(mdev, e->txwi);
33
34 if (is_mt7615(&dev->mt76))
35 token = le16_to_cpu(txp->fw.token);
36 else
37 token = le16_to_cpu(txp->hw.msdu_id[0]) &
38 ~MT_MSDU_ID_VALID;
39
40 t = mt76_token_put(mdev, token);
41 e->skb = t ? t->skb : NULL;
42 }
43
44 if (e->skb)
45 mt76_tx_complete_skb(mdev, e->wcid, e->skb);
46}
47
48static void
49mt7615_write_hw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
50 void *txp_ptr, u32 id)
51{
52 struct mt7615_hw_txp *txp = txp_ptr;
53 struct mt7615_txp_ptr *ptr = &txp->ptr[0];
54 int i, nbuf = tx_info->nbuf - 1;
55 u32 last_mask;
56
57 tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
58 tx_info->nbuf = 1;
59
60 txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
61
62 if (is_mt7663(&dev->mt76))
63 last_mask = MT_TXD_LEN_LAST;
64 else
65 last_mask = MT_TXD_LEN_AMSDU_LAST |
66 MT_TXD_LEN_MSDU_LAST;
67
68 for (i = 0; i < nbuf; i++) {
69 u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
70 u32 addr = tx_info->buf[i + 1].addr;
71
72 if (i == nbuf - 1)
73 len |= last_mask;
74
75 if (i & 1) {
76 ptr->buf1 = cpu_to_le32(addr);
77 ptr->len1 = cpu_to_le16(len);
78 ptr++;
79 } else {
80 ptr->buf0 = cpu_to_le32(addr);
81 ptr->len0 = cpu_to_le16(len);
82 }
83 }
84}
85
86static void
87mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
88 void *txp_ptr, u32 id)
89{
90 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
91 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
92 struct ieee80211_key_conf *key = info->control.hw_key;
93 struct ieee80211_vif *vif = info->control.vif;
94 struct mt7615_fw_txp *txp = txp_ptr;
95 int nbuf = tx_info->nbuf - 1;
96 int i;
97
98 for (i = 0; i < nbuf; i++) {
99 txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
100 txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
101 }
102 txp->nbuf = nbuf;
103
104 /* pass partial skb header to fw */
105 tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
106 tx_info->buf[1].len = MT_CT_PARSE_LEN;
107 tx_info->buf[1].skip_unmap = true;
108 tx_info->nbuf = MT_CT_DMA_BUF_NUM;
109
110 txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
111
112 if (!key)
113 txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
114
115 if (ieee80211_is_mgmt(hdr->frame_control))
116 txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
117
118 if (vif) {
119 struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
120
121 txp->bss_idx = mvif->idx;
122 }
123
124 txp->token = cpu_to_le16(id);
125 txp->rept_wds_wcid = 0xff;
126}
127
/* Prepare an skb for DMA transmission.
 *
 * Writes the txwi descriptor and the txp (fw flavour on mt7615, hw
 * flavour otherwise), allocates a token id so the frame can be matched
 * again on completion, and replaces tx_info->skb with DMA_DUMMY_DATA to
 * mark that the real skb is now tracked through the token (see
 * mt7615_tx_complete_skb()).
 *
 * Returns 0 on success or a negative error if no token id is available.
 */
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	int pid, id;
	u8 *txwi = (u8 *)txwi_ptr;
	struct mt76_txwi_cache *t;
	struct mt7615_sta *msta;
	void *txp;

	msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
		/* rate-control probe frame: program the probe rates for
		 * this station, on the ext phy if the frame targets it
		 */
		struct mt7615_phy *phy = &dev->phy;

		if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
			phy = mdev->phy2->priv;

		spin_lock_bh(&dev->mt76.lock);
		mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
				     msta->rates);
		spin_unlock_bh(&dev->mt76.lock);
	}

	/* the txwi cache entry lives right behind the txwi descriptor */
	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_get(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
			      pid, key, false);

	/* the txp follows the txwi descriptor in the same buffer */
	txp = txwi + MT_TXD_SIZE;
	memset(txp, 0, sizeof(struct mt7615_txp_common));
	if (is_mt7615(&dev->mt76))
		mt7615_write_fw_txp(dev, tx_info, txp, id);
	else
		mt7615_write_hw_txp(dev, tx_info, txp, id);

	/* real skb is recovered via the token id on completion */
	tx_info->skb = DMA_DUMMY_DATA;

	return 0;
}
180
/* Reset all WPDMA queues.
 *
 * Disables the tx/rx DMA engines, flushes every tx (data and MCU) queue
 * and resets the rx rings, then restarts DMA via mt7615_dma_start().
 * The statement order is part of the hardware reset sequence — callers
 * (see mt7615_mac_reset_work()) must have quiesced tx beforehand.
 */
void mt7615_dma_reset(struct mt7615_dev *dev)
{
	int i;

	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);

	/* give the DMA engines time to actually stop */
	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	/* flush any pending tx status entries */
	mt76_tx_status_check(&dev->mt76, true);

	mt7615_dma_start(dev);
}
EXPORT_SYMBOL_GPL(mt7615_dma_reset);
205
206static void
207mt7615_hif_int_event_trigger(struct mt7615_dev *dev, u8 event)
208{
209 u32 reg = MT_MCU_INT_EVENT;
210
211 if (is_mt7663(&dev->mt76))
212 reg = MT7663_MCU_INT_EVENT;
213
214 mt76_wr(dev, reg, event);
215
216 mt7622_trigger_hif_int(dev, true);
217 mt7622_trigger_hif_int(dev, false);
218}
219
220static bool
221mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state)
222{
223 bool ret;
224
225 ret = wait_event_timeout(dev->reset_wait,
226 (READ_ONCE(dev->reset_state) & state),
227 MT7615_RESET_TIMEOUT);
228 WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
229 return ret;
230}
231
232static void
233mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
234{
235 struct ieee80211_hw *hw = priv;
236 struct mt7615_dev *dev = mt7615_hw_dev(hw);
237
238 switch (vif->type) {
239 case NL80211_IFTYPE_MESH_POINT:
240 case NL80211_IFTYPE_ADHOC:
241 case NL80211_IFTYPE_AP:
242 mt7615_mcu_add_beacon(dev, hw, vif,
243 vif->bss_conf.enable_beacon);
244 break;
245 default:
246 break;
247 }
248}
249
250static void
251mt7615_update_beacons(struct mt7615_dev *dev)
252{
253 ieee80211_iterate_active_interfaces(dev->mt76.hw,
254 IEEE80211_IFACE_ITER_RESUME_ALL,
255 mt7615_update_vif_beacon, dev->mt76.hw);
256
257 if (!dev->mt76.phy2)
258 return;
259
260 ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
261 IEEE80211_IFACE_ITER_RESUME_ALL,
262 mt7615_update_vif_beacon, dev->mt76.phy2->hw);
263}
264
/* Full MAC recovery handshake with the MCU.
 *
 * Scheduled when the MCU requests a reset (MT_MCU_CMD_STOP_PDMA set in
 * dev->reset_state). The sequence quiesces mac80211 and all driver
 * workers, acknowledges PDMA stop to the MCU, resets the DMA queues
 * while the MCU performs its own recovery, then restarts everything and
 * restores the beacon templates. The statement order below is part of
 * the handshake protocol — do not reorder.
 */
void mt7615_mac_reset_work(struct work_struct *work)
{
	struct mt7615_phy *phy2;
	struct mt76_phy *ext_phy;
	struct mt7615_dev *dev;
	unsigned long timeout;

	dev = container_of(work, struct mt7615_dev, reset_work);
	ext_phy = dev->mt76.phy2;
	phy2 = ext_phy ? ext_phy->priv : NULL;

	/* only run when the MCU actually asked for a PDMA stop */
	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA))
		return;

	/* stop mac80211 tx on both phys */
	ieee80211_stop_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_stop_queues(ext_phy->hw);

	/* quiesce all driver work that could touch the hardware */
	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	del_timer_sync(&dev->phy.roc_timer);
	cancel_work_sync(&dev->phy.roc_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
		del_timer_sync(&phy2->roc_timer);
		cancel_work_sync(&phy2->roc_work);
	}

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (ext_phy)
		mt76_txq_schedule_all(ext_phy);

	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);
	napi_disable(&dev->mt76.tx_napi);

	mt7615_mutex_acquire(dev);

	/* acknowledge the PDMA stop to the MCU */
	mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_STOPPED);

	if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7615_dma_reset(dev);

		/* release outstanding tx tokens and reinitialize the idr */
		mt7615_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0);

		mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_INIT);
		mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);

	mt76_worker_enable(&dev->mt76.tx_worker);

	/* re-enable NAPI with bh disabled so napi_schedule() takes effect */
	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_wake_queues(ext_phy->hw);

	/* tell the MCU the host side is done and wait for normal state */
	mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_RESET_DONE);
	mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	/* beacon templates were lost across the reset */
	mt7615_update_beacons(dev);

	mt7615_mutex_release(dev);

	/* restart periodic mac work on both phys */
	timeout = mt7615_get_macwork_timeout(dev);
	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     timeout);
	if (phy2)
		ieee80211_queue_delayed_work(ext_phy->hw,
					     &phy2->mt76->mac_work, timeout);

}