// SPDX-License-Identifier: ISC
/* Copyright (C) 2021 MediaTek Inc. */

#include "mt7921.h"
#include "../dma.h"
#include "mac.h"

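/* Fill the hardware TXP (TX path descriptor): record the MSDU token in
 * msdu_id[0] and pack the remaining DMA fragments into buf/len pairs,
 * two fragments per mt7921_txp_ptr entry. The last fragment is flagged
 * with MT_TXD_LEN_LAST so the hardware knows where the MSDU ends.
 */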
static void
mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info,
		    void *txp_ptr, u32 id)
{
	struct mt7921_hw_txp *txp = txp_ptr;
	struct mt7921_txp_ptr *ptr = &txp->ptr[0];
	int i, nbuf = tx_info->nbuf - 1;

	tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
	tx_info->nbuf = 1;

	txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);

	for (i = 0; i < nbuf; i++) {
		u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
		u32 addr = tx_info->buf[i + 1].addr;

		if (i == nbuf - 1)
			len |= MT_TXD_LEN_LAST;

		if (i & 1) {
			ptr->buf1 = cpu_to_le32(addr);
			ptr->len1 = cpu_to_le16(len);
			ptr++;
		} else {
			ptr->buf0 = cpu_to_le32(addr);
			ptr->len0 = cpu_to_le16(len);
		}
	}
}

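/* Prepare an skb for DMA transmission: consume a TXWI token, request a
 * TX status report at most every 250 ms per station, write the TXWI and
 * the hardware TXP, and hand the frame to the DMA layer. On success
 * tx_info->skb is replaced by DMA_DUMMY_DATA, since the real skb is
 * tracked through the token until completion.
 */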
int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			   enum mt76_txq_id qid, struct mt76_wcid *wcid,
			   struct ieee80211_sta *sta,
			   struct mt76_tx_info *tx_info)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct mt76_txwi_cache *t;
	struct mt7921_txp_common *txp;
	int id, pid;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	if (sta) {
		struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;

		if (time_after(jiffies, msta->last_txs + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->last_txs = jiffies;
		}
	}

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt7921_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
			      pid, false);

	txp = (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE);
	memset(txp, 0, sizeof(struct mt7921_txp_common));
	mt7921_write_hw_txp(dev, tx_info, txp, id);

	tx_info->skb = DMA_DUMMY_DATA;

	return 0;
}

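/* Walk the buf0/len0 and buf1/len1 pairs of a hardware TXP and unmap
 * every DMA fragment, stopping at the one flagged MT_TXD_LEN_LAST.
 */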
static void
mt7921_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	struct mt7921_txp_common *txp;
	int i;

	txp = mt7921_txwi_to_txp(dev, t);

	for (i = 0; i < ARRAY_SIZE(txp->hw.ptr); i++) {
		struct mt7921_txp_ptr *ptr = &txp->hw.ptr[i];
		bool last;
		u16 len;

		len = le16_to_cpu(ptr->len0);
		last = len & MT_TXD_LEN_LAST;
		len &= MT_TXD_LEN_MASK;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
				 DMA_TO_DEVICE);
		if (last)
			break;

		len = le16_to_cpu(ptr->len1);
		last = len & MT_TXD_LEN_LAST;
		len &= MT_TXD_LEN_MASK;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
				 DMA_TO_DEVICE);
		if (last)
			break;
	}
}

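/* Release a TXWI cache entry: unmap its DMA fragments, run TX
 * aggregation bookkeeping (mt7921_tx_check_aggr) for non-EAPOL frames,
 * complete the skb towards mac80211 and return the TXWI to the free
 * pool. Without a station, the WCID index is recovered from TXD word 1.
 */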
static void
mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, bool clear_status,
		 struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	__le32 *txwi;
	u16 wcid_idx;

	mt7921_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7921_tx_check_aggr(sta, txwi);

		wcid_idx = wcid->idx;
	} else {
		wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

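/* Handle a TXRX_NOTIFY (TX free done) event: clean up the PSD and BE TX
 * queues, then walk the info words, which interleave wcid-pair entries
 * with msdu-id entries. Stations named in wcid pairs are queued on the
 * poll list; each freed MSDU has its token released and its skb batched
 * on a local free list that is consumed at the end.
 */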
static void
mt7921e_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
{
	struct mt7921_tx_free *free = (struct mt7921_tx_free *)data;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	LIST_HEAD(free_list);
	bool wake = false;
	u8 i, count;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);

	count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
	if (WARN_ON_ONCE((void *)&free->info[count] > end))
		return;

	for (i = 0; i < count; i++) {
		u32 msdu, info = le32_to_cpu(free->info[i]);
		u8 stat;

		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		if (info & MT_TX_FREE_PAIR) {
			struct mt7921_sta *msta;
			struct mt76_wcid *wcid;
			u16 idx;

			count++;
			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7921_sta, wcid);
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
			continue;
		}

		msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
		stat = FIELD_GET(MT_TX_FREE_STATUS, info);

		txwi = mt76_token_release(mdev, msdu, &wake);
		if (!txwi)
			continue;

		mt7921_txwi_free(dev, txwi, sta, stat, &free_list);
	}

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}

	rcu_read_lock();
	mt7921_mac_sta_poll(dev);
	rcu_read_unlock();

	mt76_worker_schedule(&dev->mt76.tx_worker);
}

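/* Early RX dispatch on the DMA path: consume TXRX_NOTIFY and TXS events
 * in place (returning false so no skb is built for them) and let every
 * other packet type continue up the normal RX path. TXS records are
 * eight dwords each, starting two dwords into the buffer.
 */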
bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7921e_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7921_mac_add_txs(dev, rxd);
		return false;
	default:
		return true;
	}
}

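/* RX skb dispatch: TXRX_NOTIFY events that arrive as skbs are handled
 * by mt7921e_mac_tx_free and consumed here; everything else goes to the
 * common mt7921_queue_rx_skb handler.
 */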
void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			  struct sk_buff *skb)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7921e_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	default:
		mt7921_queue_rx_skb(mdev, q, skb);
		break;
	}
}

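/* DMA TX completion hook. On the error path the queue entry still
 * carries DMA_DUMMY_DATA, so the real skb has to be looked up (and its
 * token released) via the MSDU id stored in the hardware TXP.
 */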
void mt7921e_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	/* error path */
	if (e->skb == DMA_DUMMY_DATA) {
		struct mt76_txwi_cache *t;
		struct mt7921_txp_common *txp;
		u16 token;

		txp = mt7921_txwi_to_txp(mdev, e->txwi);
		token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
		t = mt76_token_put(mdev, token);
		e->skb = t ? t->skb : NULL;
	}

	if (e->skb)
		mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}

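/* Drop every outstanding TXWI token, e.g. ahead of the WPDMA reset in
 * mt7921e_mac_reset, and destroy the token IDR.
 */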
void mt7921_tx_token_put(struct mt7921_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7921_txwi_free(dev, txwi, NULL, false, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

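/* Full MAC recovery: mask interrupts, stop the TX worker and all NAPI
 * contexts, drop outstanding tokens and reset the WPDMA, then bring the
 * firmware, EEPROM, MAC and PHY back up before re-enabling interrupts
 * and TX scheduling.
 */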
int mt7921e_mac_reset(struct mt7921_dev *dev)
{
	int i, err;

	mt7921e_mcu_drv_pmctrl(dev);

	mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);

	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	skb_queue_purge(&dev->mt76.mcu.res_q);

	mt76_txq_schedule_all(&dev->mphy);

	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]);
	napi_disable(&dev->mt76.napi[MT_RXQ_MCU]);
	napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]);
	napi_disable(&dev->mt76.tx_napi);

	mt7921_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7921_wpdma_reset(dev, true);

	local_bh_disable();
	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	dev->fw_assert = false;
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);

	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA,
		MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
		MT_INT_MCU_CMD);
	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);

	err = mt7921e_driver_own(dev);
	if (err)
		return err;

	err = mt7921_run_firmware(dev);
	if (err)
		goto out;

	err = mt7921_mcu_set_eeprom(dev);
	if (err)
		goto out;

	err = mt7921_mac_init(dev);
	if (err)
		goto out;

	err = __mt7921_start(&dev->phy);
out:
	clear_bit(MT76_RESET, &dev->mphy.state);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);

	return err;
}