blob: 50c6e8f1dd836a0dbdc6c9fe7874359dbb8826bd [file] [log] [blame]
// SPDX-License-Identifier: ISC
2/*
3 * Copyright (C) 2022 MediaTek Inc.
4 */
5
6#include <linux/etherdevice.h>
7#include <linux/timekeeping.h>
8#include "mt7996.h"
9#include "../dma.h"
10#include "mac.h"
11#include "mcu.h"
12
13#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
14
15#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
16#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
17 IEEE80211_RADIOTAP_HE_##f)
18
19static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
20 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
21 .radar_pattern = {
22 [5] = { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 },
23 [6] = { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 },
24 [7] = { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 },
25 [8] = { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 },
26 [9] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
27 [10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
28 [11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
29 [12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
30 },
31};
32
33static const struct mt7996_dfs_radar_spec fcc_radar_specs = {
34 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
35 .radar_pattern = {
36 [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
37 [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
38 [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
39 [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
40 [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
41 },
42};
43
44static const struct mt7996_dfs_radar_spec jp_radar_specs = {
45 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
46 .radar_pattern = {
47 [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
48 [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
49 [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
50 [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
51 [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
52 [13] = { 1, 0, 7, 32, 28, 0, 3836, 3856, 14, 1, 1 },
53 [14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 },
54 [15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 0, 0, 12, 32, 28 },
55 },
56};
57
58static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
59 u16 idx, bool unicast)
60{
61 struct mt7996_sta *sta;
62 struct mt76_wcid *wcid;
63
64 if (idx >= ARRAY_SIZE(dev->mt76.wcid))
65 return NULL;
66
67 wcid = rcu_dereference(dev->mt76.wcid[idx]);
68 if (unicast || !wcid)
69 return wcid;
70
71 if (!wcid->sta)
72 return NULL;
73
74 sta = container_of(wcid, struct mt7996_sta, wcid);
75 if (!sta->vif)
76 return NULL;
77
78 return &sta->vif->sta.wcid;
79}
80
81void mt7996_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
82{
83}
84
85bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
86{
87 mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
88 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
89
90 return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
91 0, 5000);
92}
93
94u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
95{
96 mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
97 FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
98
99 return MT_WTBL_LMAC_OFFS(wcid, dw);
100}
101
102static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
103{
104 static const u8 ac_to_tid[] = {
105 [IEEE80211_AC_BE] = 0,
106 [IEEE80211_AC_BK] = 1,
107 [IEEE80211_AC_VI] = 4,
108 [IEEE80211_AC_VO] = 6
109 };
110 struct ieee80211_sta *sta;
111 struct mt7996_sta *msta;
112 struct rate_info *rate;
113 u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
114 LIST_HEAD(sta_poll_list);
115 int i;
116
117 spin_lock_bh(&dev->sta_poll_lock);
118 list_splice_init(&dev->sta_poll_list, &sta_poll_list);
119 spin_unlock_bh(&dev->sta_poll_lock);
120
121 rcu_read_lock();
122
123 while (true) {
124 bool clear = false;
125 u32 addr, val;
126 u16 idx;
127 u8 bw;
128
129 spin_lock_bh(&dev->sta_poll_lock);
130 if (list_empty(&sta_poll_list)) {
131 spin_unlock_bh(&dev->sta_poll_lock);
132 break;
133 }
134 msta = list_first_entry(&sta_poll_list,
135 struct mt7996_sta, poll_list);
136 list_del_init(&msta->poll_list);
137 spin_unlock_bh(&dev->sta_poll_lock);
138
139 idx = msta->wcid.idx;
140 addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);
141
142 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
143 u32 tx_last = msta->airtime_ac[i];
144 u32 rx_last = msta->airtime_ac[i + 4];
145
146 msta->airtime_ac[i] = mt76_rr(dev, addr);
147 msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
148
149 tx_time[i] = msta->airtime_ac[i] - tx_last;
150 rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
151
152 if ((tx_last | rx_last) & BIT(30))
153 clear = true;
154
155 addr += 8;
156 }
157
158 if (clear) {
159 mt7996_mac_wtbl_update(dev, idx,
160 MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
161 memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
162 }
163
164 if (!msta->wcid.sta)
165 continue;
166
167 sta = container_of((void *)msta, struct ieee80211_sta,
168 drv_priv);
169 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
170 u8 q = mt76_connac_lmac_mapping(i);
171 u32 tx_cur = tx_time[q];
172 u32 rx_cur = rx_time[q];
173 u8 tid = ac_to_tid[i];
174
175 if (!tx_cur && !rx_cur)
176 continue;
177
178 ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
179 }
180
181 /*
182 * We don't support reading GI info from txs packets.
183 * For accurate tx status reporting and AQL improvement,
184 * we need to make sure that flags match so polling GI
185 * from per-sta counters directly.
186 */
187 rate = &msta->wcid.rate;
188
189 switch (rate->bw) {
190 case RATE_INFO_BW_160:
191 bw = IEEE80211_STA_RX_BW_160;
192 break;
193 case RATE_INFO_BW_80:
194 bw = IEEE80211_STA_RX_BW_80;
195 break;
196 case RATE_INFO_BW_40:
197 bw = IEEE80211_STA_RX_BW_40;
198 break;
199 default:
200 bw = IEEE80211_STA_RX_BW_20;
201 break;
202 }
203
204 if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
205 u8 offs = 24 + 2 * bw;
206
207 addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 6);
208 val = mt76_rr(dev, addr);
209 rate->he_gi = (val & (0x3 << offs)) >> offs;
210 } else if (rate->flags &
211 (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
212 if (val & BIT(12 + bw))
213 rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
214 else
215 rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
216 }
217 }
218
219 rcu_read_unlock();
220}
221
222static void
223mt7996_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
224 struct ieee80211_radiotap_he *he,
225 __le32 *rxv)
226{
227 u32 ru_h, ru_l;
228 u8 ru, offs = 0;
229
230 ru_l = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC_L);
231 ru_h = le32_get_bits(rxv[1], MT_PRXV_HE_RU_ALLOC_H);
232 ru = (u8)(ru_l | ru_h << 4);
233
234 status->bw = RATE_INFO_BW_HE_RU;
235
236 switch (ru) {
237 case 0 ... 36:
238 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
239 offs = ru;
240 break;
241 case 37 ... 52:
242 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
243 offs = ru - 37;
244 break;
245 case 53 ... 60:
246 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
247 offs = ru - 53;
248 break;
249 case 61 ... 64:
250 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
251 offs = ru - 61;
252 break;
253 case 65 ... 66:
254 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
255 offs = ru - 65;
256 break;
257 case 67:
258 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
259 break;
260 case 68:
261 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
262 break;
263 }
264
265 he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
266 he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
267 le16_encode_bits(offs,
268 IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
269}
270
271static void
272mt7996_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv)
273{
274 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
275 static const struct ieee80211_radiotap_he_mu mu_known = {
276 .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) |
277 HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) |
278 HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) |
279 HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN),
280 .flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
281 };
282 struct ieee80211_radiotap_he_mu *he_mu = NULL;
283
284 status->flag |= RX_FLAG_RADIOTAP_HE_MU;
285
286 he_mu = skb_push(skb, sizeof(mu_known));
287 memcpy(he_mu, &mu_known, sizeof(mu_known));
288
289#define MU_PREP(f, v) le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f)
290
291 he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx);
292 if (status->he_dcm)
293 he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm);
294
295 he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) |
296 MU_PREP(FLAGS2_SIG_B_SYMS_USERS,
297 le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER));
298
299 he_mu->ru_ch1[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU0);
300
301 if (status->bw >= RATE_INFO_BW_40) {
302 he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN);
303 he_mu->ru_ch2[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU1);
304 }
305
306 if (status->bw >= RATE_INFO_BW_80) {
307 he_mu->ru_ch1[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU2);
308 he_mu->ru_ch2[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU3);
309 }
310}
311
312static void
313mt7996_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u8 mode)
314{
315 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
316 static const struct ieee80211_radiotap_he known = {
317 .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
318 HE_BITS(DATA1_DATA_DCM_KNOWN) |
319 HE_BITS(DATA1_STBC_KNOWN) |
320 HE_BITS(DATA1_CODING_KNOWN) |
321 HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
322 HE_BITS(DATA1_DOPPLER_KNOWN) |
323 HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
324 HE_BITS(DATA1_BSS_COLOR_KNOWN),
325 .data2 = HE_BITS(DATA2_GI_KNOWN) |
326 HE_BITS(DATA2_TXBF_KNOWN) |
327 HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
328 HE_BITS(DATA2_TXOP_KNOWN),
329 };
330 struct ieee80211_radiotap_he *he = NULL;
331 u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;
332
333 status->flag |= RX_FLAG_RADIOTAP_HE;
334
335 he = skb_push(skb, sizeof(known));
336 memcpy(he, &known, sizeof(known));
337
338 he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
339 HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
340 he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
341 he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
342 le16_encode_bits(ltf_size,
343 IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
344 if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF)
345 he->data5 |= HE_BITS(DATA5_TXBF);
346 he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
347 HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
348
349 switch (mode) {
350 case MT_PHY_TYPE_HE_SU:
351 he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
352 HE_BITS(DATA1_UL_DL_KNOWN) |
353 HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
354 HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
355
356 he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
357 HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
358 break;
359 case MT_PHY_TYPE_HE_EXT_SU:
360 he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
361 HE_BITS(DATA1_UL_DL_KNOWN) |
362 HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
363
364 he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
365 break;
366 case MT_PHY_TYPE_HE_MU:
367 he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
368 HE_BITS(DATA1_UL_DL_KNOWN);
369
370 he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
371 he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]);
372
373 mt7996_mac_decode_he_radiotap_ru(status, he, rxv);
374 mt7996_mac_decode_he_mu_radiotap(skb, rxv);
375 break;
376 case MT_PHY_TYPE_HE_TB:
377 he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
378 HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
379 HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
380 HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
381
382 he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
383 HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
384 HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
385 HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
386
387 mt7996_mac_decode_he_radiotap_ru(status, he, rxv);
388 break;
389 default:
390 break;
391 }
392}
393
394/* The HW does not translate the mac header to 802.3 for mesh point */
395static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
396{
397 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
398 struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
399 struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
400 __le32 *rxd = (__le32 *)skb->data;
401 struct ieee80211_sta *sta;
402 struct ieee80211_vif *vif;
403 struct ieee80211_hdr hdr;
404 u16 frame_control;
405
406 if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
407 MT_RXD3_NORMAL_U2M)
408 return -EINVAL;
409
410 if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
411 return -EINVAL;
412
413 if (!msta || !msta->vif)
414 return -EINVAL;
415
416 sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
417 vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
418
419 /* store the info from RXD and ethhdr to avoid being overridden */
420 frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
421 hdr.frame_control = cpu_to_le16(frame_control);
422 hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
423 hdr.duration_id = 0;
424
425 ether_addr_copy(hdr.addr1, vif->addr);
426 ether_addr_copy(hdr.addr2, sta->addr);
427 switch (frame_control & (IEEE80211_FCTL_TODS |
428 IEEE80211_FCTL_FROMDS)) {
429 case 0:
430 ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
431 break;
432 case IEEE80211_FCTL_FROMDS:
433 ether_addr_copy(hdr.addr3, eth_hdr->h_source);
434 break;
435 case IEEE80211_FCTL_TODS:
436 ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
437 break;
438 case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
439 ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
440 ether_addr_copy(hdr.addr4, eth_hdr->h_source);
441 break;
442 default:
443 break;
444 }
445
446 skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
447 if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
448 eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
449 ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
450 else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
451 ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
452 else
453 skb_pull(skb, 2);
454
455 if (ieee80211_has_order(hdr.frame_control))
456 memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
457 IEEE80211_HT_CTL_LEN);
458 if (ieee80211_is_data_qos(hdr.frame_control)) {
459 __le16 qos_ctrl;
460
461 qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
462 memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
463 IEEE80211_QOS_CTL_LEN);
464 }
465
466 if (ieee80211_has_a4(hdr.frame_control))
467 memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
468 else
469 memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);
470
471 return 0;
472}
473
474static int
475mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
476 struct mt76_rx_status *status,
477 struct ieee80211_supported_band *sband,
478 __le32 *rxv, u8 *mode)
479{
480 u32 v0, v2;
481 u8 stbc, gi, bw, dcm, nss;
482 int i, idx;
483 bool cck = false;
484
485 v0 = le32_to_cpu(rxv[0]);
486 v2 = le32_to_cpu(rxv[2]);
487
488 idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
489 nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;
490
491 stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
492 gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
493 *mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
494 dcm = FIELD_GET(MT_PRXV_DCM, v2);
495 bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);
496
497 switch (*mode) {
498 case MT_PHY_TYPE_CCK:
499 cck = true;
500 fallthrough;
501 case MT_PHY_TYPE_OFDM:
502 i = mt76_get_rate(&dev->mt76, sband, i, cck);
503 break;
504 case MT_PHY_TYPE_HT_GF:
505 case MT_PHY_TYPE_HT:
506 status->encoding = RX_ENC_HT;
507 if (gi)
508 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
509 if (i > 31)
510 return -EINVAL;
511 break;
512 case MT_PHY_TYPE_VHT:
513 status->nss = nss;
514 status->encoding = RX_ENC_VHT;
515 if (gi)
516 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
517 if (i > 11)
518 return -EINVAL;
519 break;
520 case MT_PHY_TYPE_HE_MU:
521 case MT_PHY_TYPE_HE_SU:
522 case MT_PHY_TYPE_HE_EXT_SU:
523 case MT_PHY_TYPE_HE_TB:
524 status->nss = nss;
525 status->encoding = RX_ENC_HE;
526 i &= GENMASK(3, 0);
527
528 if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
529 status->he_gi = gi;
530
531 status->he_dcm = dcm;
532 break;
533 default:
534 return -EINVAL;
535 }
536 status->rate_idx = i;
537
538 switch (bw) {
539 case IEEE80211_STA_RX_BW_20:
540 break;
541 case IEEE80211_STA_RX_BW_40:
542 if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
543 (idx & MT_PRXV_TX_ER_SU_106T)) {
544 status->bw = RATE_INFO_BW_HE_RU;
545 status->he_ru =
546 NL80211_RATE_INFO_HE_RU_ALLOC_106;
547 } else {
548 status->bw = RATE_INFO_BW_40;
549 }
550 break;
551 case IEEE80211_STA_RX_BW_80:
552 status->bw = RATE_INFO_BW_80;
553 break;
554 case IEEE80211_STA_RX_BW_160:
555 status->bw = RATE_INFO_BW_160;
556 break;
557 default:
558 return -EINVAL;
559 }
560
561 status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
562 if (*mode < MT_PHY_TYPE_HE_SU && gi)
563 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
564
565 return 0;
566}
567
568static int
569mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
570{
571 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
572 struct mt76_phy *mphy = &dev->mt76.phy;
573 struct mt7996_phy *phy = &dev->phy;
574 struct ieee80211_supported_band *sband;
575 __le32 *rxd = (__le32 *)skb->data;
576 __le32 *rxv = NULL;
577 u32 rxd0 = le32_to_cpu(rxd[0]);
578 u32 rxd1 = le32_to_cpu(rxd[1]);
579 u32 rxd2 = le32_to_cpu(rxd[2]);
580 u32 rxd3 = le32_to_cpu(rxd[3]);
581 u32 rxd4 = le32_to_cpu(rxd[4]);
582 u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
583 u32 csum_status = *(u32 *)skb->cb;
584 bool unicast, insert_ccmp_hdr = false;
585 u8 remove_pad, amsdu_info, band_idx;
586 u8 mode = 0, qos_ctl = 0;
587 bool hdr_trans;
588 u16 hdr_gap;
589 u16 seq_ctrl = 0;
590 __le16 fc = 0;
591 int idx;
592
593 memset(status, 0, sizeof(*status));
594
595 band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
596 mphy = dev->mt76.phys[band_idx];
597 phy = mphy->priv;
598 status->phy_idx = mphy->band_idx;
599
600 if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
601 return -EINVAL;
602
603 if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
604 return -EINVAL;
605
606 hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
607 if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
608 return -EINVAL;
609
610 /* ICV error or CCMP/BIP/WPI MIC error */
611 if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
612 status->flag |= RX_FLAG_ONLY_MONITOR;
613
614 unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
615 idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
616 status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);
617
618 if (status->wcid) {
619 struct mt7996_sta *msta;
620
621 msta = container_of(status->wcid, struct mt7996_sta, wcid);
622 spin_lock_bh(&dev->sta_poll_lock);
623 if (list_empty(&msta->poll_list))
624 list_add_tail(&msta->poll_list, &dev->sta_poll_list);
625 spin_unlock_bh(&dev->sta_poll_lock);
626 }
627
628 status->freq = mphy->chandef.chan->center_freq;
629 status->band = mphy->chandef.chan->band;
630 if (status->band == NL80211_BAND_5GHZ)
631 sband = &mphy->sband_5g.sband;
632 else if (status->band == NL80211_BAND_6GHZ)
633 sband = &mphy->sband_6g.sband;
634 else
635 sband = &mphy->sband_2g.sband;
636
637 if (!sband->channels)
638 return -EINVAL;
639
640 if ((rxd0 & csum_mask) == csum_mask &&
641 !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
642 skb->ip_summed = CHECKSUM_UNNECESSARY;
643
644 if (rxd1 & MT_RXD3_NORMAL_FCS_ERR)
645 status->flag |= RX_FLAG_FAILED_FCS_CRC;
646
647 if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
648 status->flag |= RX_FLAG_MMIC_ERROR;
649
650 if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
651 !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
652 status->flag |= RX_FLAG_DECRYPTED;
653 status->flag |= RX_FLAG_IV_STRIPPED;
654 status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
655 }
656
657 remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
658
659 if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
660 return -EINVAL;
661
662 rxd += 8;
663 if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
664 u32 v0 = le32_to_cpu(rxd[0]);
665 u32 v2 = le32_to_cpu(rxd[2]);
666
667 fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
668 qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
669 seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);
670
671 rxd += 4;
672 if ((u8 *)rxd - skb->data >= skb->len)
673 return -EINVAL;
674 }
675
676 if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
677 u8 *data = (u8 *)rxd;
678
679 if (status->flag & RX_FLAG_DECRYPTED) {
680 switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
681 case MT_CIPHER_AES_CCMP:
682 case MT_CIPHER_CCMP_CCX:
683 case MT_CIPHER_CCMP_256:
684 insert_ccmp_hdr =
685 FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
686 fallthrough;
687 case MT_CIPHER_TKIP:
688 case MT_CIPHER_TKIP_NO_MIC:
689 case MT_CIPHER_GCMP:
690 case MT_CIPHER_GCMP_256:
691 status->iv[0] = data[5];
692 status->iv[1] = data[4];
693 status->iv[2] = data[3];
694 status->iv[3] = data[2];
695 status->iv[4] = data[1];
696 status->iv[5] = data[0];
697 break;
698 default:
699 break;
700 }
701 }
702 rxd += 4;
703 if ((u8 *)rxd - skb->data >= skb->len)
704 return -EINVAL;
705 }
706
707 if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
708 status->timestamp = le32_to_cpu(rxd[0]);
709 status->flag |= RX_FLAG_MACTIME_START;
710
711 if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
712 status->flag |= RX_FLAG_AMPDU_DETAILS;
713
714 /* all subframes of an A-MPDU have the same timestamp */
715 if (phy->rx_ampdu_ts != status->timestamp) {
716 if (!++phy->ampdu_ref)
717 phy->ampdu_ref++;
718 }
719 phy->rx_ampdu_ts = status->timestamp;
720
721 status->ampdu_ref = phy->ampdu_ref;
722 }
723
724 rxd += 4;
725 if ((u8 *)rxd - skb->data >= skb->len)
726 return -EINVAL;
727 }
728
729 /* RXD Group 3 - P-RXV */
730 if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
731 u32 v3;
732 int ret;
733
734 rxv = rxd;
735 rxd += 4;
736 if ((u8 *)rxd - skb->data >= skb->len)
737 return -EINVAL;
738
739 v3 = le32_to_cpu(rxv[3]);
740
741 status->chains = mphy->antenna_mask;
742 status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
743 status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
744 status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
745 status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);
746
747 /* RXD Group 5 - C-RXV */
748 if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
749 rxd += 24;
750 if ((u8 *)rxd - skb->data >= skb->len)
751 return -EINVAL;
752 }
753
754 ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
755 if (ret < 0)
756 return ret;
757 }
758
759 amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
760 status->amsdu = !!amsdu_info;
761 if (status->amsdu) {
762 status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
763 status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
764 }
765
766 hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
767 if (hdr_trans && ieee80211_has_morefrags(fc)) {
768 if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
769 return -EINVAL;
770 hdr_trans = false;
771 } else {
772 int pad_start = 0;
773
774 skb_pull(skb, hdr_gap);
775 if (!hdr_trans && status->amsdu) {
776 pad_start = ieee80211_get_hdrlen_from_skb(skb);
777 } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
778 /*
779 * When header translation failure is indicated,
780 * the hardware will insert an extra 2-byte field
781 * containing the data length after the protocol
782 * type field.
783 */
784 pad_start = 12;
785 if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
786 pad_start += 4;
787 else
788 pad_start = 0;
789 }
790
791 if (pad_start) {
792 memmove(skb->data + 2, skb->data, pad_start);
793 skb_pull(skb, 2);
794 }
795 }
796
797 if (!hdr_trans) {
798 struct ieee80211_hdr *hdr;
799
800 if (insert_ccmp_hdr) {
801 u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
802
803 mt76_insert_ccmp_hdr(skb, key_id);
804 }
805
806 hdr = mt76_skb_get_hdr(skb);
807 fc = hdr->frame_control;
808 if (ieee80211_is_data_qos(fc)) {
809 seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
810 qos_ctl = *ieee80211_get_qos_ctl(hdr);
811 }
812 } else {
813 status->flag |= RX_FLAG_8023;
814 }
815
816 if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
817 mt7996_mac_decode_he_radiotap(skb, rxv, mode);
818
819 if (!status->wcid || !ieee80211_is_data_qos(fc))
820 return 0;
821
822 status->aggr = unicast &&
823 !ieee80211_is_qos_nullfunc(fc);
824 status->qos_ctl = qos_ctl;
825 status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
826
827 return 0;
828}
829
830static void
831mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
832 struct sk_buff *skb, struct mt76_wcid *wcid)
833{
834
835 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
836 u8 fc_type, fc_stype;
837 u16 ethertype;
838 bool wmm = false;
839 u32 val;
840
841 if (wcid->sta) {
842 struct ieee80211_sta *sta;
843
844 sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
845 wmm = sta->wme;
846 }
847
848 val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
849 FIELD_PREP(MT_TXD1_TID, tid);
850
851 ethertype = get_unaligned_be16(&skb->data[12]);
852 if (ethertype >= ETH_P_802_3_MIN)
853 val |= MT_TXD1_ETH_802_3;
854
855 txwi[1] |= cpu_to_le32(val);
856
857 fc_type = IEEE80211_FTYPE_DATA >> 2;
858 fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
859
860 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
861 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
862
863 txwi[2] |= cpu_to_le32(val);
864}
865
866static void
867mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
868 struct sk_buff *skb, struct ieee80211_key_conf *key)
869{
870 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
871 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
872 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
873 bool multicast = is_multicast_ether_addr(hdr->addr1);
874 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
875 __le16 fc = hdr->frame_control;
876 u8 fc_type, fc_stype;
877 u32 val;
878
879 if (ieee80211_is_action(fc) &&
880 mgmt->u.action.category == WLAN_CATEGORY_BACK &&
881 mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
882 tid = MT_TX_ADDBA;
883 else if (ieee80211_is_mgmt(hdr->frame_control))
884 tid = MT_TX_NORMAL;
885
886 val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
887 FIELD_PREP(MT_TXD1_HDR_INFO,
888 ieee80211_get_hdrlen_from_skb(skb) / 2) |
889 FIELD_PREP(MT_TXD1_TID, tid);
890
891 if (!ieee80211_is_data(fc) || multicast ||
892 info->flags & IEEE80211_TX_CTL_USE_MINRATE)
893 val |= MT_TXD1_FIXED_RATE;
894
895 if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
896 key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
897 val |= MT_TXD1_BIP;
898 txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
899 }
900
901 txwi[1] |= cpu_to_le32(val);
902
903 fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
904 fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
905
906 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
907 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
908
909 txwi[2] |= cpu_to_le32(val);
910
911 txwi[3] |= FIELD_PREP(MT_TXD3_BCM, multicast);
912 if (ieee80211_is_beacon(fc)) {
913 txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
914 txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
915 }
916
917 if (info->flags & IEEE80211_TX_CTL_INJECTED) {
918 u16 seqno = le16_to_cpu(hdr->seq_ctrl);
919
920 if (ieee80211_is_back_req(hdr->frame_control)) {
921 struct ieee80211_bar *bar;
922
923 bar = (struct ieee80211_bar *)skb->data;
924 seqno = le16_to_cpu(bar->start_seq_num);
925 }
926
927 val = MT_TXD3_SN_VALID |
928 FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
929 txwi[3] |= cpu_to_le32(val);
930 txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
931 }
932}
933
934static u16
935mt7996_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif,
936 bool beacon, bool mcast)
937{
938 u8 mode = 0, band = mphy->chandef.chan->band;
939 int rateidx = 0, mcast_rate;
940
941 if (beacon) {
942 struct cfg80211_bitrate_mask *mask;
943
944 mask = &vif->bss_conf.beacon_tx_rate;
945 if (hweight16(mask->control[band].he_mcs[0]) == 1) {
946 rateidx = ffs(mask->control[band].he_mcs[0]) - 1;
947 mode = MT_PHY_TYPE_HE_SU;
948 goto out;
949 } else if (hweight16(mask->control[band].vht_mcs[0]) == 1) {
950 rateidx = ffs(mask->control[band].vht_mcs[0]) - 1;
951 mode = MT_PHY_TYPE_VHT;
952 goto out;
953 } else if (hweight8(mask->control[band].ht_mcs[0]) == 1) {
954 rateidx = ffs(mask->control[band].ht_mcs[0]) - 1;
955 mode = MT_PHY_TYPE_HT;
956 goto out;
957 } else if (hweight32(mask->control[band].legacy) == 1) {
958 rateidx = ffs(mask->control[band].legacy) - 1;
959 goto legacy;
960 }
961 }
962
963 mcast_rate = vif->bss_conf.mcast_rate[band];
964 if (mcast && mcast_rate > 0)
965 rateidx = mcast_rate - 1;
966 else
967 rateidx = ffs(vif->bss_conf.basic_rates) - 1;
968
969legacy:
970 rateidx = mt76_calculate_default_rate(mphy, rateidx);
971 mode = rateidx >> 8;
972 rateidx &= GENMASK(7, 0);
973
974out:
975 return FIELD_PREP(MT_TX_RATE_IDX, rateidx) |
976 FIELD_PREP(MT_TX_RATE_MODE, mode);
977}
978
979void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
980 struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
981 struct ieee80211_key_conf *key, u32 changed)
982{
983 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
984 struct ieee80211_vif *vif = info->control.vif;
985 struct mt76_phy *mphy = &dev->mphy;
986 u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
987 u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
988 bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
989 u16 tx_count = 15;
990 u32 val;
991 bool beacon = !!(changed & (BSS_CHANGED_BEACON |
992 BSS_CHANGED_BEACON_ENABLED));
993 bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
994 BSS_CHANGED_FILS_DISCOVERY));
995
996 if (vif) {
997 struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
998
999 omac_idx = mvif->mt76.omac_idx;
1000 wmm_idx = mvif->mt76.wmm_idx;
1001 band_idx = mvif->mt76.band_idx;
1002 }
1003
1004 mphy = mt76_dev_phy(&dev->mt76, band_idx);
1005
1006 if (inband_disc) {
1007 p_fmt = MT_TX_TYPE_FW;
1008 q_idx = MT_LMAC_ALTX0;
1009 } else if (beacon) {
1010 p_fmt = MT_TX_TYPE_FW;
1011 q_idx = MT_LMAC_BCN0;
1012 } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
1013 p_fmt = MT_TX_TYPE_CT;
1014 q_idx = MT_LMAC_ALTX0;
1015 } else {
1016 p_fmt = MT_TX_TYPE_CT;
1017 q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
1018 mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
1019 }
1020
1021 val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
1022 FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
1023 FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
1024 txwi[0] = cpu_to_le32(val);
1025
1026 val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
1027 FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
1028
1029 if (band_idx)
1030 val |= FIELD_PREP(MT_TXD1_TGID, band_idx);
1031
1032 txwi[1] = cpu_to_le32(val);
1033 txwi[2] = 0;
1034
1035 val = MT_TXD3_SW_POWER_MGMT |
1036 FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
1037 if (key)
1038 val |= MT_TXD3_PROTECT_FRAME;
1039 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
1040 val |= MT_TXD3_NO_ACK;
1041 if (wcid->amsdu)
1042 val |= MT_TXD3_HW_AMSDU;
1043
1044 txwi[3] = cpu_to_le32(val);
1045 txwi[4] = 0;
1046
1047 val = FIELD_PREP(MT_TXD5_PID, pid);
1048 if (pid >= MT_PACKET_ID_FIRST)
1049 val |= MT_TXD5_TX_STATUS_HOST;
1050 txwi[5] = cpu_to_le32(val);
1051
1052 val = MT_TXD6_DIS_MAT | MT_TXD6_DAS |
1053 FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
1054 txwi[6] = cpu_to_le32(val);
1055 txwi[7] = 0;
1056
1057 if (is_8023)
1058 mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
1059 else
1060 mt7996_mac_write_txwi_80211(dev, txwi, skb, key);
1061
1062 if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
1063 /* Fixed rata is available just for 802.11 txd */
1064 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1065 bool multicast = is_multicast_ether_addr(hdr->addr1);
1066 u16 rate = mt7996_mac_tx_rate_val(mphy, vif, beacon, multicast);
1067
1068 /* fix to bw 20 */
1069 val = MT_TXD6_FIXED_BW |
1070 FIELD_PREP(MT_TXD6_BW, 0) |
1071 FIELD_PREP(MT_TXD6_TX_RATE, rate);
1072
1073 txwi[6] |= cpu_to_le32(val);
1074 txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
1075 }
1076}
1077
/* Prepare a frame for hardware transmission: attach a txwi cache entry,
 * reserve a DMA token, optionally build the TXD (host descriptor) and
 * fill the cut-through (CT) txp fragment descriptor consumed by the MAC.
 *
 * Returns 0 on success or a negative errno (e.g. no free token).
 */
int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_txwi_cache *t;
	struct mt7996_txp *txp;
	int id, i, pid, nbuf = tx_info->nbuf - 1;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if (sta) {
		struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;

		/* request a TX status report at most every 250ms per sta */
		if (time_after(jiffies, msta->jiffies + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->jiffies = jiffies;
		}
	}

	/* the txwi cache entry lives right behind the TXD area */
	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	memset(txwi_ptr, 0, MT_TXD_SIZE);
	/* For 802.11 frames, or when a TX status report was requested, the
	 * host must fill the TXD itself; plain 802.3 frames without status
	 * tracking let the firmware build it.
	 */
	if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
		mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid,
				      key, 0);

	/* fragment list for the cut-through path; buf[0] carries the TXD */
	txp = (struct mt7996_txp *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->nbuf = nbuf;

	txp->flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);

	/* tell the firmware whether the host-built TXD must be applied */
	if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
		txp->flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control))
		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;

		txp->bss_idx = mvif->mt76.idx;
	}

	txp->token = cpu_to_le16(id);
	/* 0xfff marks "no WDS wcid" for non-4-address stations */
	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
		txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
	else
		txp->rept_wds_wcid = cpu_to_le16(0xfff);
	tx_info->skb = DMA_DUMMY_DATA;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}
1161
1162static void
1163mt7996_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
1164{
1165 struct mt7996_sta *msta;
1166 u16 fc, tid;
1167 u32 val;
1168
1169 if (!sta || !(sta->ht_cap.ht_supported || sta->he_cap.has_he))
1170 return;
1171
1172 tid = le32_get_bits(txwi[1], MT_TXD1_TID);
1173 if (tid >= 6) /* skip VO queue */
1174 return;
1175
1176 val = le32_to_cpu(txwi[2]);
1177 fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
1178 FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
1179 if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
1180 return;
1181
1182 msta = (struct mt7996_sta *)sta->drv_priv;
1183 if (!test_and_set_bit(tid, &msta->ampdu_state))
1184 ieee80211_start_tx_ba_session(sta, tid, 0);
1185}
1186
1187static void
1188mt7996_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
1189{
1190 struct mt7996_txp *txp;
1191 int i;
1192
1193 txp = mt7996_txwi_to_txp(dev, t);
1194 for (i = 0; i < txp->nbuf; i++)
1195 dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
1196 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
1197}
1198
/* Complete one transmitted frame: unmap its DMA fragments, optionally
 * trigger BA session setup, report TX completion to mac80211 and
 * recycle the txwi cache entry. @free_list collects completed skbs for
 * batched freeing; mt7996_tx_token_put() passes NULL here.
 */
static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_wcid *wcid;
	__le32 *txwi;
	u16 wcid_idx;

	/* release the DMA mappings before completing the skb */
	mt7996_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		wcid = (struct mt76_wcid *)sta->drv_priv;
		wcid_idx = wcid->idx;

		/* keep EAPOL frames out of A-MPDU session setup */
		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7996_tx_check_aggr(sta, txwi);
	} else {
		/* no station context: recover the wcid index from the TXD */
		wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}
1229
/* Parse a TXRX_NOTIFY (tx-free) event from the MAC: release the DMA
 * tokens and complete the skbs of all reported MSDUs, and queue the
 * affected stations for airtime polling.
 */
static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	bool wake = false;
	u16 total, count = 0;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	/* only event format version >= 4 is handled here */
	if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 4))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		/* guard against a malformed event overrunning the buffer */
		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/*
		 * 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct mt7996_sta *msta;
			struct mt76_wcid *wcid;
			u16 idx;

			/* switch station context for the following MSDUs */
			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			/* schedule the station for airtime polling */
			msta = container_of(wcid, struct mt7996_sta, wcid);
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
			continue;
		}

		if (info & MT_TXFREE_INFO_HEADER)
			continue;

		/* each info word carries up to two 15-bit MSDU ids */
		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, sta, &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	/* un-block TX if token release freed up queue space */
	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	/* batch-free all completed skbs */
	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}
1320
/* Consume one TXS (tx status) record: match it to a pending skb by
 * packet id, report ACK status to mac80211, decode the used TX rate
 * into @wcid->rate and update the per-station counters in @stats.
 *
 * Returns true if a matching skb was found.
 */
static bool
mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, int pid,
		       __le32 *txs_data, struct mt76_sta_stats *stats)
{
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
	if (!skb)
		goto out_no_skb;

	txs = le32_to_cpu(txs_data[0]);

	info = IEEE80211_SKB_CB(skb);
	if (!(txs & MT_TXS0_ACK_ERROR_MASK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	/* rates[] is not used; the decoded rate goes into wcid->rate */
	info->status.rates[0].idx = -1;

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	/* STBC doubles the reported stream count; undo that */
	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	/* translate the hardware rate encoding per PHY mode */
	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		/* legacy rates map to a bitrate table index */
		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	mt76_tx_status_skb_done(mdev, skb, &list);

out_no_skb:
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}
1444
/* Dispatch a single TXS record to the owning station: look up the wcid
 * under RCU, feed the record to mt7996_mac_add_txs_skb() and queue the
 * station for airtime polling.
 */
static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
	struct mt7996_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	/* only TXS formats 0 and 1 are understood */
	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
		return;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	/* pids below MT_PACKET_ID_FIRST are not host-tracked frames */
	if (pid < MT_PACKET_ID_FIRST)
		return;

	if (wcidx >= MT7996_WTBL_SIZE)
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7996_sta, wcid);

	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data, &msta->stats);

	if (!wcid->sta)
		goto out;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&msta->poll_list))
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

out:
	rcu_read_unlock();
}
1486
1487bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
1488{
1489 struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
1490 __le32 *rxd = (__le32 *)data;
1491 __le32 *end = (__le32 *)&rxd[len / 4];
1492 enum rx_pkt_type type;
1493
1494 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
1495 if (type != PKT_TYPE_NORMAL) {
1496 u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);
1497
1498 if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
1499 MT_RXD0_SW_PKT_TYPE_FRAME))
1500 return true;
1501 }
1502
1503 switch (type) {
1504 case PKT_TYPE_TXRX_NOTIFY:
1505 mt7996_mac_tx_free(dev, data, len);
1506 return false;
1507 case PKT_TYPE_TXS:
1508 for (rxd += 4; rxd + 8 <= end; rxd += 8)
1509 mt7996_mac_add_txs(dev, rxd);
1510 return false;
1511 case PKT_TYPE_RX_FW_MONITOR:
1512 mt7996_debugfs_rx_fw_monitor(dev, data, len);
1513 return false;
1514 default:
1515 return true;
1516 }
1517}
1518
/* Main RX dispatch: route a received buffer by descriptor packet type.
 * Ownership of @skb transfers here -- every branch either hands it to
 * mac80211/mcu or frees it.
 */
void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		/* firmware-tunneled 802.11 frames are treated as normal RX */
		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7996_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		/* TXS records are 8 dwords each, after a 4-dword header */
		for (rxd += 4; rxd + 8 <= end; rxd += 8)
			mt7996_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7996_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}
1564
1565void mt7996_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
1566{
1567 if (!e->txwi) {
1568 dev_kfree_skb_any(e->skb);
1569 return;
1570 }
1571
1572 /* error path */
1573 if (e->skb == DMA_DUMMY_DATA) {
1574 struct mt76_txwi_cache *t;
1575 struct mt7996_txp *txp;
1576
1577 txp = mt7996_txwi_to_txp(mdev, e->txwi);
1578 t = mt76_token_put(mdev, le16_to_cpu(txp->token));
1579 e->skb = t ? t->skb : NULL;
1580 }
1581
1582 if (e->skb)
1583 mt76_tx_complete_skb(mdev, e->wcid, e->skb);
1584}
1585
/* Reset the per-band CCA statistics by toggling the statistics-count
 * enable in the PHY RX control register.
 */
void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
	/* NOTE(review): BIT(11) | BIT(9) are undocumented bits of RX_CTRL1,
	 * presumably re-enabling the statistics counters -- confirm against
	 * the hardware programming guide.
	 */
	mt76_set(dev, reg, BIT(11) | BIT(9));
}
1594
/* Clear the per-band hardware and software statistics: TX aggregation
 * counters, the shared aggr_stats slice of this band, airtime counters
 * and the cached channel MIB state.
 */
void mt7996_mac_reset_counters(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int i;

	/* the aggregation counters are clear-on-read */
	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));

	phy->mt76->survey_time = ktime_get_boottime();

	/* zero this band's slice of the shared aggregation stats array */
	i = ARRAY_SIZE(dev->mt76.aggr_stats) / __MT_MAX_BAND * band_idx;
	memset(&dev->mt76.aggr_stats[i], 0,
	       sizeof(dev->mt76.aggr_stats) / __MT_MAX_BAND);

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7996_mcu_get_chan_mib_info(phy, true);
}
1616
/* Program the MAC timing parameters (CCA/PLCP timeouts, IFS values and
 * CF-End rate) for the current channel, taking the largest coverage
 * class across all active bands. TX/RX are briefly disabled via the
 * arbiter while the registers are updated.
 */
void mt7996_mac_set_timing(struct mt7996_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7996_dev *dev = phy->dev;
	struct mt7996_phy *phy2 = mt7996_phy2(dev);
	struct mt7996_phy *phy3 = mt7996_phy3(dev);
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band_idx = phy->mt76->band_idx;
	int offset;
	bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	/* all bands share the timing hardware: use the worst-case class */
	if (phy2)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       phy2->coverage_class);

	if (phy3)
		coverage_class = max_t(s16, coverage_class,
				       phy3->coverage_class);

	/* quiesce TX/RX while timing registers are reprogrammed */
	mt76_set(dev, MT_ARB_SCR(band_idx),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	/* 3 us of extra timeout per coverage class step */
	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(band_idx),
		FIELD_PREP(MT_IFS_EIFS_OFDM, a_band ? 84 : 78) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, 10) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	/* CCK EIFS only applies on 2.4 GHz */
	if (!a_band)
		mt76_wr(dev, MT_TMAC_ICR1(band_idx),
			FIELD_PREP(MT_IFS_EIFS_CCK, 314));

	/* short slot or 5/6 GHz: use OFDM CF-End, otherwise 11b */
	if (phy->slottime < 20 || a_band)
		val = MT7996_CFEND_RATE_DEFAULT;
	else
		val = MT7996_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR0(band_idx), MT_AGG_ACR_CFEND_RATE, val);
	mt76_clear(dev, MT_ARB_SCR(band_idx),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
1672
/* Enable noise-floor (IPI/IRPI) measurement for @band: clear the IRPI
 * counters and turn on IPI collection in the PHY RX control register.
 */
void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
{
	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);

	/* 0x5 selects the IPI enable mode; see the register definition */
	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
}
1682
1683static u8
1684mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
1685{
1686 static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
1687 struct mt7996_dev *dev = phy->dev;
1688 u32 val, sum = 0, n = 0;
1689 int ant, i;
1690
1691 for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
1692 u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);
1693
1694 for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
1695 val = mt76_rr(dev, reg);
1696 sum += val * nf_power[i];
1697 n += val;
1698 }
1699 }
1700
1701 return n ? sum / n : 0;
1702}
1703
1704void mt7996_update_channel(struct mt76_phy *mphy)
1705{
1706 struct mt7996_phy *phy = (struct mt7996_phy *)mphy->priv;
1707 struct mt76_channel_state *state = mphy->chan_state;
1708 int nf;
1709
1710 mt7996_mcu_get_chan_mib_info(phy, false);
1711
1712 nf = mt7996_phy_get_nf(phy, mphy->band_idx);
1713 if (!phy->noise)
1714 phy->noise = nf << 4;
1715 else if (nf)
1716 phy->noise += nf - (phy->noise >> 4);
1717
1718 state->noise = -(phy->noise >> 4);
1719}
1720
1721static bool
1722mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
1723{
1724 bool ret;
1725
1726 ret = wait_event_timeout(dev->reset_wait,
1727 (READ_ONCE(dev->reset_state) & state),
1728 MT7996_RESET_TIMEOUT);
1729
1730 WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
1731 return ret;
1732}
1733
1734static void
1735mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
1736{
1737 struct ieee80211_hw *hw = priv;
1738
1739 switch (vif->type) {
1740 case NL80211_IFTYPE_MESH_POINT:
1741 case NL80211_IFTYPE_ADHOC:
1742 case NL80211_IFTYPE_AP:
1743 mt7996_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
1744 break;
1745 default:
1746 break;
1747 }
1748}
1749
1750static void
1751mt7996_update_beacons(struct mt7996_dev *dev)
1752{
1753 struct mt76_phy *phy2, *phy3;
1754
1755 ieee80211_iterate_active_interfaces(dev->mt76.hw,
1756 IEEE80211_IFACE_ITER_RESUME_ALL,
1757 mt7996_update_vif_beacon, dev->mt76.hw);
1758
1759 phy2 = dev->mt76.phys[MT_BAND1];
1760 if (!phy2)
1761 return;
1762
1763 ieee80211_iterate_active_interfaces(phy2->hw,
1764 IEEE80211_IFACE_ITER_RESUME_ALL,
1765 mt7996_update_vif_beacon, phy2->hw);
1766
1767 phy3 = dev->mt76.phys[MT_BAND2];
1768 if (!phy3)
1769 return;
1770
1771 ieee80211_iterate_active_interfaces(phy3->hw,
1772 IEEE80211_IFACE_ITER_RESUME_ALL,
1773 mt7996_update_vif_beacon, phy3->hw);
1774}
1775
/* Full WFDMA reset used during error recovery: stop DMA on both PCIe
 * hosts, drain every TX/MCU/RX queue, restore the prefetch settings and
 * re-enable DMA. Must run with the device quiesced.
 */
static void
mt7996_dma_reset(struct mt7996_dev *dev)
{
	struct mt76_phy *phy2 = dev->mt76.phys[MT_BAND1];
	struct mt76_phy *phy3 = dev->mt76.phys[MT_BAND2];
	u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
	int i;

	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	/* second PCIe host uses the same registers at a fixed offset */
	if (dev->hif2)
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	/* let in-flight DMA drain before touching the queues */
	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (phy2)
			mt76_queue_tx_cleanup(dev, phy2->q_tx[i], true);
		if (phy3)
			mt76_queue_tx_cleanup(dev, phy3->q_tx[i], true);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	mt76_tx_status_check(&dev->mt76, true);

	/* re-init prefetch settings after reset */
	mt7996_dma_prefetch(dev);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	if (dev->hif2)
		mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_RX_DMA_EN);
}
1822
/* Drop every outstanding DMA token and free its frame. Used on reset
 * and teardown, when the hardware will never report these frames back.
 */
void mt7996_tx_token_put(struct mt7996_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		/* no free_list: skbs are completed/freed one by one */
		mt7996_txwi_free(dev, txwi, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}
1836
/* system error recovery */
/* Full-chip recovery, driven by MCU reset events: stop all traffic and
 * workers, perform the MCU-coordinated DMA reset handshake, then bring
 * queues, NAPI, workers and beacons back up. The MT_MCU_INT_EVENT
 * writes acknowledge each handshake step to the firmware.
 */
void mt7996_mac_reset_work(struct work_struct *work)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt7996_dev *dev;
	int i;

	dev = container_of(work, struct mt7996_dev, reset_work);
	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	/* only act on a firmware-requested DMA stop */
	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
		return;

	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	/* unblock anyone sleeping on an MCU response */
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		cancel_delayed_work_sync(&phy3->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i)
		napi_disable(&dev->mt76.napi[i]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	/* ack step 1: DMA is stopped */
	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7996_dma_reset(dev);

		/* all tokens are stale after the DMA reset */
		mt7996_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		/* ack step 2: DMA re-initialized */
		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	local_bh_disable();
	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	tasklet_schedule(&dev->irq_tasklet);

	/* ack step 3: recovery complete */
	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	mt76_worker_enable(&dev->mt76.tx_worker);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	mutex_unlock(&dev->mt76.mutex);

	/* beacon templates were lost across the reset */
	mt7996_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
}
1935
/* Harvest the per-band MIB hardware counters into the software mib
 * stats. The MIB registers are clear-on-read, so each value is
 * accumulated (+=) into the running totals.
 */
void mt7996_mac_update_stats(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	int i, aggr0;
	u8 band_idx = phy->mt76->band_idx;
	u32 val, cnt;

	/* RX error / drop counters */
	cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
	mib->fcs_err_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
	mib->rx_fifo_full_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
	mib->rx_vector_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
	mib->rx_delimiter_fail_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
	mib->rx_len_mismatch_cnt += cnt;

	/* TX MPDU / A-MPDU counters */
	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
	mib->tx_stop_q_empty_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
	mib->tx_mpdu_attempts_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
	mib->tx_mpdu_success_cnt += cnt;

	/* RX A-MPDU counters */
	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
	mib->rx_ampdu_valid_subframe_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);

	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
	mib->rx_pfdrop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
	mib->rx_ba_cnt += cnt;

	/* beamforming counters */
	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
	mib->tx_bf_ebf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
	mib->tx_bf_ibf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
	mib->tx_mu_bf_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	/* per-PHY-mode beamforming feedback; fb_all is the running sum */
	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
	mib->tx_bf_rx_fb_ht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
	mib->tx_bf_rx_fb_vht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
	mib->tx_bf_rx_fb_he_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
	mib->tx_bf_rx_fb_eht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);

	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
	mib->tx_bf_fb_trig_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
	mib->tx_bf_fb_cpl_cnt += cnt;

	/* A-MSDU aggregation size histogram */
	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	/* this band's slice of the shared aggregation stats array */
	aggr0 = ARRAY_SIZE(dev->mt76.aggr_stats) / __MT_MAX_BAND * band_idx;

	/* rts count */
	val = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
	mib->rts_cnt += val;

	/* rts retry count */
	val = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
	mib->rts_retries_cnt += val;

	/* ba miss count */
	val = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
	mib->ba_miss_cnt += val;

	/* ack fail count */
	val = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
	mib->ack_fail_cnt += val;

	for (i = 0; i < 16; i++) {
		val = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
		dev->mt76.aggr_stats[aggr0++] += val;
	}
}
2078
/* Deferred rate-control update worker: drain dev->sta_rc_list and push
 * pending rate/NSS/bandwidth changes to the firmware. The poll lock is
 * dropped around the MCU call (it may sleep), so the list is spliced to
 * a private head first.
 */
void mt7996_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7996_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7996_sta, rc_list);
		list_del_init(&msta->rc_list);
		/* snapshot and clear the change mask under the lock */
		changed = msta->changed;
		msta->changed = 0;
		spin_unlock_bh(&dev->sta_poll_lock);

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7996_mcu_add_rate_ctrl(dev, vif, sta, true);

		/* TODO: smps change */

		spin_lock_bh(&dev->sta_poll_lock);
	}

	spin_unlock_bh(&dev->sta_poll_lock);
}
2113
2114void mt7996_mac_work(struct work_struct *work)
2115{
2116 struct mt7996_phy *phy;
2117 struct mt76_phy *mphy;
2118
2119 mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
2120 mac_work.work);
2121 phy = mphy->priv;
2122
2123 mutex_lock(&mphy->dev->mutex);
2124
2125 mt76_update_survey(mphy);
2126 if (++mphy->mac_work_count == 5) {
2127 mphy->mac_work_count = 0;
2128
2129 mt7996_mac_update_stats(phy);
2130 }
2131
2132 mutex_unlock(&mphy->dev->mutex);
2133
2134 mt76_tx_status_check(mphy->dev, false);
2135
2136 ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
2137 MT7996_WATCHDOG_TIME);
2138}
2139
2140static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
2141{
2142 struct mt7996_dev *dev = phy->dev;
2143
2144 if (phy->rdd_state & BIT(0))
2145 mt7996_mcu_rdd_cmd(dev, RDD_STOP, 0,
2146 MT_RX_SEL0, 0);
2147 if (phy->rdd_state & BIT(1))
2148 mt7996_mcu_rdd_cmd(dev, RDD_STOP, 1,
2149 MT_RX_SEL0, 0);
2150}
2151
2152static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
2153{
2154 int err, region;
2155
2156 switch (dev->mt76.region) {
2157 case NL80211_DFS_ETSI:
2158 region = 0;
2159 break;
2160 case NL80211_DFS_JP:
2161 region = 2;
2162 break;
2163 case NL80211_DFS_FCC:
2164 default:
2165 region = 1;
2166 break;
2167 }
2168
2169 err = mt7996_mcu_rdd_cmd(dev, RDD_START, chain,
2170 MT_RX_SEL0, region);
2171 if (err < 0)
2172 return err;
2173
2174 return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
2175 MT_RX_SEL0, 1);
2176}
2177
2178static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
2179{
2180 struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
2181 struct mt7996_dev *dev = phy->dev;
2182 u8 band_idx = phy->mt76->band_idx;
2183 int err;
2184
2185 /* start CAC */
2186 err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, band_idx,
2187 MT_RX_SEL0, 0);
2188 if (err < 0)
2189 return err;
2190
2191 err = mt7996_dfs_start_rdd(dev, band_idx);
2192 if (err < 0)
2193 return err;
2194
2195 phy->rdd_state |= BIT(band_idx);
2196
2197 if (chandef->width == NL80211_CHAN_WIDTH_160 ||
2198 chandef->width == NL80211_CHAN_WIDTH_80P80) {
2199 err = mt7996_dfs_start_rdd(dev, 1);
2200 if (err < 0)
2201 return err;
2202
2203 phy->rdd_state |= BIT(1);
2204 }
2205
2206 return 0;
2207}
2208
2209static int
2210mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
2211{
2212 const struct mt7996_dfs_radar_spec *radar_specs;
2213 struct mt7996_dev *dev = phy->dev;
2214 int err, i;
2215
2216 switch (dev->mt76.region) {
2217 case NL80211_DFS_FCC:
2218 radar_specs = &fcc_radar_specs;
2219 err = mt7996_mcu_set_fcc5_lpn(dev, 8);
2220 if (err < 0)
2221 return err;
2222 break;
2223 case NL80211_DFS_ETSI:
2224 radar_specs = &etsi_radar_specs;
2225 break;
2226 case NL80211_DFS_JP:
2227 radar_specs = &jp_radar_specs;
2228 break;
2229 default:
2230 return -EINVAL;
2231 }
2232
2233 for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
2234 err = mt7996_mcu_set_radar_th(dev, i,
2235 &radar_specs->radar_pattern[i]);
2236 if (err < 0)
2237 return err;
2238 }
2239
2240 return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
2241}
2242
/* Drive the per-phy DFS state machine: (re)configure radar thresholds,
 * start/stop the detector and CAC as the channel state requires.
 * Returns 0 on success or a negative MCU error; on CAC-end failure the
 * state is reset to MT_DFS_STATE_UNKNOWN so the next call resyncs.
 */
int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	/* nothing to do if the DFS state did not change */
	if (prev_state == dfs_state)
		return 0;

	/* unknown previous state: stop any detector that may still be
	 * running so software and hardware are back in sync
	 */
	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7996_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	/* coming from off/unknown: program thresholds and begin CAC */
	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7996_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7996_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	/* CAC finished: notify the MCU and move to active monitoring */
	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	/* leave radar-detection mode and return to normal operation */
	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7996_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}
2297
/* Scale a TWT wake duration (expressed in 256us units) to microseconds. */
static int
mt7996_mac_twt_duration_align(int duration)
{
	return duration * 256;
}
2303
/* Insert @flow into the device TWT schedule list, keeping the list
 * ordered by service-period start time, and return the TSF offset at
 * which the new flow's first service period is scheduled.
 */
static u64
mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
			      struct mt7996_twt_flow *flow)
{
	struct mt7996_twt_flow *iter, *iter_next;
	/* flow->duration is in 256us units; scale to match start_tsf */
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	/* schedule at offset 0 if the list is empty, the first entry is
	 * unscheduled, or there is enough room before the first entry
	 */
	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7996_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	/* otherwise look for the first gap between consecutive entries
	 * that is large enough to hold this flow's service period
	 */
	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		/* earliest start after @iter's service period ends */
		start_tsf = iter->start_tsf +
			    mt7996_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* no gap found: add flow as last entry in the list, starting
	 * right after the current last entry
	 */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}
2338
2339static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
2340{
2341 struct ieee80211_twt_params *twt_agrt;
2342 u64 interval, duration;
2343 u16 mantissa;
2344 u8 exp;
2345
2346 /* only individual agreement supported */
2347 if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
2348 return -EOPNOTSUPP;
2349
2350 /* only 256us unit supported */
2351 if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
2352 return -EOPNOTSUPP;
2353
2354 twt_agrt = (struct ieee80211_twt_params *)twt->params;
2355
2356 /* explicit agreement not supported */
2357 if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
2358 return -EOPNOTSUPP;
2359
2360 exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
2361 le16_to_cpu(twt_agrt->req_type));
2362 mantissa = le16_to_cpu(twt_agrt->mantissa);
2363 duration = twt_agrt->min_twt_dur << 8;
2364
2365 interval = (u64)mantissa << exp;
2366 if (interval < duration)
2367 return -EOPNOTSUPP;
2368
2369 return 0;
2370}
2371
/* Handle a station's TWT setup request: allocate a flow slot, schedule
 * the service period, push the agreement to firmware and rewrite the
 * request frame fields into the response (accept or reject).
 */
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_twt_flow *flow;
	int flowid, table_id;
	u8 exp;

	/* reject requests the driver cannot serve (broadcast/explicit
	 * agreements, wrong duration unit, interval < duration)
	 */
	if (mt7996_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	/* no room left in the global agreement table */
	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
		goto unlock;

	/* all of this station's flow slots are in use */
	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
		goto unlock;

	/* pick the first free flow id and advertise it in the response */
	flowid = ffs(~msta->twt.flowid_mask) - 1;
	le16p_replace_bits(&twt_agrt->req_type, flowid,
			   IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	/* populate the flow descriptor from the request parameters */
	flow = &msta->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		/* the station lets us choose: put the flow into the
		 * device schedule list and compute the first service
		 * period start aligned to the wake interval
		 */
		flow->sched = true;
		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7996_get_tsf(hw, msta->vif);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		/* DEMAND/fixed TWT: keep the station-provided target TSF */
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	/* hand the agreement to firmware; on failure nothing below is
	 * committed, so the reply stays TWT_SETUP_CMD_REJECT
	 */
	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	/* write the final verdict back into the response frame */
	le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
			   IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
		       (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
}
2451
2452void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
2453 struct mt7996_sta *msta,
2454 u8 flowid)
2455{
2456 struct mt7996_twt_flow *flow;
2457
2458 lockdep_assert_held(&dev->mt76.mutex);
2459
2460 if (flowid >= ARRAY_SIZE(msta->twt.flow))
2461 return;
2462
2463 if (!(msta->twt.flowid_mask & BIT(flowid)))
2464 return;
2465
2466 flow = &msta->twt.flow[flowid];
2467 if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow,
2468 MCU_TWT_AGRT_DELETE))
2469 return;
2470
2471 list_del_init(&flow->list);
2472 msta->twt.flowid_mask &= ~BIT(flowid);
2473 dev->twt.table_mask &= ~BIT(flow->table_id);
2474 dev->twt.n_agrt--;
2475}