From 99a027c431faf039052d0d157807469231f037dc Mon Sep 17 00:00:00 2001
From: Peter Chiu <chui-hao.chiu@mediatek.com>
Date: Thu, 23 May 2024 02:33:47 +0800
Subject: [PATCH 165/199] mtk: mt76: add debugfs for rx drop counters

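Add counters to track where rx packets get dropped on the host:

- per-queue counters in struct mt76_queue, covering drops in the DMA
  path (WED/RRO drop reasons, rxwi/token allocation and lookup
  failures, fragment reassembly and build_skb errors)
- per-phy counters in struct mt76_rx_debug, covering drops above the
  DMA layer (rxd errors, phy state check, A-MSDU rfc1042 check and
  BA reorder drops), along with totals for frames received from
  hardware and frames delivered to mac80211

Expose the counters through a new debugfs entry, rx_drop_stats.
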
Signed-off-by: Peter Chiu <chui-hao.chiu@mediatek.com>
---
 agg-rx.c             |  9 +++++
 dma.c                | 40 +++++++++++++++++-----
 dma.h                | 14 ++++----
 mac80211.c           | 15 ++++++++-
 mt76.h               | 39 ++++++++++++++++++++++
 mt7996/mac.c         | 13 ++++++++
 mt7996/mtk_debugfs.c | 79 ++++++++++++++++++++++++++++++++++++++++++++
 7 files changed, 193 insertions(+), 16 deletions(-)

diff --git a/agg-rx.c b/agg-rx.c
index b48943c4..9875baa8 100644
--- a/agg-rx.c
+++ b/agg-rx.c
@@ -152,6 +152,7 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
 	struct mt76_wcid *wcid = status->wcid;
 	struct ieee80211_sta *sta;
 	struct mt76_rx_tid *tid;
+	struct mt76_phy *phy;
 	bool sn_less;
 	u16 seqno, head, size, idx;
 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
@@ -178,6 +179,8 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
 	if (!tid)
 		return;
 
+	phy = mt76_dev_phy(tid->dev, wcid->phy_idx);
+
 	status->flag |= RX_FLAG_DUP_VALIDATED;
 	spin_lock_bh(&tid->lock);
 
@@ -200,6 +203,9 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
 	if (sn_less) {
 		__skb_unlink(skb, frames);
 		dev_kfree_skb(skb);
+		spin_lock_bh(&phy->rx_dbg_stats.lock);
+		phy->rx_dbg_stats.rx_drop[MT_RX_DROP_AGG_SN_LESS]++;
+		spin_unlock_bh(&phy->rx_dbg_stats.lock);
 		goto out;
 	}
 
@@ -226,6 +232,9 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
 	/* Discard if the current slot is already in use */
 	if (tid->reorder_buf[idx]) {
 		dev_kfree_skb(skb);
+		spin_lock_bh(&phy->rx_dbg_stats.lock);
+		phy->rx_dbg_stats.rx_drop[MT_RX_DROP_AGG_DUP]++;
+		spin_unlock_bh(&phy->rx_dbg_stats.lock);
 		goto out;
 	}
 
diff --git a/dma.c b/dma.c
index 0dae40e2..81e76191 100644
--- a/dma.c
+++ b/dma.c
@@ -251,13 +251,16 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	if (mt76_queue_is_wed_rx(q)) {
 		if (!rxwi) {
 			rxwi = mt76_get_rxwi(dev);
-			if (!rxwi)
+			if (!rxwi) {
+				q->rx_drop[MT_RX_DROP_DMAD_GET_RXWI_FAIL]++;
 				return -ENOMEM;
+			}
 		}
 
 		rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
 		if (rx_token < 0) {
 			mt76_put_rxwi(dev, rxwi);
+			q->rx_drop[MT_RX_DROP_DMAD_GET_TOKEN_FAIL]++;
 			return -ENOMEM;
 		}
 
@@ -428,6 +431,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	struct mt76_desc *desc = &q->desc[idx];
 	u32 ctrl, desc_info, buf1;
 	void *buf = e->buf;
+	int reason;
 
 	if (mt76_queue_is_wed_rro_ind(q))
 		goto done;
@@ -443,7 +447,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 		*info = desc_info;
 
 	buf1 = le32_to_cpu(desc->buf1);
-	mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);
+	reason = mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);
+	if (drop && *drop && reason >= 0)
+		q->rx_drop[reason]++;
 
 	if (mt76_queue_is_wed_rx(q)) {
 		u32 id, find = 0;
@@ -467,13 +473,17 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 		}
 
 		spin_unlock_bh(&dev->rx_token_lock);
-		if (!find)
+		if (!find) {
+			q->rx_drop[MT_RX_DROP_DMAD_ADDR_NOT_FOUND]++;
 			return NULL;
+		}
 	}
 
 	r = mt76_rx_token_release(dev, token);
-	if (!r)
+	if (!r) {
+		q->rx_drop[MT_RX_DROP_DMAD_TOKEN_NOT_FOUND]++;
 		return NULL;
+	}
 
 	dma_unmap_single(dev->dma_dev, r->dma_addr,
			 SKB_WITH_OVERHEAD(q->buf_size),
@@ -489,8 +499,10 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 		struct mt76_queue_buf qbuf;
 
 		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC | GFP_DMA32);
-		if (!buf)
+		if (!buf) {
+			q->rx_drop[MT_RX_DROP_DMAD_NOMEM]++;
 			return NULL;
+		}
 
 		memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size));
 
@@ -500,6 +512,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 		if (unlikely(dma_mapping_error(dev->dma_dev, r->dma_addr))) {
 			skb_free_frag(r->ptr);
 			mt76_put_rxwi(dev, r);
+			q->rx_drop[MT_RX_DROP_DMAD_DMA_MAPPING_FAIL]++;
 			return NULL;
 		}
 
@@ -517,8 +530,11 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 			}
 		}
 
-		if (drop)
+		if (drop) {
 			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
+			if (buf1 & MT_DMA_CTL_WO_DROP)
+				q->rx_drop[MT_RX_DROP_DMAD_WO_FRAG]++;
+		}
 	} else {
 		dma_unmap_single(dev->dma_dev, e->dma_addr[0],
				 SKB_WITH_OVERHEAD(q->buf_size),
@@ -892,6 +908,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
 
 		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
 	} else {
+		q->rx_drop[MT_RX_DROP_FRAG]++;
 		skb_free_frag(data);
 	}
 
@@ -899,10 +916,12 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
 		return;
 
 	q->rx_head = NULL;
-	if (nr_frags < ARRAY_SIZE(shinfo->frags))
+	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
 		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
-	else
+	} else {
+		q->rx_drop[MT_RX_DROP_FRAG]++;
 		dev_kfree_skb(skb);
+	}
 }
 
 static int
@@ -947,6 +966,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 			data_len = SKB_WITH_OVERHEAD(q->buf_size);
 
 		if (data_len < len + q->buf_offset) {
+			q->rx_drop[MT_RX_DROP_FRAG]++;
 			dev_kfree_skb(q->rx_head);
 			q->rx_head = NULL;
 			goto free_frag;
@@ -963,8 +983,10 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 			goto free_frag;
 
 		skb = build_skb(data, q->buf_size);
-		if (!skb)
+		if (!skb) {
+			q->rx_drop[MT_RX_DROP_BUILD_SKB_FAIL]++;
 			goto free_frag;
+		}
 
 		skb_reserve(skb, q->buf_offset);
 
diff --git a/dma.h b/dma.h
index 3a8c2e55..718122d5 100644
--- a/dma.h
+++ b/dma.h
@@ -93,27 +93,29 @@ mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 	mt76_wed_dma_setup(dev, q, true);
 }
 
-static inline void
+static inline int
 mt76_dma_should_drop_buf(bool *drop, u32 ctrl, u32 buf1, u32 info)
 {
 	if (!drop)
-		return;
+		return -1;
 
 	*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A | MT_DMA_CTL_DROP));
 	if (!(ctrl & MT_DMA_CTL_VER_MASK))
-		return;
+		return MT_RX_DROP_DMAD_WO_DROP;
 
 	switch (FIELD_GET(MT_DMA_WED_IND_REASON, buf1)) {
 	case MT_DMA_WED_IND_REASON_REPEAT:
 		*drop = true;
-		break;
+		return MT_RX_DROP_DMAD_RRO_REPEAT;
 	case MT_DMA_WED_IND_REASON_OLDPKT:
 		*drop = !(info & MT_DMA_INFO_DMA_FRAG);
-		break;
+		return MT_RX_DROP_DMAD_RRO_OLDPKT;
 	default:
 		*drop = !!(ctrl & MT_DMA_CTL_PN_CHK_FAIL);
-		break;
+		return MT_RX_DROP_DMAD_RRO_PN_CHK_FAIL;
 	}
+
+	return -1;
 }
 
 #endif
diff --git a/mac80211.c b/mac80211.c
index 5402366e..d5f842db 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -418,6 +418,7 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
 	INIT_LIST_HEAD(&phy->tx_list);
 	spin_lock_init(&phy->tx_lock);
 	spin_lock_init(&phy->tx_dbg_stats.lock);
+	spin_lock_init(&phy->rx_dbg_stats.lock);
 
 	SET_IEEE80211_DEV(hw, dev->dev);
 	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);
@@ -755,6 +756,9 @@ static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
 		}
 
 		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
+			spin_lock_bh(&phy->rx_dbg_stats.lock);
+			phy->rx_dbg_stats.rx_drop[MT_RX_DROP_RFC_PKT]++;
+			spin_unlock_bh(&phy->rx_dbg_stats.lock);
 			dev_kfree_skb(skb);
 			return;
 		}
@@ -792,6 +796,9 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
 
 	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
 		dev_kfree_skb(skb);
+		spin_lock_bh(&phy->rx_dbg_stats.lock);
+		phy->rx_dbg_stats.rx_drop[MT_RX_DROP_STATE_ERR]++;
+		spin_unlock_bh(&phy->rx_dbg_stats.lock);
 		return;
 	}
 
@@ -1055,6 +1062,7 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
 {
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
+	struct mt76_phy *phy;
 	struct mt76_rx_status mstat;
 
 	mstat = *((struct mt76_rx_status *)skb->cb);
@@ -1101,7 +1109,12 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
 	}
 
 	*sta = wcid_to_sta(mstat.wcid);
-	*hw = mt76_main_hw(dev->phys[mstat.phy_idx]);
+	*hw = mt76_phy_hw(dev, mstat.phy_idx);
+
+	phy = mt76_dev_phy(dev, mstat.phy_idx);
+	spin_lock_bh(&phy->rx_dbg_stats.lock);
+	phy->rx_dbg_stats.rx_to_mac80211++;
+	spin_unlock_bh(&phy->rx_dbg_stats.lock);
 }
 
 static void
diff --git a/mt76.h b/mt76.h
index ee118ee5..f2d12b89 100644
--- a/mt76.h
+++ b/mt76.h
@@ -187,6 +187,34 @@ enum mt76_dfs_state {
 	MT_DFS_STATE_ACTIVE,
 };
 
+enum {
+	/* Per-queue counters */
+	MT_RX_DROP_DMAD_RRO_REPEAT,
+	MT_RX_DROP_DMAD_RRO_OLDPKT,
+	MT_RX_DROP_DMAD_RRO_PN_CHK_FAIL,
+	MT_RX_DROP_DMAD_WO_FRAG,
+	MT_RX_DROP_DMAD_WO_DROP,
+	MT_RX_DROP_DMAD_ADDR_NOT_FOUND,
+	MT_RX_DROP_DMAD_TOKEN_NOT_FOUND,
+	MT_RX_DROP_DMAD_GET_TOKEN_FAIL,
+	MT_RX_DROP_DMAD_GET_RXWI_FAIL,
+	MT_RX_DROP_DMAD_NOMEM,
+	MT_RX_DROP_DMAD_DMA_MAPPING_FAIL,
+	MT_RX_DROP_FRAG,
+	MT_RX_DROP_BUILD_SKB_FAIL,
+
+	MT_RX_DROP_PER_Q_MAX,
+
+	/* Per-phy counters */
+	MT_RX_DROP_RXD_ERR = 0,
+	MT_RX_DROP_STATE_ERR,
+	MT_RX_DROP_RFC_PKT,
+	MT_RX_DROP_AGG_SN_LESS,
+	MT_RX_DROP_AGG_DUP,
+
+	MT_RX_DROP_PER_PHY_MAX,
+};
+
 struct mt76_queue_buf {
 	dma_addr_t addr;
 	u16 len:15,
@@ -255,6 +283,8 @@ struct mt76_queue {
 	dma_addr_t desc_dma;
 	struct sk_buff *rx_head;
 	struct page_frag_cache rx_page;
+
+	u32 rx_drop[MT_RX_DROP_PER_Q_MAX];
 };
 
 struct mt76_mcu_ops {
@@ -860,6 +890,14 @@ struct mt76_tx_debug {
 	spinlock_t lock;
 };
 
+struct mt76_rx_debug {
+	u32 rx_from_hw;
+	u32 rx_to_mac80211;
+
+	u32 rx_drop[MT_RX_DROP_PER_PHY_MAX];
+	spinlock_t lock;
+};
+
 struct mt76_phy {
 	struct ieee80211_hw *hw;
 	struct ieee80211_hw *ori_hw;
@@ -917,6 +955,7 @@ struct mt76_phy {
 		u8 pin;
 	} leds;
 	struct mt76_tx_debug tx_dbg_stats;
+	struct mt76_rx_debug rx_dbg_stats;
 };
 
 struct mt76_dev {
diff --git a/mt7996/mac.c b/mt7996/mac.c
index 0f282f16..2f37d31c 100644
--- a/mt7996/mac.c
+++ b/mt7996/mac.c
@@ -1410,9 +1410,11 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
 			 struct sk_buff *skb, u32 *info)
 {
 	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+	struct mt76_phy *phy;
 	__le32 *rxd = (__le32 *)skb->data;
 	__le32 *end = (__le32 *)&skb->data[skb->len];
 	enum rx_pkt_type type;
+	u8 band_idx;
 
 	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
 	if (type != PKT_TYPE_NORMAL) {
@@ -1447,12 +1449,23 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
 		dev_kfree_skb(skb);
 		break;
 	case PKT_TYPE_NORMAL:
+		band_idx = le32_get_bits(rxd[1], MT_RXD1_NORMAL_BAND_IDX);
+		phy = mt76_dev_phy(mdev, band_idx);
+		spin_lock_bh(&phy->rx_dbg_stats.lock);
+		phy->rx_dbg_stats.rx_from_hw++;
+		spin_unlock_bh(&phy->rx_dbg_stats.lock);
+
 		if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
 			mt76_rx(&dev->mt76, q, skb);
 			return;
 		}
 		fallthrough;
 	default:
+		band_idx = le32_get_bits(rxd[1], MT_RXD1_NORMAL_BAND_IDX);
+		phy = mt76_dev_phy(mdev, band_idx);
+		spin_lock_bh(&phy->rx_dbg_stats.lock);
+		phy->rx_dbg_stats.rx_drop[MT_RX_DROP_RXD_ERR]++;
+		spin_unlock_bh(&phy->rx_dbg_stats.lock);
 		dev_kfree_skb(skb);
 		break;
 	}
diff --git a/mt7996/mtk_debugfs.c b/mt7996/mtk_debugfs.c
index 759b9d8f..b16ea5fe 100644
--- a/mt7996/mtk_debugfs.c
+++ b/mt7996/mtk_debugfs.c
@@ -4240,6 +4240,84 @@ out:
 }
 DEFINE_SHOW_ATTRIBUTE(mt7996_tx_drop);
 
+static int
+mt7996_rx_drop_show(struct seq_file *s, void *data)
+{
+	struct mt7996_dev *dev = s->private;
+	struct mt76_dev *mdev = &dev->mt76;
+	struct mt76_rx_debug *stats[__MT_MAX_BAND] = {};
+	struct mt76_queue *q[2];
+	int i = 0;
+
+	q[0] = &mdev->q_rx[MT_RXQ_MAIN];
+	q[1] = is_mt7996(mdev) ? &mdev->q_rx[MT_RXQ_BAND2] :
+				 &mdev->q_rx[MT_RXQ_BAND1];
+
+	seq_printf(s, "\t\t\t\t ");
+	for (i = 0; i < 2; i++) {
+		seq_printf(s, " RXQ%d", q[i]->hw_idx);
+	}
+	seq_printf(s, "\n");
+
+#define __pr(t) seq_printf(s, "Drop due to %-22s%12d%12d\n", #t, \
+			   q[0]->rx_drop[MT_RX_DROP_##t], \
+			   q[1]->rx_drop[MT_RX_DROP_##t]);
+	__pr(DMAD_RRO_REPEAT);
+	__pr(DMAD_RRO_OLDPKT);
+	__pr(DMAD_RRO_PN_CHK_FAIL);
+	__pr(DMAD_WO_FRAG);
+	__pr(DMAD_WO_DROP);
+	__pr(DMAD_ADDR_NOT_FOUND);
+	__pr(DMAD_TOKEN_NOT_FOUND);
+	__pr(DMAD_GET_TOKEN_FAIL);
+	__pr(DMAD_GET_RXWI_FAIL);
+	__pr(DMAD_NOMEM);
+	__pr(DMAD_DMA_MAPPING_FAIL);
+	__pr(FRAG);
+	__pr(BUILD_SKB_FAIL);
+#undef __pr
+
+	seq_printf(s, "\n\t\t\t\t ");
+	for (i = 0; i < __MT_MAX_BAND; i++) {
+		seq_printf(s, " Band%d", i);
+		if (mdev->phys[i]) {
+			stats[i] = &mdev->phys[i]->rx_dbg_stats;
+		} else {
+			stats[i] = kzalloc(sizeof(struct mt76_rx_debug),
+					   GFP_KERNEL);
+			if (!stats[i])
+				goto out;
+		}
+	}
+	seq_printf(s, "\n");
+	seq_printf(s, "%-35s%12d%12d%12d\n", "Receive from hw",
+		   stats[MT_BAND0]->rx_from_hw,
+		   stats[MT_BAND1]->rx_from_hw,
+		   stats[MT_BAND2]->rx_from_hw);
+	seq_printf(s, "%-35s%12d%12d%12d\n\n", "Send to mac80211",
+		   stats[MT_BAND0]->rx_to_mac80211,
+		   stats[MT_BAND1]->rx_to_mac80211,
+		   stats[MT_BAND2]->rx_to_mac80211);
+#define __pr(t) seq_printf(s, "Drop due to %-22s%12d%12d%12d\n", #t, \
+			   stats[MT_BAND0]->rx_drop[MT_RX_DROP_##t], \
+			   stats[MT_BAND1]->rx_drop[MT_RX_DROP_##t], \
+			   stats[MT_BAND2]->rx_drop[MT_RX_DROP_##t])
+	__pr(RXD_ERR);
+	__pr(STATE_ERR);
+	__pr(RFC_PKT);
+	__pr(AGG_SN_LESS);
+	__pr(AGG_DUP);
+#undef __pr
+
+out:
+	for (i = 0; i < __MT_MAX_BAND; i++) {
+		if (!mdev->phys[i] && stats[i])
+			kfree(stats[i]);
+	}
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(mt7996_rx_drop);
 /* DRR */
 static int
 mt7996_drr_info(struct seq_file *s, void *data)
@@ -4368,6 +4446,7 @@ void mt7996_mtk_init_dev_debugfs(struct mt7996_dev *dev, struct dentry *dir)
 
 	/* Drop counters */
 	debugfs_create_file("tx_drop_stats", 0400, dir, dev, &mt7996_tx_drop_fops);
+	debugfs_create_file("rx_drop_stats", 0400, dir, dev, &mt7996_rx_drop_fops);
 }
 
 #endif
-- 
2.18.0
