From 26fb1344b791a77c86f15a3f2f96aa07d68efea3 Mon Sep 17 00:00:00 2001
From: Peter Chiu <chui-hao.chiu@mediatek.com>
Date: Wed, 20 Mar 2024 15:14:20 +0800
Subject: [PATCH 2014/2014] wifi: mt76: add debugfs for rx drop counters

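Record rx drop reasons at both the per-queue (DMA) level and the
per-phy (mac80211) level, and expose the counters through a new
debugfs node, rx_drop_stats, in the per-phy mtk debugfs directory.
The counters are cleared together with the tx counters via the
existing reset_counter node.
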
Signed-off-by: Peter Chiu <chui-hao.chiu@mediatek.com>
---
 agg-rx.c             |  5 +++++
 dma.c                | 32 ++++++++++++++++++++++-------
 dma.h                | 14 +++++++------
 mac80211.c           |  6 ++++++
 mt76.h               | 37 +++++++++++++++++++++++++++++++++
 mt7915/mac.c         | 10 +++++++++
 mt7915/mtk_debugfs.c | 49 ++++++++++++++++++++++++++++++++++++++++++++
 7 files changed, 140 insertions(+), 13 deletions(-)

diff --git a/agg-rx.c b/agg-rx.c
index 07c386c..97a963a 100644
--- a/agg-rx.c
+++ b/agg-rx.c
@@ -151,6 +151,7 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
 struct mt76_wcid *wcid = status->wcid;
 struct ieee80211_sta *sta;
 struct mt76_rx_tid *tid;
+ struct mt76_phy *phy;
 bool sn_less;
 u16 seqno, head, size, idx;
 u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
@@ -177,6 +178,8 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
 if (!tid)
 return;

+ phy = mt76_dev_phy(tid->dev, wcid->phy_idx);
+
 status->flag |= RX_FLAG_DUP_VALIDATED;
 spin_lock_bh(&tid->lock);

@@ -198,6 +201,7 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
 if (sn_less) {
 __skb_unlink(skb, frames);
 dev_kfree_skb(skb);
+ phy->rx_dbg_stats.rx_drop[MT_RX_DROP_AGG_SN_LESS]++;
 goto out;
 }

@@ -224,6 +228,7 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
 /* Discard if the current slot is already in use */
 if (tid->reorder_buf[idx]) {
 dev_kfree_skb(skb);
+ phy->rx_dbg_stats.rx_drop[MT_RX_DROP_AGG_DUP]++;
 goto out;
 }

diff --git a/dma.c b/dma.c
index da3e8bc..782463f 100644
--- a/dma.c
+++ b/dma.c
@@ -251,13 +251,16 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 if (mt76_queue_is_wed_rx(q)) {
 if (!rxwi) {
 rxwi = mt76_get_rxwi(dev);
- if (!rxwi)
+ if (!rxwi) {
+ q->rx_drop[MT_RX_DROP_DMAD_GET_RXWI_FAIL]++;
 return -ENOMEM;
+ }
 }

 rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
 if (rx_token < 0) {
 mt76_put_rxwi(dev, rxwi);
+ q->rx_drop[MT_RX_DROP_DMAD_GET_TOKEN_FAIL]++;
 return -ENOMEM;
 }

@@ -429,6 +432,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 struct mt76_desc *desc = &q->desc[idx];
 u32 ctrl, desc_info, buf1;
 void *buf = e->buf;
+ int reason;

 if (mt76_queue_is_wed_rro_ind(q))
 goto done;
@@ -444,7 +448,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 *info = desc_info;

 buf1 = le32_to_cpu(desc->buf1);
- mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);
+ reason = mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);
+ if (drop && *drop && reason >= 0)
+ q->rx_drop[reason]++;

 if (mt76_queue_is_wed_rx(q)) {
 u32 id, find = 0;
@@ -468,13 +474,17 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 }

 spin_unlock_bh(&dev->rx_token_lock);
- if (!find)
+ if (!find) {
+ q->rx_drop[MT_RX_DROP_DMAD_ADDR_NOT_FOUND]++;
 return NULL;
+ }
 }

 r = mt76_rx_token_release(dev, token);
- if (!r)
+ if (!r) {
+ q->rx_drop[MT_RX_DROP_DMAD_TOKEN_NOT_FOUND]++;
 return NULL;
+ }

 dma_unmap_single(dev->dma_dev, r->dma_addr,
 SKB_WITH_OVERHEAD(q->buf_size),
@@ -490,8 +500,10 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 struct mt76_queue_buf qbuf;

 buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC | GFP_DMA32);
- if (!buf)
+ if (!buf) {
+ q->rx_drop[MT_RX_DROP_DMAD_NOMEM]++;
 return NULL;
+ }

 memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size));

@@ -501,6 +513,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 if (unlikely(dma_mapping_error(dev->dma_dev, r->dma_addr))) {
 skb_free_frag(r->ptr);
 mt76_put_rxwi(dev, r);
+ q->rx_drop[MT_RX_DROP_DMAD_DMA_MAPPING_FAIL]++;
 return NULL;
 }

@@ -518,8 +531,11 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 }
 }

- if (drop)
+ if (drop) {
 *drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
+ if (buf1 & MT_DMA_CTL_WO_DROP)
+ q->rx_drop[MT_RX_DROP_DMAD_WO_FRAG]++;
+ }
 } else {
 dma_unmap_single(dev->dma_dev, e->dma_addr[0],
 SKB_WITH_OVERHEAD(q->buf_size),
@@ -968,8 +984,10 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 goto free_frag;

 skb = build_skb(data, q->buf_size);
- if (!skb)
+ if (!skb) {
+ q->rx_drop[MT_RX_DROP_BUILD_SKB_FAIL]++;
 goto free_frag;
+ }

 skb_reserve(skb, q->buf_offset);

diff --git a/dma.h b/dma.h
index 619dc0f..6b2ee7e 100644
--- a/dma.h
+++ b/dma.h
@@ -92,27 +92,29 @@ mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 mt76_wed_dma_setup(dev, q, true);
 }

-static inline void
+static inline int
 mt76_dma_should_drop_buf(bool *drop, u32 ctrl, u32 buf1, u32 info)
 {
 if (!drop)
- return;
+ return -1;

 *drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A | MT_DMA_CTL_DROP));
 if (!(ctrl & MT_DMA_CTL_VER_MASK))
- return;
+ return MT_RX_DROP_DMAD_WO_DROP;

 switch (FIELD_GET(MT_DMA_WED_IND_REASON, buf1)) {
 case MT_DMA_WED_IND_REASON_REPEAT:
 *drop = true;
- break;
+ return MT_RX_DROP_DMAD_RRO_REPEAT;
 case MT_DMA_WED_IND_REASON_OLDPKT:
 *drop = !(info & MT_DMA_INFO_DMA_FRAG);
- break;
+ return MT_RX_DROP_DMAD_RRO_OLDPKT;
 default:
 *drop = !!(ctrl & MT_DMA_CTL_PN_CHK_FAIL);
- break;
+ return MT_RX_DROP_DMAD_RRO_PN_CHK_FAIL;
 }
+
+ return -1;
 }

 #endif
diff --git a/mac80211.c b/mac80211.c
index 225b290..0360172 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -774,6 +774,7 @@ static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
 }

 if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
+ phy->rx_dbg_stats.rx_drop[MT_RX_DROP_RFC_PKT]++;
 dev_kfree_skb(skb);
 return;
 }
@@ -811,6 +812,7 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)

 if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
 dev_kfree_skb(skb);
+ phy->rx_dbg_stats.rx_drop[MT_RX_DROP_STATE_ERR]++;
 return;
 }

@@ -1048,6 +1050,7 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
 {
 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
+ struct mt76_phy *phy;
 struct mt76_rx_status mstat;

 mstat = *((struct mt76_rx_status *)skb->cb);
@@ -1090,6 +1093,9 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,

 *sta = wcid_to_sta(mstat.wcid);
 *hw = mt76_phy_hw(dev, mstat.phy_idx);
+
+ phy = mt76_dev_phy(dev, mstat.phy_idx);
+ phy->rx_dbg_stats.rx_to_mac80211++;
 }

 static void
diff --git a/mt76.h b/mt76.h
index c1128d1..9be17b7 100644
--- a/mt76.h
+++ b/mt76.h
@@ -175,6 +175,33 @@ enum mt76_dfs_state {
 MT_DFS_STATE_ACTIVE,
 };

+enum {
+ /* Per queue counters */
+ MT_RX_DROP_DMAD_RRO_REPEAT,
+ MT_RX_DROP_DMAD_RRO_OLDPKT,
+ MT_RX_DROP_DMAD_RRO_PN_CHK_FAIL,
+ MT_RX_DROP_DMAD_WO_FRAG,
+ MT_RX_DROP_DMAD_WO_DROP,
+ MT_RX_DROP_DMAD_ADDR_NOT_FOUND,
+ MT_RX_DROP_DMAD_TOKEN_NOT_FOUND,
+ MT_RX_DROP_DMAD_GET_TOKEN_FAIL,
+ MT_RX_DROP_DMAD_GET_RXWI_FAIL,
+ MT_RX_DROP_DMAD_NOMEM,
+ MT_RX_DROP_DMAD_DMA_MAPPING_FAIL,
+ MT_RX_DROP_BUILD_SKB_FAIL,
+
+ MT_RX_DROP_PER_Q_MAX,
+
+ /* Per phy counters */
+ MT_RX_DROP_RXD_ERR = 0,
+ MT_RX_DROP_STATE_ERR,
+ MT_RX_DROP_RFC_PKT,
+ MT_RX_DROP_AGG_SN_LESS,
+ MT_RX_DROP_AGG_DUP,
+
+ MT_RX_DROP_PER_PHY_MAX,
+};
+
 struct mt76_queue_buf {
 dma_addr_t addr;
 u16 len;
@@ -243,6 +270,8 @@ struct mt76_queue {
 dma_addr_t desc_dma;
 struct sk_buff *rx_head;
 struct page_frag_cache rx_page;
+
+ u32 rx_drop[MT_RX_DROP_PER_Q_MAX];
 };

 struct mt76_mcu_ops {
@@ -870,6 +899,13 @@ struct mt76_tx_debug {
 u32 tx_drop[MT_TX_DROP_MAX];
 };

+struct mt76_rx_debug {
+ u32 rx_from_hw;
+ u32 rx_to_mac80211;
+
+ u32 rx_drop[MT_RX_DROP_PER_PHY_MAX];
+};
+
 struct mt76_phy {
 struct ieee80211_hw *hw;
 struct mt76_dev *dev;
@@ -928,6 +964,7 @@ struct mt76_phy {
 } leds;
 int tokens;
 struct mt76_tx_debug tx_dbg_stats;
+ struct mt76_rx_debug rx_dbg_stats;
 };

 struct mt76_dev {
diff --git a/mt7915/mac.c b/mt7915/mac.c
index 1e2ef8c..195b5f6 100644
--- a/mt7915/mac.c
+++ b/mt7915/mac.c
@@ -1175,9 +1175,11 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
 struct sk_buff *skb, u32 *info)
 {
 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct mt76_phy *phy;
 __le32 *rxd = (__le32 *)skb->data;
 __le32 *end = (__le32 *)&skb->data[skb->len];
 enum rx_pkt_type type;
+ u8 band_idx;

 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

@@ -1206,12 +1208,20 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
 dev_kfree_skb(skb);
 break;
 case PKT_TYPE_NORMAL:
+ band_idx = le32_get_bits(rxd[1], MT_RXD1_NORMAL_BAND_IDX);
+ phy = mt76_dev_phy(mdev, band_idx);
+ phy->rx_dbg_stats.rx_from_hw++;
+
 if (!mt7915_mac_fill_rx(dev, skb, q, info)) {
 mt76_rx(&dev->mt76, q, skb);
 return;
 }
 fallthrough;
 default:
+ band_idx = le32_get_bits(rxd[1], MT_RXD1_NORMAL_BAND_IDX);
+ phy = mt76_dev_phy(mdev, band_idx);
+ phy->rx_dbg_stats.rx_drop[MT_RX_DROP_RXD_ERR]++;
+
 dev_kfree_skb(skb);
 break;
 }
diff --git a/mt7915/mtk_debugfs.c b/mt7915/mtk_debugfs.c
index 568ecff..c9b58a2 100644
--- a/mt7915/mtk_debugfs.c
+++ b/mt7915/mtk_debugfs.c
@@ -4032,9 +4032,12 @@ static int mt7915_reset_counter(void *data, u64 val)
 struct mt7915_phy *phy = data;
 struct mt7915_dev *dev = phy->dev;
 struct mt76_wcid *wcid;
+ u8 qid = phy->mt76->band_idx ? MT_RXQ_BAND1 : MT_RXQ_MAIN;

 if (!dev->wlan_idx) {
 memset(&phy->mt76->tx_dbg_stats, 0, sizeof(struct mt76_tx_debug));
+ memset(&phy->mt76->rx_dbg_stats, 0, sizeof(struct mt76_rx_debug));
+ memset(&dev->mt76.q_rx[qid].rx_drop, 0, sizeof(u32) * MT_RX_DROP_PER_Q_MAX);

 return 0;
 }
@@ -4122,6 +4125,51 @@ mt7915_tx_drop_show(struct seq_file *s, void *data)

 DEFINE_SHOW_ATTRIBUTE(mt7915_tx_drop);

+static int
+mt7915_rx_drop_show(struct seq_file *s, void *data)
+{
+ struct mt7915_phy *phy = s->private;
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_rx_debug *stats = &phy->mt76->rx_dbg_stats;
+ struct mt76_queue *q;
+ u8 band_idx = phy->mt76->band_idx;
+ u8 qid = band_idx ? MT_RXQ_BAND1 : MT_RXQ_MAIN;
+
+ q = &dev->mt76.q_rx[qid];
+
+#define __pr(src, t) seq_printf(s, "Drop due to %s: %d\n", \
+ #t, src->rx_drop[MT_RX_DROP_##t])
+ seq_printf(s, "RXQ%d drop:\n", MT_RXQ_ID(qid));
+ __pr(q, DMAD_RRO_REPEAT);
+ __pr(q, DMAD_RRO_OLDPKT);
+ __pr(q, DMAD_RRO_PN_CHK_FAIL);
+ __pr(q, DMAD_WO_FRAG);
+ __pr(q, DMAD_WO_DROP);
+ __pr(q, DMAD_ADDR_NOT_FOUND);
+ __pr(q, DMAD_TOKEN_NOT_FOUND);
+ __pr(q, DMAD_GET_TOKEN_FAIL);
+ __pr(q, DMAD_GET_RXWI_FAIL);
+ __pr(q, DMAD_NOMEM);
+ __pr(q, DMAD_DMA_MAPPING_FAIL);
+ __pr(q, BUILD_SKB_FAIL);
+
+ seq_printf(s, "\nPhy%d receive from hw: %d\n", band_idx, stats->rx_from_hw);
+ seq_printf(s, "Phy%d send to mac80211: %d\n", band_idx, stats->rx_to_mac80211);
+
+ seq_printf(s, "\nPhy%d drop:\n", band_idx);
+ __pr(stats, RXD_ERR);
+ __pr(stats, STATE_ERR);
+ __pr(stats, RFC_PKT);
+ __pr(stats, AGG_SN_LESS);
+ __pr(stats, AGG_DUP);
+
+#undef __pr
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7915_rx_drop);
+
 int mt7915_mtk_init_debugfs(struct mt7915_phy *phy, struct dentry *dir)
 {
 struct mt7915_dev *dev = phy->dev;
@@ -4222,6 +4270,7 @@ int mt7915_mtk_init_debugfs(struct mt7915_phy *phy, struct dentry *dir)
 debugfs_create_file("reset_counter", 0200, dir, phy, &fops_reset_counter);
 debugfs_create_devm_seqfile(dev->mt76.dev, "per", dir, mt7915_per_read);
 debugfs_create_file("tx_drop_stats", 0400, dir, phy, &mt7915_tx_drop_fops);
+ debugfs_create_file("rx_drop_stats", 0400, dir, phy, &mt7915_rx_drop_fops);

 return 0;
 }
--
2.18.0
