From 589b56aaeaead7d2d4fb28be20fd577e46706de3 Mon Sep 17 00:00:00 2001
From: Shayne Chen <shayne.chen@mediatek.com>
Date: Thu, 3 Nov 2022 00:27:17 +0800
Subject: [PATCH] wifi: mt76: mt7996: for build pass

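Make mt76 build against older kernels that lack the page_pool-based
RX buffer allocator and several newer networking APIs:

- dma.c, usb.c: go back to allocating RX buffers from a
  page_frag_cache via page_frag_alloc()/skb_free_frag() and map/unmap
  them by hand with dma_map_single()/dma_unmap_single().
- mac80211.c, mt76.h, mt7915, mt7921: drop mt76_create_page_pool(),
  the page_pool buffer helpers and the page_pool ethtool stats.
- dma.c: use build_skb() instead of napi_build_skb(); drop
  skb_mark_for_recycle() from the DMA and USB RX paths.
- debugfs.c: guard dev_set_threaded() behind #if 0 until the
  networking stack patch is backported.
- eeprom.c: use the legacy of_get_mac_address() that returns a
  pointer instead of filling a buffer.
- mt7996/dma.c: use netif_tx_napi_add() instead of netif_napi_add_tx().
- mcu.c, mt7615, mt76_connac, mt7915, mt7996: include
  <linux/module.h>/<linux/moduleparam.h> explicitly.
- mt7996/eeprom.c: hardcode the efuse HW cap word.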
---
 debugfs.c         |  3 ++
 dma.c             | 74 ++++++++++++++++++++++++-----------------------
 eeprom.c          |  8 ++++-
 mac80211.c        | 57 ------------------------------------
 mcu.c             |  1 +
 mt76.h            | 22 +-------------
 mt7615/mcu.c      |  1 +
 mt76_connac_mcu.c |  1 +
 mt7915/main.c     | 26 +++++++----------
 mt7915/mcu.c      |  1 +
 mt7915/mmio.c     | 55 +++++++++++++++++++++--------------
 mt7921/main.c     | 31 +++-----------------
 mt7996/dma.c      |  4 +--
 mt7996/eeprom.c   |  1 +
 mt7996/mcu.c      |  1 +
 usb.c             | 43 +++++++++++++--------------
 16 files changed, 127 insertions(+), 202 deletions(-)

diff --git a/debugfs.c b/debugfs.c
index 79064a4..e10d4cb 100644
--- a/debugfs.c
+++ b/debugfs.c
@@ -33,8 +33,11 @@ mt76_napi_threaded_set(void *data, u64 val)
 	if (!mt76_is_mmio(dev))
 		return -EOPNOTSUPP;
 
+#if 0
+	/* need to backport patch from networking stack */
 	if (dev->napi_dev.threaded != val)
 		return dev_set_threaded(&dev->napi_dev, val);
+#endif
 
 	return 0;
 }
diff --git a/dma.c b/dma.c
index df2ca73..2fc70e2 100644
--- a/dma.c
+++ b/dma.c
@@ -173,7 +173,7 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
 	local_bh_disable();
 	while ((t = __mt76_get_rxwi(dev)) != NULL) {
 		if (t->ptr)
-			mt76_put_page_pool_buf(t->ptr, false);
+			skb_free_frag(t->ptr);
 		kfree(t);
 	}
 	local_bh_enable();
@@ -409,9 +409,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	if (!t)
 		return NULL;
 
-	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
-				SKB_WITH_OVERHEAD(q->buf_size),
-				page_pool_get_dma_dir(q->page_pool));
+	dma_unmap_single(dev->dma_dev, t->dma_addr,
+			 SKB_WITH_OVERHEAD(q->buf_size),
+			 DMA_FROM_DEVICE);
 
 	buf = t->ptr;
 	t->dma_addr = 0;
@@ -430,9 +430,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	} else {
 		buf = e->buf;
 		e->buf = NULL;
-		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
-					SKB_WITH_OVERHEAD(q->buf_size),
-					page_pool_get_dma_dir(q->page_pool));
+		dma_unmap_single(dev->dma_dev, e->dma_addr[0],
+				 SKB_WITH_OVERHEAD(q->buf_size),
+				 DMA_FROM_DEVICE);
 	}
 
 	return buf;
@@ -584,11 +584,11 @@ free_skb:
 }
 
 static int
-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
-		 bool allow_direct)
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 {
 	int len = SKB_WITH_OVERHEAD(q->buf_size);
-	int frames = 0;
+	int frames = 0, offset = q->buf_offset;
+	dma_addr_t addr;
 
 	if (!q->ndesc)
 		return 0;
@@ -596,25 +596,26 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 	spin_lock_bh(&q->lock);
 
 	while (q->queued < q->ndesc - 1) {
-		enum dma_data_direction dir;
 		struct mt76_queue_buf qbuf;
-		dma_addr_t addr;
-		int offset;
-		void *buf;
+		void *buf = NULL;
 
-		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
 		if (!buf)
 			break;
 
-		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-		dir = page_pool_get_dma_dir(q->page_pool);
-		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
+		addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
+			skb_free_frag(buf);
+			break;
+		}
 
-		qbuf.addr = addr + q->buf_offset;
-		qbuf.len = len - q->buf_offset;
+		qbuf.addr = addr + offset;
+		qbuf.len = len - offset;
 		qbuf.skip_unmap = false;
 		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
-			mt76_put_page_pool_buf(buf, allow_direct);
+			dma_unmap_single(dev->dma_dev, addr, len,
+					 DMA_FROM_DEVICE);
+			skb_free_frag(buf);
 			break;
 		}
 		frames++;
@@ -658,7 +659,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 		/* WED txfree queue needs ring to be initialized before setup */
 		q->flags = 0;
 		mt76_dma_queue_reset(dev, q);
-		mt76_dma_rx_fill(dev, q, false);
+		mt76_dma_rx_fill(dev, q);
 		q->flags = flags;
 
 		ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
@@ -706,10 +707,6 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 	if (!q->entry)
 		return -ENOMEM;
 
-	ret = mt76_create_page_pool(dev, q);
-	if (ret)
-		return ret;
-
 	ret = mt76_dma_wed_setup(dev, q, false);
 	if (ret)
 		return ret;
@@ -723,6 +720,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 static void
 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 {
+	struct page *page;
 	void *buf;
 	bool more;
 
@@ -736,7 +734,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 		if (!buf)
 			break;
 
-		mt76_put_page_pool_buf(buf, false);
+		skb_free_frag(buf);
 	} while (1);
 
 	if (q->rx_head) {
@@ -745,6 +743,13 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 	}
 
 	spin_unlock_bh(&q->lock);
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void
@@ -765,7 +770,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 	mt76_dma_wed_setup(dev, q, true);
 	if (q->flags != MT_WED_Q_TXFREE) {
 		mt76_dma_sync_idx(dev, q);
-		mt76_dma_rx_fill(dev, q, false);
+		mt76_dma_rx_fill(dev, q);
 	}
 }
 
@@ -783,7 +788,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
 
 		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
 	} else {
-		mt76_put_page_pool_buf(data, true);
+		skb_free_frag(data);
 	}
 
 	if (more)
@@ -851,12 +856,11 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 		    !(dev->drv->rx_check(dev, data, len)))
 			goto free_frag;
 
-		skb = napi_build_skb(data, q->buf_size);
+		skb = build_skb(data, q->buf_size);
 		if (!skb)
 			goto free_frag;
 
 		skb_reserve(skb, q->buf_offset);
-		skb_mark_for_recycle(skb);
 
 		*(u32 *)skb->cb = info;
 
@@ -872,10 +876,10 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 			continue;
 
 free_frag:
-		mt76_put_page_pool_buf(data, true);
+		skb_free_frag(data);
 	}
 
-	mt76_dma_rx_fill(dev, q, true);
+	mt76_dma_rx_fill(dev, q);
 	return done;
 }
 
@@ -920,7 +924,7 @@ mt76_dma_init(struct mt76_dev *dev,
 
 	mt76_for_each_q_rx(dev, i) {
 		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
-		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
 		napi_enable(&dev->napi[i]);
 	}
 
@@ -971,8 +975,6 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 
 		netif_napi_del(&dev->napi[i]);
 		mt76_dma_rx_cleanup(dev, q);
-
-		page_pool_destroy(q->page_pool);
 	}
 
 	mt76_free_pending_txwi(dev);
diff --git a/eeprom.c b/eeprom.c
index 263e508..aa88925 100644
--- a/eeprom.c
+++ b/eeprom.c
@@ -108,9 +108,15 @@ void
 mt76_eeprom_override(struct mt76_phy *phy)
 {
 	struct mt76_dev *dev = phy->dev;
+#ifdef CONFIG_OF
 	struct device_node *np = dev->dev->of_node;
+	const u8 *mac = NULL;
 
-	of_get_mac_address(np, phy->macaddr);
+	if (np)
+		mac = of_get_mac_address(np);
+	if (!IS_ERR_OR_NULL(mac))
+		ether_addr_copy(phy->macaddr, mac);
+#endif
 
 	if (!is_valid_ether_addr(phy->macaddr)) {
 		eth_random_addr(phy->macaddr);
diff --git a/mac80211.c b/mac80211.c
index a4b3d34..40fda9d 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -4,7 +4,6 @@
  */
 #include <linux/sched.h>
 #include <linux/of.h>
-#include <net/page_pool.h>
 #include "mt76.h"
 
 #define CHAN2G(_idx, _freq) { \
@@ -562,47 +561,6 @@ void mt76_unregister_phy(struct mt76_phy *phy)
 }
 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
 
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
-{
-	struct page_pool_params pp_params = {
-		.order = 0,
-		.flags = PP_FLAG_PAGE_FRAG,
-		.nid = NUMA_NO_NODE,
-		.dev = dev->dma_dev,
-	};
-	int idx = q - dev->q_rx;
-
-	switch (idx) {
-	case MT_RXQ_MAIN:
-	case MT_RXQ_BAND1:
-	case MT_RXQ_BAND2:
-		pp_params.pool_size = 256;
-		break;
-	default:
-		pp_params.pool_size = 16;
-		break;
-	}
-
-	if (mt76_is_mmio(dev)) {
-		/* rely on page_pool for DMA mapping */
-		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
-		pp_params.dma_dir = DMA_FROM_DEVICE;
-		pp_params.max_len = PAGE_SIZE;
-		pp_params.offset = 0;
-	}
-
-	q->page_pool = page_pool_create(&pp_params);
-	if (IS_ERR(q->page_pool)) {
-		int err = PTR_ERR(q->page_pool);
-
-		q->page_pool = NULL;
-		return err;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mt76_create_page_pool);
-
 struct mt76_dev *
 mt76_alloc_device(struct device *pdev, unsigned int size,
 		  const struct ieee80211_ops *ops,
@@ -1747,21 +1705,6 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
 }
 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
 
-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
-{
-#ifdef CONFIG_PAGE_POOL_STATS
-	struct page_pool_stats stats = {};
-	int i;
-
-	mt76_for_each_q_rx(dev, i)
-		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
-
-	page_pool_ethtool_stats_get(data, &stats);
-	*index += page_pool_ethtool_stats_get_count();
-#endif
-}
-EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
-
 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
 {
 	struct ieee80211_hw *hw = phy->hw;
diff --git a/mcu.c b/mcu.c
index a8cafa3..fa4b054 100644
--- a/mcu.c
+++ b/mcu.c
@@ -4,6 +4,7 @@
  */
 
 #include "mt76.h"
+#include <linux/moduleparam.h>
 
 struct sk_buff *
 __mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
diff --git a/mt76.h b/mt76.h
index 343bd91..3d96b33 100644
--- a/mt76.h
+++ b/mt76.h
@@ -202,7 +202,7 @@ struct mt76_queue {
 
 	dma_addr_t desc_dma;
 	struct sk_buff *rx_head;
-	struct page_pool *page_pool;
+	struct page_frag_cache rx_page;
 };
 
 struct mt76_mcu_ops {
@@ -1340,7 +1340,6 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
 	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
 }
 
-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
 			 struct mt76_sta_stats *stats, bool eht);
 int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
@@ -1452,25 +1451,6 @@ void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
 struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
 			  struct mt76_txwi_cache *r, dma_addr_t phys);
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
-static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
-{
-	struct page *page = virt_to_head_page(buf);
-
-	page_pool_put_full_page(page->pp, page, allow_direct);
-}
-
-static inline void *
-mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
-{
-	struct page *page;
-
-	page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
-	if (!page)
-		return NULL;
-
-	return page_address(page) + *offset;
-}
 
 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
diff --git a/mt7615/mcu.c b/mt7615/mcu.c
index eea398c..4593b2e 100644
--- a/mt7615/mcu.c
+++ b/mt7615/mcu.c
@@ -10,6 +10,7 @@
 #include "mcu.h"
 #include "mac.h"
 #include "eeprom.h"
+#include <linux/moduleparam.h>
 
 static bool prefer_offload_fw = true;
 module_param(prefer_offload_fw, bool, 0644);
diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
index 4e4f6b3..e581522 100644
--- a/mt76_connac_mcu.c
+++ b/mt76_connac_mcu.c
@@ -4,6 +4,7 @@
 #include <linux/firmware.h>
 #include "mt76_connac2_mac.h"
 #include "mt76_connac_mcu.h"
+#include <linux/module.h>
 
 int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option)
 {
diff --git a/mt7915/main.c b/mt7915/main.c
index 3bbccbd..161a2d1 100644
--- a/mt7915/main.c
+++ b/mt7915/main.c
@@ -1291,22 +1291,19 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw,
 			   struct ieee80211_vif *vif,
 			   u32 sset, u8 *data)
 {
-	if (sset != ETH_SS_STATS)
-		return;
-
-	memcpy(data, *mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
-	data += sizeof(mt7915_gstrings_stats);
-	page_pool_ethtool_stats_get_strings(data);
+	if (sset == ETH_SS_STATS)
+		memcpy(data, *mt7915_gstrings_stats,
+		       sizeof(mt7915_gstrings_stats));
 }
 
 static
 int mt7915_get_et_sset_count(struct ieee80211_hw *hw,
 			     struct ieee80211_vif *vif, int sset)
 {
-	if (sset != ETH_SS_STATS)
-		return 0;
+	if (sset == ETH_SS_STATS)
+		return MT7915_SSTATS_LEN;
 
-	return MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
+	return 0;
 }
 
 static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
@@ -1334,7 +1331,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
 	};
 	struct mib_stats *mib = &phy->mib;
 	/* See mt7915_ampdu_stat_read_phy, etc */
-	int i, ei = 0, stats_size;
+	int i, ei = 0;
 
 	mutex_lock(&dev->mt76.mutex);
 
@@ -1415,12 +1412,9 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
 		return;
 
 	ei += wi.worker_stat_count;
-
-	mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
-
-	stats_size = MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
-	if (ei != stats_size)
-		dev_err(dev->mt76.dev, "ei: %d size: %d", ei, stats_size);
+	if (ei != MT7915_SSTATS_LEN)
+		dev_err(dev->mt76.dev, "ei: %d MT7915_SSTATS_LEN: %d",
+			ei, (int)MT7915_SSTATS_LEN);
 }
 
 static void
diff --git a/mt7915/mcu.c b/mt7915/mcu.c
index d08907f..99ef8c9 100644
--- a/mt7915/mcu.c
+++ b/mt7915/mcu.c
@@ -6,6 +6,7 @@
 #include "mcu.h"
 #include "mac.h"
 #include "eeprom.h"
+#include <linux/moduleparam.h>
 
 #define fw_name(_dev, name, ...) ({ \
 	char *_fw; \
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 6f0c0e2..5ef43c4 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -596,9 +596,13 @@ static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
 static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 {
 	struct mt7915_dev *dev;
+	u32 length;
 	int i;
 
 	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+	length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+				sizeof(struct skb_shared_info));
+
 	for (i = 0; i < dev->mt76.rx_token_size; i++) {
 		struct mt76_txwi_cache *t;
 
@@ -606,7 +610,9 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 		if (!t || !t->ptr)
 			continue;
 
-		mt76_put_page_pool_buf(t->ptr, false);
+		dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
+				 wed->wlan.rx_size, DMA_FROM_DEVICE);
+		__free_pages(virt_to_page(t->ptr), get_order(length));
 		t->ptr = NULL;
 
 		mt76_put_rxwi(&dev->mt76, t);
@@ -618,38 +624,47 @@ mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 {
 	struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
-	struct mt76_txwi_cache *t = NULL;
 	struct mt7915_dev *dev;
-	struct mt76_queue *q;
-	int i, len;
+	u32 length;
+	int i;
 
 	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
-	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
-	len = SKB_WITH_OVERHEAD(q->buf_size);
+	length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+				sizeof(struct skb_shared_info));
 
 	for (i = 0; i < size; i++) {
-		enum dma_data_direction dir;
-		dma_addr_t addr;
-		u32 offset;
+		struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76);
+		dma_addr_t phy_addr;
+		struct page *page;
 		int token;
-		void *buf;
+		void *ptr;
 
-		t = mt76_get_rxwi(&dev->mt76);
 		if (!t)
 			goto unmap;
 
-		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
-		if (!buf)
+		page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
+		if (!page) {
+			mt76_put_rxwi(&dev->mt76, t);
 			goto unmap;
+		}
 
-		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-		dir = page_pool_get_dma_dir(q->page_pool);
-		dma_sync_single_for_device(dev->mt76.dma_dev, addr, len, dir);
+		ptr = page_address(page);
+		phy_addr = dma_map_single(dev->mt76.dma_dev, ptr,
+					  wed->wlan.rx_size,
+					  DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
+			__free_pages(page, get_order(length));
+			mt76_put_rxwi(&dev->mt76, t);
+			goto unmap;
+		}
 
-		desc->buf0 = cpu_to_le32(addr);
-		token = mt76_rx_token_consume(&dev->mt76, buf, t, addr);
+		desc->buf0 = cpu_to_le32(phy_addr);
+		token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
 		if (token < 0) {
-			mt76_put_page_pool_buf(buf, false);
+			dma_unmap_single(dev->mt76.dma_dev, phy_addr,
+					 wed->wlan.rx_size, DMA_TO_DEVICE);
+			__free_pages(page, get_order(length));
+			mt76_put_rxwi(&dev->mt76, t);
 			goto unmap;
 		}
 
@@ -661,8 +676,6 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 	return 0;
 
 unmap:
-	if (t)
-		mt76_put_rxwi(&dev->mt76, t);
 	mt7915_mmio_wed_release_rx_buf(wed);
 	return -ENOMEM;
 }
diff --git a/mt7921/main.c b/mt7921/main.c
index a72964e..4c40022 100644
--- a/mt7921/main.c
+++ b/mt7921/main.c
@@ -1090,34 +1090,17 @@ static void
 mt7921_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		      u32 sset, u8 *data)
 {
-	struct mt7921_dev *dev = mt7921_hw_dev(hw);
-
 	if (sset != ETH_SS_STATS)
 		return;
 
 	memcpy(data, *mt7921_gstrings_stats, sizeof(mt7921_gstrings_stats));
-
-	if (mt76_is_sdio(&dev->mt76))
-		return;
-
-	data += sizeof(mt7921_gstrings_stats);
-	page_pool_ethtool_stats_get_strings(data);
 }
 
 static int
 mt7921_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			 int sset)
 {
-	struct mt7921_dev *dev = mt7921_hw_dev(hw);
-
-	if (sset != ETH_SS_STATS)
-		return 0;
-
-	if (mt76_is_sdio(&dev->mt76))
-		return ARRAY_SIZE(mt7921_gstrings_stats);
-
-	return ARRAY_SIZE(mt7921_gstrings_stats) +
-	       page_pool_ethtool_stats_get_count();
+	return sset == ETH_SS_STATS ? ARRAY_SIZE(mt7921_gstrings_stats) : 0;
 }
 
 static void
@@ -1137,7 +1120,6 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			 struct ethtool_stats *stats, u64 *data)
 {
 	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
-	int stats_size = ARRAY_SIZE(mt7921_gstrings_stats);
 	struct mt7921_phy *phy = mt7921_hw_phy(hw);
 	struct mt7921_dev *dev = phy->dev;
 	struct mib_stats *mib = &phy->mib;
@@ -1193,14 +1175,9 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		return;
 
 	ei += wi.worker_stat_count;
-
-	if (!mt76_is_sdio(&dev->mt76)) {
-		mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
-		stats_size += page_pool_ethtool_stats_get_count();
-	}
-
-	if (ei != stats_size)
-		dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %d", ei, stats_size);
+	if (ei != ARRAY_SIZE(mt7921_gstrings_stats))
+		dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %zu",
+			ei, ARRAY_SIZE(mt7921_gstrings_stats));
 }
 
 static u64
diff --git a/mt7996/dma.c b/mt7996/dma.c
index 18ea758..3e2967f 100644
--- a/mt7996/dma.c
+++ b/mt7996/dma.c
@@ -343,8 +343,8 @@ int mt7996_dma_init(struct mt7996_dev *dev)
 	if (ret < 0)
 		return ret;
 
-	netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
-			  mt7996_poll_tx);
+	netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+			  mt7996_poll_tx, NAPI_POLL_WEIGHT);
 	napi_enable(&dev->mt76.tx_napi);
 
 	mt7996_dma_enable(dev);
diff --git a/mt7996/eeprom.c b/mt7996/eeprom.c
index 64e3c4e..7bff504 100644
--- a/mt7996/eeprom.c
+++ b/mt7996/eeprom.c
@@ -121,6 +121,7 @@ static int mt7996_eeprom_parse_efuse_hw_cap(struct mt7996_dev *dev)
 	if (ret)
 		return ret;
 
+	cap = 0x4b249248; /* internal hardcode */
 	if (cap) {
 		dev->has_eht = !(cap & MODE_HE_ONLY);
 		dev->wtbl_size_group = u32_get_bits(cap, WTBL_SIZE_GROUP);
diff --git a/mt7996/mcu.c b/mt7996/mcu.c
index 0a52afd..cc94839 100644
--- a/mt7996/mcu.c
+++ b/mt7996/mcu.c
@@ -5,6 +5,7 @@
 
 #include <linux/firmware.h>
 #include <linux/fs.h>
+#include <linux/moduleparam.h>
 #include "mt7996.h"
 #include "mcu.h"
 #include "mac.h"
diff --git a/usb.c b/usb.c
index 5e5c7bf..3e28171 100644
--- a/usb.c
+++ b/usb.c
@@ -319,27 +319,29 @@ mt76u_set_endpoints(struct usb_interface *intf,
 
 static int
 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
-		 int nsgs)
+		 int nsgs, gfp_t gfp)
 {
 	int i;
 
 	for (i = 0; i < nsgs; i++) {
+		struct page *page;
 		void *data;
 		int offset;
 
-		data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
 		if (!data)
 			break;
 
-		sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
-			    offset);
+		page = virt_to_head_page(data);
+		offset = data - page_address(page);
+		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
 	}
 
 	if (i < nsgs) {
 		int j;
 
 		for (j = nsgs; j < urb->num_sgs; j++)
-			mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
+			skb_free_frag(sg_virt(&urb->sg[j]));
 		urb->num_sgs = i;
 	}
 
@@ -352,16 +354,15 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
 
 static int
 mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
-		struct urb *urb, int nsgs)
+		struct urb *urb, int nsgs, gfp_t gfp)
 {
 	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
-	int offset;
 
 	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
-		return mt76u_fill_rx_sg(dev, q, urb, nsgs);
+		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
 
 	urb->transfer_buffer_length = q->buf_size;
-	urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
 
 	return urb->transfer_buffer ? 0 : -ENOMEM;
 }
@@ -399,7 +400,7 @@ mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
 	if (err)
 		return err;
 
-	return mt76u_refill_rx(dev, q, e->urb, sg_size);
+	return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
 }
 
 static void mt76u_urb_free(struct urb *urb)
@@ -407,10 +408,10 @@ static void mt76u_urb_free(struct urb *urb)
 	int i;
 
 	for (i = 0; i < urb->num_sgs; i++)
-		mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
+		skb_free_frag(sg_virt(&urb->sg[i]));
 
 	if (urb->transfer_buffer)
-		mt76_put_page_pool_buf(urb->transfer_buffer, false);
+		skb_free_frag(urb->transfer_buffer);
 
 	usb_free_urb(urb);
 }
@@ -546,8 +547,6 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
 		len -= data_len;
 		nsgs++;
 	}
-
-	skb_mark_for_recycle(skb);
 	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);
 
 	return nsgs;
@@ -613,7 +612,7 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 
 		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
 		if (count > 0) {
-			err = mt76u_refill_rx(dev, q, urb, count);
+			err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
 			if (err < 0)
 				break;
 		}
@@ -664,10 +663,6 @@ mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
 	struct mt76_queue *q = &dev->q_rx[qid];
 	int i, err;
 
-	err = mt76_create_page_pool(dev, q);
-	if (err)
-		return err;
-
 	spin_lock_init(&q->lock);
 	q->entry = devm_kcalloc(dev->dev,
 				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -696,6 +691,7 @@ EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
 static void
 mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
+	struct page *page;
 	int i;
 
 	for (i = 0; i < q->ndesc; i++) {
@@ -705,8 +701,13 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 		mt76u_urb_free(q->entry[i].urb);
 		q->entry[i].urb = NULL;
 	}
-	page_pool_destroy(q->page_pool);
-	q->page_pool = NULL;
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void mt76u_free_rx(struct mt76_dev *dev)
-- 
2.18.0
