[rdkb][common][bsp][Refactor and sync wifi from openwrt]
[Description]
Refactor and sync the wifi stack from OpenWrt. This change picks up the following mac80211/mt76/hostapd commits:
a0c7684 [MAC80211][mt76][update mt76 patches]
a2e0f7c [MAC80211][core][Remove start disabled patch in eagle]
4562ef0 [MAC80211][hostapd][Refactor hostapd patch for git am]
22614f8 [mt76][Add vendor cmd to get available color bitmap]
b7b4fef [mac80211][Track obss color bitmap]
3f4ab41 [hostapd][Add hostapd_cli cmd to get available color bitmap]
cb120e7 [MAC80211][core][remove ba timer disabled patches]
24f983f [MAC80211][misc][sync iproute2 package]
3e866ee [MAC80211][core][Mark DFS channel available for CSA]
bdc6428 [MAC80211][hostapd][Mark DFS channel available for CSA]
9113e92 [MAC80211][app][Add eagle testmode iwpriv wrapper support]
d6bd8f4 [mac80211][mt76][Refactor mt76 patches]
7829a83 [MAC80211][mt76][Add monitor vif check in testmode]
19ead62 [mt76][eagle][hostapd mbssid and ema support]
[Release-log]
Change-Id: I75bf6ff01bc50054404bca23fd31cff9d1bc8d86
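
A condensed sketch of the RX buffer scheme the new patch below restores
(the helper names rx_buf_alloc/rx_queue_drain are illustrative only, not
part of the patch): each RX queue goes back to a per-queue
page_frag_cache that the driver DMA-maps and drains by hand, instead of
relying on page_pool for allocation, mapping and recycling.

    /* Allocate one RX fragment and DMA-map it, mirroring what
     * mt76_dma_rx_fill() does after the revert. */
    static void *rx_buf_alloc(struct mt76_dev *dev, struct mt76_queue *q,
                              dma_addr_t *addr)
    {
            int len = SKB_WITH_OVERHEAD(q->buf_size);
            void *buf;

            buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
            if (!buf)
                    return NULL;

            *addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
            if (unlikely(dma_mapping_error(dev->dma_dev, *addr))) {
                    skb_free_frag(buf);
                    return NULL;
            }
            return buf;
    }

    /* Drop the queue's fragment cache on teardown, mirroring what
     * mt76_dma_rx_cleanup() and mt76u_free_rx_queue() do after the
     * revert. */
    static void rx_queue_drain(struct mt76_queue *q)
    {
            struct page *page;

            if (!q->rx_page.va)
                    return;

            page = virt_to_page(q->rx_page.va);
            __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
            memset(&q->rx_page, 0, sizeof(q->rx_page));
    }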
diff --git a/recipes-wifi/linux-mt76/files/patches-3.x/0019-mt76-revert-page-pool-changes.patch b/recipes-wifi/linux-mt76/files/patches-3.x/0019-mt76-revert-page-pool-changes.patch
new file mode 100644
index 0000000..20feead
--- /dev/null
+++ b/recipes-wifi/linux-mt76/files/patches-3.x/0019-mt76-revert-page-pool-changes.patch
@@ -0,0 +1,709 @@
+From 175bf122ac5790e7e28dadecd9410370364bc16a Mon Sep 17 00:00:00 2001
+From: Shayne Chen <shayne.chen@mediatek.com>
+Date: Mon, 6 Feb 2023 15:34:43 +0800
+Subject: [PATCH 19/19] mt76: revert page pool changes
+
+---
+ dma.c | 72 ++++++++++++++++++++++++++-------------------------
+ mac80211.c | 57 ----------------------------------------
+ mt76.h | 22 +---------------
+ mt7915/main.c | 26 +++++++------------
+ mt7915/mmio.c | 55 ++++++++++++++++++++++++---------------
+ mt7921/main.c | 31 +++-------------------
+ usb.c | 43 +++++++++++++++---------------
+ 7 files changed, 108 insertions(+), 198 deletions(-)
+
+diff --git a/dma.c b/dma.c
+index df2ca73f..7357b398 100644
+--- a/dma.c
++++ b/dma.c
+@@ -173,7 +173,7 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
+ local_bh_disable();
+ while ((t = __mt76_get_rxwi(dev)) != NULL) {
+ if (t->ptr)
+- mt76_put_page_pool_buf(t->ptr, false);
++ skb_free_frag(t->ptr);
+ kfree(t);
+ }
+ local_bh_enable();
+@@ -409,9 +409,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+ if (!t)
+ return NULL;
+
+- dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
+- SKB_WITH_OVERHEAD(q->buf_size),
+- page_pool_get_dma_dir(q->page_pool));
++ dma_unmap_single(dev->dma_dev, t->dma_addr,
++ SKB_WITH_OVERHEAD(q->buf_size),
++ DMA_FROM_DEVICE);
+
+ buf = t->ptr;
+ t->dma_addr = 0;
+@@ -430,9 +430,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+ } else {
+ buf = e->buf;
+ e->buf = NULL;
+- dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
+- SKB_WITH_OVERHEAD(q->buf_size),
+- page_pool_get_dma_dir(q->page_pool));
++ dma_unmap_single(dev->dma_dev, e->dma_addr[0],
++ SKB_WITH_OVERHEAD(q->buf_size),
++ DMA_FROM_DEVICE);
+ }
+
+ return buf;
+@@ -584,11 +584,11 @@ free_skb:
+ }
+
+ static int
+-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+- bool allow_direct)
++mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
+ {
+ int len = SKB_WITH_OVERHEAD(q->buf_size);
+- int frames = 0;
++ int frames = 0, offset = q->buf_offset;
++ dma_addr_t addr;
+
+ if (!q->ndesc)
+ return 0;
+@@ -596,25 +596,26 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+ spin_lock_bh(&q->lock);
+
+ while (q->queued < q->ndesc - 1) {
+- enum dma_data_direction dir;
+ struct mt76_queue_buf qbuf;
+- dma_addr_t addr;
+- int offset;
+- void *buf;
++ void *buf = NULL;
+
+- buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
++ buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+ if (!buf)
+ break;
+
+- addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
+- dir = page_pool_get_dma_dir(q->page_pool);
+- dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
++ addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
++ if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
++ skb_free_frag(buf);
++ break;
++ }
+
+- qbuf.addr = addr + q->buf_offset;
+- qbuf.len = len - q->buf_offset;
++ qbuf.addr = addr + offset;
++ qbuf.len = len - offset;
+ qbuf.skip_unmap = false;
+ if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
+- mt76_put_page_pool_buf(buf, allow_direct);
++ dma_unmap_single(dev->dma_dev, addr, len,
++ DMA_FROM_DEVICE);
++ skb_free_frag(buf);
+ break;
+ }
+ frames++;
+@@ -658,7 +659,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
+ /* WED txfree queue needs ring to be initialized before setup */
+ q->flags = 0;
+ mt76_dma_queue_reset(dev, q);
+- mt76_dma_rx_fill(dev, q, false);
++ mt76_dma_rx_fill(dev, q);
+ q->flags = flags;
+
+ ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
+@@ -706,10 +707,6 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
+ if (!q->entry)
+ return -ENOMEM;
+
+- ret = mt76_create_page_pool(dev, q);
+- if (ret)
+- return ret;
+-
+ ret = mt76_dma_wed_setup(dev, q, false);
+ if (ret)
+ return ret;
+@@ -723,6 +720,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
+ static void
+ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
+ {
++ struct page *page;
+ void *buf;
+ bool more;
+
+@@ -736,7 +734,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
+ if (!buf)
+ break;
+
+- mt76_put_page_pool_buf(buf, false);
++ skb_free_frag(buf);
+ } while (1);
+
+ if (q->rx_head) {
+@@ -745,6 +743,13 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
+ }
+
+ spin_unlock_bh(&q->lock);
++
++ if (!q->rx_page.va)
++ return;
++
++ page = virt_to_page(q->rx_page.va);
++ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
++ memset(&q->rx_page, 0, sizeof(q->rx_page));
+ }
+
+ static void
+@@ -765,7 +770,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
+ mt76_dma_wed_setup(dev, q, true);
+ if (q->flags != MT_WED_Q_TXFREE) {
+ mt76_dma_sync_idx(dev, q);
+- mt76_dma_rx_fill(dev, q, false);
++ mt76_dma_rx_fill(dev, q);
+ }
+ }
+
+@@ -783,7 +788,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
+
+ skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
+ } else {
+- mt76_put_page_pool_buf(data, true);
++ skb_free_frag(data);
+ }
+
+ if (more)
+@@ -856,7 +861,6 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+ goto free_frag;
+
+ skb_reserve(skb, q->buf_offset);
+- skb_mark_for_recycle(skb);
+
+ *(u32 *)skb->cb = info;
+
+@@ -872,10 +876,10 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+ continue;
+
+ free_frag:
+- mt76_put_page_pool_buf(data, true);
++ skb_free_frag(data);
+ }
+
+- mt76_dma_rx_fill(dev, q, true);
++ mt76_dma_rx_fill(dev, q);
+ return done;
+ }
+
+@@ -920,7 +924,7 @@ mt76_dma_init(struct mt76_dev *dev,
+
+ mt76_for_each_q_rx(dev, i) {
+ netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
+- mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
++ mt76_dma_rx_fill(dev, &dev->q_rx[i]);
+ napi_enable(&dev->napi[i]);
+ }
+
+@@ -971,8 +975,6 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
+
+ netif_napi_del(&dev->napi[i]);
+ mt76_dma_rx_cleanup(dev, q);
+-
+- page_pool_destroy(q->page_pool);
+ }
+
+ mt76_free_pending_txwi(dev);
+diff --git a/mac80211.c b/mac80211.c
+index e53166fc..d69e7214 100644
+--- a/mac80211.c
++++ b/mac80211.c
+@@ -4,7 +4,6 @@
+ */
+ #include <linux/sched.h>
+ #include <linux/of.h>
+-#include <net/page_pool.h>
+ #include "mt76.h"
+
+ #define CHAN2G(_idx, _freq) { \
+@@ -562,47 +561,6 @@ void mt76_unregister_phy(struct mt76_phy *phy)
+ }
+ EXPORT_SYMBOL_GPL(mt76_unregister_phy);
+
+-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
+-{
+- struct page_pool_params pp_params = {
+- .order = 0,
+- .flags = PP_FLAG_PAGE_FRAG,
+- .nid = NUMA_NO_NODE,
+- .dev = dev->dma_dev,
+- };
+- int idx = q - dev->q_rx;
+-
+- switch (idx) {
+- case MT_RXQ_MAIN:
+- case MT_RXQ_BAND1:
+- case MT_RXQ_BAND2:
+- pp_params.pool_size = 256;
+- break;
+- default:
+- pp_params.pool_size = 16;
+- break;
+- }
+-
+- if (mt76_is_mmio(dev)) {
+- /* rely on page_pool for DMA mapping */
+- pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+- pp_params.dma_dir = DMA_FROM_DEVICE;
+- pp_params.max_len = PAGE_SIZE;
+- pp_params.offset = 0;
+- }
+-
+- q->page_pool = page_pool_create(&pp_params);
+- if (IS_ERR(q->page_pool)) {
+- int err = PTR_ERR(q->page_pool);
+-
+- q->page_pool = NULL;
+- return err;
+- }
+-
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(mt76_create_page_pool);
+-
+ struct mt76_dev *
+ mt76_alloc_device(struct device *pdev, unsigned int size,
+ const struct ieee80211_ops *ops,
+@@ -1746,21 +1704,6 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
+ }
+ EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
+
+-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
+-{
+-#ifdef CONFIG_PAGE_POOL_STATS
+- struct page_pool_stats stats = {};
+- int i;
+-
+- mt76_for_each_q_rx(dev, i)
+- page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
+-
+- page_pool_ethtool_stats_get(data, &stats);
+- *index += page_pool_ethtool_stats_get_count();
+-#endif
+-}
+-EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
+-
+ enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
+ {
+ struct ieee80211_hw *hw = phy->hw;
+diff --git a/mt76.h b/mt76.h
+index c3d1313e..49da2caa 100644
+--- a/mt76.h
++++ b/mt76.h
+@@ -202,7 +202,7 @@ struct mt76_queue {
+
+ dma_addr_t desc_dma;
+ struct sk_buff *rx_head;
+- struct page_pool *page_pool;
++ struct page_frag_cache rx_page;
+ };
+
+ struct mt76_mcu_ops {
+@@ -1329,7 +1329,6 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
+ return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
+ }
+
+-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
+ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
+ struct mt76_sta_stats *stats, bool eht);
+ int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
+@@ -1441,25 +1440,6 @@ void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
+ struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
+ int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
+ struct mt76_txwi_cache *r, dma_addr_t phys);
+-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
+-static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
+-{
+- struct page *page = virt_to_head_page(buf);
+-
+- page_pool_put_full_page(page->pp, page, allow_direct);
+-}
+-
+-static inline void *
+-mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
+-{
+- struct page *page;
+-
+- page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
+- if (!page)
+- return NULL;
+-
+- return page_address(page) + *offset;
+-}
+
+ static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
+ {
+diff --git a/mt7915/main.c b/mt7915/main.c
+index 3bbccbdf..161a2d1e 100644
+--- a/mt7915/main.c
++++ b/mt7915/main.c
+@@ -1291,22 +1291,19 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+ {
+- if (sset != ETH_SS_STATS)
+- return;
+-
+- memcpy(data, *mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
+- data += sizeof(mt7915_gstrings_stats);
+- page_pool_ethtool_stats_get_strings(data);
++ if (sset == ETH_SS_STATS)
++ memcpy(data, *mt7915_gstrings_stats,
++ sizeof(mt7915_gstrings_stats));
+ }
+
+ static
+ int mt7915_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+ {
+- if (sset != ETH_SS_STATS)
+- return 0;
++ if (sset == ETH_SS_STATS)
++ return MT7915_SSTATS_LEN;
+
+- return MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
++ return 0;
+ }
+
+ static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
+@@ -1334,7 +1331,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
+ };
+ struct mib_stats *mib = &phy->mib;
+ /* See mt7915_ampdu_stat_read_phy, etc */
+- int i, ei = 0, stats_size;
++ int i, ei = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+@@ -1415,12 +1412,9 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
+ return;
+
+ ei += wi.worker_stat_count;
+-
+- mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
+-
+- stats_size = MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
+- if (ei != stats_size)
+- dev_err(dev->mt76.dev, "ei: %d size: %d", ei, stats_size);
++ if (ei != MT7915_SSTATS_LEN)
++ dev_err(dev->mt76.dev, "ei: %d MT7915_SSTATS_LEN: %d",
++ ei, (int)MT7915_SSTATS_LEN);
+ }
+
+ static void
+diff --git a/mt7915/mmio.c b/mt7915/mmio.c
+index 6f0c0e2a..5ef43c44 100644
+--- a/mt7915/mmio.c
++++ b/mt7915/mmio.c
+@@ -596,9 +596,13 @@ static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
+ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
+ {
+ struct mt7915_dev *dev;
++ u32 length;
+ int i;
+
+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
++ length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
++ sizeof(struct skb_shared_info));
++
+ for (i = 0; i < dev->mt76.rx_token_size; i++) {
+ struct mt76_txwi_cache *t;
+
+@@ -606,7 +610,9 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
+ if (!t || !t->ptr)
+ continue;
+
+- mt76_put_page_pool_buf(t->ptr, false);
++ dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
++ wed->wlan.rx_size, DMA_FROM_DEVICE);
++ __free_pages(virt_to_page(t->ptr), get_order(length));
+ t->ptr = NULL;
+
+ mt76_put_rxwi(&dev->mt76, t);
+@@ -618,38 +624,47 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
+ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+ {
+ struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
+- struct mt76_txwi_cache *t = NULL;
+ struct mt7915_dev *dev;
+- struct mt76_queue *q;
+- int i, len;
++ u32 length;
++ int i;
+
+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+- q = &dev->mt76.q_rx[MT_RXQ_MAIN];
+- len = SKB_WITH_OVERHEAD(q->buf_size);
++ length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
++ sizeof(struct skb_shared_info));
+
+ for (i = 0; i < size; i++) {
+- enum dma_data_direction dir;
+- dma_addr_t addr;
+- u32 offset;
++ struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76);
++ dma_addr_t phy_addr;
++ struct page *page;
+ int token;
+- void *buf;
++ void *ptr;
+
+- t = mt76_get_rxwi(&dev->mt76);
+ if (!t)
+ goto unmap;
+
+- buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+- if (!buf)
++ page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
++ if (!page) {
++ mt76_put_rxwi(&dev->mt76, t);
+ goto unmap;
++ }
+
+- addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
+- dir = page_pool_get_dma_dir(q->page_pool);
+- dma_sync_single_for_device(dev->mt76.dma_dev, addr, len, dir);
++ ptr = page_address(page);
++ phy_addr = dma_map_single(dev->mt76.dma_dev, ptr,
++ wed->wlan.rx_size,
++ DMA_TO_DEVICE);
++ if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
++ __free_pages(page, get_order(length));
++ mt76_put_rxwi(&dev->mt76, t);
++ goto unmap;
++ }
+
+- desc->buf0 = cpu_to_le32(addr);
+- token = mt76_rx_token_consume(&dev->mt76, buf, t, addr);
++ desc->buf0 = cpu_to_le32(phy_addr);
++ token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
+ if (token < 0) {
+- mt76_put_page_pool_buf(buf, false);
++ dma_unmap_single(dev->mt76.dma_dev, phy_addr,
++ wed->wlan.rx_size, DMA_TO_DEVICE);
++ __free_pages(page, get_order(length));
++ mt76_put_rxwi(&dev->mt76, t);
+ goto unmap;
+ }
+
+@@ -661,8 +676,6 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+ return 0;
+
+ unmap:
+- if (t)
+- mt76_put_rxwi(&dev->mt76, t);
+ mt7915_mmio_wed_release_rx_buf(wed);
+ return -ENOMEM;
+ }
+diff --git a/mt7921/main.c b/mt7921/main.c
+index a72964e7..4c400223 100644
+--- a/mt7921/main.c
++++ b/mt7921/main.c
+@@ -1090,34 +1090,17 @@ static void
+ mt7921_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+ {
+- struct mt7921_dev *dev = mt7921_hw_dev(hw);
+-
+ if (sset != ETH_SS_STATS)
+ return;
+
+ memcpy(data, *mt7921_gstrings_stats, sizeof(mt7921_gstrings_stats));
+-
+- if (mt76_is_sdio(&dev->mt76))
+- return;
+-
+- data += sizeof(mt7921_gstrings_stats);
+- page_pool_ethtool_stats_get_strings(data);
+ }
+
+ static int
+ mt7921_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int sset)
+ {
+- struct mt7921_dev *dev = mt7921_hw_dev(hw);
+-
+- if (sset != ETH_SS_STATS)
+- return 0;
+-
+- if (mt76_is_sdio(&dev->mt76))
+- return ARRAY_SIZE(mt7921_gstrings_stats);
+-
+- return ARRAY_SIZE(mt7921_gstrings_stats) +
+- page_pool_ethtool_stats_get_count();
++ return sset == ETH_SS_STATS ? ARRAY_SIZE(mt7921_gstrings_stats) : 0;
+ }
+
+ static void
+@@ -1137,7 +1120,6 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+ {
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+- int stats_size = ARRAY_SIZE(mt7921_gstrings_stats);
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ struct mt7921_dev *dev = phy->dev;
+ struct mib_stats *mib = &phy->mib;
+@@ -1193,14 +1175,9 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ return;
+
+ ei += wi.worker_stat_count;
+-
+- if (!mt76_is_sdio(&dev->mt76)) {
+- mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
+- stats_size += page_pool_ethtool_stats_get_count();
+- }
+-
+- if (ei != stats_size)
+- dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %d", ei, stats_size);
++ if (ei != ARRAY_SIZE(mt7921_gstrings_stats))
++ dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %zu",
++ ei, ARRAY_SIZE(mt7921_gstrings_stats));
+ }
+
+ static u64
+diff --git a/usb.c b/usb.c
+index 5e5c7bf5..3e281715 100644
+--- a/usb.c
++++ b/usb.c
+@@ -319,27 +319,29 @@ mt76u_set_endpoints(struct usb_interface *intf,
+
+ static int
+ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
+- int nsgs)
++ int nsgs, gfp_t gfp)
+ {
+ int i;
+
+ for (i = 0; i < nsgs; i++) {
++ struct page *page;
+ void *data;
+ int offset;
+
+- data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
++ data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
+ if (!data)
+ break;
+
+- sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
+- offset);
++ page = virt_to_head_page(data);
++ offset = data - page_address(page);
++ sg_set_page(&urb->sg[i], page, q->buf_size, offset);
+ }
+
+ if (i < nsgs) {
+ int j;
+
+ for (j = nsgs; j < urb->num_sgs; j++)
+- mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
++ skb_free_frag(sg_virt(&urb->sg[j]));
+ urb->num_sgs = i;
+ }
+
+@@ -352,16 +354,15 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
+
+ static int
+ mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
+- struct urb *urb, int nsgs)
++ struct urb *urb, int nsgs, gfp_t gfp)
+ {
+ enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
+- int offset;
+
+ if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
+- return mt76u_fill_rx_sg(dev, q, urb, nsgs);
++ return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
+
+ urb->transfer_buffer_length = q->buf_size;
+- urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
++ urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
+
+ return urb->transfer_buffer ? 0 : -ENOMEM;
+ }
+@@ -399,7 +400,7 @@ mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
+ if (err)
+ return err;
+
+- return mt76u_refill_rx(dev, q, e->urb, sg_size);
++ return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
+ }
+
+ static void mt76u_urb_free(struct urb *urb)
+@@ -407,10 +408,10 @@ static void mt76u_urb_free(struct urb *urb)
+ int i;
+
+ for (i = 0; i < urb->num_sgs; i++)
+- mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
++ skb_free_frag(sg_virt(&urb->sg[i]));
+
+ if (urb->transfer_buffer)
+- mt76_put_page_pool_buf(urb->transfer_buffer, false);
++ skb_free_frag(urb->transfer_buffer);
+
+ usb_free_urb(urb);
+ }
+@@ -546,8 +547,6 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
+ len -= data_len;
+ nsgs++;
+ }
+-
+- skb_mark_for_recycle(skb);
+ dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);
+
+ return nsgs;
+@@ -613,7 +612,7 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
+
+ count = mt76u_process_rx_entry(dev, urb, q->buf_size);
+ if (count > 0) {
+- err = mt76u_refill_rx(dev, q, urb, count);
++ err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
+ if (err < 0)
+ break;
+ }
+@@ -664,10 +663,6 @@ mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
+ struct mt76_queue *q = &dev->q_rx[qid];
+ int i, err;
+
+- err = mt76_create_page_pool(dev, q);
+- if (err)
+- return err;
+-
+ spin_lock_init(&q->lock);
+ q->entry = devm_kcalloc(dev->dev,
+ MT_NUM_RX_ENTRIES, sizeof(*q->entry),
+@@ -696,6 +691,7 @@ EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
+ static void
+ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
+ {
++ struct page *page;
+ int i;
+
+ for (i = 0; i < q->ndesc; i++) {
+@@ -705,8 +701,13 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
+ mt76u_urb_free(q->entry[i].urb);
+ q->entry[i].urb = NULL;
+ }
+- page_pool_destroy(q->page_pool);
+- q->page_pool = NULL;
++
++ if (!q->rx_page.va)
++ return;
++
++ page = virt_to_page(q->rx_page.va);
++ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
++ memset(&q->rx_page, 0, sizeof(q->rx_page));
+ }
+
+ static void mt76u_free_rx(struct mt76_dev *dev)
+--
+2.39.2
+