From 43dacb36843a57b2c42ab7846ff853c3c04260b7 Mon Sep 17 00:00:00 2001
From: Bo Jiao <Bo.Jiao@mediatek.com>
Date: Mon, 6 Feb 2023 19:49:22 +0800
Subject: [PATCH 2001/2012] wifi: mt76: revert page_pool for kernel 5.4

This reverts commit e8c10835cf062c577ddf426913788c39d30b4bd7.

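Kernel 5.4 does not provide the page_pool fragment API the driver now
relies on for rx buffers, so rx buffers go back to per-queue
page_frag_cache allocations with explicit DMA mapping, and the cache is
drained with __page_frag_cache_drain() when a queue is torn down.

As a rough sketch of the refill pattern this revert returns to
(illustration only, not part of the diff below; the helper name is made
up for this note):

	/* Illustrative helper only -- mirrors the mt76_dma_rx_fill() change. */
	static int rx_buf_alloc_and_map(struct mt76_dev *dev, struct mt76_queue *q,
					dma_addr_t *addr, void **buf)
	{
		int len = SKB_WITH_OVERHEAD(q->buf_size);

		/* allocate from the queue's page_frag_cache instead of a page pool */
		*buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
		if (!*buf)
			return -ENOMEM;

		/* map the buffer by hand; page_pool no longer owns the DMA mapping */
		*addr = dma_map_single(dev->dma_dev, *buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, *addr))) {
			skb_free_frag(*buf);
			return -ENOMEM;
		}

		return 0;
	}
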
Change-Id: I4e5764fc545087f691fb4c2f43e7a9cefd1e1657
---
 dma.c         | 78 +++++++++++++++++++++++++++++----------------------
 mac80211.c    | 57 -------------------------------------
 mt76.h        | 22 +--------------
 mt7915/main.c | 26 +++++++----------
 mt7915/mmio.c | 55 ++++++++++++++++++++++--------------
 usb.c         | 43 ++++++++++++++--------------
 6 files changed, 111 insertions(+), 170 deletions(-)

diff --git a/dma.c b/dma.c
index 8182f6dc4..3785425b4 100644
--- a/dma.c
+++ b/dma.c
@@ -178,7 +178,7 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
 	local_bh_disable();
 	while ((r = __mt76_get_rxwi(dev)) != NULL) {
 		if (r->ptr)
-			mt76_put_page_pool_buf(r->ptr, false);
+			skb_free_frag(r->ptr);
 		kfree(r);
 	}
 	local_bh_enable();
@@ -411,9 +411,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 		if (!r)
 			return NULL;
 
-		dma_sync_single_for_cpu(dev->dma_dev, r->dma_addr,
-				SKB_WITH_OVERHEAD(q->buf_size),
-				page_pool_get_dma_dir(q->page_pool));
+		dma_unmap_single(dev->dma_dev, r->dma_addr,
+				 SKB_WITH_OVERHEAD(q->buf_size),
+				 DMA_FROM_DEVICE);
 
 		buf = r->ptr;
 		r->dma_addr = 0;
@@ -432,9 +432,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	} else {
 		buf = e->buf;
 		e->buf = NULL;
-		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
-				SKB_WITH_OVERHEAD(q->buf_size),
-				page_pool_get_dma_dir(q->page_pool));
+		dma_unmap_single(dev->dma_dev, e->dma_addr[0],
+				 SKB_WITH_OVERHEAD(q->buf_size),
+				 DMA_FROM_DEVICE);
 	}
 
 	return buf;
@@ -594,11 +594,11 @@ free_skb:
 }
 
 static int
-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
-		 bool allow_direct)
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 {
 	int len = SKB_WITH_OVERHEAD(q->buf_size);
-	int frames = 0;
+	int frames = 0, offset = q->buf_offset;
+	dma_addr_t addr;
 
 	if (!q->ndesc)
 		return 0;
@@ -606,25 +606,26 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 	spin_lock_bh(&q->lock);
 
 	while (q->queued < q->ndesc - 1) {
-		enum dma_data_direction dir;
 		struct mt76_queue_buf qbuf;
-		dma_addr_t addr;
-		int offset;
 		void *buf;
 
-		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
 		if (!buf)
 			break;
 
-		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-		dir = page_pool_get_dma_dir(q->page_pool);
-		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
+		addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
+			skb_free_frag(buf);
+			break;
+		}
 
-		qbuf.addr = addr + q->buf_offset;
-		qbuf.len = len - q->buf_offset;
+		qbuf.addr = addr + offset;
+		qbuf.len = len - offset;
 		qbuf.skip_unmap = false;
 		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
-			mt76_put_page_pool_buf(buf, allow_direct);
+			dma_unmap_single(dev->dma_dev, addr, len,
+					 DMA_FROM_DEVICE);
+			skb_free_frag(buf);
 			break;
 		}
 		frames++;
@@ -668,7 +669,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 		/* WED txfree queue needs ring to be initialized before setup */
 		q->flags = 0;
 		mt76_dma_queue_reset(dev, q);
-		mt76_dma_rx_fill(dev, q, false);
+		mt76_dma_rx_fill(dev, q);
 		q->flags = flags;
 
 		ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
@@ -716,10 +717,6 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 	if (!q->entry)
 		return -ENOMEM;
 
-	ret = mt76_create_page_pool(dev, q);
-	if (ret)
-		return ret;
-
 	ret = mt76_dma_wed_setup(dev, q, false);
 	if (ret)
 		return ret;
@@ -733,6 +730,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 static void
 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 {
+	struct page *page;
 	void *buf;
 	bool more;
 
@@ -746,7 +744,10 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 		if (!buf)
 			break;
 
-		mt76_put_page_pool_buf(buf, false);
+		if (q->flags & MT_QFLAG_RRO)
+			continue;
+
+		skb_free_frag(buf);
 	} while (1);
 
 	if (q->rx_head) {
@@ -755,6 +756,18 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 	}
 
 	spin_unlock_bh(&q->lock);
+
+	if (((q->flags & MT_QFLAG_WED) &&
+	     FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) ||
+	    (q->flags & MT_QFLAG_RRO))
+		return;
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void
@@ -775,7 +788,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 	mt76_dma_wed_setup(dev, q, true);
 	if (q->flags != MT_WED_Q_TXFREE) {
 		mt76_dma_sync_idx(dev, q);
-		mt76_dma_rx_fill(dev, q, false);
+		mt76_dma_rx_fill(dev, q);
 	}
 }
 
@@ -793,7 +806,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
 
 		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
 	} else {
-		mt76_put_page_pool_buf(data, true);
+		skb_free_frag(data);
 	}
 
 	if (more)
@@ -866,7 +879,6 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 			goto free_frag;
 
 		skb_reserve(skb, q->buf_offset);
-		skb_mark_for_recycle(skb);
 
 		*(u32 *)skb->cb = info;
 
@@ -882,10 +894,10 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 		continue;
 
 free_frag:
-		mt76_put_page_pool_buf(data, true);
+		skb_free_frag(data);
 	}
 
-	mt76_dma_rx_fill(dev, q, true);
+	mt76_dma_rx_fill(dev, q);
 	return done;
 }
 
@@ -930,7 +942,7 @@ mt76_dma_init(struct mt76_dev *dev,
 
 	mt76_for_each_q_rx(dev, i) {
 		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
-		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
 		napi_enable(&dev->napi[i]);
 	}
 
@@ -984,8 +996,6 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 
 		netif_napi_del(&dev->napi[i]);
 		mt76_dma_rx_cleanup(dev, q);
-
-		page_pool_destroy(q->page_pool);
 	}
 
 	if (mtk_wed_device_active(&dev->mmio.wed))
diff --git a/mac80211.c b/mac80211.c
index abad16f31..7cd9b6fc7 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -4,7 +4,6 @@
  */
 #include <linux/sched.h>
 #include <linux/of.h>
-#include <net/page_pool.h>
 #include "mt76.h"
 
 static const struct ieee80211_channel mt76_channels_2ghz[] = {
@@ -546,47 +545,6 @@ void mt76_unregister_phy(struct mt76_phy *phy)
 }
 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
 
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
-{
-	struct page_pool_params pp_params = {
-		.order = 0,
-		.flags = PP_FLAG_PAGE_FRAG,
-		.nid = NUMA_NO_NODE,
-		.dev = dev->dma_dev,
-	};
-	int idx = q - dev->q_rx;
-
-	switch (idx) {
-	case MT_RXQ_MAIN:
-	case MT_RXQ_BAND1:
-	case MT_RXQ_BAND2:
-		pp_params.pool_size = 256;
-		break;
-	default:
-		pp_params.pool_size = 16;
-		break;
-	}
-
-	if (mt76_is_mmio(dev)) {
-		/* rely on page_pool for DMA mapping */
-		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
-		pp_params.dma_dir = DMA_FROM_DEVICE;
-		pp_params.max_len = PAGE_SIZE;
-		pp_params.offset = 0;
-	}
-
-	q->page_pool = page_pool_create(&pp_params);
-	if (IS_ERR(q->page_pool)) {
-		int err = PTR_ERR(q->page_pool);
-
-		q->page_pool = NULL;
-		return err;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mt76_create_page_pool);
-
 struct mt76_dev *
 mt76_alloc_device(struct device *pdev, unsigned int size,
 		  const struct ieee80211_ops *ops,
@@ -1785,21 +1743,6 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
 }
 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
 
-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
-{
-#ifdef CONFIG_PAGE_POOL_STATS
-	struct page_pool_stats stats = {};
-	int i;
-
-	mt76_for_each_q_rx(dev, i)
-		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
-
-	page_pool_ethtool_stats_get(data, &stats);
-	*index += page_pool_ethtool_stats_get_count();
-#endif
-}
-EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
-
 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
 {
 	struct ieee80211_hw *hw = phy->hw;
diff --git a/mt76.h b/mt76.h
index 99756dce2..5243741b5 100644
--- a/mt76.h
+++ b/mt76.h
@@ -224,7 +224,7 @@ struct mt76_queue {
 
 	dma_addr_t desc_dma;
 	struct sk_buff *rx_head;
-	struct page_pool *page_pool;
+	struct page_frag_cache rx_page;
 };
 
 struct mt76_mcu_ops {
@@ -1523,7 +1523,6 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
 	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
 }
 
-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
 			 struct mt76_sta_stats *stats, bool eht);
 int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
@@ -1636,25 +1635,6 @@ void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
 struct mt76_rxwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
 			  struct mt76_rxwi_cache *r, dma_addr_t phys);
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
-static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
-{
-	struct page *page = virt_to_head_page(buf);
-
-	page_pool_put_full_page(page->pp, page, allow_direct);
-}
-
-static inline void *
-mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
-{
-	struct page *page;
-
-	page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
-	if (!page)
-		return NULL;
-
-	return page_address(page) + *offset;
-}
 
 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
diff --git a/mt7915/main.c b/mt7915/main.c
index a3fd54cc1..796cd5f04 100644
--- a/mt7915/main.c
+++ b/mt7915/main.c
@@ -1397,22 +1397,19 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw,
 			   struct ieee80211_vif *vif,
 			   u32 sset, u8 *data)
 {
-	if (sset != ETH_SS_STATS)
-		return;
-
-	memcpy(data, mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
-	data += sizeof(mt7915_gstrings_stats);
-	page_pool_ethtool_stats_get_strings(data);
+	if (sset == ETH_SS_STATS)
+		memcpy(data, mt7915_gstrings_stats,
+		       sizeof(mt7915_gstrings_stats));
 }
 
 static
 int mt7915_get_et_sset_count(struct ieee80211_hw *hw,
 			     struct ieee80211_vif *vif, int sset)
 {
-	if (sset != ETH_SS_STATS)
-		return 0;
+	if (sset == ETH_SS_STATS)
+		return MT7915_SSTATS_LEN;
 
-	return MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
+	return 0;
 }
 
 static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
@@ -1440,7 +1437,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
 		.idx = mvif->mt76.idx,
 	};
 	/* See mt7915_ampdu_stat_read_phy, etc */
-	int i, ei = 0, stats_size;
+	int i, ei = 0;
 
 	mutex_lock(&dev->mt76.mutex);
 
@@ -1552,12 +1549,9 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
 		return;
 
 	ei += wi.worker_stat_count;
-
-	mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
-
-	stats_size = MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
-	if (ei != stats_size)
-		dev_err(dev->mt76.dev, "ei: %d size: %d", ei, stats_size);
+	if (ei != MT7915_SSTATS_LEN)
+		dev_err(dev->mt76.dev, "ei: %d MT7915_SSTATS_LEN: %d",
+			ei, (int)MT7915_SSTATS_LEN);
 }
 
 static void
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index a38109497..a28ab0290 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -570,9 +570,13 @@ static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
 static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 {
 	struct mt7915_dev *dev;
+	u32 length;
 	int i;
 
 	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+	length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+				sizeof(struct skb_shared_info));
+
 	for (i = 0; i < dev->mt76.rx_token_size; i++) {
 		struct mt76_rxwi_cache *r;
 
@@ -580,7 +584,9 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 		if (!r || !r->ptr)
 			continue;
 
-		mt76_put_page_pool_buf(r->ptr, false);
+		dma_unmap_single(dev->mt76.dma_dev, r->dma_addr,
+				 wed->wlan.rx_size, DMA_FROM_DEVICE);
+		__free_pages(virt_to_page(r->ptr), get_order(length));
 		r->ptr = NULL;
 
 		mt76_put_rxwi(&dev->mt76, r);
@@ -604,38 +610,47 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 {
 	struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
-	struct mt76_txwi_cache *t = NULL;
 	struct mt7915_dev *dev;
-	struct mt76_queue *q;
-	int i, len;
+	u32 length;
+	int i;
 
 	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
-	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
-	len = SKB_WITH_OVERHEAD(q->buf_size);
+	length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+				sizeof(struct skb_shared_info));
 
 	for (i = 0; i < size; i++) {
-		enum dma_data_direction dir;
-		dma_addr_t addr;
-		u32 offset;
+		struct mt76_rxwi_cache *r = mt76_get_rxwi(&dev->mt76);
+		dma_addr_t phy_addr;
+		struct page *page;
 		int token;
-		void *buf;
+		void *ptr;
 
-		t = mt76_get_rxwi(&dev->mt76);
 		if (!t)
 			goto unmap;
 
-		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
-		if (!buf)
+		page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
+		if (!page) {
+			mt76_put_rxwi(&dev->mt76, r);
 			goto unmap;
+		}
 
-		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-		dir = page_pool_get_dma_dir(q->page_pool);
-		dma_sync_single_for_device(dev->mt76.dma_dev, addr, len, dir);
+		ptr = page_address(page);
+		phy_addr = dma_map_single(dev->mt76.dma_dev, ptr,
+					  wed->wlan.rx_size,
+					  DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
+			__free_pages(page, get_order(length));
+			mt76_put_rxwi(&dev->mt76, r);
+			goto unmap;
+		}
 
-		desc->buf0 = cpu_to_le32(addr);
-		token = mt76_rx_token_consume(&dev->mt76, buf, t, addr);
+		desc->buf0 = cpu_to_le32(phy_addr);
+		token = mt76_rx_token_consume(&dev->mt76, ptr, r, phy_addr);
 		if (token < 0) {
-			mt76_put_page_pool_buf(buf, false);
+			dma_unmap_single(dev->mt76.dma_dev, phy_addr,
+					 wed->wlan.rx_size, DMA_TO_DEVICE);
+			__free_pages(page, get_order(length));
+			mt76_put_rxwi(&dev->mt76, r);
 			goto unmap;
 		}
 
@@ -647,8 +662,6 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 	return 0;
 
 unmap:
-	if (t)
-		mt76_put_rxwi(&dev->mt76, t);
 	mt7915_mmio_wed_release_rx_buf(wed);
 	return -ENOMEM;
 }
diff --git a/usb.c b/usb.c
index 5e5c7bf51..3e281715f 100644
--- a/usb.c
+++ b/usb.c
@@ -319,27 +319,29 @@ mt76u_set_endpoints(struct usb_interface *intf,
 
 static int
 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
-		 int nsgs)
+		 int nsgs, gfp_t gfp)
 {
 	int i;
 
 	for (i = 0; i < nsgs; i++) {
+		struct page *page;
 		void *data;
 		int offset;
 
-		data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
 		if (!data)
 			break;
 
-		sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
-			    offset);
+		page = virt_to_head_page(data);
+		offset = data - page_address(page);
+		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
 	}
 
 	if (i < nsgs) {
 		int j;
 
 		for (j = nsgs; j < urb->num_sgs; j++)
-			mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
+			skb_free_frag(sg_virt(&urb->sg[j]));
 		urb->num_sgs = i;
 	}
 
@@ -352,16 +354,15 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
 
 static int
 mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
-		struct urb *urb, int nsgs)
+		struct urb *urb, int nsgs, gfp_t gfp)
 {
 	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
-	int offset;
 
 	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
-		return mt76u_fill_rx_sg(dev, q, urb, nsgs);
+		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
 
 	urb->transfer_buffer_length = q->buf_size;
-	urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
 
 	return urb->transfer_buffer ? 0 : -ENOMEM;
 }
@@ -399,7 +400,7 @@ mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
 	if (err)
 		return err;
 
-	return mt76u_refill_rx(dev, q, e->urb, sg_size);
+	return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
 }
 
 static void mt76u_urb_free(struct urb *urb)
@@ -407,10 +408,10 @@ static void mt76u_urb_free(struct urb *urb)
 	int i;
 
 	for (i = 0; i < urb->num_sgs; i++)
-		mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
+		skb_free_frag(sg_virt(&urb->sg[i]));
 
 	if (urb->transfer_buffer)
-		mt76_put_page_pool_buf(urb->transfer_buffer, false);
+		skb_free_frag(urb->transfer_buffer);
 
 	usb_free_urb(urb);
 }
@@ -546,8 +547,6 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
 		len -= data_len;
 		nsgs++;
 	}
-
-	skb_mark_for_recycle(skb);
 	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);
 
 	return nsgs;
@@ -613,7 +612,7 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 
 		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
 		if (count > 0) {
-			err = mt76u_refill_rx(dev, q, urb, count);
+			err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
 			if (err < 0)
 				break;
 		}
@@ -664,10 +663,6 @@ mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
 	struct mt76_queue *q = &dev->q_rx[qid];
 	int i, err;
 
-	err = mt76_create_page_pool(dev, q);
-	if (err)
-		return err;
-
 	spin_lock_init(&q->lock);
 	q->entry = devm_kcalloc(dev->dev,
 				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -696,6 +691,7 @@ EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
 static void
 mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
+	struct page *page;
 	int i;
 
 	for (i = 0; i < q->ndesc; i++) {
@@ -705,8 +701,13 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 		mt76u_urb_free(q->entry[i].urb);
 		q->entry[i].urb = NULL;
 	}
-	page_pool_destroy(q->page_pool);
-	q->page_pool = NULL;
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void mt76u_free_rx(struct mt76_dev *dev)
-- 
2.39.2
