From 633bf63e524f39224f9e277e192f53171733430c Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Fri, 6 Jan 2023 18:18:50 +0800
Subject: [PATCH 3005/3013] wifi: mt76: mt7915: wed: add rxwi for further in
 chip rro

Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
 dma.c           | 93 +++++++++++++++++++++++++------------------------
 mac80211.c      |  2 +-
 mt76.h          | 24 ++++++++-----
 mt7915/dma.c    |  2 --
 mt7915/mmio.c   | 29 +++++++--------
 mt7915/mt7915.h |  1 +
 tx.c            | 16 ++++-----
 7 files changed, 87 insertions(+), 80 deletions(-)

diff --git a/dma.c b/dma.c
index 550bdaf..b2b17cc 100644
--- a/dma.c
+++ b/dma.c
@@ -59,17 +59,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 mt76_alloc_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
-	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
-	if (!t)
+	r = kzalloc(L1_CACHE_ALIGN(sizeof(*r)), GFP_ATOMIC);
+	if (!r)
 		return NULL;
 
-	t->ptr = NULL;
-	return t;
+	r->ptr = NULL;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -88,20 +88,20 @@ __mt76_get_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 __mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = NULL;
+	struct mt76_rxwi_cache *r = NULL;
 
-	spin_lock(&dev->wed_lock);
+	spin_lock(&dev->lock);
 	if (!list_empty(&dev->rxwi_cache)) {
-		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
+		r = list_first_entry(&dev->rxwi_cache, struct mt76_rxwi_cache,
 				     list);
-		list_del(&t->list);
+		list_del(&r->list);
 	}
-	spin_unlock(&dev->wed_lock);
+	spin_unlock(&dev->lock);
 
-	return t;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -115,13 +115,13 @@ mt76_get_txwi(struct mt76_dev *dev)
 	return mt76_alloc_txwi(dev);
 }
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
+	struct mt76_rxwi_cache *r = __mt76_get_rxwi(dev);
 
-	if (t)
-		return t;
+	if (r)
+		return r;
 
 	return mt76_alloc_rxwi(dev);
 }
@@ -140,14 +140,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
 EXPORT_SYMBOL_GPL(mt76_put_txwi);
 
 void
-mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r)
 {
-	if (!t)
+	if (!r)
 		return;
 
-	spin_lock(&dev->wed_lock);
-	list_add(&t->list, &dev->rxwi_cache);
-	spin_unlock(&dev->wed_lock);
+	spin_lock(&dev->lock);
+	list_add(&r->list, &dev->rxwi_cache);
+	spin_unlock(&dev->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_put_rxwi);
 
@@ -168,13 +168,13 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
 void
 mt76_free_pending_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	local_bh_disable();
-	while ((t = __mt76_get_rxwi(dev)) != NULL) {
-		if (t->ptr)
-			skb_free_frag(t->ptr);
-		kfree(t);
+	while ((r = __mt76_get_rxwi(dev)) != NULL) {
+		if (r->ptr)
+			skb_free_frag(r->ptr);
+		kfree(r);
 	}
 	local_bh_enable();
 }
@@ -212,7 +212,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 {
 	struct mt76_desc *desc = &q->desc[q->head];
 	struct mt76_queue_entry *entry = &q->entry[q->head];
-	struct mt76_txwi_cache *txwi = NULL;
+	struct mt76_rxwi_cache *rxwi = NULL;
 	u32 buf1 = 0, ctrl;
 	int idx = q->head;
 	int rx_token;
@@ -220,13 +220,13 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
 
 	if (mt76_queue_is_wed_rx(q)) {
-		txwi = mt76_get_rxwi(dev);
-		if (!txwi)
+		rxwi = mt76_get_rxwi(dev);
+		if (!rxwi)
 			return -ENOMEM;
 
-		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
+		rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
 		if (rx_token < 0) {
-			mt76_put_rxwi(dev, txwi);
+			mt76_put_rxwi(dev, rxwi);
 			return -ENOMEM;
 		}
 
@@ -241,7 +241,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 
 	entry->dma_addr[0] = buf->addr;
 	entry->dma_len[0] = buf->len;
-	entry->txwi = txwi;
+	entry->rxwi = rxwi;
 	entry->buf = data;
 	entry->wcid = 0xffff;
 	entry->skip_buf1 = true;
@@ -254,7 +254,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 static int
 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 		 struct mt76_queue_buf *buf, int nbufs, u32 info,
-		 struct sk_buff *skb, void *txwi)
+		 struct sk_buff *skb, void *txwi, void *rxwi)
 {
 	struct mt76_queue_entry *entry;
 	struct mt76_desc *desc;
@@ -307,6 +307,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	}
 
 	q->entry[idx].txwi = txwi;
+	q->entry[idx].rxwi = rxwi;
 	q->entry[idx].skb = skb;
 	q->entry[idx].wcid = 0xffff;
 
@@ -405,13 +406,13 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 		u32 buf1 = le32_to_cpu(desc->buf1);
 		u32 id, find = 0;
 		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
-		struct mt76_txwi_cache *t;
+		struct mt76_rxwi_cache *r;
 
 		if (*more) {
 			spin_lock_bh(&dev->rx_token_lock);
 
-			idr_for_each_entry(&dev->rx_token, t, id) {
-				if (t->dma_addr == le32_to_cpu(desc->buf0)) {
+			idr_for_each_entry(&dev->rx_token, r, id) {
+				if (r->dma_addr == le32_to_cpu(desc->buf0)) {
 					find = 1;
 					token = id;
 
@@ -428,19 +429,19 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 				return NULL;
 		}
 
-		t = mt76_rx_token_release(dev, token);
-		if (!t)
+		r = mt76_rx_token_release(dev, token);
+		if (!r)
 			return NULL;
 
-		dma_unmap_single(dev->dma_dev, t->dma_addr,
+		dma_unmap_single(dev->dma_dev, r->dma_addr,
 				 SKB_WITH_OVERHEAD(q->buf_size),
 				 DMA_FROM_DEVICE);
 
-		buf = t->ptr;
-		t->dma_addr = 0;
-		t->ptr = NULL;
+		buf = r->ptr;
+		r->dma_addr = 0;
+		r->ptr = NULL;
 
-		mt76_put_rxwi(dev, t);
+		mt76_put_rxwi(dev, r);
 
 		if (drop) {
 			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
@@ -504,7 +505,7 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
 	buf.len = skb->len;
 
 	spin_lock_bh(&q->lock);
-	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL, NULL);
 	mt76_dma_kick_queue(dev, q);
 	spin_unlock_bh(&q->lock);
 
@@ -584,7 +585,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 		goto unmap;
 
 	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
-				tx_info.info, tx_info.skb, t);
+				tx_info.info, tx_info.skb, t, NULL);
 
 unmap:
 	for (n--; n > 0; n--)
diff --git a/mac80211.c b/mac80211.c
index 1654cc9..4dc7627 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -602,7 +602,6 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	spin_lock_init(&dev->lock);
 	spin_lock_init(&dev->cc_lock);
 	spin_lock_init(&dev->status_lock);
-	spin_lock_init(&dev->wed_lock);
 	mutex_init(&dev->mutex);
 	init_waitqueue_head(&dev->tx_wait);
 
@@ -633,6 +632,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	INIT_LIST_HEAD(&dev->txwi_cache);
 	INIT_LIST_HEAD(&dev->rxwi_cache);
 	dev->token_size = dev->drv->token_size;
+	dev->rx_token_size = dev->drv->rx_token_size;
 
 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
 		skb_queue_head_init(&dev->rx_skb[i]);
diff --git a/mt76.h b/mt76.h
index 0ee2291..a215d2f 100644
--- a/mt76.h
+++ b/mt76.h
@@ -165,6 +165,7 @@ struct mt76_queue_entry {
 	};
 	union {
 		struct mt76_txwi_cache *txwi;
+		struct mt76_rxwi_cache *rxwi;
 		struct urb *urb;
 		int buf_sz;
 	};
@@ -360,10 +361,15 @@ struct mt76_txwi_cache {
 	struct list_head list;
 	dma_addr_t dma_addr;
 
-	union {
-		struct sk_buff *skb;
-		void *ptr;
-	};
+	struct sk_buff *skb;
+};
+
+struct mt76_rxwi_cache {
+	struct list_head list;
+	dma_addr_t dma_addr;
+
+	void *ptr;
+	u32 token;
 };
 
 struct mt76_rx_tid {
@@ -449,6 +455,7 @@ struct mt76_driver_ops {
 	u16 txwi_size;
 	u16 token_size;
 	u8 mcs_rates;
+	u16 rx_token_size;
 
 	void (*update_survey)(struct mt76_phy *phy);
 
@@ -819,7 +826,6 @@ struct mt76_dev {
 
 	struct ieee80211_hw *hw;
 
-	spinlock_t wed_lock;
 	spinlock_t lock;
 	spinlock_t cc_lock;
 
@@ -1410,8 +1416,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
 }
 
 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r);
+struct mt76_rxwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
 void mt76_free_pending_rxwi(struct mt76_dev *dev);
 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
 		      struct napi_struct *napi);
@@ -1563,9 +1569,9 @@ struct mt76_txwi_cache *
 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
 void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
-struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
+struct mt76_rxwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *r, dma_addr_t phys);
+			  struct mt76_rxwi_cache *r, dma_addr_t phys);
 
 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
diff --git a/mt7915/dma.c b/mt7915/dma.c
index 3b8a2ab..7a9ced4 100644
--- a/mt7915/dma.c
+++ b/mt7915/dma.c
@@ -509,7 +509,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
 	    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
 		dev->mt76.q_rx[MT_RXQ_MAIN].flags =
 			MT_WED_Q_RX(MT7915_RXQ_BAND0);
-		dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
 	}
 
 	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
@@ -546,7 +545,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
 	    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
 		dev->mt76.q_rx[MT_RXQ_BAND1].flags =
 			MT_WED_Q_RX(MT7915_RXQ_BAND1);
-		dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
 	}
 
 	/* rx data queue for band1 */
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 0e79faf..fc9aadb 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -610,18 +610,18 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 				    sizeof(struct skb_shared_info));
 
 	for (i = 0; i < dev->mt76.rx_token_size; i++) {
-		struct mt76_txwi_cache *t;
+		struct mt76_rxwi_cache *r;
 
-		t = mt76_rx_token_release(&dev->mt76, i);
-		if (!t || !t->ptr)
+		r = mt76_rx_token_release(&dev->mt76, i);
+		if (!r || !r->ptr)
 			continue;
 
-		dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
+		dma_unmap_single(dev->mt76.dma_dev, r->dma_addr,
 				 wed->wlan.rx_size, DMA_FROM_DEVICE);
-		__free_pages(virt_to_page(t->ptr), get_order(length));
-		t->ptr = NULL;
+		__free_pages(virt_to_page(r->ptr), get_order(length));
+		r->ptr = NULL;
 
-		mt76_put_rxwi(&dev->mt76, t);
+		mt76_put_rxwi(&dev->mt76, r);
 	}
 
 	mt76_free_pending_rxwi(&dev->mt76);
@@ -639,18 +639,18 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 				    sizeof(struct skb_shared_info));
 
 	for (i = 0; i < size; i++) {
-		struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76);
+		struct mt76_rxwi_cache *r = mt76_get_rxwi(&dev->mt76);
 		dma_addr_t phy_addr;
 		struct page *page;
 		int token;
 		void *ptr;
 
-		if (!t)
+		if (!r)
 			goto unmap;
 
 		page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
 		if (!page) {
-			mt76_put_rxwi(&dev->mt76, t);
+			mt76_put_rxwi(&dev->mt76, r);
 			goto unmap;
 		}
 
@@ -660,17 +660,17 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 					  DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
 			__free_pages(page, get_order(length));
-			mt76_put_rxwi(&dev->mt76, t);
+			mt76_put_rxwi(&dev->mt76, r);
 			goto unmap;
 		}
 
 		desc->buf0 = cpu_to_le32(phy_addr);
-		token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
+		token = mt76_rx_token_consume(&dev->mt76, ptr, r, phy_addr);
 		if (token < 0) {
 			dma_unmap_single(dev->mt76.dma_dev, phy_addr,
 					 wed->wlan.rx_size, DMA_TO_DEVICE);
 			__free_pages(page, get_order(length));
-			mt76_put_rxwi(&dev->mt76, t);
+			mt76_put_rxwi(&dev->mt76, r);
 			goto unmap;
 		}
 
@@ -831,7 +831,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
 	wed->wlan.reset = mt7915_mmio_wed_reset;
 	wed->wlan.reset_complete = mt7915_mmio_wed_reset_complete;
 
-	dev->mt76.rx_token_size = wed->wlan.rx_npkt;
+	dev->mt76.rx_token_size += wed->wlan.rx_npkt;
 
 	if (mtk_wed_device_attach(wed))
 		return 0;
@@ -1037,6 +1037,7 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
 				SURVEY_INFO_TIME_RX |
 				SURVEY_INFO_TIME_BSS_RX,
 		.token_size = MT7915_TOKEN_SIZE,
+		.rx_token_size = MT7915_RX_TOKEN_SIZE,
 		.tx_prepare_skb = mt7915_tx_prepare_skb,
 		.tx_complete_skb = mt76_connac_tx_complete_skb,
 		.rx_skb = mt7915_queue_rx_skb,
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
index 0ec94aa..81970ef 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -57,6 +57,7 @@
 #define MT7915_EEPROM_BLOCK_SIZE	16
 #define MT7915_HW_TOKEN_SIZE		7168
 #define MT7915_TOKEN_SIZE		8192
+#define MT7915_RX_TOKEN_SIZE		4096
 
 #define MT7915_CFEND_RATE_DEFAULT	0x49	/* OFDM 24M */
 #define MT7915_CFEND_RATE_11B		0x03	/* 11B LP, 11M */
diff --git a/tx.c b/tx.c
index 6d55566..a72b777 100644
--- a/tx.c
+++ b/tx.c
@@ -756,16 +756,16 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
 EXPORT_SYMBOL_GPL(mt76_token_consume);
 
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *t, dma_addr_t phys)
+			  struct mt76_rxwi_cache *r, dma_addr_t phys)
 {
 	int token;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
+	token = idr_alloc(&dev->rx_token, r, 0, dev->rx_token_size,
 			  GFP_ATOMIC);
 	if (token >= 0) {
-		t->ptr = ptr;
-		t->dma_addr = phys;
+		r->ptr = ptr;
+		r->dma_addr = phys;
 	}
 	spin_unlock_bh(&dev->rx_token_lock);
 
@@ -802,15 +802,15 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
 }
 EXPORT_SYMBOL_GPL(mt76_token_release);
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_rx_token_release(struct mt76_dev *dev, int token)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	t = idr_remove(&dev->rx_token, token);
+	r = idr_remove(&dev->rx_token, token);
 	spin_unlock_bh(&dev->rx_token_lock);
 
-	return t;
+	return r;
 }
 EXPORT_SYMBOL_GPL(mt76_rx_token_release);
-- 
2.18.0
