From 4faced46403673e8089b9c4cc89d55f7d9fd5e6d Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Fri, 6 Jan 2023 18:18:50 +0800
Subject: [PATCH 2005/2015] wifi: mt76: mt7915: wed: add rxwi for further in
 chip rro

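Split the rx token bookkeeping out of struct mt76_txwi_cache into a
dedicated struct mt76_rxwi_cache, as a preparation step for in-chip
RRO support. Queue entries gain an explicit rxwi pointer next to the
txwi one, the rxwi cache list is now protected by dev->lock so the
dedicated wed_lock can be dropped, and the rx token pool size becomes
a per-driver parameter (rx_token_size in struct mt76_driver_ops) that
the WED init path extends by wed->wlan.rx_npkt instead of overwriting.
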
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
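Reviewer note (this area below "---" is dropped by git-am): a minimal
sketch of how the reworked rx token sizing composes after this patch,
using only symbols from the hunks below, with surrounding driver code
elided:

	/* mt7915/mmio.c: the driver seeds the pool via its ops ... */
	static const struct mt76_driver_ops drv_ops = {
		.token_size = MT7915_TOKEN_SIZE,
		.rx_token_size = MT7915_RX_TOKEN_SIZE,	/* new, 4096 */
		/* ... */
	};

	/* mac80211.c: mt76_alloc_device() copies it at allocation time */
	dev->rx_token_size = dev->drv->rx_token_size;

	/* mt7915/mmio.c: WED attach then adds its packet count on top,
	 * rather than overwriting the total as before:
	 */
	dev->mt76.rx_token_size += wed->wlan.rx_npkt;
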
 dma.c           | 93 +++++++++++++++++++++++++------------------------
 mac80211.c      |  2 +-
 mt76.h          | 24 ++++++++-----
 mt7915/dma.c    |  2 --
 mt7915/mmio.c   |  3 +-
 mt7915/mt7915.h |  1 +
 tx.c            | 16 ++++-----
 wed.c           | 26 +++++++-------
 8 files changed, 87 insertions(+), 80 deletions(-)

diff --git a/dma.c b/dma.c
index 185c6f1..9cd97d2 100644
--- a/dma.c
+++ b/dma.c
@@ -64,17 +64,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 mt76_alloc_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
-	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
-	if (!t)
+	r = kzalloc(L1_CACHE_ALIGN(sizeof(*r)), GFP_ATOMIC);
+	if (!r)
 		return NULL;
 
-	t->ptr = NULL;
-	return t;
+	r->ptr = NULL;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -93,20 +93,20 @@ __mt76_get_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 __mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = NULL;
+	struct mt76_rxwi_cache *r = NULL;
 
-	spin_lock_bh(&dev->wed_lock);
+	spin_lock_bh(&dev->lock);
 	if (!list_empty(&dev->rxwi_cache)) {
-		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
+		r = list_first_entry(&dev->rxwi_cache, struct mt76_rxwi_cache,
 				     list);
-		list_del(&t->list);
+		list_del(&r->list);
 	}
-	spin_unlock_bh(&dev->wed_lock);
+	spin_unlock_bh(&dev->lock);
 
-	return t;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -120,13 +120,13 @@ mt76_get_txwi(struct mt76_dev *dev)
 	return mt76_alloc_txwi(dev);
 }
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
+	struct mt76_rxwi_cache *r = __mt76_get_rxwi(dev);
 
-	if (t)
-		return t;
+	if (r)
+		return r;
 
 	return mt76_alloc_rxwi(dev);
 }
@@ -145,14 +145,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
 EXPORT_SYMBOL_GPL(mt76_put_txwi);
 
 void
-mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r)
 {
-	if (!t)
+	if (!r)
 		return;
 
-	spin_lock_bh(&dev->wed_lock);
-	list_add(&t->list, &dev->rxwi_cache);
-	spin_unlock_bh(&dev->wed_lock);
+	spin_lock_bh(&dev->lock);
+	list_add(&r->list, &dev->rxwi_cache);
+	spin_unlock_bh(&dev->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_put_rxwi);
 
@@ -173,13 +173,13 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
 void
 mt76_free_pending_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	local_bh_disable();
-	while ((t = __mt76_get_rxwi(dev)) != NULL) {
-		if (t->ptr)
-			skb_free_frag(t->ptr);
-		kfree(t);
+	while ((r = __mt76_get_rxwi(dev)) != NULL) {
+		if (r->ptr)
+			skb_free_frag(r->ptr);
+		kfree(r);
 	}
 	local_bh_enable();
 }
@@ -228,7 +228,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 		    struct mt76_queue_buf *buf, void *data)
 {
 	struct mt76_queue_entry *entry = &q->entry[q->head];
-	struct mt76_txwi_cache *txwi = NULL;
+	struct mt76_rxwi_cache *rxwi = NULL;
 	struct mt76_desc *desc;
 	int idx = q->head;
 	u32 buf1 = 0, ctrl;
developer | 753619c | 2024-02-22 13:42:45 +0800 | [diff] [blame] | 141 | @@ -249,13 +249,13 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q, |
developer | 1a17367 | 2023-12-21 14:49:33 +0800 | [diff] [blame] | 142 | #endif |
developer | 13655da | 2023-01-10 19:53:25 +0800 | [diff] [blame] | 143 | |
developer | 60a3d66 | 2023-02-07 15:24:34 +0800 | [diff] [blame] | 144 | if (mt76_queue_is_wed_rx(q)) { |
| 145 | - txwi = mt76_get_rxwi(dev); |
| 146 | - if (!txwi) |
| 147 | + rxwi = mt76_get_rxwi(dev); |
| 148 | + if (!rxwi) |
| 149 | return -ENOMEM; |
developer | 13655da | 2023-01-10 19:53:25 +0800 | [diff] [blame] | 150 | |
| 151 | - rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr); |
| 152 | + rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr); |
| 153 | if (rx_token < 0) { |
| 154 | - mt76_put_rxwi(dev, txwi); |
| 155 | + mt76_put_rxwi(dev, rxwi); |
| 156 | return -ENOMEM; |
| 157 | } |
| 158 | |
@@ -271,7 +271,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 done:
 	entry->dma_addr[0] = buf->addr;
 	entry->dma_len[0] = buf->len;
-	entry->txwi = txwi;
+	entry->rxwi = rxwi;
 	entry->buf = data;
 	entry->wcid = 0xffff;
 	entry->skip_buf1 = true;
@@ -284,7 +284,7 @@ done:
 static int
 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 		 struct mt76_queue_buf *buf, int nbufs, u32 info,
-		 struct sk_buff *skb, void *txwi)
+		 struct sk_buff *skb, void *txwi, void *rxwi)
 {
 	struct mt76_queue_entry *entry;
 	struct mt76_desc *desc;
@@ -344,6 +344,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	}
 
 	q->entry[idx].txwi = txwi;
+	q->entry[idx].rxwi = rxwi;
 	q->entry[idx].skb = skb;
 	q->entry[idx].wcid = 0xffff;
 
@@ -446,13 +447,13 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	if (mt76_queue_is_wed_rx(q)) {
 		u32 id, find = 0;
 		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
-		struct mt76_txwi_cache *t;
+		struct mt76_rxwi_cache *r;
 
 		if (*more) {
 			spin_lock_bh(&dev->rx_token_lock);
 
-			idr_for_each_entry(&dev->rx_token, t, id) {
-				if (t->dma_addr == le32_to_cpu(desc->buf0)) {
+			idr_for_each_entry(&dev->rx_token, r, id) {
+				if (r->dma_addr == le32_to_cpu(desc->buf0)) {
 					find = 1;
 					token = id;
 
@@ -469,19 +470,19 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 			return NULL;
 		}
 
-		t = mt76_rx_token_release(dev, token);
-		if (!t)
+		r = mt76_rx_token_release(dev, token);
+		if (!r)
 			return NULL;
 
-		dma_unmap_single(dev->dma_dev, t->dma_addr,
+		dma_unmap_single(dev->dma_dev, r->dma_addr,
 				 SKB_WITH_OVERHEAD(q->buf_size),
 				 DMA_FROM_DEVICE);
 
-		buf = t->ptr;
-		t->dma_addr = 0;
-		t->ptr = NULL;
+		buf = r->ptr;
+		r->dma_addr = 0;
+		r->ptr = NULL;
 
-		mt76_put_rxwi(dev, t);
+		mt76_put_rxwi(dev, r);
 		if (drop)
 			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
 	} else {
@@ -547,7 +548,7 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
 	buf.len = skb->len;
 
 	spin_lock_bh(&q->lock);
-	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL, NULL);
 	mt76_dma_kick_queue(dev, q);
 	spin_unlock_bh(&q->lock);
 
@@ -628,7 +629,7 @@ mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
 		goto unmap;
 
 	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
-				tx_info.info, tx_info.skb, t);
+				tx_info.info, tx_info.skb, t, NULL);
 
 unmap:
 	for (n--; n > 0; n--)
diff --git a/mac80211.c b/mac80211.c
index f9dfdf8..225b290 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -618,7 +618,6 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	spin_lock_init(&dev->lock);
 	spin_lock_init(&dev->cc_lock);
 	spin_lock_init(&dev->status_lock);
-	spin_lock_init(&dev->wed_lock);
 	mutex_init(&dev->mutex);
 	init_waitqueue_head(&dev->tx_wait);
 
@@ -651,6 +650,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	INIT_LIST_HEAD(&dev->txwi_cache);
 	INIT_LIST_HEAD(&dev->rxwi_cache);
 	dev->token_size = dev->drv->token_size;
+	dev->rx_token_size = dev->drv->rx_token_size;
 
 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
 		skb_queue_head_init(&dev->rx_skb[i]);
diff --git a/mt76.h b/mt76.h
index 6168758..5e71267 100644
--- a/mt76.h
+++ b/mt76.h
@@ -193,6 +193,7 @@ struct mt76_queue_entry {
 	};
 	union {
 		struct mt76_txwi_cache *txwi;
+		struct mt76_rxwi_cache *rxwi;
 		struct urb *urb;
 		int buf_sz;
 	};
@@ -413,10 +414,15 @@ struct mt76_txwi_cache {
 	u8 phy_idx;
 	unsigned long jiffies;
 
-	union {
-		struct sk_buff *skb;
-		void *ptr;
-	};
+	struct sk_buff *skb;
+};
+
+struct mt76_rxwi_cache {
+	struct list_head list;
+	dma_addr_t dma_addr;
+
+	void *ptr;
+	u32 token;
 };
 
 struct mt76_rx_tid {
@@ -511,6 +517,7 @@ struct mt76_driver_ops {
 	u16 txwi_size;
 	u16 token_size;
 	u8 mcs_rates;
+	u16 rx_token_size;
 
 	void (*update_survey)(struct mt76_phy *phy);
 
@@ -903,7 +910,6 @@ struct mt76_dev {
 
 	struct ieee80211_hw *hw;
 
-	spinlock_t wed_lock;
 	spinlock_t lock;
 	spinlock_t cc_lock;
 
@@ -1639,8 +1645,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
 }
 
 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r);
+struct mt76_rxwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
 void mt76_free_pending_rxwi(struct mt76_dev *dev);
 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
 		      struct napi_struct *napi);
@@ -1819,9 +1825,9 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi,
 		       u8 phy_idx);
 void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
-struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
+struct mt76_rxwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *r, dma_addr_t phys);
+			  struct mt76_rxwi_cache *r, dma_addr_t phys);
 
 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
diff --git a/mt7915/dma.c b/mt7915/dma.c
index 0baa82c..552410a 100644
--- a/mt7915/dma.c
+++ b/mt7915/dma.c
@@ -512,7 +512,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
 		    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
 			mdev->q_rx[MT_RXQ_MAIN].flags =
 				MT_WED_Q_RX(MT7915_RXQ_BAND0);
-			dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
 			mdev->q_rx[MT_RXQ_MAIN].wed = &mdev->mmio.wed;
 		}
 
@@ -551,7 +550,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
 		    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
 			mdev->q_rx[MT_RXQ_BAND1].flags =
 				MT_WED_Q_RX(MT7915_RXQ_BAND1);
-			dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
 			mdev->q_rx[MT_RXQ_BAND1].wed = &mdev->mmio.wed;
 		}
 
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 91100f1..3391a94 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -725,7 +725,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
 	wed->wlan.reset = mt7915_mmio_wed_reset;
 	wed->wlan.reset_complete = mt76_wed_reset_complete;
 
-	dev->mt76.rx_token_size = wed->wlan.rx_npkt;
+	dev->mt76.rx_token_size += wed->wlan.rx_npkt;
 
 	if (mtk_wed_device_attach(wed))
 		return 0;
@@ -933,6 +933,7 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
 			       SURVEY_INFO_TIME_RX |
 			       SURVEY_INFO_TIME_BSS_RX,
 		.token_size = MT7915_TOKEN_SIZE,
+		.rx_token_size = MT7915_RX_TOKEN_SIZE,
 		.tx_prepare_skb = mt7915_tx_prepare_skb,
 		.tx_complete_skb = mt76_connac_tx_complete_skb,
 		.rx_skb = mt7915_queue_rx_skb,
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
index 1d0cfa1..f5a7e1e 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -64,6 +64,7 @@
 #define MT7915_EEPROM_BLOCK_SIZE	16
 #define MT7915_HW_TOKEN_SIZE		7168
 #define MT7915_TOKEN_SIZE		8192
+#define MT7915_RX_TOKEN_SIZE		4096
 
 #define MT7915_CFEND_RATE_DEFAULT	0x49	/* OFDM 24M */
 #define MT7915_CFEND_RATE_11B		0x03	/* 11B LP, 11M */
diff --git a/tx.c b/tx.c
index db0d4df..92afbf5 100644
--- a/tx.c
+++ b/tx.c
@@ -864,16 +864,16 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi,
 EXPORT_SYMBOL_GPL(mt76_token_consume);
 
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *t, dma_addr_t phys)
+			  struct mt76_rxwi_cache *r, dma_addr_t phys)
 {
 	int token;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
+	token = idr_alloc(&dev->rx_token, r, 0, dev->rx_token_size,
 			  GFP_ATOMIC);
 	if (token >= 0) {
-		t->ptr = ptr;
-		t->dma_addr = phys;
+		r->ptr = ptr;
+		r->dma_addr = phys;
 	}
 	spin_unlock_bh(&dev->rx_token_lock);
 
@@ -912,15 +912,15 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
 }
 EXPORT_SYMBOL_GPL(mt76_token_release);
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_rx_token_release(struct mt76_dev *dev, int token)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	t = idr_remove(&dev->rx_token, token);
+	r = idr_remove(&dev->rx_token, token);
 	spin_unlock_bh(&dev->rx_token_lock);
 
-	return t;
+	return r;
 }
 EXPORT_SYMBOL_GPL(mt76_rx_token_release);
diff --git a/wed.c b/wed.c
index 47c81a2..c03b52f 100644
--- a/wed.c
+++ b/wed.c
@@ -16,18 +16,18 @@ void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
 			sizeof(struct skb_shared_info));
 
 	for (i = 0; i < dev->rx_token_size; i++) {
-		struct mt76_txwi_cache *t;
+		struct mt76_rxwi_cache *r;
 
-		t = mt76_rx_token_release(dev, i);
-		if (!t || !t->ptr)
+		r = mt76_rx_token_release(dev, i);
+		if (!r || !r->ptr)
 			continue;
 
-		dma_unmap_single(dev->dma_dev, t->dma_addr,
+		dma_unmap_single(dev->dma_dev, r->dma_addr,
 				 wed->wlan.rx_size, DMA_FROM_DEVICE);
-		__free_pages(virt_to_page(t->ptr), get_order(length));
-		t->ptr = NULL;
+		__free_pages(virt_to_page(r->ptr), get_order(length));
+		r->ptr = NULL;
 
-		mt76_put_rxwi(dev, t);
+		mt76_put_rxwi(dev, r);
 	}
 
 	mt76_free_pending_rxwi(dev);
@@ -46,18 +46,18 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 			sizeof(struct skb_shared_info));
 
 	for (i = 0; i < size; i++) {
-		struct mt76_txwi_cache *t = mt76_get_rxwi(dev);
+		struct mt76_rxwi_cache *r = mt76_get_rxwi(dev);
 		dma_addr_t phy_addr;
 		struct page *page;
 		int token;
 		void *ptr;
 
-		if (!t)
+		if (!r)
 			goto unmap;
 
 		page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
 		if (!page) {
-			mt76_put_rxwi(dev, t);
+			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
 
@@ -67,17 +67,17 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 			       DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(dev->dev, phy_addr))) {
 			__free_pages(page, get_order(length));
-			mt76_put_rxwi(dev, t);
+			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
 
 		desc->buf0 = cpu_to_le32(phy_addr);
-		token = mt76_rx_token_consume(dev, ptr, t, phy_addr);
+		token = mt76_rx_token_consume(dev, ptr, r, phy_addr);
 		if (token < 0) {
 			dma_unmap_single(dev->dma_dev, phy_addr,
 					 wed->wlan.rx_size, DMA_TO_DEVICE);
 			__free_pages(page, get_order(length));
-			mt76_put_rxwi(dev, t);
+			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
 
-- 
2.18.0
