From 017ed7925cbdfb41d3d85fed54a97cff9fcf2f78 Mon Sep 17 00:00:00 2001
From: Bo Jiao <Bo.Jiao@mediatek.com>
Date: Mon, 6 Feb 2023 13:50:56 +0800
Subject: [PATCH] wifi: mt76: mt7996: wed: add wed3.0 rx support

Add hardware RRO support. This is a preliminary patch for WED 3.0 support.
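
As context, a minimal sketch of how the host side is expected to consume the
new RRO indication ring (illustrative only: the mt76_rro_ind layout,
q->magic_cnt and MT_DMA_IND_CMD_MAGIC_CNT are taken from this patch, while
the dequeue logic shown here is a simplified assumption):

    struct mt76_rro_ind *cmd = (struct mt76_rro_ind *)&q->rro_desc[q->head];

    /* The 3-bit magic_cnt wraps modulo MT_DMA_IND_CMD_MAGIC_CNT (8) and
     * advances each time the ring wraps, so a stale descriptor can be
     * told apart from a freshly DMA-ed one without a done bit.
     */
    if (cmd->magic_cnt != q->magic_cnt)
        return NULL; /* nothing new posted by the hardware */

    /* cmd->ind_cnt MSDUs of session cmd->se_id, starting at sequence
     * number cmd->start_sn, are now in order and can be released.
     */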

Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
Change-Id: I7e113b1392bcf085ec02c8a44ffbb7cf7c3fa027
Signed-off-by: sujuan.chen <sujuan.chen@mediatek.com>
---
dma.c | 205 +++++++++++++++++++++++++++++++++++++-----------
dma.h | 12 +++
mac80211.c | 1 +
mt76.h | 63 +++++++++++++--
mt7996/dma.c | 163 ++++++++++++++++++++++++++++++------
mt7996/init.c | 124 ++++++++++++++++++++++++++++-
mt7996/mac.c | 42 ++++++++--
mt7996/mcu.c | 8 +-
mt7996/mmio.c | 36 +++++++--
mt7996/mt7996.h | 58 ++++++++++++++
mt7996/regs.h | 63 ++++++++++++++-
11 files changed, 683 insertions(+), 92 deletions(-)

diff --git a/dma.c b/dma.c
index 930ec768..e5b4d898 100644
--- a/dma.c
+++ b/dma.c
@@ -193,46 +193,68 @@ EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
+ int ndesc = q->ndesc;
+
+ if (q->flags & MT_QFLAG_MAGIC)
+ ndesc |= MT_DMA_MAGIC_EN;
+
Q_WRITE(dev, q, desc_base, q->desc_dma);
- Q_WRITE(dev, q, ring_size, q->ndesc);
+ Q_WRITE(dev, q, ring_size, ndesc);
q->head = Q_READ(dev, q, dma_idx);
q->tail = q->head;
}

static void
-mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
+mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q, bool skip)
{
int i;

if (!q || !q->ndesc)
return;

+ if (!q->desc)
+ goto done;
+
/* clear descriptors */
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

+ if (skip)
+ goto sync;
+
+done:
Q_WRITE(dev, q, cpu_idx, 0);
Q_WRITE(dev, q, dma_idx, 0);
+sync:
mt76_dma_sync_idx(dev, q);
}

static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
- struct mt76_queue_buf *buf, void *data)
+ struct mt76_queue_buf *buf, void *data,
+ struct mt76_rxwi_cache *rxwi)
{
- struct mt76_desc *desc = &q->desc[q->head];
+ struct mt76_desc *desc;
struct mt76_queue_entry *entry = &q->entry[q->head];
- struct mt76_rxwi_cache *rxwi = NULL;
u32 buf1 = 0, ctrl;
int idx = q->head;
int rx_token;
+ void *e_buf = data;
+
+ if (mt76_queue_is_rro_ind(q)) {
+ e_buf = &q->rro_desc[q->head];
+ goto done;
+ }

+ desc = &q->desc[q->head];
ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);

if (mt76_queue_is_wed_rx(q)) {
- rxwi = mt76_get_rxwi(dev);
- if (!rxwi)
- return -ENOMEM;
+ if (!rxwi) {
+ rxwi = mt76_get_rxwi(dev);
+ if (!rxwi)
+ return -ENOMEM;
+ }

rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
if (rx_token < 0) {
@@ -249,10 +271,11 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
WRITE_ONCE(desc->info, 0);

+done:
entry->dma_addr[0] = buf->addr;
entry->dma_len[0] = buf->len;
entry->rxwi = rxwi;
- entry->buf = data;
+ entry->buf = e_buf;
entry->wcid = 0xffff;
entry->skip_buf1 = true;
q->head = (q->head + 1) % q->ndesc;
@@ -396,14 +419,18 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)

static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
- int *len, u32 *info, bool *more, bool *drop)
+ int *len, u32 *info, bool *more, bool *drop, bool flush)
{
struct mt76_queue_entry *e = &q->entry[idx];
struct mt76_desc *desc = &q->desc[idx];
- void *buf;
+ void *buf = e->buf;
+ u32 ctrl;

+ if (mt76_queue_is_rro_ind(q))
+ goto done;
+
+ ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
if (len) {
- u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
}
@@ -411,6 +438,12 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
if (info)
*info = le32_to_cpu(desc->info);

+ if (drop) {
+ *drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A | MT_DMA_CTL_DROP));
+ if (ctrl & MT_DMA_CTL_VER_MASK)
+ *drop = !!(ctrl & MT_DMA_CTL_PN_CHK_FAIL);
+ }
+
if (mt76_queue_is_wed_rx(q)) {
u32 buf1 = le32_to_cpu(desc->buf1);
u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
@@ -423,28 +456,54 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
SKB_WITH_OVERHEAD(q->buf_size),
DMA_FROM_DEVICE);

- buf = r->ptr;
- r->dma_addr = 0;
- r->ptr = NULL;
-
- mt76_put_rxwi(dev, r);
-
- if (drop) {
- u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
-
- *drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
- MT_DMA_CTL_DROP));
+ if (flush) {
+ buf = r->ptr;
+ r->dma_addr = 0;
+ r->ptr = NULL;
+
+ mt76_put_rxwi(dev, r);
+ } else {
+ struct mt76_queue_buf qbuf;
+
+ buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+ if (!buf)
+ return NULL;
+
+ memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size));
+
+ r->dma_addr = dma_map_single(dev->dma_dev, r->ptr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dma_dev, r->dma_addr))) {
+ skb_free_frag(r->ptr);
+ mt76_put_rxwi(dev, r);
+ return NULL;
+ }
+
+ qbuf.addr = r->dma_addr;
+ qbuf.len = SKB_WITH_OVERHEAD(q->buf_size);
+ qbuf.skip_unmap = false;
+
+ if (mt76_dma_add_rx_buf(dev, q, &qbuf, r->ptr, r) < 0) {
+ dma_unmap_single(dev->dma_dev, r->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ DMA_FROM_DEVICE);
+ skb_free_frag(r->ptr);
+ mt76_put_rxwi(dev, r);
+ return NULL;
+ }
+ }

+ if (drop)
*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
- }
} else {
- buf = e->buf;
- e->buf = NULL;
dma_unmap_single(dev->dma_dev, e->dma_addr[0],
SKB_WITH_OVERHEAD(q->buf_size),
DMA_FROM_DEVICE);
}

+done:
+ e->buf = NULL;
return buf;
}

@@ -458,15 +517,22 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
if (!q->queued)
return NULL;

- if (flush)
- q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
- else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+ if (mt76_queue_is_rro_ind(q)) {
+ goto done;
+ } else if (q->flags & MT_QFLAG_RRO) {
return NULL;
+ } else {
+ if (flush)
+ q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+ else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+ return NULL;
+ }

+done:
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;

- return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
+ return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
}

static int
@@ -615,7 +681,10 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)

while (q->queued < q->ndesc - 1) {
struct mt76_queue_buf qbuf;
- void *buf;
+ void *buf = NULL;
+
+ if (mt76_queue_is_rro_ind(q))
+ goto done;

buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
if (!buf)
@@ -627,10 +696,11 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
break;
}

+done:
qbuf.addr = addr + offset;
qbuf.len = len - offset;
qbuf.skip_unmap = false;
- if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
+ if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf, NULL) < 0) {
dma_unmap_single(dev->dma_dev, addr, len,
DMA_FROM_DEVICE);
skb_free_frag(buf);
@@ -639,7 +709,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
frames++;
}

- if (frames)
+ if (frames || mt76_queue_is_wed_rx(q))
mt76_dma_kick_queue(dev, q);

spin_unlock_bh(&q->lock);
@@ -652,7 +722,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
struct mtk_wed_device *wed = &dev->mmio.wed;
int ret, type, ring;
- u8 flags;
+ u16 flags;

if (!q || !q->ndesc)
return -EINVAL;
@@ -679,7 +749,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
case MT76_WED_Q_TXFREE:
/* WED txfree queue needs ring to be initialized before setup */
q->flags = 0;
- mt76_dma_queue_reset(dev, q);
+ mt76_dma_queue_reset(dev, q, false);
mt76_dma_rx_fill(dev, q);
q->flags = flags;

@@ -688,9 +758,31 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
q->wed_regs = wed->txfree_ring.reg_base;
break;
case MT76_WED_Q_RX:
- ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
- if (!ret)
- q->wed_regs = wed->rx_ring[ring].reg_base;
+ if (q->flags & MT_QFLAG_RRO) {
+ q->flags &= ~0x1f;
+
+ ring = FIELD_GET(MT_QFLAG_RRO_RING, q->flags);
+ type = FIELD_GET(MT_QFLAG_RRO_TYPE, q->flags);
+ if (type == MT76_RRO_Q_DATA) {
+ mt76_dma_queue_reset(dev, q, true);
+ ret = mtk_wed_device_rro_rx_ring_setup(wed, ring, q->regs);
+ } else if (type == MT76_RRO_Q_MSDU_PG) {
+ mt76_dma_queue_reset(dev, q, true);
+ ret = mtk_wed_device_msdu_pg_rx_ring_setup(wed, ring, q->regs);
+ } else if (type == MT76_RRO_Q_IND) {
+ mt76_dma_queue_reset(dev, q, false);
+ mt76_dma_rx_fill(dev, q);
+ ret = mtk_wed_device_ind_rx_ring_setup(wed, q->regs);
+ }
+ if (type != MT76_RRO_Q_IND) {
+ q->head = q->ndesc - 1;
+ q->queued = q->ndesc - 1;
+ }
+ } else {
+ ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, 0);
+ if (!ret)
+ q->wed_regs = wed->rx_ring[ring].reg_base;
+ }
break;
default:
ret = -EINVAL;
@@ -719,10 +811,25 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
q->hw_idx = idx;

size = q->ndesc * sizeof(struct mt76_desc);
+ if (mt76_queue_is_rro_ind(q))
+ size = q->ndesc * sizeof(struct mt76_rro_desc);
+
q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
if (!q->desc)
return -ENOMEM;

+ if (mt76_queue_is_rro_ind(q)) {
+ struct mt76_rro_ind *cmd;
+ int i;
+
+ q->rro_desc = (struct mt76_rro_desc *)(q->desc);
+ q->desc = NULL;
+ for (i = 0; i < q->ndesc; i++) {
+ cmd = (struct mt76_rro_ind *) &q->rro_desc[i];
+ cmd->magic_cnt = MT_DMA_IND_CMD_MAGIC_CNT - 1;
+ }
+ }
+
size = q->ndesc * sizeof(*q->entry);
q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
if (!q->entry)
@@ -732,8 +839,11 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
if (ret)
return ret;

- if (!mt76_queue_is_txfree(q))
- mt76_dma_queue_reset(dev, q);
+ if (!mtk_wed_device_active(&dev->mmio.wed) ||
+ (!mt76_queue_is_wed_txfree(q) &&
+ !(mtk_wed_get_rx_capa(&dev->mmio.wed) &&
+ q->flags & MT_QFLAG_RRO)))
+ mt76_dma_queue_reset(dev, q, false);

return 0;
}
@@ -768,8 +878,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)

spin_unlock_bh(&q->lock);

- if (((q->flags & MT_QFLAG_WED) &&
- FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) ||
+ if (mt76_queue_is_wed_rx(q) ||
(q->flags & MT_QFLAG_RRO))
return;

@@ -790,9 +899,13 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
if (!q->ndesc)
return;

+ if (!q->desc)
+ goto done;
+
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

+done:
mt76_dma_rx_cleanup(dev, q);

/* reset WED rx queues */
@@ -839,8 +952,8 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
bool check_ddone = false;
bool more;

- if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
- q->flags == MT_WED_Q_TXFREE) {
+ if ((IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
+ q->flags == MT_WED_Q_TXFREE)) {
dma_idx = Q_READ(dev, q, dma_idx);
check_ddone = true;
}
@@ -1002,7 +1115,8 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
mt76_for_each_q_rx(dev, i) {
struct mt76_queue *q = &dev->q_rx[i];

- if (mt76_queue_is_wed_rx(q))
+ if (mtk_wed_device_active(&dev->mmio.wed) &&
+ (q->flags & MT_QFLAG_RRO))
continue;

netif_napi_del(&dev->napi[i]);
@@ -1014,6 +1128,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)

if (mtk_wed_device_active(&dev->mmio.wed_ext))
mtk_wed_device_detach(&dev->mmio.wed_ext);
+
mt76_free_pending_txwi(dev);
mt76_free_pending_rxwi(dev);
}
diff --git a/dma.h b/dma.h
index 1b090d78..48037092 100644
--- a/dma.h
+++ b/dma.h
@@ -25,6 +25,13 @@
#define MT_DMA_PPE_ENTRY GENMASK(30, 16)
#define MT_DMA_INFO_PPE_VLD BIT(31)

+#define MT_DMA_CTL_PN_CHK_FAIL BIT(13)
+#define MT_DMA_CTL_VER_MASK BIT(7)
+
+#define MT_DMA_MAGIC_EN BIT(13)
+
+#define MT_DMA_IND_CMD_MAGIC_CNT 8
+
#define MT_DMA_HDR_LEN 4
#define MT_RX_INFO_LEN 4
#define MT_FCE_INFO_LEN 4
@@ -37,6 +44,11 @@ struct mt76_desc {
__le32 info;
} __packed __aligned(4);

+struct mt76_rro_desc {
+ __le32 buf0;
+ __le32 buf1;
+} __packed __aligned(4);
+
enum mt76_qsel {
MT_QSEL_MGMT,
MT_QSEL_HCCA,
diff --git a/mac80211.c b/mac80211.c
index f7578308..3a5755f9 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -727,6 +727,7 @@ static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
return;
}
}
+
__skb_queue_tail(&dev->rx_skb[q], skb);
}

diff --git a/mt76.h b/mt76.h
index ee0dbdd7..e4351338 100644
--- a/mt76.h
+++ b/mt76.h
@@ -48,6 +48,18 @@

#define MT76_TOKEN_FREE_THR 64

+#define MT_QFLAG_RRO_RING GENMASK(6, 5)
+#define MT_QFLAG_RRO_TYPE GENMASK(8, 7)
+#define MT_QFLAG_RRO BIT(9)
+#define MT_QFLAG_MAGIC BIT(10)
+
+#define __MT_RRO_Q(_type, _n) (MT_QFLAG_RRO | \
+ FIELD_PREP(MT_QFLAG_RRO_TYPE, _type) | \
+ FIELD_PREP(MT_QFLAG_RRO_RING, _n))
+#define MT_RRO_Q_DATA(_n) __MT_RRO_Q(MT76_RRO_Q_DATA, _n)
+#define MT_RRO_Q_MSDU_PG(_n) __MT_RRO_Q(MT76_RRO_Q_MSDU_PG, _n)
+#define MT_RRO_Q_IND __MT_RRO_Q(MT76_RRO_Q_IND, 0)
+
#define MT_QFLAG_WED_RING GENMASK(1, 0)
#define MT_QFLAG_WED_TYPE GENMASK(3, 2)
#define MT_QFLAG_WED BIT(4)
@@ -82,6 +94,12 @@ enum mt76_wed_type {
MT76_WED_Q_RX,
};

+enum mt76_RRO_type {
+ MT76_RRO_Q_DATA,
+ MT76_RRO_Q_MSDU_PG,
+ MT76_RRO_Q_IND,
+};
+
struct mt76_bus_ops {
u32 (*rr)(struct mt76_dev *dev, u32 offset);
void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
@@ -128,6 +146,16 @@ enum mt76_rxq_id {
MT_RXQ_MAIN_WA,
MT_RXQ_BAND2,
MT_RXQ_BAND2_WA,
+ MT_RXQ_RRO_BAND0,
+ MT_RXQ_RRO_BAND1,
+ MT_RXQ_RRO_BAND2,
+ MT_RXQ_MSDU_PAGE_BAND0,
+ MT_RXQ_MSDU_PAGE_BAND1,
+ MT_RXQ_MSDU_PAGE_BAND2,
+ MT_RXQ_TXFREE_BAND0,
+ MT_RXQ_TXFREE_BAND1,
+ MT_RXQ_TXFREE_BAND2,
+ MT_RXQ_RRO_IND,
__MT_RXQ_MAX
};

@@ -206,6 +234,7 @@ struct mt76_queue {
spinlock_t lock;
spinlock_t cleanup_lock;
struct mt76_queue_entry *entry;
+ struct mt76_rro_desc *rro_desc;
struct mt76_desc *desc;

u16 first;
@@ -219,8 +248,8 @@ struct mt76_queue {

u8 buf_offset;
u8 hw_idx;
- u8 flags;
-
+ u8 magic_cnt;
+ u32 flags;
u32 wed_regs;

dma_addr_t desc_dma;
@@ -274,7 +303,7 @@ struct mt76_queue_ops {

void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);

- void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
+ void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q, bool skip);
};

enum mt76_phy_type {
@@ -369,6 +398,17 @@ struct mt76_txq {
bool aggr;
};

+struct mt76_rro_ind {
+ u32 se_id : 12;
+ u32 rsv : 4;
+ u32 start_sn : 12;
+ u32 ind_reason : 4;
+ u32 ind_cnt : 13;
+ u32 win_sz : 3;
+ u32 rsv2 : 13;
+ u32 magic_cnt : 3;
+};
+
struct mt76_txwi_cache {
struct list_head list;
dma_addr_t dma_addr;
@@ -1516,12 +1556,19 @@ static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
return (q->flags & MT_QFLAG_WED) &&
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
}
-static inline bool mt76_queue_is_txfree(struct mt76_queue *q)
+
+static inline bool mt76_queue_is_wed_txfree(struct mt76_queue *q)
{
return (q->flags & MT_QFLAG_WED) &&
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE;
}

+static inline bool mt76_queue_is_rro_ind(struct mt76_queue *q)
+{
+ return (q->flags & MT_QFLAG_RRO) &&
+ FIELD_GET(MT_QFLAG_RRO_TYPE, q->flags) == MT76_RRO_Q_IND;
+}
+
struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
@@ -1540,10 +1587,14 @@ static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
static inline int
mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
- int token;
+ int token, start = 0;
+
+ if (mtk_wed_device_active(&dev->mmio.wed))
+ start = dev->mmio.wed.wlan.nbuf;

spin_lock_bh(&dev->token_lock);
- token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
+ token = idr_alloc(&dev->token, *ptxwi, start, start + dev->token_size,
+ GFP_ATOMIC);
spin_unlock_bh(&dev->token_lock);

return token;
diff --git a/mt7996/dma.c b/mt7996/dma.c
index 428f3d08..45ccc7b5 100644
--- a/mt7996/dma.c
+++ b/mt7996/dma.c
@@ -64,6 +64,29 @@ static void mt7996_dma_config(struct mt7996_dev *dev)
RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);

+ if (dev->rro_support) {
+ /* band0 */
+ RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
+ MT7996_RXQ_RRO_BAND0);
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0,
+ MT7996_RXQ_MSDU_PG_BAND0);
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN,
+ MT7996_RXQ_TXFREE0);
+ /* band1 */
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1,
+ MT7996_RXQ_MSDU_PG_BAND1);
+ /* band2 */
+ RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2,
+ MT7996_RXQ_RRO_BAND2);
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2,
+ MT7996_RXQ_MSDU_PG_BAND2);
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI,
+ MT7996_RXQ_TXFREE2);
+
+ RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND,
+ MT7996_RXQ_RRO_IND);
+ }
+
/* data tx queue */
TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
@@ -102,6 +125,22 @@ static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2_WA) + ofs, PREFETCH(0x2));
mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));
mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2) + ofs, PREFETCH(0x10));
+ if (dev->rro_support) {
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND0) + ofs,
+ PREFETCH(0x10));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND2) + ofs,
+ PREFETCH(0x10));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs,
+ PREFETCH(0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
+ PREFETCH(0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
+ PREFETCH(0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND0) + ofs,
+ PREFETCH(0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND2) + ofs,
+ PREFETCH(0x4));
+ }
#undef PREFETCH

mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
@@ -161,6 +200,7 @@ static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)

void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset, bool wed_reset)
{
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
u32 hif1_ofs = 0;
u32 irq_mask;

@@ -169,11 +209,16 @@ void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset, bool wed_reset)

/* enable wpdma tx/rx */
if (!reset) {
- mt76_set(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN |
- MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO);
+ else
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

if (dev->hif2)
mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
@@ -185,8 +230,8 @@ void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset, bool wed_reset)

/* enable interrupts for TX/RX rings */
irq_mask = MT_INT_MCU_CMD |
- MT_INT_RX_DONE_MCU |
- MT_INT_TX_DONE_MCU;
+ MT_INT_RX_DONE_MCU |
+ MT_INT_TX_DONE_MCU;

if (mt7996_band_valid(dev, MT_BAND0))
irq_mask |= MT_INT_BAND0_RX_DONE;
@@ -197,14 +242,14 @@ void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset, bool wed_reset)
if (mt7996_band_valid(dev, MT_BAND2))
irq_mask |= MT_INT_BAND2_RX_DONE;

- if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wed_reset) {
+ if (mtk_wed_device_active(wed) && wed_reset) {
u32 wed_irq_mask = irq_mask;

wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;

mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);

- mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
+ mtk_wed_device_start(wed, wed_irq_mask);
}

irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;
@@ -298,7 +343,8 @@ static int mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
/* fix hardware limitation, pcie1's rx ring3 is not available
* so, redirect pcie0 rx ring3 interrupt to pcie1
*/
- if (mtk_wed_device_active(&dev->mt76.mmio.wed) && dev->rro_support)
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ dev->rro_support)
mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
MT_WFDMA0_RX_INT_SEL_RING6);
else
@@ -311,6 +357,78 @@ static int mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
return 0;
}

+int mt7996_dma_rro_init(struct mt7996_dev *dev)
+{
+ int ret;
+ u32 hif1_ofs = 0;
+ u32 wed_irq_mask;
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ /* ind cmd */
+ dev->mt76.q_rx[MT_RXQ_RRO_IND].flags = MT_RRO_Q_IND | MT_WED_Q_RX(0);
+ dev->mt76.q_rx[MT_RXQ_RRO_IND].flags |= MT_WED_Q_RX(0);
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_IND],
+ MT_RXQ_ID(MT_RXQ_RRO_IND),
+ MT7996_RX_RING_SIZE,
+ 0, MT_RXQ_RRO_IND_RING_BASE);
+ if (ret)
+ return ret;
+
+ /* rx msdu page queue for band0 */
+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags = MT_RRO_Q_MSDU_PG(0);
+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags |= MT_QFLAG_MAGIC;
+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags |= MT_WED_Q_RX(0);
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND0],
+ MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_MSDU_PAGE_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND0));
+ if (ret)
+ return ret;
+
+ if (mt7996_band_valid(dev, MT_BAND1)) {
+ /* rx msdu page queue for band1 */
+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags = MT_RRO_Q_MSDU_PG(1);
+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags |= MT_QFLAG_MAGIC;
+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags |= MT_WED_Q_RX(1);
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND1],
+ MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_MSDU_PAGE_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND1));
+ if (ret)
+ return ret;
+ }
+
+ if (mt7996_band_valid(dev, MT_BAND2)) {
+ /* rx msdu page queue for band2 */
+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags = MT_RRO_Q_MSDU_PG(2);
+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags |= MT_QFLAG_MAGIC;
+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags |= MT_WED_Q_RX(0);
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND2],
+ MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_MSDU_PAGE_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND2));
+ if (ret)
+ return ret;
+ }
+
+ wed_irq_mask = dev->mt76.mmio.irqmask |
+ MT_INT_RRO_RX_DONE |
+ MT_INT_TX_DONE_BAND2;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+
+ mtk_wed_device_start_hwrro(wed, wed_irq_mask, false);
+ mt7996_irq_enable(dev, wed_irq_mask);
+
+ return 0;
+}
+
int mt7996_dma_init(struct mt7996_dev *dev)
{
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
@@ -380,6 +498,9 @@ int mt7996_dma_init(struct mt7996_dev *dev)
return ret;

/* rx data queue for band0 and band1 */
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
+ dev->mt76.q_rx[MT_RXQ_MAIN].flags = MT_WED_Q_RX(0);
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
MT_RXQ_ID(MT_RXQ_MAIN),
MT7996_RX_RING_SIZE,
@@ -403,9 +524,6 @@ int mt7996_dma_init(struct mt7996_dev *dev)
if (mt7996_band_valid(dev, MT_BAND2)) {
/* rx data queue for band2 */
rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
- if (mtk_wed_device_active(wed))
- rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2);
-
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
MT_RXQ_ID(MT_RXQ_BAND2),
MT7996_RX_RING_SIZE,
@@ -429,11 +547,12 @@ int mt7996_dma_init(struct mt7996_dev *dev)
return ret;
}

-
- if (dev->rro_support) {
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
+ dev->rro_support) {
/* rx rro data queue for band0 */
dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags = MT_RRO_Q_DATA(0);
dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags |= MT_QFLAG_MAGIC;
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags |= MT_WED_Q_RX(0);
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
MT_RXQ_ID(MT_RXQ_RRO_BAND0),
MT7996_RX_RING_SIZE,
@@ -443,8 +562,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
return ret;

/* tx free notify event from WA for band0 */
- if (mtk_wed_device_active(wed))
- dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
MT7996_RX_MCU_RING_SIZE,
@@ -457,6 +575,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
/* rx rro data queue for band2 */
dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags = MT_RRO_Q_DATA(1);
dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags |= MT_QFLAG_MAGIC;
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags |= MT_WED_Q_RX(1);
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
MT_RXQ_ID(MT_RXQ_RRO_BAND2),
MT7996_RX_RING_SIZE,
@@ -534,18 +653,18 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)

/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++) {
- mt76_queue_reset(dev, dev->mphy.q_tx[i]);
+ mt76_queue_reset(dev, dev->mphy.q_tx[i], false);
if (phy2)
- mt76_queue_reset(dev, phy2->q_tx[i]);
+ mt76_queue_reset(dev, phy2->q_tx[i], false);
if (phy3)
- mt76_queue_reset(dev, phy3->q_tx[i]);
+ mt76_queue_reset(dev, phy3->q_tx[i], false);
}

for (i = 0; i < __MT_MCUQ_MAX; i++)
- mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i], false);

mt76_for_each_q_rx(&dev->mt76, i) {
- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+ mt76_queue_reset(dev, &dev->mt76.q_rx[i], false);
}

mt76_tx_status_check(&dev->mt76, true);
diff --git a/mt7996/init.c b/mt7996/init.c
index 6cfbc50d..d70dcf9f 100644
--- a/mt7996/init.c
+++ b/mt7996/init.c
@@ -496,8 +496,13 @@ void mt7996_mac_init(struct mt7996_dev *dev)

/* rro module init */
mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 2);
- mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3);
- mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1);
+ if (dev->rro_support) {
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 1);
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 0);
+ } else {
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3);
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1);
+ }

mt7996_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
MCU_WA_PARAM_HW_PATH_HIF_VER,
@@ -650,6 +655,114 @@ void mt7996_wfsys_reset(struct mt7996_dev *dev)
msleep(20);
}

+static int mt7996_rro_init(struct mt7996_dev *dev)
+{
+ struct mt7996_rro_addr *ptr;
+ struct mt7996_rro_cfg *rro = &dev->rro;
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ u32 size, val = 0, reg = MT_RRO_ADDR_ELEM_SEG_ADDR0;
+ int i, j;
+ void *buf;
+
+ for (i = 0; i < MT7996_RRO_BA_BITMAP_CR_CNT; i++) {
+ buf = dmam_alloc_coherent(dev->mt76.dma_dev,
+ MT7996_BA_BITMAP_SZ_PER_CR,
+ &rro->ba_bitmap_cache_pa[i],
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ rro->ba_bitmap_cache_va[i] = buf;
+ }
+
+ rro->win_sz = MT7996_RRO_WIN_SIZE_MAX;
+ for (i = 0; i < MT7996_RRO_ADDR_ELEM_CR_CNT; i++) {
+ size = MT7996_RRO_SESSION_PER_CR *
+ rro->win_sz * sizeof(struct mt7996_rro_addr);
+
+ buf = dmam_alloc_coherent(dev->mt76.dma_dev, size,
+ &rro->addr_elem_alloc_pa[i],
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ rro->addr_elem_alloc_va[i] = buf;
+
+ memset(rro->addr_elem_alloc_va[i], 0, size);
+
+ ptr = rro->addr_elem_alloc_va[i];
+ for (j = 0; j < MT7996_RRO_SESSION_PER_CR * rro->win_sz; j++, ptr++)
+ ptr->signature = 0xff;
+
+ wed->wlan.ind_cmd.addr_elem_phys[i] = rro->addr_elem_alloc_pa[i];
+ }
+
+ rro->particular_se_id = MT7996_RRO_SESSION_MAX;
+ size = rro->win_sz * sizeof(struct mt7996_rro_addr);
+ buf = dmam_alloc_coherent(dev->mt76.dma_dev, size,
+ &rro->particular_session_pa,
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ rro->particular_session_va = buf;
+ ptr = rro->particular_session_va;
+ for (j = 0; j < rro->win_sz; j++, ptr++)
+ ptr->signature = 0xff;
+
+ INIT_LIST_HEAD(&rro->pg_addr_cache);
+ for (i = 0; i < MT7996_RRO_MSDU_PG_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&rro->pg_hash_head[i]);
+
+ /* rro hw init */
+ /* TODO: remove line after WM has set */
+ mt76_clear(dev, WF_RRO_AXI_MST_CFG, WF_RRO_AXI_MST_CFG_DIDX_OK);
+
+ /* setup BA bitmap cache address */
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE0,
+ rro->ba_bitmap_cache_pa[0]);
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE1, 0);
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT0,
+ rro->ba_bitmap_cache_pa[1]);
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT1, 0);
+
+ /* setup Address element address */
+ for (i = 0; i < MT7996_RRO_ADDR_ELEM_CR_CNT; i++) {
+ mt76_wr(dev, reg, rro->addr_elem_alloc_pa[i] >> 4);
+ reg += 4;
+ }
+
+ /* setup Address element address - separate address segment mode */
+ mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE1,
+ MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE);
+
+ wed->wlan.ind_cmd.win_size = ffs(rro->win_sz) - 6;
+ wed->wlan.ind_cmd.particular_sid = rro->particular_se_id;
+ wed->wlan.ind_cmd.particular_se_phys = rro->particular_session_pa;
+ wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_CR_CNT;
+ wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
+
+ mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
+ mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
+ MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
+
+ /* particular session configure */
+ /* use max session idx + 1 as particular session id */
+ mt76_wr(dev, MT_RRO_PARTICULAR_CFG0,
+ rro->particular_session_pa);
+
+ val = FIELD_PREP(MT_RRO_PARTICULAR_SID,
+ MT7996_RRO_SESSION_MAX);
+ val |= MT_RRO_PARTICULAR_CONFG_EN;
+ mt76_wr(dev, MT_RRO_PARTICULAR_CFG1, val);
+
+ /* interrupt enable */
+ mt76_wr(dev, MT_RRO_HOST_INT_ENA,
+ MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA);
+
+ /* rro ind cmd queue init */
+ return mt7996_dma_rro_init(dev);
+}
+
static int mt7996_init_hardware(struct mt7996_dev *dev)
{
int ret, idx;
@@ -677,6 +790,13 @@ static int mt7996_init_hardware(struct mt7996_dev *dev)
if (ret)
return ret;

+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ dev->rro_support) {
+ ret = mt7996_rro_init(dev);
+ if (ret)
+ return ret;
+ }
+
ret = mt7996_eeprom_init(dev);
if (ret < 0)
return ret;
diff --git a/mt7996/mac.c b/mt7996/mac.c
index fc2d9269..4fbbc077 100644
--- a/mt7996/mac.c
+++ b/mt7996/mac.c
@@ -614,8 +614,37 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
return 0;
}

+static void
+mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
+ struct mt7996_sta *msta, struct sk_buff *skb,
+ u32 info)
+{
+ struct ieee80211_vif *vif;
+ struct wireless_dev *wdev;
+
+ if (!msta || !msta->vif)
+ return;
+
+ if (!mt76_queue_is_wed_rx(q))
+ return;
+
+ if (!(info & MT_DMA_INFO_PPE_VLD))
+ return;
+
+ vif = container_of((void *)msta->vif, struct ieee80211_vif,
+ drv_priv);
+ wdev = ieee80211_vif_to_wdev(vif);
+ skb->dev = wdev->netdev;
+
+ mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
+ FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
+ FIELD_GET(MT_DMA_PPE_ENTRY, info));
+}
+
+
static int
-mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
+mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
+ struct sk_buff *skb, u32 *info)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -640,7 +669,10 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
u16 seq_ctrl = 0;
__le16 fc = 0;
int idx;
+ u8 hw_aggr = false;
+ struct mt7996_sta *msta = NULL;

+ hw_aggr = status->aggr;
memset(status, 0, sizeof(*status));

band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
@@ -667,8 +699,6 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);

if (status->wcid) {
- struct mt7996_sta *msta;
-
msta = container_of(status->wcid, struct mt7996_sta, wcid);
spin_lock_bh(&dev->sta_poll_lock);
if (list_empty(&msta->poll_list))
@@ -871,12 +901,14 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
#endif
} else {
status->flag |= RX_FLAG_8023;
+ mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
+ *info);
}

if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
mt7996_mac_decode_he_radiotap(skb, rxv, mode);

- if (!status->wcid || !ieee80211_is_data_qos(fc))
+ if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
return 0;

status->aggr = unicast &&
@@ -1604,7 +1636,7 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
dev_kfree_skb(skb);
break;
case PKT_TYPE_NORMAL:
- if (!mt7996_mac_fill_rx(dev, skb)) {
+ if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
mt76_rx(&dev->mt76, q, skb);
return;
}
diff --git a/mt7996/mcu.c b/mt7996/mcu.c
index 59f22f6d..1891c0d7 100644
--- a/mt7996/mcu.c
+++ b/mt7996/mcu.c
@@ -949,7 +949,7 @@ int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif)
static int
mt7996_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
+ bool enable, bool tx, bool rro_enable)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
struct sta_rec_ba_uni *ba;
@@ -970,6 +970,8 @@ mt7996_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
ba->ba_en = enable << params->tid;
ba->amsdu = params->amsdu;
ba->tid = params->tid;
+ if (rro_enable && !tx && enable)
+ ba->ba_rdd_rro = true;

return mt76_mcu_skb_send_msg(dev, skb,
MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
@@ -987,7 +989,7 @@ int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
msta->wcid.amsdu = false;

return mt7996_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
- enable, true);
+ enable, true, dev->rro_support);
}

int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
@@ -998,7 +1000,7 @@ int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct mt7996_vif *mvif = msta->vif;

return mt7996_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
- enable, false);
+ enable, false, dev->rro_support);
}

static void
diff --git a/mt7996/mmio.c b/mt7996/mmio.c
index b9e47e73..9960dca7 100644
--- a/mt7996/mmio.c
+++ b/mt7996/mmio.c
@@ -346,9 +346,15 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_TRI) - 1;
}

+ wed->wlan.wpdma_rx_glo = wed->wlan.phy_base + hif1_ofs + MT_WFDMA0_GLO_CFG;
+ wed->wlan.wpdma_rx = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
+ MT7996_RXQ_BAND0 * MT_RING_SIZE;
+
wed->wlan.chip_id = 0x7991;
wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND2) - 1;
} else {
+ wed->wlan.hwrro = dev->rro_support; /* default on */
wed->wlan.wpdma_int = wed->wlan.phy_base + MT_INT_SOURCE_CSR;
wed->wlan.wpdma_mask = wed->wlan.phy_base + MT_INT_MASK_CSR;
wed->wlan.wpdma_tx = wed->wlan.phy_base + MT_TXQ_RING_BASE(0) +
@@ -360,13 +366,33 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
MT7996_RXQ_BAND0 * MT_RING_SIZE;

+ wed->wlan.wpdma_rx_rro[0] = wed->wlan.phy_base +
+ MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND0) +
+ MT7996_RXQ_RRO_BAND0 * MT_RING_SIZE;
+ wed->wlan.wpdma_rx_rro[1] = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND2) +
+ MT7996_RXQ_RRO_BAND2 * MT_RING_SIZE;
+ wed->wlan.wpdma_rx_pg = wed->wlan.phy_base +
+ MT_RXQ_RING_BASE(MT7996_RXQ_MSDU_PG_BAND0) +
+ MT7996_RXQ_MSDU_PG_BAND0 * MT_RING_SIZE;
+
wed->wlan.rx_nbuf = 65536;
wed->wlan.rx_npkt = 24576;
+ if (dev->hif2)
+ wed->wlan.rx_npkt += 8192;
+
wed->wlan.rx_size = SKB_WITH_OVERHEAD(MT_RX_BUF_SIZE);

wed->wlan.rx_tbit[0] = ffs(MT_INT_RX_DONE_BAND0) - 1;
wed->wlan.rx_tbit[1] = ffs(MT_INT_RX_DONE_BAND2) - 1;

+ wed->wlan.rro_rx_tbit[0] = ffs(MT_INT_RX_DONE_RRO_BAND0) - 1;
+ wed->wlan.rro_rx_tbit[1] = ffs(MT_INT_RX_DONE_RRO_BAND2) - 1;
+
+ wed->wlan.rx_pg_tbit[0] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND0) - 1;
+ wed->wlan.rx_pg_tbit[1] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND1) - 1;
+ wed->wlan.rx_pg_tbit[2] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND2) - 1;
+
wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND0) - 1;
wed->wlan.tx_tbit[1] = ffs(MT_INT_TX_DONE_BAND1) - 1;
if (dev->rro_support) {
@@ -378,6 +404,8 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) +
MT7996_RXQ_MCU_WA_MAIN * MT_RING_SIZE;
}
+
+ dev->mt76.rx_token_size += wed->wlan.rx_npkt;
}

wed->wlan.nbuf = 16384;
@@ -394,8 +422,6 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.release_rx_buf = mt7996_mmio_wed_release_rx_buf;
wed->wlan.update_wo_rx_stats = NULL;

- dev->mt76.rx_token_size += wed->wlan.rx_npkt;
-
if (mtk_wed_device_attach(wed))
return 0;

@@ -557,10 +583,9 @@ static void mt7996_irq_tasklet(struct tasklet_struct *t)
irqreturn_t mt7996_irq_handler(int irq, void *dev_instance)
{
struct mt7996_dev *dev = dev_instance;
- struct mtk_wed_device *wed = &dev->mt76.mmio.wed;

- if (mtk_wed_device_active(wed))
- mtk_wed_device_irq_set_mask(wed, 0);
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mtk_wed_device_irq_set_mask(&dev->mt76.mmio.wed, 0);
else
mt76_wr(dev, MT_INT_MASK_CSR, 0);

@@ -592,6 +617,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
.token_size = MT7996_TOKEN_SIZE,
+ .rx_token_size = MT7996_RX_TOKEN_SIZE,
.tx_prepare_skb = mt7996_tx_prepare_skb,
.tx_complete_skb = mt76_connac_tx_complete_skb,
.rx_skb = mt7996_queue_rx_skb,
| 1260 | diff --git a/mt7996/mt7996.h b/mt7996/mt7996.h |
| 1261 | index 43f20da4..836c7db7 100644 |
| 1262 | --- a/mt7996/mt7996.h |
| 1263 | +++ b/mt7996/mt7996.h |
| 1264 | @@ -39,6 +39,7 @@ |
| 1265 | #define MT7996_EEPROM_SIZE 7680 |
| 1266 | #define MT7996_EEPROM_BLOCK_SIZE 16 |
| 1267 | #define MT7996_TOKEN_SIZE 16384 |
| 1268 | +#define MT7996_RX_TOKEN_SIZE 16384 |
| 1269 | |
| 1270 | #define MT7996_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */ |
| 1271 | #define MT7996_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ |
| 1272 | @@ -63,6 +64,24 @@ |
| 1273 | #define MT7996_SKU_RATE_NUM 417 |
| 1274 | #define MT7996_SKU_PATH_NUM 494 |
| 1275 | |
| 1276 | +#define MT7996_RRO_MSDU_PG_HASH_SIZE 127 |
| 1277 | +#define MT7996_RRO_SESSION_MAX 1024 |
| 1278 | +#define MT7996_RRO_WIN_SIZE_MAX 1024 |
| 1279 | +#define MT7996_RRO_ADDR_ELEM_CR_CNT 128 |
| 1280 | +#define MT7996_RRO_BA_BITMAP_CR_CNT 2 |
| 1281 | +#define MT7996_RRO_SESSION_PER_CR (MT7996_RRO_SESSION_MAX / \ |
| 1282 | + MT7996_RRO_ADDR_ELEM_CR_CNT) |
| 1283 | +#define MT7996_BA_BITMAP_SZ_PER_SESSION 128 |
| 1284 | +#define MT7996_BA_BITMAP_SZ_PER_CR ((MT7996_RRO_SESSION_MAX * \ |
| 1285 | + MT7996_BA_BITMAP_SZ_PER_SESSION) / \ |
| 1286 | + MT7996_RRO_BA_BITMAP_CR_CNT) |
| 1287 | +#define MT7996_SKB_TRUESIZE(x) ((x) + \ |
| 1288 | + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) |
| 1289 | +#define MT7996_RX_BUF_SIZE MT7996_SKB_TRUESIZE(1800) |
| 1290 | +#define MT7996_RX_MSDU_PAGE_SIZE MT7996_SKB_TRUESIZE(128) |
| 1291 | + |
| 1292 | +#define MT7996_WED_RX_TOKEN_SIZE 32768 |
| 1293 | + |
| 1294 | struct mt7996_vif; |
| 1295 | struct mt7996_sta; |
| 1296 | struct mt7996_dfs_pulse; |
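The derived RRO constants above are worth spelling out: 1024 sessions spread over 128 address-element regions gives 8 sessions per region, and splitting 1024 sessions times a 128-byte BA bitmap across 2 base registers gives 64 KiB per register. MT7996_SKB_TRUESIZE() follows the familiar pattern of usable length plus the aligned skb_shared_info tail. A throwaway check of the arithmetic (plain C with the values inlined, not the kernel macros):

    #include <assert.h>

    int main(void)
    {
    	int session_per_cr = 1024 / 128;		/* MT7996_RRO_SESSION_PER_CR */
    	int ba_bitmap_per_cr = (1024 * 128) / 2;	/* MT7996_BA_BITMAP_SZ_PER_CR */

    	assert(session_per_cr == 8);
    	assert(ba_bitmap_per_cr == 65536);		/* 64 KiB per base register */
    	return 0;
    }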
@@ -102,6 +121,16 @@ enum mt7996_rxq_id {
 	MT7996_RXQ_BAND0 = 4,
 	MT7996_RXQ_BAND1 = 4, /* unused */
 	MT7996_RXQ_BAND2 = 5,
+	MT7996_RXQ_RRO_BAND0 = 8,
+	MT7996_RXQ_RRO_BAND1 = 8, /* unused */
+	MT7996_RXQ_RRO_BAND2 = 6,
+	MT7996_RXQ_MSDU_PG_BAND0 = 10,
+	MT7996_RXQ_MSDU_PG_BAND1 = 11,
+	MT7996_RXQ_MSDU_PG_BAND2 = 12,
+	MT7996_RXQ_TXFREE0 = 9,
+	MT7996_RXQ_TXFREE1 = 9,
+	MT7996_RXQ_TXFREE2 = 7,
+	MT7996_RXQ_RRO_IND = 0,
 };
 
 struct mt7996_twt_flow {
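These hardware queue ids feed the wpdma address math in the mmio.c hunk earlier in this patch: the id selects the ring register block and also scales the per-ring stride. A sketch of that arithmetic as a hypothetical helper (not in the patch; MT_RXQ_RING_BASE() and MT_RING_SIZE come from regs.h, and the dev parameter is needed because MT_RXQ_RING_BASE() expands dev-relative):

    static u32 mt7996_wed_rx_ring_addr(struct mt7996_dev *dev,
    				       struct mtk_wed_device *wed, int qid)
    {
    	/* mirrors e.g. wed->wlan.wpdma_rx_rro[0] for MT7996_RXQ_RRO_BAND0 */
    	return wed->wlan.phy_base + MT_RXQ_RING_BASE(qid) +
    	       qid * MT_RING_SIZE;
    }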
@@ -272,6 +301,31 @@ struct mt7996_air_monitor_ctrl {
 };
 #endif
 
+struct mt7996_rro_addr {
+	u32 head_pkt_l;
+	u32 head_pkt_h	 : 4;
+	u32 seg_cnt	 : 11;
+	u32 out_of_range : 1;
+	u32 rsv		 : 8;
+	u32 signature	 : 8;
+};
+
+struct mt7996_rro_cfg {
+	u32 ind_signature;
+	void *ba_bitmap_cache_va[MT7996_RRO_BA_BITMAP_CR_CNT];
+	void *addr_elem_alloc_va[MT7996_RRO_ADDR_ELEM_CR_CNT];
+	void *particular_session_va;
+	u32 particular_se_id;
+	dma_addr_t ba_bitmap_cache_pa[MT7996_RRO_BA_BITMAP_CR_CNT];
+	dma_addr_t addr_elem_alloc_pa[MT7996_RRO_ADDR_ELEM_CR_CNT];
+	dma_addr_t particular_session_pa;
+	u16 win_sz;
+
+	spinlock_t lock;
+	struct list_head pg_addr_cache;
+	struct list_head pg_hash_head[MT7996_RRO_MSDU_PG_HASH_SIZE];
+};
+
 struct mt7996_phy {
 	struct mt76_phy *mt76;
 	struct mt7996_dev *dev;
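A note on mt7996_rro_addr: the five bitfields after head_pkt_l total 4 + 11 + 1 + 8 + 8 = 32 bits, so each address element is exactly 8 bytes, and head_pkt_h extends the head-packet DMA address to 36 bits. A standalone sketch of that layout (bitfield ordering is compiler- and endian-dependent, so this is an illustration of the sizing, not a wire-format guarantee; the helper is hypothetical):

    #include <stdint.h>

    struct rro_addr_sketch {		/* mirrors struct mt7996_rro_addr */
    	uint32_t head_pkt_l;
    	uint32_t head_pkt_h	 : 4;
    	uint32_t seg_cnt	 : 11;
    	uint32_t out_of_range	 : 1;
    	uint32_t rsv		 : 8;
    	uint32_t signature	 : 8;
    };

    _Static_assert(sizeof(struct rro_addr_sketch) == 8, "8-byte element");

    /* hypothetical helper: reassemble the 36-bit head packet address */
    static inline uint64_t rro_head_pkt(const struct rro_addr_sketch *e)
    {
    	return ((uint64_t)e->head_pkt_h << 32) | e->head_pkt_l;
    }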
@@ -390,6 +444,9 @@ struct mt7996_dev {
 	bool flash_mode:1;
 	bool has_eht:1;
 
+	bool rro_support:1;
+	struct mt7996_rro_cfg rro;
+
 	bool testmode_enable;
 	bool bin_file_mode;
 	u8 eeprom_mode;
@@ -709,6 +766,7 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 			  struct ieee80211_sta *sta,
 			  struct mt76_tx_info *tx_info);
 void mt7996_tx_token_put(struct mt7996_dev *dev);
+int mt7996_dma_rro_init(struct mt7996_dev *dev);
 void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
 			 struct sk_buff *skb, u32 *info);
 bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len);
diff --git a/mt7996/regs.h b/mt7996/regs.h
index 5ed7bcca..47fa965f 100644
--- a/mt7996/regs.h
+++ b/mt7996/regs.h
@@ -39,6 +39,40 @@ enum base_rev {
 
 #define __BASE(_id, _band)		(dev->reg.base[(_id)].band_base[(_band)])
 
+
+/* RRO TOP */
+#define MT_RRO_TOP_BASE				0xA000
+#define MT_RRO_TOP(ofs)				(MT_RRO_TOP_BASE + (ofs))
+
+#define MT_RRO_BA_BITMAP_BASE0			MT_RRO_TOP(0x8)
+#define MT_RRO_BA_BITMAP_BASE1			MT_RRO_TOP(0xC)
+#define WF_RRO_AXI_MST_CFG			MT_RRO_TOP(0xB8)
+#define WF_RRO_AXI_MST_CFG_DIDX_OK		BIT(12)
+#define MT_RRO_ADDR_ARRAY_BASE1			MT_RRO_TOP(0x34)
+#define MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE	BIT(31)
+
+#define MT_RRO_IND_CMD_SIGNATURE_BASE0		MT_RRO_TOP(0x38)
+#define MT_RRO_IND_CMD_SIGNATURE_BASE1		MT_RRO_TOP(0x3C)
+#define MT_RRO_IND_CMD_0_CTRL0			MT_RRO_TOP(0x40)
+#define MT_RRO_IND_CMD_SIGNATURE_BASE1_EN	BIT(31)
+
+#define MT_RRO_PARTICULAR_CFG0			MT_RRO_TOP(0x5C)
+#define MT_RRO_PARTICULAR_CFG1			MT_RRO_TOP(0x60)
+#define MT_RRO_PARTICULAR_CONFG_EN		BIT(31)
+#define MT_RRO_PARTICULAR_SID			GENMASK(30, 16)
+
+#define MT_RRO_BA_BITMAP_BASE_EXT0		MT_RRO_TOP(0x70)
+#define MT_RRO_BA_BITMAP_BASE_EXT1		MT_RRO_TOP(0x74)
+#define MT_RRO_HOST_INT_ENA			MT_RRO_TOP(0x204)
+#define MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA	BIT(0)
+
+#define MT_RRO_ADDR_ELEM_SEG_ADDR0		MT_RRO_TOP(0x400)
+
+#define MT_RRO_ACK_SN_CTRL			MT_RRO_TOP(0x50)
+#define MT_RRO_ACK_SN_CTRL_SN_MASK		GENMASK(27, 16)
+#define MT_RRO_ACK_SN_CTRL_SESSION_MASK		GENMASK(11, 0)
+
+
 #define MT_MCU_INT_EVENT			0x2108
 #define MT_MCU_INT_EVENT_DMA_STOPPED		BIT(0)
 #define MT_MCU_INT_EVENT_DMA_INIT		BIT(1)
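The GENMASK() fields above are meant to be packed with the usual bitfield helpers. A hypothetical write of the ACK SN control word, in the style mt76 code uses elsewhere (FIELD_PREP() is from <linux/bitfield.h> and mt76_wr() exists in the tree, but this helper itself is a sketch, not part of the patch):

    static void mt7996_rro_ack_sn_set(struct mt7996_dev *dev,
    				      u16 session_id, u16 sn)
    {
    	u32 val = FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK, session_id) |
    		  FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK, sn);

    	mt76_wr(dev, MT_RRO_ACK_SN_CTRL, val);
    }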
@@ -400,6 +434,7 @@ enum base_rev {
 #define MT_MCUQ_RING_BASE(q)		(MT_Q_BASE(q) + 0x300)
 #define MT_TXQ_RING_BASE(q)		(MT_Q_BASE(__TXQ(q)) + 0x300)
 #define MT_RXQ_RING_BASE(q)		(MT_Q_BASE(__RXQ(q)) + 0x500)
+#define MT_RXQ_RRO_IND_RING_BASE	MT_RRO_TOP(0x40)
 
 #define MT_MCUQ_EXT_CTRL(q)		(MT_Q_BASE(q) + 0x600 +	\
 					 MT_MCUQ_ID(q) * 0x4)
@@ -427,6 +462,15 @@ enum base_rev {
 #define MT_INT_MCU_CMD			BIT(29)
 #define MT_INT_RX_TXFREE_EXT		BIT(26)
 
+#define MT_INT_RX_DONE_RRO_BAND0	BIT(16)
+#define MT_INT_RX_DONE_RRO_BAND1	BIT(16)
+#define MT_INT_RX_DONE_RRO_BAND2	BIT(14)
+#define MT_INT_RX_DONE_RRO_IND		BIT(11)
+#define MT_INT_RX_DONE_MSDU_PG_BAND0	BIT(18)
+#define MT_INT_RX_DONE_MSDU_PG_BAND1	BIT(19)
+#define MT_INT_RX_DONE_MSDU_PG_BAND2	BIT(23)
+
+
 #define MT_INT_RX(q)			(dev->q_int_mask[__RXQ(q)])
 #define MT_INT_TX_MCU(q)		(dev->q_int_mask[(q)])
 
@@ -434,20 +478,31 @@ enum base_rev {
 					 MT_INT_RX(MT_RXQ_MCU_WA))
 
 #define MT_INT_BAND0_RX_DONE		(MT_INT_RX(MT_RXQ_MAIN) |		\
-					 MT_INT_RX(MT_RXQ_MAIN_WA))
+					 MT_INT_RX(MT_RXQ_MAIN_WA) |		\
+					 MT_INT_RX(MT_RXQ_TXFREE_BAND0))
 
 #define MT_INT_BAND1_RX_DONE		(MT_INT_RX(MT_RXQ_BAND1) |		\
 					 MT_INT_RX(MT_RXQ_BAND1_WA) |		\
-					 MT_INT_RX(MT_RXQ_MAIN_WA))
+					 MT_INT_RX(MT_RXQ_MAIN_WA) |		\
+					 MT_INT_RX(MT_RXQ_TXFREE_BAND0))
 
 #define MT_INT_BAND2_RX_DONE		(MT_INT_RX(MT_RXQ_BAND2) |		\
 					 MT_INT_RX(MT_RXQ_BAND2_WA) |		\
-					 MT_INT_RX(MT_RXQ_MAIN_WA))
+					 MT_INT_RX(MT_RXQ_MAIN_WA) |		\
+					 MT_INT_RX(MT_RXQ_TXFREE_BAND0))
+
+#define MT_INT_RRO_RX_DONE		(MT_INT_RX(MT_RXQ_RRO_BAND0) |		\
+					 MT_INT_RX(MT_RXQ_RRO_BAND1) |		\
+					 MT_INT_RX(MT_RXQ_RRO_BAND2) |		\
+					 MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND0) |	\
+					 MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND1) |	\
+					 MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND2))
 
 #define MT_INT_RX_DONE_ALL		(MT_INT_RX_DONE_MCU |			\
 					 MT_INT_BAND0_RX_DONE |			\
 					 MT_INT_BAND1_RX_DONE |			\
-					 MT_INT_BAND2_RX_DONE)
+					 MT_INT_BAND2_RX_DONE |			\
+					 MT_INT_RRO_RX_DONE)
 
 #define MT_INT_TX_DONE_FWDL		BIT(26)
 #define MT_INT_TX_DONE_MCU_WM		BIT(27)
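With MT_INT_RRO_RX_DONE folded into MT_INT_RX_DONE_ALL, every RRO and MSDU-page ring participates in the common rx interrupt mask. A hypothetical helper showing how a driver might trim that mask when hardware RRO is disabled (a sketch under the assumption that dev->rro_support from the mt7996.h hunk gates the feature; not part of this patch):

    static u32 mt7996_rx_irq_mask(struct mt7996_dev *dev)
    {
    	u32 mask = MT_INT_RX_DONE_ALL;

    	/* drop the RRO/MSDU-page ring bits when hardware RRO is off */
    	if (!dev->rro_support)
    		mask &= ~MT_INT_RRO_RX_DONE;

    	return mask;
    }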
-- 
2.18.0