| From 017ed7925cbdfb41d3d85fed54a97cff9fcf2f78 Mon Sep 17 00:00:00 2001 |
| From: Bo Jiao <Bo.Jiao@mediatek.com> |
| Date: Mon, 6 Feb 2023 13:50:56 +0800 |
| Subject: [PATCH] wifi: mt76: mt7996: wed: add wed3.0 rx support |
| |
| Add hardware RRO (receive reordering offload) support. This is a
| preliminary patch for WED 3.0 RX support.
| 
| Introduce the RRO data, MSDU page and indication command rings, hand
| ring ownership to the hardware via a magic-count handshake, and route
| tx free notify events through dedicated rx rings while hardware RRO
| is in use.
| |
| Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com> |
| Signed-off-by: sujuan.chen <sujuan.chen@mediatek.com> |
| --- |
| dma.c | 205 +++++++++++++++++++++++++++++++++++++----------- |
| dma.h | 12 +++ |
| mac80211.c | 1 + |
| mt76.h | 63 +++++++++++++-- |
| mt7996/dma.c | 163 ++++++++++++++++++++++++++++++++------ |
| mt7996/init.c | 124 ++++++++++++++++++++++++++++- |
| mt7996/mac.c | 42 ++++++++-- |
| mt7996/mcu.c | 8 +- |
| mt7996/mmio.c | 36 +++++++-- |
| mt7996/mt7996.h | 58 ++++++++++++++ |
| mt7996/regs.h | 63 ++++++++++++++- |
| 11 files changed, 683 insertions(+), 92 deletions(-) |
| |
| diff --git a/dma.c b/dma.c |
| index 930ec768..e5b4d898 100644 |
| --- a/dma.c |
| +++ b/dma.c |
| @@ -193,46 +193,68 @@ EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi); |
| static void |
| mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) |
| { |
| + int ndesc = q->ndesc; |
| + |
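| + /* pass the magic-count enable bit to hw along with the ring size */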
| + if (q->flags & MT_QFLAG_MAGIC) |
| + ndesc |= MT_DMA_MAGIC_EN; |
| + |
| Q_WRITE(dev, q, desc_base, q->desc_dma); |
| - Q_WRITE(dev, q, ring_size, q->ndesc); |
| + Q_WRITE(dev, q, ring_size, ndesc); |
| q->head = Q_READ(dev, q, dma_idx); |
| q->tail = q->head; |
| } |
| |
| static void |
| -mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q) |
| +mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q, bool skip) |
| { |
| int i; |
| |
| if (!q || !q->ndesc) |
| return; |
| |
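| + /* rro ind cmd rings have no host descriptor array (see mt76_dma_alloc_queue) */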
| + if (!q->desc) |
| + goto done; |
| + |
| /* clear descriptors */ |
| for (i = 0; i < q->ndesc; i++) |
| q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); |
| |
| + if (skip) |
| + goto sync; |
| + |
| +done: |
| Q_WRITE(dev, q, cpu_idx, 0); |
| Q_WRITE(dev, q, dma_idx, 0); |
| +sync: |
| mt76_dma_sync_idx(dev, q); |
| } |
| |
| static int |
| mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q, |
| - struct mt76_queue_buf *buf, void *data) |
| + struct mt76_queue_buf *buf, void *data, |
| + struct mt76_rxwi_cache *rxwi) |
| { |
| - struct mt76_desc *desc = &q->desc[q->head]; |
| + struct mt76_desc *desc; |
| struct mt76_queue_entry *entry = &q->entry[q->head]; |
| - struct mt76_rxwi_cache *rxwi = NULL; |
| u32 buf1 = 0, ctrl; |
| int idx = q->head; |
| int rx_token; |
| + void *e_buf = data; |
| + |
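| + /* ind cmd entries live in the rro descriptor ring itself, no rx buffer */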
| + if (mt76_queue_is_rro_ind(q)) { |
| + e_buf = &q->rro_desc[q->head]; |
| + goto done; |
| + } |
| |
| + desc = &q->desc[q->head]; |
| ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len); |
| |
| if (mt76_queue_is_wed_rx(q)) { |
| - rxwi = mt76_get_rxwi(dev); |
| - if (!rxwi) |
| - return -ENOMEM; |
| + if (!rxwi) { |
| + rxwi = mt76_get_rxwi(dev); |
| + if (!rxwi) |
| + return -ENOMEM; |
| + } |
| |
| rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr); |
| if (rx_token < 0) { |
| @@ -249,10 +271,11 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q, |
| WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl)); |
| WRITE_ONCE(desc->info, 0); |
| |
| +done: |
| entry->dma_addr[0] = buf->addr; |
| entry->dma_len[0] = buf->len; |
| entry->rxwi = rxwi; |
| - entry->buf = data; |
| + entry->buf = e_buf; |
| entry->wcid = 0xffff; |
| entry->skip_buf1 = true; |
| q->head = (q->head + 1) % q->ndesc; |
| @@ -396,14 +419,18 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush) |
| |
| static void * |
| mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, |
| - int *len, u32 *info, bool *more, bool *drop) |
| + int *len, u32 *info, bool *more, bool *drop, bool flush) |
| { |
| struct mt76_queue_entry *e = &q->entry[idx]; |
| struct mt76_desc *desc = &q->desc[idx]; |
| - void *buf; |
| + void *buf = e->buf; |
| + u32 ctrl; |
| |
| + if (mt76_queue_is_rro_ind(q)) |
| + goto done; |
| + |
| + ctrl = le32_to_cpu(READ_ONCE(desc->ctrl)); |
| if (len) { |
| - u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl)); |
| *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl); |
| *more = !(ctrl & MT_DMA_CTL_LAST_SEC0); |
| } |
| @@ -411,6 +438,12 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, |
| if (info) |
| *info = le32_to_cpu(desc->info); |
| |
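| + /* on descriptors with the version bit set, the pn check drives the drop decision */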
| + if (drop) { |
| + *drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A | MT_DMA_CTL_DROP)); |
| + if (ctrl & MT_DMA_CTL_VER_MASK) |
| + *drop = !!(ctrl & MT_DMA_CTL_PN_CHK_FAIL); |
| + } |
| + |
| if (mt76_queue_is_wed_rx(q)) { |
| u32 buf1 = le32_to_cpu(desc->buf1); |
| u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1); |
| @@ -423,28 +456,54 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, |
| SKB_WITH_OVERHEAD(q->buf_size), |
| DMA_FROM_DEVICE); |
| |
| - buf = r->ptr; |
| - r->dma_addr = 0; |
| - r->ptr = NULL; |
| - |
| - mt76_put_rxwi(dev, r); |
| - |
| - if (drop) { |
| - u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl)); |
| - |
| - *drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A | |
| - MT_DMA_CTL_DROP)); |
| + if (flush) { |
| + buf = r->ptr; |
| + r->dma_addr = 0; |
| + r->ptr = NULL; |
| + |
| + mt76_put_rxwi(dev, r); |
| + } else { |
| + struct mt76_queue_buf qbuf; |
| + |
| + buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC); |
| + if (!buf) |
| + return NULL; |
| + |
| + memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size)); |
| + |
| + r->dma_addr = dma_map_single(dev->dma_dev, r->ptr, |
| + SKB_WITH_OVERHEAD(q->buf_size), |
| + DMA_FROM_DEVICE); |
| + if (unlikely(dma_mapping_error(dev->dma_dev, r->dma_addr))) { |
| + skb_free_frag(r->ptr); |
| + mt76_put_rxwi(dev, r); |
| + return NULL; |
| + } |
| + |
| + qbuf.addr = r->dma_addr; |
| + qbuf.len = SKB_WITH_OVERHEAD(q->buf_size); |
| + qbuf.skip_unmap = false; |
| + |
| + if (mt76_dma_add_rx_buf(dev, q, &qbuf, r->ptr, r) < 0) { |
| + dma_unmap_single(dev->dma_dev, r->dma_addr, |
| + SKB_WITH_OVERHEAD(q->buf_size), |
| + DMA_FROM_DEVICE); |
| + skb_free_frag(r->ptr); |
| + mt76_put_rxwi(dev, r); |
| + return NULL; |
| + } |
| + } |
| |
| + if (drop) |
| *drop |= !!(buf1 & MT_DMA_CTL_WO_DROP); |
| - } |
| } else { |
| - buf = e->buf; |
| - e->buf = NULL; |
| dma_unmap_single(dev->dma_dev, e->dma_addr[0], |
| SKB_WITH_OVERHEAD(q->buf_size), |
| DMA_FROM_DEVICE); |
| } |
| |
| +done: |
| + e->buf = NULL; |
| return buf; |
| } |
| |
| @@ -458,15 +517,22 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush, |
| if (!q->queued) |
| return NULL; |
| |
| - if (flush) |
| - q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE); |
| - else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) |
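| + /* hw-managed rro rings are never dequeued here, ind cmd rings always advance */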
| + if (mt76_queue_is_rro_ind(q)) { |
| + goto done; |
| + } else if (q->flags & MT_QFLAG_RRO) { |
| return NULL; |
| + } else { |
| + if (flush) |
| + q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE); |
| + else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) |
| + return NULL; |
| + } |
| |
| +done: |
| q->tail = (q->tail + 1) % q->ndesc; |
| q->queued--; |
| |
| - return mt76_dma_get_buf(dev, q, idx, len, info, more, drop); |
| + return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush); |
| } |
| |
| static int |
| @@ -615,7 +681,10 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) |
| |
| while (q->queued < q->ndesc - 1) { |
| struct mt76_queue_buf qbuf; |
| - void *buf; |
| + void *buf = NULL; |
| + |
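| + /* ind cmd slots are not backed by rx buffers, skip the allocation */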
| + if (mt76_queue_is_rro_ind(q)) |
| + goto done; |
| |
| buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC); |
| if (!buf) |
| @@ -627,10 +696,11 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) |
| break; |
| } |
| |
| +done: |
| qbuf.addr = addr + offset; |
| qbuf.len = len - offset; |
| qbuf.skip_unmap = false; |
| - if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) { |
| + if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf, NULL) < 0) { |
| dma_unmap_single(dev->dma_dev, addr, len, |
| DMA_FROM_DEVICE); |
| skb_free_frag(buf); |
| @@ -639,7 +709,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) |
| frames++; |
| } |
| |
| - if (frames) |
| + if (frames || mt76_queue_is_wed_rx(q)) |
| mt76_dma_kick_queue(dev, q); |
| |
| spin_unlock_bh(&q->lock); |
| @@ -652,7 +722,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset) |
| #ifdef CONFIG_NET_MEDIATEK_SOC_WED |
| struct mtk_wed_device *wed = &dev->mmio.wed; |
| int ret, type, ring; |
| - u8 flags; |
| + u16 flags; |
| |
| if (!q || !q->ndesc) |
| return -EINVAL; |
| @@ -679,7 +749,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset) |
| case MT76_WED_Q_TXFREE: |
| /* WED txfree queue needs ring to be initialized before setup */ |
| q->flags = 0; |
| - mt76_dma_queue_reset(dev, q); |
| + mt76_dma_queue_reset(dev, q, false); |
| mt76_dma_rx_fill(dev, q); |
| q->flags = flags; |
| |
| @@ -688,9 +758,31 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset) |
| q->wed_regs = wed->txfree_ring.reg_base; |
| break; |
| case MT76_WED_Q_RX: |
| - ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset); |
| - if (!ret) |
| - q->wed_regs = wed->rx_ring[ring].reg_base; |
| + if (q->flags & MT_QFLAG_RRO) { |
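| + /* hw rro: data and msdu page rings are filled by hw, only the ind cmd ring gets host buffers */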
| + q->flags &= ~0x1f; |
| + |
| + ring = FIELD_GET(MT_QFLAG_RRO_RING, q->flags); |
| + type = FIELD_GET(MT_QFLAG_RRO_TYPE, q->flags); |
| + if (type == MT76_RRO_Q_DATA) { |
| + mt76_dma_queue_reset(dev, q, true); |
| + ret = mtk_wed_device_rro_rx_ring_setup(wed, ring, q->regs); |
| + } else if (type == MT76_RRO_Q_MSDU_PG) { |
| + mt76_dma_queue_reset(dev, q, true); |
| + ret = mtk_wed_device_msdu_pg_rx_ring_setup(wed, ring, q->regs); |
| + } else if (type == MT76_RRO_Q_IND) { |
| + mt76_dma_queue_reset(dev, q, false); |
| + mt76_dma_rx_fill(dev, q); |
| + ret = mtk_wed_device_ind_rx_ring_setup(wed, q->regs); |
| + } |
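| + /* hw fills these rings itself, mark them as fully queued */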
| + if (type != MT76_RRO_Q_IND) { |
| + q->head = q->ndesc - 1; |
| + q->queued = q->ndesc - 1; |
| + } |
| + } else { |
| + ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, 0); |
| + if (!ret) |
| + q->wed_regs = wed->rx_ring[ring].reg_base; |
| + } |
| break; |
| default: |
| ret = -EINVAL; |
| @@ -719,10 +811,25 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, |
| q->hw_idx = idx; |
| |
| size = q->ndesc * sizeof(struct mt76_desc); |
| + if (mt76_queue_is_rro_ind(q)) |
| + size = q->ndesc * sizeof(struct mt76_rro_desc); |
| + |
| q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL); |
| if (!q->desc) |
| return -ENOMEM; |
| |
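| + /* ind cmd rings reuse the coherent area as an array of rro descriptors */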
| + if (mt76_queue_is_rro_ind(q)) { |
| + struct mt76_rro_ind *cmd; |
| + int i; |
| + |
| + q->rro_desc = (struct mt76_rro_desc *)(q->desc); |
| + q->desc = NULL; |
| + for (i = 0; i < q->ndesc; i++) { |
| + cmd = (struct mt76_rro_ind *) &q->rro_desc[i]; |
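| + /* preset magic_cnt so the first entry written by hw reads as new */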
| + cmd->magic_cnt = MT_DMA_IND_CMD_MAGIC_CNT - 1; |
| + } |
| + } |
| + |
| size = q->ndesc * sizeof(*q->entry); |
| q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL); |
| if (!q->entry) |
| @@ -732,8 +839,11 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, |
| if (ret) |
| return ret; |
| |
| - if (!mt76_queue_is_txfree(q)) |
| - mt76_dma_queue_reset(dev, q); |
| + if (!mtk_wed_device_active(&dev->mmio.wed) || |
| + (!mt76_queue_is_wed_txfree(q) && |
| + !(mtk_wed_get_rx_capa(&dev->mmio.wed) && |
| + q->flags & MT_QFLAG_RRO))) |
| + mt76_dma_queue_reset(dev, q, false); |
| |
| return 0; |
| } |
| @@ -768,8 +878,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) |
| |
| spin_unlock_bh(&q->lock); |
| |
| - if (((q->flags & MT_QFLAG_WED) && |
| - FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) || |
| + if (mt76_queue_is_wed_rx(q) || |
| (q->flags & MT_QFLAG_RRO)) |
| return; |
| |
| @@ -790,9 +899,13 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid) |
| if (!q->ndesc) |
| return; |
| |
| + if (!q->desc) |
| + goto done; |
| + |
| for (i = 0; i < q->ndesc; i++) |
| q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); |
| |
| +done: |
| mt76_dma_rx_cleanup(dev, q); |
| |
| /* reset WED rx queues */ |
| @@ -839,8 +952,8 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) |
| bool check_ddone = false; |
| bool more; |
| |
| if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
| q->flags == MT_WED_Q_TXFREE) {
| dma_idx = Q_READ(dev, q, dma_idx); |
| check_ddone = true; |
| } |
| @@ -1002,7 +1115,8 @@ void mt76_dma_cleanup(struct mt76_dev *dev) |
| mt76_for_each_q_rx(dev, i) { |
| struct mt76_queue *q = &dev->q_rx[i]; |
| |
| - if (mt76_queue_is_wed_rx(q)) |
| + if (mtk_wed_device_active(&dev->mmio.wed) && |
| + (q->flags & MT_QFLAG_RRO)) |
| continue; |
| |
| netif_napi_del(&dev->napi[i]); |
| @@ -1014,6 +1128,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev) |
| |
| if (mtk_wed_device_active(&dev->mmio.wed_ext)) |
| mtk_wed_device_detach(&dev->mmio.wed_ext); |
| + |
| mt76_free_pending_txwi(dev); |
| mt76_free_pending_rxwi(dev); |
| } |
| diff --git a/dma.h b/dma.h |
| index 1b090d78..48037092 100644 |
| --- a/dma.h |
| +++ b/dma.h |
| @@ -25,6 +25,13 @@ |
| #define MT_DMA_PPE_ENTRY GENMASK(30, 16) |
| #define MT_DMA_INFO_PPE_VLD BIT(31) |
| |
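| +/* hw rro (wed 3.0) descriptor and ring control bits */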
| +#define MT_DMA_CTL_PN_CHK_FAIL BIT(13) |
| +#define MT_DMA_CTL_VER_MASK BIT(7) |
| + |
| +#define MT_DMA_MAGIC_EN BIT(13) |
| + |
| +#define MT_DMA_IND_CMD_MAGIC_CNT 8 |
| + |
| #define MT_DMA_HDR_LEN 4 |
| #define MT_RX_INFO_LEN 4 |
| #define MT_FCE_INFO_LEN 4 |
| @@ -37,6 +44,11 @@ struct mt76_desc { |
| __le32 info; |
| } __packed __aligned(4); |
| |
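| +/* descriptor layout of hw-owned rro rings */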
| +struct mt76_rro_desc { |
| + __le32 buf0; |
| + __le32 buf1; |
| +} __packed __aligned(4); |
| + |
| enum mt76_qsel { |
| MT_QSEL_MGMT, |
| MT_QSEL_HCCA, |
| diff --git a/mac80211.c b/mac80211.c |
| index f7578308..3a5755f9 100644 |
| --- a/mac80211.c |
| +++ b/mac80211.c |
| @@ -727,6 +727,7 @@ static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q) |
| return; |
| } |
| } |
| + |
| __skb_queue_tail(&dev->rx_skb[q], skb); |
| } |
| |
| diff --git a/mt76.h b/mt76.h |
| index ee0dbdd7..e4351338 100644 |
| --- a/mt76.h |
| +++ b/mt76.h |
| @@ -48,6 +48,18 @@ |
| |
| #define MT76_TOKEN_FREE_THR 64 |
| |
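| +/* hw rro (wed 3.0) queue flags */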
| +#define MT_QFLAG_RRO_RING GENMASK(6, 5) |
| +#define MT_QFLAG_RRO_TYPE GENMASK(8, 7) |
| +#define MT_QFLAG_RRO BIT(9) |
| +#define MT_QFLAG_MAGIC BIT(10) |
| + |
| +#define __MT_RRO_Q(_type, _n) (MT_QFLAG_RRO | \ |
| + FIELD_PREP(MT_QFLAG_RRO_TYPE, _type) | \ |
| + FIELD_PREP(MT_QFLAG_RRO_RING, _n)) |
| +#define MT_RRO_Q_DATA(_n) __MT_RRO_Q(MT76_RRO_Q_DATA, _n) |
| +#define MT_RRO_Q_MSDU_PG(_n) __MT_RRO_Q(MT76_RRO_Q_MSDU_PG, _n) |
| +#define MT_RRO_Q_IND __MT_RRO_Q(MT76_RRO_Q_IND, 0) |
| + |
| #define MT_QFLAG_WED_RING GENMASK(1, 0) |
| #define MT_QFLAG_WED_TYPE GENMASK(3, 2) |
| #define MT_QFLAG_WED BIT(4) |
| @@ -82,6 +94,12 @@ enum mt76_wed_type { |
| MT76_WED_Q_RX, |
| }; |
| |
| +enum mt76_rro_type {
| + MT76_RRO_Q_DATA, |
| + MT76_RRO_Q_MSDU_PG, |
| + MT76_RRO_Q_IND, |
| +}; |
| + |
| struct mt76_bus_ops { |
| u32 (*rr)(struct mt76_dev *dev, u32 offset); |
| void (*wr)(struct mt76_dev *dev, u32 offset, u32 val); |
| @@ -128,6 +146,16 @@ enum mt76_rxq_id { |
| MT_RXQ_MAIN_WA, |
| MT_RXQ_BAND2, |
| MT_RXQ_BAND2_WA, |
| + MT_RXQ_RRO_BAND0, |
| + MT_RXQ_RRO_BAND1, |
| + MT_RXQ_RRO_BAND2, |
| + MT_RXQ_MSDU_PAGE_BAND0, |
| + MT_RXQ_MSDU_PAGE_BAND1, |
| + MT_RXQ_MSDU_PAGE_BAND2, |
| + MT_RXQ_TXFREE_BAND0, |
| + MT_RXQ_TXFREE_BAND1, |
| + MT_RXQ_TXFREE_BAND2, |
| + MT_RXQ_RRO_IND, |
| __MT_RXQ_MAX |
| }; |
| |
| @@ -206,6 +234,7 @@ struct mt76_queue { |
| spinlock_t lock; |
| spinlock_t cleanup_lock; |
| struct mt76_queue_entry *entry; |
| + struct mt76_rro_desc *rro_desc; |
| struct mt76_desc *desc; |
| |
| u16 first; |
| @@ -219,8 +248,8 @@ struct mt76_queue { |
| |
| u8 buf_offset; |
| u8 hw_idx; |
| - u8 flags; |
| - |
| + u8 magic_cnt; |
| + u32 flags; |
| u32 wed_regs; |
| |
| dma_addr_t desc_dma; |
| @@ -274,7 +303,7 @@ struct mt76_queue_ops { |
| |
| void (*kick)(struct mt76_dev *dev, struct mt76_queue *q); |
| |
| - void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q); |
| + void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q, bool skip); |
| }; |
| |
| enum mt76_phy_type { |
| @@ -369,6 +398,17 @@ struct mt76_txq { |
| bool aggr; |
| }; |
| |
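| +/* rro indication command entry written by hw */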
| +struct mt76_rro_ind { |
| + u32 se_id : 12; |
| + u32 rsv : 4; |
| + u32 start_sn : 12; |
| + u32 ind_reason : 4; |
| + u32 ind_cnt : 13; |
| + u32 win_sz : 3; |
| + u32 rsv2 : 13; |
| + u32 magic_cnt : 3; |
| +}; |
| + |
| struct mt76_txwi_cache { |
| struct list_head list; |
| dma_addr_t dma_addr; |
| @@ -1516,12 +1556,19 @@ static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q) |
| return (q->flags & MT_QFLAG_WED) && |
| FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX; |
| } |
| -static inline bool mt76_queue_is_txfree(struct mt76_queue *q) |
| + |
| +static inline bool mt76_queue_is_wed_txfree(struct mt76_queue *q) |
| { |
| return (q->flags & MT_QFLAG_WED) && |
| FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE; |
| } |
| |
| +static inline bool mt76_queue_is_rro_ind(struct mt76_queue *q) |
| +{ |
| + return (q->flags & MT_QFLAG_RRO) && |
| + FIELD_GET(MT_QFLAG_RRO_TYPE, q->flags) == MT76_RRO_Q_IND; |
| +} |
| + |
| struct mt76_txwi_cache * |
| mt76_token_release(struct mt76_dev *dev, int token, bool *wake); |
| int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi); |
| @@ -1540,10 +1587,14 @@ static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked) |
| static inline int |
| mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi) |
| { |
| - int token; |
| + int token, start = 0; |
| + |
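| + /* keep host tx tokens above the id range wed reserves for its buffers */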
| + if (mtk_wed_device_active(&dev->mmio.wed)) |
| + start = dev->mmio.wed.wlan.nbuf; |
| |
| spin_lock_bh(&dev->token_lock); |
| - token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC); |
| + token = idr_alloc(&dev->token, *ptxwi, start, start + dev->token_size, |
| + GFP_ATOMIC); |
| spin_unlock_bh(&dev->token_lock); |
| |
| return token; |
| diff --git a/mt7996/dma.c b/mt7996/dma.c |
| index 428f3d08..45ccc7b5 100644 |
| --- a/mt7996/dma.c |
| +++ b/mt7996/dma.c |
| @@ -64,6 +64,29 @@ static void mt7996_dma_config(struct mt7996_dev *dev) |
| RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2); |
| RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI); |
| |
| + if (dev->rro_support) { |
| + /* band0 */ |
| + RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0, |
| + MT7996_RXQ_RRO_BAND0); |
| + RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0, |
| + MT7996_RXQ_MSDU_PG_BAND0); |
| + RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN, |
| + MT7996_RXQ_TXFREE0); |
| + /* band1 */ |
| + RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1, |
| + MT7996_RXQ_MSDU_PG_BAND1); |
| + /* band2 */ |
| + RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2, |
| + MT7996_RXQ_RRO_BAND2); |
| + RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2, |
| + MT7996_RXQ_MSDU_PG_BAND2); |
| + RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI, |
| + MT7996_RXQ_TXFREE2); |
| + |
| + RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND, |
| + MT7996_RXQ_RRO_IND); |
| + } |
| + |
| /* data tx queue */ |
| TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0); |
| TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1); |
| @@ -102,6 +125,22 @@ static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs) |
| mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2_WA) + ofs, PREFETCH(0x2)); |
| mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10)); |
| mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2) + ofs, PREFETCH(0x10)); |
| + if (dev->rro_support) { |
| + mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND0) + ofs, |
| + PREFETCH(0x10)); |
| + mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND2) + ofs, |
| + PREFETCH(0x10)); |
| + mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs, |
| + PREFETCH(0x4)); |
| + mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs, |
| + PREFETCH(0x4)); |
| + mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs, |
| + PREFETCH(0x4)); |
| + mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND0) + ofs, |
| + PREFETCH(0x4)); |
| + mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND2) + ofs, |
| + PREFETCH(0x4)); |
| + } |
| #undef PREFETCH |
| |
| mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE); |
| @@ -161,6 +200,7 @@ static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset) |
| |
| void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset, bool wed_reset) |
| { |
| + struct mtk_wed_device *wed = &dev->mt76.mmio.wed; |
| u32 hif1_ofs = 0; |
| u32 irq_mask; |
| |
| @@ -169,11 +209,16 @@ void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset, bool wed_reset) |
| |
| /* enable wpdma tx/rx */ |
| if (!reset) { |
| - mt76_set(dev, MT_WFDMA0_GLO_CFG, |
| - MT_WFDMA0_GLO_CFG_TX_DMA_EN | |
| - MT_WFDMA0_GLO_CFG_RX_DMA_EN | |
| - MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | |
| - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); |
| + if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) |
| + mt76_set(dev, MT_WFDMA0_GLO_CFG, |
| + MT_WFDMA0_GLO_CFG_TX_DMA_EN | |
| + MT_WFDMA0_GLO_CFG_OMIT_TX_INFO); |
| + else |
| + mt76_set(dev, MT_WFDMA0_GLO_CFG, |
| + MT_WFDMA0_GLO_CFG_TX_DMA_EN | |
| + MT_WFDMA0_GLO_CFG_RX_DMA_EN | |
| + MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | |
| + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); |
| |
| if (dev->hif2) |
| mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs, |
| @@ -185,8 +230,8 @@ void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset, bool wed_reset) |
| |
| /* enable interrupts for TX/RX rings */ |
| irq_mask = MT_INT_MCU_CMD | |
| - MT_INT_RX_DONE_MCU | |
| - MT_INT_TX_DONE_MCU; |
| + MT_INT_RX_DONE_MCU | |
| + MT_INT_TX_DONE_MCU; |
| |
| if (mt7996_band_valid(dev, MT_BAND0)) |
| irq_mask |= MT_INT_BAND0_RX_DONE; |
| @@ -197,14 +242,14 @@ void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset, bool wed_reset) |
| if (mt7996_band_valid(dev, MT_BAND2)) |
| irq_mask |= MT_INT_BAND2_RX_DONE; |
| |
| - if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wed_reset) { |
| + if (mtk_wed_device_active(wed) && wed_reset) { |
| u32 wed_irq_mask = irq_mask; |
| |
| wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1; |
| |
| mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask); |
| |
| - mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask); |
| + mtk_wed_device_start(wed, wed_irq_mask); |
| } |
| |
| irq_mask = reset ? MT_INT_MCU_CMD : irq_mask; |
| @@ -298,7 +343,8 @@ static int mt7996_dma_enable(struct mt7996_dev *dev, bool reset) |
| /* fix hardware limitation, pcie1's rx ring3 is not available |
| * so, redirect pcie0 rx ring3 interrupt to pcie1 |
| */ |
| - if (mtk_wed_device_active(&dev->mt76.mmio.wed) && dev->rro_support) |
| + if (mtk_wed_device_active(&dev->mt76.mmio.wed) && |
| + dev->rro_support) |
| mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs, |
| MT_WFDMA0_RX_INT_SEL_RING6); |
| else |
| @@ -311,6 +357,78 @@ static int mt7996_dma_enable(struct mt7996_dev *dev, bool reset) |
| return 0; |
| } |
| |
| +int mt7996_dma_rro_init(struct mt7996_dev *dev) |
| +{ |
| + int ret; |
| + u32 hif1_ofs = 0; |
| + u32 wed_irq_mask; |
| + struct mtk_wed_device *wed = &dev->mt76.mmio.wed; |
| + |
| + if (dev->hif2) |
| + hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0); |
| + |
| + /* ind cmd */ |
| + dev->mt76.q_rx[MT_RXQ_RRO_IND].flags = MT_RRO_Q_IND | MT_WED_Q_RX(0);
| + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_IND], |
| + MT_RXQ_ID(MT_RXQ_RRO_IND), |
| + MT7996_RX_RING_SIZE, |
| + 0, MT_RXQ_RRO_IND_RING_BASE); |
| + if (ret) |
| + return ret; |
| + |
| + /* rx msdu page queue for band0 */ |
| + dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags = MT_RRO_Q_MSDU_PG(0); |
| + dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags |= MT_QFLAG_MAGIC; |
| + dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags |= MT_WED_Q_RX(0); |
| + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND0], |
| + MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0), |
| + MT7996_RX_RING_SIZE, |
| + MT7996_RX_MSDU_PAGE_SIZE, |
| + MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND0)); |
| + if (ret) |
| + return ret; |
| + |
| + if (mt7996_band_valid(dev, MT_BAND1)) { |
| + /* rx msdu page queue for band1 */ |
| + dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags = MT_RRO_Q_MSDU_PG(1); |
| + dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags |= MT_QFLAG_MAGIC; |
| + dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags |= MT_WED_Q_RX(1); |
| + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND1], |
| + MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1), |
| + MT7996_RX_RING_SIZE, |
| + MT7996_RX_MSDU_PAGE_SIZE, |
| + MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND1)); |
| + if (ret) |
| + return ret; |
| + } |
| + |
| + if (mt7996_band_valid(dev, MT_BAND2)) { |
| + /* rx msdu page queue for band2 */ |
| + dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags = MT_RRO_Q_MSDU_PG(2); |
| + dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags |= MT_QFLAG_MAGIC; |
| + dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags |= MT_WED_Q_RX(0); |
| + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND2], |
| + MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2), |
| + MT7996_RX_RING_SIZE, |
| + MT7996_RX_MSDU_PAGE_SIZE, |
| + MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND2)); |
| + if (ret) |
| + return ret; |
| + } |
| + |
| + wed_irq_mask = dev->mt76.mmio.irqmask | |
| + MT_INT_RRO_RX_DONE | |
| + MT_INT_TX_DONE_BAND2; |
| + |
| + mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask); |
| + |
| + mtk_wed_device_start_hwrro(wed, wed_irq_mask, false); |
| + mt7996_irq_enable(dev, wed_irq_mask); |
| + |
| + return 0; |
| +} |
| + |
| int mt7996_dma_init(struct mt7996_dev *dev) |
| { |
| struct mtk_wed_device *wed = &dev->mt76.mmio.wed; |
| @@ -380,6 +498,9 @@ int mt7996_dma_init(struct mt7996_dev *dev) |
| return ret; |
| |
| /* rx data queue for band0 and band1 */ |
| + if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) |
| + dev->mt76.q_rx[MT_RXQ_MAIN].flags = MT_WED_Q_RX(0); |
| + |
| ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], |
| MT_RXQ_ID(MT_RXQ_MAIN), |
| MT7996_RX_RING_SIZE, |
| @@ -403,9 +524,6 @@ int mt7996_dma_init(struct mt7996_dev *dev) |
| if (mt7996_band_valid(dev, MT_BAND2)) { |
| /* rx data queue for band2 */ |
| rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs; |
| - if (mtk_wed_device_active(wed)) |
| - rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2); |
| - |
| ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2], |
| MT_RXQ_ID(MT_RXQ_BAND2), |
| MT7996_RX_RING_SIZE, |
| @@ -429,11 +547,12 @@ int mt7996_dma_init(struct mt7996_dev *dev) |
| return ret; |
| } |
| |
| - |
| - if (dev->rro_support) { |
| + if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) && |
| + dev->rro_support) { |
| /* rx rro data queue for band0 */ |
| dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags = MT_RRO_Q_DATA(0); |
| dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags |= MT_QFLAG_MAGIC; |
| + dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags |= MT_WED_Q_RX(0); |
| ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0], |
| MT_RXQ_ID(MT_RXQ_RRO_BAND0), |
| MT7996_RX_RING_SIZE, |
| @@ -443,8 +562,7 @@ int mt7996_dma_init(struct mt7996_dev *dev) |
| return ret; |
| |
| /* tx free notify event from WA for band0 */ |
| - if (mtk_wed_device_active(wed)) |
| - dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE; |
| + dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE; |
| ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0], |
| MT_RXQ_ID(MT_RXQ_TXFREE_BAND0), |
| MT7996_RX_MCU_RING_SIZE, |
| @@ -457,6 +575,7 @@ int mt7996_dma_init(struct mt7996_dev *dev) |
| /* rx rro data queue for band2 */ |
| dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags = MT_RRO_Q_DATA(1); |
| dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags |= MT_QFLAG_MAGIC; |
| + dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags |= MT_WED_Q_RX(1); |
| ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2], |
| MT_RXQ_ID(MT_RXQ_RRO_BAND2), |
| MT7996_RX_RING_SIZE, |
| @@ -534,18 +653,18 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force) |
| |
| /* reset hw queues */ |
| for (i = 0; i < __MT_TXQ_MAX; i++) { |
| - mt76_queue_reset(dev, dev->mphy.q_tx[i]); |
| + mt76_queue_reset(dev, dev->mphy.q_tx[i], false); |
| if (phy2) |
| - mt76_queue_reset(dev, phy2->q_tx[i]); |
| + mt76_queue_reset(dev, phy2->q_tx[i], false); |
| if (phy3) |
| - mt76_queue_reset(dev, phy3->q_tx[i]); |
| + mt76_queue_reset(dev, phy3->q_tx[i], false); |
| } |
| |
| for (i = 0; i < __MT_MCUQ_MAX; i++) |
| - mt76_queue_reset(dev, dev->mt76.q_mcu[i]); |
| + mt76_queue_reset(dev, dev->mt76.q_mcu[i], false); |
| |
| mt76_for_each_q_rx(&dev->mt76, i) { |
| - mt76_queue_reset(dev, &dev->mt76.q_rx[i]); |
| + mt76_queue_reset(dev, &dev->mt76.q_rx[i], false); |
| } |
| |
| mt76_tx_status_check(&dev->mt76, true); |
| diff --git a/mt7996/init.c b/mt7996/init.c |
| index 6cfbc50d..d70dcf9f 100644 |
| --- a/mt7996/init.c |
| +++ b/mt7996/init.c |
| @@ -496,8 +496,13 @@ void mt7996_mac_init(struct mt7996_dev *dev) |
| |
| /* rro module init */ |
| mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 2); |
| - mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3); |
| - mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1); |
| + if (dev->rro_support) { |
| + mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 1); |
| + mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 0); |
| + } else { |
| + mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3); |
| + mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1); |
| + } |
| |
| mt7996_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET), |
| MCU_WA_PARAM_HW_PATH_HIF_VER, |
| @@ -650,6 +655,114 @@ void mt7996_wfsys_reset(struct mt7996_dev *dev) |
| msleep(20); |
| } |
| |
| +static int mt7996_rro_init(struct mt7996_dev *dev) |
| +{ |
| + struct mt7996_rro_addr *ptr; |
| + struct mt7996_rro_cfg *rro = &dev->rro; |
| + struct mtk_wed_device *wed = &dev->mt76.mmio.wed; |
| + u32 size, val = 0, reg = MT_RRO_ADDR_ELEM_SEG_ADDR0; |
| + int i, j; |
| + void *buf; |
| + |
| + for (i = 0; i < MT7996_RRO_BA_BITMAP_CR_CNT; i++) { |
| + buf = dmam_alloc_coherent(dev->mt76.dma_dev, |
| + MT7996_BA_BITMAP_SZ_PER_CR, |
| + &rro->ba_bitmap_cache_pa[i], |
| + GFP_KERNEL); |
| + if (!buf) |
| + return -ENOMEM; |
| + |
| + rro->ba_bitmap_cache_va[i] = buf; |
| + } |
| + |
| + rro->win_sz = MT7996_RRO_WIN_SIZE_MAX; |
| + for (i = 0; i < MT7996_RRO_ADDR_ELEM_CR_CNT; i++) { |
| + size = MT7996_RRO_SESSION_PER_CR * |
| + rro->win_sz * sizeof(struct mt7996_rro_addr); |
| + |
| + buf = dmam_alloc_coherent(dev->mt76.dma_dev, size, |
| + &rro->addr_elem_alloc_pa[i], |
| + GFP_KERNEL); |
| + if (!buf) |
| + return -ENOMEM; |
| + rro->addr_elem_alloc_va[i] = buf; |
| + |
| + memset(rro->addr_elem_alloc_va[i], 0, size); |
| + |
| + ptr = rro->addr_elem_alloc_va[i]; |
| + for (j = 0; j < MT7996_RRO_SESSION_PER_CR * rro->win_sz; j++, ptr++) |
| + ptr->signature = 0xff; |
| + |
| + wed->wlan.ind_cmd.addr_elem_phys[i] = rro->addr_elem_alloc_pa[i]; |
| + } |
| + |
| + rro->particular_se_id = MT7996_RRO_SESSION_MAX; |
| + size = rro->win_sz * sizeof(struct mt7996_rro_addr); |
| + buf = dmam_alloc_coherent(dev->mt76.dma_dev, size, |
| + &rro->particular_session_pa, |
| + GFP_KERNEL); |
| + if (!buf) |
| + return -ENOMEM; |
| + |
| + rro->particular_session_va = buf; |
| + ptr = rro->particular_session_va; |
| + for (j = 0; j < rro->win_sz; j++, ptr++) |
| + ptr->signature = 0xff; |
| + |
| + INIT_LIST_HEAD(&rro->pg_addr_cache); |
| + for (i = 0; i < MT7996_RRO_MSDU_PG_HASH_SIZE; i++) |
| + INIT_LIST_HEAD(&rro->pg_hash_head[i]); |
| + |
| + /* rro hw init */ |
| + /* TODO: remove this line once the WM firmware sets it */
| + mt76_clear(dev, WF_RRO_AXI_MST_CFG, WF_RRO_AXI_MST_CFG_DIDX_OK); |
| + |
| + /* setup BA bitmap cache address */ |
| + mt76_wr(dev, MT_RRO_BA_BITMAP_BASE0, |
| + rro->ba_bitmap_cache_pa[0]); |
| + mt76_wr(dev, MT_RRO_BA_BITMAP_BASE1, 0); |
| + mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT0, |
| + rro->ba_bitmap_cache_pa[1]); |
| + mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT1, 0); |
| + |
| + /* setup Address element address */ |
| + for (i = 0; i < MT7996_RRO_ADDR_ELEM_CR_CNT; i++) { |
| + mt76_wr(dev, reg, rro->addr_elem_alloc_pa[i] >> 4); |
| + reg += 4; |
| + } |
| + |
| + /* setup Address element address - separate address segment mode */ |
| + mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE1, |
| + MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE); |
| + |
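| + /* win_size is encoded as log2(win_sz) - 5 (e.g. 1024 -> 5) */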
| + wed->wlan.ind_cmd.win_size = ffs(rro->win_sz) - 6; |
| + wed->wlan.ind_cmd.particular_sid = rro->particular_se_id; |
| + wed->wlan.ind_cmd.particular_se_phys = rro->particular_session_pa; |
| + wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_CR_CNT; |
| + wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL; |
| + |
| + mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00); |
| + mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1, |
| + MT_RRO_IND_CMD_SIGNATURE_BASE1_EN); |
| + |
| + /* particular session configure */ |
| + /* use max session idx + 1 as particular session id */ |
| + mt76_wr(dev, MT_RRO_PARTICULAR_CFG0, |
| + rro->particular_session_pa); |
| + |
| + val = FIELD_PREP(MT_RRO_PARTICULAR_SID, |
| + MT7996_RRO_SESSION_MAX); |
| + val |= MT_RRO_PARTICULAR_CONFG_EN; |
| + mt76_wr(dev, MT_RRO_PARTICULAR_CFG1, val); |
| + |
| + /* interrupt enable */ |
| + mt76_wr(dev, MT_RRO_HOST_INT_ENA, |
| + MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA); |
| + |
| + /* rro ind cmd queue init */ |
| + return mt7996_dma_rro_init(dev); |
| +} |
| + |
| static int mt7996_init_hardware(struct mt7996_dev *dev) |
| { |
| int ret, idx; |
| @@ -677,6 +790,13 @@ static int mt7996_init_hardware(struct mt7996_dev *dev) |
| if (ret) |
| return ret; |
| |
| + if (mtk_wed_device_active(&dev->mt76.mmio.wed) && |
| + dev->rro_support) { |
| + ret = mt7996_rro_init(dev); |
| + if (ret) |
| + return ret; |
| + } |
| + |
| ret = mt7996_eeprom_init(dev); |
| if (ret < 0) |
| return ret; |
| diff --git a/mt7996/mac.c b/mt7996/mac.c |
| index fc2d9269..4fbbc077 100644 |
| --- a/mt7996/mac.c |
| +++ b/mt7996/mac.c |
| @@ -614,8 +614,37 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev, |
| return 0; |
| } |
| |
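| +/* let wed validate the hw ppe entry for offload-marked rx frames */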
| +static void |
| +mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q, |
| + struct mt7996_sta *msta, struct sk_buff *skb, |
| + u32 info) |
| +{ |
| + struct ieee80211_vif *vif; |
| + struct wireless_dev *wdev; |
| + |
| + if (!msta || !msta->vif) |
| + return; |
| + |
| + if (!mt76_queue_is_wed_rx(q)) |
| + return; |
| + |
| + if (!(info & MT_DMA_INFO_PPE_VLD)) |
| + return; |
| + |
| + vif = container_of((void *)msta->vif, struct ieee80211_vif, |
| + drv_priv); |
| + wdev = ieee80211_vif_to_wdev(vif); |
| + skb->dev = wdev->netdev; |
| + |
| + mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb, |
| + FIELD_GET(MT_DMA_PPE_CPU_REASON, info), |
| + FIELD_GET(MT_DMA_PPE_ENTRY, info)); |
| +} |
| +
| static int |
| -mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb) |
| +mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q, |
| + struct sk_buff *skb, u32 *info) |
| { |
| struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; |
| struct mt76_phy *mphy = &dev->mt76.phy; |
| @@ -640,7 +669,10 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb) |
| u16 seq_ctrl = 0; |
| __le16 fc = 0; |
| int idx; |
| + u8 hw_aggr = false; |
| + struct mt7996_sta *msta = NULL; |
| |
| + hw_aggr = status->aggr; |
| memset(status, 0, sizeof(*status)); |
| |
| band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1); |
| @@ -667,8 +699,6 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb) |
| status->wcid = mt7996_rx_get_wcid(dev, idx, unicast); |
| |
| if (status->wcid) { |
| - struct mt7996_sta *msta; |
| - |
| msta = container_of(status->wcid, struct mt7996_sta, wcid); |
| spin_lock_bh(&dev->sta_poll_lock); |
| if (list_empty(&msta->poll_list)) |
| @@ -871,12 +901,14 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb) |
| #endif |
| } else { |
| status->flag |= RX_FLAG_8023; |
| + mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb, |
| + *info); |
| } |
| |
| if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023)) |
| mt7996_mac_decode_he_radiotap(skb, rxv, mode); |
| |
| - if (!status->wcid || !ieee80211_is_data_qos(fc)) |
| + if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr) |
| return 0; |
| |
| status->aggr = unicast && |
| @@ -1604,7 +1636,7 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, |
| dev_kfree_skb(skb); |
| break; |
| case PKT_TYPE_NORMAL: |
| - if (!mt7996_mac_fill_rx(dev, skb)) { |
| + if (!mt7996_mac_fill_rx(dev, q, skb, info)) { |
| mt76_rx(&dev->mt76, q, skb); |
| return; |
| } |
| diff --git a/mt7996/mcu.c b/mt7996/mcu.c |
| index 59f22f6d..1891c0d7 100644 |
| --- a/mt7996/mcu.c |
| +++ b/mt7996/mcu.c |
| @@ -949,7 +949,7 @@ int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif) |
| static int |
| mt7996_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif, |
| struct ieee80211_ampdu_params *params, |
| - bool enable, bool tx) |
| + bool enable, bool tx, bool rro_enable) |
| { |
| struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv; |
| struct sta_rec_ba_uni *ba; |
| @@ -970,6 +970,8 @@ mt7996_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif, |
| ba->ba_en = enable << params->tid; |
| ba->amsdu = params->amsdu; |
| ba->tid = params->tid; |
| + if (rro_enable && !tx && enable) |
| + ba->ba_rdd_rro = true; |
| |
| return mt76_mcu_skb_send_msg(dev, skb, |
| MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); |
| @@ -987,7 +989,7 @@ int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev, |
| msta->wcid.amsdu = false; |
| |
| return mt7996_mcu_sta_ba(&dev->mt76, &mvif->mt76, params, |
| - enable, true); |
| + enable, true, dev->rro_support); |
| } |
| |
| int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev, |
| @@ -998,7 +1000,7 @@ int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev, |
| struct mt7996_vif *mvif = msta->vif; |
| |
| return mt7996_mcu_sta_ba(&dev->mt76, &mvif->mt76, params, |
| - enable, false); |
| + enable, false, dev->rro_support); |
| } |
| |
| static void |
| diff --git a/mt7996/mmio.c b/mt7996/mmio.c |
| index b9e47e73..9960dca7 100644 |
| --- a/mt7996/mmio.c |
| +++ b/mt7996/mmio.c |
| @@ -346,9 +346,15 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr, |
| wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_TRI) - 1; |
| } |
| |
| + wed->wlan.wpdma_rx_glo = wed->wlan.phy_base + hif1_ofs + MT_WFDMA0_GLO_CFG; |
| + wed->wlan.wpdma_rx = wed->wlan.phy_base + hif1_ofs + |
| + MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) + |
| + MT7996_RXQ_BAND0 * MT_RING_SIZE; |
| + |
| wed->wlan.chip_id = 0x7991; |
| wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND2) - 1; |
| } else { |
| + wed->wlan.hwrro = dev->rro_support; /* hw rro is enabled whenever the chip supports it */
| wed->wlan.wpdma_int = wed->wlan.phy_base + MT_INT_SOURCE_CSR; |
| wed->wlan.wpdma_mask = wed->wlan.phy_base + MT_INT_MASK_CSR; |
| wed->wlan.wpdma_tx = wed->wlan.phy_base + MT_TXQ_RING_BASE(0) + |
| @@ -360,13 +366,33 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr, |
| MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) + |
| MT7996_RXQ_BAND0 * MT_RING_SIZE; |
| |
| + wed->wlan.wpdma_rx_rro[0] = wed->wlan.phy_base + |
| + MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND0) + |
| + MT7996_RXQ_RRO_BAND0 * MT_RING_SIZE; |
| + wed->wlan.wpdma_rx_rro[1] = wed->wlan.phy_base + hif1_ofs + |
| + MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND2) + |
| + MT7996_RXQ_RRO_BAND2 * MT_RING_SIZE; |
| + wed->wlan.wpdma_rx_pg = wed->wlan.phy_base + |
| + MT_RXQ_RING_BASE(MT7996_RXQ_MSDU_PG_BAND0) + |
| + MT7996_RXQ_MSDU_PG_BAND0 * MT_RING_SIZE; |
| + |
| wed->wlan.rx_nbuf = 65536; |
| wed->wlan.rx_npkt = 24576; |
| + if (dev->hif2) |
| + wed->wlan.rx_npkt += 8192; |
| + |
| wed->wlan.rx_size = SKB_WITH_OVERHEAD(MT_RX_BUF_SIZE); |
| |
| wed->wlan.rx_tbit[0] = ffs(MT_INT_RX_DONE_BAND0) - 1; |
| wed->wlan.rx_tbit[1] = ffs(MT_INT_RX_DONE_BAND2) - 1; |
| |
| + wed->wlan.rro_rx_tbit[0] = ffs(MT_INT_RX_DONE_RRO_BAND0) - 1; |
| + wed->wlan.rro_rx_tbit[1] = ffs(MT_INT_RX_DONE_RRO_BAND2) - 1; |
| + |
| + wed->wlan.rx_pg_tbit[0] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND0) - 1; |
| + wed->wlan.rx_pg_tbit[1] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND1) - 1; |
| + wed->wlan.rx_pg_tbit[2] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND2) - 1; |
| + |
| wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND0) - 1; |
| wed->wlan.tx_tbit[1] = ffs(MT_INT_TX_DONE_BAND1) - 1; |
| if (dev->rro_support) { |
| @@ -378,6 +404,8 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr, |
| wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) + |
| MT7996_RXQ_MCU_WA_MAIN * MT_RING_SIZE; |
| } |
| + |
| + dev->mt76.rx_token_size += wed->wlan.rx_npkt; |
| } |
| |
| wed->wlan.nbuf = 16384; |
| @@ -394,8 +422,6 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr, |
| wed->wlan.release_rx_buf = mt7996_mmio_wed_release_rx_buf; |
| wed->wlan.update_wo_rx_stats = NULL; |
| |
| - dev->mt76.rx_token_size += wed->wlan.rx_npkt; |
| - |
| if (mtk_wed_device_attach(wed)) |
| return 0; |
| |
| @@ -557,10 +583,9 @@ static void mt7996_irq_tasklet(struct tasklet_struct *t) |
| irqreturn_t mt7996_irq_handler(int irq, void *dev_instance) |
| { |
| struct mt7996_dev *dev = dev_instance; |
| - struct mtk_wed_device *wed = &dev->mt76.mmio.wed; |
| |
| - if (mtk_wed_device_active(wed)) |
| - mtk_wed_device_irq_set_mask(wed, 0); |
| + if (mtk_wed_device_active(&dev->mt76.mmio.wed)) |
| + mtk_wed_device_irq_set_mask(&dev->mt76.mmio.wed, 0); |
| else |
| mt76_wr(dev, MT_INT_MASK_CSR, 0); |
| |
| @@ -592,6 +617,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev, |
| SURVEY_INFO_TIME_RX | |
| SURVEY_INFO_TIME_BSS_RX, |
| .token_size = MT7996_TOKEN_SIZE, |
| + .rx_token_size = MT7996_RX_TOKEN_SIZE, |
| .tx_prepare_skb = mt7996_tx_prepare_skb, |
| .tx_complete_skb = mt76_connac_tx_complete_skb, |
| .rx_skb = mt7996_queue_rx_skb, |
| diff --git a/mt7996/mt7996.h b/mt7996/mt7996.h |
| index 43f20da4..836c7db7 100644 |
| --- a/mt7996/mt7996.h |
| +++ b/mt7996/mt7996.h |
| @@ -39,6 +39,7 @@ |
| #define MT7996_EEPROM_SIZE 7680 |
| #define MT7996_EEPROM_BLOCK_SIZE 16 |
| #define MT7996_TOKEN_SIZE 16384 |
| +#define MT7996_RX_TOKEN_SIZE 16384 |
| |
| #define MT7996_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */ |
| #define MT7996_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ |
| @@ -63,6 +64,24 @@ |
| #define MT7996_SKU_RATE_NUM 417 |
| #define MT7996_SKU_PATH_NUM 494 |
| |
| +#define MT7996_RRO_MSDU_PG_HASH_SIZE 127 |
| +#define MT7996_RRO_SESSION_MAX 1024 |
| +#define MT7996_RRO_WIN_SIZE_MAX 1024 |
| +#define MT7996_RRO_ADDR_ELEM_CR_CNT 128 |
| +#define MT7996_RRO_BA_BITMAP_CR_CNT 2 |
| +#define MT7996_RRO_SESSION_PER_CR (MT7996_RRO_SESSION_MAX / \ |
| + MT7996_RRO_ADDR_ELEM_CR_CNT) |
| +#define MT7996_BA_BITMAP_SZ_PER_SESSION 128 |
| +#define MT7996_BA_BITMAP_SZ_PER_CR ((MT7996_RRO_SESSION_MAX * \ |
| + MT7996_BA_BITMAP_SZ_PER_SESSION) / \ |
| + MT7996_RRO_BA_BITMAP_CR_CNT) |
| +#define MT7996_SKB_TRUESIZE(x) ((x) + \ |
| + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) |
| +#define MT7996_RX_BUF_SIZE MT7996_SKB_TRUESIZE(1800) |
| +#define MT7996_RX_MSDU_PAGE_SIZE MT7996_SKB_TRUESIZE(128) |
| + |
| +#define MT7996_WED_RX_TOKEN_SIZE 32768 |
| + |
| struct mt7996_vif; |
| struct mt7996_sta; |
| struct mt7996_dfs_pulse; |
| @@ -102,6 +121,16 @@ enum mt7996_rxq_id { |
| MT7996_RXQ_BAND0 = 4, |
| MT7996_RXQ_BAND1 = 4,/* unused */ |
| MT7996_RXQ_BAND2 = 5, |
| + MT7996_RXQ_RRO_BAND0 = 8, |
| + MT7996_RXQ_RRO_BAND1 = 8,/* unused */ |
| + MT7996_RXQ_RRO_BAND2 = 6, |
| + MT7996_RXQ_MSDU_PG_BAND0 = 10, |
| + MT7996_RXQ_MSDU_PG_BAND1 = 11, |
| + MT7996_RXQ_MSDU_PG_BAND2 = 12, |
| + MT7996_RXQ_TXFREE0 = 9, |
| + MT7996_RXQ_TXFREE1 = 9, |
| + MT7996_RXQ_TXFREE2 = 7, |
| + MT7996_RXQ_RRO_IND = 0, |
| }; |
| |
| struct mt7996_twt_flow { |
| @@ -272,6 +301,31 @@ struct mt7996_air_monitor_ctrl { |
| }; |
| #endif |
| |
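| +/* hw rro address element, one per sn slot of a session window */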
| +struct mt7996_rro_addr { |
| + u32 head_pkt_l; |
| + u32 head_pkt_h : 4; |
| + u32 seg_cnt : 11; |
| + u32 out_of_range: 1; |
| + u32 rsv : 8; |
| + u32 signature : 8; |
| +}; |
| + |
| +struct mt7996_rro_cfg { |
| + u32 ind_signature; |
| + void *ba_bitmap_cache_va[MT7996_RRO_BA_BITMAP_CR_CNT]; |
| + void *addr_elem_alloc_va[MT7996_RRO_ADDR_ELEM_CR_CNT]; |
| + void *particular_session_va; |
| + u32 particular_se_id; |
| + dma_addr_t ba_bitmap_cache_pa[MT7996_RRO_BA_BITMAP_CR_CNT]; |
| + dma_addr_t addr_elem_alloc_pa[MT7996_RRO_ADDR_ELEM_CR_CNT]; |
| + dma_addr_t particular_session_pa; |
| + u16 win_sz; |
| + |
| + spinlock_t lock; |
| + struct list_head pg_addr_cache; |
| + struct list_head pg_hash_head[MT7996_RRO_MSDU_PG_HASH_SIZE]; |
| +}; |
| + |
| struct mt7996_phy { |
| struct mt76_phy *mt76; |
| struct mt7996_dev *dev; |
| @@ -390,6 +444,9 @@ struct mt7996_dev { |
| bool flash_mode:1; |
| bool has_eht:1; |
| |
| + bool rro_support:1; |
| + struct mt7996_rro_cfg rro; |
| + |
| bool testmode_enable; |
| bool bin_file_mode; |
| u8 eeprom_mode; |
| @@ -709,6 +766,7 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, |
| struct ieee80211_sta *sta, |
| struct mt76_tx_info *tx_info); |
| void mt7996_tx_token_put(struct mt7996_dev *dev); |
| +int mt7996_dma_rro_init(struct mt7996_dev *dev); |
| void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, |
| struct sk_buff *skb, u32 *info); |
| bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len); |
| diff --git a/mt7996/regs.h b/mt7996/regs.h |
| index 5ed7bcca..47fa965f 100644 |
| --- a/mt7996/regs.h |
| +++ b/mt7996/regs.h |
| @@ -39,6 +39,40 @@ enum base_rev { |
| |
| #define __BASE(_id, _band) (dev->reg.base[(_id)].band_base[(_band)]) |
| |
| +/* RRO TOP */ |
| +#define MT_RRO_TOP_BASE 0xA000 |
| +#define MT_RRO_TOP(ofs) (MT_RRO_TOP_BASE + (ofs)) |
| + |
| +#define MT_RRO_BA_BITMAP_BASE0 MT_RRO_TOP(0x8) |
| +#define MT_RRO_BA_BITMAP_BASE1 MT_RRO_TOP(0xC) |
| +#define WF_RRO_AXI_MST_CFG MT_RRO_TOP(0xB8) |
| +#define WF_RRO_AXI_MST_CFG_DIDX_OK BIT(12) |
| +#define MT_RRO_ADDR_ARRAY_BASE1 MT_RRO_TOP(0x34) |
| +#define MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE BIT(31) |
| + |
| +#define MT_RRO_IND_CMD_SIGNATURE_BASE0 MT_RRO_TOP(0x38) |
| +#define MT_RRO_IND_CMD_SIGNATURE_BASE1 MT_RRO_TOP(0x3C) |
| +#define MT_RRO_IND_CMD_0_CTRL0 MT_RRO_TOP(0x40) |
| +#define MT_RRO_IND_CMD_SIGNATURE_BASE1_EN BIT(31) |
| + |
| +#define MT_RRO_PARTICULAR_CFG0 MT_RRO_TOP(0x5C) |
| +#define MT_RRO_PARTICULAR_CFG1 MT_RRO_TOP(0x60) |
| +#define MT_RRO_PARTICULAR_CONFG_EN BIT(31) |
| +#define MT_RRO_PARTICULAR_SID GENMASK(30, 16) |
| + |
| +#define MT_RRO_BA_BITMAP_BASE_EXT0 MT_RRO_TOP(0x70) |
| +#define MT_RRO_BA_BITMAP_BASE_EXT1 MT_RRO_TOP(0x74) |
| +#define MT_RRO_HOST_INT_ENA MT_RRO_TOP(0x204) |
| +#define MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA BIT(0) |
| + |
| +#define MT_RRO_ADDR_ELEM_SEG_ADDR0 MT_RRO_TOP(0x400) |
| + |
| +#define MT_RRO_ACK_SN_CTRL MT_RRO_TOP(0x50) |
| +#define MT_RRO_ACK_SN_CTRL_SN_MASK GENMASK(27, 16) |
| +#define MT_RRO_ACK_SN_CTRL_SESSION_MASK GENMASK(11, 0) |
| +
| #define MT_MCU_INT_EVENT 0x2108 |
| #define MT_MCU_INT_EVENT_DMA_STOPPED BIT(0) |
| #define MT_MCU_INT_EVENT_DMA_INIT BIT(1) |
| @@ -400,6 +434,7 @@ enum base_rev { |
| #define MT_MCUQ_RING_BASE(q) (MT_Q_BASE(q) + 0x300) |
| #define MT_TXQ_RING_BASE(q) (MT_Q_BASE(__TXQ(q)) + 0x300) |
| #define MT_RXQ_RING_BASE(q) (MT_Q_BASE(__RXQ(q)) + 0x500) |
| +#define MT_RXQ_RRO_IND_RING_BASE MT_RRO_TOP(0x40) |
| |
| #define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \ |
| MT_MCUQ_ID(q) * 0x4) |
| @@ -427,6 +462,15 @@ enum base_rev { |
| #define MT_INT_MCU_CMD BIT(29) |
| #define MT_INT_RX_TXFREE_EXT BIT(26) |
| |
| +#define MT_INT_RX_DONE_RRO_BAND0 BIT(16) |
| +#define MT_INT_RX_DONE_RRO_BAND1 BIT(16) |
| +#define MT_INT_RX_DONE_RRO_BAND2 BIT(14) |
| +#define MT_INT_RX_DONE_RRO_IND BIT(11) |
| +#define MT_INT_RX_DONE_MSDU_PG_BAND0 BIT(18) |
| +#define MT_INT_RX_DONE_MSDU_PG_BAND1 BIT(19) |
| +#define MT_INT_RX_DONE_MSDU_PG_BAND2 BIT(23) |
| +
| #define MT_INT_RX(q) (dev->q_int_mask[__RXQ(q)]) |
| #define MT_INT_TX_MCU(q) (dev->q_int_mask[(q)]) |
| |
| @@ -434,20 +478,31 @@ enum base_rev { |
| MT_INT_RX(MT_RXQ_MCU_WA)) |
| |
| #define MT_INT_BAND0_RX_DONE (MT_INT_RX(MT_RXQ_MAIN) | \ |
| - MT_INT_RX(MT_RXQ_MAIN_WA)) |
| + MT_INT_RX(MT_RXQ_MAIN_WA) | \ |
| + MT_INT_RX(MT_RXQ_TXFREE_BAND0)) |
| |
| #define MT_INT_BAND1_RX_DONE (MT_INT_RX(MT_RXQ_BAND1) | \ |
| MT_INT_RX(MT_RXQ_BAND1_WA) | \ |
| - MT_INT_RX(MT_RXQ_MAIN_WA)) |
| + MT_INT_RX(MT_RXQ_MAIN_WA) | \ |
| + MT_INT_RX(MT_RXQ_TXFREE_BAND0)) |
| |
| #define MT_INT_BAND2_RX_DONE (MT_INT_RX(MT_RXQ_BAND2) | \ |
| MT_INT_RX(MT_RXQ_BAND2_WA) | \ |
| - MT_INT_RX(MT_RXQ_MAIN_WA)) |
| + MT_INT_RX(MT_RXQ_MAIN_WA) | \ |
| + MT_INT_RX(MT_RXQ_TXFREE_BAND0)) |
| + |
| +#define MT_INT_RRO_RX_DONE (MT_INT_RX(MT_RXQ_RRO_BAND0) | \ |
| + MT_INT_RX(MT_RXQ_RRO_BAND1) | \ |
| + MT_INT_RX(MT_RXQ_RRO_BAND2) | \ |
| + MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND0) | \ |
| + MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND1) | \ |
| + MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND2)) |
| |
| #define MT_INT_RX_DONE_ALL (MT_INT_RX_DONE_MCU | \ |
| MT_INT_BAND0_RX_DONE | \ |
| MT_INT_BAND1_RX_DONE | \ |
| - MT_INT_BAND2_RX_DONE) |
| + MT_INT_BAND2_RX_DONE | \ |
| + MT_INT_RRO_RX_DONE) |
| |
| #define MT_INT_TX_DONE_FWDL BIT(26) |
| #define MT_INT_TX_DONE_MCU_WM BIT(27) |
| -- |
| 2.18.0 |
| |