| From fd9a307422024d5c6e953634129cc2b61425e93f Mon Sep 17 00:00:00 2001 |
| From: Ryder Lee <ryder.lee@mediatek.com> |
| Date: Mon, 14 Nov 2022 10:17:47 +0800 |
| Subject: [PATCH] mt76: sync to latest master commit |
| |
| wifi: mt76: mt7915: fix uninitialized irq_mask |
| wifi: mt76: mt7921: introduce remain_on_channel support |
| wifi: mt76: connac: rework macros for unified command |
| wifi: mt76: connac: update struct sta_rec_phy |
| wifi: mt76: connac: rework fields for larger bandwidth support in sta_rec_bf |
| wifi: mt76: connac: add more unified command IDs |
| wifi: mt76: connac: introduce unified event table |
| wifi: mt76: connac: add more bss info command tags |
| wifi: mt76: connac: add more starec command tags |
| wifi: mt76: connac: introduce helper for mt7996 chipset |
| wifi: mt76: mt7921: fix wrong power after multiple SAR set |
| wifi: mt76: mt7915: add missing MODULE_PARM_DESC |
| wifi: mt76: mt7915: add support to configure spatial reuse parameter set |
| wifi: mt76: introduce rxwi and rx token utility routines |
| wifi: mt76: add WED RX support to mt76_dma_{add,get}_buf |
| wifi: mt76: add WED RX support to mt76_dma_rx_fill |
| wifi: mt76: add WED RX support to dma queue alloc |
| wifi: mt76: add info parameter to rx_skb signature |
| wifi: mt76: connac: introduce mt76_connac_mcu_sta_wed_update utility routine |
| wifi: mt76: mt7915: enable WED RX support |
| wifi: mt76: mt7915: enable WED RX stats |
| wifi: mt76: mt7915: add baseband Txpower info into debugfs |
| wifi: mt76: mt7915: enable .sta_set_txpwr support |
| wifi: mt76: mt7915: fix band_idx usage |
| --- |
| dma.c | 244 +++++++++++++++++++++++++------- |
| dma.h | 8 ++ |
| mac80211.c | 10 +- |
| mt76.h | 26 +++- |
| mt7603/dma.c | 2 +- |
| mt7603/mt7603.h | 2 +- |
| mt7615/mac.c | 2 +- |
| mt7615/mt7615.h | 2 +- |
| mt76_connac.h | 5 + |
| mt76_connac_mcu.c | 25 +++- |
| mt76_connac_mcu.h | 70 ++++++++- |
| mt76x02.h | 2 +- |
| mt76x02_txrx.c | 2 +- |
| mt7915/coredump.c | 1 + |
| mt7915/debugfs.c | 29 ++-- |
| mt7915/dma.c | 26 +++- |
| mt7915/init.c | 3 + |
| mt7915/mac.c | 60 ++++++-- |
| mt7915/main.c | 84 ++++++++--- |
| mt7915/mcu.c | 354 ++++++++++++++++++++++++++++++++++++++++------ |
| mt7915/mcu.h | 30 ++++ |
| mt7915/mmio.c | 320 +++++++++++++++++++++++++++++------------ |
| mt7915/mt7915.h | 13 +- |
| mt7915/regs.h | 11 ++ |
| mt7915/testmode.c | 18 +-- |
| mt7921/init.c | 64 +++++++++ |
| mt7921/mac.c | 2 +- |
| mt7921/main.c | 118 ++++++++++++++++ |
| mt7921/mcu.c | 24 ++++ |
| mt7921/mt7921.h | 52 ++++++- |
| mt7921/pci.c | 33 ++++- |
| mt7921/sdio.c | 23 ++- |
| mt7921/usb.c | 12 +- |
| sdio.c | 2 +- |
| tx.c | 30 ++++ |
| usb.c | 2 +- |
| 36 files changed, 1438 insertions(+), 273 deletions(-) |
| |
| diff --git a/dma.c b/dma.c |
| index 4b181305..ae22b959 100644 |
| --- a/dma.c |
| +++ b/dma.c |
| @@ -59,6 +59,19 @@ mt76_alloc_txwi(struct mt76_dev *dev) |
| return t; |
| } |
| |
| +static struct mt76_txwi_cache * |
| +mt76_alloc_rxwi(struct mt76_dev *dev) |
| +{ |
| + struct mt76_txwi_cache *t; |
| + |
| + t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC); |
| + if (!t) |
| + return NULL; |
| + |
| + t->ptr = NULL; |
| + return t; |
| +} |
| + |
| static struct mt76_txwi_cache * |
| __mt76_get_txwi(struct mt76_dev *dev) |
| { |
| @@ -75,6 +88,22 @@ __mt76_get_txwi(struct mt76_dev *dev) |
| return t; |
| } |
| |
| +static struct mt76_txwi_cache * |
| +__mt76_get_rxwi(struct mt76_dev *dev) |
| +{ |
| + struct mt76_txwi_cache *t = NULL; |
| + |
| + spin_lock(&dev->wed_lock); |
| + if (!list_empty(&dev->rxwi_cache)) { |
| + t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache, |
| + list); |
| + list_del(&t->list); |
| + } |
| + spin_unlock(&dev->wed_lock); |
| + |
| + return t; |
| +} |
| + |
| static struct mt76_txwi_cache * |
| mt76_get_txwi(struct mt76_dev *dev) |
| { |
| @@ -86,6 +115,18 @@ mt76_get_txwi(struct mt76_dev *dev) |
| return mt76_alloc_txwi(dev); |
| } |
| |
| +struct mt76_txwi_cache * |
| +mt76_get_rxwi(struct mt76_dev *dev) |
| +{ |
| + struct mt76_txwi_cache *t = __mt76_get_rxwi(dev); |
| + |
| + if (t) |
| + return t; |
| + |
| + return mt76_alloc_rxwi(dev); |
| +} |
| +EXPORT_SYMBOL_GPL(mt76_get_rxwi); |
| + |
| void |
| mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) |
| { |
| @@ -98,6 +139,18 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) |
| } |
| EXPORT_SYMBOL_GPL(mt76_put_txwi); |
| |
| +void |
| +mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) |
| +{ |
| + if (!t) |
| + return; |
| + |
| + spin_lock(&dev->wed_lock); |
| + list_add(&t->list, &dev->rxwi_cache); |
| + spin_unlock(&dev->wed_lock); |
| +} |
| +EXPORT_SYMBOL_GPL(mt76_put_rxwi); |
| + |
| static void |
| mt76_free_pending_txwi(struct mt76_dev *dev) |
| { |
| @@ -112,6 +165,20 @@ mt76_free_pending_txwi(struct mt76_dev *dev) |
| local_bh_enable(); |
| } |
| |
| +static void |
| +mt76_free_pending_rxwi(struct mt76_dev *dev) |
| +{ |
| + struct mt76_txwi_cache *t; |
| + |
| + local_bh_disable(); |
| + while ((t = __mt76_get_rxwi(dev)) != NULL) { |
| + if (t->ptr) |
| + skb_free_frag(t->ptr); |
| + kfree(t); |
| + } |
| + local_bh_enable(); |
| +} |
| + |
| static void |
| mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) |
| { |
| @@ -148,11 +215,6 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, |
| u32 ctrl; |
| int i, idx = -1; |
| |
| - if (txwi) { |
| - q->entry[q->head].txwi = DMA_DUMMY_DATA; |
| - q->entry[q->head].skip_buf0 = true; |
| - } |
| - |
| for (i = 0; i < nbufs; i += 2, buf += 2) { |
| u32 buf0 = buf[0].addr, buf1 = 0; |
| |
| @@ -162,28 +224,48 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, |
| desc = &q->desc[idx]; |
| entry = &q->entry[idx]; |
| |
| - if (buf[0].skip_unmap) |
| - entry->skip_buf0 = true; |
| - entry->skip_buf1 = i == nbufs - 1; |
| - |
| - entry->dma_addr[0] = buf[0].addr; |
| - entry->dma_len[0] = buf[0].len; |
| - |
| - ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len); |
| - if (i < nbufs - 1) { |
| - entry->dma_addr[1] = buf[1].addr; |
| - entry->dma_len[1] = buf[1].len; |
| - buf1 = buf[1].addr; |
| - ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len); |
| - if (buf[1].skip_unmap) |
| - entry->skip_buf1 = true; |
| + if ((q->flags & MT_QFLAG_WED) && |
| + FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) { |
| + struct mt76_txwi_cache *t = txwi; |
| + int rx_token; |
| + |
| + if (!t) |
| + return -ENOMEM; |
| + |
| + rx_token = mt76_rx_token_consume(dev, (void *)skb, t, |
| + buf[0].addr); |
| + buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token); |
| + ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len) | |
| + MT_DMA_CTL_TO_HOST; |
| + } else { |
| + if (txwi) { |
| + q->entry[q->head].txwi = DMA_DUMMY_DATA; |
| + q->entry[q->head].skip_buf0 = true; |
| + } |
| + |
| + if (buf[0].skip_unmap) |
| + entry->skip_buf0 = true; |
| + entry->skip_buf1 = i == nbufs - 1; |
| + |
| + entry->dma_addr[0] = buf[0].addr; |
| + entry->dma_len[0] = buf[0].len; |
| + |
| + ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len); |
| + if (i < nbufs - 1) { |
| + entry->dma_addr[1] = buf[1].addr; |
| + entry->dma_len[1] = buf[1].len; |
| + buf1 = buf[1].addr; |
| + ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len); |
| + if (buf[1].skip_unmap) |
| + entry->skip_buf1 = true; |
| + } |
| + |
| + if (i == nbufs - 1) |
| + ctrl |= MT_DMA_CTL_LAST_SEC0; |
| + else if (i == nbufs - 2) |
| + ctrl |= MT_DMA_CTL_LAST_SEC1; |
| } |
| |
| - if (i == nbufs - 1) |
| - ctrl |= MT_DMA_CTL_LAST_SEC0; |
| - else if (i == nbufs - 2) |
| - ctrl |= MT_DMA_CTL_LAST_SEC1; |
| - |
| WRITE_ONCE(desc->buf0, cpu_to_le32(buf0)); |
| WRITE_ONCE(desc->buf1, cpu_to_le32(buf1)); |
| WRITE_ONCE(desc->info, cpu_to_le32(info)); |
| @@ -272,33 +354,60 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush) |
| |
| static void * |
| mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, |
| - int *len, u32 *info, bool *more) |
| + int *len, u32 *info, bool *more, bool *drop) |
| { |
| struct mt76_queue_entry *e = &q->entry[idx]; |
| struct mt76_desc *desc = &q->desc[idx]; |
| - dma_addr_t buf_addr; |
| - void *buf = e->buf; |
| - int buf_len = SKB_WITH_OVERHEAD(q->buf_size); |
| + void *buf; |
| |
| - buf_addr = e->dma_addr[0]; |
| if (len) { |
| - u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl)); |
| - *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl); |
| - *more = !(ctl & MT_DMA_CTL_LAST_SEC0); |
| + u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl)); |
| + *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl); |
| + *more = !(ctrl & MT_DMA_CTL_LAST_SEC0); |
| } |
| |
| if (info) |
| *info = le32_to_cpu(desc->info); |
| |
| - dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE); |
| - e->buf = NULL; |
| + if ((q->flags & MT_QFLAG_WED) && |
| + FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) { |
| + u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, |
| + le32_to_cpu(desc->buf1)); |
| + struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token); |
| + |
| + if (!t) |
| + return NULL; |
| + |
| + dma_unmap_single(dev->dma_dev, t->dma_addr, |
| + SKB_WITH_OVERHEAD(q->buf_size), |
| + DMA_FROM_DEVICE); |
| + |
| + buf = t->ptr; |
| + t->dma_addr = 0; |
| + t->ptr = NULL; |
| + |
| + mt76_put_rxwi(dev, t); |
| + |
| + if (drop) { |
| + u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl)); |
| + |
| + *drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A | |
| + MT_DMA_CTL_DROP)); |
| + } |
| + } else { |
| + buf = e->buf; |
| + e->buf = NULL; |
| + dma_unmap_single(dev->dma_dev, e->dma_addr[0], |
| + SKB_WITH_OVERHEAD(q->buf_size), |
| + DMA_FROM_DEVICE); |
| + } |
| |
| return buf; |
| } |
| |
| static void * |
| mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush, |
| - int *len, u32 *info, bool *more) |
| + int *len, u32 *info, bool *more, bool *drop) |
| { |
| int idx = q->tail; |
| |
| @@ -314,7 +423,7 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush, |
| q->tail = (q->tail + 1) % q->ndesc; |
| q->queued--; |
| |
| - return mt76_dma_get_buf(dev, q, idx, len, info, more); |
| + return mt76_dma_get_buf(dev, q, idx, len, info, more, drop); |
| } |
| |
| static int |
| @@ -441,14 +550,26 @@ free_skb: |
| return ret; |
| } |
| |
| +static struct page_frag_cache * |
| +mt76_dma_rx_get_frag_cache(struct mt76_dev *dev, struct mt76_queue *q) |
| +{ |
| + struct page_frag_cache *rx_page = &q->rx_page; |
| + |
| +#ifdef CONFIG_NET_MEDIATEK_SOC_WED |
| + if ((q->flags & MT_QFLAG_WED) && |
| + FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) |
| + rx_page = &dev->mmio.wed.rx_buf_ring.rx_page; |
| +#endif |
| + return rx_page; |
| +} |
| + |
| static int |
| mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) |
| { |
| - dma_addr_t addr; |
| - void *buf; |
| - int frames = 0; |
| + struct page_frag_cache *rx_page = mt76_dma_rx_get_frag_cache(dev, q); |
| int len = SKB_WITH_OVERHEAD(q->buf_size); |
| - int offset = q->buf_offset; |
| + int frames = 0, offset = q->buf_offset; |
| + dma_addr_t addr; |
| |
| if (!q->ndesc) |
| return 0; |
| @@ -456,9 +577,18 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) |
| spin_lock_bh(&q->lock); |
| |
| while (q->queued < q->ndesc - 1) { |
| + struct mt76_txwi_cache *t = NULL; |
| struct mt76_queue_buf qbuf; |
| + void *buf = NULL; |
| |
| - buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC); |
| + if ((q->flags & MT_QFLAG_WED) && |
| + FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) { |
| + t = mt76_get_rxwi(dev); |
| + if (!t) |
| + break; |
| + } |
| + |
| + buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC); |
| if (!buf) |
| break; |
| |
| @@ -471,7 +601,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) |
| qbuf.addr = addr + offset; |
| qbuf.len = len - offset; |
| qbuf.skip_unmap = false; |
| - mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL); |
| + mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t); |
| frames++; |
| } |
| |
| @@ -517,6 +647,11 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q) |
| if (!ret) |
| q->wed_regs = wed->txfree_ring.reg_base; |
| break; |
| + case MT76_WED_Q_RX: |
| + ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs); |
| + if (!ret) |
| + q->wed_regs = wed->rx_ring[ring].reg_base; |
| + break; |
| default: |
| ret = -EINVAL; |
| } |
| @@ -574,7 +709,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) |
| |
| spin_lock_bh(&q->lock); |
| do { |
| - buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more); |
| + buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL); |
| if (!buf) |
| break; |
| |
| @@ -615,7 +750,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid) |
| |
| static void |
| mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, |
| - int len, bool more) |
| + int len, bool more, u32 info) |
| { |
| struct sk_buff *skb = q->rx_head; |
| struct skb_shared_info *shinfo = skb_shinfo(skb); |
| @@ -635,7 +770,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, |
| |
| q->rx_head = NULL; |
| if (nr_frags < ARRAY_SIZE(shinfo->frags)) |
| - dev->drv->rx_skb(dev, q - dev->q_rx, skb); |
| + dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); |
| else |
| dev_kfree_skb(skb); |
| } |
| @@ -656,6 +791,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) |
| } |
| |
| while (done < budget) { |
| + bool drop = false; |
| u32 info; |
| |
| if (check_ddone) { |
| @@ -666,10 +802,14 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) |
| break; |
| } |
| |
| - data = mt76_dma_dequeue(dev, q, false, &len, &info, &more); |
| + data = mt76_dma_dequeue(dev, q, false, &len, &info, &more, |
| + &drop); |
| if (!data) |
| break; |
| |
| + if (drop) |
| + goto free_frag; |
| + |
| if (q->rx_head) |
| data_len = q->buf_size; |
| else |
| @@ -682,7 +822,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) |
| } |
| |
| if (q->rx_head) { |
| - mt76_add_fragment(dev, q, data, len, more); |
| + mt76_add_fragment(dev, q, data, len, more, info); |
| continue; |
| } |
| |
| @@ -706,7 +846,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) |
| continue; |
| } |
| |
| - dev->drv->rx_skb(dev, q - dev->q_rx, skb); |
| + dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); |
| continue; |
| |
| free_frag: |
| @@ -803,11 +943,15 @@ void mt76_dma_cleanup(struct mt76_dev *dev) |
| mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true); |
| |
| mt76_for_each_q_rx(dev, i) { |
| + struct mt76_queue *q = &dev->q_rx[i]; |
| + |
| netif_napi_del(&dev->napi[i]); |
| - mt76_dma_rx_cleanup(dev, &dev->q_rx[i]); |
| + if (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags)) |
| + mt76_dma_rx_cleanup(dev, q); |
| } |
| |
| mt76_free_pending_txwi(dev); |
| + mt76_free_pending_rxwi(dev); |
| |
| if (mtk_wed_device_active(&dev->mmio.wed)) |
| mtk_wed_device_detach(&dev->mmio.wed); |
| diff --git a/dma.h b/dma.h |
| index fdf786f9..53c6ce25 100644 |
| --- a/dma.h |
| +++ b/dma.h |
| @@ -15,6 +15,14 @@ |
| #define MT_DMA_CTL_SD_LEN0 GENMASK(29, 16) |
| #define MT_DMA_CTL_LAST_SEC0 BIT(30) |
| #define MT_DMA_CTL_DMA_DONE BIT(31) |
| +#define MT_DMA_CTL_TO_HOST BIT(8) |
| +#define MT_DMA_CTL_TO_HOST_A BIT(12) |
| +#define MT_DMA_CTL_DROP BIT(14) |
| +#define MT_DMA_CTL_TOKEN GENMASK(31, 16) |
| + |
| +#define MT_DMA_PPE_CPU_REASON GENMASK(15, 11) |
| +#define MT_DMA_PPE_ENTRY GENMASK(30, 16) |
| +#define MT_DMA_INFO_PPE_VLD BIT(31) |
| |
| #define MT_DMA_HDR_LEN 4 |
| #define MT_RX_INFO_LEN 4 |
| diff --git a/mac80211.c b/mac80211.c |
| index 30c1bc56..acac04ef 100644 |
| --- a/mac80211.c |
| +++ b/mac80211.c |
| @@ -572,6 +572,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size, |
| spin_lock_init(&dev->lock); |
| spin_lock_init(&dev->cc_lock); |
| spin_lock_init(&dev->status_lock); |
| + spin_lock_init(&dev->wed_lock); |
| mutex_init(&dev->mutex); |
| init_waitqueue_head(&dev->tx_wait); |
| |
| @@ -594,9 +595,13 @@ mt76_alloc_device(struct device *pdev, unsigned int size, |
| spin_lock_init(&dev->token_lock); |
| idr_init(&dev->token); |
| |
| + spin_lock_init(&dev->rx_token_lock); |
| + idr_init(&dev->rx_token); |
| + |
| INIT_LIST_HEAD(&dev->wcid_list); |
| |
| INIT_LIST_HEAD(&dev->txwi_cache); |
| + INIT_LIST_HEAD(&dev->rxwi_cache); |
| dev->token_size = dev->drv->token_size; |
| |
| for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) |
| @@ -1292,7 +1297,10 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q, |
| |
| while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) { |
| mt76_check_sta(dev, skb); |
| - mt76_rx_aggr_reorder(skb, &frames); |
| + if (mtk_wed_device_active(&dev->mmio.wed)) |
| + __skb_queue_tail(&frames, skb); |
| + else |
| + mt76_rx_aggr_reorder(skb, &frames); |
| } |
| |
| mt76_rx_complete(dev, &frames, napi); |
| diff --git a/mt76.h b/mt76.h |
| index a2bccf6b..33f87e51 100644 |
| --- a/mt76.h |
| +++ b/mt76.h |
| @@ -35,6 +35,7 @@ |
| FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \ |
| FIELD_PREP(MT_QFLAG_WED_RING, _n)) |
| #define MT_WED_Q_TX(_n) __MT_WED_Q(MT76_WED_Q_TX, _n) |
| +#define MT_WED_Q_RX(_n) __MT_WED_Q(MT76_WED_Q_RX, _n) |
| #define MT_WED_Q_TXFREE __MT_WED_Q(MT76_WED_Q_TXFREE, 0) |
| |
| struct mt76_dev; |
| @@ -56,6 +57,7 @@ enum mt76_bus_type { |
| enum mt76_wed_type { |
| MT76_WED_Q_TX, |
| MT76_WED_Q_TXFREE, |
| + MT76_WED_Q_RX, |
| }; |
| |
| struct mt76_bus_ops { |
| @@ -271,9 +273,15 @@ struct mt76_sta_stats { |
| u64 tx_nss[4]; /* 1, 2, 3, 4 */ |
| u64 tx_mcs[16]; /* mcs idx */ |
| u64 tx_bytes; |
| + /* WED TX */ |
| u32 tx_packets; |
| u32 tx_retries; |
| u32 tx_failed; |
| + /* WED RX */ |
| + u64 rx_bytes; |
| + u32 rx_packets; |
| + u32 rx_errors; |
| + u32 rx_drops; |
| }; |
| |
| enum mt76_wcid_flags { |
| @@ -339,7 +347,10 @@ struct mt76_txwi_cache { |
| struct list_head list; |
| dma_addr_t dma_addr; |
| |
| - struct sk_buff *skb; |
| + union { |
| + struct sk_buff *skb; |
| + void *ptr; |
| + }; |
| }; |
| |
| struct mt76_rx_tid { |
| @@ -439,7 +450,7 @@ struct mt76_driver_ops { |
| bool (*rx_check)(struct mt76_dev *dev, void *data, int len); |
| |
| void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q, |
| - struct sk_buff *skb); |
| + struct sk_buff *skb, u32 *info); |
| |
| void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q); |
| |
| @@ -728,6 +739,7 @@ struct mt76_dev { |
| |
| struct ieee80211_hw *hw; |
| |
| + spinlock_t wed_lock; |
| spinlock_t lock; |
| spinlock_t cc_lock; |
| |
| @@ -754,6 +766,7 @@ struct mt76_dev { |
| struct sk_buff_head rx_skb[__MT_RXQ_MAX]; |
| |
| struct list_head txwi_cache; |
| + struct list_head rxwi_cache; |
| struct mt76_queue *q_mcu[__MT_MCUQ_MAX]; |
| struct mt76_queue q_rx[__MT_RXQ_MAX]; |
| const struct mt76_queue_ops *queue_ops; |
| @@ -768,6 +781,10 @@ struct mt76_dev { |
| u16 token_count; |
| u16 token_size; |
| |
| + spinlock_t rx_token_lock; |
| + struct idr rx_token; |
| + u16 rx_token_size; |
| + |
| wait_queue_head_t tx_wait; |
| /* spinclock used to protect wcid pktid linked list */ |
| spinlock_t status_lock; |
| @@ -1247,6 +1264,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb) |
| } |
| |
| void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t); |
| +void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t); |
| +struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev); |
| void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, |
| struct napi_struct *napi); |
| void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q, |
| @@ -1391,6 +1410,9 @@ struct mt76_txwi_cache * |
| mt76_token_release(struct mt76_dev *dev, int token, bool *wake); |
| int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi); |
| void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked); |
| +struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token); |
| +int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr, |
| + struct mt76_txwi_cache *r, dma_addr_t phys); |
| |
| static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked) |
| { |
| diff --git a/mt7603/dma.c b/mt7603/dma.c |
| index 590cff9d..06a9e6ec 100644 |
| --- a/mt7603/dma.c |
| +++ b/mt7603/dma.c |
| @@ -69,7 +69,7 @@ free: |
| } |
| |
| void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, |
| - struct sk_buff *skb) |
| + struct sk_buff *skb, u32 *info) |
| { |
| struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); |
| __le32 *rxd = (__le32 *)skb->data; |
| diff --git a/mt7603/mt7603.h b/mt7603/mt7603.h |
| index 0fd46d90..7c3be596 100644 |
| --- a/mt7603/mt7603.h |
| +++ b/mt7603/mt7603.h |
| @@ -244,7 +244,7 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, |
| void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); |
| |
| void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, |
| - struct sk_buff *skb); |
| + struct sk_buff *skb, u32 *info); |
| void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q); |
| void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps); |
| int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, |
| diff --git a/mt7615/mac.c b/mt7615/mac.c |
| index 305bf182..a9560247 100644 |
| --- a/mt7615/mac.c |
| +++ b/mt7615/mac.c |
| @@ -1666,7 +1666,7 @@ bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len) |
| EXPORT_SYMBOL_GPL(mt7615_rx_check); |
| |
| void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, |
| - struct sk_buff *skb) |
| + struct sk_buff *skb, u32 *info) |
| { |
| struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); |
| __le32 *rxd = (__le32 *)skb->data; |
| diff --git a/mt7615/mt7615.h b/mt7615/mt7615.h |
| index 1080d202..43739ecf 100644 |
| --- a/mt7615/mt7615.h |
| +++ b/mt7615/mt7615.h |
| @@ -514,7 +514,7 @@ void mt7615_tx_worker(struct mt76_worker *w); |
| void mt7615_tx_token_put(struct mt7615_dev *dev); |
| bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len); |
| void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, |
| - struct sk_buff *skb); |
| + struct sk_buff *skb, u32 *info); |
| void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps); |
| int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, |
| struct ieee80211_sta *sta); |
| diff --git a/mt76_connac.h b/mt76_connac.h |
| index 0915eb57..8ba883b0 100644 |
| --- a/mt76_connac.h |
| +++ b/mt76_connac.h |
| @@ -187,6 +187,11 @@ static inline bool is_mt7986(struct mt76_dev *dev) |
| return mt76_chip(dev) == 0x7986; |
| } |
| |
| +static inline bool is_mt7996(struct mt76_dev *dev) |
| +{ |
| + return mt76_chip(dev) == 0x7990; |
| +} |
| + |
| static inline bool is_mt7622(struct mt76_dev *dev) |
| { |
| if (!IS_ENABLED(CONFIG_MT7622_WMAC)) |
| diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c |
| index dfec416e..c65267b4 100644 |
| --- a/mt76_connac_mcu.c |
| +++ b/mt76_connac_mcu.c |
| @@ -65,7 +65,8 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len, |
| int cmd; |
| |
| if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) || |
| - (is_mt7921(dev) && addr == 0x900000)) |
| + (is_mt7921(dev) && addr == 0x900000) || |
| + (is_mt7996(dev) && addr == 0x900000)) |
| cmd = MCU_CMD(PATCH_START_REQ); |
| else |
| cmd = MCU_CMD(TARGET_ADDRESS_LEN_REQ); |
| @@ -1183,6 +1184,16 @@ void mt76_connac_mcu_sta_ba_tlv(struct sk_buff *skb, |
| } |
| EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba_tlv); |
| |
| +int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb) |
| +{ |
| + if (!mtk_wed_device_active(&dev->mmio.wed)) |
| + return 0; |
| + |
| + return mtk_wed_device_update_msg(&dev->mmio.wed, WED_WO_STA_REC, |
| + skb->data, skb->len); |
| +} |
| +EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_wed_update); |
| + |
| int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif, |
| struct ieee80211_ampdu_params *params, |
| int cmd, bool enable, bool tx) |
| @@ -1208,6 +1219,10 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif, |
| mt76_connac_mcu_wtbl_ba_tlv(dev, skb, params, enable, tx, sta_wtbl, |
| wtbl_hdr); |
| |
| + ret = mt76_connac_mcu_sta_wed_update(dev, skb); |
| + if (ret) |
| + return ret; |
| + |
| ret = mt76_mcu_skb_send_msg(dev, skb, cmd, true); |
| if (ret) |
| return ret; |
| @@ -1218,6 +1233,10 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif, |
| |
| mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx); |
| |
| + ret = mt76_connac_mcu_sta_wed_update(dev, skb); |
| + if (ret) |
| + return ret; |
| + |
| return mt76_mcu_skb_send_msg(dev, skb, cmd, true); |
| } |
| EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba); |
| @@ -2658,6 +2677,10 @@ int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif, |
| if (ret) |
| return ret; |
| |
| + ret = mt76_connac_mcu_sta_wed_update(dev, skb); |
| + if (ret) |
| + return ret; |
| + |
| return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true); |
| } |
| EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_key); |
| diff --git a/mt76_connac_mcu.h b/mt76_connac_mcu.h |
| index 87c65d25..72d235a1 100644 |
| --- a/mt76_connac_mcu.h |
| +++ b/mt76_connac_mcu.h |
| @@ -63,7 +63,7 @@ struct mt76_connac2_mcu_txd { |
| } __packed __aligned(4); |
| |
| /** |
| - * struct mt76_connac2_mcu_uni_txd - mcu command descriptor for firmware v3 |
| + * struct mt76_connac2_mcu_uni_txd - mcu command descriptor for connac2 and connac3 |
| * @txd: hardware descriptor |
| * @len: total length not including txd |
| * @cid: command identifier |
| @@ -393,7 +393,8 @@ struct sta_rec_phy { |
| u8 ampdu; |
| u8 rts_policy; |
| u8 rcpi; |
| - u8 rsv[2]; |
| + u8 max_ampdu_len; /* connac3 */ |
| + u8 rsv[1]; |
| } __packed; |
| |
| struct sta_rec_he_6g_capa { |
| @@ -454,8 +455,8 @@ struct sta_rec_bf { |
| u8 ibf_dbw; |
| u8 ibf_ncol; |
| u8 ibf_nrow; |
| - u8 nrow_bw160; |
| - u8 ncol_bw160; |
| + u8 nrow_gt_bw80; |
| + u8 ncol_gt_bw80; |
| u8 ru_start_idx; |
| u8 ru_end_idx; |
| |
| @@ -781,6 +782,8 @@ enum { |
| STA_REC_BFEE, |
| STA_REC_PHY = 0x15, |
| STA_REC_HE_6G = 0x17, |
| + STA_REC_HDRT = 0x28, |
| + STA_REC_HDR_TRANS = 0x2B, |
| STA_REC_MAX_NUM |
| }; |
| |
| @@ -986,6 +989,17 @@ enum { |
| MCU_EXT_EVENT_MURU_CTRL = 0x9f, |
| }; |
| |
| +/* unified event table */ |
| +enum { |
| + MCU_UNI_EVENT_RESULT = 0x01, |
| + MCU_UNI_EVENT_FW_LOG_2_HOST = 0x04, |
| + MCU_UNI_EVENT_IE_COUNTDOWN = 0x09, |
| + MCU_UNI_EVENT_RDD_REPORT = 0x11, |
| +}; |
| + |
| +#define MCU_UNI_CMD_EVENT BIT(1) |
| +#define MCU_UNI_CMD_UNSOLICITED_EVENT BIT(2) |
| + |
| enum { |
| MCU_Q_QUERY, |
| MCU_Q_SET, |
| @@ -1068,10 +1082,11 @@ enum { |
| |
| #define MCU_CMD_ACK BIT(0) |
| #define MCU_CMD_UNI BIT(1) |
| -#define MCU_CMD_QUERY BIT(2) |
| +#define MCU_CMD_SET BIT(2) |
| |
| #define MCU_CMD_UNI_EXT_ACK (MCU_CMD_ACK | MCU_CMD_UNI | \ |
| - MCU_CMD_QUERY) |
| + MCU_CMD_SET) |
| +#define MCU_CMD_UNI_QUERY_ACK (MCU_CMD_ACK | MCU_CMD_UNI) |
| |
| #define __MCU_CMD_FIELD_ID GENMASK(7, 0) |
| #define __MCU_CMD_FIELD_EXT_ID GENMASK(15, 8) |
| @@ -1079,6 +1094,7 @@ enum { |
| #define __MCU_CMD_FIELD_UNI BIT(17) |
| #define __MCU_CMD_FIELD_CE BIT(18) |
| #define __MCU_CMD_FIELD_WA BIT(19) |
| +#define __MCU_CMD_FIELD_WM BIT(20) |
| |
| #define MCU_CMD(_t) FIELD_PREP(__MCU_CMD_FIELD_ID, \ |
| MCU_CMD_##_t) |
| @@ -1100,6 +1116,16 @@ enum { |
| FIELD_PREP(__MCU_CMD_FIELD_EXT_ID, \ |
| MCU_WA_PARAM_CMD_##_t)) |
| |
| +#define MCU_WM_UNI_CMD(_t) (MCU_UNI_CMD(_t) | \ |
| + __MCU_CMD_FIELD_WM) |
| +#define MCU_WM_UNI_CMD_QUERY(_t) (MCU_UNI_CMD(_t) | \ |
| + __MCU_CMD_FIELD_QUERY | \ |
| + __MCU_CMD_FIELD_WM) |
| +#define MCU_WA_UNI_CMD(_t) (MCU_UNI_CMD(_t) | \ |
| + __MCU_CMD_FIELD_WA) |
| +#define MCU_WMWA_UNI_CMD(_t) (MCU_WM_UNI_CMD(_t) | \ |
| + __MCU_CMD_FIELD_WA) |
| + |
| enum { |
| MCU_EXT_CMD_EFUSE_ACCESS = 0x01, |
| MCU_EXT_CMD_RF_REG_ACCESS = 0x02, |
| @@ -1153,11 +1179,33 @@ enum { |
| MCU_UNI_CMD_DEV_INFO_UPDATE = 0x01, |
| MCU_UNI_CMD_BSS_INFO_UPDATE = 0x02, |
| MCU_UNI_CMD_STA_REC_UPDATE = 0x03, |
| + MCU_UNI_CMD_EDCA_UPDATE = 0x04, |
| MCU_UNI_CMD_SUSPEND = 0x05, |
| MCU_UNI_CMD_OFFLOAD = 0x06, |
| MCU_UNI_CMD_HIF_CTRL = 0x07, |
| + MCU_UNI_CMD_BAND_CONFIG = 0x08, |
| + MCU_UNI_CMD_REPT_MUAR = 0x09, |
| + MCU_UNI_CMD_WSYS_CONFIG = 0x0b, |
| + MCU_UNI_CMD_REG_ACCESS = 0x0d, |
| + MCU_UNI_CMD_POWER_CREL = 0x0f, |
| + MCU_UNI_CMD_RX_HDR_TRANS = 0x12, |
| + MCU_UNI_CMD_SER = 0x13, |
| + MCU_UNI_CMD_TWT = 0x14, |
| + MCU_UNI_CMD_RDD_CTRL = 0x19, |
| + MCU_UNI_CMD_GET_MIB_INFO = 0x22, |
| MCU_UNI_CMD_SNIFFER = 0x24, |
| + MCU_UNI_CMD_SR = 0x25, |
| MCU_UNI_CMD_ROC = 0x27, |
| + MCU_UNI_CMD_TXPOWER = 0x2b, |
| + MCU_UNI_CMD_EFUSE_CTRL = 0x2d, |
| + MCU_UNI_CMD_RA = 0x2f, |
| + MCU_UNI_CMD_MURU = 0x31, |
| + MCU_UNI_CMD_BF = 0x33, |
| + MCU_UNI_CMD_CHANNEL_SWITCH = 0x34, |
| + MCU_UNI_CMD_THERMAL = 0x35, |
| + MCU_UNI_CMD_VOW = 0x37, |
| + MCU_UNI_CMD_RRO = 0x57, |
| + MCU_UNI_CMD_OFFCH_SCAN_CTRL = 0x58, |
| }; |
| |
| enum { |
| @@ -1207,14 +1255,23 @@ enum { |
| |
| enum { |
| UNI_BSS_INFO_BASIC = 0, |
| + UNI_BSS_INFO_RA = 1, |
| UNI_BSS_INFO_RLM = 2, |
| UNI_BSS_INFO_BSS_COLOR = 4, |
| UNI_BSS_INFO_HE_BASIC = 5, |
| UNI_BSS_INFO_BCN_CONTENT = 7, |
| + UNI_BSS_INFO_BCN_CSA = 8, |
| + UNI_BSS_INFO_BCN_BCC = 9, |
| + UNI_BSS_INFO_BCN_MBSSID = 10, |
| + UNI_BSS_INFO_RATE = 11, |
| UNI_BSS_INFO_QBSS = 15, |
| + UNI_BSS_INFO_SEC = 16, |
| + UNI_BSS_INFO_TXCMD = 18, |
| UNI_BSS_INFO_UAPSD = 19, |
| UNI_BSS_INFO_PS = 21, |
| UNI_BSS_INFO_BCNFT = 22, |
| + UNI_BSS_INFO_OFFLOAD = 25, |
| + UNI_BSS_INFO_MLD = 26, |
| }; |
| |
| enum { |
| @@ -1823,6 +1880,7 @@ int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter); |
| int mt76_connac_mcu_restart(struct mt76_dev *dev); |
| int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index, |
| u8 rx_sel, u8 val); |
| +int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb); |
| int mt76_connac2_load_ram(struct mt76_dev *dev, const char *fw_wm, |
| const char *fw_wa); |
| int mt76_connac2_load_patch(struct mt76_dev *dev, const char *fw_name); |
| diff --git a/mt76x02.h b/mt76x02.h |
| index 849c2644..3f2a9b7f 100644 |
| --- a/mt76x02.h |
| +++ b/mt76x02.h |
| @@ -187,7 +187,7 @@ int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val); |
| void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len); |
| bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update); |
| void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, |
| - struct sk_buff *skb); |
| + struct sk_buff *skb, u32 *info); |
| void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q); |
| irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance); |
| void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, |
| diff --git a/mt76x02_txrx.c b/mt76x02_txrx.c |
| index 3a313075..d8bc4ae1 100644 |
| --- a/mt76x02_txrx.c |
| +++ b/mt76x02_txrx.c |
| @@ -33,7 +33,7 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, |
| EXPORT_SYMBOL_GPL(mt76x02_tx); |
| |
| void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, |
| - struct sk_buff *skb) |
| + struct sk_buff *skb, u32 *info) |
| { |
| struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); |
| void *rxwi = skb->data; |
| diff --git a/mt7915/coredump.c b/mt7915/coredump.c |
| index bb4b7040..d097a56d 100644 |
| --- a/mt7915/coredump.c |
| +++ b/mt7915/coredump.c |
| @@ -9,6 +9,7 @@ |
| |
| static bool coredump_memdump; |
| module_param(coredump_memdump, bool, 0644); |
| +MODULE_PARM_DESC(coredump_memdump, "Optional ability to dump firmware memory"); |
| |
| static const struct mt7915_mem_region mt7915_mem_regions[] = { |
| { |
| diff --git a/mt7915/debugfs.c b/mt7915/debugfs.c |
| index 766e6208..30f8f18b 100644 |
| --- a/mt7915/debugfs.c |
| +++ b/mt7915/debugfs.c |
| @@ -51,7 +51,7 @@ mt7915_sys_recovery_set(struct file *file, const char __user *user_buf, |
| { |
| struct mt7915_phy *phy = file->private_data; |
| struct mt7915_dev *dev = phy->dev; |
| - bool ext_phy = phy != &dev->phy; |
| + bool band = phy->band_idx; |
| char buf[16]; |
| int ret = 0; |
| u16 val; |
| @@ -83,7 +83,7 @@ mt7915_sys_recovery_set(struct file *file, const char __user *user_buf, |
| * 8: trigger firmware crash. |
| */ |
| case SER_QUERY: |
| - ret = mt7915_mcu_set_ser(dev, 0, 0, ext_phy); |
| + ret = mt7915_mcu_set_ser(dev, 0, 0, band); |
| break; |
| case SER_SET_RECOVER_L1: |
| case SER_SET_RECOVER_L2: |
| @@ -91,17 +91,17 @@ mt7915_sys_recovery_set(struct file *file, const char __user *user_buf, |
| case SER_SET_RECOVER_L3_TX_ABORT: |
| case SER_SET_RECOVER_L3_TX_DISABLE: |
| case SER_SET_RECOVER_L3_BF: |
| - ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), ext_phy); |
| + ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), band); |
| if (ret) |
| return ret; |
| |
| - ret = mt7915_mcu_set_ser(dev, SER_RECOVER, val, ext_phy); |
| + ret = mt7915_mcu_set_ser(dev, SER_RECOVER, val, band); |
| break; |
| |
| /* enable full chip reset */ |
| case SER_SET_RECOVER_FULL: |
| mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK); |
| - ret = mt7915_mcu_set_ser(dev, 1, 3, ext_phy); |
| + ret = mt7915_mcu_set_ser(dev, 1, 3, band); |
| if (ret) |
| return ret; |
| |
| @@ -967,11 +967,18 @@ mt7915_rate_txpower_show(struct seq_file *file, void *data) |
| "RU484/SU40", "RU996/SU80", "RU2x996/SU160" |
| }; |
| struct mt7915_phy *phy = file->private; |
| + struct mt7915_dev *dev = phy->dev; |
| s8 txpower[MT7915_SKU_RATE_NUM], *buf; |
| - int i; |
| + u32 reg; |
| + int i, ret; |
| + |
| + ret = mt7915_mcu_get_txpower_sku(phy, txpower, sizeof(txpower)); |
| + if (ret) |
| + return ret; |
| + |
| + /* Txpower propagation path: TMAC -> TXV -> BBP */ |
| + seq_printf(file, "\nPhy %d\n", phy != &dev->phy); |
| |
| - seq_printf(file, "\nBand %d\n", phy != &phy->dev->phy); |
| - mt7915_mcu_get_txpower_sku(phy, txpower, sizeof(txpower)); |
| for (i = 0, buf = txpower; i < ARRAY_SIZE(mt7915_sku_group_len); i++) { |
| u8 mcs_num = mt7915_sku_group_len[i]; |
| |
| @@ -982,6 +989,12 @@ mt7915_rate_txpower_show(struct seq_file *file, void *data) |
| buf += mt7915_sku_group_len[i]; |
| } |
| |
| + reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_TPC_CTRL_STAT(phy->band_idx) : |
| + MT_WF_PHY_TPC_CTRL_STAT_MT7916(phy->band_idx); |
| + |
| + seq_printf(file, "\nBaseband transmit power %ld\n", |
| + mt76_get_field(dev, reg, MT_WF_PHY_TPC_POWER)); |
| + |
| return 0; |
| } |
| |
| diff --git a/mt7915/dma.c b/mt7915/dma.c |
| index 9a57ad8f..27b67800 100644 |
| --- a/mt7915/dma.c |
| +++ b/mt7915/dma.c |
| @@ -361,11 +361,18 @@ static int mt7915_dma_enable(struct mt7915_dev *dev) |
| |
| if (mtk_wed_device_active(&dev->mt76.mmio.wed)) { |
| u32 wed_irq_mask = irq_mask; |
| + int ret; |
| |
| wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1; |
| if (!is_mt7986(&dev->mt76)) |
| mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask); |
| - mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask); |
| + else |
| + mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask); |
| + |
| + ret = mt7915_mcu_wed_enable_rx_stats(dev); |
| + if (ret) |
| + return ret; |
| + |
| mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask); |
| } |
| |
| @@ -401,6 +408,9 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2) |
| FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) | |
| FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1, |
| wed_control_rx1)); |
| + if (is_mt7915(mdev)) |
| + mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP, |
| + MT_WFDMA0_EXT0_RXWB_KEEP); |
| } |
| } else { |
| mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED); |
| @@ -473,6 +483,13 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2) |
| |
| /* rx data queue for band0 */ |
| if (!dev->phy.band_idx) { |
| + if (mtk_wed_device_active(&mdev->mmio.wed) && |
| + mtk_wed_get_rx_capa(&mdev->mmio.wed)) { |
| + dev->mt76.q_rx[MT_RXQ_MAIN].flags = |
| + MT_WED_Q_RX(MT7915_RXQ_BAND0); |
| + dev->mt76.rx_token_size += MT7915_RX_RING_SIZE; |
| + } |
| + |
| ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], |
| MT_RXQ_ID(MT_RXQ_MAIN), |
| MT7915_RX_RING_SIZE, |
| @@ -503,6 +520,13 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2) |
| } |
| |
| if (dev->dbdc_support || dev->phy.band_idx) { |
| + if (mtk_wed_device_active(&mdev->mmio.wed) && |
| + mtk_wed_get_rx_capa(&mdev->mmio.wed)) { |
| + dev->mt76.q_rx[MT_RXQ_BAND1].flags = |
| + MT_WED_Q_RX(MT7915_RXQ_BAND1); |
| + dev->mt76.rx_token_size += MT7915_RX_RING_SIZE; |
| + } |
| + |
| /* rx data queue for band1 */ |
| ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1], |
| MT_RXQ_ID(MT_RXQ_BAND1), |
| diff --git a/mt7915/init.c b/mt7915/init.c |
| index 0a5f7d85..9e69ab82 100644 |
| --- a/mt7915/init.c |
| +++ b/mt7915/init.c |
| @@ -355,6 +355,9 @@ mt7915_init_wiphy(struct ieee80211_hw *hw) |
| wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY); |
| wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT); |
| |
| + if (!is_mt7915(&dev->mt76)) |
| + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR); |
| + |
| if (!mdev->dev->of_node || |
| !of_property_read_bool(mdev->dev->of_node, |
| "mediatek,disable-radar-background")) |
| diff --git a/mt7915/mac.c b/mt7915/mac.c |
| index 99123e77..97a19bdb 100644 |
| --- a/mt7915/mac.c |
| +++ b/mt7915/mac.c |
| @@ -165,9 +165,9 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev) |
| sta = container_of((void *)msta, struct ieee80211_sta, |
| drv_priv); |
| for (i = 0; i < IEEE80211_NUM_ACS; i++) { |
| - u8 q = mt76_connac_lmac_mapping(i); |
| - u32 tx_cur = tx_time[q]; |
| - u32 rx_cur = rx_time[q]; |
| + u8 queue = mt76_connac_lmac_mapping(i); |
| + u32 tx_cur = tx_time[queue]; |
| + u32 rx_cur = rx_time[queue]; |
| u8 tid = ac_to_tid[i]; |
| |
| if (!tx_cur && !rx_cur) |
| @@ -245,8 +245,38 @@ void mt7915_mac_enable_rtscts(struct mt7915_dev *dev, |
| mt76_clear(dev, addr, BIT(5)); |
| } |
| |
| +static void |
| +mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q, |
| + struct mt7915_sta *msta, struct sk_buff *skb, |
| + u32 info) |
| +{ |
| + struct ieee80211_vif *vif; |
| + struct wireless_dev *wdev; |
| + u32 hash, reason; |
| + |
| + if (!msta || !msta->vif) |
| + return; |
| + |
| + if (!(q->flags & MT_QFLAG_WED) || |
| + FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) != MT76_WED_Q_RX) |
| + return; |
| + |
| + if (!(info & MT_DMA_INFO_PPE_VLD)) |
| + return; |
| + |
| + vif = container_of((void *)msta->vif, struct ieee80211_vif, |
| + drv_priv); |
| + wdev = ieee80211_vif_to_wdev(vif); |
| + skb->dev = wdev->netdev; |
| + |
| + reason = FIELD_GET(MT_DMA_PPE_CPU_REASON, info); |
| + hash = FIELD_GET(MT_DMA_PPE_ENTRY, info); |
| + mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb, reason, hash); |
| +} |
| + |
| static int |
| -mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) |
| +mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb, |
| + enum mt76_rxq_id q, u32 *info) |
| { |
| struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; |
| struct mt76_phy *mphy = &dev->mt76.phy; |
| @@ -513,6 +543,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) |
| } |
| } else { |
| status->flag |= RX_FLAG_8023; |
| + mt7915_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb, |
| + *info); |
| } |
| |
| if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023)) |
| @@ -1096,7 +1128,7 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len) |
| } |
| |
| void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, |
| - struct sk_buff *skb) |
| + struct sk_buff *skb, u32 *info) |
| { |
| struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); |
| __le32 *rxd = (__le32 *)skb->data; |
| @@ -1130,7 +1162,7 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, |
| dev_kfree_skb(skb); |
| break; |
| case PKT_TYPE_NORMAL: |
| - if (!mt7915_mac_fill_rx(dev, skb)) { |
| + if (!mt7915_mac_fill_rx(dev, skb, q, info)) { |
| mt76_rx(&dev->mt76, q, skb); |
| return; |
| } |
| @@ -1228,18 +1260,18 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy) |
| MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); |
| } |
| |
| -void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy) |
| +void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool band) |
| { |
| u32 reg; |
| |
| - reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(ext_phy) : |
| - MT_WF_PHY_RXTD12_MT7916(ext_phy); |
| + reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(band) : |
| + MT_WF_PHY_RXTD12_MT7916(band); |
| mt76_set(dev, reg, |
| MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY | |
| MT_WF_PHY_RXTD12_IRPI_SW_CLR); |
| |
| - reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(ext_phy) : |
| - MT_WF_PHY_RX_CTRL1_MT7916(ext_phy); |
| + reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(band) : |
| + MT_WF_PHY_RX_CTRL1_MT7916(band); |
| mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5)); |
| } |
| |
| @@ -1354,7 +1386,6 @@ mt7915_mac_restart(struct mt7915_dev *dev) |
| struct mt76_phy *ext_phy; |
| struct mt76_dev *mdev = &dev->mt76; |
| int i, ret; |
| - u32 irq_mask; |
| |
| ext_phy = dev->mt76.phys[MT_BAND1]; |
| phy2 = ext_phy ? ext_phy->priv : NULL; |
| @@ -1412,7 +1443,7 @@ mt7915_mac_restart(struct mt7915_dev *dev) |
| mt76_wr(dev, MT_INT_SOURCE_CSR, ~0); |
| |
| if (dev->hif2) { |
| - mt76_wr(dev, MT_INT1_MASK_CSR, irq_mask); |
| + mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask); |
| mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0); |
| } |
| if (dev_is_pci(mdev->dev)) { |
| @@ -1949,7 +1980,6 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy) |
| static void mt7915_mac_severe_check(struct mt7915_phy *phy) |
| { |
| struct mt7915_dev *dev = phy->dev; |
| - bool ext_phy = phy != &dev->phy; |
| u32 trb; |
| |
| if (!phy->omac_mask) |
| @@ -1967,7 +1997,7 @@ static void mt7915_mac_severe_check(struct mt7915_phy *phy) |
| FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) && |
| trb == phy->trb_ts) |
| mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT, |
| - ext_phy); |
| + phy->band_idx); |
| |
| phy->trb_ts = trb; |
| } |
| diff --git a/mt7915/main.c b/mt7915/main.c |
| index fe5ec166..2505fa7e 100644 |
| --- a/mt7915/main.c |
| +++ b/mt7915/main.c |
| @@ -30,31 +30,31 @@ int mt7915_run(struct ieee80211_hw *hw) |
| running = mt7915_dev_running(dev); |
| |
| if (!running) { |
| - ret = mt76_connac_mcu_set_pm(&dev->mt76, 0, 0); |
| + ret = mt76_connac_mcu_set_pm(&dev->mt76, dev->phy.band_idx, 0); |
| if (ret) |
| goto out; |
| |
| - ret = mt7915_mcu_set_mac(dev, 0, true, true); |
| + ret = mt7915_mcu_set_mac(dev, dev->phy.band_idx, true, true); |
| if (ret) |
| goto out; |
| |
| - mt7915_mac_enable_nf(dev, 0); |
| + mt7915_mac_enable_nf(dev, dev->phy.band_idx); |
| } |
| |
| - if (phy != &dev->phy || phy->band_idx) { |
| - ret = mt76_connac_mcu_set_pm(&dev->mt76, 1, 0); |
| + if (phy != &dev->phy) { |
| + ret = mt76_connac_mcu_set_pm(&dev->mt76, phy->band_idx, 0); |
| if (ret) |
| goto out; |
| |
| - ret = mt7915_mcu_set_mac(dev, 1, true, true); |
| + ret = mt7915_mcu_set_mac(dev, phy->band_idx, true, true); |
| if (ret) |
| goto out; |
| |
| - mt7915_mac_enable_nf(dev, 1); |
| + mt7915_mac_enable_nf(dev, phy->band_idx); |
| } |
| |
| ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, |
| - phy != &dev->phy); |
| + phy->band_idx); |
| if (ret) |
| goto out; |
| |
| @@ -107,13 +107,13 @@ static void mt7915_stop(struct ieee80211_hw *hw) |
| clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); |
| |
| if (phy != &dev->phy) { |
| - mt76_connac_mcu_set_pm(&dev->mt76, 1, 1); |
| - mt7915_mcu_set_mac(dev, 1, false, false); |
| + mt76_connac_mcu_set_pm(&dev->mt76, phy->band_idx, 1); |
| + mt7915_mcu_set_mac(dev, phy->band_idx, false, false); |
| } |
| |
| if (!mt7915_dev_running(dev)) { |
| - mt76_connac_mcu_set_pm(&dev->mt76, 0, 1); |
| - mt7915_mcu_set_mac(dev, 0, false, false); |
| + mt76_connac_mcu_set_pm(&dev->mt76, dev->phy.band_idx, 1); |
| + mt7915_mcu_set_mac(dev, dev->phy.band_idx, false, false); |
| } |
| |
| mutex_unlock(&dev->mt76.mutex); |
| @@ -440,7 +440,6 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed) |
| { |
| struct mt7915_dev *dev = mt7915_hw_dev(hw); |
| struct mt7915_phy *phy = mt7915_hw_phy(hw); |
| - bool band = phy != &dev->phy; |
| int ret; |
| |
| if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { |
| @@ -468,6 +467,7 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed) |
| |
| if (changed & IEEE80211_CONF_CHANGE_MONITOR) { |
| bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR); |
| + bool band = phy->band_idx; |
| |
| if (!enabled) |
| phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC; |
| @@ -505,7 +505,7 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw, |
| { |
| struct mt7915_dev *dev = mt7915_hw_dev(hw); |
| struct mt7915_phy *phy = mt7915_hw_phy(hw); |
| - bool band = phy != &dev->phy; |
| + bool band = phy->band_idx; |
| u32 ctl_flags = MT_WF_RFCR1_DROP_ACK | |
| MT_WF_RFCR1_DROP_BF_POLL | |
| MT_WF_RFCR1_DROP_BA | |
| @@ -600,10 +600,8 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw, |
| mt7915_mcu_add_sta(dev, vif, NULL, join); |
| } |
| |
| - if (changed & BSS_CHANGED_ASSOC) { |
| + if (changed & BSS_CHANGED_ASSOC) |
| mt7915_mcu_add_bss_info(phy, vif, info->assoc); |
| - mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable); |
| - } |
| |
| if (changed & BSS_CHANGED_ERP_CTS_PROT) |
| mt7915_mac_enable_rtscts(dev, vif, info->use_cts_prot); |
| @@ -627,7 +625,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw, |
| mt7915_mcu_set_tx(dev, vif); |
| |
| if (changed & BSS_CHANGED_HE_OBSS_PD) |
| - mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable); |
| + mt7915_mcu_add_obss_spr(phy, vif, &info->he_obss_pd); |
| |
| if (changed & BSS_CHANGED_HE_BSS_COLOR) |
| mt7915_update_bss_color(hw, vif, &info->he_bss_color); |
| @@ -744,7 +742,7 @@ static int mt7915_set_rts_threshold(struct ieee80211_hw *hw, u32 val) |
| int ret; |
| |
| mutex_lock(&dev->mt76.mutex); |
| - ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, phy != &dev->phy); |
| + ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, phy->band_idx); |
| mutex_unlock(&dev->mt76.mutex); |
| |
| return ret; |
| @@ -847,7 +845,7 @@ u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif) |
| { |
| struct mt7915_dev *dev = mt7915_hw_dev(hw); |
| struct mt7915_phy *phy = mt7915_hw_phy(hw); |
| - bool band = phy != &dev->phy; |
| + bool band = phy->band_idx; |
| union { |
| u64 t64; |
| u32 t32[2]; |
| @@ -892,7 +890,7 @@ mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
| struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; |
| struct mt7915_dev *dev = mt7915_hw_dev(hw); |
| struct mt7915_phy *phy = mt7915_hw_phy(hw); |
| - bool band = phy != &dev->phy; |
| + bool band = phy->band_idx; |
| union { |
| u64 t64; |
| u32 t32[2]; |
| @@ -923,7 +921,7 @@ mt7915_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
| struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; |
| struct mt7915_dev *dev = mt7915_hw_dev(hw); |
| struct mt7915_phy *phy = mt7915_hw_phy(hw); |
| - bool band = phy != &dev->phy; |
| + bool band = phy->band_idx; |
| union { |
| u64 t64; |
| u32 t32[2]; |
| @@ -1036,6 +1034,14 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw, |
| |
| sinfo->tx_retries = msta->wcid.stats.tx_retries; |
| sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); |
| + |
| + if (mtk_wed_get_rx_capa(&phy->dev->mt76.mmio.wed)) { |
| + sinfo->rx_bytes = msta->wcid.stats.rx_bytes; |
| + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); |
| + |
| + sinfo->rx_packets = msta->wcid.stats.rx_packets; |
| + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); |
| + } |
| } |
| |
| sinfo->ack_signal = (s8)msta->ack_signal; |
| @@ -1127,6 +1133,39 @@ static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw, |
| mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta); |
| } |
| |
| +static int mt7915_sta_set_txpwr(struct ieee80211_hw *hw, |
| + struct ieee80211_vif *vif, |
| + struct ieee80211_sta *sta) |
| +{ |
| + struct mt7915_phy *phy = mt7915_hw_phy(hw); |
| + struct mt7915_dev *dev = mt7915_hw_dev(hw); |
| + s16 txpower = sta->txpwr.power; |
| + int ret; |
| + |
| + if (sta->txpwr.type == NL80211_TX_POWER_AUTOMATIC) |
| + txpower = 0; |
| + |
| + mutex_lock(&dev->mt76.mutex); |
| + |
| + /* NOTE: temporarily use 0 as minimum limit, which is a |
| + * global setting and will be applied to all stations. |
| + */ |
| + ret = mt7915_mcu_set_txpower_frame_min(phy, 0); |
| + if (ret) |
| + goto out; |
| + |
| + /* This only applies to data frames while pushing traffic, |
| + * whereas the management frames or other packets that are |
| + * using fixed rate can be configured via TxD. |
| + */ |
| + ret = mt7915_mcu_set_txpower_frame(phy, vif, sta, txpower); |
| + |
| +out: |
| + mutex_unlock(&dev->mt76.mutex); |
| + |
| + return ret; |
| +} |
| + |
| static const char mt7915_gstrings_stats[][ETH_GSTRING_LEN] = { |
| "tx_ampdu_cnt", |
| "tx_stop_q_empty_cnt", |
| @@ -1492,6 +1531,7 @@ const struct ieee80211_ops mt7915_ops = { |
| .set_bitrate_mask = mt7915_set_bitrate_mask, |
| .set_coverage_class = mt7915_set_coverage_class, |
| .sta_statistics = mt7915_sta_statistics, |
| + .sta_set_txpwr = mt7915_sta_set_txpwr, |
| .sta_set_4addr = mt7915_sta_set_4addr, |
| .sta_set_decap_offload = mt7915_sta_set_decap_offload, |
| .add_twt_setup = mt7915_mac_add_twt_setup, |
| diff --git a/mt7915/mcu.c b/mt7915/mcu.c |
| index 09e3dd8e..36c21596 100644 |
| --- a/mt7915/mcu.c |
| +++ b/mt7915/mcu.c |
| @@ -32,6 +32,10 @@ |
| #define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p) |
| #define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m) |
| |
| +static bool sr_scene_detect = true; |
| +module_param(sr_scene_detect, bool, 0644); |
| +MODULE_PARM_DESC(sr_scene_detect, "Enable firmware scene detection algorithm"); |
| + |
| static u8 |
| mt7915_mcu_get_sta_nss(u16 mcs_map) |
| { |
| @@ -595,7 +599,7 @@ mt7915_mcu_muar_config(struct mt7915_phy *phy, struct ieee80211_vif *vif, |
| .mode = !!mask || enable, |
| .entry_count = 1, |
| .write = 1, |
| - .band = phy != &dev->phy, |
| + .band = phy->band_idx, |
| .index = idx * 2 + bssid, |
| }; |
| |
| @@ -1131,7 +1135,7 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif, |
| mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_160); |
| nss_mcs = mt7915_mcu_get_sta_nss(mcs_map); |
| |
| - bf->ncol_bw160 = nss_mcs; |
| + bf->ncol_gt_bw80 = nss_mcs; |
| } |
| |
| if (pe->phy_cap_info[0] & |
| @@ -1139,10 +1143,10 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif, |
| mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80p80); |
| nss_mcs = mt7915_mcu_get_sta_nss(mcs_map); |
| |
| - if (bf->ncol_bw160) |
| - bf->ncol_bw160 = min_t(u8, bf->ncol_bw160, nss_mcs); |
| + if (bf->ncol_gt_bw80) |
| + bf->ncol_gt_bw80 = min_t(u8, bf->ncol_gt_bw80, nss_mcs); |
| else |
| - bf->ncol_bw160 = nss_mcs; |
| + bf->ncol_gt_bw80 = nss_mcs; |
| } |
| |
| snd_dim = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK, |
| @@ -1150,7 +1154,7 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif, |
| sts = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK, |
| pe->phy_cap_info[4]); |
| |
| - bf->nrow_bw160 = min_t(int, snd_dim, sts); |
| + bf->nrow_gt_bw80 = min_t(int, snd_dim, sts); |
| } |
| |
| static void |
| @@ -1677,10 +1681,32 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, |
| return ret; |
| } |
| out: |
| + ret = mt76_connac_mcu_sta_wed_update(&dev->mt76, skb); |
| + if (ret) |
| + return ret; |
| + |
| return mt76_mcu_skb_send_msg(&dev->mt76, skb, |
| MCU_EXT_CMD(STA_REC_UPDATE), true); |
| } |
| |
| +int mt7915_mcu_wed_enable_rx_stats(struct mt7915_dev *dev) |
| +{ |
| +#ifdef CONFIG_NET_MEDIATEK_SOC_WED |
| + struct mtk_wed_device *wed = &dev->mt76.mmio.wed; |
| + struct { |
| + __le32 args[2]; |
| + } req = { |
| + .args[0] = cpu_to_le32(1), |
| + .args[1] = cpu_to_le32(6), |
| + }; |
| + |
| + return mtk_wed_device_update_msg(wed, MTK_WED_WO_CMD_RXCNT_CTRL, |
| + &req, sizeof(req)); |
| +#else |
| + return 0; |
| +#endif |
| +} |
| + |
| int mt7915_mcu_add_dev_info(struct mt7915_phy *phy, |
| struct ieee80211_vif *vif, bool enable) |
| { |
| @@ -1689,7 +1715,7 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy, |
| struct { |
| struct req_hdr { |
| u8 omac_idx; |
| - u8 dbdc_idx; |
| + u8 band_idx; |
| __le16 tlv_num; |
| u8 is_tlv_append; |
| u8 rsv[3]; |
| @@ -1698,13 +1724,13 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy, |
| __le16 tag; |
| __le16 len; |
| u8 active; |
| - u8 dbdc_idx; |
| + u8 band_idx; |
| u8 omac_addr[ETH_ALEN]; |
| } __packed tlv; |
| } data = { |
| .hdr = { |
| .omac_idx = mvif->mt76.omac_idx, |
| - .dbdc_idx = mvif->mt76.band_idx, |
| + .band_idx = mvif->mt76.band_idx, |
| .tlv_num = cpu_to_le16(1), |
| .is_tlv_append = 1, |
| }, |
| @@ -1712,7 +1738,7 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy, |
| .tag = cpu_to_le16(DEV_INFO_ACTIVE), |
| .len = cpu_to_le16(sizeof(struct req_tlv)), |
| .active = enable, |
| - .dbdc_idx = mvif->mt76.band_idx, |
| + .band_idx = mvif->mt76.band_idx, |
| }, |
| }; |
| |
| @@ -2559,7 +2585,7 @@ mt7915_mcu_background_chain_ctrl(struct mt7915_phy *phy, |
| req.monitor_central_chan = |
| ieee80211_frequency_to_channel(chandef->center_freq1); |
| req.monitor_bw = mt76_connac_chan_bw(chandef); |
| - req.band_idx = phy != &dev->phy; |
| + req.band_idx = phy->band_idx; |
| req.scan_mode = 1; |
| break; |
| } |
| @@ -2567,7 +2593,7 @@ mt7915_mcu_background_chain_ctrl(struct mt7915_phy *phy, |
| req.monitor_chan = chandef->chan->hw_value; |
| req.monitor_central_chan = |
| ieee80211_frequency_to_channel(chandef->center_freq1); |
| - req.band_idx = phy != &dev->phy; |
| + req.band_idx = phy->band_idx; |
| req.scan_mode = 2; |
| break; |
| case CH_SWITCH_BACKGROUND_SCAN_STOP: |
| @@ -2971,7 +2997,7 @@ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch) |
| } |
| |
| for (i = 0; i < 5; i++) { |
| - req[i].band = cpu_to_le32(phy != &dev->phy); |
| + req[i].band = cpu_to_le32(phy->band_idx); |
| req[i].offs = cpu_to_le32(offs[i + start]); |
| |
| if (!is_mt7915(&dev->mt76) && i == 3) |
| @@ -3016,11 +3042,11 @@ int mt7915_mcu_get_temperature(struct mt7915_phy *phy) |
| struct { |
| u8 ctrl_id; |
| u8 action; |
| - u8 dbdc_idx; |
| + u8 band_idx; |
| u8 rsv[5]; |
| } req = { |
| .ctrl_id = THERMAL_SENSOR_TEMP_QUERY, |
| - .dbdc_idx = phy != &dev->phy, |
| + .band_idx = phy->band_idx, |
| }; |
| |
| return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL), &req, |
| @@ -3079,6 +3105,88 @@ out: |
| &req, sizeof(req), false); |
| } |
| |
| +int mt7915_mcu_set_txpower_frame_min(struct mt7915_phy *phy, s8 txpower) |
| +{ |
| + struct mt7915_dev *dev = phy->dev; |
| + struct { |
| + u8 format_id; |
| + u8 rsv; |
| + u8 band_idx; |
| + s8 txpower_min; |
| + } __packed req = { |
| + .format_id = TX_POWER_LIMIT_FRAME_MIN, |
| + .band_idx = phy->band_idx, |
| + .txpower_min = txpower * 2, /* 0.5db */ |
| + }; |
| + |
| + return mt76_mcu_send_msg(&dev->mt76, |
| + MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req, |
| + sizeof(req), true); |
| +} |
| + |
| +int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy, |
| + struct ieee80211_vif *vif, |
| + struct ieee80211_sta *sta, s8 txpower) |
| +{ |
| + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; |
| + struct mt7915_dev *dev = phy->dev; |
| + struct mt76_phy *mphy = phy->mt76; |
| + struct { |
| + u8 format_id; |
| + u8 rsv[3]; |
| + u8 band_idx; |
| + s8 txpower_max; |
| + __le16 wcid; |
| + s8 txpower_offs[48]; |
| + } __packed req = { |
| + .format_id = TX_POWER_LIMIT_FRAME, |
| + .band_idx = phy->band_idx, |
| + .txpower_max = DIV_ROUND_UP(mphy->txpower_cur, 2), |
| + .wcid = cpu_to_le16(msta->wcid.idx), |
| + }; |
| + int ret, n_chains = hweight8(mphy->antenna_mask); |
| + s8 txpower_sku[MT7915_SKU_RATE_NUM]; |
| + |
| + ret = mt7915_mcu_get_txpower_sku(phy, txpower_sku, sizeof(txpower_sku)); |
| + if (ret) |
| + return ret; |
| + |
| + txpower = txpower * 2 - mt76_tx_power_nss_delta(n_chains); |
| + if (txpower > mphy->txpower_cur || txpower < 0) |
| + return -EINVAL; |
| + |
| + if (txpower) { |
| + u32 offs, len, i; |
| + |
| + if (sta->ht_cap.ht_supported) { |
| + const u8 *sku_len = mt7915_sku_group_len; |
| + |
| + offs = sku_len[SKU_CCK] + sku_len[SKU_OFDM]; |
| + len = sku_len[SKU_HT_BW20] + sku_len[SKU_HT_BW40]; |
| + |
| + if (sta->vht_cap.vht_supported) { |
| + offs += len; |
| + len = sku_len[SKU_VHT_BW20] * 4; |
| + |
| + if (sta->he_cap.has_he) { |
| + offs += len + sku_len[SKU_HE_RU26] * 3; |
| + len = sku_len[SKU_HE_RU242] * 4; |
| + } |
| + } |
| + } else { |
| + return -EINVAL; |
| + } |
| + |
| + for (i = 0; i < len; i++, offs++) |
| + req.txpower_offs[i] = |
| + DIV_ROUND_UP(txpower - txpower_sku[offs], 2); |
| + } |
| + |
| + return mt76_mcu_send_msg(&dev->mt76, |
| + MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req, |
| + sizeof(req), true); |
| +} |
| + |
| int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy) |
| { |
| struct mt7915_dev *dev = phy->dev; |
| @@ -3087,11 +3195,11 @@ int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy) |
| struct mt7915_sku_val { |
| u8 format_id; |
| u8 limit_type; |
| - u8 dbdc_idx; |
| + u8 band_idx; |
| s8 val[MT7915_SKU_RATE_NUM]; |
| } __packed req = { |
| - .format_id = 4, |
| - .dbdc_idx = phy != &dev->phy, |
| + .format_id = TX_POWER_LIMIT_TABLE, |
| + .band_idx = phy->band_idx, |
| }; |
| struct mt76_power_limits limits_array; |
| s8 *la = (s8 *)&limits_array; |
| @@ -3137,14 +3245,14 @@ int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len) |
| struct { |
| u8 format_id; |
| u8 category; |
| - u8 band; |
| + u8 band_idx; |
| u8 _rsv; |
| } __packed req = { |
| - .format_id = 7, |
| + .format_id = TX_POWER_LIMIT_INFO, |
| .category = RATE_POWER_INFO, |
| - .band = phy != &dev->phy, |
| + .band_idx = phy->band_idx, |
| }; |
| - s8 res[MT7915_SKU_RATE_NUM][2]; |
| + s8 txpower_sku[MT7915_SKU_RATE_NUM][2]; |
| struct sk_buff *skb; |
| int ret, i; |
| |
| @@ -3154,9 +3262,9 @@ int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len) |
| if (ret) |
| return ret; |
| |
| - memcpy(res, skb->data + 4, sizeof(res)); |
| + memcpy(txpower_sku, skb->data + 4, sizeof(txpower_sku)); |
| for (i = 0; i < len; i++) |
| - txpower[i] = res[i][req.band]; |
| + txpower[i] = txpower_sku[i][req.band_idx]; |
| |
| dev_kfree_skb(skb); |
| |
| @@ -3191,11 +3299,11 @@ int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable) |
| struct mt7915_sku { |
| u8 format_id; |
| u8 sku_enable; |
| - u8 dbdc_idx; |
| + u8 band_idx; |
| u8 rsv; |
| } __packed req = { |
| - .format_id = 0, |
| - .dbdc_idx = phy != &dev->phy, |
| + .format_id = TX_POWER_LIMIT_ENABLE, |
| + .band_idx = phy->band_idx, |
| .sku_enable = enable, |
| }; |
| |
| @@ -3270,31 +3378,193 @@ int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action) |
| sizeof(req), true); |
| } |
| |
| -int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif, |
| - bool enable) |
| +static int |
| +mt7915_mcu_enable_obss_spr(struct mt7915_phy *phy, u8 action, u8 val) |
| +{ |
| + struct mt7915_dev *dev = phy->dev; |
| + struct mt7915_mcu_sr_ctrl req = { |
| + .action = action, |
| + .argnum = 1, |
| + .band_idx = phy->band_idx, |
| + .val = cpu_to_le32(val), |
| + }; |
| + |
| + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req, |
| + sizeof(req), true); |
| +} |
| + |
| +static int |
| +mt7915_mcu_set_obss_spr_pd(struct mt7915_phy *phy, |
| + struct ieee80211_he_obss_pd *he_obss_pd) |
| +{ |
| + struct mt7915_dev *dev = phy->dev; |
| + struct { |
| + struct mt7915_mcu_sr_ctrl ctrl; |
| + struct { |
| + u8 pd_th_non_srg; |
| + u8 pd_th_srg; |
| + u8 period_offs; |
| + u8 rcpi_src; |
| + __le16 obss_pd_min; |
| + __le16 obss_pd_min_srg; |
| + u8 resp_txpwr_mode; |
| + u8 txpwr_restrict_mode; |
| + u8 txpwr_ref; |
| + u8 rsv[3]; |
| + } __packed param; |
| + } __packed req = { |
| + .ctrl = { |
| + .action = SPR_SET_PARAM, |
| + .argnum = 9, |
| + .band_idx = phy->band_idx, |
| + }, |
| + }; |
| + int ret; |
| + u8 max_th = 82, non_srg_max_th = 62; |
| + |
| + /* disable firmware dynamic PD adjustment */ |
| + ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_DPD, false); |
| + if (ret) |
| + return ret; |
| + |
| + if (he_obss_pd->sr_ctrl & |
| + IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED) |
| + req.param.pd_th_non_srg = max_th; |
| + else if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT) |
| + req.param.pd_th_non_srg = max_th - he_obss_pd->non_srg_max_offset; |
| + else |
| + req.param.pd_th_non_srg = non_srg_max_th; |
| + |
| + if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) |
| + req.param.pd_th_srg = max_th - he_obss_pd->max_offset; |
| + |
| + req.param.obss_pd_min = 82; |
| + req.param.obss_pd_min_srg = 82; |
| + req.param.txpwr_restrict_mode = 2; |
| + req.param.txpwr_ref = 21; |
| + |
| + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req, |
| + sizeof(req), true); |
| +} |
| + |
| +static int |
| +mt7915_mcu_set_obss_spr_siga(struct mt7915_phy *phy, struct ieee80211_vif *vif, |
| + struct ieee80211_he_obss_pd *he_obss_pd) |
| { |
| -#define MT_SPR_ENABLE 1 |
| struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; |
| + struct mt7915_dev *dev = phy->dev; |
| + u8 omac = mvif->mt76.omac_idx; |
| struct { |
| - u8 action; |
| - u8 arg_num; |
| - u8 band_idx; |
| - u8 status; |
| - u8 drop_tx_idx; |
| - u8 sta_idx; /* 256 sta */ |
| - u8 rsv[2]; |
| - __le32 val; |
| + struct mt7915_mcu_sr_ctrl ctrl; |
| + struct { |
| + u8 omac; |
| + u8 rsv[3]; |
| + u8 flag[20]; |
| + } __packed siga; |
| } __packed req = { |
| - .action = MT_SPR_ENABLE, |
| - .arg_num = 1, |
| - .band_idx = mvif->mt76.band_idx, |
| - .val = cpu_to_le32(enable), |
| + .ctrl = { |
| + .action = SPR_SET_SIGA, |
| + .argnum = 1, |
| + .band_idx = phy->band_idx, |
| + }, |
| + .siga = { |
| + .omac = omac > HW_BSSID_MAX ? omac - 12 : omac, |
| + }, |
| }; |
| + int ret; |
| + |
| + if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED) |
| + req.siga.flag[req.siga.omac] = 0xf; |
| + else |
| + return 0; |
| + |
| + /* switch to normal AP mode */ |
| + ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_MODE, 0); |
| + if (ret) |
| + return ret; |
| |
| return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req, |
| sizeof(req), true); |
| } |
| |
| +static int |
| +mt7915_mcu_set_obss_spr_bitmap(struct mt7915_phy *phy, |
| + struct ieee80211_he_obss_pd *he_obss_pd) |
| +{ |
| + struct mt7915_dev *dev = phy->dev; |
| + struct { |
| + struct mt7915_mcu_sr_ctrl ctrl; |
| + struct { |
| + __le32 color_l[2]; |
| + __le32 color_h[2]; |
| + __le32 bssid_l[2]; |
| + __le32 bssid_h[2]; |
| + } __packed bitmap; |
| + } __packed req = { |
| + .ctrl = { |
| + .action = SPR_SET_SRG_BITMAP, |
| + .argnum = 4, |
| + .band_idx = phy->band_idx, |
| + }, |
| + }; |
| + u32 bitmap; |
| + |
| + memcpy(&bitmap, he_obss_pd->bss_color_bitmap, sizeof(bitmap)); |
| + req.bitmap.color_l[req.ctrl.band_idx] = cpu_to_le32(bitmap); |
| + |
| + memcpy(&bitmap, he_obss_pd->bss_color_bitmap + 4, sizeof(bitmap)); |
| + req.bitmap.color_h[req.ctrl.band_idx] = cpu_to_le32(bitmap); |
| + |
| + memcpy(&bitmap, he_obss_pd->partial_bssid_bitmap, sizeof(bitmap)); |
| + req.bitmap.bssid_l[req.ctrl.band_idx] = cpu_to_le32(bitmap); |
| + |
| + memcpy(&bitmap, he_obss_pd->partial_bssid_bitmap + 4, sizeof(bitmap)); |
| + req.bitmap.bssid_h[req.ctrl.band_idx] = cpu_to_le32(bitmap); |
| + |
| + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req, |
| + sizeof(req), true); |
| +} |
| + |
| +int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif, |
| + struct ieee80211_he_obss_pd *he_obss_pd) |
| +{ |
| + int ret; |
| + |
| + /* enable firmware scene detection algorithms */ |
| + ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_SD, sr_scene_detect); |
| + if (ret) |
| + return ret; |
| + |
| + /* enable spatial reuse */ |
| + ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE, he_obss_pd->enable); |
| + if (ret) |
| + return ret; |
| + |
| + if (!he_obss_pd->enable) |
| + return 0; |
| + |
| + ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_TX, true); |
| + if (ret) |
| + return ret; |
| + |
| + /* firmware dynamically adjusts PD threshold so skip manual control */ |
| + if (sr_scene_detect) |
| + return 0; |
| + |
| + /* set SRG/non-SRG OBSS PD threshold */ |
| + ret = mt7915_mcu_set_obss_spr_pd(phy, he_obss_pd); |
| + if (ret) |
| + return ret; |
| + |
| + /* set SR prohibit */ |
| + ret = mt7915_mcu_set_obss_spr_siga(phy, vif, he_obss_pd); |
| + if (ret) |
| + return ret; |
| + |
| + /* set SRG BSS color/BSSID bitmap */ |
| + return mt7915_mcu_set_obss_spr_bitmap(phy, he_obss_pd); |
| +} |
| + |
| int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif, |
| struct ieee80211_sta *sta, struct rate_info *rate) |
| { |
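| |
| Usage sketch: the reworked mt7915_mcu_add_obss_spr() now takes the phy and the raw |
| ieee80211_he_obss_pd element, so it is meant to be driven from the per-vif BSS info |
| path. The example below is a minimal, hypothetical caller (the function name and the |
| exact hook-up are assumptions, not part of this diff): |
| |
| static void example_he_obss_pd_changed(struct ieee80211_hw *hw, |
|                                        struct ieee80211_vif *vif, |
|                                        struct ieee80211_bss_conf *info) |
| { |
|         struct mt7915_phy *phy = mt7915_hw_phy(hw); |
| |
|         /* forward the HE OBSS PD element from mac80211 to the new per-phy |
|          * MCU helper; it enables SR and, unless scene detection is active, |
|          * programs the PD thresholds, SIG-A flags and SRG bitmaps. |
|          */ |
|         if (mt7915_mcu_add_obss_spr(phy, vif, &info->he_obss_pd)) |
|                 dev_err(phy->dev->mt76.dev, "failed to configure OBSS PD\n"); |
| } |
| |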
| diff --git a/mt7915/mcu.h b/mt7915/mcu.h |
| index c19b5d66..46c517e5 100644 |
| --- a/mt7915/mcu.h |
| +++ b/mt7915/mcu.h |
| @@ -129,6 +129,17 @@ struct mt7915_mcu_background_chain_ctrl { |
| u8 rsv[2]; |
| } __packed; |
| |
| +struct mt7915_mcu_sr_ctrl { |
| + u8 action; |
| + u8 argnum; |
| + u8 band_idx; |
| + u8 status; |
| + u8 drop_ta_idx; |
| + u8 sta_idx; /* 256 sta */ |
| + u8 rsv[2]; |
| + __le32 val; |
| +} __packed; |
| + |
| struct mt7915_mcu_eeprom { |
| u8 buffer_mode; |
| u8 format; |
| @@ -408,6 +419,25 @@ enum { |
| #define RATE_CFG_PHY_TYPE GENMASK(27, 24) |
| #define RATE_CFG_HE_LTF GENMASK(31, 28) |
| |
| +enum { |
| + TX_POWER_LIMIT_ENABLE, |
| + TX_POWER_LIMIT_TABLE = 0x4, |
| + TX_POWER_LIMIT_INFO = 0x7, |
| + TX_POWER_LIMIT_FRAME = 0x11, |
| + TX_POWER_LIMIT_FRAME_MIN = 0x12, |
| +}; |
| + |
| +enum { |
| + SPR_ENABLE = 0x1, |
| + SPR_ENABLE_SD = 0x3, |
| + SPR_ENABLE_MODE = 0x5, |
| + SPR_ENABLE_DPD = 0x23, |
| + SPR_ENABLE_TX = 0x25, |
| + SPR_SET_SRG_BITMAP = 0x80, |
| + SPR_SET_PARAM = 0xc2, |
| + SPR_SET_SIGA = 0xdc, |
| +}; |
| + |
| enum { |
| THERMAL_PROTECT_PARAMETER_CTRL, |
| THERMAL_PROTECT_BASIC_INFO, |
| diff --git a/mt7915/mmio.c b/mt7915/mmio.c |
| index 3c840853..3b4ede3b 100644 |
| --- a/mt7915/mmio.c |
| +++ b/mt7915/mmio.c |
| @@ -9,107 +9,112 @@ |
| #include "mt7915.h" |
| #include "mac.h" |
| #include "../trace.h" |
| +#include "../dma.h" |
| |
| static bool wed_enable; |
| module_param(wed_enable, bool, 0644); |
| +MODULE_PARM_DESC(wed_enable, "Enable Wireless Ethernet Dispatch support"); |
| |
| static const u32 mt7915_reg[] = { |
| - [INT_SOURCE_CSR] = 0xd7010, |
| - [INT_MASK_CSR] = 0xd7014, |
| - [INT1_SOURCE_CSR] = 0xd7088, |
| - [INT1_MASK_CSR] = 0xd708c, |
| - [INT_MCU_CMD_SOURCE] = 0xd51f0, |
| - [INT_MCU_CMD_EVENT] = 0x3108, |
| - [WFDMA0_ADDR] = 0xd4000, |
| - [WFDMA0_PCIE1_ADDR] = 0xd8000, |
| - [WFDMA_EXT_CSR_ADDR] = 0xd7000, |
| - [CBTOP1_PHY_END] = 0x77ffffff, |
| - [INFRA_MCU_ADDR_END] = 0x7c3fffff, |
| - [FW_ASSERT_STAT_ADDR] = 0x219848, |
| - [FW_EXCEPT_TYPE_ADDR] = 0x21987c, |
| - [FW_EXCEPT_COUNT_ADDR] = 0x219848, |
| - [FW_CIRQ_COUNT_ADDR] = 0x216f94, |
| - [FW_CIRQ_IDX_ADDR] = 0x216ef8, |
| - [FW_CIRQ_LISR_ADDR] = 0x2170ac, |
| - [FW_TASK_ID_ADDR] = 0x216f90, |
| - [FW_TASK_IDX_ADDR] = 0x216f9c, |
| - [FW_TASK_QID1_ADDR] = 0x219680, |
| - [FW_TASK_QID2_ADDR] = 0x219760, |
| - [FW_TASK_START_ADDR] = 0x219558, |
| - [FW_TASK_END_ADDR] = 0x219554, |
| - [FW_TASK_SIZE_ADDR] = 0x219560, |
| - [FW_LAST_MSG_ID_ADDR] = 0x216f70, |
| - [FW_EINT_INFO_ADDR] = 0x219818, |
| - [FW_SCHED_INFO_ADDR] = 0x219828, |
| - [SWDEF_BASE_ADDR] = 0x41f200, |
| - [TXQ_WED_RING_BASE] = 0xd7300, |
| - [RXQ_WED_RING_BASE] = 0xd7410, |
| + [INT_SOURCE_CSR] = 0xd7010, |
| + [INT_MASK_CSR] = 0xd7014, |
| + [INT1_SOURCE_CSR] = 0xd7088, |
| + [INT1_MASK_CSR] = 0xd708c, |
| + [INT_MCU_CMD_SOURCE] = 0xd51f0, |
| + [INT_MCU_CMD_EVENT] = 0x3108, |
| + [WFDMA0_ADDR] = 0xd4000, |
| + [WFDMA0_PCIE1_ADDR] = 0xd8000, |
| + [WFDMA_EXT_CSR_ADDR] = 0xd7000, |
| + [CBTOP1_PHY_END] = 0x77ffffff, |
| + [INFRA_MCU_ADDR_END] = 0x7c3fffff, |
| + [FW_ASSERT_STAT_ADDR] = 0x219848, |
| + [FW_EXCEPT_TYPE_ADDR] = 0x21987c, |
| + [FW_EXCEPT_COUNT_ADDR] = 0x219848, |
| + [FW_CIRQ_COUNT_ADDR] = 0x216f94, |
| + [FW_CIRQ_IDX_ADDR] = 0x216ef8, |
| + [FW_CIRQ_LISR_ADDR] = 0x2170ac, |
| + [FW_TASK_ID_ADDR] = 0x216f90, |
| + [FW_TASK_IDX_ADDR] = 0x216f9c, |
| + [FW_TASK_QID1_ADDR] = 0x219680, |
| + [FW_TASK_QID2_ADDR] = 0x219760, |
| + [FW_TASK_START_ADDR] = 0x219558, |
| + [FW_TASK_END_ADDR] = 0x219554, |
| + [FW_TASK_SIZE_ADDR] = 0x219560, |
| + [FW_LAST_MSG_ID_ADDR] = 0x216f70, |
| + [FW_EINT_INFO_ADDR] = 0x219818, |
| + [FW_SCHED_INFO_ADDR] = 0x219828, |
| + [SWDEF_BASE_ADDR] = 0x41f200, |
| + [TXQ_WED_RING_BASE] = 0xd7300, |
| + [RXQ_WED_RING_BASE] = 0xd7410, |
| + [RXQ_WED_DATA_RING_BASE] = 0xd4500, |
| }; |
| |
| static const u32 mt7916_reg[] = { |
| - [INT_SOURCE_CSR] = 0xd4200, |
| - [INT_MASK_CSR] = 0xd4204, |
| - [INT1_SOURCE_CSR] = 0xd8200, |
| - [INT1_MASK_CSR] = 0xd8204, |
| - [INT_MCU_CMD_SOURCE] = 0xd41f0, |
| - [INT_MCU_CMD_EVENT] = 0x2108, |
| - [WFDMA0_ADDR] = 0xd4000, |
| - [WFDMA0_PCIE1_ADDR] = 0xd8000, |
| - [WFDMA_EXT_CSR_ADDR] = 0xd7000, |
| - [CBTOP1_PHY_END] = 0x7fffffff, |
| - [INFRA_MCU_ADDR_END] = 0x7c085fff, |
| - [FW_ASSERT_STAT_ADDR] = 0x02204c14, |
| - [FW_EXCEPT_TYPE_ADDR] = 0x022051a4, |
| - [FW_EXCEPT_COUNT_ADDR] = 0x022050bc, |
| - [FW_CIRQ_COUNT_ADDR] = 0x022001ac, |
| - [FW_CIRQ_IDX_ADDR] = 0x02204f84, |
| - [FW_CIRQ_LISR_ADDR] = 0x022050d0, |
| - [FW_TASK_ID_ADDR] = 0x0220406c, |
| - [FW_TASK_IDX_ADDR] = 0x0220500c, |
| - [FW_TASK_QID1_ADDR] = 0x022028c8, |
| - [FW_TASK_QID2_ADDR] = 0x02202a38, |
| - [FW_TASK_START_ADDR] = 0x0220286c, |
| - [FW_TASK_END_ADDR] = 0x02202870, |
| - [FW_TASK_SIZE_ADDR] = 0x02202878, |
| - [FW_LAST_MSG_ID_ADDR] = 0x02204fe8, |
| - [FW_EINT_INFO_ADDR] = 0x0220525c, |
| - [FW_SCHED_INFO_ADDR] = 0x0220516c, |
| - [SWDEF_BASE_ADDR] = 0x411400, |
| - [TXQ_WED_RING_BASE] = 0xd7300, |
| - [RXQ_WED_RING_BASE] = 0xd7410, |
| + [INT_SOURCE_CSR] = 0xd4200, |
| + [INT_MASK_CSR] = 0xd4204, |
| + [INT1_SOURCE_CSR] = 0xd8200, |
| + [INT1_MASK_CSR] = 0xd8204, |
| + [INT_MCU_CMD_SOURCE] = 0xd41f0, |
| + [INT_MCU_CMD_EVENT] = 0x2108, |
| + [WFDMA0_ADDR] = 0xd4000, |
| + [WFDMA0_PCIE1_ADDR] = 0xd8000, |
| + [WFDMA_EXT_CSR_ADDR] = 0xd7000, |
| + [CBTOP1_PHY_END] = 0x7fffffff, |
| + [INFRA_MCU_ADDR_END] = 0x7c085fff, |
| + [FW_ASSERT_STAT_ADDR] = 0x02204c14, |
| + [FW_EXCEPT_TYPE_ADDR] = 0x022051a4, |
| + [FW_EXCEPT_COUNT_ADDR] = 0x022050bc, |
| + [FW_CIRQ_COUNT_ADDR] = 0x022001ac, |
| + [FW_CIRQ_IDX_ADDR] = 0x02204f84, |
| + [FW_CIRQ_LISR_ADDR] = 0x022050d0, |
| + [FW_TASK_ID_ADDR] = 0x0220406c, |
| + [FW_TASK_IDX_ADDR] = 0x0220500c, |
| + [FW_TASK_QID1_ADDR] = 0x022028c8, |
| + [FW_TASK_QID2_ADDR] = 0x02202a38, |
| + [FW_TASK_START_ADDR] = 0x0220286c, |
| + [FW_TASK_END_ADDR] = 0x02202870, |
| + [FW_TASK_SIZE_ADDR] = 0x02202878, |
| + [FW_LAST_MSG_ID_ADDR] = 0x02204fe8, |
| + [FW_EINT_INFO_ADDR] = 0x0220525c, |
| + [FW_SCHED_INFO_ADDR] = 0x0220516c, |
| + [SWDEF_BASE_ADDR] = 0x411400, |
| + [TXQ_WED_RING_BASE] = 0xd7300, |
| + [RXQ_WED_RING_BASE] = 0xd7410, |
| + [RXQ_WED_DATA_RING_BASE] = 0xd4540, |
| }; |
| |
| static const u32 mt7986_reg[] = { |
| - [INT_SOURCE_CSR] = 0x24200, |
| - [INT_MASK_CSR] = 0x24204, |
| - [INT1_SOURCE_CSR] = 0x28200, |
| - [INT1_MASK_CSR] = 0x28204, |
| - [INT_MCU_CMD_SOURCE] = 0x241f0, |
| - [INT_MCU_CMD_EVENT] = 0x54000108, |
| - [WFDMA0_ADDR] = 0x24000, |
| - [WFDMA0_PCIE1_ADDR] = 0x28000, |
| - [WFDMA_EXT_CSR_ADDR] = 0x27000, |
| - [CBTOP1_PHY_END] = 0x7fffffff, |
| - [INFRA_MCU_ADDR_END] = 0x7c085fff, |
| - [FW_ASSERT_STAT_ADDR] = 0x02204b54, |
| - [FW_EXCEPT_TYPE_ADDR] = 0x022050dc, |
| - [FW_EXCEPT_COUNT_ADDR] = 0x02204ffc, |
| - [FW_CIRQ_COUNT_ADDR] = 0x022001ac, |
| - [FW_CIRQ_IDX_ADDR] = 0x02204ec4, |
| - [FW_CIRQ_LISR_ADDR] = 0x02205010, |
| - [FW_TASK_ID_ADDR] = 0x02204fac, |
| - [FW_TASK_IDX_ADDR] = 0x02204f4c, |
| - [FW_TASK_QID1_ADDR] = 0x02202814, |
| - [FW_TASK_QID2_ADDR] = 0x02202984, |
| - [FW_TASK_START_ADDR] = 0x022027b8, |
| - [FW_TASK_END_ADDR] = 0x022027bc, |
| - [FW_TASK_SIZE_ADDR] = 0x022027c4, |
| - [FW_LAST_MSG_ID_ADDR] = 0x02204f28, |
| - [FW_EINT_INFO_ADDR] = 0x02205194, |
| - [FW_SCHED_INFO_ADDR] = 0x022051a4, |
| - [SWDEF_BASE_ADDR] = 0x411400, |
| - [TXQ_WED_RING_BASE] = 0x24420, |
| - [RXQ_WED_RING_BASE] = 0x24520, |
| + [INT_SOURCE_CSR] = 0x24200, |
| + [INT_MASK_CSR] = 0x24204, |
| + [INT1_SOURCE_CSR] = 0x28200, |
| + [INT1_MASK_CSR] = 0x28204, |
| + [INT_MCU_CMD_SOURCE] = 0x241f0, |
| + [INT_MCU_CMD_EVENT] = 0x54000108, |
| + [WFDMA0_ADDR] = 0x24000, |
| + [WFDMA0_PCIE1_ADDR] = 0x28000, |
| + [WFDMA_EXT_CSR_ADDR] = 0x27000, |
| + [CBTOP1_PHY_END] = 0x7fffffff, |
| + [INFRA_MCU_ADDR_END] = 0x7c085fff, |
| + [FW_ASSERT_STAT_ADDR] = 0x02204b54, |
| + [FW_EXCEPT_TYPE_ADDR] = 0x022050dc, |
| + [FW_EXCEPT_COUNT_ADDR] = 0x02204ffc, |
| + [FW_CIRQ_COUNT_ADDR] = 0x022001ac, |
| + [FW_CIRQ_IDX_ADDR] = 0x02204ec4, |
| + [FW_CIRQ_LISR_ADDR] = 0x02205010, |
| + [FW_TASK_ID_ADDR] = 0x02204fac, |
| + [FW_TASK_IDX_ADDR] = 0x02204f4c, |
| + [FW_TASK_QID1_ADDR] = 0x02202814, |
| + [FW_TASK_QID2_ADDR] = 0x02202984, |
| + [FW_TASK_START_ADDR] = 0x022027b8, |
| + [FW_TASK_END_ADDR] = 0x022027bc, |
| + [FW_TASK_SIZE_ADDR] = 0x022027c4, |
| + [FW_LAST_MSG_ID_ADDR] = 0x02204f28, |
| + [FW_EINT_INFO_ADDR] = 0x02205194, |
| + [FW_SCHED_INFO_ADDR] = 0x022051a4, |
| + [SWDEF_BASE_ADDR] = 0x411400, |
| + [TXQ_WED_RING_BASE] = 0x24420, |
| + [RXQ_WED_RING_BASE] = 0x24520, |
| + [RXQ_WED_DATA_RING_BASE] = 0x24540, |
| }; |
| |
| static const u32 mt7915_offs[] = { |
| @@ -585,6 +590,105 @@ static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed) |
| mt76_clear(dev, MT_AGG_ACR4(phy->band_idx), |
| MT_AGG_ACR_PPDU_TXS2H); |
| } |
| + |
| +static void mt7915_wed_release_rx_buf(struct mtk_wed_device *wed) |
| +{ |
| + struct mt7915_dev *dev; |
| + struct page *page; |
| + int i; |
| + |
| + dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed); |
| + for (i = 0; i < dev->mt76.rx_token_size; i++) { |
| + struct mt76_txwi_cache *t; |
| + |
| + t = mt76_rx_token_release(&dev->mt76, i); |
| + if (!t || !t->ptr) |
| + continue; |
| + |
| + dma_unmap_single(dev->mt76.dma_dev, t->dma_addr, |
| + wed->wlan.rx_size, DMA_FROM_DEVICE); |
| + skb_free_frag(t->ptr); |
| + t->ptr = NULL; |
| + |
| + mt76_put_rxwi(&dev->mt76, t); |
| + } |
| + |
| + if (!wed->rx_buf_ring.rx_page.va) |
| + return; |
| + |
| + page = virt_to_page(wed->rx_buf_ring.rx_page.va); |
| + __page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias); |
| + memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page)); |
| +} |
| + |
| +static u32 mt7915_wed_init_rx_buf(struct mtk_wed_device *wed, int size) |
| +{ |
| + struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc; |
| + struct mt7915_dev *dev; |
| + u32 length; |
| + int i; |
| + |
| + dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed); |
| + length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size + |
| + sizeof(struct skb_shared_info)); |
| + |
| + for (i = 0; i < size; i++) { |
| + struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76); |
| + dma_addr_t phy_addr; |
| + int token; |
| + void *ptr; |
| + |
| + ptr = page_frag_alloc(&wed->rx_buf_ring.rx_page, length, |
| + GFP_KERNEL); |
| + if (!ptr) |
| + goto unmap; |
| + |
| + phy_addr = dma_map_single(dev->mt76.dma_dev, ptr, |
| + wed->wlan.rx_size, |
| + DMA_TO_DEVICE); |
| + if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) { |
| + skb_free_frag(ptr); |
| + goto unmap; |
| + } |
| + |
| + desc->buf0 = cpu_to_le32(phy_addr); |
| + token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr); |
| + desc->token |= cpu_to_le32(FIELD_PREP(MT_DMA_CTL_TOKEN, |
| + token)); |
| + desc++; |
| + } |
| + |
| + return 0; |
| + |
| +unmap: |
| + mt7915_wed_release_rx_buf(wed); |
| + return -ENOMEM; |
| +} |
| + |
| +static void mt7915_mmio_wed_update_rx_stats(struct mtk_wed_device *wed, |
| + struct mtk_wed_wo_rx_stats *stats) |
| +{ |
| + int idx = le16_to_cpu(stats->wlan_idx); |
| + struct mt7915_dev *dev; |
| + struct mt76_wcid *wcid; |
| + |
| + dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed); |
| + |
| + if (idx >= mt7915_wtbl_size(dev)) |
| + return; |
| + |
| + rcu_read_lock(); |
| + |
| + wcid = rcu_dereference(dev->mt76.wcid[idx]); |
| + if (wcid) { |
| + wcid->stats.rx_bytes += le32_to_cpu(stats->rx_byte_cnt); |
| + wcid->stats.rx_packets += le32_to_cpu(stats->rx_pkt_cnt); |
| + wcid->stats.rx_errors += le32_to_cpu(stats->rx_err_cnt); |
| + wcid->stats.rx_drops += le32_to_cpu(stats->rx_drop_cnt); |
| + } |
| + |
| + rcu_read_unlock(); |
| +} |
| #endif |
| |
| int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr, |
| @@ -602,6 +706,10 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr, |
| |
| wed->wlan.pci_dev = pci_dev; |
| wed->wlan.bus_type = MTK_WED_BUS_PCIE; |
| + wed->wlan.base = devm_ioremap(dev->mt76.dev, |
| + pci_resource_start(pci_dev, 0), |
| + pci_resource_len(pci_dev, 0)); |
| + wed->wlan.phy_base = pci_resource_start(pci_dev, 0); |
| wed->wlan.wpdma_int = pci_resource_start(pci_dev, 0) + |
| MT_INT_WED_SOURCE_CSR; |
| wed->wlan.wpdma_mask = pci_resource_start(pci_dev, 0) + |
| @@ -612,6 +720,10 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr, |
| MT_TXQ_WED_RING_BASE; |
| wed->wlan.wpdma_txfree = pci_resource_start(pci_dev, 0) + |
| MT_RXQ_WED_RING_BASE; |
| + wed->wlan.wpdma_rx_glo = pci_resource_start(pci_dev, 0) + |
| + MT_WPDMA_GLO_CFG; |
| + wed->wlan.wpdma_rx = pci_resource_start(pci_dev, 0) + |
| + MT_RXQ_WED_DATA_RING_BASE; |
| } else { |
| struct platform_device *plat_dev = pdev_ptr; |
| struct resource *res; |
| @@ -622,19 +734,45 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr, |
| |
| wed->wlan.platform_dev = plat_dev; |
| wed->wlan.bus_type = MTK_WED_BUS_AXI; |
| + wed->wlan.base = devm_ioremap(dev->mt76.dev, res->start, |
| + resource_size(res)); |
| + wed->wlan.phy_base = res->start; |
| wed->wlan.wpdma_int = res->start + MT_INT_SOURCE_CSR; |
| wed->wlan.wpdma_mask = res->start + MT_INT_MASK_CSR; |
| wed->wlan.wpdma_tx = res->start + MT_TXQ_WED_RING_BASE; |
| wed->wlan.wpdma_txfree = res->start + MT_RXQ_WED_RING_BASE; |
| + wed->wlan.wpdma_rx_glo = res->start + MT_WPDMA_GLO_CFG; |
| + wed->wlan.wpdma_rx = res->start + MT_RXQ_WED_DATA_RING_BASE; |
| } |
| wed->wlan.nbuf = 4096; |
| wed->wlan.tx_tbit[0] = is_mt7915(&dev->mt76) ? 4 : 30; |
| wed->wlan.tx_tbit[1] = is_mt7915(&dev->mt76) ? 5 : 31; |
| - wed->wlan.txfree_tbit = is_mt7915(&dev->mt76) ? 1 : 2; |
| + wed->wlan.txfree_tbit = is_mt7986(&dev->mt76) ? 2 : 1; |
| wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf; |
| + wed->wlan.wcid_512 = !is_mt7915(&dev->mt76); |
| + |
| + wed->wlan.rx_nbuf = 65536; |
| + wed->wlan.rx_npkt = MT7915_WED_RX_TOKEN_SIZE; |
| + wed->wlan.rx_size = SKB_WITH_OVERHEAD(MT_RX_BUF_SIZE); |
| + if (is_mt7915(&dev->mt76)) { |
| + wed->wlan.rx_tbit[0] = 16; |
| + wed->wlan.rx_tbit[1] = 17; |
| + } else if (is_mt7986(&dev->mt76)) { |
| + wed->wlan.rx_tbit[0] = 22; |
| + wed->wlan.rx_tbit[1] = 23; |
| + } else { |
| + wed->wlan.rx_tbit[0] = 18; |
| + wed->wlan.rx_tbit[1] = 19; |
| + } |
| + |
| wed->wlan.init_buf = mt7915_wed_init_buf; |
| wed->wlan.offload_enable = mt7915_mmio_wed_offload_enable; |
| wed->wlan.offload_disable = mt7915_mmio_wed_offload_disable; |
| + wed->wlan.init_rx_buf = mt7915_wed_init_rx_buf; |
| + wed->wlan.release_rx_buf = mt7915_wed_release_rx_buf; |
| + wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats; |
| + |
| + dev->mt76.rx_token_size = wed->wlan.rx_npkt; |
| |
| if (mtk_wed_device_attach(wed)) |
| return 0; |
| diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h |
| index 9cb680e7..42f21343 100644 |
| --- a/mt7915/mt7915.h |
| +++ b/mt7915/mt7915.h |
| @@ -68,6 +68,8 @@ |
| #define MT7915_MIN_TWT_DUR 64 |
| #define MT7915_MAX_QUEUE (MT_RXQ_BAND2 + __MT_MCUQ_MAX + 2) |
| |
| +#define MT7915_WED_RX_TOKEN_SIZE 12288 |
| + |
| struct mt7915_vif; |
| struct mt7915_sta; |
| struct mt7915_dfs_pulse; |
| @@ -501,8 +503,8 @@ int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vi |
| struct cfg80211_he_bss_color *he_bss_color); |
| int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
| int enable, u32 changed); |
| -int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif, |
| - bool enable); |
| +int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif, |
| + struct ieee80211_he_obss_pd *he_obss_pd); |
| int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif, |
| struct ieee80211_sta *sta, bool changed); |
| int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif, |
| @@ -526,6 +528,10 @@ int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band); |
| int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable); |
| int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy); |
| int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len); |
| +int mt7915_mcu_set_txpower_frame_min(struct mt7915_phy *phy, s8 txpower); |
| +int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy, |
| + struct ieee80211_vif *vif, |
| + struct ieee80211_sta *sta, s8 txpower); |
| int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action); |
| int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val); |
| int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev, |
| @@ -617,7 +623,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, |
| struct mt76_tx_info *tx_info); |
| void mt7915_tx_token_put(struct mt7915_dev *dev); |
| void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, |
| - struct sk_buff *skb); |
| + struct sk_buff *skb, u32 *info); |
| bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len); |
| void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps); |
| void mt7915_stats_work(struct work_struct *work); |
| @@ -628,6 +634,7 @@ void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy); |
| void mt7915_update_channel(struct mt76_phy *mphy); |
| int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enable); |
| int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms); |
| +int mt7915_mcu_wed_enable_rx_stats(struct mt7915_dev *dev); |
| int mt7915_init_debugfs(struct mt7915_phy *phy); |
| void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int len); |
| bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len); |
| diff --git a/mt7915/regs.h b/mt7915/regs.h |
| index 0c61f125..aca1b2f1 100644 |
| --- a/mt7915/regs.h |
| +++ b/mt7915/regs.h |
| @@ -43,6 +43,7 @@ enum reg_rev { |
| SWDEF_BASE_ADDR, |
| TXQ_WED_RING_BASE, |
| RXQ_WED_RING_BASE, |
| + RXQ_WED_DATA_RING_BASE, |
| __MT_REG_MAX, |
| }; |
| |
| @@ -588,9 +589,14 @@ enum offs_rev { |
| #define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21) |
| |
| #define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c) |
| + |
| +#define MT_WFDMA0_EXT0_CFG MT_WFDMA0(0x2b0) |
| +#define MT_WFDMA0_EXT0_RXWB_KEEP BIT(10) |
| + |
| #define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0) |
| #define MT_WFDMA0_PRI_DLY_INT_CFG1 MT_WFDMA0(0x2f4) |
| #define MT_WFDMA0_PRI_DLY_INT_CFG2 MT_WFDMA0(0x2f8) |
| +#define MT_WPDMA_GLO_CFG MT_WFDMA0(0x208) |
| |
| /* WFDMA1 */ |
| #define MT_WFDMA1_BASE 0xd5000 |
| @@ -686,6 +692,7 @@ enum offs_rev { |
| |
| #define MT_TXQ_WED_RING_BASE __REG(TXQ_WED_RING_BASE) |
| #define MT_RXQ_WED_RING_BASE __REG(RXQ_WED_RING_BASE) |
| +#define MT_RXQ_WED_DATA_RING_BASE __REG(RXQ_WED_DATA_RING_BASE) |
| |
| #define MT_INT_SOURCE_CSR __REG(INT_SOURCE_CSR) |
| #define MT_INT_MASK_CSR __REG(INT_MASK_CSR) |
| @@ -1179,6 +1186,10 @@ enum offs_rev { |
| #define MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY BIT(18) |
| #define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29) |
| |
| +#define MT_WF_PHY_TPC_CTRL_STAT(_phy) MT_WF_PHY(0xe7a0 + ((_phy) << 16)) |
| +#define MT_WF_PHY_TPC_CTRL_STAT_MT7916(_phy) MT_WF_PHY(0xe7a0 + ((_phy) << 20)) |
| +#define MT_WF_PHY_TPC_POWER GENMASK(15, 8) |
| + |
| #define MT_MCU_WM_CIRQ_BASE 0x89010000 |
| #define MT_MCU_WM_CIRQ(ofs) (MT_MCU_WM_CIRQ_BASE + (ofs)) |
| #define MT_MCU_WM_CIRQ_IRQ_MASK_CLR_ADDR MT_MCU_WM_CIRQ(0x80) |
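| |
| The MT_WF_PHY_TPC_CTRL_STAT{,_MT7916} and MT_WF_PHY_TPC_POWER definitions added above |
| expose the per-band transmit power control status. A hedged sketch of how the power |
| field could be read with them (helper name invented for illustration; the actual |
| debugfs plumbing is not part of this hunk): |
| |
| static u32 example_read_tpc_power(struct mt7915_dev *dev, u8 band_idx) |
| { |
|         u32 reg = is_mt7915(&dev->mt76) ? |
|                   MT_WF_PHY_TPC_CTRL_STAT(band_idx) : |
|                   MT_WF_PHY_TPC_CTRL_STAT_MT7916(band_idx); |
| |
|         /* extract the 8-bit power value from the TPC control/status word */ |
|         return mt76_get_field(dev, reg, MT_WF_PHY_TPC_POWER); |
| } |
| |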
| diff --git a/mt7915/testmode.c b/mt7915/testmode.c |
| index a979460f..7ace05e0 100644 |
| --- a/mt7915/testmode.c |
| +++ b/mt7915/testmode.c |
| @@ -44,14 +44,14 @@ mt7915_tm_set_tx_power(struct mt7915_phy *phy) |
| int ret; |
| struct { |
| u8 format_id; |
| - u8 dbdc_idx; |
| + u8 band_idx; |
| s8 tx_power; |
| u8 ant_idx; /* Only 0 is valid */ |
| u8 center_chan; |
| u8 rsv[3]; |
| } __packed req = { |
| .format_id = 0xf, |
| - .dbdc_idx = phy != &dev->phy, |
| + .band_idx = phy->band_idx, |
| .center_chan = ieee80211_frequency_to_channel(freq), |
| }; |
| u8 *tx_power = NULL; |
| @@ -77,7 +77,7 @@ mt7915_tm_set_freq_offset(struct mt7915_phy *phy, bool en, u32 val) |
| struct mt7915_tm_cmd req = { |
| .testmode_en = en, |
| .param_idx = MCU_ATE_SET_FREQ_OFFSET, |
| - .param.freq.band = phy != &dev->phy, |
| + .param.freq.band = phy->band_idx, |
| .param.freq.freq_offset = cpu_to_le32(val), |
| }; |
| |
| @@ -111,7 +111,7 @@ mt7915_tm_set_trx(struct mt7915_phy *phy, int type, bool en) |
| .param_idx = MCU_ATE_SET_TRX, |
| .param.trx.type = type, |
| .param.trx.enable = en, |
| - .param.trx.band = phy != &dev->phy, |
| + .param.trx.band = phy->band_idx, |
| }; |
| |
| return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req, |
| @@ -126,7 +126,7 @@ mt7915_tm_clean_hwq(struct mt7915_phy *phy, u8 wcid) |
| .testmode_en = 1, |
| .param_idx = MCU_ATE_CLEAN_TXQUEUE, |
| .param.clean.wcid = wcid, |
| - .param.clean.band = phy != &dev->phy, |
| + .param.clean.band = phy->band_idx, |
| }; |
| |
| return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req, |
| @@ -144,7 +144,7 @@ mt7915_tm_set_slot_time(struct mt7915_phy *phy, u8 slot_time, u8 sifs) |
| .param.slot.sifs = sifs, |
| .param.slot.rifs = 2, |
| .param.slot.eifs = cpu_to_le16(60), |
| - .param.slot.band = phy != &dev->phy, |
| + .param.slot.band = phy->band_idx, |
| }; |
| |
| return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req, |
| @@ -488,7 +488,7 @@ mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en) |
| mt7915_tm_update_channel(phy); |
| |
| /* read-clear */ |
| - mt76_rr(dev, MT_MIB_SDR3(phy != &dev->phy)); |
| + mt76_rr(dev, MT_MIB_SDR3(phy->band_idx)); |
| mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en); |
| } |
| } |
| @@ -526,7 +526,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en) |
| tx_cont->control_ch = chandef->chan->hw_value; |
| tx_cont->center_ch = freq1; |
| tx_cont->tx_ant = td->tx_antenna_mask; |
| - tx_cont->band = phy != &dev->phy; |
| + tx_cont->band = phy->band_idx; |
| |
| switch (chandef->width) { |
| case NL80211_CHAN_WIDTH_40: |
| @@ -558,7 +558,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en) |
| } |
| |
| if (!en) { |
| - req.op.rf.param.func_data = cpu_to_le32(phy != &dev->phy); |
| + req.op.rf.param.func_data = cpu_to_le32(phy->band_idx); |
| goto out; |
| } |
| |
| diff --git a/mt7921/init.c b/mt7921/init.c |
| index 739d18fc..e42cb6be 100644 |
| --- a/mt7921/init.c |
| +++ b/mt7921/init.c |
| @@ -2,6 +2,7 @@ |
| /* Copyright (C) 2020 MediaTek Inc. */ |
| |
| #include <linux/etherdevice.h> |
| +#include <linux/firmware.h> |
| #include "mt7921.h" |
| #include "mac.h" |
| #include "mcu.h" |
| @@ -37,6 +38,7 @@ mt7921_regd_notifier(struct wiphy *wiphy, |
| |
| memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2)); |
| dev->mt76.region = request->dfs_region; |
| + dev->country_ie_env = request->country_ie_env; |
| |
| mt7921_mutex_acquire(dev); |
| mt7921_mcu_set_clc(dev, request->alpha2, request->country_ie_env); |
| @@ -65,12 +67,18 @@ mt7921_init_wiphy(struct ieee80211_hw *hw) |
| hw->sta_data_size = sizeof(struct mt7921_sta); |
| hw->vif_data_size = sizeof(struct mt7921_vif); |
| |
| + if (dev->fw_features & MT7921_FW_CAP_CNM) |
| + wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; |
| + else |
| + wiphy->flags &= ~WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; |
| + |
| wiphy->iface_combinations = if_comb; |
| wiphy->flags &= ~(WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_4ADDR_AP | |
| WIPHY_FLAG_4ADDR_STATION); |
| wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | |
| BIT(NL80211_IFTYPE_AP); |
| wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); |
| + wiphy->max_remain_on_channel_duration = 5000; |
| wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN; |
| wiphy->max_scan_ssids = 4; |
| wiphy->max_sched_scan_plan_interval = |
| @@ -129,6 +137,58 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band) |
| mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN); |
| } |
| |
| +u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm) |
| +{ |
| + struct mt7921_fw_features *features = NULL; |
| + const struct mt76_connac2_fw_trailer *hdr; |
| + struct mt7921_realease_info *rel_info; |
| + const struct firmware *fw; |
| + int ret, i, offset = 0; |
| + const u8 *data, *end; |
| + |
| + ret = request_firmware(&fw, fw_wm, dev); |
| + if (ret) |
| + return ret; |
| + |
| + if (!fw || !fw->data || fw->size < sizeof(*hdr)) { |
| + dev_err(dev, "Invalid firmware\n"); |
| + return -EINVAL; |
| + } |
| + |
| + data = fw->data; |
| + hdr = (const void *)(fw->data + fw->size - sizeof(*hdr)); |
| + |
| + for (i = 0; i < hdr->n_region; i++) { |
| + const struct mt76_connac2_fw_region *region; |
| + |
| + region = (const void *)((const u8 *)hdr - |
| + (hdr->n_region - i) * sizeof(*region)); |
| + offset += le32_to_cpu(region->len); |
| + } |
| + |
| + data += offset + 16; |
| + rel_info = (struct mt7921_realease_info *)data; |
| + data += sizeof(*rel_info); |
| + end = data + le16_to_cpu(rel_info->len); |
| + |
| + while (data < end) { |
| + rel_info = (struct mt7921_realease_info *)data; |
| + data += sizeof(*rel_info); |
| + |
| + if (rel_info->tag == MT7921_FW_TAG_FEATURE) { |
| + features = (struct mt7921_fw_features *)data; |
| + break; |
| + } |
| + |
| + data += le16_to_cpu(rel_info->len) + rel_info->pad_len; |
| + } |
| + |
| + release_firmware(fw); |
| + |
| + return features ? features->data : 0; |
| +} |
| +EXPORT_SYMBOL_GPL(mt7921_check_offload_capability); |
| + |
| int mt7921_mac_init(struct mt7921_dev *dev) |
| { |
| int i; |
| @@ -278,6 +338,10 @@ int mt7921_register_device(struct mt7921_dev *dev) |
| INIT_WORK(&dev->reset_work, mt7921_mac_reset_work); |
| INIT_WORK(&dev->init_work, mt7921_init_work); |
| |
| + INIT_WORK(&dev->phy.roc_work, mt7921_roc_work); |
| + timer_setup(&dev->phy.roc_timer, mt7921_roc_timer, 0); |
| + init_waitqueue_head(&dev->phy.roc_wait); |
| + |
| dev->pm.idle_timeout = MT7921_PM_TIMEOUT; |
| dev->pm.stats.last_wake_event = jiffies; |
| dev->pm.stats.last_doze_event = jiffies; |
| diff --git a/mt7921/mac.c b/mt7921/mac.c |
| index 7b15193c..639614b0 100644 |
| --- a/mt7921/mac.c |
| +++ b/mt7921/mac.c |
| @@ -692,7 +692,7 @@ bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len) |
| EXPORT_SYMBOL_GPL(mt7921_rx_check); |
| |
| void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, |
| - struct sk_buff *skb) |
| + struct sk_buff *skb, u32 *info) |
| { |
| struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); |
| __le32 *rxd = (__le32 *)skb->data; |
| diff --git a/mt7921/main.c b/mt7921/main.c |
| index 00085b12..1b7219e3 100644 |
| --- a/mt7921/main.c |
| +++ b/mt7921/main.c |
| @@ -385,6 +385,116 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw, |
| mt76_packet_id_flush(&dev->mt76, &msta->wcid); |
| } |
| |
| +static void mt7921_roc_iter(void *priv, u8 *mac, |
| + struct ieee80211_vif *vif) |
| +{ |
| + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; |
| + struct mt7921_phy *phy = priv; |
| + |
| + mt7921_mcu_abort_roc(phy, mvif, phy->roc_token_id); |
| +} |
| + |
| +void mt7921_roc_work(struct work_struct *work) |
| +{ |
| + struct mt7921_phy *phy; |
| + |
| + phy = (struct mt7921_phy *)container_of(work, struct mt7921_phy, |
| + roc_work); |
| + |
| + if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) |
| + return; |
| + |
| + mt7921_mutex_acquire(phy->dev); |
| + ieee80211_iterate_active_interfaces(phy->mt76->hw, |
| + IEEE80211_IFACE_ITER_RESUME_ALL, |
| + mt7921_roc_iter, phy); |
| + mt7921_mutex_release(phy->dev); |
| + ieee80211_remain_on_channel_expired(phy->mt76->hw); |
| +} |
| + |
| +void mt7921_roc_timer(struct timer_list *timer) |
| +{ |
| + struct mt7921_phy *phy = from_timer(phy, timer, roc_timer); |
| + |
| + ieee80211_queue_work(phy->mt76->hw, &phy->roc_work); |
| +} |
| + |
| +static int mt7921_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif) |
| +{ |
| + int err; |
| + |
| + if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) |
| + return 0; |
| + |
| + del_timer_sync(&phy->roc_timer); |
| + cancel_work_sync(&phy->roc_work); |
| + err = mt7921_mcu_abort_roc(phy, vif, phy->roc_token_id); |
| + clear_bit(MT76_STATE_ROC, &phy->mt76->state); |
| + |
| + return err; |
| +} |
| + |
| +static int mt7921_set_roc(struct mt7921_phy *phy, |
| + struct mt7921_vif *vif, |
| + struct ieee80211_channel *chan, |
| + int duration, |
| + enum mt7921_roc_req type) |
| +{ |
| + int err; |
| + |
| + if (test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state)) |
| + return -EBUSY; |
| + |
| + phy->roc_grant = false; |
| + |
| + err = mt7921_mcu_set_roc(phy, vif, chan, duration, type, |
| + ++phy->roc_token_id); |
| + if (err < 0) { |
| + clear_bit(MT76_STATE_ROC, &phy->mt76->state); |
| + goto out; |
| + } |
| + |
| + if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, HZ)) { |
| + mt7921_mcu_abort_roc(phy, vif, phy->roc_token_id); |
| + clear_bit(MT76_STATE_ROC, &phy->mt76->state); |
| + err = -ETIMEDOUT; |
| + } |
| + |
| +out: |
| + return err; |
| +} |
| + |
| +static int mt7921_remain_on_channel(struct ieee80211_hw *hw, |
| + struct ieee80211_vif *vif, |
| + struct ieee80211_channel *chan, |
| + int duration, |
| + enum ieee80211_roc_type type) |
| +{ |
| + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; |
| + struct mt7921_phy *phy = mt7921_hw_phy(hw); |
| + int err; |
| + |
| + mt7921_mutex_acquire(phy->dev); |
| + err = mt7921_set_roc(phy, mvif, chan, duration, MT7921_ROC_REQ_ROC); |
| + mt7921_mutex_release(phy->dev); |
| + |
| + return err; |
| +} |
| + |
| +static int mt7921_cancel_remain_on_channel(struct ieee80211_hw *hw, |
| + struct ieee80211_vif *vif) |
| +{ |
| + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; |
| + struct mt7921_phy *phy = mt7921_hw_phy(hw); |
| + int err; |
| + |
| + mt7921_mutex_acquire(phy->dev); |
| + err = mt7921_abort_roc(phy, mvif); |
| + mt7921_mutex_release(phy->dev); |
| + |
| + return err; |
| +} |
| + |
| static int mt7921_set_channel(struct mt7921_phy *phy) |
| { |
| struct mt7921_dev *dev = phy->dev; |
| @@ -1503,7 +1613,13 @@ static int mt7921_set_sar_specs(struct ieee80211_hw *hw, |
| int err; |
| |
| mt7921_mutex_acquire(dev); |
| + err = mt7921_mcu_set_clc(dev, dev->mt76.alpha2, |
| + dev->country_ie_env); |
| + if (err < 0) |
| + goto out; |
| + |
| err = mt7921_set_tx_sar_pwr(hw, sar); |
| +out: |
| mt7921_mutex_release(dev); |
| |
| return err; |
| @@ -1621,6 +1737,8 @@ const struct ieee80211_ops mt7921_ops = { |
| #endif /* CONFIG_PM */ |
| .flush = mt7921_flush, |
| .set_sar_specs = mt7921_set_sar_specs, |
| + .remain_on_channel = mt7921_remain_on_channel, |
| + .cancel_remain_on_channel = mt7921_cancel_remain_on_channel, |
| }; |
| EXPORT_SYMBOL_GPL(mt7921_ops); |
| |
| diff --git a/mt7921/mcu.c b/mt7921/mcu.c |
| index 104da7e1..b7ed744f 100644 |
| --- a/mt7921/mcu.c |
| +++ b/mt7921/mcu.c |
| @@ -154,6 +154,29 @@ void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) |
| |
| #endif /* CONFIG_PM */ |
| |
| +static void |
| +mt7921_mcu_uni_roc_event(struct mt7921_dev *dev, struct sk_buff *skb) |
| +{ |
| + struct mt7921_roc_grant_tlv *grant; |
| + struct mt76_connac2_mcu_rxd *rxd; |
| + int duration; |
| + |
| + rxd = (struct mt76_connac2_mcu_rxd *)skb->data; |
| + grant = (struct mt7921_roc_grant_tlv *)(rxd->tlv + 4); |
| + |
| + /* should never happen */ |
| + WARN_ON_ONCE((le16_to_cpu(grant->tag) != UNI_EVENT_ROC_GRANT)); |
| + |
| + if (grant->reqtype == MT7921_ROC_REQ_ROC) |
| + ieee80211_ready_on_channel(dev->mt76.phy.hw); |
| + |
| + dev->phy.roc_grant = true; |
| + wake_up(&dev->phy.roc_wait); |
| + duration = le32_to_cpu(grant->max_interval); |
| + mod_timer(&dev->phy.roc_timer, |
| + round_jiffies_up(jiffies + msecs_to_jiffies(duration))); |
| +} |
| + |
| static void |
| mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb) |
| { |
| @@ -295,6 +318,7 @@ mt7921_mcu_uni_rx_unsolicited_event(struct mt7921_dev *dev, |
| |
| switch (rxd->eid) { |
| case MCU_UNI_EVENT_ROC: |
| + mt7921_mcu_uni_roc_event(dev, skb); |
| break; |
| default: |
| break; |
| diff --git a/mt7921/mt7921.h b/mt7921/mt7921.h |
| index d9d78f6b..e915dfce 100644 |
| --- a/mt7921/mt7921.h |
| +++ b/mt7921/mt7921.h |
| @@ -32,6 +32,9 @@ |
| #define MT7921_MCU_INIT_RETRY_COUNT 10 |
| #define MT7921_WFSYS_INIT_RETRY_COUNT 2 |
| |
| +#define MT7921_FW_TAG_FEATURE 4 |
| +#define MT7921_FW_CAP_CNM BIT(7) |
| + |
| #define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin" |
| #define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin" |
| |
| @@ -67,6 +70,41 @@ enum mt7921_roc_req { |
| MT7921_ROC_REQ_NUM |
| }; |
| |
| +enum { |
| + UNI_EVENT_ROC_GRANT = 0, |
| + UNI_EVENT_ROC_TAG_NUM |
| +}; |
| + |
| +struct mt7921_realease_info { |
| + __le16 len; |
| + u8 pad_len; |
| + u8 tag; |
| +} __packed; |
| + |
| +struct mt7921_fw_features { |
| + u8 segment; |
| + u8 data; |
| + u8 rsv[14]; |
| +} __packed; |
| + |
| +struct mt7921_roc_grant_tlv { |
| + __le16 tag; |
| + __le16 len; |
| + u8 bss_idx; |
| + u8 tokenid; |
| + u8 status; |
| + u8 primarychannel; |
| + u8 rfsco; |
| + u8 rfband; |
| + u8 channelwidth; |
| + u8 centerfreqseg1; |
| + u8 centerfreqseg2; |
| + u8 reqtype; |
| + u8 dbdcband; |
| + u8 rsv[1]; |
| + __le32 max_interval; |
| +} __packed; |
| + |
| enum mt7921_sdio_pkt_type { |
| MT7921_SDIO_TXD, |
| MT7921_SDIO_DATA, |
| @@ -214,6 +252,12 @@ struct mt7921_phy { |
| #endif |
| |
| struct mt7921_clc *clc[MT7921_CLC_MAX_NUM]; |
| + |
| + struct work_struct roc_work; |
| + struct timer_list roc_timer; |
| + wait_queue_head_t roc_wait; |
| + u8 roc_token_id; |
| + bool roc_grant; |
| }; |
| |
| #define mt7921_init_reset(dev) ((dev)->hif_ops->init_reset(dev)) |
| @@ -250,6 +294,7 @@ struct mt7921_dev { |
| struct work_struct init_work; |
| |
| u8 fw_debug; |
| + u8 fw_features; |
| |
| struct mt76_connac_pm pm; |
| struct mt76_connac_coredump coredump; |
| @@ -258,6 +303,8 @@ struct mt7921_dev { |
| struct work_struct ipv6_ns_work; |
| /* IPv6 addresses for WoWLAN */ |
| struct sk_buff_head ipv6_ns_list; |
| + |
| + enum environment_cap country_ie_env; |
| }; |
| |
| enum { |
| @@ -422,7 +469,7 @@ void mt7921_tx_worker(struct mt76_worker *w); |
| void mt7921_tx_token_put(struct mt7921_dev *dev); |
| bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len); |
| void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, |
| - struct sk_buff *skb); |
| + struct sk_buff *skb, u32 *info); |
| void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps); |
| void mt7921_stats_work(struct work_struct *work); |
| void mt7921_set_stream_he_caps(struct mt7921_phy *phy); |
| @@ -439,6 +486,8 @@ int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev, |
| struct ieee80211_ampdu_params *params, |
| bool enable); |
| void mt7921_scan_work(struct work_struct *work); |
| +void mt7921_roc_work(struct work_struct *work); |
| +void mt7921_roc_timer(struct timer_list *timer); |
| int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif); |
| int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev); |
| int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev); |
| @@ -527,4 +576,5 @@ int mt7921_mcu_set_roc(struct mt7921_phy *phy, struct mt7921_vif *vif, |
| enum mt7921_roc_req type, u8 token_id); |
| int mt7921_mcu_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif, |
| u8 token_id); |
| +u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm); |
| #endif |
| diff --git a/mt7921/pci.c b/mt7921/pci.c |
| index 4f34cb9e..fbb06f04 100644 |
| --- a/mt7921/pci.c |
| +++ b/mt7921/pci.c |
| @@ -13,10 +13,14 @@ |
| #include "../trace.h" |
| |
| static const struct pci_device_id mt7921_pci_device_table[] = { |
| - { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961) }, |
| - { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922) }, |
| - { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608) }, |
| - { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616) }, |
| + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961), |
| + .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM }, |
| + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922), |
| + .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM }, |
| + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608), |
| + .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM }, |
| + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616), |
| + .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM }, |
| { }, |
| }; |
| |
| @@ -253,9 +257,11 @@ static int mt7921_pci_probe(struct pci_dev *pdev, |
| .fw_own = mt7921e_mcu_fw_pmctrl, |
| }; |
| |
| + struct ieee80211_ops *ops; |
| struct mt76_bus_ops *bus_ops; |
| struct mt7921_dev *dev; |
| struct mt76_dev *mdev; |
| + u8 features; |
| int ret; |
| |
| ret = pcim_enable_device(pdev); |
| @@ -279,8 +285,21 @@ static int mt7921_pci_probe(struct pci_dev *pdev, |
| if (mt7921_disable_aspm) |
| mt76_pci_disable_aspm(pdev); |
| |
| - mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7921_ops, |
| - &drv_ops); |
| + features = mt7921_check_offload_capability(&pdev->dev, (const char *) |
| + id->driver_data); |
| + ops = devm_kmemdup(&pdev->dev, &mt7921_ops, sizeof(mt7921_ops), |
| + GFP_KERNEL); |
| + if (!ops) { |
| + ret = -ENOMEM; |
| + goto err_free_pci_vec; |
| + } |
| + |
| + if (!(features & MT7921_FW_CAP_CNM)) { |
| + ops->remain_on_channel = NULL; |
| + ops->cancel_remain_on_channel = NULL; |
| + } |
| + |
| + mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), ops, &drv_ops); |
| if (!mdev) { |
| ret = -ENOMEM; |
| goto err_free_pci_vec; |
| @@ -289,8 +308,8 @@ static int mt7921_pci_probe(struct pci_dev *pdev, |
| pci_set_drvdata(pdev, mdev); |
| |
| dev = container_of(mdev, struct mt7921_dev, mt76); |
| + dev->fw_features = features; |
| dev->hif_ops = &mt7921_pcie_ops; |
| - |
| mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); |
| tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev); |
| |
| diff --git a/mt7921/sdio.c b/mt7921/sdio.c |
| index 031d99d4..f6b35087 100644 |
| --- a/mt7921/sdio.c |
| +++ b/mt7921/sdio.c |
| @@ -17,7 +17,8 @@ |
| #include "mcu.h" |
| |
| static const struct sdio_device_id mt7921s_table[] = { |
| - { SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7901) }, |
| + { SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7901), |
| + .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM }, |
| { } /* Terminating entry */ |
| }; |
| |
| @@ -122,18 +123,32 @@ static int mt7921s_probe(struct sdio_func *func, |
| .fw_own = mt7921s_mcu_fw_pmctrl, |
| }; |
| |
| + struct ieee80211_ops *ops; |
| struct mt7921_dev *dev; |
| struct mt76_dev *mdev; |
| + u8 features; |
| int ret; |
| |
| - mdev = mt76_alloc_device(&func->dev, sizeof(*dev), &mt7921_ops, |
| - &drv_ops); |
| + features = mt7921_check_offload_capability(&func->dev, (const char *) |
| + id->driver_data); |
| + |
| + ops = devm_kmemdup(&func->dev, &mt7921_ops, sizeof(mt7921_ops), |
| + GFP_KERNEL); |
| + if (!ops) |
| + return -ENOMEM; |
| + |
| + if (!(features & MT7921_FW_CAP_CNM)) { |
| + ops->remain_on_channel = NULL; |
| + ops->cancel_remain_on_channel = NULL; |
| + } |
| + |
| + mdev = mt76_alloc_device(&func->dev, sizeof(*dev), ops, &drv_ops); |
| if (!mdev) |
| return -ENOMEM; |
| |
| dev = container_of(mdev, struct mt7921_dev, mt76); |
| + dev->fw_features = features; |
| dev->hif_ops = &mt7921_sdio_ops; |
| - |
| sdio_set_drvdata(func, dev); |
| |
| ret = mt76s_init(mdev, func, &mt7921s_ops); |
| diff --git a/mt7921/usb.c b/mt7921/usb.c |
| index 89249f0b..8a49d3de 100644 |
| --- a/mt7921/usb.c |
| +++ b/mt7921/usb.c |
| @@ -13,7 +13,8 @@ |
| #include "mac.h" |
| |
| static const struct usb_device_id mt7921u_device_table[] = { |
| - { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7961, 0xff, 0xff, 0xff) }, |
| + { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7961, 0xff, 0xff, 0xff), |
| + .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM }, |
| { }, |
| }; |
| |
| @@ -204,13 +205,21 @@ static int mt7921u_probe(struct usb_interface *usb_intf, |
| struct ieee80211_hw *hw; |
| struct mt7921_dev *dev; |
| struct mt76_dev *mdev; |
| + u8 features; |
| int ret; |
| |
| + features = mt7921_check_offload_capability(&usb_intf->dev, (const char *) |
| + id->driver_info); |
| ops = devm_kmemdup(&usb_intf->dev, &mt7921_ops, sizeof(mt7921_ops), |
| GFP_KERNEL); |
| if (!ops) |
| return -ENOMEM; |
| |
| + if (!(features & MT7921_FW_CAP_CNM)) { |
| + ops->remain_on_channel = NULL; |
| + ops->cancel_remain_on_channel = NULL; |
| + } |
| + |
| ops->stop = mt7921u_stop; |
| |
| mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), ops, &drv_ops); |
| @@ -218,6 +227,7 @@ static int mt7921u_probe(struct usb_interface *usb_intf, |
| return -ENOMEM; |
| |
| dev = container_of(mdev, struct mt7921_dev, mt76); |
| + dev->fw_features = features; |
| dev->hif_ops = &hif_ops; |
| |
| udev = usb_get_dev(udev); |
| diff --git a/sdio.c b/sdio.c |
| index 0ec308f9..228bc7d4 100644 |
| --- a/sdio.c |
| +++ b/sdio.c |
| @@ -395,7 +395,7 @@ mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q) |
| if (!e || !e->skb) |
| break; |
| |
| - dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb); |
| + dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb, NULL); |
| e->skb = NULL; |
| nframes++; |
| } |
| diff --git a/tx.c b/tx.c |
| index 65e2b7c1..c8d78b0a 100644 |
| --- a/tx.c |
| +++ b/tx.c |
| @@ -751,6 +751,23 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi) |
| } |
| EXPORT_SYMBOL_GPL(mt76_token_consume); |
| |
| +int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr, |
| + struct mt76_txwi_cache *t, dma_addr_t phys) |
| +{ |
| + int token; |
| + |
| + spin_lock_bh(&dev->rx_token_lock); |
| + token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size, |
| + GFP_ATOMIC); |
| + spin_unlock_bh(&dev->rx_token_lock); |
| + |
| + t->ptr = ptr; |
| + t->dma_addr = phys; |
| + |
| + return token; |
| +} |
| +EXPORT_SYMBOL_GPL(mt76_rx_token_consume); |
| + |
| struct mt76_txwi_cache * |
| mt76_token_release(struct mt76_dev *dev, int token, bool *wake) |
| { |
| @@ -779,3 +796,16 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake) |
| return txwi; |
| } |
| EXPORT_SYMBOL_GPL(mt76_token_release); |
| + |
| +struct mt76_txwi_cache * |
| +mt76_rx_token_release(struct mt76_dev *dev, int token) |
| +{ |
| + struct mt76_txwi_cache *t; |
| + |
| + spin_lock_bh(&dev->rx_token_lock); |
| + t = idr_remove(&dev->rx_token, token); |
| + spin_unlock_bh(&dev->rx_token_lock); |
| + |
| + return t; |
| +} |
| +EXPORT_SYMBOL_GPL(mt76_rx_token_release); |
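| |
| The rx token helpers above are intended to be used as a pair by the WED glue code: |
| the driver maps an rx buffer, hands the (buffer, rxwi, DMA address) tuple to |
| mt76_rx_token_consume() to obtain a token for the hardware ring, and later resolves |
| the token with mt76_rx_token_release() to unmap and free the buffer. A minimal |
| sketch of that pattern (the mt76_example_* names are invented for illustration): |
| |
| static int mt76_example_rx_fill_one(struct mt76_dev *dev, void *buf, dma_addr_t addr) |
| { |
|         struct mt76_txwi_cache *t = mt76_get_rxwi(dev); |
| |
|         if (!t) |
|                 return -ENOMEM; |
| |
|         /* stores buf/addr in the rxwi and reserves an idr slot for it */ |
|         return mt76_rx_token_consume(dev, buf, t, addr); |
| } |
| |
| static void mt76_example_rx_drop_one(struct mt76_dev *dev, int token, u32 buf_size) |
| { |
|         struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token); |
| |
|         if (!t || !t->ptr) |
|                 return; |
| |
|         dma_unmap_single(dev->dma_dev, t->dma_addr, buf_size, DMA_FROM_DEVICE); |
|         skb_free_frag(t->ptr); |
|         t->ptr = NULL; |
|         mt76_put_rxwi(dev, t); |
| } |
| |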
| diff --git a/usb.c b/usb.c |
| index 50d07d91..369c27ab 100644 |
| --- a/usb.c |
| +++ b/usb.c |
| @@ -547,7 +547,7 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb, |
| len -= data_len; |
| nsgs++; |
| } |
| - dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb); |
| + dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL); |
| |
| return nsgs; |
| } |
| -- |
| 2.36.1 |
| |