| From c326d38bf0da40d6b0ccbd13de2bb267398598d0 Mon Sep 17 00:00:00 2001 |
| From: Sujuan Chen <sujuan.chen@mediatek.com> |
| Date: Wed, 14 Dec 2022 17:19:00 +0800 |
| Subject: [PATCH 3012/3013] mt76: mt7915: wed: add rxwi for further in-chip
|  RRO development
| |
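| Introduce a dedicated struct mt76_rxwi_cache for rx buffer tokens instead
| of reusing struct mt76_txwi_cache, as groundwork for in-chip RRO support.
| Protect the rxwi list with dev->lock and drop the now-unused wed_lock,
| pass the rxwi explicitly to mt76_dma_add_buf() and store it in the queue
| entry, and let drivers declare their rx token pool size through the new
| mt76_driver_ops::rx_token_size field instead of growing it per rx ring.
| 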
| Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com> |
| --- |
| dma.c | 98 +++++++++++++++++++++++++------------------------ |
| mac80211.c | 2 +- |
| mt76.h | 24 +++++++----- |
| mt7915/dma.c | 2 - |
| mt7915/mmio.c | 21 ++++++----- |
| mt7915/mt7915.h | 1 + |
| tx.c | 16 ++++---- |
| 7 files changed, 86 insertions(+), 78 deletions(-) |
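| 
| Reviewer note: a minimal sketch of the rx buffer token lifecycle after this
| change (illustrative only; dev, ptr and phys stand for the usual surrounding
| context and are assumed here, error handling trimmed):
| 
| 	struct mt76_rxwi_cache *r = mt76_get_rxwi(dev);	/* reuse from dev->rxwi_cache, or kzalloc */
| 	int token = mt76_rx_token_consume(dev, ptr, r, phys);	/* idr_alloc in [0, dev->rx_token_size) */
| 	/* ... hardware owns the buffer until rx completion ... */
| 	r = mt76_rx_token_release(dev, token);			/* idr_remove under rx_token_lock */
| 	mt76_put_rxwi(dev, r);					/* back onto dev->rxwi_cache under dev->lock */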
| |
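| Sizing note: dev->rx_token_size is now seeded from the new
| mt76_driver_ops::rx_token_size field in mt76_alloc_device()
| (MT7915_RX_TOKEN_SIZE, 4096 entries, for mt7915), and
| mt7915_mmio_wed_init() adds wed->wlan.rx_npkt on top when WED attaches;
| the previous per-ring MT7915_RX_RING_SIZE accumulation in
| mt7915_dma_init() is dropped.
| 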
| diff --git a/dma.c b/dma.c |
| index 0914266a..7ef272e2 100644 |
| --- a/dma.c |
| +++ b/dma.c |
| @@ -59,17 +59,17 @@ mt76_alloc_txwi(struct mt76_dev *dev) |
| return t; |
| } |
| |
| -static struct mt76_txwi_cache * |
| +static struct mt76_rxwi_cache * |
| mt76_alloc_rxwi(struct mt76_dev *dev) |
| { |
| - struct mt76_txwi_cache *t; |
| + struct mt76_rxwi_cache *r; |
| |
| - t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC); |
| - if (!t) |
| + r = kzalloc(L1_CACHE_ALIGN(sizeof(*r)), GFP_ATOMIC); |
| + if (!r) |
| return NULL; |
| |
| - t->ptr = NULL; |
| - return t; |
| + r->ptr = NULL; |
| + return r; |
| } |
| |
| static struct mt76_txwi_cache * |
| @@ -88,20 +88,20 @@ __mt76_get_txwi(struct mt76_dev *dev) |
| return t; |
| } |
| |
| -static struct mt76_txwi_cache * |
| +static struct mt76_rxwi_cache * |
| __mt76_get_rxwi(struct mt76_dev *dev) |
| { |
| - struct mt76_txwi_cache *t = NULL; |
| + struct mt76_rxwi_cache *r = NULL; |
| |
| - spin_lock(&dev->wed_lock); |
| + spin_lock(&dev->lock); |
| if (!list_empty(&dev->rxwi_cache)) { |
| - t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache, |
| + r = list_first_entry(&dev->rxwi_cache, struct mt76_rxwi_cache, |
| list); |
| - list_del(&t->list); |
| + list_del(&r->list); |
| } |
| - spin_unlock(&dev->wed_lock); |
| + spin_unlock(&dev->lock); |
| |
| - return t; |
| + return r; |
| } |
| |
| static struct mt76_txwi_cache * |
| @@ -115,13 +115,13 @@ mt76_get_txwi(struct mt76_dev *dev) |
| return mt76_alloc_txwi(dev); |
| } |
| |
| -struct mt76_txwi_cache * |
| +struct mt76_rxwi_cache * |
| mt76_get_rxwi(struct mt76_dev *dev) |
| { |
| - struct mt76_txwi_cache *t = __mt76_get_rxwi(dev); |
| + struct mt76_rxwi_cache *r = __mt76_get_rxwi(dev); |
| |
| - if (t) |
| - return t; |
| + if (r) |
| + return r; |
| |
| return mt76_alloc_rxwi(dev); |
| } |
| @@ -140,14 +140,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) |
| EXPORT_SYMBOL_GPL(mt76_put_txwi); |
| |
| void |
| -mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) |
| +mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r) |
| { |
| - if (!t) |
| + if (!r) |
| return; |
| |
| - spin_lock(&dev->wed_lock); |
| - list_add(&t->list, &dev->rxwi_cache); |
| - spin_unlock(&dev->wed_lock); |
| + spin_lock(&dev->lock); |
| + list_add(&r->list, &dev->rxwi_cache); |
| + spin_unlock(&dev->lock); |
| } |
| EXPORT_SYMBOL_GPL(mt76_put_rxwi); |
| |
| @@ -168,13 +168,13 @@ mt76_free_pending_txwi(struct mt76_dev *dev) |
| void |
| mt76_free_pending_rxwi(struct mt76_dev *dev) |
| { |
| - struct mt76_txwi_cache *t; |
| + struct mt76_rxwi_cache *r; |
| |
| local_bh_disable(); |
| - while ((t = __mt76_get_rxwi(dev)) != NULL) { |
| - if (t->ptr) |
| - skb_free_frag(t->ptr); |
| - kfree(t); |
| + while ((r = __mt76_get_rxwi(dev)) != NULL) { |
| + if (r->ptr) |
| + skb_free_frag(r->ptr); |
| + kfree(r); |
| } |
| local_bh_enable(); |
| } |
| @@ -209,7 +209,7 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q) |
| static int |
| mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, |
| struct mt76_queue_buf *buf, int nbufs, u32 info, |
| - struct sk_buff *skb, void *txwi) |
| + struct sk_buff *skb, void *txwi, void *rxwi) |
| { |
| struct mt76_queue_entry *entry; |
| struct mt76_desc *desc; |
| @@ -227,13 +227,13 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, |
| |
| if ((q->flags & MT_QFLAG_WED) && |
| FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) { |
| - struct mt76_txwi_cache *t = txwi; |
| + struct mt76_rxwi_cache *r = rxwi; |
| int rx_token; |
| |
| - if (!t) |
| + if (!r) |
| return -ENOMEM; |
| |
| - rx_token = mt76_rx_token_consume(dev, (void *)skb, t, |
| + rx_token = mt76_rx_token_consume(dev, (void *)skb, r, |
| buf[0].addr); |
| if (rx_token < 0) |
| return -ENOMEM; |
| @@ -280,6 +280,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, |
| } |
| |
| q->entry[idx].txwi = txwi; |
| + q->entry[idx].rxwi = rxwi; |
| q->entry[idx].skb = skb; |
| q->entry[idx].wcid = 0xffff; |
| |
| @@ -379,13 +380,13 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, |
| u32 id, find = 0; |
| u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, |
| le32_to_cpu(desc->buf1)); |
| - struct mt76_txwi_cache *t; |
| + struct mt76_rxwi_cache *r; |
| |
| if (*more) { |
| spin_lock_bh(&dev->rx_token_lock); |
| |
| - idr_for_each_entry(&dev->rx_token, t, id) { |
| - if (t->dma_addr == le32_to_cpu(desc->buf0)) { |
| + idr_for_each_entry(&dev->rx_token, r, id) { |
| + if (r->dma_addr == le32_to_cpu(desc->buf0)) { |
| find = 1; |
| desc->buf1 = FIELD_PREP(MT_DMA_CTL_TOKEN, id); |
| token = id; |
| @@ -398,11 +399,11 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, |
| return NULL; |
| } |
| |
| - t = mt76_rx_token_release(dev, token); |
| - if (!t) |
| + r = mt76_rx_token_release(dev, token); |
| + if (!r) |
| return NULL; |
| |
| - dma_unmap_single(dev->dma_dev, t->dma_addr, |
| + dma_unmap_single(dev->dma_dev, r->dma_addr, |
| SKB_WITH_OVERHEAD(q->buf_size), |
| DMA_FROM_DEVICE); |
| |
| @@ -410,10 +411,10 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, |
| if (!buf) |
| return NULL; |
| |
| - memcpy(buf, t->ptr, SKB_WITH_OVERHEAD(q->buf_size)); |
| - t->dma_addr = 0; |
| + memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size)); |
| + r->dma_addr = 0; |
| |
| - mt76_put_rxwi(dev, t); |
| + mt76_put_rxwi(dev, r); |
| |
| if (drop) { |
| u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl)); |
| @@ -481,7 +482,7 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, |
| buf.len = skb->len; |
| |
| spin_lock_bh(&q->lock); |
| - mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); |
| + mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL, NULL); |
| mt76_dma_kick_queue(dev, q); |
| spin_unlock_bh(&q->lock); |
| |
| @@ -558,7 +559,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, |
| goto unmap; |
| |
| return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf, |
| - tx_info.info, tx_info.skb, t); |
| + tx_info.info, tx_info.skb, t, NULL); |
| |
| unmap: |
| for (n--; n > 0; n--) |
| @@ -598,20 +599,21 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) |
| spin_lock_bh(&q->lock); |
| |
| while (q->queued < q->ndesc - 1) { |
| - struct mt76_txwi_cache *t = NULL; |
| + struct mt76_rxwi_cache *r = NULL; |
| struct mt76_queue_buf qbuf; |
| bool skip_alloc = false; |
| void *buf = NULL; |
| |
| if ((q->flags & MT_QFLAG_WED) && |
| FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) { |
| - t = mt76_get_rxwi(dev); |
| - if (!t) |
| + r = mt76_get_rxwi(dev); |
| + if (!r) |
| break; |
| |
| - if (t->ptr) { |
| + /* reuse the skb buffer for wed rx copy */
| + if (r->ptr) { |
| skip_alloc = true; |
| - buf = t->ptr; |
| + buf = r->ptr; |
| } |
| } |
| |
| @@ -630,7 +632,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) |
| qbuf.addr = addr + offset; |
| qbuf.len = len - offset; |
| qbuf.skip_unmap = false; |
| - if (mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t) < 0) { |
| + if (mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL, r) < 0) { |
| dma_unmap_single(dev->dma_dev, addr, len, |
| DMA_FROM_DEVICE); |
| skb_free_frag(buf); |
| diff --git a/mac80211.c b/mac80211.c |
| index de9ef237..818f4f0c 100644 |
| --- a/mac80211.c |
| +++ b/mac80211.c |
| @@ -597,7 +597,6 @@ mt76_alloc_device(struct device *pdev, unsigned int size, |
| spin_lock_init(&dev->lock); |
| spin_lock_init(&dev->cc_lock); |
| spin_lock_init(&dev->status_lock); |
| - spin_lock_init(&dev->wed_lock); |
| mutex_init(&dev->mutex); |
| init_waitqueue_head(&dev->tx_wait); |
| |
| @@ -628,6 +627,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size, |
| INIT_LIST_HEAD(&dev->txwi_cache); |
| INIT_LIST_HEAD(&dev->rxwi_cache); |
| dev->token_size = dev->drv->token_size; |
| + dev->rx_token_size = dev->drv->rx_token_size; |
| |
| for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) |
| skb_queue_head_init(&dev->rx_skb[i]); |
| diff --git a/mt76.h b/mt76.h |
| index f1795778..42364b81 100644 |
| --- a/mt76.h |
| +++ b/mt76.h |
| @@ -166,6 +166,7 @@ struct mt76_queue_entry { |
| }; |
| union { |
| struct mt76_txwi_cache *txwi; |
| + struct mt76_rxwi_cache *rxwi; |
| struct urb *urb; |
| int buf_sz; |
| }; |
| @@ -354,10 +355,15 @@ struct mt76_txwi_cache { |
| struct list_head list; |
| dma_addr_t dma_addr; |
| |
| - union { |
| - struct sk_buff *skb; |
| - void *ptr; |
| - }; |
| + struct sk_buff *skb; |
| +}; |
| + |
| +struct mt76_rxwi_cache { |
| + struct list_head list; |
| + dma_addr_t dma_addr; |
| + |
| + void *ptr; |
| + u32 token; |
| }; |
| |
| struct mt76_rx_tid { |
| @@ -441,6 +447,7 @@ struct mt76_driver_ops { |
| u16 txwi_size; |
| u16 token_size; |
| u8 mcs_rates; |
| + u16 rx_token_size; |
| |
| void (*update_survey)(struct mt76_phy *phy); |
| |
| @@ -805,7 +812,6 @@ struct mt76_dev { |
| |
| struct ieee80211_hw *hw; |
| |
| - spinlock_t wed_lock; |
| spinlock_t lock; |
| spinlock_t cc_lock; |
| |
| @@ -1394,8 +1400,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb) |
| } |
| |
| void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t); |
| -void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t); |
| -struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev); |
| +void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r); |
| +struct mt76_rxwi_cache *mt76_get_rxwi(struct mt76_dev *dev); |
| void mt76_free_pending_rxwi(struct mt76_dev *dev); |
| void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, |
| struct napi_struct *napi); |
| @@ -1541,9 +1547,9 @@ struct mt76_txwi_cache * |
| mt76_token_release(struct mt76_dev *dev, int token, bool *wake); |
| int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi); |
| void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked); |
| -struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token); |
| +struct mt76_rxwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token); |
| int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr, |
| - struct mt76_txwi_cache *r, dma_addr_t phys); |
| + struct mt76_rxwi_cache *r, dma_addr_t phys); |
| |
| static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked) |
| { |
| diff --git a/mt7915/dma.c b/mt7915/dma.c |
| index 36260085..9cbd3625 100644 |
| --- a/mt7915/dma.c |
| +++ b/mt7915/dma.c |
| @@ -492,7 +492,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2) |
| mtk_wed_get_rx_capa(&mdev->mmio.wed)) { |
| dev->mt76.q_rx[MT_RXQ_MAIN].flags = |
| MT_WED_Q_RX(MT7915_RXQ_BAND0); |
| - dev->mt76.rx_token_size += MT7915_RX_RING_SIZE; |
| } |
| |
| ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], |
| @@ -529,7 +528,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2) |
| mtk_wed_get_rx_capa(&mdev->mmio.wed)) { |
| dev->mt76.q_rx[MT_RXQ_BAND1].flags = |
| MT_WED_Q_RX(MT7915_RXQ_BAND1); |
| - dev->mt76.rx_token_size += MT7915_RX_RING_SIZE; |
| } |
| |
| /* rx data queue for band1 */ |
| diff --git a/mt7915/mmio.c b/mt7915/mmio.c |
| index 992beca3..ba728dd0 100644 |
| --- a/mt7915/mmio.c |
| +++ b/mt7915/mmio.c |
| @@ -603,18 +603,18 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed) |
| |
| dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed); |
| for (i = 0; i < dev->mt76.rx_token_size; i++) { |
| - struct mt76_txwi_cache *t; |
| + struct mt76_rxwi_cache *r; |
| |
| - t = mt76_rx_token_release(&dev->mt76, i); |
| - if (!t || !t->ptr) |
| + r = mt76_rx_token_release(&dev->mt76, i); |
| + if (!r || !r->ptr) |
| continue; |
| |
| - dma_unmap_single(dev->mt76.dma_dev, t->dma_addr, |
| + dma_unmap_single(dev->mt76.dma_dev, r->dma_addr, |
| wed->wlan.rx_size, DMA_FROM_DEVICE); |
| - skb_free_frag(t->ptr); |
| - t->ptr = NULL; |
| + skb_free_frag(r->ptr); |
| + r->ptr = NULL; |
| |
| - mt76_put_rxwi(&dev->mt76, t); |
| + mt76_put_rxwi(&dev->mt76, r); |
| } |
| |
| mt76_free_pending_rxwi(&dev->mt76); |
| @@ -639,7 +639,7 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size) |
| sizeof(struct skb_shared_info)); |
| |
| for (i = 0; i < size; i++) { |
| - struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76); |
| + struct mt76_rxwi_cache *r = mt76_get_rxwi(&dev->mt76); |
| dma_addr_t phy_addr; |
| int token; |
| void *ptr; |
| @@ -658,7 +658,7 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size) |
| } |
| |
| desc->buf0 = cpu_to_le32(phy_addr); |
| - token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr); |
| + token = mt76_rx_token_consume(&dev->mt76, ptr, r, phy_addr); |
| if (token < 0) { |
| dma_unmap_single(dev->mt76.dma_dev, phy_addr, |
| wed->wlan.rx_size, DMA_TO_DEVICE); |
| @@ -786,7 +786,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr, |
| wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats; |
| wed->wlan.ser_trigger = mt7915_wed_trigger_ser; |
| |
| - dev->mt76.rx_token_size = wed->wlan.rx_npkt; |
| + dev->mt76.rx_token_size += wed->wlan.rx_npkt; |
| |
| if (mtk_wed_device_attach(wed)) |
| return 0; |
| @@ -992,6 +992,7 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev, |
| SURVEY_INFO_TIME_RX | |
| SURVEY_INFO_TIME_BSS_RX, |
| .token_size = MT7915_TOKEN_SIZE, |
| + .rx_token_size = MT7915_RX_TOKEN_SIZE, |
| .tx_prepare_skb = mt7915_tx_prepare_skb, |
| .tx_complete_skb = mt76_connac_tx_complete_skb, |
| .rx_skb = mt7915_queue_rx_skb, |
| diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h |
| index ed293e70..6def0596 100644 |
| --- a/mt7915/mt7915.h |
| +++ b/mt7915/mt7915.h |
| @@ -65,6 +65,7 @@ |
| |
| #define MT7915_EEPROM_BLOCK_SIZE 16 |
| #define MT7915_TOKEN_SIZE 8192 |
| +#define MT7915_RX_TOKEN_SIZE 4096 |
| |
| #define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */ |
| #define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ |
| diff --git a/tx.c b/tx.c |
| index 6d55566f..a72b7779 100644 |
| --- a/tx.c |
| +++ b/tx.c |
| @@ -756,16 +756,16 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi) |
| EXPORT_SYMBOL_GPL(mt76_token_consume); |
| |
| int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr, |
| - struct mt76_txwi_cache *t, dma_addr_t phys) |
| + struct mt76_rxwi_cache *r, dma_addr_t phys) |
| { |
| int token; |
| |
| spin_lock_bh(&dev->rx_token_lock); |
| - token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size, |
| + token = idr_alloc(&dev->rx_token, r, 0, dev->rx_token_size, |
| GFP_ATOMIC); |
| if (token >= 0) { |
| - t->ptr = ptr; |
| - t->dma_addr = phys; |
| + r->ptr = ptr; |
| + r->dma_addr = phys; |
| } |
| spin_unlock_bh(&dev->rx_token_lock); |
| |
| @@ -802,15 +802,15 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake) |
| } |
| EXPORT_SYMBOL_GPL(mt76_token_release); |
| |
| -struct mt76_txwi_cache * |
| +struct mt76_rxwi_cache * |
| mt76_rx_token_release(struct mt76_dev *dev, int token) |
| { |
| - struct mt76_txwi_cache *t; |
| + struct mt76_rxwi_cache *r; |
| |
| spin_lock_bh(&dev->rx_token_lock); |
| - t = idr_remove(&dev->rx_token, token); |
| + r = idr_remove(&dev->rx_token, token); |
| spin_unlock_bh(&dev->rx_token_lock); |
| |
| - return t; |
| + return r; |
| } |
| EXPORT_SYMBOL_GPL(mt76_rx_token_release); |
| -- |
| 2.18.0 |
| |