From 4e691afbbc58b616a8d8516a10138021bcf9186a Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Thu, 5 Jan 2023 16:43:57 +0800
Subject: [PATCH 3012/3012] wifi: mt76: connac: wed: add wed rx copy skb

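Instead of passing the WED rx buffer itself up the stack, copy the
payload into a freshly allocated frag on dequeue and immediately
re-program the original buffer, together with its rxwi cache entry,
back into the rx ring. Allocate the WED rx buffers from page_frag
caches rather than full page allocations, and drain those caches when
the buffers are released.
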
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
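Note (kept out of the commit message; git-am discards text between the
"---" marker and the diffstat): the heart of this patch is the new copy
path in mt76_dma_get_buf(). The sketch below condenses it from the
hunks that follow; the identifiers are the patch's own, but the error
handling is trimmed, so read it as an illustration rather than the
literal driver code:

        if (flush) {
                /* rx queue teardown: hand the buffer itself back and
                 * recycle its rxwi cache entry
                 */
                buf = r->ptr;
                r->dma_addr = 0;
                r->ptr = NULL;
                mt76_put_rxwi(dev, r);
        } else {
                struct mt76_queue_buf qbuf;

                /* normal rx: pass a copy of the payload up the stack */
                buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
                memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size));

                /* ... then re-map the original buffer and re-queue it
                 * right away, reusing the same rxwi entry
                 */
                r->dma_addr = dma_map_single(dev->dma_dev, r->ptr,
                                             SKB_WITH_OVERHEAD(q->buf_size),
                                             DMA_FROM_DEVICE);
                qbuf.addr = r->dma_addr;
                qbuf.len = SKB_WITH_OVERHEAD(q->buf_size);
                qbuf.skip_unmap = false;
                mt76_dma_add_rx_buf(dev, q, &qbuf, r->ptr, r);
        }

Because dequeued WED rx buffers are now re-added to the ring inside
mt76_dma_get_buf() rather than by mt76_dma_rx_fill(), the fill path can
come up with frames == 0 and still have re-programmed descriptors to
hand over, which appears to be why mt76_dma_rx_fill() now kicks WED rx
queues unconditionally. The mt7915/mmio.c hunks make the matching
teardown changes: buffers are freed as frags, and both the per-queue
rx_page caches and wed->rx_buf_ring.rx_page are drained on release.
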
 dma.c         | 76 +++++++++++++++++++++++++++++++++++++++------------
 mt7915/mmio.c | 35 ++++++++++++++++++------
 2 files changed, 85 insertions(+), 26 deletions(-)

diff --git a/dma.c b/dma.c
index dfe69829..1fc4bd2d 100644
--- a/dma.c
+++ b/dma.c
@@ -208,11 +208,11 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)

 static int
 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
-                    struct mt76_queue_buf *buf, void *data)
+                    struct mt76_queue_buf *buf, void *data,
+                    struct mt76_rxwi_cache *rxwi)
 {
         struct mt76_desc *desc = &q->desc[q->head];
         struct mt76_queue_entry *entry = &q->entry[q->head];
-        struct mt76_rxwi_cache *rxwi = NULL;
         u32 buf1 = 0, ctrl;
         int idx = q->head;
         int rx_token;
@@ -220,9 +220,11 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
         ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);

         if (mt76_queue_is_wed_rx(q)) {
-                rxwi = mt76_get_rxwi(dev);
-                if (!rxwi)
-                        return -ENOMEM;
+                if (!rxwi) {
+                        rxwi = mt76_get_rxwi(dev);
+                        if (!rxwi)
+                                return -ENOMEM;
+                }

                 rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
                 if (rx_token < 0) {
@@ -387,7 +389,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)

 static void *
 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
-                 int *len, u32 *info, bool *more, bool *drop)
+                 int *len, u32 *info, bool *more, bool *drop, bool flush)
 {
         struct mt76_queue_entry *e = &q->entry[idx];
         struct mt76_desc *desc = &q->desc[idx];
@@ -437,11 +439,43 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
                                  SKB_WITH_OVERHEAD(q->buf_size),
                                  DMA_FROM_DEVICE);

-                buf = r->ptr;
-                r->dma_addr = 0;
-                r->ptr = NULL;
+                if (flush) {
+                        buf = r->ptr;
+                        r->dma_addr = 0;
+                        r->ptr = NULL;

-                mt76_put_rxwi(dev, r);
+                        mt76_put_rxwi(dev, r);
+                } else {
+                        struct mt76_queue_buf qbuf;
+
+                        buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+                        if (!buf)
+                                return NULL;
+
+                        memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size));
+
+                        r->dma_addr = dma_map_single(dev->dma_dev, r->ptr,
+                                                     SKB_WITH_OVERHEAD(q->buf_size),
+                                                     DMA_FROM_DEVICE);
+                        if (unlikely(dma_mapping_error(dev->dma_dev, r->dma_addr))) {
+                                skb_free_frag(r->ptr);
+                                mt76_put_rxwi(dev, r);
+                                return NULL;
+                        }
+
+                        qbuf.addr = r->dma_addr;
+                        qbuf.len = SKB_WITH_OVERHEAD(q->buf_size);
+                        qbuf.skip_unmap = false;
+
+                        if (mt76_dma_add_rx_buf(dev, q, &qbuf, r->ptr, r) < 0) {
+                                dma_unmap_single(dev->dma_dev, r->dma_addr,
+                                                 SKB_WITH_OVERHEAD(q->buf_size),
+                                                 DMA_FROM_DEVICE);
+                                skb_free_frag(r->ptr);
+                                mt76_put_rxwi(dev, r);
+                                return NULL;
+                        }
+                }

                 if (drop) {
                         u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
@@ -480,7 +514,7 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
         q->tail = (q->tail + 1) % q->ndesc;
         q->queued--;

-        return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
+        return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
 }

 static int
@@ -612,6 +646,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
         int len = SKB_WITH_OVERHEAD(q->buf_size);
         int frames = 0, offset = q->buf_offset;
         dma_addr_t addr;
+        bool flags = false;

         if (!q->ndesc)
                 return 0;
@@ -635,7 +670,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
                 qbuf.addr = addr + offset;
                 qbuf.len = len - offset;
                 qbuf.skip_unmap = false;
-                if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
+                if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf, NULL) < 0) {
                         dma_unmap_single(dev->dma_dev, addr, len,
                                          DMA_FROM_DEVICE);
                         skb_free_frag(buf);
@@ -644,7 +679,10 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
                 frames++;
         }

-        if (frames)
+        flags = (q->flags & MT_QFLAG_WED) &&
+                FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
+
+        if (frames || flags)
                 mt76_dma_kick_queue(dev, q);

         spin_unlock_bh(&q->lock);
@@ -767,12 +805,14 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)

         spin_unlock_bh(&q->lock);

-        if (!q->rx_page.va)
-                return;
+        if (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) != MT76_WED_Q_RX) {
+                if (!q->rx_page.va)
+                        return;

-        page = virt_to_page(q->rx_page.va);
-        __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
-        memset(&q->rx_page, 0, sizeof(q->rx_page));
+                page = virt_to_page(q->rx_page.va);
+                __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+                memset(&q->rx_page, 0, sizeof(q->rx_page));
+        }
 }

 static void
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 65ee2afa..614bb407 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -581,6 +581,7 @@ static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
 static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 {
         struct mt7915_dev *dev;
+        struct page *page;
         u32 length;
         int i;

@@ -597,13 +598,33 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)

                 dma_unmap_single(dev->mt76.dma_dev, r->dma_addr,
                                  wed->wlan.rx_size, DMA_FROM_DEVICE);
-                __free_pages(virt_to_page(r->ptr), get_order(length));
+                skb_free_frag(r->ptr);
                 r->ptr = NULL;

                 mt76_put_rxwi(&dev->mt76, r);
         }

         mt76_free_pending_rxwi(&dev->mt76);
+
+        mt76_for_each_q_rx(&dev->mt76, i) {
+                struct mt76_queue *q = &dev->mt76.q_rx[i];
+
+                if (mt76_queue_is_wed_rx(q)) {
+                        if (!q->rx_page.va)
+                                continue;
+
+                        page = virt_to_page(q->rx_page.va);
+                        __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+                        memset(&q->rx_page, 0, sizeof(q->rx_page));
+                }
+        }
+
+        if (!wed->rx_buf_ring.rx_page.va)
+                return;
+
+        page = virt_to_page(wed->rx_buf_ring.rx_page.va);
+        __page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias);
+        memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page));
 }

 static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
@@ -620,35 +641,33 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
         for (i = 0; i < size; i++) {
                 struct mt76_rxwi_cache *r = mt76_get_rxwi(&dev->mt76);
                 dma_addr_t phy_addr;
-                struct page *page;
                 int token;
                 void *ptr;

                 if (!r)
                         goto unmap;

-                page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
-                if (!page) {
+                ptr = page_frag_alloc(&wed->rx_buf_ring.rx_page, length, GFP_ATOMIC);
+                if (!ptr) {
                         mt76_put_rxwi(&dev->mt76, r);
                         goto unmap;
                 }

-                ptr = page_address(page);
                 phy_addr = dma_map_single(dev->mt76.dma_dev, ptr,
                                           wed->wlan.rx_size,
                                           DMA_TO_DEVICE);
+
                 if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
-                        __free_pages(page, get_order(length));
+                        skb_free_frag(ptr);
                         mt76_put_rxwi(&dev->mt76, r);
                         goto unmap;
                 }
-
                 desc->buf0 = cpu_to_le32(phy_addr);
                 token = mt76_rx_token_consume(&dev->mt76, ptr, r, phy_addr);
                 if (token < 0) {
                         dma_unmap_single(dev->mt76.dma_dev, phy_addr,
                                          wed->wlan.rx_size, DMA_TO_DEVICE);
-                        __free_pages(page, get_order(length));
+                        skb_free_frag(ptr);
                         mt76_put_rxwi(&dev->mt76, r);
                         goto unmap;
                 }
-- 
2.39.0
