// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * Author: Felix Fietkau <nbd@nbd.name>
 *	   Lorenzo Bianconi <lorenzo@kernel.org>
 *	   Sean Wang <sean.wang@mediatek.com>
 */
| 8 | |
#include <linux/kernel.h>
#include <linux/iopoll.h>
#include <linux/module.h>

#include <linux/mmc/host.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>

#include "trace.h"
#include "sdio.h"
#include "mt76.h"
| 20 | |
| 21 | static int mt76s_refill_sched_quota(struct mt76_dev *dev, u32 *data) |
| 22 | { |
| 23 | u32 ple_ac_data_quota[] = { |
| 24 | FIELD_GET(TXQ_CNT_L, data[4]), /* VO */ |
| 25 | FIELD_GET(TXQ_CNT_H, data[3]), /* VI */ |
| 26 | FIELD_GET(TXQ_CNT_L, data[3]), /* BE */ |
| 27 | FIELD_GET(TXQ_CNT_H, data[2]), /* BK */ |
| 28 | }; |
| 29 | u32 pse_ac_data_quota[] = { |
| 30 | FIELD_GET(TXQ_CNT_H, data[1]), /* VO */ |
| 31 | FIELD_GET(TXQ_CNT_L, data[1]), /* VI */ |
| 32 | FIELD_GET(TXQ_CNT_H, data[0]), /* BE */ |
| 33 | FIELD_GET(TXQ_CNT_L, data[0]), /* BK */ |
| 34 | }; |
| 35 | u32 pse_mcu_quota = FIELD_GET(TXQ_CNT_L, data[2]); |
| 36 | u32 pse_data_quota = 0, ple_data_quota = 0; |
| 37 | struct mt76_sdio *sdio = &dev->sdio; |
| 38 | int i; |
| 39 | |
| 40 | for (i = 0; i < ARRAY_SIZE(pse_ac_data_quota); i++) { |
| 41 | pse_data_quota += pse_ac_data_quota[i]; |
| 42 | ple_data_quota += ple_ac_data_quota[i]; |
| 43 | } |
| 44 | |
| 45 | if (!pse_data_quota && !ple_data_quota && !pse_mcu_quota) |
| 46 | return 0; |
| 47 | |
| 48 | sdio->sched.pse_mcu_quota += pse_mcu_quota; |
| 49 | sdio->sched.pse_data_quota += pse_data_quota; |
| 50 | sdio->sched.ple_data_quota += ple_data_quota; |
| 51 | |
| 52 | return pse_data_quota + ple_data_quota + pse_mcu_quota; |
| 53 | } |
| 54 | |
| 55 | static struct sk_buff * |
| 56 | mt76s_build_rx_skb(void *data, int data_len, int buf_len) |
| 57 | { |
| 58 | int len = min_t(int, data_len, MT_SKB_HEAD_LEN); |
| 59 | struct sk_buff *skb; |
| 60 | |
| 61 | skb = alloc_skb(len, GFP_KERNEL); |
| 62 | if (!skb) |
| 63 | return NULL; |
| 64 | |
| 65 | skb_put_data(skb, data, len); |
| 66 | if (data_len > len) { |
| 67 | struct page *page; |
| 68 | |
| 69 | data += len; |
| 70 | page = virt_to_head_page(data); |
| 71 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, |
| 72 | page, data - page_address(page), |
| 73 | data_len - len, buf_len); |
| 74 | get_page(page); |
| 75 | } |
| 76 | |
| 77 | return skb; |
| 78 | } |
| 79 | |
| 80 | static int |
| 81 | mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid, |
| 82 | struct mt76s_intr *intr) |
| 83 | { |
| 84 | struct mt76_queue *q = &dev->q_rx[qid]; |
| 85 | struct mt76_sdio *sdio = &dev->sdio; |
| 86 | int len = 0, err, i; |
| 87 | struct page *page; |
| 88 | u8 *buf, *end; |
| 89 | |
| 90 | for (i = 0; i < intr->rx.num[qid]; i++) |
| 91 | len += round_up(intr->rx.len[qid][i] + 4, 4); |
| 92 | |
| 93 | if (!len) |
| 94 | return 0; |
| 95 | |
| 96 | if (len > sdio->func->cur_blksize) |
| 97 | len = roundup(len, sdio->func->cur_blksize); |
| 98 | |
| 99 | page = __dev_alloc_pages(GFP_KERNEL, get_order(len)); |
| 100 | if (!page) |
| 101 | return -ENOMEM; |
| 102 | |
| 103 | buf = page_address(page); |
| 104 | |
| 105 | sdio_claim_host(sdio->func); |
| 106 | err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len); |
| 107 | sdio_release_host(sdio->func); |
| 108 | |
| 109 | if (err < 0) { |
| 110 | dev_err(dev->dev, "sdio read data failed:%d\n", err); |
| 111 | put_page(page); |
| 112 | return err; |
| 113 | } |
| 114 | |
| 115 | end = buf + len; |
| 116 | i = 0; |
| 117 | |
| 118 | while (i < intr->rx.num[qid] && buf < end) { |
| 119 | int index = (q->head + i) % q->ndesc; |
| 120 | struct mt76_queue_entry *e = &q->entry[index]; |
| 121 | __le32 *rxd = (__le32 *)buf; |
| 122 | |
| 123 | /* parse rxd to get the actual packet length */ |
| 124 | len = le32_get_bits(rxd[0], GENMASK(15, 0)); |
| 125 | |
| 126 | /* Optimized path for TXS */ |
| 127 | if (!dev->drv->rx_check || dev->drv->rx_check(dev, buf, len)) { |
| 128 | e->skb = mt76s_build_rx_skb(buf, len, |
| 129 | round_up(len + 4, 4)); |
| 130 | if (!e->skb) |
| 131 | break; |
| 132 | |
| 133 | if (q->queued + i + 1 == q->ndesc) |
| 134 | break; |
| 135 | i++; |
| 136 | } |
| 137 | buf += round_up(len + 4, 4); |
| 138 | } |
| 139 | put_page(page); |
| 140 | |
| 141 | spin_lock_bh(&q->lock); |
| 142 | q->head = (q->head + i) % q->ndesc; |
| 143 | q->queued += i; |
| 144 | spin_unlock_bh(&q->lock); |
| 145 | |
| 146 | return i; |
| 147 | } |
| 148 | |
| 149 | static int mt76s_rx_handler(struct mt76_dev *dev) |
| 150 | { |
| 151 | struct mt76_sdio *sdio = &dev->sdio; |
| 152 | struct mt76s_intr intr; |
| 153 | int nframes = 0, ret; |
| 154 | |
| 155 | ret = sdio->parse_irq(dev, &intr); |
| 156 | if (ret) |
| 157 | return ret; |
| 158 | |
| 159 | trace_dev_irq(dev, intr.isr, 0); |
| 160 | |
| 161 | if (intr.isr & WHIER_RX0_DONE_INT_EN) { |
| 162 | ret = mt76s_rx_run_queue(dev, 0, &intr); |
| 163 | if (ret > 0) { |
| 164 | mt76_worker_schedule(&sdio->net_worker); |
| 165 | nframes += ret; |
| 166 | } |
| 167 | } |
| 168 | |
| 169 | if (intr.isr & WHIER_RX1_DONE_INT_EN) { |
| 170 | ret = mt76s_rx_run_queue(dev, 1, &intr); |
| 171 | if (ret > 0) { |
| 172 | mt76_worker_schedule(&sdio->net_worker); |
| 173 | nframes += ret; |
| 174 | } |
| 175 | } |
| 176 | |
| 177 | nframes += !!mt76s_refill_sched_quota(dev, intr.tx.wtqcr); |
| 178 | |
| 179 | return nframes; |
| 180 | } |
| 181 | |
| 182 | static int |
| 183 | mt76s_tx_pick_quota(struct mt76_sdio *sdio, bool mcu, int buf_sz, |
| 184 | int *pse_size, int *ple_size) |
| 185 | { |
| 186 | int pse_sz; |
| 187 | |
| 188 | pse_sz = DIV_ROUND_UP(buf_sz + sdio->sched.deficit, |
| 189 | sdio->sched.pse_page_size); |
| 190 | |
| 191 | if (mcu && sdio->hw_ver == MT76_CONNAC2_SDIO) |
| 192 | pse_sz = 1; |
| 193 | |
| 194 | if (mcu) { |
| 195 | if (sdio->sched.pse_mcu_quota < *pse_size + pse_sz) |
| 196 | return -EBUSY; |
| 197 | } else { |
| 198 | if (sdio->sched.pse_data_quota < *pse_size + pse_sz || |
| 199 | sdio->sched.ple_data_quota < *ple_size + 1) |
| 200 | return -EBUSY; |
| 201 | |
| 202 | *ple_size = *ple_size + 1; |
| 203 | } |
| 204 | *pse_size = *pse_size + pse_sz; |
| 205 | |
| 206 | return 0; |
| 207 | } |
| 208 | |
| 209 | static void |
| 210 | mt76s_tx_update_quota(struct mt76_sdio *sdio, bool mcu, int pse_size, |
| 211 | int ple_size) |
| 212 | { |
| 213 | if (mcu) { |
| 214 | sdio->sched.pse_mcu_quota -= pse_size; |
| 215 | } else { |
| 216 | sdio->sched.pse_data_quota -= pse_size; |
| 217 | sdio->sched.ple_data_quota -= ple_size; |
| 218 | } |
| 219 | } |
| 220 | |
| 221 | static int __mt76s_xmit_queue(struct mt76_dev *dev, u8 *data, int len) |
| 222 | { |
| 223 | struct mt76_sdio *sdio = &dev->sdio; |
| 224 | int err; |
| 225 | |
| 226 | if (len > sdio->func->cur_blksize) |
| 227 | len = roundup(len, sdio->func->cur_blksize); |
| 228 | |
| 229 | sdio_claim_host(sdio->func); |
| 230 | err = sdio_writesb(sdio->func, MCR_WTDR1, data, len); |
| 231 | sdio_release_host(sdio->func); |
| 232 | |
| 233 | if (err) |
| 234 | dev_err(dev->dev, "sdio write failed: %d\n", err); |
| 235 | |
| 236 | return err; |
| 237 | } |
| 238 | |
/* Drain entries between q->first and q->head, aggregating as many frames
 * as fit into sdio->xmit_buf before issuing a single SDIO write.
 * Returns the number of frames sent or a negative error code.
 */
static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
	bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
	struct mt76_sdio *sdio = &dev->sdio;
	u8 pad;

	while (q->first != q->head) {
		struct mt76_queue_entry *e = &q->entry[q->first];
		struct sk_buff *iter;

		/* Order the read of the entry contents after the read of
		 * q->head above.
		 * NOTE(review): presumably paired with a barrier on the
		 * producer side - confirm against the queueing code.
		 */
		smp_rmb();

		/* During MCU reset, drop the frame but still consume the
		 * ring entry below.
		 */
		if (test_bit(MT76_MCU_RESET, &dev->phy.state))
			goto next;

		if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
			/* MCU not up yet: send each frame individually with
			 * a 4-byte zero trailer appended in place.
			 */
			__skb_put_zero(e->skb, 4);
			err = __mt76s_xmit_queue(dev, e->skb->data,
						 e->skb->len);
			if (err)
				return err;

			goto next;
		}

		/* Stop aggregating if frame + alignment padding + the final
		 * 4-byte terminator would overflow the xmit buffer.
		 */
		pad = roundup(e->skb->len, 4) - e->skb->len;
		if (len + e->skb->len + pad + 4 > dev->sdio.xmit_buf_sz)
			break;

		/* Stop if the device-side scheduler quota is exhausted;
		 * the entry stays queued for a later run.
		 */
		if (mt76s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
					&ple_sz))
			break;

		memcpy(sdio->xmit_buf + len, e->skb->data, skb_headlen(e->skb));
		len += skb_headlen(e->skb);
		nframes++;

		/* Copy any chained fragment skbs linearly after the head */
		skb_walk_frags(e->skb, iter) {
			memcpy(sdio->xmit_buf + len, iter->data, iter->len);
			len += iter->len;
			nframes++;
		}

		/* Pad each frame to a 4-byte boundary */
		if (unlikely(pad)) {
			memset(sdio->xmit_buf + len, 0, pad);
			len += pad;
		}
next:
		q->first = (q->first + 1) % q->ndesc;
		e->done = true;
	}

	if (nframes) {
		/* Terminate the aggregate with 4 zero bytes */
		memset(sdio->xmit_buf + len, 0, 4);
		err = __mt76s_xmit_queue(dev, sdio->xmit_buf, len + 4);
		if (err)
			return err;
	}
	mt76s_tx_update_quota(sdio, mcu, pse_sz, ple_sz);

	/* Let the status worker reclaim the completed entries */
	mt76_worker_schedule(&sdio->status_worker);

	return nframes;
}
| 304 | |
/* Main SDIO tx/rx worker: runs with the device interrupt masked and loops
 * until neither direction makes progress, then re-enables the interrupt.
 */
void mt76s_txrx_worker(struct mt76_sdio *sdio)
{
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	int i, nframes, ret;

	/* disable interrupt */
	sdio_claim_host(sdio->func);
	sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
	sdio_release_host(sdio->func);

	do {
		nframes = 0;

		/* tx */
		for (i = 0; i <= MT_TXQ_PSD; i++) {
			ret = mt76s_tx_run_queue(dev, dev->phy.q_tx[i]);
			if (ret > 0)
				nframes += ret;
		}
		ret = mt76s_tx_run_queue(dev, dev->q_mcu[MT_MCUQ_WM]);
		if (ret > 0)
			nframes += ret;

		/* rx */
		ret = mt76s_rx_handler(dev);
		if (ret > 0)
			nframes += ret;

		/* On reset/suspend, keep looping until the tx queues have
		 * drained, then wake whoever is waiting on sdio->wait.
		 * Note: "continue" in a do-while jumps to the condition
		 * check, so the loop still exits once nframes hits 0.
		 */
		if (test_bit(MT76_MCU_RESET, &dev->phy.state) ||
		    test_bit(MT76_STATE_SUSPEND, &dev->phy.state)) {
			if (!mt76s_txqs_empty(dev))
				continue;
			else
				wake_up(&sdio->wait);
		}
	} while (nframes > 0);

	/* enable interrupt */
	sdio_claim_host(sdio->func);
	sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
	sdio_release_host(sdio->func);
}
EXPORT_SYMBOL_GPL(mt76s_txrx_worker);
| 348 | |
| 349 | void mt76s_sdio_irq(struct sdio_func *func) |
| 350 | { |
| 351 | struct mt76_dev *dev = sdio_get_drvdata(func); |
| 352 | struct mt76_sdio *sdio = &dev->sdio; |
| 353 | |
| 354 | if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state) || |
| 355 | test_bit(MT76_MCU_RESET, &dev->phy.state)) |
| 356 | return; |
| 357 | |
| 358 | sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL); |
| 359 | mt76_worker_schedule(&sdio->txrx_worker); |
| 360 | } |
| 361 | EXPORT_SYMBOL_GPL(mt76s_sdio_irq); |
| 362 | |
| 363 | bool mt76s_txqs_empty(struct mt76_dev *dev) |
| 364 | { |
| 365 | struct mt76_queue *q; |
| 366 | int i; |
| 367 | |
| 368 | for (i = 0; i <= MT_TXQ_PSD + 1; i++) { |
| 369 | if (i <= MT_TXQ_PSD) |
| 370 | q = dev->phy.q_tx[i]; |
| 371 | else |
| 372 | q = dev->q_mcu[MT_MCUQ_WM]; |
| 373 | |
| 374 | if (q->first != q->head) |
| 375 | return false; |
| 376 | } |
| 377 | |
| 378 | return true; |
| 379 | } |
| 380 | EXPORT_SYMBOL_GPL(mt76s_txqs_empty); |