// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
 *
 * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
 */

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <virt-dma.h>

#include "hpdma.h"
#include "hwspinlock.h"
#include "internal.h"
#include "mbox.h"
#include "mcu.h"

#define HPDMA_CHAN_NUM			(4)

#define MTK_HPDMA_ALIGN_SIZE		(DMAENGINE_ALIGN_16_BYTES)
#define MTK_HPDMA_DMA_BUSWIDTHS		(BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

struct hpdma_dev;
struct hpdma_vchan;
struct hpdma_vdesc;
struct hpdma_init_data;

typedef struct hpdma_dev *(*hpdma_init_func_t)(struct platform_device *pdev,
					       const struct hpdma_init_data *data);
typedef void (*tx_pending_desc_t)(struct hpdma_dev *hpdma,
				  struct hpdma_vchan *hchan,
				  struct hpdma_vdesc *hdesc);
typedef struct dma_chan *(*of_dma_xlate_func_t)(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma);

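/*
 * Software descriptor for one HPDMA memcpy transfer: src/dst bus addresses,
 * total length in bytes, and the derived transfer parameters (axsize is the
 * log2 of the transfer unit size, total_num is len divided by that unit).
 */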
struct hpdma_vdesc {
	struct virt_dma_desc vdesc;
	dma_addr_t src;
	dma_addr_t dst;
	u32 total_num;
	u32 axsize;
	size_t len;
};

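/*
 * Virtual channel state: tx_work defers descriptor issuing to a workqueue,
 * issued_desc tracks the transfer currently on the HW, and stop_wait lets
 * terminate_all sleep until an on-the-fly transfer completes (busy goes false).
 */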
struct hpdma_vchan {
	struct virt_dma_chan vchan;
	struct work_struct tx_work;
	struct hpdma_vdesc *issued_desc;
	wait_queue_head_t stop_wait;
	bool busy;
	bool terminating;
	u8 pchan_id;
};

struct hpdma_ops {
	int (*vchan_init)(struct hpdma_dev *hpdma, struct dma_device *ddev);
	void (*vchan_deinit)(struct hpdma_dev *hpdma);
	int (*mbox_init)(struct platform_device *pdev, struct hpdma_dev *hpdma);
	void (*mbox_deinit)(struct platform_device *pdev, struct hpdma_dev *hpdma);
	tx_pending_desc_t tx_pending_desc;
	of_dma_xlate_func_t of_dma_xlate;
};

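/*
 * Per-variant (TOP or cluster HPDMA) configuration: ops hooks, the init
 * routine that allocates the device wrapper, the mailbox completion handler,
 * and the hwspinlock group/slots used to gain permission to start a transfer
 * and to occupy a physical channel.
 */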
struct hpdma_init_data {
	struct hpdma_ops ops;
	hpdma_init_func_t init;
	mbox_handler_func_t mbox_handler;
	enum hwspinlock_group hwspinlock_grp;
	u32 trigger_start_slot; /* permission to start dma transfer */
	u32 ch_base_slot; /* permission to occupy a physical channel */
};

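/*
 * One HPDMA controller instance. issued_chan is the single virtual channel
 * whose descriptor currently owns the HW; hpdma->lock is held from the moment
 * a transfer is prepared until the mailbox completion handler releases it.
 */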
struct hpdma_dev {
	struct dma_device ddev;
	struct hpdma_ops ops;
	struct hpdma_vchan *hvchans;
	struct hpdma_vchan *issued_chan;
	spinlock_t lock; /* serialize APMCU processes contending for the hw spinlock */
	void __iomem *base;
	enum hwspinlock_group hwspinlock_grp;
	u32 trigger_start_slot; /* permission to start dma transfer */
	u32 ch_base_slot; /* permission to occupy a physical channel */
};

struct top_hpdma_dev {
	struct mailbox_dev mdev;
	struct hpdma_dev hpdma;
};

struct clust_hpdma_dev {
	struct mailbox_dev mdev[CORE_MAX];
	struct hpdma_dev hpdma;
};

static inline void hpdma_write(struct hpdma_dev *hpdma, u32 reg, u32 val)
{
	writel(val, hpdma->base + reg);
}

static inline void hpdma_set(struct hpdma_dev *hpdma, u32 reg, u32 mask)
{
	setbits(hpdma->base + reg, mask);
}

static inline void hpdma_clr(struct hpdma_dev *hpdma, u32 reg, u32 mask)
{
	clrbits(hpdma->base + reg, mask);
}

static inline void hpdma_rmw(struct hpdma_dev *hpdma, u32 reg, u32 mask, u32 val)
{
	clrsetbits(hpdma->base + reg, mask, val);
}

static inline u32 hpdma_read(struct hpdma_dev *hpdma, u32 reg)
{
	return readl(hpdma->base + reg);
}

static inline struct hpdma_dev *chan_to_hpdma_dev(struct dma_chan *chan)
{
	return container_of(chan->device, struct hpdma_dev, ddev);
}

static inline struct hpdma_vchan *chan_to_hpdma_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct hpdma_vchan, vchan.chan);
}

static inline struct hpdma_vdesc *vdesc_to_hpdma_vdesc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct hpdma_vdesc, vdesc);
}

static inline void __mtk_hpdma_vchan_deinit(struct virt_dma_chan *vchan)
{
	list_del(&vchan->chan.device_node);
	tasklet_kill(&vchan->task);
}

static inline int mtk_hpdma_prepare_transfer(struct hpdma_dev *hpdma)
{
	/*
	 * Held until the transfer is done (released by the mailbox handler)
	 * to prevent other APMCU processes from contending for the hw spinlock.
	 * Since this lock is never acquired in interrupt context, it is safe
	 * to hold it without disabling IRQs.
	 */
	spin_lock(&hpdma->lock);

	/* no channel is expected to be issued at this point */
	if (!hpdma->issued_chan)
		return 0;

	dev_err(hpdma->ddev.dev,
		"hpdma issued_chan is not empty when transfer started\n");

	WARN_ON(1);

	spin_unlock(&hpdma->lock);

	return -1;
}

static inline void mtk_hpdma_unprepare_transfer(struct hpdma_dev *hpdma)
{
	spin_unlock(&hpdma->lock);
}

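/*
 * Commit @hvdesc to the HW: called with hpdma->lock held and the physical
 * channel's hwspinlock slot already taken. Grabs the start-permission
 * hwspinlock, records the issued descriptor/channel, then kicks the transfer.
 * Returns non-zero if the channel is being terminated and the start is aborted.
 */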
static inline int mtk_hpdma_start_transfer(struct hpdma_dev *hpdma,
					   struct hpdma_vchan *hvchan,
					   struct hpdma_vdesc *hvdesc)
{
	/* occupy hpdma start permission */
	mtk_tops_hwspin_lock(hpdma->hwspinlock_grp, hpdma->trigger_start_slot);

	/* let the terminate flow know that HW is about to start */
	hvchan->busy = true;

	list_del(&hvdesc->vdesc.node);

	/* set vdesc to current channel's pending transfer */
	hvchan->issued_desc = hvdesc;
	hpdma->issued_chan = hvchan;

	/* last chance to abort the transfer if the channel is terminating */
	if (unlikely(hvchan->terminating))
		goto terminate_transfer;

	/* trigger dma start */
	hpdma_set(hpdma, TOPS_HPDMA_X_START(hvchan->pchan_id), HPDMA_START);

	return 0;

terminate_transfer:
	hvchan->busy = false;

	hpdma->issued_chan = NULL;

	mtk_tops_hwspin_unlock(hpdma->hwspinlock_grp, hpdma->trigger_start_slot);

	return -1;
}

/* set up a channel's parameters before it acquires permission to start the transfer */
static inline void mtk_hpdma_config_pchan(struct hpdma_dev *hpdma,
					  struct hpdma_vchan *hvchan,
					  struct hpdma_vdesc *hvdesc)
{
	/* update axsize */
	hpdma_rmw(hpdma,
		  TOPS_HPDMA_X_CTRL(hvchan->pchan_id),
		  HPDMA_AXSIZE_MASK,
		  FIELD_PREP(HPDMA_AXSIZE_MASK, hvdesc->axsize));

	/* update total num */
	hpdma_rmw(hpdma,
		  TOPS_HPDMA_X_NUM(hvchan->pchan_id),
		  HPDMA_TOTALNUM_MASK,
		  FIELD_PREP(HPDMA_TOTALNUM_MASK, hvdesc->total_num));

	/* set src addr */
	hpdma_write(hpdma, TOPS_HPDMA_X_SRC(hvchan->pchan_id), hvdesc->src);

	/* set dst addr */
	hpdma_write(hpdma, TOPS_HPDMA_X_DST(hvchan->pchan_id), hvdesc->dst);
}

/*
 * TODO: in general, we should allocate some buffer for dma transmission.
 * Nothing to allocate for hpdma right now?
 * TODO: we may not need this right now
 */
static int mtk_hpdma_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

/* TODO: we may not need this right now */
static void mtk_hpdma_free_chan_resources(struct dma_chan *chan)
{
	/* stop all transmission; we have nothing to free for each channel */
	dmaengine_terminate_sync(chan);
}

static void mtk_hpdma_issue_vchan_pending(struct hpdma_dev *hpdma,
					  struct hpdma_vchan *hvchan)
{
	struct virt_dma_desc *vdesc;

	/* vchan's lock needs to be held since its list will be modified */
	lockdep_assert_held(&hvchan->vchan.lock);

	/* if there is a pending transfer on the fly, wait until it is done */
	if (unlikely(hvchan->issued_desc))
		return;

	/* fetch the next desc to process */
	vdesc = vchan_next_desc(&hvchan->vchan);
	if (unlikely(!vdesc))
		return;

	/* start to transfer a pending descriptor */
	hpdma->ops.tx_pending_desc(hpdma, hvchan, vdesc_to_hpdma_vdesc(vdesc));
}

static void mtk_hpdma_issue_pending(struct dma_chan *chan)
{
	struct hpdma_dev *hpdma = chan_to_hpdma_dev(chan);
	struct hpdma_vchan *hvchan = chan_to_hpdma_vchan(chan);
	unsigned long flag;

	spin_lock_irqsave(&hvchan->vchan.lock, flag);

	if (vchan_issue_pending(&hvchan->vchan))
		mtk_hpdma_issue_vchan_pending(hpdma, hvchan);

	spin_unlock_irqrestore(&hvchan->vchan.lock, flag);
}

/*
 * since hpdma cannot report how many chunks are left to transfer,
 * we can only report whether the current desc has completed or not
 */
static enum dma_status mtk_hpdma_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *tx_state)
{
	return dma_cookie_status(chan, cookie, tx_state);
}

/* optimize the hpdma parameters to get maximum throughput */
static int mtk_hpdma_config_desc(struct hpdma_vdesc *hvdesc)
{
	/* use a signed local so the "no suitable unit" case can be detected */
	int axsize = 4;

	/*
	 * the total transfer length = (1 << axsize) * total_num
	 * the transfer unit (1 << axsize) can be 1, 2, 4, 8 or 16 bytes,
	 * so pick the largest unit that divides the length evenly
	 */
	while (axsize >= 0 && hvdesc->len % (0x1 << axsize))
		axsize--;

	if (axsize < 0)
		return -EINVAL;

	hvdesc->axsize = axsize;
	hvdesc->total_num = hvdesc->len / (0x1 << axsize);

	return 0;
}

static struct dma_async_tx_descriptor *mtk_hpdma_prep_dma_memcpy(struct dma_chan *chan,
								 dma_addr_t dst,
								 dma_addr_t src,
								 size_t len,
								 unsigned long flags)
{
	struct hpdma_vdesc *hvdesc;
	int ret = 0;

	if (!len)
		return ERR_PTR(-EPERM);

	/* the HW only takes 32-bit source/destination addresses */
	if (dst > 0xFFFFFFFF || src > 0xFFFFFFFF)
		return ERR_PTR(-EINVAL);

	hvdesc = kzalloc(sizeof(*hvdesc), GFP_NOWAIT);
	if (!hvdesc)
		return ERR_PTR(-ENOMEM);

	hvdesc->src = src;
	hvdesc->dst = dst;
	hvdesc->len = len;

	ret = mtk_hpdma_config_desc(hvdesc);
	if (ret) {
		kfree(hvdesc);
		return ERR_PTR(ret);
	}

	return vchan_tx_prep(to_virt_chan(chan), &hvdesc->vdesc, flags);
}

static void mtk_hpdma_terminate_all_inactive_desc(struct dma_chan *chan)
{
	struct virt_dma_chan *vchan = to_virt_chan(chan);
	unsigned long flag;
	LIST_HEAD(head);

	spin_lock_irqsave(&vchan->lock, flag);

	list_splice_tail_init(&vchan->desc_allocated, &head);
	list_splice_tail_init(&vchan->desc_submitted, &head);
	list_splice_tail_init(&vchan->desc_issued, &head);

	spin_unlock_irqrestore(&vchan->lock, flag);

	vchan_dma_desc_free_list(vchan, &head);
}

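/*
 * Terminate a channel: free all descriptors that have not reached the HW,
 * then, if one is on the fly, wait for the mailbox completion handler to
 * clear "busy" before dropping it. "terminating" also tells the issue path
 * to abort any start that races with the termination.
 */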
static int mtk_hpdma_terminate_all(struct dma_chan *chan)
{
	struct hpdma_vchan *hvchan = chan_to_hpdma_vchan(chan);
	struct hpdma_vdesc *hvdesc;

	hvchan->terminating = true;

	/* first terminate all inactive descriptors */
	mtk_hpdma_terminate_all_inactive_desc(chan);

	/*
	 * snapshot the issued descriptor here: the mailbox handler clears
	 * hvchan->issued_desc once the HW is done with it
	 */
	hvdesc = hvchan->issued_desc;
	if (!hvdesc)
		goto out;

	/* if there is a desc on the fly, we must wait until it is done */
	wait_event_interruptible(hvchan->stop_wait, !hvchan->busy);

	vchan_terminate_vdesc(&hvdesc->vdesc);

	hvchan->issued_desc = NULL;

	vchan_synchronize(&hvchan->vchan);

out:
	hvchan->terminating = false;

	return 0;
}

static void mtk_hpdma_vdesc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct hpdma_vdesc, vdesc));
}

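/*
 * Workqueue handler that issues the next pending descriptor of a channel;
 * scheduled by the mailbox completion handler when more descriptors are
 * queued on the channel.
 */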
static void mtk_hpdma_tx_work(struct work_struct *work)
{
	struct hpdma_vchan *hvchan = container_of(work, struct hpdma_vchan, tx_work);
	struct hpdma_dev *hpdma = chan_to_hpdma_dev(&hvchan->vchan.chan);
	unsigned long flag;

	if (unlikely(!vchan_next_desc(&hvchan->vchan)))
		return;

	spin_lock_irqsave(&hvchan->vchan.lock, flag);

	mtk_hpdma_issue_vchan_pending(hpdma, hvchan);

	spin_unlock_irqrestore(&hvchan->vchan.lock, flag);
}

static int mtk_hpdma_provider_init(struct platform_device *pdev,
				   struct hpdma_dev *hpdma)
{
	struct dma_device *ddev = &hpdma->ddev;
	int ret = 0;

	dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);

	ddev->dev = &pdev->dev;
	ddev->directions = BIT(DMA_MEM_TO_MEM);
	ddev->copy_align = MTK_HPDMA_ALIGN_SIZE;
	ddev->src_addr_widths = MTK_HPDMA_DMA_BUSWIDTHS;
	ddev->dst_addr_widths = MTK_HPDMA_DMA_BUSWIDTHS;
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	ddev->device_alloc_chan_resources = mtk_hpdma_alloc_chan_resources;
	ddev->device_free_chan_resources = mtk_hpdma_free_chan_resources;
	ddev->device_issue_pending = mtk_hpdma_issue_pending;
	ddev->device_tx_status = mtk_hpdma_tx_status;
	ddev->device_prep_dma_memcpy = mtk_hpdma_prep_dma_memcpy;
	ddev->device_terminate_all = mtk_hpdma_terminate_all;

	INIT_LIST_HEAD(&ddev->channels);

	ret = hpdma->ops.vchan_init(hpdma, ddev);
	if (ret)
		return ret;

	ret = dma_async_device_register(ddev);
	if (ret) {
		dev_err(&pdev->dev, "register async dma device failed: %d\n", ret);
		return ret;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 hpdma->ops.of_dma_xlate,
					 ddev);
	if (ret) {
		dev_err(&pdev->dev, "register dma controller failed: %d\n", ret);
		goto unregister_async_dev;
	}

	return ret;

unregister_async_dev:
	dma_async_device_unregister(ddev);

	return ret;
}

static int mtk_hpdma_probe(struct platform_device *pdev)
{
	const struct hpdma_init_data *init_data;
	struct hpdma_dev *hpdma;
	struct resource *res;
	int ret = 0;

	init_data = of_device_get_match_data(&pdev->dev);
	if (!init_data) {
		dev_err(&pdev->dev, "hpdma init data does not exist\n");
		return -ENODEV;
	}

	hpdma = init_data->init(pdev, init_data);
	if (IS_ERR(hpdma)) {
		dev_err(&pdev->dev, "hpdma init failed: %ld\n", PTR_ERR(hpdma));
		return PTR_ERR(hpdma);
	}

	memcpy(&hpdma->ops, &init_data->ops, sizeof(struct hpdma_ops));
	hpdma->hwspinlock_grp = init_data->hwspinlock_grp;
	hpdma->trigger_start_slot = init_data->trigger_start_slot;
	hpdma->ch_base_slot = init_data->ch_base_slot;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		return -ENXIO;

	hpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!hpdma->base)
		return -ENOMEM;

	/* the lock must be ready before the dmaengine provider is registered */
	spin_lock_init(&hpdma->lock);

	/*
	 * since hpdma does not send a signal to the APMCU,
	 * we need the TOPS mailbox to notify us when hpdma is done
	 */
	ret = hpdma->ops.mbox_init(pdev, hpdma);
	if (ret)
		return ret;

	ret = mtk_hpdma_provider_init(pdev, hpdma);
	if (ret)
		goto unregister_mbox;

	platform_set_drvdata(pdev, hpdma);

	dev_info(hpdma->ddev.dev, "hpdma init done\n");

	return ret;

unregister_mbox:
	hpdma->ops.mbox_deinit(pdev, hpdma);

	return ret;
}

static int mtk_hpdma_remove(struct platform_device *pdev)
{
	struct hpdma_dev *hpdma = platform_get_drvdata(pdev);

	if (!hpdma)
		return 0;

	hpdma->ops.vchan_deinit(hpdma);

	hpdma->ops.mbox_deinit(pdev, hpdma);

	dma_async_device_unregister(&hpdma->ddev);

	of_dma_controller_free(pdev->dev.of_node);

	return 0;
}

static struct dma_chan *mtk_clust_hpdma_of_xlate(struct of_phandle_args *dma_spec,
						 struct of_dma *ofdma)
{
	struct dma_device *ddev = ofdma->of_dma_data;
	struct hpdma_dev *hpdma;
	u32 id;

	if (!ddev || dma_spec->args_count != 2)
		return ERR_PTR(-EINVAL);

	hpdma = container_of(ddev, struct hpdma_dev, ddev);
	id = dma_spec->args[0] * CORE_OFFLOAD_NUM + dma_spec->args[1];

	return dma_get_slave_channel(&hpdma->hvchans[id].vchan.chan);
}

static struct hpdma_dev *mtk_top_hpdma_init(struct platform_device *pdev,
					    const struct hpdma_init_data *data)
{
	struct top_hpdma_dev *top_hpdma = NULL;

	if (!data)
		return ERR_PTR(-EINVAL);

	top_hpdma = devm_kzalloc(&pdev->dev, sizeof(*top_hpdma), GFP_KERNEL);
	if (!top_hpdma)
		return ERR_PTR(-ENOMEM);

	top_hpdma->mdev.core = CORE_MGMT;
	top_hpdma->mdev.cmd_id = MBOX_CM2AP_CMD_HPDMA;
	top_hpdma->mdev.mbox_handler = data->mbox_handler;
	top_hpdma->mdev.priv = &top_hpdma->hpdma;

	return &top_hpdma->hpdma;
}

static void mtk_top_hpdma_vchan_deinit(struct hpdma_dev *hpdma)
{
	struct hpdma_vchan *hvchan;
	u32 i;

	for (i = 0; i < __TOP_HPDMA_REQ; i++) {
		hvchan = &hpdma->hvchans[i];
		__mtk_hpdma_vchan_deinit(&hvchan->vchan);
	}
}

static int mtk_top_hpdma_vchan_init(struct hpdma_dev *hpdma, struct dma_device *ddev)
{
	struct hpdma_vchan *hvchan;
	u32 i;

	hpdma->hvchans = devm_kcalloc(ddev->dev, __TOP_HPDMA_REQ,
				      sizeof(struct hpdma_vchan),
				      GFP_KERNEL);
	if (!hpdma->hvchans)
		return -ENOMEM;

	for (i = 0; i < __TOP_HPDMA_REQ; i++) {
		hvchan = &hpdma->hvchans[i];

		init_waitqueue_head(&hvchan->stop_wait);
		INIT_WORK(&hvchan->tx_work, mtk_hpdma_tx_work);

		hvchan->vchan.desc_free = mtk_hpdma_vdesc_free;
		/*
		 * TODO: maybe init the vchan ourselves with a customized
		 * tasklet? If we set up a customized tasklet to transmit the
		 * remaining chunks of a channel, we must be careful with
		 * hpdma->lock since it would then be acquired in softirq
		 * context.
		 */
		vchan_init(&hvchan->vchan, ddev);
	}

	return 0;
}

static void mtk_top_hpdma_unregister_mbox(struct platform_device *pdev,
					  struct hpdma_dev *hpdma)
{
	struct top_hpdma_dev *top_hpdma;

	top_hpdma = container_of(hpdma, struct top_hpdma_dev, hpdma);

	unregister_mbox_dev(MBOX_RECV, &top_hpdma->mdev);
}

static int mtk_top_hpdma_register_mbox(struct platform_device *pdev,
				       struct hpdma_dev *hpdma)
{
	struct top_hpdma_dev *top_hpdma;
	int ret = 0;

	top_hpdma = container_of(hpdma, struct top_hpdma_dev, hpdma);

	ret = register_mbox_dev(MBOX_RECV, &top_hpdma->mdev);
	if (ret) {
		dev_err(&pdev->dev, "register mailbox device failed: %d\n", ret);
		return ret;
	}

	return ret;
}

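/*
 * TOP HPDMA: a virtual channel may run on any physical channel, so spin over
 * the per-channel hwspinlock slots until one is acquired (or the channel is
 * terminated), then program the channel and start the transfer.
 */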
static void mtk_top_hpdma_tx_pending_desc(struct hpdma_dev *hpdma,
					  struct hpdma_vchan *hvchan,
					  struct hpdma_vdesc *hvdesc)
{
	u32 slot = hpdma->ch_base_slot;
	enum hwspinlock_group grp = hpdma->hwspinlock_grp;

	hvchan->pchan_id = 0;

	if (mtk_hpdma_prepare_transfer(hpdma))
		return;

	/* occupy a hpdma physical channel */
	while (!mtk_tops_hwspin_try_lock(grp, slot)) {
		if (unlikely(hvchan->terminating)) {
			spin_unlock(&hpdma->lock);
			return;
		}

		hvchan->pchan_id = (hvchan->pchan_id + 1) % HPDMA_CHAN_NUM;
		if (++slot - hpdma->ch_base_slot == HPDMA_CHAN_NUM)
			slot = hpdma->ch_base_slot;
	}

	mtk_hpdma_config_pchan(hpdma, hvchan, hvdesc);

	if (!mtk_hpdma_start_transfer(hpdma, hvchan, hvdesc))
		return;

	/* start transfer failed */
	mtk_tops_hwspin_unlock(grp, slot);

	mtk_hpdma_unprepare_transfer(hpdma);

	wake_up_interruptible(&hvchan->stop_wait);
}

static struct hpdma_dev *mtk_clust_hpdma_init(struct platform_device *pdev,
					      const struct hpdma_init_data *data)
{
	struct clust_hpdma_dev *clust_hpdma = NULL;
	u32 i;

	if (!data)
		return ERR_PTR(-EINVAL);

	clust_hpdma = devm_kzalloc(&pdev->dev, sizeof(*clust_hpdma), GFP_KERNEL);
	if (!clust_hpdma)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
		clust_hpdma->mdev[i].core = CORE_OFFLOAD_0 + i;
		clust_hpdma->mdev[i].cmd_id = MBOX_CX2AP_CMD_HPDMA;
		clust_hpdma->mdev[i].mbox_handler = data->mbox_handler;
		clust_hpdma->mdev[i].priv = &clust_hpdma->hpdma;
	}

	return &clust_hpdma->hpdma;
}

static void mtk_clust_hpdma_vchan_deinit(struct hpdma_dev *hpdma)
{
	struct hpdma_vchan *hvchan;
	u32 i, j;

	for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
		for (j = 0; j < __CLUST_HPDMA_REQ; j++) {
			hvchan = &hpdma->hvchans[i * __CLUST_HPDMA_REQ + j];
			__mtk_hpdma_vchan_deinit(&hvchan->vchan);
		}
	}
}

static int mtk_clust_hpdma_vchan_init(struct hpdma_dev *hpdma, struct dma_device *ddev)
{
	struct hpdma_vchan *hvchan;
	u32 i, j;

	hpdma->hvchans = devm_kcalloc(ddev->dev, __CLUST_HPDMA_REQ * CORE_OFFLOAD_NUM,
				      sizeof(struct hpdma_vchan),
				      GFP_KERNEL);
	if (!hpdma->hvchans)
		return -ENOMEM;

	for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
		for (j = 0; j < __CLUST_HPDMA_REQ; j++) {
			hvchan = &hpdma->hvchans[i * __CLUST_HPDMA_REQ + j];

			hvchan->pchan_id = i;
			init_waitqueue_head(&hvchan->stop_wait);
			INIT_WORK(&hvchan->tx_work, mtk_hpdma_tx_work);

			hvchan->vchan.desc_free = mtk_hpdma_vdesc_free;
			/*
			 * TODO: maybe init the vchan ourselves with a
			 * customized tasklet? If we set up a customized
			 * tasklet to transmit the remaining chunks of a
			 * channel, we must be careful with hpdma->lock since
			 * it would then be acquired in softirq context.
			 */
			vchan_init(&hvchan->vchan, ddev);
		}
	}

	return 0;
}

static void mtk_clust_hpdma_unregister_mbox(struct platform_device *pdev,
					    struct hpdma_dev *hpdma)
{
	struct clust_hpdma_dev *clust_hpdma;
	u32 i;

	clust_hpdma = container_of(hpdma, struct clust_hpdma_dev, hpdma);

	for (i = 0; i < CORE_OFFLOAD_NUM; i++)
		unregister_mbox_dev(MBOX_RECV, &clust_hpdma->mdev[i]);
}

static int mtk_clust_hpdma_register_mbox(struct platform_device *pdev,
					 struct hpdma_dev *hpdma)
{
	struct clust_hpdma_dev *clust_hpdma;
	int ret = 0;
	int i;

	clust_hpdma = container_of(hpdma, struct clust_hpdma_dev, hpdma);

	for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
		ret = register_mbox_dev(MBOX_RECV, &clust_hpdma->mdev[i]);
		if (ret) {
			dev_err(&pdev->dev, "register mbox%d failed: %d\n", i, ret);
			goto unregister_mbox;
		}
	}

	return ret;

unregister_mbox:
	for (--i; i >= 0; i--)
		unregister_mbox_dev(MBOX_RECV, &clust_hpdma->mdev[i]);

	return ret;
}

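/*
 * Cluster HPDMA: each virtual channel is bound to a fixed physical channel
 * (pchan_id assigned at vchan init), so its hwspinlock slot can be taken
 * directly instead of polling all slots as the TOP variant does.
 */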
static void mtk_clust_hpdma_tx_pending_desc(struct hpdma_dev *hpdma,
					    struct hpdma_vchan *hvchan,
					    struct hpdma_vdesc *hvdesc)
{
	u32 slot = hpdma->ch_base_slot + hvchan->pchan_id;
	enum hwspinlock_group grp = hpdma->hwspinlock_grp;

	if (mtk_hpdma_prepare_transfer(hpdma))
		return;

	/* occupy the hpdma physical channel */
	mtk_tops_hwspin_lock(grp, slot);

	mtk_hpdma_config_pchan(hpdma, hvchan, hvdesc);

	if (!mtk_hpdma_start_transfer(hpdma, hvchan, hvdesc))
		return;

	/* start transfer failed */
	mtk_tops_hwspin_unlock(grp, slot);

	mtk_hpdma_unprepare_transfer(hpdma);

	wake_up_interruptible(&hvchan->stop_wait);
}

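/*
 * Mailbox completion handler: the TOPS core signals here when the HW has
 * finished the issued transfer. Release the start/channel hwspinlocks and
 * the hpdma->lock taken on the issue path, complete the cookie, and schedule
 * tx_work if more descriptors are queued on the channel. A terminating
 * channel is only woken up so terminate_all can make progress.
 */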
static enum mbox_msg_cnt mtk_hpdma_ap_recv_mbox_msg(struct mailbox_dev *mdev,
						    struct mailbox_msg *msg)
{
	struct hpdma_dev *hpdma = mdev->priv;
	struct hpdma_vchan *hvchan;
	struct hpdma_vdesc *hvdesc;
	enum hwspinlock_group grp;
	unsigned long flag;
	u32 slot;

	if (!hpdma)
		return MBOX_NO_RET_MSG;

	hvchan = hpdma->issued_chan;
	if (!hvchan) {
		dev_err(hpdma->ddev.dev, "unexpected hpdma mailbox recv\n");
		return MBOX_NO_RET_MSG;
	}

	grp = hpdma->hwspinlock_grp;

	hvdesc = hvchan->issued_desc;

	/* clear the issued channel before releasing the hwspinlock */
	hpdma->issued_chan = NULL;

	hvchan->busy = false;
	hvchan->issued_desc = NULL;

	/* release the hwspinlocks */
	slot = hvchan->pchan_id + hpdma->ch_base_slot;

	mtk_tops_hwspin_unlock(grp, hpdma->trigger_start_slot);

	mtk_tops_hwspin_unlock(grp, slot);

	/* release to let other APMCU processes contend for the hw spinlock */
	spin_unlock(&hpdma->lock);

	if (unlikely(hvchan->terminating)) {
		wake_up_interruptible(&hvchan->stop_wait);
		return MBOX_NO_RET_MSG;
	}

	/*
	 * complete the vdesc and schedule tx work again
	 * if there are more vdescs left in the channel
	 */
	spin_lock_irqsave(&hvchan->vchan.lock, flag);

	vchan_cookie_complete(&hvdesc->vdesc);

	if (vchan_next_desc(&hvchan->vchan))
		schedule_work(&hvchan->tx_work);

	spin_unlock_irqrestore(&hvchan->vchan.lock, flag);

	return MBOX_NO_RET_MSG;
}

static struct hpdma_init_data top_hpdma_init_data = {
	.ops = {
		.vchan_init = mtk_top_hpdma_vchan_init,
		.vchan_deinit = mtk_top_hpdma_vchan_deinit,
		.mbox_init = mtk_top_hpdma_register_mbox,
		.mbox_deinit = mtk_top_hpdma_unregister_mbox,
		.tx_pending_desc = mtk_top_hpdma_tx_pending_desc,
		.of_dma_xlate = of_dma_xlate_by_chan_id,
	},
	.init = mtk_top_hpdma_init,
	.mbox_handler = mtk_hpdma_ap_recv_mbox_msg,
	.hwspinlock_grp = HWSPINLOCK_GROUP_TOP,
	.trigger_start_slot = HWSPINLOCK_TOP_SLOT_HPDMA_LOCK,
	.ch_base_slot = HWSPINLOCK_TOP_SLOT_HPDMA_PCH0,
};

static struct hpdma_init_data clust_hpdma_init_data = {
	.ops = {
		.vchan_init = mtk_clust_hpdma_vchan_init,
		.vchan_deinit = mtk_clust_hpdma_vchan_deinit,
		.mbox_init = mtk_clust_hpdma_register_mbox,
		.mbox_deinit = mtk_clust_hpdma_unregister_mbox,
		.tx_pending_desc = mtk_clust_hpdma_tx_pending_desc,
		.of_dma_xlate = mtk_clust_hpdma_of_xlate,
	},
	.init = mtk_clust_hpdma_init,
	.mbox_handler = mtk_hpdma_ap_recv_mbox_msg,
	.hwspinlock_grp = HWSPINLOCK_GROUP_CLUST,
	.trigger_start_slot = HWSPINLOCK_CLUST_SLOT_HPDMA_LOCK,
	.ch_base_slot = HWSPINLOCK_CLUST_SLOT_HPDMA_PCH0,
};

static const struct of_device_id mtk_hpdma_match[] = {
	{ .compatible = "mediatek,hpdma-top", .data = &top_hpdma_init_data, },
	{ .compatible = "mediatek,hpdma-sub", .data = &clust_hpdma_init_data, },
	{ },
};

static struct platform_driver mtk_hpdma_driver = {
	.probe = mtk_hpdma_probe,
	.remove = mtk_hpdma_remove,
	.driver = {
		.name = "mediatek,hpdma",
		.owner = THIS_MODULE,
		.of_match_table = mtk_hpdma_match,
	},
};

int __init mtk_tops_hpdma_init(void)
{
	return platform_driver_register(&mtk_hpdma_driver);
}

void __exit mtk_tops_hpdma_exit(void)
{
	platform_driver_unregister(&mtk_hpdma_driver);
}