// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
 *
 * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
 */

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <virt-dma.h>

#include "tops/hpdma.h"
#include "tops/hwspinlock.h"
#include "tops/internal.h"
#include "tops/mbox.h"
#include "tops/mcu.h"

#define HPDMA_CHAN_NUM			(4)

#define MTK_HPDMA_ALIGN_SIZE		(DMAENGINE_ALIGN_16_BYTES)
#define MTK_HPDMA_DMA_BUSWIDTHS		(BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

struct hpdma_dev;
struct hpdma_vchan;
struct hpdma_vdesc;
struct hpdma_init_data;

typedef struct hpdma_dev *(*hpdma_init_func_t)(struct platform_device *pdev,
					       const struct hpdma_init_data *data);
typedef void (*tx_pending_desc_t)(struct hpdma_dev *hpdma,
				  struct hpdma_vchan *hchan,
				  struct hpdma_vdesc *hdesc);
typedef struct dma_chan *(*of_dma_xlate_func_t)(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma);

struct hpdma_vdesc {
	struct virt_dma_desc vdesc;
	dma_addr_t src;
	dma_addr_t dst;
	u32 total_num;
	u32 axsize;
	size_t len;
};

struct hpdma_vchan {
	struct virt_dma_chan vchan;
	struct work_struct tx_work;
	struct hpdma_vdesc *issued_desc;
	wait_queue_head_t stop_wait;
	bool busy;
	bool terminating;
	u8 pchan_id;
};

struct hpdma_ops {
	int (*vchan_init)(struct hpdma_dev *hpdma, struct dma_device *ddev);
	void (*vchan_deinit)(struct hpdma_dev *hpdma);
	int (*mbox_init)(struct platform_device *pdev, struct hpdma_dev *hpdma);
	void (*mbox_deinit)(struct platform_device *pdev, struct hpdma_dev *hpdma);
	tx_pending_desc_t tx_pending_desc;
	of_dma_xlate_func_t of_dma_xlate;
};

struct hpdma_init_data {
	struct hpdma_ops ops;
	hpdma_init_func_t init;
	mbox_handler_func_t mbox_handler;
	enum hwspinlock_group hwspinlock_grp;
	u32 trigger_start_slot; /* permission to start dma transfer */
	u32 ch_base_slot; /* permission to occupy a physical channel */
};

struct hpdma_dev {
	struct dma_device ddev;
	struct hpdma_ops ops;
	struct hpdma_vchan *hvchans;
	struct hpdma_vchan *issued_chan;
	spinlock_t lock; /* prevent inter-process racing hwspinlock */
	void __iomem *base;
	enum hwspinlock_group hwspinlock_grp;
	u32 trigger_start_slot; /* permission to start dma transfer */
	u32 ch_base_slot; /* permission to occupy a physical channel */
};

struct top_hpdma_dev {
	struct mailbox_dev mdev;
	struct hpdma_dev hpdma;
};

struct clust_hpdma_dev {
	struct mailbox_dev mdev[CORE_MAX];
	struct hpdma_dev hpdma;
};

static inline void hpdma_write(struct hpdma_dev *hpdma, u32 reg, u32 val)
{
	writel(val, hpdma->base + reg);
}

static inline void hpdma_set(struct hpdma_dev *hpdma, u32 reg, u32 mask)
{
	setbits(hpdma->base + reg, mask);
}

static inline void hpdma_clr(struct hpdma_dev *hpdma, u32 reg, u32 mask)
{
	clrbits(hpdma->base + reg, mask);
}

static inline void hpdma_rmw(struct hpdma_dev *hpdma, u32 reg, u32 mask, u32 val)
{
	clrsetbits(hpdma->base + reg, mask, val);
}

static inline u32 hpdma_read(struct hpdma_dev *hpdma, u32 reg)
{
	return readl(hpdma->base + reg);
}

static inline struct hpdma_dev *chan_to_hpdma_dev(struct dma_chan *chan)
{
	return container_of(chan->device, struct hpdma_dev, ddev);
}

static inline struct hpdma_vchan *chan_to_hpdma_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct hpdma_vchan, vchan.chan);
}

static inline struct hpdma_vdesc *vdesc_to_hpdma_vdesc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct hpdma_vdesc, vdesc);
}

static inline void __mtk_hpdma_vchan_deinit(struct virt_dma_chan *vchan)
{
	list_del(&vchan->chan.device_node);
	tasklet_kill(&vchan->task);
}

static inline int mtk_hpdma_prepare_transfer(struct hpdma_dev *hpdma)
{
	/*
	 * Released when the hpdma transfer is done.
	 * This keeps other APMCU processes from contending for the hw spinlock.
	 * Since the lock is never taken in interrupt context, it is safe to
	 * hold it without disabling IRQs.
	 */
	spin_lock(&hpdma->lock);

	/* no channel is expected to be issued at this point */
	if (!hpdma->issued_chan)
		return 0;

	dev_err(hpdma->ddev.dev,
		"hpdma issued_chan is not empty when transfer started");

	WARN_ON(1);

	spin_unlock(&hpdma->lock);

	return -1;
}

static inline void mtk_hpdma_unprepare_transfer(struct hpdma_dev *hpdma)
{
	spin_unlock(&hpdma->lock);
}
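
/*
 * Locking overview (summary of the flow implemented below): tx_pending_desc()
 * first calls mtk_hpdma_prepare_transfer() to take hpdma->lock, then grabs a
 * physical-channel hwspinlock slot, and finally mtk_hpdma_start_transfer()
 * takes the trigger slot and kicks the hardware. Everything is released either
 * in the error paths of those callers or in mtk_hpdma_ap_recv_mbox_msg() once
 * the mailbox reports completion.
 */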

static inline int mtk_hpdma_start_transfer(struct hpdma_dev *hpdma,
					   struct hpdma_vchan *hvchan,
					   struct hpdma_vdesc *hvdesc)
{
	/* occupy hpdma start permission */
	mtk_tops_hwspin_lock(hpdma->hwspinlock_grp, hpdma->trigger_start_slot);

	/* let the terminate flow know that the HW is about to start */
	hvchan->busy = true;

	list_del(&hvdesc->vdesc.node);

	/* set vdesc as the current channel's pending transfer */
	hvchan->issued_desc = hvdesc;
	hpdma->issued_chan = hvchan;

	/* last chance to abort the transfer if the channel is terminating */
	if (unlikely(hvchan->terminating))
		goto terminate_transfer;

	/* trigger dma start */
	hpdma_set(hpdma, TOPS_HPDMA_X_START(hvchan->pchan_id), HPDMA_START);

	return 0;

terminate_transfer:
	hvchan->busy = false;

	hpdma->issued_chan = NULL;

	mtk_tops_hwspin_unlock(hpdma->hwspinlock_grp, hpdma->trigger_start_slot);

	return -1;
}

/* set up a channel's parameters before it acquires permission to start the transfer */
static inline void mtk_hpdma_config_pchan(struct hpdma_dev *hpdma,
					  struct hpdma_vchan *hvchan,
					  struct hpdma_vdesc *hvdesc)
{
	/* update axsize */
	hpdma_rmw(hpdma,
		  TOPS_HPDMA_X_CTRL(hvchan->pchan_id),
		  HPDMA_AXSIZE_MASK,
		  FIELD_PREP(HPDMA_AXSIZE_MASK, hvdesc->axsize));

	/* update total num */
	hpdma_rmw(hpdma,
		  TOPS_HPDMA_X_NUM(hvchan->pchan_id),
		  HPDMA_TOTALNUM_MASK,
		  FIELD_PREP(HPDMA_TOTALNUM_MASK, hvdesc->total_num));

	/* set src addr */
	hpdma_write(hpdma, TOPS_HPDMA_X_SRC(hvchan->pchan_id), hvdesc->src);

	/* set dst addr */
	hpdma_write(hpdma, TOPS_HPDMA_X_DST(hvchan->pchan_id), hvdesc->dst);
}

/*
 * TODO: in general a driver would allocate buffers for DMA transmission here,
 * but hpdma currently has nothing to allocate, so this may not be needed.
 */
static int mtk_hpdma_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

/* TODO: we may not need this right now */
static void mtk_hpdma_free_chan_resources(struct dma_chan *chan)
{
	/* stop all transmissions; there is nothing else to free per channel */
	dmaengine_terminate_sync(chan);
}

static void mtk_hpdma_issue_vchan_pending(struct hpdma_dev *hpdma,
					  struct hpdma_vchan *hvchan)
{
	struct virt_dma_desc *vdesc;

	/* vchan's lock needs to be held since its list will be modified */
	lockdep_assert_held(&hvchan->vchan.lock);

	/* if there is a pending transfer on the fly, wait until it is done */
	if (unlikely(hvchan->issued_desc))
		return;

	/* fetch next desc to process */
	vdesc = vchan_next_desc(&hvchan->vchan);
	if (unlikely(!vdesc))
		return;

	/* start to transfer a pending descriptor */
	hpdma->ops.tx_pending_desc(hpdma, hvchan, vdesc_to_hpdma_vdesc(vdesc));
}

static void mtk_hpdma_issue_pending(struct dma_chan *chan)
{
	struct hpdma_dev *hpdma = chan_to_hpdma_dev(chan);
	struct hpdma_vchan *hvchan = chan_to_hpdma_vchan(chan);
	unsigned long flag;

	spin_lock_irqsave(&hvchan->vchan.lock, flag);

	if (vchan_issue_pending(&hvchan->vchan))
		mtk_hpdma_issue_vchan_pending(hpdma, hvchan);

	spin_unlock_irqrestore(&hvchan->vchan.lock, flag);
}

/*
 * since hpdma cannot report how many chunks are left to transfer,
 * we can only report whether the current desc is completed or not
 */
static enum dma_status mtk_hpdma_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *tx_state)
{
	return dma_cookie_status(chan, cookie, tx_state);
}

/* optimize the hpdma parameters to get maximum throughput */
static int mtk_hpdma_config_desc(struct hpdma_vdesc *hvdesc)
{
	hvdesc->axsize = 4;

	/*
	 * the total transfer length = (1 << axsize) * total_num
	 * the beat size (1 << axsize) can be 1, 2, 4, 8 or 16 bytes,
	 * so pick the largest beat size that evenly divides len
	 */
	while (hvdesc->axsize > 0 && hvdesc->len % (0x1 << hvdesc->axsize))
		hvdesc->axsize--;

	hvdesc->total_num = hvdesc->len / (0x1 << hvdesc->axsize);

	return 0;
}
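
/*
 * Worked example (illustrative only): for a 48-byte copy, 48 % 16 == 0, so
 * axsize stays 4 (16-byte beats) and total_num = 48 / 16 = 3. A 20-byte copy
 * drops to axsize = 2 (4-byte beats) with total_num = 20 / 4 = 5.
 */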

static struct dma_async_tx_descriptor *mtk_hpdma_prep_dma_memcpy(struct dma_chan *chan,
								 dma_addr_t dst,
								 dma_addr_t src,
								 size_t len,
								 unsigned long flags)
{
	struct hpdma_vdesc *hvdesc;
	int ret = 0;

	/* dmaengine prep callbacks are expected to return NULL on failure */
	if (!len)
		return NULL;

	/* hpdma only takes 32-bit source/destination addresses */
	if (dst > 0xFFFFFFFF || src > 0xFFFFFFFF)
		return NULL;

	hvdesc = kzalloc(sizeof(struct hpdma_vdesc), GFP_NOWAIT);
	if (!hvdesc)
		return NULL;

	hvdesc->src = src;
	hvdesc->dst = dst;
	hvdesc->len = len;

	ret = mtk_hpdma_config_desc(hvdesc);
	if (ret) {
		kfree(hvdesc);
		return NULL;
	}

	return vchan_tx_prep(to_virt_chan(chan), &hvdesc->vdesc, flags);
}

static void mtk_hpdma_terminate_all_inactive_desc(struct dma_chan *chan)
{
	struct virt_dma_chan *vchan = to_virt_chan(chan);
	unsigned long flag;
	LIST_HEAD(head);

	spin_lock_irqsave(&vchan->lock, flag);

	list_splice_tail_init(&vchan->desc_allocated, &head);
	list_splice_tail_init(&vchan->desc_submitted, &head);
	list_splice_tail_init(&vchan->desc_issued, &head);

	spin_unlock_irqrestore(&vchan->lock, flag);

	vchan_dma_desc_free_list(vchan, &head);
}

static int mtk_hpdma_terminate_all(struct dma_chan *chan)
{
	struct hpdma_vchan *hvchan = chan_to_hpdma_vchan(chan);

	hvchan->terminating = true;

	/* first terminate all inactive descriptors */
	mtk_hpdma_terminate_all_inactive_desc(chan);

	if (!hvchan->issued_desc)
		goto out;

	/* if there is a desc on the fly, we must wait until it is done */
	wait_event_interruptible(hvchan->stop_wait, !hvchan->busy);

	vchan_terminate_vdesc(&hvchan->issued_desc->vdesc);

	hvchan->issued_desc = NULL;

	vchan_synchronize(&hvchan->vchan);

out:
	hvchan->terminating = false;

	return 0;
}
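
/*
 * Termination handshake (as implemented above): terminate_all() sets
 * hvchan->terminating, drains the inactive descriptor lists, and then sleeps
 * on stop_wait until hvchan->busy clears. The wake-up comes either from the
 * mailbox completion handler or from the failed-start paths in the
 * tx_pending_desc callbacks further below.
 */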

static void mtk_hpdma_vdesc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct hpdma_vdesc, vdesc));
}

static void mtk_hpdma_tx_work(struct work_struct *work)
{
	struct hpdma_vchan *hvchan = container_of(work, struct hpdma_vchan, tx_work);
	struct hpdma_dev *hpdma = chan_to_hpdma_dev(&hvchan->vchan.chan);
	unsigned long flag;

	if (unlikely(!vchan_next_desc(&hvchan->vchan)))
		return;

	spin_lock_irqsave(&hvchan->vchan.lock, flag);

	mtk_hpdma_issue_vchan_pending(hpdma, hvchan);

	spin_unlock_irqrestore(&hvchan->vchan.lock, flag);
}

static int mtk_hpdma_provider_init(struct platform_device *pdev,
				   struct hpdma_dev *hpdma)
{
	struct dma_device *ddev = &hpdma->ddev;
	int ret = 0;

	dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);

	ddev->dev = &pdev->dev;
	ddev->directions = BIT(DMA_MEM_TO_MEM);
	ddev->copy_align = MTK_HPDMA_ALIGN_SIZE;
	ddev->src_addr_widths = MTK_HPDMA_DMA_BUSWIDTHS;
	ddev->dst_addr_widths = MTK_HPDMA_DMA_BUSWIDTHS;
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	ddev->device_alloc_chan_resources = mtk_hpdma_alloc_chan_resources;
	ddev->device_free_chan_resources = mtk_hpdma_free_chan_resources;
	ddev->device_issue_pending = mtk_hpdma_issue_pending;
	ddev->device_tx_status = mtk_hpdma_tx_status;
	ddev->device_prep_dma_memcpy = mtk_hpdma_prep_dma_memcpy;
	ddev->device_terminate_all = mtk_hpdma_terminate_all;

	INIT_LIST_HEAD(&ddev->channels);

	ret = hpdma->ops.vchan_init(hpdma, ddev);
	if (ret)
		return ret;

	ret = dma_async_device_register(ddev);
	if (ret) {
		dev_err(&pdev->dev, "register async dma device failed: %d\n", ret);
		return ret;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 hpdma->ops.of_dma_xlate,
					 ddev);
	if (ret) {
		dev_err(&pdev->dev, "register dma controller failed: %d\n", ret);
		goto unregister_async_dev;
	}

	return ret;

unregister_async_dev:
	dma_async_device_unregister(ddev);

	return ret;
}
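
/*
 * Hypothetical consumer sketch (not part of this driver): once the provider
 * above is registered, a client would use the generic dmaengine API, e.g.
 *
 *	chan = dma_request_chan(dev, "hpdma");	// channel name is assumed
 *	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * Completion is reported through the TOPS mailbox handler below
 * (mtk_hpdma_ap_recv_mbox_msg) rather than an APMCU interrupt.
 */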

static int mtk_hpdma_probe(struct platform_device *pdev)
{
	const struct hpdma_init_data *init_data;
	struct hpdma_dev *hpdma;
	struct resource *res;
	int ret = 0;

	init_data = of_device_get_match_data(&pdev->dev);
	if (!init_data) {
		dev_err(&pdev->dev, "hpdma init data does not exist\n");
		return -ENODEV;
	}

	hpdma = init_data->init(pdev, init_data);
	if (IS_ERR(hpdma)) {
		dev_err(&pdev->dev, "hpdma init failed: %ld\n", PTR_ERR(hpdma));
		return PTR_ERR(hpdma);
	}

	memcpy(&hpdma->ops, &init_data->ops, sizeof(struct hpdma_ops));
	hpdma->hwspinlock_grp = init_data->hwspinlock_grp;
	hpdma->trigger_start_slot = init_data->trigger_start_slot;
	hpdma->ch_base_slot = init_data->ch_base_slot;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		return -ENXIO;

	hpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!hpdma->base)
		return -ENOMEM;

	/*
	 * since hpdma does not signal the APMCU directly, we rely on the
	 * TOPS mailbox to notify us when an hpdma transfer is done
	 */
	ret = hpdma->ops.mbox_init(pdev, hpdma);
	if (ret)
		return ret;

	ret = mtk_hpdma_provider_init(pdev, hpdma);
	if (ret)
		goto unregister_mbox;

	spin_lock_init(&hpdma->lock);

	platform_set_drvdata(pdev, hpdma);

	dev_info(hpdma->ddev.dev, "hpdma init done\n");

	return ret;

unregister_mbox:
	hpdma->ops.mbox_deinit(pdev, hpdma);

	return ret;
}

static int mtk_hpdma_remove(struct platform_device *pdev)
{
	struct hpdma_dev *hpdma = platform_get_drvdata(pdev);

	if (!hpdma)
		return 0;

	hpdma->ops.vchan_deinit(hpdma);

	hpdma->ops.mbox_deinit(pdev, hpdma);

	dma_async_device_unregister(&hpdma->ddev);

	of_dma_controller_free(pdev->dev.of_node);

	return 0;
}

static struct dma_chan *mtk_clust_hpdma_of_xlate(struct of_phandle_args *dma_spec,
						 struct of_dma *ofdma)
{
	struct dma_device *ddev = ofdma->of_dma_data;
	struct hpdma_dev *hpdma;
	u32 id;

	if (!ddev || dma_spec->args_count != 2)
		return ERR_PTR(-EINVAL);

	hpdma = container_of(ddev, struct hpdma_dev, ddev);
	id = dma_spec->args[0] * CORE_OFFLOAD_NUM + dma_spec->args[1];

	return dma_get_slave_channel(&hpdma->hvchans[id].vchan.chan);
}

static struct hpdma_dev *mtk_top_hpdma_init(struct platform_device *pdev,
					    const struct hpdma_init_data *data)
{
	struct top_hpdma_dev *top_hpdma = NULL;

	if (!data)
		return ERR_PTR(-EINVAL);

	top_hpdma = devm_kzalloc(&pdev->dev, sizeof(*top_hpdma), GFP_KERNEL);
	if (!top_hpdma)
		return ERR_PTR(-ENOMEM);

	top_hpdma->mdev.core = CORE_MGMT;
	top_hpdma->mdev.cmd_id = MBOX_CM2AP_CMD_HPDMA;
	top_hpdma->mdev.mbox_handler = data->mbox_handler;
	top_hpdma->mdev.priv = &top_hpdma->hpdma;

	return &top_hpdma->hpdma;
}

static void mtk_top_hpdma_vchan_deinit(struct hpdma_dev *hpdma)
{
	struct hpdma_vchan *hvchan;
	u32 i;

	for (i = 0; i < __TOP_HPDMA_REQ; i++) {
		hvchan = &hpdma->hvchans[i];
		__mtk_hpdma_vchan_deinit(&hvchan->vchan);
	}
}

static int mtk_top_hpdma_vchan_init(struct hpdma_dev *hpdma, struct dma_device *ddev)
{
	struct hpdma_vchan *hvchan;
	u32 i;

	hpdma->hvchans = devm_kcalloc(ddev->dev, __TOP_HPDMA_REQ,
				      sizeof(struct hpdma_vchan),
				      GFP_KERNEL);
	if (!hpdma->hvchans)
		return -ENOMEM;

	for (i = 0; i < __TOP_HPDMA_REQ; i++) {
		hvchan = &hpdma->hvchans[i];

		init_waitqueue_head(&hvchan->stop_wait);
		INIT_WORK(&hvchan->tx_work, mtk_hpdma_tx_work);

		hvchan->vchan.desc_free = mtk_hpdma_vdesc_free;
		/*
		 * TODO: maybe init the vchan ourselves with a customized
		 * tasklet? If we set up a customized tasklet to transmit
		 * the remaining chunks of a channel, we must be careful with
		 * hpdma->lock since it would then be acquired in softirq
		 * context.
		 */
		vchan_init(&hvchan->vchan, ddev);
	}

	return 0;
}

static void mtk_top_hpdma_unregister_mbox(struct platform_device *pdev,
					  struct hpdma_dev *hpdma)
{
	struct top_hpdma_dev *top_hpdma;

	top_hpdma = container_of(hpdma, struct top_hpdma_dev, hpdma);

	unregister_mbox_dev(MBOX_RECV, &top_hpdma->mdev);
}

static int mtk_top_hpdma_register_mbox(struct platform_device *pdev,
				       struct hpdma_dev *hpdma)
{
	struct top_hpdma_dev *top_hpdma;
	int ret = 0;

	top_hpdma = container_of(hpdma, struct top_hpdma_dev, hpdma);

	ret = register_mbox_dev(MBOX_RECV, &top_hpdma->mdev);
	if (ret) {
		dev_err(&pdev->dev, "register mailbox device failed: %d\n", ret);
		return ret;
	}

	return ret;
}

static void mtk_top_hpdma_tx_pending_desc(struct hpdma_dev *hpdma,
					  struct hpdma_vchan *hvchan,
					  struct hpdma_vdesc *hvdesc)
{
	u32 slot = hpdma->ch_base_slot;
	enum hwspinlock_group grp = hpdma->hwspinlock_grp;

	hvchan->pchan_id = 0;

	/* bail out if another transfer is unexpectedly still issued */
	if (mtk_hpdma_prepare_transfer(hpdma))
		return;

	/* occupy a hpdma physical channel */
	while (!mtk_tops_hwspin_try_lock(grp, slot)) {
		if (unlikely(hvchan->terminating)) {
			spin_unlock(&hpdma->lock);
			return;
		}

		hvchan->pchan_id = (hvchan->pchan_id + 1) % HPDMA_CHAN_NUM;
		if (++slot - hpdma->ch_base_slot == HPDMA_CHAN_NUM)
			slot = hpdma->ch_base_slot;
	}

	mtk_hpdma_config_pchan(hpdma, hvchan, hvdesc);

	if (!mtk_hpdma_start_transfer(hpdma, hvchan, hvdesc))
		return;

	/* start transfer failed */
	mtk_tops_hwspin_unlock(grp, slot);

	mtk_hpdma_unprepare_transfer(hpdma);

	wake_up_interruptible(&hvchan->stop_wait);
}

static struct hpdma_dev *mtk_clust_hpdma_init(struct platform_device *pdev,
					      const struct hpdma_init_data *data)
{
	struct clust_hpdma_dev *clust_hpdma = NULL;
	u32 i;

	if (!data)
		return ERR_PTR(-EINVAL);

	clust_hpdma = devm_kzalloc(&pdev->dev, sizeof(*clust_hpdma), GFP_KERNEL);
	if (!clust_hpdma)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
		clust_hpdma->mdev[i].core = CORE_OFFLOAD_0 + i;
		clust_hpdma->mdev[i].cmd_id = MBOX_CX2AP_CMD_HPDMA;
		clust_hpdma->mdev[i].mbox_handler = data->mbox_handler;
		clust_hpdma->mdev[i].priv = &clust_hpdma->hpdma;
	}

	return &clust_hpdma->hpdma;
}

static void mtk_clust_hpdma_vchan_deinit(struct hpdma_dev *hpdma)
{
	struct hpdma_vchan *hvchan;
	u32 i, j;

	for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
		for (j = 0; j < __CLUST_HPDMA_REQ; j++) {
			hvchan = &hpdma->hvchans[i * __CLUST_HPDMA_REQ + j];
			__mtk_hpdma_vchan_deinit(&hvchan->vchan);
		}
	}
}

static int mtk_clust_hpdma_vchan_init(struct hpdma_dev *hpdma, struct dma_device *ddev)
{
	struct hpdma_vchan *hvchan;
	u32 i, j;

	hpdma->hvchans = devm_kcalloc(ddev->dev, __CLUST_HPDMA_REQ * CORE_OFFLOAD_NUM,
				      sizeof(struct hpdma_vchan),
				      GFP_KERNEL);
	if (!hpdma->hvchans)
		return -ENOMEM;

	for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
		for (j = 0; j < __CLUST_HPDMA_REQ; j++) {
			hvchan = &hpdma->hvchans[i * __CLUST_HPDMA_REQ + j];

			hvchan->pchan_id = i;
			init_waitqueue_head(&hvchan->stop_wait);
			INIT_WORK(&hvchan->tx_work, mtk_hpdma_tx_work);

			hvchan->vchan.desc_free = mtk_hpdma_vdesc_free;
			/*
			 * TODO: maybe init the vchan ourselves with a
			 * customized tasklet? If we set up a customized
			 * tasklet to transmit the remaining chunks of a
			 * channel, we must be careful with hpdma->lock since
			 * it would then be acquired in softirq context.
			 */
			vchan_init(&hvchan->vchan, ddev);
		}
	}

	return 0;
}

static void mtk_clust_hpdma_unregister_mbox(struct platform_device *pdev,
					    struct hpdma_dev *hpdma)
{
	struct clust_hpdma_dev *clust_hpdma;
	u32 i;

	clust_hpdma = container_of(hpdma, struct clust_hpdma_dev, hpdma);

	for (i = 0; i < CORE_OFFLOAD_NUM; i++)
		unregister_mbox_dev(MBOX_RECV, &clust_hpdma->mdev[i]);
}

static int mtk_clust_hpdma_register_mbox(struct platform_device *pdev,
					 struct hpdma_dev *hpdma)
{
	struct clust_hpdma_dev *clust_hpdma;
	int ret = 0;
	int i;

	clust_hpdma = container_of(hpdma, struct clust_hpdma_dev, hpdma);

	for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
		ret = register_mbox_dev(MBOX_RECV, &clust_hpdma->mdev[i]);
		if (ret) {
			dev_err(&pdev->dev, "register mbox%d failed: %d\n", i, ret);
			goto unregister_mbox;
		}
	}

	return ret;

unregister_mbox:
	for (--i; i >= 0; i--)
		unregister_mbox_dev(MBOX_RECV, &clust_hpdma->mdev[i]);

	return ret;
}

static void mtk_clust_hpdma_tx_pending_desc(struct hpdma_dev *hpdma,
					    struct hpdma_vchan *hvchan,
					    struct hpdma_vdesc *hvdesc)
{
	u32 slot = hpdma->ch_base_slot + hvchan->pchan_id;
	enum hwspinlock_group grp = hpdma->hwspinlock_grp;

	/* bail out if another transfer is unexpectedly still issued */
	if (mtk_hpdma_prepare_transfer(hpdma))
		return;

	/* occupy the hpdma physical channel */
	mtk_tops_hwspin_lock(grp, slot);

	mtk_hpdma_config_pchan(hpdma, hvchan, hvdesc);

	if (!mtk_hpdma_start_transfer(hpdma, hvchan, hvdesc))
		return;

	/* start transfer failed */
	mtk_tops_hwspin_unlock(grp, slot);

	mtk_hpdma_unprepare_transfer(hpdma);

	wake_up_interruptible(&hvchan->stop_wait);
}

static enum mbox_msg_cnt mtk_hpdma_ap_recv_mbox_msg(struct mailbox_dev *mdev,
						    struct mailbox_msg *msg)
{
	struct hpdma_dev *hpdma = mdev->priv;
	struct hpdma_vchan *hvchan;
	struct hpdma_vdesc *hvdesc;
	enum hwspinlock_group grp;
	unsigned long flag;
	u32 slot;

	if (!hpdma)
		return MBOX_NO_RET_MSG;

	hvchan = hpdma->issued_chan;
	if (!hvchan) {
		dev_err(hpdma->ddev.dev, "unexpected hpdma mailbox recv\n");
		return MBOX_NO_RET_MSG;
	}

	grp = hpdma->hwspinlock_grp;

	hvdesc = hvchan->issued_desc;

	/* clear issued channel before releasing hwspinlock */
	hpdma->issued_chan = NULL;

	hvchan->busy = false;
	hvchan->issued_desc = NULL;

	/* release hwspinlock */
	slot = hvchan->pchan_id + hpdma->ch_base_slot;

	mtk_tops_hwspin_unlock(grp, hpdma->trigger_start_slot);

	mtk_tops_hwspin_unlock(grp, slot);

	/* release the lock so that other APMCU processes can contend for the hw spinlock */
	spin_unlock(&hpdma->lock);

	if (unlikely(hvchan->terminating)) {
		wake_up_interruptible(&hvchan->stop_wait);
		return MBOX_NO_RET_MSG;
	}

	/*
	 * complete the vdesc and schedule the tx work again
	 * if there are more vdescs left in the channel
	 */
	spin_lock_irqsave(&hvchan->vchan.lock, flag);

	vchan_cookie_complete(&hvdesc->vdesc);

	if (vchan_next_desc(&hvchan->vchan))
		schedule_work(&hvchan->tx_work);

	spin_unlock_irqrestore(&hvchan->vchan.lock, flag);

	return MBOX_NO_RET_MSG;
}

static struct hpdma_init_data top_hpdma_init_data = {
	.ops = {
		.vchan_init = mtk_top_hpdma_vchan_init,
		.vchan_deinit = mtk_top_hpdma_vchan_deinit,
		.mbox_init = mtk_top_hpdma_register_mbox,
		.mbox_deinit = mtk_top_hpdma_unregister_mbox,
		.tx_pending_desc = mtk_top_hpdma_tx_pending_desc,
		.of_dma_xlate = of_dma_xlate_by_chan_id,
	},
	.init = mtk_top_hpdma_init,
	.mbox_handler = mtk_hpdma_ap_recv_mbox_msg,
	.hwspinlock_grp = HWSPINLOCK_GROUP_TOP,
	.trigger_start_slot = HWSPINLOCK_TOP_SLOT_HPDMA_LOCK,
	.ch_base_slot = HWSPINLOCK_TOP_SLOT_HPDMA_PCH0,
};

static struct hpdma_init_data clust_hpdma_init_data = {
	.ops = {
		.vchan_init = mtk_clust_hpdma_vchan_init,
		.vchan_deinit = mtk_clust_hpdma_vchan_deinit,
		.mbox_init = mtk_clust_hpdma_register_mbox,
		.mbox_deinit = mtk_clust_hpdma_unregister_mbox,
		.tx_pending_desc = mtk_clust_hpdma_tx_pending_desc,
		.of_dma_xlate = mtk_clust_hpdma_of_xlate,
	},
	.init = mtk_clust_hpdma_init,
	.mbox_handler = mtk_hpdma_ap_recv_mbox_msg,
	.hwspinlock_grp = HWSPINLOCK_GROUP_CLUST,
	.trigger_start_slot = HWSPINLOCK_CLUST_SLOT_HPDMA_LOCK,
	.ch_base_slot = HWSPINLOCK_CLUST_SLOT_HPDMA_PCH0,
};

static const struct of_device_id mtk_hpdma_match[] = {
	{ .compatible = "mediatek,hpdma-top", .data = &top_hpdma_init_data, },
	{ .compatible = "mediatek,hpdma-sub", .data = &clust_hpdma_init_data, },
	{ },
};
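
/*
 * Hypothetical device tree usage (assumed binding, for illustration only):
 *
 *	hpdma: dma-controller {
 *		compatible = "mediatek,hpdma-top";
 *		reg-names = "base";
 *		...
 *	};
 *
 * The "mediatek,hpdma-top" variant translates a one-cell specifier by channel
 * id (of_dma_xlate_by_chan_id), while "mediatek,hpdma-sub" expects a two-cell
 * specifier handled by mtk_clust_hpdma_of_xlate().
 */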

static struct platform_driver mtk_hpdma_driver = {
	.probe = mtk_hpdma_probe,
	.remove = mtk_hpdma_remove,
	.driver = {
		.name = "mediatek,hpdma",
		.owner = THIS_MODULE,
		.of_match_table = mtk_hpdma_match,
	},
};

int __init mtk_tops_hpdma_init(void)
{
	return platform_driver_register(&mtk_hpdma_driver);
}

void __exit mtk_tops_hpdma_exit(void)
{
	platform_driver_unregister(&mtk_hpdma_driver);
}