// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#define pr_fmt(fmt) "udma: " fmt

#include <common.h>
#include <cpu_func.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <malloc.h>
#include <asm/dma-mapping.h>
#include <dm.h>
#include <dm/read.h>
#include <dm/of_access.h>
#include <dma.h>
#include <dma-uclass.h>
#include <linux/delay.h>
#include <dt-bindings/dma/k3-udma.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/cppi5.h>
#include <linux/soc/ti/ti-udma.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "k3-udma-hwdef.h"

#if BITS_PER_LONG == 64
#define RINGACC_RING_USE_PROXY	(0)
#else
#define RINGACC_RING_USE_PROXY	(1)
#endif
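/*
 * Note: on 32-bit hosts the CPU cannot issue the wide accesses the ring
 * memory expects directly, so ring I/O is routed through the ringacc proxy
 * datapath instead, as the BITS_PER_LONG check above encodes.
 */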

struct udma_chan;

enum udma_mmr {
	MMR_GCFG = 0,
	MMR_RCHANRT,
	MMR_TCHANRT,
	MMR_LAST,
};

static const char * const mmr_names[] = {
	"gcfg", "rchanrt", "tchanrt"
};

struct udma_tchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *t_ring; /* Transmit ring */
	struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
};

struct udma_rchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
	struct k3_nav_ring *r_ring; /* Receive ring */
};

struct udma_rflow {
	int id;
};

struct udma_dev {
	struct device *dev;
	void __iomem *mmrs[MMR_LAST];

	struct k3_nav_ringacc *ringacc;

	u32 features;

	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_map;

	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
	u32 psil_base;

	u32 ch_count;
	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
	const struct ti_sci_rm_psil_ops *tisci_psil_ops;
	u32 tisci_dev_id;
	u32 tisci_navss_dev_id;
	bool is_coherent;
};

struct udma_chan {
	struct udma_dev *ud;
	char name[20];

	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	struct ti_udma_drv_chan_cfg_data cfg_data;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* EPIB is needed for the communication or not */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	int slave_thread_id;
	u32 src_thread;
	u32 dst_thread;
	u32 static_tr_type;

	u32 id;
	enum dma_direction dir;

	struct cppi5_host_desc_t *desc_tx;
	u32 hdesc_size;
	bool in_use;
	void *desc_rx;
	u32 num_rx_bufs;
	u32 desc_rx_cur;
};

#define UDMA_CH_1000(ch)	(ch * 0x1000)
#define UDMA_CH_100(ch)		(ch * 0x100)
#define UDMA_CH_40(ch)		(ch * 0x40)

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM	PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM	4
#endif

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	u32 v;

	v = __raw_readl(base + reg);
	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
	return v;
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
	__raw_writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = udma_read(base, reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		udma_write(base, reg, tmp);
}

/* TCHANRT */
static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
{
	if (!tchan)
		return 0;
	return udma_read(tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_tchan *tchan,
				      int reg, u32 val)
{
	if (!tchan)
		return;
	udma_write(tchan->reg_rt, reg, val);
}

/* RCHANRT */
static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
{
	if (!rchan)
		return 0;
	return udma_read(rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_rchan *rchan,
				      int reg, u32 val)
{
	if (!rchan)
		return;
	udma_write(rchan->reg_rt, reg, val);
}

static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
				       u32 dst_thread)
{
	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
	return ud->tisci_psil_ops->pair(ud->tisci,
					ud->tisci_navss_dev_id,
					src_thread, dst_thread);
}

static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
					 u32 dst_thread)
{
	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
	return ud->tisci_psil_ops->unpair(ud->tisci,
					  ud->tisci_navss_dev_id,
					  src_thread, dst_thread);
}
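
/*
 * PSI-L threads are paired and unpaired through the TISCI resource manager
 * firmware rather than by direct register writes. Destination threads are
 * distinguished from source threads by a dedicated bit in the thread ID
 * (UDMA_PSIL_DST_THREAD_ID_OFFSET), which is OR-ed in above before the
 * request is sent to the firmware.
 */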

static inline char *udma_get_dir_text(enum dma_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM:
		return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV:
		return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM:
		return "MEM_TO_MEM";
	case DMA_DEV_TO_DEV:
		return "DEV_TO_DEV";
	default:
		break;
	}

	return "invalid";
}

static inline bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, rrt_ctl,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, trt_ctl,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		break;
	default:
		break;
	}

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static int udma_is_coherent(struct udma_chan *uc)
{
	return uc->ud->is_coherent;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_nav_ring *ring = NULL;
	int ret = -ENOENT;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring && k3_nav_ringacc_ring_get_occ(ring))
		ret = k3_nav_ringacc_ring_pop(ring, addr);

	return ret;
}

static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_nav_ring *ring1 = NULL;
	struct k3_nav_ring *ring2 = NULL;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring1 = uc->rchan->fd_ring;
		ring2 = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring1)
		k3_nav_ringacc_ring_reset_dma(ring1, 0);
	if (ring2)
		k3_nav_ringacc_ring_reset(ring2);
}

static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
	}

	uc->bcnt = 0;
}
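
/*
 * The read-then-write-back pattern above relies on the RT counter registers
 * decrementing by the value written to them, so writing back the value just
 * read is what clears each counter to zero.
 */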

static inline int udma_stop_hard(struct udma_chan *uc)
{
	pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int udma_start(struct udma_chan *uc)
{
	/* Channel is already running, no need to proceed further */
	if (udma_is_chan_running(uc))
		goto out;

	pr_debug("%s: chan:%d dir:%s (static_tr_type: %d)\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir),
		 uc->static_tr_type);

	/* Make sure that we clear the teardown bit, if it is set */
	udma_stop_hard(uc);

	/* Reset all counters */
	udma_reset_counters(uc);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_CTL_REG),
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		/* Enable remote */
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_CTL_REG),
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	default:
		return -EINVAL;
	}

	pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
out:
	return 0;
}

static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
			   UDMA_CHAN_RT_CTL_EN |
			   UDMA_CHAN_RT_CTL_TDOWN);

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf(" %s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
			   UDMA_PEER_RT_EN_ENABLE |
			   UDMA_PEER_RT_EN_TEARDOWN);

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf("%s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline int udma_stop(struct udma_chan *uc)
{
	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	udma_reset_counters(uc);
	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_stop_dev2mem(uc, true);
		break;
	case DMA_MEM_TO_DEV:
		udma_stop_mem2dev(uc, true);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
{
	int i = 1;

	while (udma_pop_from_ring(uc, paddr)) {
		udelay(1);
		if (!(i % 1000000))
			printf(".");
		i++;
	}
}

#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, #res "%d is in use\n", id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		id = find_first_zero_bit(ud->res##_map, ud->res##_cnt);	\
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	__set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}
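
/*
 * UDMA_RESERVE_RESOURCE(tchan), for instance, expands to roughly:
 *
 *	static struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud,
 *						       int id)
 *	{
 *		// test_bit()/find_first_zero_bit() on ud->tchan_map,
 *		// then __set_bit(id, ud->tchan_map) and
 *		// return &ud->tchans[id];
 *	}
 *
 * i.e. one bitmap-backed allocator per resource type, all sharing the
 * bookkeeping fields in struct udma_dev. Passing id >= 0 requests that
 * exact resource; id == -1 takes the first free one.
 */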

UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);
UDMA_RESERVE_RESOURCE(rflow);

static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	uc->tchan = __udma_reserve_tchan(ud, -1);
	if (IS_ERR(uc->tchan))
		return PTR_ERR(uc->tchan);

	pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	uc->rchan = __udma_reserve_rchan(ud, -1);
	if (IS_ERR(uc->rchan))
		return PTR_ERR(uc->rchan);

	pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);

	return 0;
}

static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	for (chan_id = 0; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	__set_bit(chan_id, ud->tchan_map);
	__set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);

	return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	if (!uc->rchan)
		dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);

	uc->rflow = __udma_reserve_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow))
		return PTR_ERR(uc->rflow);

	pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
	return 0;
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		__clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		__clear_bit(uc->tchan->id, ud->tchan_map);
		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__clear_bit(uc->rflow->id, ud->rflow_map);
		uc->rflow = NULL;
	}
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	uc->tchan->t_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, uc->tchan->id,
				RINGACC_RING_USE_PROXY);
	if (!uc->tchan->t_ring) {
		ret = -EBUSY;
		goto err_tx_ring;
	}

	uc->tchan->tc_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->tchan->tc_ring) {
		ret = -EBUSY;
		goto err_txc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_MESSAGE;

	ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
err_txc_ring:
	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_tx_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->fd_ring = NULL;
	uc->rchan->r_ring = NULL;

	udma_put_rflow(uc);
	udma_put_rchan(uc);
}

static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->dir == DMA_MEM_TO_MEM)
		return 0;

	ret = udma_get_rflow(uc, uc->rchan->id);
	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;

	uc->rchan->fd_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, fd_ring_id,
				RINGACC_RING_USE_PROXY);
	if (!uc->rchan->fd_ring) {
		ret = -EBUSY;
		goto err_rx_ring;
	}

	uc->rchan->r_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->rchan->r_ring) {
		ret = -EBUSY;
		goto err_rxc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_MESSAGE;

	ret = k3_nav_ringacc_ring_cfg(uc->rchan->fd_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->rchan->r_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->r_ring = NULL;
err_rxc_ring:
	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	uc->rchan->fd_ring = NULL;
err_rx_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}

static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
	u32 mode;
	int ret;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
	req.nav_id = ud->tisci_dev_id;
	req.index = uc->tchan->id;
	req.tx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM)
		req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	else
		req.tx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
	req.txcq_qnum = tc_ring;

	ret = ud->tisci_udmap_ops->tx_ch_cfg(ud->tisci, &req);
	if (ret)
		dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);

	return ret;
}

static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int fd_ring = k3_nav_ringacc_get_ring_id(uc->rchan->fd_ring);
	int rx_ring = k3_nav_ringacc_get_ring_id(uc->rchan->r_ring);
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	u32 mode;
	int ret;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
	req.nav_id = ud->tisci_dev_id;
	req.index = uc->rchan->id;
	req.rx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM) {
		req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		req.rxcq_qnum = tc_ring;
	} else {
		req.rx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
		req.rxcq_qnum = rx_ring;
	}
	if (uc->dir != DMA_MEM_TO_MEM && uc->rflow->id != uc->rchan->id) {
		req.flowid_start = uc->rflow->id;
		req.flowid_cnt = 1;
		req.valid_params |=
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
	}

	ret = ud->tisci_udmap_ops->rx_ch_cfg(ud->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
			uc->rchan->id, ret);
		return ret;
	}
	if (uc->dir == DMA_MEM_TO_MEM)
		return ret;

	flow_req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;

	flow_req.nav_id = ud->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;

	if (uc->psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;

	flow_req.rx_error_handling = 0;
	flow_req.rx_desc_type = 0;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = 2;
	flow_req.rx_src_tag_lo_sel = 4;
	flow_req.rx_dest_tag_hi_sel = 5;
	flow_req.rx_dest_tag_lo_sel = 4;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;
	flow_req.rx_ps_location = 0;

	ret = ud->tisci_udmap_ops->rx_flow_cfg(ud->tisci, &flow_req);
	if (ret)
		dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
			uc->rchan->id, uc->rflow->id, ret);

	return ret;
}

static int udma_alloc_chan_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	pr_debug("%s: chan:%d as %s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	switch (uc->dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		ret = udma_get_chan_pair(uc);
		if (ret)
			return ret;

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = uc->slave_thread_id;
		if (!(uc->dst_thread & 0x8000))
			uc->dst_thread |= 0x8000;

		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = uc->slave_thread_id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;

		break;
	default:
		/* Can not happen */
		pr_debug("%s: chan:%d invalid direction (%u)\n",
			 __func__, uc->id, uc->dir);
		return -EINVAL;
	}
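
	/*
	 * PSI-L thread numbering, as used above: UDMA-side threads sit at
	 * ti,psil-base plus the channel index, and destination (receiving)
	 * threads carry bit 15 (0x8000), the same destination-thread marker
	 * OR-ed in by udma_navss_psil_pair().
	 */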

	/* We have channel indexes and rings */
	if (uc->dir == DMA_MEM_TO_MEM) {
		ret = udma_alloc_tchan_sci_req(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rchan_sci_req(uc);
		if (ret)
			goto err_free_res;
	} else {
		/* Slave transfer */
		if (uc->dir == DMA_MEM_TO_DEV) {
			ret = udma_alloc_tchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		} else {
			ret = udma_alloc_rchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		}
	}

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			goto err_free_res;
		}
	}

	/* PSI-L pairing */
	ret = udma_navss_psil_pair(ud, uc->src_thread, uc->dst_thread);
	if (ret) {
		dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
		goto err_free_res;
	}

	return 0;

err_free_res:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	uc->slave_thread_id = -1;
	return ret;
}

static void udma_free_chan_resources(struct udma_chan *uc)
{
	/* Some configuration to UDMA-P channel: disable, reset, whatever */

	/* Release PSI-L pairing */
	udma_navss_psil_unpair(uc->ud, uc->src_thread, uc->dst_thread);

	/* Reset the rings for a new start */
	udma_reset_rings(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	uc->slave_thread_id = -1;
	uc->dir = DMA_MEM_TO_MEM;
}

static int udma_get_mmrs(struct udevice *dev)
{
	struct udma_dev *ud = dev_get_priv(dev);
	int i;

	for (i = 0; i < MMR_LAST; i++) {
		ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
							       mmr_names[i]);
		if (!ud->mmrs[i])
			return -EINVAL;
	}

	return 0;
}

#define UDMA_MAX_CHANNELS	192

static int udma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct udma_dev *ud = dev_get_priv(dev);
	int i, ret;
	u32 cap2, cap3;
	struct udevice *tmp;
	struct udevice *tisci_dev = NULL;

	ret = udma_get_mmrs(dev);
	if (ret)
		return ret;

	ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
					   "ti,ringacc", &tmp);
	ud->ringacc = dev_get_priv(tmp);
	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	ud->psil_base = dev_read_u32_default(dev, "ti,psil-base", 0);
	if (!ud->psil_base) {
		dev_err(dev, "Missing ti,psil-base property\n");
		return -EINVAL;
	}

	ret = uclass_get_device_by_name(UCLASS_FIRMWARE, "dmsc", &tisci_dev);
	if (ret) {
		debug("TISCI RA RM get failed (%d)\n", ret);
		ud->tisci = NULL;
		return 0;
	}
	ud->tisci = (struct ti_sci_handle *)
			(ti_sci_get_handle_from_sysfw(tisci_dev));

	ret = dev_read_u32_default(dev, "ti,sci", 0);
	if (!ret) {
		dev_err(dev, "TISCI RA RM disabled\n");
		ud->tisci = NULL;
	}

	if (ud->tisci) {
		ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));

		ud->tisci_dev_id = -1;
		ret = dev_read_u32(dev, "ti,sci-dev-id", &ud->tisci_dev_id);
		if (ret) {
			dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
			return ret;
		}

		ud->tisci_navss_dev_id = -1;
		ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
				      &ud->tisci_navss_dev_id);
		if (ret) {
			dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
			return ret;
		}

		ud->tisci_udmap_ops = &ud->tisci->ops.rm_udmap_ops;
		ud->tisci_psil_ops = &ud->tisci->ops.rm_psil_ops;
	}

	ud->is_coherent = dev_read_bool(dev, "dma-coherent");

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

	ud->rflow_cnt = cap3 & 0x3fff;
	ud->tchan_cnt = cap2 & 0x1ff;
	ud->echan_cnt = (cap2 >> 9) & 0x1ff;
	ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
	ud->ch_count = ud->tchan_cnt + ud->rchan_cnt;
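
	/*
	 * The CAP2 register (offset 0x28) packs the tchan count in bits 8:0,
	 * the echan count in bits 17:9 and the rchan count in bits 26:18,
	 * while CAP3 (offset 0x2c) carries the rflow count in its low 14
	 * bits; that is what the masks and shifts above decode.
	 */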

	dev_info(dev,
		 "Number of channels: %u (tchan: %u, echan: %u, rchan: %u dev-id %u)\n",
		 ud->ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
		 ud->tisci_dev_id);
	dev_info(dev, "Number of rflows: %u\n", ud->rflow_cnt);

	ud->channels = devm_kcalloc(dev, ud->ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	ud->tchan_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->tchan_cnt),
				     sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt,
				  sizeof(*ud->tchans), GFP_KERNEL);
	ud->rchan_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
				     sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt,
				  sizeof(*ud->rchans), GFP_KERNEL);
	ud->rflow_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
				     sizeof(unsigned long), GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt,
				  sizeof(*ud->rflows), GFP_KERNEL);

	if (!ud->channels || !ud->tchan_map || !ud->rchan_map ||
	    !ud->rflow_map || !ud->tchans || !ud->rchans || !ud->rflows)
		return -ENOMEM;

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ud->ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->id = i;
		uc->slave_thread_id = -1;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->dir = DMA_MEM_TO_MEM;
		sprintf(uc->name, "UDMA chan%d", i);
		if (!i)
			uc->in_use = true;
	}

	pr_debug("UDMA(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
		 udma_read(ud->mmrs[MMR_GCFG], 0),
		 udma_read(ud->mmrs[MMR_GCFG], 0x20),
		 udma_read(ud->mmrs[MMR_GCFG], 0x24),
		 udma_read(ud->mmrs[MMR_GCFG], 0x28),
		 udma_read(ud->mmrs[MMR_GCFG], 0x2c));

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;

	return ret;
}

static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
				 dma_addr_t src, size_t len)
{
	u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct cppi5_tr_type15_t *tr_req;
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned long dummy;
	void *tr_desc;
	size_t desc_size;

	if (len < SZ_64K) {
		num_tr = 1;
		tr0_cnt0 = len;
		tr0_cnt1 = 1;
	} else {
		unsigned long align_to = __ffs(src | dest);

		if (align_to > 3)
			align_to = 3;
		/*
		 * Keep simple: tr0: SZ_64K-alignment blocks,
		 * tr1: the remaining
		 */
		num_tr = 2;
		tr0_cnt0 = (SZ_64K - BIT(align_to));
		if (len / tr0_cnt0 >= SZ_64K) {
			dev_err(uc->ud->dev, "size %zu is not supported\n",
				len);
			return NULL;
		}

		tr0_cnt1 = len / tr0_cnt0;
		tr1_cnt0 = len % tr0_cnt0;
	}

	desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
	tr_desc = dma_alloc_coherent(desc_size, &dummy);
	if (!tr_desc)
		return NULL;
	memset(tr_desc, 0, desc_size);

	cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);

	tr_req = tr_desc + tr_size;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;
	tr_req[0].icnt1 = tr0_cnt1;
	tr_req[0].icnt2 = 1;
	tr_req[0].icnt3 = 1;
	tr_req[0].dim1 = tr0_cnt0;

	tr_req[0].daddr = dest;
	tr_req[0].dicnt0 = tr0_cnt0;
	tr_req[0].dicnt1 = tr0_cnt1;
	tr_req[0].dicnt2 = 1;
	tr_req[0].dicnt3 = 1;
	tr_req[0].ddim1 = tr0_cnt0;

	if (num_tr == 2) {
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;
		tr_req[1].icnt1 = 1;
		tr_req[1].icnt2 = 1;
		tr_req[1].icnt3 = 1;

		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
		tr_req[1].dicnt0 = tr1_cnt0;
		tr_req[1].dicnt1 = 1;
		tr_req[1].dicnt2 = 1;
		tr_req[1].dicnt3 = 1;
	}

	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);

	if (!udma_is_coherent(uc)) {
		flush_dcache_range((u64)tr_desc,
				   ALIGN((u64)tr_desc + desc_size,
					 ARCH_DMA_MINALIGN));
	}

	k3_nav_ringacc_ring_push(uc->tchan->t_ring, &tr_desc);

	return 0;
}
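
/*
 * Worked example for the >= 64K split above: for len = 200000 bytes with
 * src | dest aligned to at least 8 bytes (align_to = 3), tr0_cnt0 becomes
 * SZ_64K - BIT(3) = 65528. tr0 then moves tr0_cnt1 = 200000 / 65528 = 3
 * blocks of 65528 bytes, and tr1 moves the remaining
 * tr1_cnt0 = 200000 % 65528 = 3416 bytes.
 */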

static int udma_transfer(struct udevice *dev, int direction,
			 void *dst, void *src, size_t len)
{
	struct udma_dev *ud = dev_get_priv(dev);
	/* Channel0 is reserved for memcpy */
	struct udma_chan *uc = &ud->channels[0];
	dma_addr_t paddr = 0;
	int ret;

	ret = udma_alloc_chan_resources(uc);
	if (ret)
		return ret;

	udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);
	udma_start(uc);
	udma_poll_completion(uc, &paddr);
	udma_stop(uc);

	udma_free_chan_resources(uc);
	return 0;
}

static int udma_request(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	unsigned long dummy;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	uc = &ud->channels[dma->id];
	ret = udma_alloc_chan_resources(uc);
	if (ret) {
		dev_err(dma->dev, "alloc dma res failed %d\n", ret);
		return -EINVAL;
	}

	uc->hdesc_size = cppi5_hdesc_calc_size(uc->needs_epib,
					       uc->psd_size, 0);
	uc->hdesc_size = ALIGN(uc->hdesc_size, ARCH_DMA_MINALIGN);

	if (uc->dir == DMA_MEM_TO_DEV) {
		uc->desc_tx = dma_alloc_coherent(uc->hdesc_size, &dummy);
		memset(uc->desc_tx, 0, uc->hdesc_size);
	} else {
		uc->desc_rx = dma_alloc_coherent(
				uc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
		memset(uc->desc_rx, 0, uc->hdesc_size * UDMA_RX_DESC_NUM);
	}

	uc->in_use = true;
	uc->desc_rx_cur = 0;
	uc->num_rx_bufs = 0;

	if (uc->dir == DMA_DEV_TO_MEM) {
		uc->cfg_data.flow_id_base = uc->rflow->id;
		uc->cfg_data.flow_id_cnt = 1;
	}

	return 0;
}

static int udma_free(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		udma_stop(uc);
	udma_free_chan_resources(uc);

	uc->in_use = false;

	return 0;
}

static int udma_enable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	ret = udma_start(uc);

	return ret;
}

static int udma_disable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret = 0;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		ret = udma_stop(uc);
	else
		dev_err(dma->dev, "%s not running\n", __func__);

	return ret;
}

static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_tx;
	dma_addr_t dma_src = (dma_addr_t)src;
	struct ti_udma_drv_packet_data packet_data = { 0 };
	dma_addr_t paddr;
	struct udma_chan *uc;
	u32 tc_ring_id;
	int ret;

	if (metadata)
		packet_data = *((struct ti_udma_drv_packet_data *)metadata);

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_MEM_TO_DEV)
		return -EINVAL;

	tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);

	desc_tx = uc->desc_tx;

	cppi5_hdesc_reset_hbdesc(desc_tx);

	cppi5_hdesc_init(desc_tx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_tx, len);
	cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
	cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
	/* pass below information from caller */
	cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
	cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);

	if (!udma_is_coherent(uc)) {
		flush_dcache_range((u64)dma_src,
				   ALIGN((u64)dma_src + len,
					 ARCH_DMA_MINALIGN));
		flush_dcache_range((u64)desc_tx,
				   ALIGN((u64)desc_tx + uc->hdesc_size,
					 ARCH_DMA_MINALIGN));
	}

	ret = k3_nav_ringacc_ring_push(uc->tchan->t_ring, &uc->desc_tx);
	if (ret) {
		dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
			dma->id, ret);
		return ret;
	}

	udma_poll_completion(uc, &paddr);

	return 0;
}
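
/*
 * Note that udma_send() is fully synchronous: after the single reused TX
 * host descriptor is pushed to the transmit ring, udma_poll_completion()
 * busy-waits until it returns on the completion ring, so only one packet
 * is ever in flight per channel.
 */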

static int udma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t buf_dma;
	struct udma_chan *uc;
	u32 buf_dma_len, pkt_len;
	u32 port_id = 0;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;
	if (!uc->num_rx_bufs)
		return -EINVAL;

	ret = k3_nav_ringacc_ring_pop(uc->rchan->r_ring, &desc_rx);
	if (ret && ret != -ENODATA) {
		dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
		return ret;
	} else if (ret == -ENODATA) {
		return 0;
	}

	/* invalidate cache data */
	if (!udma_is_coherent(uc)) {
		invalidate_dcache_range((ulong)desc_rx,
					(ulong)((void *)desc_rx +
						uc->hdesc_size));
	}

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);

	/* invalidate cache data */
	if (!udma_is_coherent(uc)) {
		invalidate_dcache_range((ulong)buf_dma,
					(ulong)(buf_dma + buf_dma_len));
	}

	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	*dst = (void *)buf_dma;
	uc->num_rx_bufs--;

	return pkt_len;
}

static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc = &ud->channels[0];
	ofnode chconf_node, slave_node;
	char prop[50];
	u32 val;

	for (val = 0; val < ud->ch_count; val++) {
		uc = &ud->channels[val];
		if (!uc->in_use)
			break;
	}

	if (val == ud->ch_count)
		return -EBUSY;

	uc->dir = DMA_DEV_TO_MEM;
	if (args->args[2] == UDMA_DIR_TX)
		uc->dir = DMA_MEM_TO_DEV;

	slave_node = ofnode_get_by_phandle(args->args[0]);
	if (!ofnode_valid(slave_node)) {
		dev_err(ud->dev, "slave node is missing\n");
		return -EINVAL;
	}

	snprintf(prop, sizeof(prop), "ti,psil-config%u", args->args[1]);
	chconf_node = ofnode_find_subnode(slave_node, prop);
	if (!ofnode_valid(chconf_node)) {
		dev_err(ud->dev, "Channel configuration node is missing\n");
		return -EINVAL;
	}

	if (!ofnode_read_u32(chconf_node, "linux,udma-mode", &val)) {
		if (val == UDMA_PKT_MODE)
			uc->pkt_mode = true;
	}

	if (!ofnode_read_u32(chconf_node, "statictr-type", &val))
		uc->static_tr_type = val;

	uc->needs_epib = ofnode_read_bool(chconf_node, "ti,needs-epib");
	if (!ofnode_read_u32(chconf_node, "ti,psd-size", &val))
		uc->psd_size = val;
	uc->metadata_size = (uc->needs_epib ? 16 : 0) + uc->psd_size;

	if (ofnode_read_u32(slave_node, "ti,psil-base", &val)) {
		dev_err(ud->dev, "ti,psil-base is missing\n");
		return -EINVAL;
	}

	uc->slave_thread_id = val + args->args[1];

	dma->id = uc->id;
	pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
		 dma->id, uc->needs_epib,
		 uc->psd_size, uc->metadata_size,
		 uc->slave_thread_id);

	return 0;
}

int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t dma_dst;
	struct udma_chan *uc;
	u32 desc_num;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;

	if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
		return -EINVAL;

	desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
	desc_rx = uc->desc_rx + (desc_num * uc->hdesc_size);
	dma_dst = (dma_addr_t)dst;

	cppi5_hdesc_reset_hbdesc(desc_rx);

	cppi5_hdesc_init(desc_rx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_rx, size);
	cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);

	if (!udma_is_coherent(uc)) {
		flush_dcache_range((u64)desc_rx,
				   ALIGN((u64)desc_rx + uc->hdesc_size,
					 ARCH_DMA_MINALIGN));
	}

	k3_nav_ringacc_ring_push(uc->rchan->fd_ring, &desc_rx);

	uc->num_rx_bufs++;
	uc->desc_rx_cur++;

	return 0;
}
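
/*
 * RX descriptors live in one coherent allocation of UDMA_RX_DESC_NUM slots;
 * desc_rx_cur indexes the next slot to reuse (modulo the pool size) and
 * num_rx_bufs tracks how many buffers are currently queued on the
 * free-descriptor ring.
 */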

static int udma_get_cfg(struct dma *dma, u32 id, void **data)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	switch (id) {
	case TI_UDMA_CHAN_PRIV_INFO:
		uc = &ud->channels[dma->id];
		*data = &uc->cfg_data;
		return 0;
	}

	return -EINVAL;
}

static const struct dma_ops udma_ops = {
	.transfer	= udma_transfer,
	.of_xlate	= udma_of_xlate,
	.request	= udma_request,
	.free		= udma_free,
	.enable		= udma_enable,
	.disable	= udma_disable,
	.send		= udma_send,
	.receive	= udma_receive,
	.prepare_rcv_buf = udma_prepare_rcv_buf,
	.get_cfg	= udma_get_cfg,
};

static const struct udevice_id udma_ids[] = {
	{ .compatible = "ti,k3-navss-udmap" },
	{ }
};

U_BOOT_DRIVER(ti_edma3) = {
	.name	= "ti-udma",
	.id	= UCLASS_DMA,
	.of_match = udma_ids,
	.ops	= &udma_ops,
	.probe	= udma_probe,
	.priv_auto_alloc_size = sizeof(struct udma_dev),
};