1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 */
6#define pr_fmt(fmt) "udma: " fmt
7
8#include <common.h>
9#include <asm/io.h>
10#include <asm/bitops.h>
11#include <malloc.h>
12#include <asm/dma-mapping.h>
13#include <dm.h>
14#include <dm/read.h>
15#include <dm/of_access.h>
16#include <dma.h>
17#include <dma-uclass.h>
18#include <linux/delay.h>
19#include <dt-bindings/dma/k3-udma.h>
20#include <linux/soc/ti/k3-navss-ringacc.h>
21#include <linux/soc/ti/cppi5.h>
22#include <linux/soc/ti/ti-udma.h>
23#include <linux/soc/ti/ti_sci_protocol.h>
24
25#include "k3-udma-hwdef.h"
26
27#if BITS_PER_LONG == 64
28#define RINGACC_RING_USE_PROXY (0)
29#else
30#define RINGACC_RING_USE_PROXY (1)
31#endif
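/*
 * Assumption: the ring accessor proxy is only required on 32-bit hosts,
 * where the 8-byte ring elements cannot be pushed with a single 64-bit
 * store; 64-bit builds access the ring memory directly.
 */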
32
33struct udma_chan;
34
35enum udma_mmr {
36 MMR_GCFG = 0,
37 MMR_RCHANRT,
38 MMR_TCHANRT,
39 MMR_LAST,
40};
41
42static const char * const mmr_names[] = {
43 "gcfg", "rchanrt", "tchanrt"
44};
45
46struct udma_tchan {
47 void __iomem *reg_rt;
48
49 int id;
50 struct k3_nav_ring *t_ring; /* Transmit ring */
51 struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
52};
53
54struct udma_rchan {
55 void __iomem *reg_rt;
56
57 int id;
58 struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
59 struct k3_nav_ring *r_ring; /* Receive ring */
60};
61
62struct udma_rflow {
63 int id;
64};
65
66struct udma_dev {
67 struct device *dev;
68 void __iomem *mmrs[MMR_LAST];
69
70 struct k3_nav_ringacc *ringacc;
71
72 u32 features;
73
74 int tchan_cnt;
75 int echan_cnt;
76 int rchan_cnt;
77 int rflow_cnt;
78 unsigned long *tchan_map;
79 unsigned long *rchan_map;
80 unsigned long *rflow_map;
81
82 struct udma_tchan *tchans;
83 struct udma_rchan *rchans;
84 struct udma_rflow *rflows;
85
86 struct udma_chan *channels;
87 u32 psil_base;
88
89 u32 ch_count;
90 const struct ti_sci_handle *tisci;
91 const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
92 const struct ti_sci_rm_psil_ops *tisci_psil_ops;
93 u32 tisci_dev_id;
94 u32 tisci_navss_dev_id;
95 bool is_coherent;
96};
97
98struct udma_chan {
99 struct udma_dev *ud;
100 char name[20];
101
102 struct udma_tchan *tchan;
103 struct udma_rchan *rchan;
104 struct udma_rflow *rflow;
105
106 u32 bcnt; /* number of bytes completed since the start of the channel */
107
108 bool pkt_mode; /* TR or packet */
109 bool needs_epib; /* EPIB is needed for the communication or not */
110 u32 psd_size; /* size of Protocol Specific Data */
111 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
112 int slave_thread_id;
113 u32 src_thread;
114 u32 dst_thread;
115 u32 static_tr_type;
116
117 u32 id;
118 enum dma_direction dir;
119
120 struct cppi5_host_desc_t *desc_tx;
121 u32 hdesc_size;
122 bool in_use;
123 void *desc_rx;
124 u32 num_rx_bufs;
125 u32 desc_rx_cur;
126
127};
128
129#define UDMA_CH_1000(ch) ((ch) * 0x1000)
130#define UDMA_CH_100(ch) ((ch) * 0x100)
131#define UDMA_CH_40(ch) ((ch) * 0x40)
132
133#ifdef PKTBUFSRX
134#define UDMA_RX_DESC_NUM PKTBUFSRX
135#else
136#define UDMA_RX_DESC_NUM 4
137#endif
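/*
 * One RX descriptor per network RX buffer when the network stack's
 * PKTBUFSRX is available; otherwise fall back to a small fixed pool.
 */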
138
139/* Generic register access functions */
140static inline u32 udma_read(void __iomem *base, int reg)
141{
142 u32 v;
143
144 v = __raw_readl(base + reg);
145 pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
146 return v;
147}
148
149static inline void udma_write(void __iomem *base, int reg, u32 val)
150{
151 pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
152 __raw_writel(val, base + reg);
153}
154
155static inline void udma_update_bits(void __iomem *base, int reg,
156 u32 mask, u32 val)
157{
158 u32 tmp, orig;
159
160 orig = udma_read(base, reg);
161 tmp = orig & ~mask;
162 tmp |= (val & mask);
163
164 if (tmp != orig)
165 udma_write(base, reg, tmp);
166}
167
168/* TCHANRT */
169static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
170{
171 if (!tchan)
172 return 0;
173 return udma_read(tchan->reg_rt, reg);
174}
175
176static inline void udma_tchanrt_write(struct udma_tchan *tchan,
177 int reg, u32 val)
178{
179 if (!tchan)
180 return;
181 udma_write(tchan->reg_rt, reg, val);
182}
183
184/* RCHANRT */
185static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
186{
187 if (!rchan)
188 return 0;
189 return udma_read(rchan->reg_rt, reg);
190}
191
192static inline void udma_rchanrt_write(struct udma_rchan *rchan,
193 int reg, u32 val)
194{
195 if (!rchan)
196 return;
197 udma_write(rchan->reg_rt, reg, val);
198}
199
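/*
 * PSI-L thread pairing/unpairing is requested from System Firmware
 * (TISCI). The destination end of a link is marked by
 * UDMA_PSIL_DST_THREAD_ID_OFFSET (presumably bit 15, matching the
 * "| 0x8000" used when composing dst_thread in
 * udma_alloc_chan_resources()).
 */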
200static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
201 u32 dst_thread)
202{
203 dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
204 return ud->tisci_psil_ops->pair(ud->tisci,
205 ud->tisci_navss_dev_id,
206 src_thread, dst_thread);
207}
208
209static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
210 u32 dst_thread)
211{
212 dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
213 return ud->tisci_psil_ops->unpair(ud->tisci,
214 ud->tisci_navss_dev_id,
215 src_thread, dst_thread);
216}
217
218static inline const char *udma_get_dir_text(enum dma_direction dir)
219{
220 switch (dir) {
221 case DMA_DEV_TO_MEM:
222 return "DEV_TO_MEM";
223 case DMA_MEM_TO_DEV:
224 return "MEM_TO_DEV";
225 case DMA_MEM_TO_MEM:
226 return "MEM_TO_MEM";
227 case DMA_DEV_TO_DEV:
228 return "DEV_TO_DEV";
229 default:
230 break;
231 }
232
233 return "invalid";
234}
235
236static inline bool udma_is_chan_running(struct udma_chan *uc)
237{
238 u32 trt_ctl = 0;
239 u32 rrt_ctl = 0;
240
241 switch (uc->dir) {
242 case DMA_DEV_TO_MEM:
243 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
244 pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
245 __func__, rrt_ctl,
246 udma_rchanrt_read(uc->rchan,
247 UDMA_RCHAN_RT_PEER_RT_EN_REG));
248 break;
249 case DMA_MEM_TO_DEV:
250 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
251 pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
252 __func__, trt_ctl,
253 udma_tchanrt_read(uc->tchan,
254 UDMA_TCHAN_RT_PEER_RT_EN_REG));
255 break;
256 case DMA_MEM_TO_MEM:
257 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
258 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
259 break;
260 default:
261 break;
262 }
263
264 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
265 return true;
266
267 return false;
268}
269
270static int udma_is_coherent(struct udma_chan *uc)
271{
272 return uc->ud->is_coherent;
273}
274
275static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
276{
277 struct k3_nav_ring *ring = NULL;
278 int ret = -ENOENT;
279
280 switch (uc->dir) {
281 case DMA_DEV_TO_MEM:
282 ring = uc->rchan->r_ring;
283 break;
284 case DMA_MEM_TO_DEV:
285 ring = uc->tchan->tc_ring;
286 break;
287 case DMA_MEM_TO_MEM:
288 ring = uc->tchan->tc_ring;
289 break;
290 default:
291 break;
292 }
293
294 if (ring && k3_nav_ringacc_ring_get_occ(ring))
295 ret = k3_nav_ringacc_ring_pop(ring, addr);
296
297 return ret;
298}
299
300static void udma_reset_rings(struct udma_chan *uc)
301{
302 struct k3_nav_ring *ring1 = NULL;
303 struct k3_nav_ring *ring2 = NULL;
304
305 switch (uc->dir) {
306 case DMA_DEV_TO_MEM:
307 ring1 = uc->rchan->fd_ring;
308 ring2 = uc->rchan->r_ring;
309 break;
310 case DMA_MEM_TO_DEV:
311 ring1 = uc->tchan->t_ring;
312 ring2 = uc->tchan->tc_ring;
313 break;
314 case DMA_MEM_TO_MEM:
315 ring1 = uc->tchan->t_ring;
316 ring2 = uc->tchan->tc_ring;
317 break;
318 default:
319 break;
320 }
321
322 if (ring1)
323 k3_nav_ringacc_ring_reset_dma(ring1, 0);
324 if (ring2)
325 k3_nav_ringacc_ring_reset(ring2);
326}
327
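/*
 * The real-time byte/packet counters appear to be decrement-on-write:
 * writing back the value just read zeroes them without disturbing a
 * possibly running channel.
 */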
328static void udma_reset_counters(struct udma_chan *uc)
329{
330 u32 val;
331
332 if (uc->tchan) {
333 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
334 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
335
336 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
337 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
338
339 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
340 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
341
342 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
343 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
344 }
345
346 if (uc->rchan) {
347 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
348 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
349
350 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
351 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
352
353 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
354 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
355
356 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
357 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
358 }
359
360 uc->bcnt = 0;
361}
362
363static inline int udma_stop_hard(struct udma_chan *uc)
364{
365 pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);
366
367 switch (uc->dir) {
368 case DMA_DEV_TO_MEM:
369 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
370 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
371 break;
372 case DMA_MEM_TO_DEV:
373 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
374 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
375 break;
376 case DMA_MEM_TO_MEM:
377 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
378 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
379 break;
380 default:
381 return -EINVAL;
382 }
383
384 return 0;
385}
386
387static int udma_start(struct udma_chan *uc)
388{
389 /* Channel is already running, no need to proceed further */
390 if (udma_is_chan_running(uc))
391 goto out;
392
393 pr_debug("%s: chan:%d dir:%s (static_tr_type: %d)\n",
394 __func__, uc->id, udma_get_dir_text(uc->dir),
395 uc->static_tr_type);
396
397 /* Make sure that we clear the teardown bit, if it is set */
398 udma_stop_hard(uc);
399
400 /* Reset all counters */
401 udma_reset_counters(uc);
402
403 switch (uc->dir) {
404 case DMA_DEV_TO_MEM:
405 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
406 UDMA_CHAN_RT_CTL_EN);
407
408 /* Enable remote */
409 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
410 UDMA_PEER_RT_EN_ENABLE);
411
412 pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
413 __func__,
414 udma_rchanrt_read(uc->rchan,
415 UDMA_RCHAN_RT_CTL_REG),
416 udma_rchanrt_read(uc->rchan,
417 UDMA_RCHAN_RT_PEER_RT_EN_REG));
418 break;
419 case DMA_MEM_TO_DEV:
420 /* Enable remote */
421 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
422 UDMA_PEER_RT_EN_ENABLE);
423
424 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
425 UDMA_CHAN_RT_CTL_EN);
426
427 pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
428 __func__,
429 udma_tchanrt_read(uc->tchan,
430 UDMA_TCHAN_RT_CTL_REG),
431 udma_tchanrt_read(uc->tchan,
432 UDMA_TCHAN_RT_PEER_RT_EN_REG));
433 break;
434 case DMA_MEM_TO_MEM:
435 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
436 UDMA_CHAN_RT_CTL_EN);
437 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
438 UDMA_CHAN_RT_CTL_EN);
439
440 break;
441 default:
442 return -EINVAL;
443 }
444
445 pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
446out:
447 return 0;
448}
449
450static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
451{
452 int i = 0;
453 u32 val;
454
455 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
456 UDMA_CHAN_RT_CTL_EN |
457 UDMA_CHAN_RT_CTL_TDOWN);
458
459 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
460
461 while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
462 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
463 udelay(1);
464 if (i > 1000) {
465 printf(" %s TIMEOUT !\n", __func__);
466 break;
467 }
468 i++;
469 }
470
471 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
472 if (val & UDMA_PEER_RT_EN_ENABLE)
473 printf("%s: peer not stopped TIMEOUT !\n", __func__);
474}
475
476static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
477{
478 int i = 0;
479 u32 val;
480
481 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
482 UDMA_PEER_RT_EN_ENABLE |
483 UDMA_PEER_RT_EN_TEARDOWN);
484
485 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
486
487 while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
488 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
489 udelay(1);
490 if (i > 1000) {
491 printf("%s TIMEOUT !\n", __func__);
492 break;
493 }
494 i++;
495 }
496
497 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
498 if (val & UDMA_PEER_RT_EN_ENABLE)
499 printf("%s: peer not stopped TIMEOUT !\n", __func__);
500}
501
502static inline int udma_stop(struct udma_chan *uc)
503{
504 pr_debug("%s: chan:%d dir:%s\n",
505 __func__, uc->id, udma_get_dir_text(uc->dir));
506
507 udma_reset_counters(uc);
508 switch (uc->dir) {
509 case DMA_DEV_TO_MEM:
510 udma_stop_dev2mem(uc, true);
511 break;
512 case DMA_MEM_TO_DEV:
513 udma_stop_mem2dev(uc, true);
514 break;
515 case DMA_MEM_TO_MEM:
516 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
517 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
518 break;
519 default:
520 return -EINVAL;
521 }
522
523 return 0;
524}
525
526static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
527{
528 int i = 1;
529
530 while (udma_pop_from_ring(uc, paddr)) {
531 udelay(1);
532 if (!(i % 1000000))
533 printf(".");
534 i++;
535 }
536}
537
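/*
 * Generates __udma_reserve_tchan(), __udma_reserve_rchan() and
 * __udma_reserve_rflow(): reserve a specific resource id when id >= 0,
 * otherwise grab the first free one from the corresponding bitmap.
 */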
538#define UDMA_RESERVE_RESOURCE(res) \
539static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
540 int id) \
541{ \
542 if (id >= 0) { \
543 if (test_bit(id, ud->res##_map)) { \
543 dev_err(ud->dev, #res "%d is in use\n", id); \
545 return ERR_PTR(-ENOENT); \
546 } \
547 } else { \
548 id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \
549 if (id == ud->res##_cnt) { \
550 return ERR_PTR(-ENOENT); \
551 } \
552 } \
553 \
554 __set_bit(id, ud->res##_map); \
555 return &ud->res##s[id]; \
556}
557
558UDMA_RESERVE_RESOURCE(tchan);
559UDMA_RESERVE_RESOURCE(rchan);
560UDMA_RESERVE_RESOURCE(rflow);
561
562static int udma_get_tchan(struct udma_chan *uc)
563{
564 struct udma_dev *ud = uc->ud;
565
566 if (uc->tchan) {
567 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
568 uc->id, uc->tchan->id);
569 return 0;
570 }
571
572 uc->tchan = __udma_reserve_tchan(ud, -1);
573 if (IS_ERR(uc->tchan))
574 return PTR_ERR(uc->tchan);
575
576 pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);
577
578 return 0;
579}
580
581static int udma_get_rchan(struct udma_chan *uc)
582{
583 struct udma_dev *ud = uc->ud;
584
585 if (uc->rchan) {
586 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
587 uc->id, uc->rchan->id);
588 return 0;
589 }
590
591 uc->rchan = __udma_reserve_rchan(ud, -1);
592 if (IS_ERR(uc->rchan))
593 return PTR_ERR(uc->rchan);
594
595 pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);
596
597 return 0;
598}
599
600static int udma_get_chan_pair(struct udma_chan *uc)
601{
602 struct udma_dev *ud = uc->ud;
603 int chan_id, end;
604
605 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
606 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
607 uc->id, uc->tchan->id);
608 return 0;
609 }
610
611 if (uc->tchan) {
612 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
613 uc->id, uc->tchan->id);
614 return -EBUSY;
615 } else if (uc->rchan) {
616 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
617 uc->id, uc->rchan->id);
618 return -EBUSY;
619 }
620
621 /* Can be optimized, but let's have it like this for now */
622 end = min(ud->tchan_cnt, ud->rchan_cnt);
623 for (chan_id = 0; chan_id < end; chan_id++) {
624 if (!test_bit(chan_id, ud->tchan_map) &&
625 !test_bit(chan_id, ud->rchan_map))
626 break;
627 }
628
629 if (chan_id == end)
630 return -ENOENT;
631
632 __set_bit(chan_id, ud->tchan_map);
633 __set_bit(chan_id, ud->rchan_map);
634 uc->tchan = &ud->tchans[chan_id];
635 uc->rchan = &ud->rchans[chan_id];
636
637 pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);
638
639 return 0;
640}
641
642static int udma_get_rflow(struct udma_chan *uc, int flow_id)
643{
644 struct udma_dev *ud = uc->ud;
645
646 if (uc->rflow) {
647 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
648 uc->id, uc->rflow->id);
649 return 0;
650 }
651
652 if (!uc->rchan)
653 dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);
654
655 uc->rflow = __udma_reserve_rflow(ud, flow_id);
656 if (IS_ERR(uc->rflow))
657 return PTR_ERR(uc->rflow);
658
659 pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
660 return 0;
661}
662
663static void udma_put_rchan(struct udma_chan *uc)
664{
665 struct udma_dev *ud = uc->ud;
666
667 if (uc->rchan) {
668 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
669 uc->rchan->id);
670 __clear_bit(uc->rchan->id, ud->rchan_map);
671 uc->rchan = NULL;
672 }
673}
674
675static void udma_put_tchan(struct udma_chan *uc)
676{
677 struct udma_dev *ud = uc->ud;
678
679 if (uc->tchan) {
680 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
681 uc->tchan->id);
682 __clear_bit(uc->tchan->id, ud->tchan_map);
683 uc->tchan = NULL;
684 }
685}
686
687static void udma_put_rflow(struct udma_chan *uc)
688{
689 struct udma_dev *ud = uc->ud;
690
691 if (uc->rflow) {
692 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
693 uc->rflow->id);
694 __clear_bit(uc->rflow->id, ud->rflow_map);
695 uc->rflow = NULL;
696 }
697}
698
699static void udma_free_tx_resources(struct udma_chan *uc)
700{
701 if (!uc->tchan)
702 return;
703
704 k3_nav_ringacc_ring_free(uc->tchan->t_ring);
705 k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
706 uc->tchan->t_ring = NULL;
707 uc->tchan->tc_ring = NULL;
708
709 udma_put_tchan(uc);
710}
711
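/*
 * A TX channel uses two rings: t_ring for submitting descriptors (its
 * index matches the tchan id) and tc_ring for completions (any free
 * ring, hence the -1 request below).
 */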
712static int udma_alloc_tx_resources(struct udma_chan *uc)
713{
714 struct k3_nav_ring_cfg ring_cfg;
715 struct udma_dev *ud = uc->ud;
716 int ret;
717
718 ret = udma_get_tchan(uc);
719 if (ret)
720 return ret;
721
722 uc->tchan->t_ring = k3_nav_ringacc_request_ring(
723 ud->ringacc, uc->tchan->id,
724 RINGACC_RING_USE_PROXY);
725 if (!uc->tchan->t_ring) {
726 ret = -EBUSY;
727 goto err_tx_ring;
728 }
729
730 uc->tchan->tc_ring = k3_nav_ringacc_request_ring(
731 ud->ringacc, -1, RINGACC_RING_USE_PROXY);
732 if (!uc->tchan->tc_ring) {
733 ret = -EBUSY;
734 goto err_txc_ring;
735 }
736
737 memset(&ring_cfg, 0, sizeof(ring_cfg));
738 ring_cfg.size = 16;
739 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
740 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_MESSAGE;
741
742 ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
743 ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
744
745 if (ret)
746 goto err_ringcfg;
747
748 return 0;
749
750err_ringcfg:
751 k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
752 uc->tchan->tc_ring = NULL;
753err_txc_ring:
754 k3_nav_ringacc_ring_free(uc->tchan->t_ring);
755 uc->tchan->t_ring = NULL;
756err_tx_ring:
757 udma_put_tchan(uc);
758
759 return ret;
760}
761
762static void udma_free_rx_resources(struct udma_chan *uc)
763{
764 if (!uc->rchan)
765 return;
766
767 k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
768 k3_nav_ringacc_ring_free(uc->rchan->r_ring);
769 uc->rchan->fd_ring = NULL;
770 uc->rchan->r_ring = NULL;
771
772 udma_put_rflow(uc);
773 udma_put_rchan(uc);
774}
775
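/*
 * An RX channel uses a free-descriptor ring (fd_ring) and a completion
 * ring (r_ring). The fd_ring index is derived from the channel id with
 * the tchan and echan ranges skipped, which assumes the ring space is
 * laid out as tchan rings, echan rings, then rchan rings. MEM_TO_MEM
 * channels need neither an rflow nor RX rings.
 */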
776static int udma_alloc_rx_resources(struct udma_chan *uc)
777{
778 struct k3_nav_ring_cfg ring_cfg;
779 struct udma_dev *ud = uc->ud;
780 int fd_ring_id;
781 int ret;
782
783 ret = udma_get_rchan(uc);
784 if (ret)
785 return ret;
786
787 /* For MEM_TO_MEM we don't need rflow or rings */
788 if (uc->dir == DMA_MEM_TO_MEM)
789 return 0;
790
791 ret = udma_get_rflow(uc, uc->rchan->id);
792 if (ret) {
793 ret = -EBUSY;
794 goto err_rflow;
795 }
796
797 fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
798
799 uc->rchan->fd_ring = k3_nav_ringacc_request_ring(
800 ud->ringacc, fd_ring_id,
801 RINGACC_RING_USE_PROXY);
802 if (!uc->rchan->fd_ring) {
803 ret = -EBUSY;
804 goto err_rx_ring;
805 }
806
807 uc->rchan->r_ring = k3_nav_ringacc_request_ring(
808 ud->ringacc, -1, RINGACC_RING_USE_PROXY);
809 if (!uc->rchan->r_ring) {
810 ret = -EBUSY;
811 goto err_rxc_ring;
812 }
813
814 memset(&ring_cfg, 0, sizeof(ring_cfg));
815 ring_cfg.size = 16;
816 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
817 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_MESSAGE;
818
819 ret = k3_nav_ringacc_ring_cfg(uc->rchan->fd_ring, &ring_cfg);
820 ret |= k3_nav_ringacc_ring_cfg(uc->rchan->r_ring, &ring_cfg);
821
822 if (ret)
823 goto err_ringcfg;
824
825 return 0;
826
827err_ringcfg:
828 k3_nav_ringacc_ring_free(uc->rchan->r_ring);
829 uc->rchan->r_ring = NULL;
830err_rxc_ring:
831 k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
832 uc->rchan->fd_ring = NULL;
833err_rx_ring:
834 udma_put_rflow(uc);
835err_rflow:
836 udma_put_rchan(uc);
837
838 return ret;
839}
840
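/*
 * Channel configuration goes through the TISCI RM UDMAP interface of
 * System Firmware. The fetch size fields are apparently expressed in
 * 32-bit words, hence the ">> 2" on the byte sizes below.
 */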
841static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
842{
843 struct udma_dev *ud = uc->ud;
844 int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
845 struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
846 u32 mode;
847 int ret;
848
849 if (uc->pkt_mode)
850 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
851 else
852 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
853
854 req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
855 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
856 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
857 req.nav_id = ud->tisci_dev_id;
858 req.index = uc->tchan->id;
859 req.tx_chan_type = mode;
860 if (uc->dir == DMA_MEM_TO_MEM)
861 req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
862 else
863 req.tx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
864 uc->psd_size,
865 0) >> 2;
866 req.txcq_qnum = tc_ring;
867
868 ret = ud->tisci_udmap_ops->tx_ch_cfg(ud->tisci, &req);
869 if (ret)
870 dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);
871
872 return ret;
873}
874
875static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
876{
877 struct udma_dev *ud = uc->ud;
878 int fd_ring = k3_nav_ringacc_get_ring_id(uc->rchan->fd_ring);
879 int rx_ring = k3_nav_ringacc_get_ring_id(uc->rchan->r_ring);
880 int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
881 struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
882 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
883 u32 mode;
884 int ret;
885
886 if (uc->pkt_mode)
887 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
888 else
889 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
890
891 req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
892 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
893 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
894 req.nav_id = ud->tisci_dev_id;
895 req.index = uc->rchan->id;
896 req.rx_chan_type = mode;
897 if (uc->dir == DMA_MEM_TO_MEM) {
898 req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
899 req.rxcq_qnum = tc_ring;
900 } else {
901 req.rx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
902 uc->psd_size,
903 0) >> 2;
904 req.rxcq_qnum = rx_ring;
905 }
906 if (uc->rflow->id != uc->rchan->id && uc->dir != DMA_MEM_TO_MEM) {
907 req.flowid_start = uc->rflow->id;
908 req.flowid_cnt = 1;
909 req.valid_params |=
910 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
911 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
912 }
913
914 ret = ud->tisci_udmap_ops->rx_ch_cfg(ud->tisci, &req);
915 if (ret) {
916 dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
917 uc->rchan->id, ret);
918 return ret;
919 }
920 if (uc->dir == DMA_MEM_TO_MEM)
921 return ret;
922
923 flow_req.valid_params =
924 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
925 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
926 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
927 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
928 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
929 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
930 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
931 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
932 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
933 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
934 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
935 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
936 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
937 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;
938
939 flow_req.nav_id = ud->tisci_dev_id;
940 flow_req.flow_index = uc->rflow->id;
941
942 if (uc->needs_epib)
943 flow_req.rx_einfo_present = 1;
944 else
945 flow_req.rx_einfo_present = 0;
946
947 if (uc->psd_size)
948 flow_req.rx_psinfo_present = 1;
949 else
950 flow_req.rx_psinfo_present = 0;
951
952 flow_req.rx_error_handling = 0;
953 flow_req.rx_desc_type = 0;
954 flow_req.rx_dest_qnum = rx_ring;
955 flow_req.rx_src_tag_hi_sel = 2;
956 flow_req.rx_src_tag_lo_sel = 4;
957 flow_req.rx_dest_tag_hi_sel = 5;
958 flow_req.rx_dest_tag_lo_sel = 4;
959 flow_req.rx_fdq0_sz0_qnum = fd_ring;
960 flow_req.rx_fdq1_qnum = fd_ring;
961 flow_req.rx_fdq2_qnum = fd_ring;
962 flow_req.rx_fdq3_qnum = fd_ring;
963 flow_req.rx_ps_location = 0;
964
965 ret = ud->tisci_udmap_ops->rx_flow_cfg(ud->tisci, &flow_req);
966 if (ret)
967 dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
968 uc->rchan->id, uc->rflow->id, ret);
969
970 return ret;
971}
972
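/*
 * PSI-L thread ids on the UDMA side are composed as psil_base + channel
 * id; the destination end of the pairing carries bit 15 (0x8000),
 * either ORed in here or already present in the slave thread id taken
 * from the device tree.
 */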
973static int udma_alloc_chan_resources(struct udma_chan *uc)
974{
975 struct udma_dev *ud = uc->ud;
976 int ret;
977
978 pr_debug("%s: chan:%d as %s\n",
979 __func__, uc->id, udma_get_dir_text(uc->dir));
980
981 switch (uc->dir) {
982 case DMA_MEM_TO_MEM:
983 /* Non synchronized - mem to mem type of transfer */
984 ret = udma_get_chan_pair(uc);
985 if (ret)
986 return ret;
987
988 ret = udma_alloc_tx_resources(uc);
989 if (ret)
990 goto err_free_res;
991
992 ret = udma_alloc_rx_resources(uc);
993 if (ret)
994 goto err_free_res;
995
996 uc->src_thread = ud->psil_base + uc->tchan->id;
997 uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
998 break;
999 case DMA_MEM_TO_DEV:
1000 /* Slave transfer synchronized - mem to dev (TX) transfer */
1001 ret = udma_alloc_tx_resources(uc);
1002 if (ret)
1003 goto err_free_res;
1004
1005 uc->src_thread = ud->psil_base + uc->tchan->id;
1006 uc->dst_thread = uc->slave_thread_id;
1007 if (!(uc->dst_thread & 0x8000))
1008 uc->dst_thread |= 0x8000;
1009
1010 break;
1011 case DMA_DEV_TO_MEM:
1012 /* Slave transfer synchronized - dev to mem (RX) transfer */
1013 ret = udma_alloc_rx_resources(uc);
1014 if (ret)
1015 goto err_free_res;
1016
1017 uc->src_thread = uc->slave_thread_id;
1018 uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
1019
1020 break;
1021 default:
1022 /* Can not happen */
1023 pr_debug("%s: chan:%d invalid direction (%u)\n",
1024 __func__, uc->id, uc->dir);
1025 return -EINVAL;
1026 }
1027
1028 /* We have channel indexes and rings */
1029 if (uc->dir == DMA_MEM_TO_MEM) {
1030 ret = udma_alloc_tchan_sci_req(uc);
1031 if (ret)
1032 goto err_free_res;
1033
1034 ret = udma_alloc_rchan_sci_req(uc);
1035 if (ret)
1036 goto err_free_res;
1037 } else {
1038 /* Slave transfer */
1039 if (uc->dir == DMA_MEM_TO_DEV) {
1040 ret = udma_alloc_tchan_sci_req(uc);
1041 if (ret)
1042 goto err_free_res;
1043 } else {
1044 ret = udma_alloc_rchan_sci_req(uc);
1045 if (ret)
1046 goto err_free_res;
1047 }
1048 }
1049
1050 if (udma_is_chan_running(uc)) {
1051 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1052 udma_stop(uc);
1053 if (udma_is_chan_running(uc)) {
1054 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1055 goto err_free_res;
1056 }
1057 }
1058
1059 /* PSI-L pairing */
1060 ret = udma_navss_psil_pair(ud, uc->src_thread, uc->dst_thread);
1061 if (ret) {
1062 dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
1063 goto err_free_res;
1064 }
1065
1066 return 0;
1067
1068err_free_res:
1069 udma_free_tx_resources(uc);
1070 udma_free_rx_resources(uc);
1071 uc->slave_thread_id = -1;
1072 return ret;
1073}
1074
1075static void udma_free_chan_resources(struct udma_chan *uc)
1076{
1077 /* Some configuration to UDMA-P channel: disable, reset, whatever */
1078
1079 /* Release PSI-L pairing */
1080 udma_navss_psil_unpair(uc->ud, uc->src_thread, uc->dst_thread);
1081
1082 /* Reset the rings for a new start */
1083 udma_reset_rings(uc);
1084 udma_free_tx_resources(uc);
1085 udma_free_rx_resources(uc);
1086
1087 uc->slave_thread_id = -1;
1088 uc->dir = DMA_MEM_TO_MEM;
1089}
1090
1091static int udma_get_mmrs(struct udevice *dev)
1092{
1093 struct udma_dev *ud = dev_get_priv(dev);
1094 int i;
1095
1096 for (i = 0; i < MMR_LAST; i++) {
1097 ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
1098 mmr_names[i]);
1099 if (!ud->mmrs[i])
1100 return -EINVAL;
1101 }
1102
1103 return 0;
1104}
1105
1106#define UDMA_MAX_CHANNELS 192
1107
1108static int udma_probe(struct udevice *dev)
1109{
1110 struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
1111 struct udma_dev *ud = dev_get_priv(dev);
1112 int i, ret;
1113 u32 cap2, cap3;
1114 struct udevice *tmp;
1115 struct udevice *tisci_dev = NULL;
1116
1117 ret = udma_get_mmrs(dev);
1118 if (ret)
1119 return ret;
1120
1121 ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
1122 "ti,ringacc", &tmp);
1123 ud->ringacc = dev_get_priv(tmp);
1124 if (IS_ERR(ud->ringacc))
1125 return PTR_ERR(ud->ringacc);
1126
1127 ud->psil_base = dev_read_u32_default(dev, "ti,psil-base", 0);
1128 if (!ud->psil_base) {
1129 dev_err(dev,
1130 "Missing ti,psil-base property\n");
1131 return -EINVAL;
1132 }
1133
1134 ret = uclass_get_device_by_name(UCLASS_FIRMWARE, "dmsc", &tisci_dev);
1135 if (ret) {
1136 debug("TISCI RA RM get failed (%d)\n", ret);
1137 ud->tisci = NULL;
1138 return 0;
1139 }
1140 ud->tisci = (struct ti_sci_handle *)
1141 (ti_sci_get_handle_from_sysfw(tisci_dev));
1142
1143 ret = dev_read_u32_default(dev, "ti,sci", 0);
1144 if (!ret) {
1145 dev_err(dev, "TISCI RA RM disabled\n");
1146 ud->tisci = NULL;
1147 }
1148
1149 if (ud->tisci) {
1150 ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));
1151
1152 ud->tisci_dev_id = -1;
1153 ret = dev_read_u32(dev, "ti,sci-dev-id", &ud->tisci_dev_id);
1154 if (ret) {
1155 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
1156 return ret;
1157 }
1158
1159 ud->tisci_navss_dev_id = -1;
1160 ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
1161 &ud->tisci_navss_dev_id);
1162 if (ret) {
1163 dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
1164 return ret;
1165 }
1166
1167 ud->tisci_udmap_ops = &ud->tisci->ops.rm_udmap_ops;
1168 ud->tisci_psil_ops = &ud->tisci->ops.rm_psil_ops;
1169 }
1170
1171 ud->is_coherent = dev_read_bool(dev, "dma-coherent");
1172
1173 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
1174 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
1175
1176 ud->rflow_cnt = cap3 & 0x3fff;
1177 ud->tchan_cnt = cap2 & 0x1ff;
1178 ud->echan_cnt = (cap2 >> 9) & 0x1ff;
1179 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1180 ud->ch_count = ud->tchan_cnt + ud->rchan_cnt;
1181
1182 dev_info(dev,
1183 "Number of channels: %u (tchan: %u, echan: %u, rchan: %u dev-id %u)\n",
1184 ud->ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
1185 ud->tisci_dev_id);
1186 dev_info(dev, "Number of rflows: %u\n", ud->rflow_cnt);
1187
1188 ud->channels = devm_kcalloc(dev, ud->ch_count, sizeof(*ud->channels),
1189 GFP_KERNEL);
1190 ud->tchan_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->tchan_cnt),
1191 sizeof(unsigned long), GFP_KERNEL);
1192 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt,
1193 sizeof(*ud->tchans), GFP_KERNEL);
1194 ud->rchan_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
1195 sizeof(unsigned long), GFP_KERNEL);
1196 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt,
1197 sizeof(*ud->rchans), GFP_KERNEL);
1198 ud->rflow_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
1199 sizeof(unsigned long), GFP_KERNEL);
1200 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt,
1201 sizeof(*ud->rflows), GFP_KERNEL);
1202
1203 if (!ud->channels || !ud->tchan_map || !ud->rchan_map ||
1204 !ud->rflow_map || !ud->tchans || !ud->rchans || !ud->rflows)
1205 return -ENOMEM;
1206
1207 for (i = 0; i < ud->tchan_cnt; i++) {
1208 struct udma_tchan *tchan = &ud->tchans[i];
1209
1210 tchan->id = i;
1211 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
1212 }
1213
1214 for (i = 0; i < ud->rchan_cnt; i++) {
1215 struct udma_rchan *rchan = &ud->rchans[i];
1216
1217 rchan->id = i;
1218 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
1219 }
1220
1221 for (i = 0; i < ud->rflow_cnt; i++) {
1222 struct udma_rflow *rflow = &ud->rflows[i];
1223
1224 rflow->id = i;
1225 }
1226
1227 for (i = 0; i < ud->ch_count; i++) {
1228 struct udma_chan *uc = &ud->channels[i];
1229
1230 uc->ud = ud;
1231 uc->id = i;
1232 uc->slave_thread_id = -1;
1233 uc->tchan = NULL;
1234 uc->rchan = NULL;
1235 uc->dir = DMA_MEM_TO_MEM;
1236 snprintf(uc->name, sizeof(uc->name), "UDMA chan%d", i);
1237 if (!i)
1238 uc->in_use = true;
1239 }
1240
1241 pr_debug("UDMA(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
1242 udma_read(ud->mmrs[MMR_GCFG], 0),
1243 udma_read(ud->mmrs[MMR_GCFG], 0x20),
1244 udma_read(ud->mmrs[MMR_GCFG], 0x24),
1245 udma_read(ud->mmrs[MMR_GCFG], 0x28),
1246 udma_read(ud->mmrs[MMR_GCFG], 0x2c));
1247
1248 uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;
1249
1250 return ret;
1251}
1252
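/*
 * memcpy is done with a type 15 TR descriptor. Transfers below 64K fit
 * in a single TR; larger ones are split into tr0 (equal rows of
 * SZ_64K - alignment bytes) plus tr1 for the remainder, so the 16-bit
 * icnt0 fields never overflow.
 */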
1253static int udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
1254 dma_addr_t src, size_t len)
1255{
1256 u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1257 struct cppi5_tr_type15_t *tr_req;
1258 int num_tr;
1259 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
1260 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
1261 unsigned long dummy;
1262 void *tr_desc;
1263 size_t desc_size;
1264
1265 if (len < SZ_64K) {
1266 num_tr = 1;
1267 tr0_cnt0 = len;
1268 tr0_cnt1 = 1;
1269 } else {
1270 unsigned long align_to = __ffs(src | dest);
1271
1272 if (align_to > 3)
1273 align_to = 3;
1274 /*
1275 * Keep simple: tr0: SZ_64K-alignment blocks,
1276 * tr1: the remaining
1277 */
1278 num_tr = 2;
1279 tr0_cnt0 = (SZ_64K - BIT(align_to));
1280 if (len / tr0_cnt0 >= SZ_64K) {
1281 dev_err(uc->ud->dev, "size %zu is not supported\n",
1282 len);
1283 return -EINVAL;
1284 }
1285
1286 tr0_cnt1 = len / tr0_cnt0;
1287 tr1_cnt0 = len % tr0_cnt0;
1288 }
1289
1290 desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
1291 tr_desc = dma_alloc_coherent(desc_size, &dummy);
1292 if (!tr_desc)
1293 return -ENOMEM;
1294 memset(tr_desc, 0, desc_size);
1295
1296 cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
1297 cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
1298 cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);
1299
1300 tr_req = tr_desc + tr_size;
1301
1302 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
1303 CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
1304 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
1305
1306 tr_req[0].addr = src;
1307 tr_req[0].icnt0 = tr0_cnt0;
1308 tr_req[0].icnt1 = tr0_cnt1;
1309 tr_req[0].icnt2 = 1;
1310 tr_req[0].icnt3 = 1;
1311 tr_req[0].dim1 = tr0_cnt0;
1312
1313 tr_req[0].daddr = dest;
1314 tr_req[0].dicnt0 = tr0_cnt0;
1315 tr_req[0].dicnt1 = tr0_cnt1;
1316 tr_req[0].dicnt2 = 1;
1317 tr_req[0].dicnt3 = 1;
1318 tr_req[0].ddim1 = tr0_cnt0;
1319
1320 if (num_tr == 2) {
1321 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
1322 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
1323 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
1324
1325 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
1326 tr_req[1].icnt0 = tr1_cnt0;
1327 tr_req[1].icnt1 = 1;
1328 tr_req[1].icnt2 = 1;
1329 tr_req[1].icnt3 = 1;
1330
1331 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
1332 tr_req[1].dicnt0 = tr1_cnt0;
1333 tr_req[1].dicnt1 = 1;
1334 tr_req[1].dicnt2 = 1;
1335 tr_req[1].dicnt3 = 1;
1336 }
1337
1338 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
1339
1340 if (!udma_is_coherent(uc)) {
1341 flush_dcache_range((u64)tr_desc,
1342 ALIGN((u64)tr_desc + desc_size,
1343 ARCH_DMA_MINALIGN));
1344 }
1345
1346 k3_nav_ringacc_ring_push(uc->tchan->t_ring, &tr_desc);
1347
1348 return 0;
1349}
1350
1351static int udma_transfer(struct udevice *dev, int direction,
1352 void *dst, void *src, size_t len)
1353{
1354 struct udma_dev *ud = dev_get_priv(dev);
1355 /* Channel0 is reserved for memcpy */
1356 struct udma_chan *uc = &ud->channels[0];
1357 dma_addr_t paddr = 0;
1358 int ret;
1359
1360 ret = udma_alloc_chan_resources(uc);
1361 if (ret)
1362 return ret;
1363
1364 udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);
1365 udma_start(uc);
1366 udma_poll_completion(uc, &paddr);
1367 udma_stop(uc);
1368
1369 udma_free_chan_resources(uc);
1370 return 0;
1371}
1372
1373static int udma_request(struct dma *dma)
1374{
1375 struct udma_dev *ud = dev_get_priv(dma->dev);
1376 struct udma_chan *uc;
1377 unsigned long dummy;
1378 int ret;
1379
1380 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1381 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1382 return -EINVAL;
1383 }
1384
1385 uc = &ud->channels[dma->id];
1386 ret = udma_alloc_chan_resources(uc);
1387 if (ret) {
1388 dev_err(dma->dev, "alloc dma res failed %d\n", ret);
1389 return -EINVAL;
1390 }
1391
1392 uc->hdesc_size = cppi5_hdesc_calc_size(uc->needs_epib,
1393 uc->psd_size, 0);
1394 uc->hdesc_size = ALIGN(uc->hdesc_size, ARCH_DMA_MINALIGN);
1395
1396 if (uc->dir == DMA_MEM_TO_DEV) {
1397 uc->desc_tx = dma_alloc_coherent(uc->hdesc_size, &dummy);
1398 memset(uc->desc_tx, 0, uc->hdesc_size);
1399 } else {
1400 uc->desc_rx = dma_alloc_coherent(
1401 uc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
1402 memset(uc->desc_rx, 0, uc->hdesc_size * UDMA_RX_DESC_NUM);
1403 }
1404
1405 uc->in_use = true;
1406 uc->desc_rx_cur = 0;
1407 uc->num_rx_bufs = 0;
1408
1409 return 0;
1410}
1411
1412static int udma_free(struct dma *dma)
1413{
1414 struct udma_dev *ud = dev_get_priv(dma->dev);
1415 struct udma_chan *uc;
1416
1417 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1418 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1419 return -EINVAL;
1420 }
1421 uc = &ud->channels[dma->id];
1422
1423 if (udma_is_chan_running(uc))
1424 udma_stop(uc);
1425 udma_free_chan_resources(uc);
1426
1427 uc->in_use = false;
1428
1429 return 0;
1430}
1431
1432static int udma_enable(struct dma *dma)
1433{
1434 struct udma_dev *ud = dev_get_priv(dma->dev);
1435 struct udma_chan *uc;
1436 int ret;
1437
1438 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1439 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1440 return -EINVAL;
1441 }
1442 uc = &ud->channels[dma->id];
1443
1444 ret = udma_start(uc);
1445
1446 return ret;
1447}
1448
1449static int udma_disable(struct dma *dma)
1450{
1451 struct udma_dev *ud = dev_get_priv(dma->dev);
1452 struct udma_chan *uc;
1453 int ret = 0;
1454
1455 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1456 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1457 return -EINVAL;
1458 }
1459 uc = &ud->channels[dma->id];
1460
1461 if (udma_is_chan_running(uc))
1462 ret = udma_stop(uc);
1463 else
1464 dev_err(dma->dev, "%s not running\n", __func__);
1465
1466 return ret;
1467}
1468
1469static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
1470{
1471 struct udma_dev *ud = dev_get_priv(dma->dev);
1472 struct cppi5_host_desc_t *desc_tx;
1473 dma_addr_t dma_src = (dma_addr_t)src;
1474 struct ti_udma_drv_packet_data packet_data = { 0 };
1475 dma_addr_t paddr;
1476 struct udma_chan *uc;
1477 u32 tc_ring_id;
1478 int ret;
1479
1480 if (metadata)
1481 packet_data = *((struct ti_udma_drv_packet_data *)metadata);
1482
1483 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1484 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1485 return -EINVAL;
1486 }
1487 uc = &ud->channels[dma->id];
1488
1489 if (uc->dir != DMA_MEM_TO_DEV)
1490 return -EINVAL;
1491
1492 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1493
1494 desc_tx = uc->desc_tx;
1495
1496 cppi5_hdesc_reset_hbdesc(desc_tx);
1497
1498 cppi5_hdesc_init(desc_tx,
1499 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
1500 uc->psd_size);
1501 cppi5_hdesc_set_pktlen(desc_tx, len);
1502 cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
1503 cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
1504 cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
1505 /* pass the following information from the caller */
1506 cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
1507 cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);
1508
1509 if (!udma_is_coherent(uc)) {
1510 flush_dcache_range((u64)dma_src,
1511 ALIGN((u64)dma_src + len,
1512 ARCH_DMA_MINALIGN));
1513 flush_dcache_range((u64)desc_tx,
1514 ALIGN((u64)desc_tx + uc->hdesc_size,
1515 ARCH_DMA_MINALIGN));
1516 }
1517
1518 ret = k3_nav_ringacc_ring_push(uc->tchan->t_ring, &uc->desc_tx);
1519 if (ret) {
1520 dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
1521 dma->id, ret);
1522 return ret;
1523 }
1524
1525 udma_poll_completion(uc, &paddr);
1526
1527 return 0;
1528}
1529
1530static int udma_receive(struct dma *dma, void **dst, void *metadata)
1531{
1532 struct udma_dev *ud = dev_get_priv(dma->dev);
1533 struct cppi5_host_desc_t *desc_rx;
1534 dma_addr_t buf_dma;
1535 struct udma_chan *uc;
1536 u32 buf_dma_len, pkt_len;
1537 u32 port_id = 0;
1538 int ret;
1539
1540 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1541 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1542 return -EINVAL;
1543 }
1544 uc = &ud->channels[dma->id];
1545
1546 if (uc->dir != DMA_DEV_TO_MEM)
1547 return -EINVAL;
1548 if (!uc->num_rx_bufs)
1549 return -EINVAL;
1550
1551 ret = k3_nav_ringacc_ring_pop(uc->rchan->r_ring, &desc_rx);
1552 if (ret && ret != -ENODATA) {
1553 dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
1554 return ret;
1555 } else if (ret == -ENODATA) {
1556 return 0;
1557 }
1558
1559 /* invalidate cache data */
1560 if (!udma_is_coherent(uc)) {
1561 invalidate_dcache_range((ulong)desc_rx,
1562 (ulong)desc_rx + uc->hdesc_size);
1563 }
1564
1565 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
1566 pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
1567
1568 /* invalidate cache data */
1569 if (!udma_is_coherent(uc)) {
1570 invalidate_dcache_range((ulong)buf_dma,
1571 (ulong)(buf_dma + buf_dma_len));
1572 }
1573
1574 cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
1575
1576 *dst = (void *)buf_dma;
1577 uc->num_rx_bufs--;
1578
1579 return pkt_len;
1580}
1581
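/*
 * The DMA specifier is expected to carry three cells: a phandle to the
 * slave (peripheral) node, the PSI-L thread offset within that slave
 * (used to pick the ti,psil-configN subnode and to compute
 * slave_thread_id), and the transfer direction (UDMA_DIR_TX/RX).
 */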
1582static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
1583{
1584 struct udma_dev *ud = dev_get_priv(dma->dev);
1585 struct udma_chan *uc = &ud->channels[0];
1586 ofnode chconf_node, slave_node;
1587 char prop[50];
1588 u32 val;
1589
1590 for (val = 0; val < ud->ch_count; val++) {
1591 uc = &ud->channels[val];
1592 if (!uc->in_use)
1593 break;
1594 }
1595
1596 if (val == ud->ch_count)
1597 return -EBUSY;
1598
1599 uc->dir = DMA_DEV_TO_MEM;
1600 if (args->args[2] == UDMA_DIR_TX)
1601 uc->dir = DMA_MEM_TO_DEV;
1602
1603 slave_node = ofnode_get_by_phandle(args->args[0]);
1604 if (!ofnode_valid(slave_node)) {
1605 dev_err(ud->dev, "slave node is missing\n");
1606 return -EINVAL;
1607 }
1608
1609 snprintf(prop, sizeof(prop), "ti,psil-config%u", args->args[1]);
1610 chconf_node = ofnode_find_subnode(slave_node, prop);
1611 if (!ofnode_valid(chconf_node)) {
1612 dev_err(ud->dev, "Channel configuration node is missing\n");
1613 return -EINVAL;
1614 }
1615
1616 if (!ofnode_read_u32(chconf_node, "linux,udma-mode", &val)) {
1617 if (val == UDMA_PKT_MODE)
1618 uc->pkt_mode = true;
1619 }
1620
1621 if (!ofnode_read_u32(chconf_node, "statictr-type", &val))
1622 uc->static_tr_type = val;
1623
1624 uc->needs_epib = ofnode_read_bool(chconf_node, "ti,needs-epib");
1625 if (!ofnode_read_u32(chconf_node, "ti,psd-size", &val))
1626 uc->psd_size = val;
1627 uc->metadata_size = (uc->needs_epib ? 16 : 0) + uc->psd_size;
1628
1629 if (ofnode_read_u32(slave_node, "ti,psil-base", &val)) {
1630 dev_err(ud->dev, "ti,psil-base is missing\n");
1631 return -EINVAL;
1632 }
1633
1634 uc->slave_thread_id = val + args->args[1];
1635
1636 dma->id = uc->id;
1637 pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
1638 dma->id, uc->needs_epib,
1639 uc->psd_size, uc->metadata_size,
1640 uc->slave_thread_id);
1641
1642 return 0;
1643}
1644
1645int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
1646{
1647 struct udma_dev *ud = dev_get_priv(dma->dev);
1648 struct cppi5_host_desc_t *desc_rx;
1649 dma_addr_t dma_dst;
1650 struct udma_chan *uc;
1651 u32 desc_num;
1652
1653 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1654 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1655 return -EINVAL;
1656 }
1657 uc = &ud->channels[dma->id];
1658
1659 if (uc->dir != DMA_DEV_TO_MEM)
1660 return -EINVAL;
1661
1662 if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
1663 return -EINVAL;
1664
1665 desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
1666 desc_rx = uc->desc_rx + (desc_num * uc->hdesc_size);
1667 dma_dst = (dma_addr_t)dst;
1668
1669 cppi5_hdesc_reset_hbdesc(desc_rx);
1670
1671 cppi5_hdesc_init(desc_rx,
1672 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
1673 uc->psd_size);
1674 cppi5_hdesc_set_pktlen(desc_rx, size);
1675 cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);
1676
1677 if (!udma_is_coherent(uc)) {
1678 flush_dcache_range((u64)desc_rx,
1679 ALIGN((u64)desc_rx + uc->hdesc_size,
1680 ARCH_DMA_MINALIGN));
1681 }
1682
1683 k3_nav_ringacc_ring_push(uc->rchan->fd_ring, &desc_rx);
1684
1685 uc->num_rx_bufs++;
1686 uc->desc_rx_cur++;
1687
1688 return 0;
1689}
1690
1691static const struct dma_ops udma_ops = {
1692 .transfer = udma_transfer,
1693 .of_xlate = udma_of_xlate,
1694 .request = udma_request,
1695 .free = udma_free,
1696 .enable = udma_enable,
1697 .disable = udma_disable,
1698 .send = udma_send,
1699 .receive = udma_receive,
1700 .prepare_rcv_buf = udma_prepare_rcv_buf,
1701};
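/*
 * Rough sketch (not part of this driver) of how a client such as a
 * network driver might exercise these ops through the U-Boot DMA
 * uclass API from include/dma.h; the channel names "rx"/"tx" are only
 * an example:
 *
 *	struct dma rx, tx;
 *
 *	dma_get_by_name(dev, "rx", &rx);
 *	dma_get_by_name(dev, "tx", &tx);
 *	dma_prepare_rcv_buf(&rx, rx_buf, PKTSIZE_ALIGN);
 *	dma_enable(&rx);
 *	dma_enable(&tx);
 *	dma_send(&tx, packet, length, NULL);
 *	len = dma_receive(&rx, (void **)&packet, NULL);
 *	dma_disable(&tx);
 *	dma_disable(&rx);
 *	dma_free(&tx);
 *	dma_free(&rx);
 */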
1702
1703static const struct udevice_id udma_ids[] = {
1704 { .compatible = "ti,k3-navss-udmap" },
1705 { }
1706};
1707
1708U_BOOT_DRIVER(ti_edma3) = {
1709 .name = "ti-udma",
1710 .id = UCLASS_DMA,
1711 .of_match = udma_ids,
1712 .ops = &udma_ops,
1713 .probe = udma_probe,
1714 .priv_auto_alloc_size = sizeof(struct udma_dev),
1715};