// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#define pr_fmt(fmt) "udma: " fmt

#include <common.h>
#include <cpu_func.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <malloc.h>
#include <asm/dma-mapping.h>
#include <dm.h>
#include <dm/device.h>
#include <dm/read.h>
#include <dm/of_access.h>
#include <dma.h>
#include <dma-uclass.h>
#include <linux/delay.h>
#include <dt-bindings/dma/k3-udma.h>
#include <linux/bitmap.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/cppi5.h>
#include <linux/soc/ti/ti-udma.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "k3-udma-hwdef.h"

#if BITS_PER_LONG == 64
#define RINGACC_RING_USE_PROXY (0)
#else
#define RINGACC_RING_USE_PROXY (1)
#endif

#define K3_UDMA_MAX_RFLOWS 1024

struct udma_chan;

enum udma_mmr {
	MMR_GCFG = 0,
	MMR_RCHANRT,
	MMR_TCHANRT,
	MMR_LAST,
};

static const char * const mmr_names[] = {
	"gcfg", "rchanrt", "tchanrt"
};

struct udma_tchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *t_ring; /* Transmit ring */
	struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
};

struct udma_rchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
	struct k3_nav_ring *r_ring; /* Receive ring */
};

struct udma_rflow {
	int id;
};

enum udma_rm_range {
	RM_RANGE_TCHAN = 0,
	RM_RANGE_RCHAN,
	RM_RANGE_RFLOW,
	RM_RANGE_LAST,
};

struct udma_tisci_rm {
	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
	u32 tisci_dev_id;

	/* tisci information for PSI-L thread pairing/unpairing */
	const struct ti_sci_rm_psil_ops *tisci_psil_ops;
	u32 tisci_navss_dev_id;

	struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
};

struct udma_dev {
	struct udevice *dev;
	void __iomem *mmrs[MMR_LAST];

	struct udma_tisci_rm tisci_rm;
	struct k3_nav_ringacc *ringacc;

	u32 features;

	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_map;
	unsigned long *rflow_map_reserved;

	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
	u32 psil_base;

	u32 ch_count;
};

struct udma_chan {
	struct udma_dev *ud;
	char name[20];

	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	struct ti_udma_drv_chan_cfg_data cfg_data;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* EPIB is needed for the communication or not */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	int slave_thread_id;
	u32 src_thread;
	u32 dst_thread;
	u32 static_tr_type;

	u32 id;
	enum dma_direction dir;

	struct cppi5_host_desc_t *desc_tx;
	u32 hdesc_size;
	bool in_use;
	void *desc_rx;
	u32 num_rx_bufs;
	u32 desc_rx_cur;
};

#define UDMA_CH_1000(ch)	(ch * 0x1000)
#define UDMA_CH_100(ch)		(ch * 0x100)
#define UDMA_CH_40(ch)		(ch * 0x40)

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM	PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM	4
#endif

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	u32 v;

	v = __raw_readl(base + reg);
	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
	return v;
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
	__raw_writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = udma_read(base, reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		udma_write(base, reg, tmp);
}

/* TCHANRT */
static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
{
	if (!tchan)
		return 0;
	return udma_read(tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_tchan *tchan,
				      int reg, u32 val)
{
	if (!tchan)
		return;
	udma_write(tchan->reg_rt, reg, val);
}

/* RCHANRT */
static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
{
	if (!rchan)
		return 0;
	return udma_read(rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_rchan *rchan,
				      int reg, u32 val)
{
	if (!rchan)
		return;
	udma_write(rchan->reg_rt, reg, val);
}

static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
				       u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
					 u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}

static inline char *udma_get_dir_text(enum dma_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM:
		return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV:
		return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM:
		return "MEM_TO_MEM";
	case DMA_DEV_TO_DEV:
		return "DEV_TO_DEV";
	default:
		break;
	}

	return "invalid";
}

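/*
 * A channel counts as running if the RT enable bit is set in the tchan
 * and/or rchan realtime CTL register used for the configured direction.
 */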
static inline bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, rrt_ctl,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, trt_ctl,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		break;
	default:
		break;
	}

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

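/* Pop one completed descriptor address from the completion ring, if any */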
static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_nav_ring *ring = NULL;
	int ret = -ENOENT;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring && k3_nav_ringacc_ring_get_occ(ring))
		ret = k3_nav_ringacc_ring_pop(ring, addr);

	return ret;
}

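/* Return the rings used by the channel to a clean state before reuse */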
static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_nav_ring *ring1 = NULL;
	struct k3_nav_ring *ring2 = NULL;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring1 = uc->rchan->fd_ring;
		ring2 = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring1)
		k3_nav_ringacc_ring_reset_dma(ring1, 0);
	if (ring2)
		k3_nav_ringacc_ring_reset(ring2);
}

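/*
 * Clear the channel RT byte/packet counters by writing back the values that
 * were read, and reset the software byte count as well.
 */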
static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
	}

	uc->bcnt = 0;
}

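/* Disable the channel on both ends immediately, without a graceful teardown */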
static inline int udma_stop_hard(struct udma_chan *uc)
{
	pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

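/*
 * Enable the channel for the configured direction: clear any pending
 * teardown, reset the counters, then set the RT and peer enable bits.
 */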
static int udma_start(struct udma_chan *uc)
{
	/* Channel is already running, no need to proceed further */
	if (udma_is_chan_running(uc))
		goto out;

	pr_debug("%s: chan:%d dir:%s (static_tr_type: %d)\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir),
		 uc->static_tr_type);

	/* Make sure that we clear the teardown bit, if it is set */
	udma_stop_hard(uc);

	/* Reset all counters */
	udma_reset_counters(uc);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_CTL_REG),
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		/* Enable remote */
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_CTL_REG),
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	default:
		return -EINVAL;
	}

	pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
out:
	return 0;
}

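/*
 * Request a teardown of the TX channel and, when @sync is set, busy-wait
 * (with a timeout) until the channel reports that it is disabled.
 */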
static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
			   UDMA_CHAN_RT_CTL_EN |
			   UDMA_CHAN_RT_CTL_TDOWN);

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf(" %s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
			   UDMA_PEER_RT_EN_ENABLE |
			   UDMA_PEER_RT_EN_TEARDOWN);

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf("%s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline int udma_stop(struct udma_chan *uc)
{
	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	udma_reset_counters(uc);
	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_stop_dev2mem(uc, true);
		break;
	case DMA_MEM_TO_DEV:
		udma_stop_mem2dev(uc, true);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

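/* Busy-wait until a completed descriptor can be popped from the ring */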
static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
{
	int i = 1;

	while (udma_pop_from_ring(uc, paddr)) {
		udelay(1);
		if (!(i % 1000000))
			printf(".");
		i++;
	}
}

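/*
 * Reserve a receive flow: either the explicitly requested id, or the first
 * free GP rflow above the range reserved for the default (per-rchan) flows.
 */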
static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
{
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	if (id >= 0) {
		if (test_bit(id, ud->rflow_map)) {
			dev_err(ud->dev, "rflow%d is in use\n", id);
			return ERR_PTR(-ENOENT);
		}
	} else {
		bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
			  ud->rflow_cnt);

		id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
		if (id >= ud->rflow_cnt)
			return ERR_PTR(-ENOENT);
	}

	__set_bit(id, ud->rflow_map);
	return &ud->rflows[id];
}

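/* Generate __udma_reserve_tchan()/__udma_reserve_rchan() bitmap allocators */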
#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, "%s%d is in use\n", #res, id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		id = find_first_zero_bit(ud->res##_map, ud->res##_cnt);	\
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	__set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}

UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);

static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	uc->tchan = __udma_reserve_tchan(ud, -1);
	if (IS_ERR(uc->tchan))
		return PTR_ERR(uc->tchan);

	pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	uc->rchan = __udma_reserve_rchan(ud, -1);
	if (IS_ERR(uc->rchan))
		return PTR_ERR(uc->rchan);

	pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);

	return 0;
}

static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	for (chan_id = 0; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	__set_bit(chan_id, ud->tchan_map);
	__set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);

	return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	if (!uc->rchan)
		dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);

	uc->rflow = __udma_reserve_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow))
		return PTR_ERR(uc->rflow);

	pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
	return 0;
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		__clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		__clear_bit(uc->tchan->id, ud->tchan_map);
		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__clear_bit(uc->rflow->id, ud->rflow_map);
		uc->rflow = NULL;
	}
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	uc->tchan->t_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, uc->tchan->id,
				RINGACC_RING_USE_PROXY);
	if (!uc->tchan->t_ring) {
		ret = -EBUSY;
		goto err_tx_ring;
	}

	uc->tchan->tc_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->tchan->tc_ring) {
		ret = -EBUSY;
		goto err_txc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
err_txc_ring:
	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_tx_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->fd_ring = NULL;
	uc->rchan->r_ring = NULL;

	udma_put_rflow(uc);
	udma_put_rchan(uc);
}

static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->dir == DMA_MEM_TO_MEM)
		return 0;

	ret = udma_get_rflow(uc, uc->rchan->id);
	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;

	uc->rchan->fd_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, fd_ring_id,
				RINGACC_RING_USE_PROXY);
	if (!uc->rchan->fd_ring) {
		ret = -EBUSY;
		goto err_rx_ring;
	}

	uc->rchan->r_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->rchan->r_ring) {
		ret = -EBUSY;
		goto err_rxc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->rchan->fd_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->rchan->r_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->r_ring = NULL;
err_rxc_ring:
	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	uc->rchan->fd_ring = NULL;
err_rx_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}

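/* Configure the TX channel in the UDMAP through the TISCI RM tx_ch_cfg call */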
static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->tchan->id;
	req.tx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM)
		req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	else
		req.tx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
	req.txcq_qnum = tc_ring;

	ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);

	return ret;
}

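/* Configure the RX channel and its receive flow through the TISCI RM calls */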
static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int fd_ring = k3_nav_ringacc_get_ring_id(uc->rchan->fd_ring);
	int rx_ring = k3_nav_ringacc_get_ring_id(uc->rchan->r_ring);
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->rchan->id;
	req.rx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM) {
		req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		req.rxcq_qnum = tc_ring;
	} else {
		req.rx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
		req.rxcq_qnum = rx_ring;
	}
	if (uc->rflow->id != uc->rchan->id && uc->dir != DMA_MEM_TO_MEM) {
		req.flowid_start = uc->rflow->id;
		req.flowid_cnt = 1;
		req.valid_params |=
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
	}

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
			uc->rchan->id, ret);
		return ret;
	}
	if (uc->dir == DMA_MEM_TO_MEM)
		return ret;

	flow_req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;

	if (uc->psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;

	flow_req.rx_error_handling = 0;
	flow_req.rx_desc_type = 0;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = 2;
	flow_req.rx_src_tag_lo_sel = 4;
	flow_req.rx_dest_tag_hi_sel = 5;
	flow_req.rx_dest_tag_lo_sel = 4;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;
	flow_req.rx_ps_location = 0;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
						     &flow_req);
	if (ret)
		dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
			uc->rchan->id, uc->rflow->id, ret);

	return ret;
}

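/*
 * Allocate channels, rings and rflow for the requested direction, configure
 * them through TISCI and finally pair the PSI-L source and destination
 * threads.
 */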
static int udma_alloc_chan_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	pr_debug("%s: chan:%d as %s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	switch (uc->dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		ret = udma_get_chan_pair(uc);
		if (ret)
			return ret;

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = uc->slave_thread_id;
		if (!(uc->dst_thread & 0x8000))
			uc->dst_thread |= 0x8000;

		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = uc->slave_thread_id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;

		break;
	default:
		/* Can not happen */
		pr_debug("%s: chan:%d invalid direction (%u)\n",
			 __func__, uc->id, uc->dir);
		return -EINVAL;
	}

	/* We have channel indexes and rings */
	if (uc->dir == DMA_MEM_TO_MEM) {
		ret = udma_alloc_tchan_sci_req(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rchan_sci_req(uc);
		if (ret)
			goto err_free_res;
	} else {
		/* Slave transfer */
		if (uc->dir == DMA_MEM_TO_DEV) {
			ret = udma_alloc_tchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		} else {
			ret = udma_alloc_rchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		}
	}

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			goto err_free_res;
		}
	}

	/* PSI-L pairing */
	ret = udma_navss_psil_pair(ud, uc->src_thread, uc->dst_thread);
	if (ret) {
		dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
		goto err_free_res;
	}

	return 0;

err_free_res:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	uc->slave_thread_id = -1;
	return ret;
}

static void udma_free_chan_resources(struct udma_chan *uc)
{
	/* Some configuration to UDMA-P channel: disable, reset, whatever */

	/* Release PSI-L pairing */
	udma_navss_psil_unpair(uc->ud, uc->src_thread, uc->dst_thread);

	/* Reset the rings for a new start */
	udma_reset_rings(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	uc->slave_thread_id = -1;
	uc->dir = DMA_MEM_TO_MEM;
}

static int udma_get_mmrs(struct udevice *dev)
{
	struct udma_dev *ud = dev_get_priv(dev);
	int i;

	for (i = 0; i < MMR_LAST; i++) {
		ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
							       mmr_names[i]);
		if (!ud->mmrs[i])
			return -EINVAL;
	}

	return 0;
}

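/*
 * Read the channel/flow counts from the capability registers and build the
 * allocation bitmaps from the TISCI resource ranges assigned to this host.
 */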
static int udma_setup_resources(struct udma_dev *ud)
{
	struct udevice *dev = ud->dev;
	int ch_count, i;
	u32 cap2, cap3;
	struct ti_sci_resource_desc *rm_desc;
	struct ti_sci_resource *rm_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	static const char * const range_names[] = { "ti,sci-rm-range-tchan",
						    "ti,sci-rm-range-rchan",
						    "ti,sci-rm-range-rflow" };

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

	ud->rflow_cnt = cap3 & 0x3fff;
	ud->tchan_cnt = cap2 & 0x1ff;
	ud->echan_cnt = (cap2 >> 9) & 0x1ff;
	ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
	ch_count = ud->tchan_cnt + ud->rchan_cnt;

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					      sizeof(unsigned long),
					      GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
	    !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/*
	 * RX flows with the same Ids as RX channels are reserved to be used
	 * as default flows if remote HW can't generate flow_ids. Those
	 * RX flows can be requested only explicitly by id.
	 */
	bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++)
		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		bitmap_zero(ud->rflow_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		bitmap_fill(ud->rflow_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* GP rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		bitmap_clear(ud->rflow_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		bitmap_set(ud->rflow_map, ud->rchan_cnt,
			   ud->rflow_cnt - ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	dev_info(dev,
		 "Channels: %d (tchan: %u, echan: %u, rchan: %u, rflow: %u)\n",
		 ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
		 ud->rflow_cnt);

	return ch_count;
}

static int udma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct udma_dev *ud = dev_get_priv(dev);
	int i, ret;
	struct udevice *tmp;
	struct udevice *tisci_dev = NULL;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));

	ret = udma_get_mmrs(dev);
	if (ret)
		return ret;

	ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
					   "ti,ringacc", &tmp);
	ud->ringacc = dev_get_priv(tmp);
	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	ud->psil_base = dev_read_u32_default(dev, "ti,psil-base", 0);
	if (!ud->psil_base) {
		dev_err(dev, "Missing ti,psil-base property\n");
		return -EINVAL;
	}

	ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
					   "ti,sci", &tisci_dev);
	if (ret) {
		debug("Failed to get TISCI phandle (%d)\n", ret);
		tisci_rm->tisci = NULL;
		return -EINVAL;
	}
	tisci_rm->tisci = (struct ti_sci_handle *)
			  (ti_sci_get_handle_from_sysfw(tisci_dev));

	tisci_rm->tisci_dev_id = -1;
	ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_navss_dev_id = -1;
	ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
			      &tisci_rm->tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
	tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;

	ud->dev = dev;
	ud->ch_count = udma_setup_resources(ud);
	if (ud->ch_count <= 0)
		return ud->ch_count;

	dev_info(dev,
		 "Number of channels: %u (tchan: %u, echan: %u, rchan: %u dev-id %u)\n",
		 ud->ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
		 tisci_rm->tisci_dev_id);
	dev_info(dev, "Number of rflows: %u\n", ud->rflow_cnt);

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ud->ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->id = i;
		uc->slave_thread_id = -1;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->dir = DMA_MEM_TO_MEM;
		sprintf(uc->name, "UDMA chan%d\n", i);
		if (!i)
			uc->in_use = true;
	}

	pr_debug("UDMA(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
		 udma_read(ud->mmrs[MMR_GCFG], 0),
		 udma_read(ud->mmrs[MMR_GCFG], 0x20),
		 udma_read(ud->mmrs[MMR_GCFG], 0x24),
		 udma_read(ud->mmrs[MMR_GCFG], 0x28),
		 udma_read(ud->mmrs[MMR_GCFG], 0x2c));

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;

	return ret;
}

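/* Push a descriptor pointer to a ring as a 64-bit ring element */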
static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
{
	u64 addr = 0;

	memcpy(&addr, &elem, sizeof(elem));
	return k3_nav_ringacc_ring_push(ring, &addr);
}

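/*
 * Build a TR descriptor for a memcpy: a single TR for transfers below 64K,
 * otherwise one TR of near-64K aligned blocks plus a second TR for the
 * remainder, then push it to the transmit ring.
 */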
static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
				 dma_addr_t src, size_t len)
{
	u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct cppi5_tr_type15_t *tr_req;
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned long dummy;
	void *tr_desc;
	size_t desc_size;

	if (len < SZ_64K) {
		num_tr = 1;
		tr0_cnt0 = len;
		tr0_cnt1 = 1;
	} else {
		unsigned long align_to = __ffs(src | dest);

		if (align_to > 3)
			align_to = 3;
		/*
		 * Keep simple: tr0: SZ_64K-alignment blocks,
		 * tr1: the remaining
		 */
		num_tr = 2;
		tr0_cnt0 = (SZ_64K - BIT(align_to));
		if (len / tr0_cnt0 >= SZ_64K) {
			dev_err(uc->ud->dev, "size %zu is not supported\n",
				len);
			return NULL;
		}

		tr0_cnt1 = len / tr0_cnt0;
		tr1_cnt0 = len % tr0_cnt0;
	}

	desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
	tr_desc = dma_alloc_coherent(desc_size, &dummy);
	if (!tr_desc)
		return NULL;
	memset(tr_desc, 0, desc_size);

	cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);

	tr_req = tr_desc + tr_size;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;
	tr_req[0].icnt1 = tr0_cnt1;
	tr_req[0].icnt2 = 1;
	tr_req[0].icnt3 = 1;
	tr_req[0].dim1 = tr0_cnt0;

	tr_req[0].daddr = dest;
	tr_req[0].dicnt0 = tr0_cnt0;
	tr_req[0].dicnt1 = tr0_cnt1;
	tr_req[0].dicnt2 = 1;
	tr_req[0].dicnt3 = 1;
	tr_req[0].ddim1 = tr0_cnt0;

	if (num_tr == 2) {
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;
		tr_req[1].icnt1 = 1;
		tr_req[1].icnt2 = 1;
		tr_req[1].icnt3 = 1;

		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
		tr_req[1].dicnt0 = tr1_cnt0;
		tr_req[1].dicnt1 = 1;
		tr_req[1].dicnt2 = 1;
		tr_req[1].dicnt3 = 1;
	}

	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);

	flush_dcache_range((unsigned long)tr_desc,
			   ALIGN((unsigned long)tr_desc + desc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->tchan->t_ring, tr_desc);

	return 0;
}

static int udma_transfer(struct udevice *dev, int direction,
			 void *dst, void *src, size_t len)
{
	struct udma_dev *ud = dev_get_priv(dev);
	/* Channel0 is reserved for memcpy */
	struct udma_chan *uc = &ud->channels[0];
	dma_addr_t paddr = 0;
	int ret;

	ret = udma_alloc_chan_resources(uc);
	if (ret)
		return ret;

	udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);
	udma_start(uc);
	udma_poll_completion(uc, &paddr);
	udma_stop(uc);

	udma_free_chan_resources(uc);
	return 0;
}

static int udma_request(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	unsigned long dummy;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	uc = &ud->channels[dma->id];
	ret = udma_alloc_chan_resources(uc);
	if (ret) {
		dev_err(dma->dev, "alloc dma res failed %d\n", ret);
		return -EINVAL;
	}

	uc->hdesc_size = cppi5_hdesc_calc_size(uc->needs_epib,
					       uc->psd_size, 0);
	uc->hdesc_size = ALIGN(uc->hdesc_size, ARCH_DMA_MINALIGN);

	if (uc->dir == DMA_MEM_TO_DEV) {
		uc->desc_tx = dma_alloc_coherent(uc->hdesc_size, &dummy);
		memset(uc->desc_tx, 0, uc->hdesc_size);
	} else {
		uc->desc_rx = dma_alloc_coherent(
				uc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
		memset(uc->desc_rx, 0, uc->hdesc_size * UDMA_RX_DESC_NUM);
	}

	uc->in_use = true;
	uc->desc_rx_cur = 0;
	uc->num_rx_bufs = 0;

	if (uc->dir == DMA_DEV_TO_MEM) {
		uc->cfg_data.flow_id_base = uc->rflow->id;
		uc->cfg_data.flow_id_cnt = 1;
	}

	return 0;
}

static int udma_free(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		udma_stop(uc);
	udma_free_chan_resources(uc);

	uc->in_use = false;

	return 0;
}

static int udma_enable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	ret = udma_start(uc);

	return ret;
}

static int udma_disable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret = 0;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		ret = udma_stop(uc);
	else
		dev_err(dma->dev, "%s not running\n", __func__);

	return ret;
}

static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_tx;
	dma_addr_t dma_src = (dma_addr_t)src;
	struct ti_udma_drv_packet_data packet_data = { 0 };
	dma_addr_t paddr;
	struct udma_chan *uc;
	u32 tc_ring_id;
	int ret;

	if (metadata)
		packet_data = *((struct ti_udma_drv_packet_data *)metadata);

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_MEM_TO_DEV)
		return -EINVAL;

	tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);

	desc_tx = uc->desc_tx;

	cppi5_hdesc_reset_hbdesc(desc_tx);

	cppi5_hdesc_init(desc_tx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_tx, len);
	cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
	cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
	/* pass below information from caller */
	cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
	cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);

	flush_dcache_range((unsigned long)dma_src,
			   ALIGN((unsigned long)dma_src + len,
				 ARCH_DMA_MINALIGN));
	flush_dcache_range((unsigned long)desc_tx,
			   ALIGN((unsigned long)desc_tx + uc->hdesc_size,
				 ARCH_DMA_MINALIGN));

	ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
	if (ret) {
		dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
			dma->id, ret);
		return ret;
	}

	udma_poll_completion(uc, &paddr);

	return 0;
}

static int udma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t buf_dma;
	struct udma_chan *uc;
	u32 buf_dma_len, pkt_len;
	u32 port_id = 0;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;
	if (!uc->num_rx_bufs)
		return -EINVAL;

	ret = k3_nav_ringacc_ring_pop(uc->rchan->r_ring, &desc_rx);
	if (ret && ret != -ENODATA) {
		dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
		return ret;
	} else if (ret == -ENODATA) {
		return 0;
	}

	/* invalidate cache data */
	invalidate_dcache_range((ulong)desc_rx,
				(ulong)(desc_rx + uc->hdesc_size));

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);

	/* invalidate cache data */
	invalidate_dcache_range((ulong)buf_dma,
				(ulong)(buf_dma + buf_dma_len));

	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	*dst = (void *)buf_dma;
	uc->num_rx_bufs--;

	return pkt_len;
}

static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc = &ud->channels[0];
	ofnode chconf_node, slave_node;
	char prop[50];
	u32 val;

	for (val = 0; val < ud->ch_count; val++) {
		uc = &ud->channels[val];
		if (!uc->in_use)
			break;
	}

	if (val == ud->ch_count)
		return -EBUSY;

	uc->dir = DMA_DEV_TO_MEM;
	if (args->args[2] == UDMA_DIR_TX)
		uc->dir = DMA_MEM_TO_DEV;

	slave_node = ofnode_get_by_phandle(args->args[0]);
	if (!ofnode_valid(slave_node)) {
		dev_err(ud->dev, "slave node is missing\n");
		return -EINVAL;
	}

	snprintf(prop, sizeof(prop), "ti,psil-config%u", args->args[1]);
	chconf_node = ofnode_find_subnode(slave_node, prop);
	if (!ofnode_valid(chconf_node)) {
		dev_err(ud->dev, "Channel configuration node is missing\n");
		return -EINVAL;
	}

	if (!ofnode_read_u32(chconf_node, "linux,udma-mode", &val)) {
		if (val == UDMA_PKT_MODE)
			uc->pkt_mode = true;
	}

	if (!ofnode_read_u32(chconf_node, "statictr-type", &val))
		uc->static_tr_type = val;

	uc->needs_epib = ofnode_read_bool(chconf_node, "ti,needs-epib");
	if (!ofnode_read_u32(chconf_node, "ti,psd-size", &val))
		uc->psd_size = val;
	uc->metadata_size = (uc->needs_epib ? 16 : 0) + uc->psd_size;

	if (ofnode_read_u32(slave_node, "ti,psil-base", &val)) {
		dev_err(ud->dev, "ti,psil-base is missing\n");
		return -EINVAL;
	}

	uc->slave_thread_id = val + args->args[1];

	dma->id = uc->id;
	pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
		 dma->id, uc->needs_epib,
		 uc->psd_size, uc->metadata_size,
		 uc->slave_thread_id);

	return 0;
}

int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t dma_dst;
	struct udma_chan *uc;
	u32 desc_num;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;

	if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
		return -EINVAL;

	desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
	desc_rx = uc->desc_rx + (desc_num * uc->hdesc_size);
	dma_dst = (dma_addr_t)dst;

	cppi5_hdesc_reset_hbdesc(desc_rx);

	cppi5_hdesc_init(desc_rx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_rx, size);
	cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);

	flush_dcache_range((unsigned long)desc_rx,
			   ALIGN((unsigned long)desc_rx + uc->hdesc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->rchan->fd_ring, desc_rx);

	uc->num_rx_bufs++;
	uc->desc_rx_cur++;

	return 0;
}

static int udma_get_cfg(struct dma *dma, u32 id, void **data)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	switch (id) {
	case TI_UDMA_CHAN_PRIV_INFO:
		uc = &ud->channels[dma->id];
		*data = &uc->cfg_data;
		return 0;
	}

	return -EINVAL;
}

static const struct dma_ops udma_ops = {
	.transfer = udma_transfer,
	.of_xlate = udma_of_xlate,
	.request = udma_request,
	.free = udma_free,
	.enable = udma_enable,
	.disable = udma_disable,
	.send = udma_send,
	.receive = udma_receive,
	.prepare_rcv_buf = udma_prepare_rcv_buf,
	.get_cfg = udma_get_cfg,
};

static const struct udevice_id udma_ids[] = {
	{ .compatible = "ti,k3-navss-udmap" },
	{ .compatible = "ti,j721e-navss-mcu-udmap" },
	{ }
};

U_BOOT_DRIVER(ti_edma3) = {
	.name = "ti-udma",
	.id = UCLASS_DMA,
	.of_match = udma_ids,
	.ops = &udma_ops,
	.probe = udma_probe,
	.priv_auto_alloc_size = sizeof(struct udma_dev),
};