blob: 299e707adcf5e20504b67efd8f7cdbda99153e20 [file] [log] [blame]
Vignesh R3a9dbf32019-02-05 17:31:24 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 */
6#define pr_fmt(fmt) "udma: " fmt
7
8#include <common.h>
Simon Glass63334482019-11-14 12:57:39 -07009#include <cpu_func.h>
Simon Glass0f2af882020-05-10 11:40:05 -060010#include <log.h>
Simon Glass274e0b02020-05-10 11:39:56 -060011#include <asm/cache.h>
Vignesh R3a9dbf32019-02-05 17:31:24 +053012#include <asm/io.h>
13#include <asm/bitops.h>
14#include <malloc.h>
Masahiro Yamada6373a172020-02-14 16:40:19 +090015#include <linux/dma-mapping.h>
Vignesh R3a9dbf32019-02-05 17:31:24 +053016#include <dm.h>
Simon Glass9bc15642020-02-03 07:36:16 -070017#include <dm/device_compat.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070018#include <dm/devres.h>
Vignesh R3a9dbf32019-02-05 17:31:24 +053019#include <dm/read.h>
20#include <dm/of_access.h>
21#include <dma.h>
22#include <dma-uclass.h>
23#include <linux/delay.h>
24#include <dt-bindings/dma/k3-udma.h>
Vignesh Raghavendrac4106862019-12-09 10:25:32 +053025#include <linux/bitmap.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070026#include <linux/err.h>
Vignesh R3a9dbf32019-02-05 17:31:24 +053027#include <linux/soc/ti/k3-navss-ringacc.h>
28#include <linux/soc/ti/cppi5.h>
29#include <linux/soc/ti/ti-udma.h>
30#include <linux/soc/ti/ti_sci_protocol.h>
31
32#include "k3-udma-hwdef.h"
33
/*
 * Ring access mode: 64-bit builds access the ringacc rings directly,
 * 32-bit builds go through the ringacc proxy.
 */
#if BITS_PER_LONG == 64
#define RINGACC_RING_USE_PROXY	(0)
#else
#define RINGACC_RING_USE_PROXY	(1)
#endif

/* Upper bound on RX flow count; sizes the bitmap in __udma_reserve_rflow() */
#define K3_UDMA_MAX_RFLOWS 1024
41
Vignesh R3a9dbf32019-02-05 17:31:24 +053042struct udma_chan;
43
44enum udma_mmr {
45 MMR_GCFG = 0,
46 MMR_RCHANRT,
47 MMR_TCHANRT,
48 MMR_LAST,
49};
50
51static const char * const mmr_names[] = {
52 "gcfg", "rchanrt", "tchanrt"
53};
54
55struct udma_tchan {
56 void __iomem *reg_rt;
57
58 int id;
59 struct k3_nav_ring *t_ring; /* Transmit ring */
60 struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
61};
62
63struct udma_rchan {
64 void __iomem *reg_rt;
65
66 int id;
67 struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
68 struct k3_nav_ring *r_ring; /* Receive ring*/
69};
70
71struct udma_rflow {
72 int id;
73};
74
Vignesh Raghavendrac4106862019-12-09 10:25:32 +053075enum udma_rm_range {
76 RM_RANGE_TCHAN = 0,
77 RM_RANGE_RCHAN,
78 RM_RANGE_RFLOW,
79 RM_RANGE_LAST,
80};
81
82struct udma_tisci_rm {
83 const struct ti_sci_handle *tisci;
84 const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
85 u32 tisci_dev_id;
86
87 /* tisci information for PSI-L thread pairing/unpairing */
88 const struct ti_sci_rm_psil_ops *tisci_psil_ops;
89 u32 tisci_navss_dev_id;
90
91 struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
92};
93
/* Per-instance driver state: register maps, channel pools and bitmaps */
struct udma_dev {
	struct udevice *dev;
	void __iomem *mmrs[MMR_LAST];	/* regions mapped per mmr_names[] */

	struct udma_tisci_rm tisci_rm;	/* System Firmware RM handles */
	struct k3_nav_ringacc *ringacc;

	u32 features;

	/* Channel/flow counts read from the GCFG capability registers */
	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	/* Allocation bitmaps: bit set == resource in use */
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_map;
	unsigned long *rflow_map_reserved;	/* flows owned by other hosts */

	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
	u32 psil_base;	/* first PSI-L thread id of this UDMA instance */

	u32 ch_count;
};
121
/* One logical DMA channel as exposed through the U-Boot DMA uclass */
struct udma_chan {
	struct udma_dev *ud;
	char name[20];

	/* Underlying HW resources; which are set depends on uc->dir */
	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	struct ti_udma_drv_chan_cfg_data cfg_data;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* EPIB is needed for the communication or not */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	int slave_thread_id;	/* remote PSI-L thread, -1 when unused */
	u32 src_thread;
	u32 dst_thread;
	u32 static_tr_type;

	u32 id;
	enum dma_direction dir;

	/* TX descriptor and RX descriptor pool bookkeeping */
	struct cppi5_host_desc_t *desc_tx;
	u32 hdesc_size;
	bool in_use;
	void *desc_rx;
	u32 num_rx_bufs;
	u32 desc_rx_cur;

};
154
/* Per-channel register block strides (byte offsets from the region base) */
#define UDMA_CH_1000(ch)		(ch * 0x1000)
#define UDMA_CH_100(ch)		(ch * 0x100)
#define UDMA_CH_40(ch)		(ch * 0x40)

/* Number of RX descriptors: follow the network stack's PKTBUFSRX if set */
#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM 4
#endif
164
165/* Generic register access functions */
166static inline u32 udma_read(void __iomem *base, int reg)
167{
168 u32 v;
169
170 v = __raw_readl(base + reg);
171 pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
172 return v;
173}
174
/* Raw 32-bit write of @val to @base + @reg; traces before writing */
static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
	__raw_writel(val, base + reg);
}
180
181static inline void udma_update_bits(void __iomem *base, int reg,
182 u32 mask, u32 val)
183{
184 u32 tmp, orig;
185
186 orig = udma_read(base, reg);
187 tmp = orig & ~mask;
188 tmp |= (val & mask);
189
190 if (tmp != orig)
191 udma_write(base, reg, tmp);
192}
193
194/* TCHANRT */
195static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
196{
197 if (!tchan)
198 return 0;
199 return udma_read(tchan->reg_rt, reg);
200}
201
202static inline void udma_tchanrt_write(struct udma_tchan *tchan,
203 int reg, u32 val)
204{
205 if (!tchan)
206 return;
207 udma_write(tchan->reg_rt, reg, val);
208}
209
210/* RCHANRT */
211static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
212{
213 if (!rchan)
214 return 0;
215 return udma_read(rchan->reg_rt, reg);
216}
217
218static inline void udma_rchanrt_write(struct udma_rchan *rchan,
219 int reg, u32 val)
220{
221 if (!rchan)
222 return;
223 udma_write(rchan->reg_rt, reg, val);
224}
225
/*
 * Pair a PSI-L source thread with a destination thread via System Firmware.
 * The destination-side bit is forced on before the request is sent.
 * Returns the TISCI op's result (0 on success, negative errno on failure).
 */
static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
				       u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}
237
/*
 * Undo udma_navss_psil_pair(): unpair the PSI-L thread pair via System
 * Firmware. Returns the TISCI op's result.
 */
static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
					 u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}
249
250static inline char *udma_get_dir_text(enum dma_direction dir)
251{
252 switch (dir) {
253 case DMA_DEV_TO_MEM:
254 return "DEV_TO_MEM";
255 case DMA_MEM_TO_DEV:
256 return "MEM_TO_DEV";
257 case DMA_MEM_TO_MEM:
258 return "MEM_TO_MEM";
259 case DMA_DEV_TO_DEV:
260 return "DEV_TO_DEV";
261 default:
262 break;
263 }
264
265 return "invalid";
266}
267
/*
 * Report whether the channel's realtime enable bit is set in the direction-
 * relevant RT_CTL register(s). For MEM_TO_MEM either side being enabled
 * counts as running.
 */
static inline bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, rrt_ctl,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, trt_ctl,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		break;
	default:
		break;
	}

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}
301
Vignesh R3a9dbf32019-02-05 17:31:24 +0530302static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
303{
304 struct k3_nav_ring *ring = NULL;
305 int ret = -ENOENT;
306
307 switch (uc->dir) {
308 case DMA_DEV_TO_MEM:
309 ring = uc->rchan->r_ring;
310 break;
311 case DMA_MEM_TO_DEV:
312 ring = uc->tchan->tc_ring;
313 break;
314 case DMA_MEM_TO_MEM:
315 ring = uc->tchan->tc_ring;
316 break;
317 default:
318 break;
319 }
320
321 if (ring && k3_nav_ringacc_ring_get_occ(ring))
322 ret = k3_nav_ringacc_ring_pop(ring, addr);
323
324 return ret;
325}
326
327static void udma_reset_rings(struct udma_chan *uc)
328{
329 struct k3_nav_ring *ring1 = NULL;
330 struct k3_nav_ring *ring2 = NULL;
331
332 switch (uc->dir) {
333 case DMA_DEV_TO_MEM:
334 ring1 = uc->rchan->fd_ring;
335 ring2 = uc->rchan->r_ring;
336 break;
337 case DMA_MEM_TO_DEV:
338 ring1 = uc->tchan->t_ring;
339 ring2 = uc->tchan->tc_ring;
340 break;
341 case DMA_MEM_TO_MEM:
342 ring1 = uc->tchan->t_ring;
343 ring2 = uc->tchan->tc_ring;
344 break;
345 default:
346 break;
347 }
348
349 if (ring1)
350 k3_nav_ringacc_ring_reset_dma(ring1, 0);
351 if (ring2)
352 k3_nav_ringacc_ring_reset(ring2);
353}
354
/*
 * Zero the HW byte/packet counters of whichever channel halves are
 * allocated. The counters are write-to-decrement: writing back the value
 * just read clears them.
 */
static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
	}

	/* SW-side completed-byte counter restarts with the HW counters */
	uc->bcnt = 0;
}
389
/*
 * Immediately disable the channel by clearing the RT enable registers
 * (no teardown handshake). Also clears a pending teardown bit, which is
 * why udma_start() calls this first. Returns -EINVAL on bad direction.
 */
static inline int udma_stop_hard(struct udma_chan *uc)
{
	pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		/* RX: peer first, then the local channel */
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		/* TX: local channel first, then the peer */
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
413
/*
 * Start the channel: clear any pending teardown, reset the HW counters,
 * then set the RT enable bits. The enable order is direction-specific:
 * RX enables the local channel before the peer, TX enables the peer first
 * so nothing is pushed before the remote end is ready.
 * Returns 0 on success (or if already running), -EINVAL on bad direction.
 */
static int udma_start(struct udma_chan *uc)
{
	/* Channel is already running, no need to proceed further */
	if (udma_is_chan_running(uc))
		goto out;

	pr_debug("%s: chan:%d dir:%s (static_tr_type: %d)\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir),
		 uc->static_tr_type);

	/* Make sure that we clear the teardown bit, if it is set */
	udma_stop_hard(uc);

	/* Reset all counters */
	udma_reset_counters(uc);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_CTL_REG),
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		/* Enable remote */
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_CTL_REG),
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		/* No peer: enable both local halves */
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	default:
		return -EINVAL;
	}

	pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
out:
	return 0;
}
476
/*
 * Request a graceful teardown of a TX (mem-to-dev) channel. When @sync is
 * true, poll up to ~1000us for the enable bit to clear; afterwards warn if
 * the peer is still enabled. Timeouts are reported but not returned.
 */
static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	/* Keep EN set while asserting TDOWN to tear down in-flight work */
	udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
			   UDMA_CHAN_RT_CTL_EN |
			   UDMA_CHAN_RT_CTL_TDOWN);

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf(" %s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}
502
/*
 * Request a graceful teardown of an RX (dev-to-mem) channel; teardown is
 * initiated from the peer side for RX. When @sync is true, poll up to
 * ~1000us for the local enable bit to clear; warn if the peer stays
 * enabled. Timeouts are reported but not returned.
 */
static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
			   UDMA_PEER_RT_EN_ENABLE |
			   UDMA_PEER_RT_EN_TEARDOWN);

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf("%s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}
528
/*
 * Stop the channel: reset counters, then perform a synchronous teardown
 * for slave directions or a hard disable for MEM_TO_MEM.
 * Returns 0 on success, -EINVAL on bad direction.
 */
static inline int udma_stop(struct udma_chan *uc)
{
	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	udma_reset_counters(uc);
	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_stop_dev2mem(uc, true);
		break;
	case DMA_MEM_TO_DEV:
		udma_stop_mem2dev(uc, true);
		break;
	case DMA_MEM_TO_MEM:
		/* No peer handshake needed; just clear the enables */
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
552
/*
 * Busy-wait until a completed descriptor can be popped from the channel's
 * completion ring, storing its address in @paddr. Prints a progress dot
 * every ~1s of polling. Note: no timeout - this blocks indefinitely.
 */
static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
{
	int i = 1;

	while (udma_pop_from_ring(uc, paddr)) {
		udelay(1);
		if (!(i % 1000000))
			printf(".");
		i++;
	}
}
564
/*
 * Reserve an RX flow. With @id >= 0 the specific flow is claimed (error if
 * busy); with a negative @id the first free flow that is neither in use
 * nor reserved for another host is picked, searching only above rchan_cnt
 * (the first rchan_cnt flows map 1:1 to rchans).
 * Returns the flow or ERR_PTR(-ENOENT).
 */
static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
{
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	if (id >= 0) {
		if (test_bit(id, ud->rflow_map)) {
			dev_err(ud->dev, "rflow%d is in use\n", id);
			return ERR_PTR(-ENOENT);
		}
	} else {
		/* Exclude both allocated and host-reserved flows */
		bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
			  ud->rflow_cnt);

		id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
		if (id >= ud->rflow_cnt)
			return ERR_PTR(-ENOENT);
	}

	__set_bit(id, ud->rflow_map);
	return &ud->rflows[id];
}
586
Vignesh R3a9dbf32019-02-05 17:31:24 +0530587#define UDMA_RESERVE_RESOURCE(res) \
588static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
589 int id) \
590{ \
591 if (id >= 0) { \
592 if (test_bit(id, ud->res##_map)) { \
593 dev_err(ud->dev, "res##%d is in use\n", id); \
594 return ERR_PTR(-ENOENT); \
595 } \
596 } else { \
597 id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \
598 if (id == ud->res##_cnt) { \
599 return ERR_PTR(-ENOENT); \
600 } \
601 } \
602 \
603 __set_bit(id, ud->res##_map); \
604 return &ud->res##s[id]; \
605}
606
607UDMA_RESERVE_RESOURCE(tchan);
608UDMA_RESERVE_RESOURCE(rchan);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530609
/*
 * Attach a free TX channel to @uc (no-op if one is already attached).
 * Returns 0 on success or the PTR_ERR from the reservation.
 */
static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	/* -1: pick any free tchan */
	uc->tchan = __udma_reserve_tchan(ud, -1);
	if (IS_ERR(uc->tchan))
		return PTR_ERR(uc->tchan);

	pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);

	return 0;
}
628
/*
 * Attach a free RX channel to @uc (no-op if one is already attached).
 * Returns 0 on success or the PTR_ERR from the reservation.
 */
static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	/* -1: pick any free rchan */
	uc->rchan = __udma_reserve_rchan(ud, -1);
	if (IS_ERR(uc->rchan))
		return PTR_ERR(uc->rchan);

	pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);

	return 0;
}
647
/*
 * For MEM_TO_MEM: reserve a tchan and rchan with the *same* index, as the
 * HW pairs them by id. Fails with -EBUSY if only one side is already
 * allocated, -ENOENT if no common free index exists.
 */
static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	for (chan_id = 0; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	/* Claim both halves atomically from the driver's point of view */
	__set_bit(chan_id, ud->tchan_map);
	__set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);

	return 0;
}
689
/*
 * Attach RX flow @flow_id to @uc (no-op if one is already attached).
 * An rchan is expected to be attached first; that is only warned about,
 * not enforced. Returns 0 on success or the PTR_ERR from the reservation.
 */
static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	if (!uc->rchan)
		dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);

	uc->rflow = __udma_reserve_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow))
		return PTR_ERR(uc->rflow);

	pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
	return 0;
}
710
711static void udma_put_rchan(struct udma_chan *uc)
712{
713 struct udma_dev *ud = uc->ud;
714
715 if (uc->rchan) {
716 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
717 uc->rchan->id);
718 __clear_bit(uc->rchan->id, ud->rchan_map);
719 uc->rchan = NULL;
720 }
721}
722
723static void udma_put_tchan(struct udma_chan *uc)
724{
725 struct udma_dev *ud = uc->ud;
726
727 if (uc->tchan) {
728 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
729 uc->tchan->id);
730 __clear_bit(uc->tchan->id, ud->tchan_map);
731 uc->tchan = NULL;
732 }
733}
734
735static void udma_put_rflow(struct udma_chan *uc)
736{
737 struct udma_dev *ud = uc->ud;
738
739 if (uc->rflow) {
740 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
741 uc->rflow->id);
742 __clear_bit(uc->rflow->id, ud->rflow_map);
743 uc->rflow = NULL;
744 }
745}
746
/* Free the TX rings and release the tchan; no-op without a tchan */
static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}
759
/*
 * Reserve a tchan, request its transmit ring (fixed id == tchan id) and a
 * transmit-completion ring (any free id), and configure both as 16-entry,
 * 8-byte-element rings in RING mode. On any failure everything acquired
 * so far is released via the goto ladder. Returns 0 or negative errno.
 */
static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	uc->tchan->t_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, uc->tchan->id,
				RINGACC_RING_USE_PROXY);
	if (!uc->tchan->t_ring) {
		ret = -EBUSY;
		goto err_tx_ring;
	}

	/* -1: any free ring id for the completion ring */
	uc->tchan->tc_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->tchan->tc_ring) {
		ret = -EBUSY;
		goto err_txc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
err_txc_ring:
	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_tx_ring:
	udma_put_tchan(uc);

	return ret;
}
809
/* Free the RX rings and release the rflow and rchan; no-op without rchan */
static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->fd_ring = NULL;
	uc->rchan->r_ring = NULL;

	udma_put_rflow(uc);
	udma_put_rchan(uc);
}
823
/*
 * Reserve an rchan and, for slave directions, an rflow plus a free-
 * descriptor ring (fixed id: tchan_cnt + echan_cnt + rchan id) and a
 * receive ring (any free id), both 16-entry/8-byte in RING mode.
 * MEM_TO_MEM returns early: it needs only the rchan itself.
 * On failure everything acquired so far is released via the goto ladder.
 */
static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->dir == DMA_MEM_TO_MEM)
		return 0;

	/* Use the flow with the same index as the rchan */
	ret = udma_get_rflow(uc, uc->rchan->id);
	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;

	uc->rchan->fd_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, fd_ring_id,
				RINGACC_RING_USE_PROXY);
	if (!uc->rchan->fd_ring) {
		ret = -EBUSY;
		goto err_rx_ring;
	}

	uc->rchan->r_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->rchan->r_ring) {
		ret = -EBUSY;
		goto err_rxc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->rchan->fd_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->rchan->r_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->r_ring = NULL;
err_rxc_ring:
	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	uc->rchan->fd_ring = NULL;
err_rx_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}
888
889static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
890{
891 struct udma_dev *ud = uc->ud;
892 int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
893 struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530894 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530895 u32 mode;
896 int ret;
897
898 if (uc->pkt_mode)
899 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
900 else
901 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
902
903 req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
904 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
905 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530906 req.nav_id = tisci_rm->tisci_dev_id;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530907 req.index = uc->tchan->id;
908 req.tx_chan_type = mode;
909 if (uc->dir == DMA_MEM_TO_MEM)
910 req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
911 else
912 req.tx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
913 uc->psd_size,
914 0) >> 2;
915 req.txcq_qnum = tc_ring;
916
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530917 ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530918 if (ret)
919 dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);
920
921 return ret;
922}
923
/*
 * Configure the RX channel and (for slave directions) its RX flow through
 * System Firmware (TISCI). MEM_TO_MEM completes on the tc_ring and needs
 * no flow; slave RX completes on the r_ring and refills from the fd_ring.
 * Returns the TISCI result.
 */
static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int fd_ring = k3_nav_ringacc_get_ring_id(uc->rchan->fd_ring);
	int rx_ring = k3_nav_ringacc_get_ring_id(uc->rchan->r_ring);
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->rchan->id;
	req.rx_chan_type = mode;
	/* Fetch size is in 32-bit words, hence the >> 2 */
	if (uc->dir == DMA_MEM_TO_MEM) {
		req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		req.rxcq_qnum = tc_ring;
	} else {
		req.rx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
		req.rxcq_qnum = rx_ring;
	}
	/* Only advertise a flow range when a non-default flow is used */
	if (uc->rflow->id != uc->rchan->id && uc->dir != DMA_MEM_TO_MEM) {
		req.flowid_start = uc->rflow->id;
		req.flowid_cnt = 1;
	}

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
			uc->rchan->id, ret);
		return ret;
	}
	if (uc->dir == DMA_MEM_TO_MEM)
		return ret;

	flow_req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;

	if (uc->psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;

	/* All free-descriptor queues point at the single fd_ring */
	flow_req.rx_error_handling = 0;
	flow_req.rx_desc_type = 0;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = 2;
	flow_req.rx_src_tag_lo_sel = 4;
	flow_req.rx_dest_tag_hi_sel = 5;
	flow_req.rx_dest_tag_lo_sel = 4;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;
	flow_req.rx_ps_location = 0;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
						     &flow_req);
	if (ret)
		dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
			uc->rchan->id, uc->rflow->id, ret);

	return ret;
}
1022
1023static int udma_alloc_chan_resources(struct udma_chan *uc)
1024{
1025 struct udma_dev *ud = uc->ud;
1026 int ret;
1027
1028 pr_debug("%s: chan:%d as %s\n",
1029 __func__, uc->id, udma_get_dir_text(uc->dir));
1030
1031 switch (uc->dir) {
1032 case DMA_MEM_TO_MEM:
1033 /* Non synchronized - mem to mem type of transfer */
1034 ret = udma_get_chan_pair(uc);
1035 if (ret)
1036 return ret;
1037
1038 ret = udma_alloc_tx_resources(uc);
1039 if (ret)
1040 goto err_free_res;
1041
1042 ret = udma_alloc_rx_resources(uc);
1043 if (ret)
1044 goto err_free_res;
1045
1046 uc->src_thread = ud->psil_base + uc->tchan->id;
1047 uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
1048 break;
1049 case DMA_MEM_TO_DEV:
1050 /* Slave transfer synchronized - mem to dev (TX) trasnfer */
1051 ret = udma_alloc_tx_resources(uc);
1052 if (ret)
1053 goto err_free_res;
1054
1055 uc->src_thread = ud->psil_base + uc->tchan->id;
1056 uc->dst_thread = uc->slave_thread_id;
1057 if (!(uc->dst_thread & 0x8000))
1058 uc->dst_thread |= 0x8000;
1059
1060 break;
1061 case DMA_DEV_TO_MEM:
1062 /* Slave transfer synchronized - dev to mem (RX) trasnfer */
1063 ret = udma_alloc_rx_resources(uc);
1064 if (ret)
1065 goto err_free_res;
1066
1067 uc->src_thread = uc->slave_thread_id;
1068 uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
1069
1070 break;
1071 default:
1072 /* Can not happen */
1073 pr_debug("%s: chan:%d invalid direction (%u)\n",
1074 __func__, uc->id, uc->dir);
1075 return -EINVAL;
1076 }
1077
1078 /* We have channel indexes and rings */
1079 if (uc->dir == DMA_MEM_TO_MEM) {
1080 ret = udma_alloc_tchan_sci_req(uc);
1081 if (ret)
1082 goto err_free_res;
1083
1084 ret = udma_alloc_rchan_sci_req(uc);
1085 if (ret)
1086 goto err_free_res;
1087 } else {
1088 /* Slave transfer */
1089 if (uc->dir == DMA_MEM_TO_DEV) {
1090 ret = udma_alloc_tchan_sci_req(uc);
1091 if (ret)
1092 goto err_free_res;
1093 } else {
1094 ret = udma_alloc_rchan_sci_req(uc);
1095 if (ret)
1096 goto err_free_res;
1097 }
1098 }
1099
Peter Ujfalusid15f8652019-04-25 12:08:15 +05301100 if (udma_is_chan_running(uc)) {
1101 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1102 udma_stop(uc);
1103 if (udma_is_chan_running(uc)) {
1104 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1105 goto err_free_res;
1106 }
1107 }
1108
Vignesh R3a9dbf32019-02-05 17:31:24 +05301109 /* PSI-L pairing */
1110 ret = udma_navss_psil_pair(ud, uc->src_thread, uc->dst_thread);
1111 if (ret) {
1112 dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
1113 goto err_free_res;
1114 }
1115
1116 return 0;
1117
1118err_free_res:
1119 udma_free_tx_resources(uc);
1120 udma_free_rx_resources(uc);
1121 uc->slave_thread_id = -1;
1122 return ret;
1123}
1124
1125static void udma_free_chan_resources(struct udma_chan *uc)
1126{
1127 /* Some configuration to UDMA-P channel: disable, reset, whatever */
1128
1129 /* Release PSI-L pairing */
1130 udma_navss_psil_unpair(uc->ud, uc->src_thread, uc->dst_thread);
1131
1132 /* Reset the rings for a new start */
1133 udma_reset_rings(uc);
1134 udma_free_tx_resources(uc);
1135 udma_free_rx_resources(uc);
1136
1137 uc->slave_thread_id = -1;
1138 uc->dir = DMA_MEM_TO_MEM;
1139}
1140
/*
 * udma_get_mmrs() - map the UDMA register regions named in the device tree
 * @dev: UDMA controller device
 *
 * Resolves the "gcfg", "rchanrt" and "tchanrt" reg-names entries (see
 * mmr_names[]) and stores each base address in ud->mmrs[].
 *
 * Return: 0 on success, -EINVAL if a region cannot be resolved.
 */
static int udma_get_mmrs(struct udevice *dev)
{
	struct udma_dev *ud = dev_get_priv(dev);
	int i;

	for (i = 0; i < MMR_LAST; i++) {
		ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
							mmr_names[i]);
		/*
		 * NOTE(review): devfdt_get_addr_name() reports failure as
		 * FDT_ADDR_T_NONE (all-ones), not 0, so this NULL check may
		 * not catch a missing reg entry — confirm against the DM
		 * core API before relying on this error path.
		 */
		if (!ud->mmrs[i])
			return -EINVAL;
	}

	return 0;
}
1155
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301156static int udma_setup_resources(struct udma_dev *ud)
1157{
1158 struct udevice *dev = ud->dev;
1159 int ch_count, i;
1160 u32 cap2, cap3;
1161 struct ti_sci_resource_desc *rm_desc;
1162 struct ti_sci_resource *rm_res;
1163 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1164 static const char * const range_names[] = { "ti,sci-rm-range-tchan",
1165 "ti,sci-rm-range-rchan",
1166 "ti,sci-rm-range-rflow" };
1167
1168 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
1169 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
1170
1171 ud->rflow_cnt = cap3 & 0x3fff;
1172 ud->tchan_cnt = cap2 & 0x1ff;
1173 ud->echan_cnt = (cap2 >> 9) & 0x1ff;
1174 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1175 ch_count = ud->tchan_cnt + ud->rchan_cnt;
1176
1177 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1178 sizeof(unsigned long), GFP_KERNEL);
1179 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1180 GFP_KERNEL);
1181 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1182 sizeof(unsigned long), GFP_KERNEL);
1183 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1184 GFP_KERNEL);
1185 ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
1186 sizeof(unsigned long), GFP_KERNEL);
1187 ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
1188 sizeof(unsigned long),
1189 GFP_KERNEL);
1190 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
1191 GFP_KERNEL);
1192
1193 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
1194 !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
1195 !ud->rflows)
1196 return -ENOMEM;
1197
1198 /*
1199 * RX flows with the same Ids as RX channels are reserved to be used
1200 * as default flows if remote HW can't generate flow_ids. Those
1201 * RX flows can be requested only explicitly by id.
1202 */
1203 bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);
1204
1205 /* Get resource ranges from tisci */
1206 for (i = 0; i < RM_RANGE_LAST; i++)
1207 tisci_rm->rm_ranges[i] =
1208 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1209 tisci_rm->tisci_dev_id,
1210 (char *)range_names[i]);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301211
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301212 /* tchan ranges */
1213 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1214 if (IS_ERR(rm_res)) {
1215 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1216 } else {
1217 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1218 for (i = 0; i < rm_res->sets; i++) {
1219 rm_desc = &rm_res->desc[i];
1220 bitmap_clear(ud->tchan_map, rm_desc->start,
1221 rm_desc->num);
1222 }
1223 }
1224
1225 /* rchan and matching default flow ranges */
1226 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1227 if (IS_ERR(rm_res)) {
1228 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1229 bitmap_zero(ud->rflow_map, ud->rchan_cnt);
1230 } else {
1231 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1232 bitmap_fill(ud->rflow_map, ud->rchan_cnt);
1233 for (i = 0; i < rm_res->sets; i++) {
1234 rm_desc = &rm_res->desc[i];
1235 bitmap_clear(ud->rchan_map, rm_desc->start,
1236 rm_desc->num);
1237 bitmap_clear(ud->rflow_map, rm_desc->start,
1238 rm_desc->num);
1239 }
1240 }
1241
1242 /* GP rflow ranges */
1243 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
1244 if (IS_ERR(rm_res)) {
1245 bitmap_clear(ud->rflow_map, ud->rchan_cnt,
1246 ud->rflow_cnt - ud->rchan_cnt);
1247 } else {
1248 bitmap_set(ud->rflow_map, ud->rchan_cnt,
1249 ud->rflow_cnt - ud->rchan_cnt);
1250 for (i = 0; i < rm_res->sets; i++) {
1251 rm_desc = &rm_res->desc[i];
1252 bitmap_clear(ud->rflow_map, rm_desc->start,
1253 rm_desc->num);
1254 }
1255 }
1256
1257 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
1258 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
1259 if (!ch_count)
1260 return -ENODEV;
1261
1262 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
1263 GFP_KERNEL);
1264 if (!ud->channels)
1265 return -ENOMEM;
1266
1267 dev_info(dev,
1268 "Channels: %d (tchan: %u, echan: %u, rchan: %u, rflow: %u)\n",
1269 ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
1270 ud->rflow_cnt);
1271
1272 return ch_count;
1273}
Vignesh R3a9dbf32019-02-05 17:31:24 +05301274static int udma_probe(struct udevice *dev)
1275{
1276 struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
1277 struct udma_dev *ud = dev_get_priv(dev);
1278 int i, ret;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301279 struct udevice *tmp;
1280 struct udevice *tisci_dev = NULL;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301281 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1282 ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));
1283
Vignesh R3a9dbf32019-02-05 17:31:24 +05301284
1285 ret = udma_get_mmrs(dev);
1286 if (ret)
1287 return ret;
1288
1289 ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
1290 "ti,ringacc", &tmp);
1291 ud->ringacc = dev_get_priv(tmp);
1292 if (IS_ERR(ud->ringacc))
1293 return PTR_ERR(ud->ringacc);
1294
1295 ud->psil_base = dev_read_u32_default(dev, "ti,psil-base", 0);
1296 if (!ud->psil_base) {
1297 dev_info(dev,
1298 "Missing ti,psil-base property, using %d.\n", ret);
1299 return -EINVAL;
1300 }
1301
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301302 ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
1303 "ti,sci", &tisci_dev);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301304 if (ret) {
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301305 debug("Failed to get TISCI phandle (%d)\n", ret);
1306 tisci_rm->tisci = NULL;
1307 return -EINVAL;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301308 }
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301309 tisci_rm->tisci = (struct ti_sci_handle *)
1310 (ti_sci_get_handle_from_sysfw(tisci_dev));
Vignesh R3a9dbf32019-02-05 17:31:24 +05301311
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301312 tisci_rm->tisci_dev_id = -1;
1313 ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
1314 if (ret) {
1315 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
1316 return ret;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301317 }
1318
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301319 tisci_rm->tisci_navss_dev_id = -1;
1320 ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
1321 &tisci_rm->tisci_navss_dev_id);
1322 if (ret) {
1323 dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
1324 return ret;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301325 }
1326
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301327 tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
1328 tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301329
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301330 ud->dev = dev;
1331 ud->ch_count = udma_setup_resources(ud);
1332 if (ud->ch_count <= 0)
1333 return ud->ch_count;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301334
1335 dev_info(dev,
1336 "Number of channels: %u (tchan: %u, echan: %u, rchan: %u dev-id %u)\n",
1337 ud->ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301338 tisci_rm->tisci_dev_id);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301339 dev_info(dev, "Number of rflows: %u\n", ud->rflow_cnt);
1340
Vignesh R3a9dbf32019-02-05 17:31:24 +05301341 for (i = 0; i < ud->tchan_cnt; i++) {
1342 struct udma_tchan *tchan = &ud->tchans[i];
1343
1344 tchan->id = i;
1345 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
1346 }
1347
1348 for (i = 0; i < ud->rchan_cnt; i++) {
1349 struct udma_rchan *rchan = &ud->rchans[i];
1350
1351 rchan->id = i;
1352 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
1353 }
1354
1355 for (i = 0; i < ud->rflow_cnt; i++) {
1356 struct udma_rflow *rflow = &ud->rflows[i];
1357
1358 rflow->id = i;
1359 }
1360
1361 for (i = 0; i < ud->ch_count; i++) {
1362 struct udma_chan *uc = &ud->channels[i];
1363
1364 uc->ud = ud;
1365 uc->id = i;
1366 uc->slave_thread_id = -1;
1367 uc->tchan = NULL;
1368 uc->rchan = NULL;
1369 uc->dir = DMA_MEM_TO_MEM;
1370 sprintf(uc->name, "UDMA chan%d\n", i);
1371 if (!i)
1372 uc->in_use = true;
1373 }
1374
1375 pr_debug("UDMA(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
1376 udma_read(ud->mmrs[MMR_GCFG], 0),
1377 udma_read(ud->mmrs[MMR_GCFG], 0x20),
1378 udma_read(ud->mmrs[MMR_GCFG], 0x24),
1379 udma_read(ud->mmrs[MMR_GCFG], 0x28),
1380 udma_read(ud->mmrs[MMR_GCFG], 0x2c));
1381
1382 uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;
1383
1384 return ret;
1385}
1386
Vignesh Raghavendrafc7a33f2019-12-09 10:25:38 +05301387static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
1388{
1389 u64 addr = 0;
1390
1391 memcpy(&addr, &elem, sizeof(elem));
1392 return k3_nav_ringacc_ring_push(ring, &addr);
1393}
1394
Vignesh R3a9dbf32019-02-05 17:31:24 +05301395static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
1396 dma_addr_t src, size_t len)
1397{
1398 u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1399 struct cppi5_tr_type15_t *tr_req;
1400 int num_tr;
1401 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
1402 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
1403 unsigned long dummy;
1404 void *tr_desc;
1405 size_t desc_size;
1406
1407 if (len < SZ_64K) {
1408 num_tr = 1;
1409 tr0_cnt0 = len;
1410 tr0_cnt1 = 1;
1411 } else {
1412 unsigned long align_to = __ffs(src | dest);
1413
1414 if (align_to > 3)
1415 align_to = 3;
1416 /*
1417 * Keep simple: tr0: SZ_64K-alignment blocks,
1418 * tr1: the remaining
1419 */
1420 num_tr = 2;
1421 tr0_cnt0 = (SZ_64K - BIT(align_to));
1422 if (len / tr0_cnt0 >= SZ_64K) {
1423 dev_err(uc->ud->dev, "size %zu is not supported\n",
1424 len);
1425 return NULL;
1426 }
1427
1428 tr0_cnt1 = len / tr0_cnt0;
1429 tr1_cnt0 = len % tr0_cnt0;
1430 }
1431
1432 desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
1433 tr_desc = dma_alloc_coherent(desc_size, &dummy);
1434 if (!tr_desc)
1435 return NULL;
1436 memset(tr_desc, 0, desc_size);
1437
1438 cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
1439 cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
1440 cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);
1441
1442 tr_req = tr_desc + tr_size;
1443
1444 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
1445 CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
1446 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
1447
1448 tr_req[0].addr = src;
1449 tr_req[0].icnt0 = tr0_cnt0;
1450 tr_req[0].icnt1 = tr0_cnt1;
1451 tr_req[0].icnt2 = 1;
1452 tr_req[0].icnt3 = 1;
1453 tr_req[0].dim1 = tr0_cnt0;
1454
1455 tr_req[0].daddr = dest;
1456 tr_req[0].dicnt0 = tr0_cnt0;
1457 tr_req[0].dicnt1 = tr0_cnt1;
1458 tr_req[0].dicnt2 = 1;
1459 tr_req[0].dicnt3 = 1;
1460 tr_req[0].ddim1 = tr0_cnt0;
1461
1462 if (num_tr == 2) {
1463 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
1464 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
1465 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
1466
1467 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
1468 tr_req[1].icnt0 = tr1_cnt0;
1469 tr_req[1].icnt1 = 1;
1470 tr_req[1].icnt2 = 1;
1471 tr_req[1].icnt3 = 1;
1472
1473 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
1474 tr_req[1].dicnt0 = tr1_cnt0;
1475 tr_req[1].dicnt1 = 1;
1476 tr_req[1].dicnt2 = 1;
1477 tr_req[1].dicnt3 = 1;
1478 }
1479
1480 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
1481
Vignesh Raghavendrace431412019-12-09 10:25:39 +05301482 flush_dcache_range((unsigned long)tr_desc,
1483 ALIGN((unsigned long)tr_desc + desc_size,
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05301484 ARCH_DMA_MINALIGN));
Vignesh R3a9dbf32019-02-05 17:31:24 +05301485
Vignesh Raghavendrafc7a33f2019-12-09 10:25:38 +05301486 udma_push_to_ring(uc->tchan->t_ring, tr_desc);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301487
1488 return 0;
1489}
1490
1491static int udma_transfer(struct udevice *dev, int direction,
1492 void *dst, void *src, size_t len)
1493{
1494 struct udma_dev *ud = dev_get_priv(dev);
1495 /* Channel0 is reserved for memcpy */
1496 struct udma_chan *uc = &ud->channels[0];
1497 dma_addr_t paddr = 0;
1498 int ret;
1499
1500 ret = udma_alloc_chan_resources(uc);
1501 if (ret)
1502 return ret;
1503
1504 udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);
1505 udma_start(uc);
1506 udma_poll_completion(uc, &paddr);
1507 udma_stop(uc);
1508
1509 udma_free_chan_resources(uc);
1510 return 0;
1511}
1512
1513static int udma_request(struct dma *dma)
1514{
1515 struct udma_dev *ud = dev_get_priv(dma->dev);
1516 struct udma_chan *uc;
1517 unsigned long dummy;
1518 int ret;
1519
1520 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1521 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1522 return -EINVAL;
1523 }
1524
1525 uc = &ud->channels[dma->id];
1526 ret = udma_alloc_chan_resources(uc);
1527 if (ret) {
1528 dev_err(dma->dev, "alloc dma res failed %d\n", ret);
1529 return -EINVAL;
1530 }
1531
1532 uc->hdesc_size = cppi5_hdesc_calc_size(uc->needs_epib,
1533 uc->psd_size, 0);
1534 uc->hdesc_size = ALIGN(uc->hdesc_size, ARCH_DMA_MINALIGN);
1535
1536 if (uc->dir == DMA_MEM_TO_DEV) {
1537 uc->desc_tx = dma_alloc_coherent(uc->hdesc_size, &dummy);
1538 memset(uc->desc_tx, 0, uc->hdesc_size);
1539 } else {
1540 uc->desc_rx = dma_alloc_coherent(
1541 uc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
1542 memset(uc->desc_rx, 0, uc->hdesc_size * UDMA_RX_DESC_NUM);
1543 }
1544
1545 uc->in_use = true;
1546 uc->desc_rx_cur = 0;
1547 uc->num_rx_bufs = 0;
1548
Vignesh Raghavendra39349892019-12-04 22:17:21 +05301549 if (uc->dir == DMA_DEV_TO_MEM) {
1550 uc->cfg_data.flow_id_base = uc->rflow->id;
1551 uc->cfg_data.flow_id_cnt = 1;
1552 }
1553
Vignesh R3a9dbf32019-02-05 17:31:24 +05301554 return 0;
1555}
1556
Simon Glass75c0ad62020-02-03 07:35:55 -07001557static int udma_rfree(struct dma *dma)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301558{
1559 struct udma_dev *ud = dev_get_priv(dma->dev);
1560 struct udma_chan *uc;
1561
1562 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1563 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1564 return -EINVAL;
1565 }
1566 uc = &ud->channels[dma->id];
1567
1568 if (udma_is_chan_running(uc))
1569 udma_stop(uc);
1570 udma_free_chan_resources(uc);
1571
1572 uc->in_use = false;
1573
1574 return 0;
1575}
1576
1577static int udma_enable(struct dma *dma)
1578{
1579 struct udma_dev *ud = dev_get_priv(dma->dev);
1580 struct udma_chan *uc;
1581 int ret;
1582
1583 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1584 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1585 return -EINVAL;
1586 }
1587 uc = &ud->channels[dma->id];
1588
1589 ret = udma_start(uc);
1590
1591 return ret;
1592}
1593
1594static int udma_disable(struct dma *dma)
1595{
1596 struct udma_dev *ud = dev_get_priv(dma->dev);
1597 struct udma_chan *uc;
1598 int ret = 0;
1599
1600 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1601 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1602 return -EINVAL;
1603 }
1604 uc = &ud->channels[dma->id];
1605
1606 if (udma_is_chan_running(uc))
1607 ret = udma_stop(uc);
1608 else
1609 dev_err(dma->dev, "%s not running\n", __func__);
1610
1611 return ret;
1612}
1613
/*
 * udma_send() - dma-uclass send op: transmit one buffer and wait
 * @dma:      uclass channel handle (must be a MEM_TO_DEV channel)
 * @src:      buffer to transmit
 * @len:      number of bytes to send
 * @metadata: optional struct ti_udma_drv_packet_data (pkt_type, dest_tag)
 *
 * Rebuilds the channel's single TX host descriptor for this buffer,
 * flushes buffer and descriptor to memory, pushes the descriptor to the
 * TX ring and busy-waits for completion.
 *
 * Return: 0 on success, -EINVAL on bad id/direction, or the ring push
 * error code.
 */
static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_tx;
	dma_addr_t dma_src = (dma_addr_t)src;
	struct ti_udma_drv_packet_data packet_data = { 0 };
	dma_addr_t paddr;
	struct udma_chan *uc;
	u32 tc_ring_id;
	int ret;

	if (metadata)
		packet_data = *((struct ti_udma_drv_packet_data *)metadata);

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_MEM_TO_DEV)
		return -EINVAL;

	tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);

	/* The single TX descriptor allocated in udma_request() is reused */
	desc_tx = uc->desc_tx;

	cppi5_hdesc_reset_hbdesc(desc_tx);

	cppi5_hdesc_init(desc_tx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_tx, len);
	cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
	cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
	/* Route the completed descriptor back to the TX completion ring */
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
	/* pass below information from caller */
	cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
	cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);

	/* Write back payload and descriptor so the DMA engine sees them */
	flush_dcache_range((unsigned long)dma_src,
			   ALIGN((unsigned long)dma_src + len,
				 ARCH_DMA_MINALIGN));
	flush_dcache_range((unsigned long)desc_tx,
			   ALIGN((unsigned long)desc_tx + uc->hdesc_size,
				 ARCH_DMA_MINALIGN));

	ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
	if (ret) {
		dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
			dma->id, ret);
		return ret;
	}

	/* Blocking send: wait until the descriptor returns */
	udma_poll_completion(uc, &paddr);

	return 0;
}
1672
1673static int udma_receive(struct dma *dma, void **dst, void *metadata)
1674{
1675 struct udma_dev *ud = dev_get_priv(dma->dev);
1676 struct cppi5_host_desc_t *desc_rx;
1677 dma_addr_t buf_dma;
1678 struct udma_chan *uc;
1679 u32 buf_dma_len, pkt_len;
1680 u32 port_id = 0;
1681 int ret;
1682
1683 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1684 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1685 return -EINVAL;
1686 }
1687 uc = &ud->channels[dma->id];
1688
1689 if (uc->dir != DMA_DEV_TO_MEM)
1690 return -EINVAL;
1691 if (!uc->num_rx_bufs)
1692 return -EINVAL;
1693
1694 ret = k3_nav_ringacc_ring_pop(uc->rchan->r_ring, &desc_rx);
1695 if (ret && ret != -ENODATA) {
1696 dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
1697 return ret;
1698 } else if (ret == -ENODATA) {
1699 return 0;
1700 }
1701
1702 /* invalidate cache data */
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05301703 invalidate_dcache_range((ulong)desc_rx,
1704 (ulong)(desc_rx + uc->hdesc_size));
Vignesh R3a9dbf32019-02-05 17:31:24 +05301705
1706 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
1707 pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
1708
1709 /* invalidate cache data */
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05301710 invalidate_dcache_range((ulong)buf_dma,
1711 (ulong)(buf_dma + buf_dma_len));
Vignesh R3a9dbf32019-02-05 17:31:24 +05301712
1713 cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
1714
1715 *dst = (void *)buf_dma;
1716 uc->num_rx_bufs--;
1717
1718 return pkt_len;
1719}
1720
1721static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
1722{
1723 struct udma_dev *ud = dev_get_priv(dma->dev);
1724 struct udma_chan *uc = &ud->channels[0];
1725 ofnode chconf_node, slave_node;
1726 char prop[50];
1727 u32 val;
1728
1729 for (val = 0; val < ud->ch_count; val++) {
1730 uc = &ud->channels[val];
1731 if (!uc->in_use)
1732 break;
1733 }
1734
1735 if (val == ud->ch_count)
1736 return -EBUSY;
1737
1738 uc->dir = DMA_DEV_TO_MEM;
1739 if (args->args[2] == UDMA_DIR_TX)
1740 uc->dir = DMA_MEM_TO_DEV;
1741
1742 slave_node = ofnode_get_by_phandle(args->args[0]);
1743 if (!ofnode_valid(slave_node)) {
1744 dev_err(ud->dev, "slave node is missing\n");
1745 return -EINVAL;
1746 }
1747
1748 snprintf(prop, sizeof(prop), "ti,psil-config%u", args->args[1]);
1749 chconf_node = ofnode_find_subnode(slave_node, prop);
1750 if (!ofnode_valid(chconf_node)) {
1751 dev_err(ud->dev, "Channel configuration node is missing\n");
1752 return -EINVAL;
1753 }
1754
1755 if (!ofnode_read_u32(chconf_node, "linux,udma-mode", &val)) {
1756 if (val == UDMA_PKT_MODE)
1757 uc->pkt_mode = true;
1758 }
1759
1760 if (!ofnode_read_u32(chconf_node, "statictr-type", &val))
1761 uc->static_tr_type = val;
1762
1763 uc->needs_epib = ofnode_read_bool(chconf_node, "ti,needs-epib");
1764 if (!ofnode_read_u32(chconf_node, "ti,psd-size", &val))
1765 uc->psd_size = val;
1766 uc->metadata_size = (uc->needs_epib ? 16 : 0) + uc->psd_size;
1767
1768 if (ofnode_read_u32(slave_node, "ti,psil-base", &val)) {
1769 dev_err(ud->dev, "ti,psil-base is missing\n");
1770 return -EINVAL;
1771 }
1772
1773 uc->slave_thread_id = val + args->args[1];
1774
1775 dma->id = uc->id;
1776 pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
1777 dma->id, uc->needs_epib,
1778 uc->psd_size, uc->metadata_size,
1779 uc->slave_thread_id);
1780
1781 return 0;
1782}
1783
/*
 * udma_prepare_rcv_buf() - queue one receive buffer on the RX free ring
 * @dma:  uclass channel handle (must be a DEV_TO_MEM channel)
 * @dst:  buffer the hardware should write the next packet into
 * @size: buffer size in bytes
 *
 * Takes the next slot from the channel's ring of UDMA_RX_DESC_NUM
 * pre-allocated host descriptors, attaches the buffer to it, flushes the
 * descriptor and pushes it onto the free-descriptor ring.
 *
 * Return: 0 on success, -EINVAL on bad id/direction or when all
 * UDMA_RX_DESC_NUM descriptors are already queued.
 */
int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t dma_dst;
	struct udma_chan *uc;
	u32 desc_num;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;

	if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
		return -EINVAL;

	/* Descriptors are used round-robin from the desc_rx pool */
	desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
	desc_rx = uc->desc_rx + (desc_num * uc->hdesc_size);
	dma_dst = (dma_addr_t)dst;

	cppi5_hdesc_reset_hbdesc(desc_rx);

	cppi5_hdesc_init(desc_rx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_rx, size);
	cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);

	/* Write the descriptor back so the DMA engine sees it */
	flush_dcache_range((unsigned long)desc_rx,
			   ALIGN((unsigned long)desc_rx + uc->hdesc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->rchan->fd_ring, desc_rx);

	uc->num_rx_bufs++;
	uc->desc_rx_cur++;

	return 0;
}
1827
Vignesh Raghavendra39349892019-12-04 22:17:21 +05301828static int udma_get_cfg(struct dma *dma, u32 id, void **data)
1829{
1830 struct udma_dev *ud = dev_get_priv(dma->dev);
1831 struct udma_chan *uc;
1832
1833 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1834 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1835 return -EINVAL;
1836 }
1837
1838 switch (id) {
1839 case TI_UDMA_CHAN_PRIV_INFO:
1840 uc = &ud->channels[dma->id];
1841 *data = &uc->cfg_data;
1842 return 0;
1843 }
1844
1845 return -EINVAL;
1846}
1847
Vignesh R3a9dbf32019-02-05 17:31:24 +05301848static const struct dma_ops udma_ops = {
1849 .transfer = udma_transfer,
1850 .of_xlate = udma_of_xlate,
1851 .request = udma_request,
Simon Glass75c0ad62020-02-03 07:35:55 -07001852 .rfree = udma_rfree,
Vignesh R3a9dbf32019-02-05 17:31:24 +05301853 .enable = udma_enable,
1854 .disable = udma_disable,
1855 .send = udma_send,
1856 .receive = udma_receive,
1857 .prepare_rcv_buf = udma_prepare_rcv_buf,
Vignesh Raghavendra39349892019-12-04 22:17:21 +05301858 .get_cfg = udma_get_cfg,
Vignesh R3a9dbf32019-02-05 17:31:24 +05301859};
1860
/* Device tree compatibles handled by this driver */
static const struct udevice_id udma_ids[] = {
	{ .compatible = "ti,k3-navss-udmap" },
	{ .compatible = "ti,j721e-navss-mcu-udmap" },
	{ }
};
1866
/* Driver model registration for the TI K3 NavSS UDMA-P controller */
U_BOOT_DRIVER(ti_edma3) = {
	.name = "ti-udma",
	.id = UCLASS_DMA,
	.of_match = udma_ids,
	.ops = &udma_ops,
	.probe = udma_probe,
	.priv_auto_alloc_size = sizeof(struct udma_dev),
};