// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#define pr_fmt(fmt) "udma: " fmt

#include <common.h>
#include <cpu_func.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <malloc.h>
#include <linux/dma-mapping.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <dm/of_access.h>
#include <dma.h>
#include <dma-uclass.h>
#include <linux/delay.h>
#include <dt-bindings/dma/k3-udma.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/cppi5.h>
#include <linux/soc/ti/ti-udma.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "k3-udma-hwdef.h"

#if BITS_PER_LONG == 64
#define RINGACC_RING_USE_PROXY	(0)
#else
#define RINGACC_RING_USE_PROXY	(1)
#endif

#define K3_UDMA_MAX_RFLOWS	1024

struct udma_chan;

enum udma_mmr {
	MMR_GCFG = 0,
	MMR_RCHANRT,
	MMR_TCHANRT,
	MMR_LAST,
};

static const char * const mmr_names[] = {
	"gcfg", "rchanrt", "tchanrt"
};

struct udma_tchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *t_ring; /* Transmit ring */
	struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
};

struct udma_rchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
	struct k3_nav_ring *r_ring; /* Receive ring */
};

struct udma_rflow {
	int id;
};

enum udma_rm_range {
	RM_RANGE_TCHAN = 0,
	RM_RANGE_RCHAN,
	RM_RANGE_RFLOW,
	RM_RANGE_LAST,
};

struct udma_tisci_rm {
	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
	u32 tisci_dev_id;

	/* tisci information for PSI-L thread pairing/unpairing */
	const struct ti_sci_rm_psil_ops *tisci_psil_ops;
	u32 tisci_navss_dev_id;

	struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
};

struct udma_dev {
	struct udevice *dev;
	void __iomem *mmrs[MMR_LAST];

	struct udma_tisci_rm tisci_rm;
	struct k3_nav_ringacc *ringacc;

	u32 features;

	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_map;
	unsigned long *rflow_map_reserved;

	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
	u32 psil_base;

	u32 ch_count;
};

struct udma_chan {
	struct udma_dev *ud;
	char name[20];

	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	struct ti_udma_drv_chan_cfg_data cfg_data;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* EPIB is needed for the communication or not */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	int slave_thread_id;
	u32 src_thread;
	u32 dst_thread;
	u32 static_tr_type;

	u32 id;
	enum dma_direction dir;

	struct cppi5_host_desc_t *desc_tx;
	u32 hdesc_size;
	bool in_use;
	void *desc_rx;
	u32 num_rx_bufs;
	u32 desc_rx_cur;

};

#define UDMA_CH_1000(ch)	(ch * 0x1000)
#define UDMA_CH_100(ch)		(ch * 0x100)
#define UDMA_CH_40(ch)		(ch * 0x40)

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM	PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM	4
#endif

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	u32 v;

	v = __raw_readl(base + reg);
	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
	return v;
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
	__raw_writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = udma_read(base, reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		udma_write(base, reg, tmp);
}

/* TCHANRT */
static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
{
	if (!tchan)
		return 0;
	return udma_read(tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_tchan *tchan,
				      int reg, u32 val)
{
	if (!tchan)
		return;
	udma_write(tchan->reg_rt, reg, val);
}

/* RCHANRT */
static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
{
	if (!rchan)
		return 0;
	return udma_read(rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_rchan *rchan,
				      int reg, u32 val)
{
	if (!rchan)
		return;
	udma_write(rchan->reg_rt, reg, val);
}

static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
				       u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
					 u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}

static inline char *udma_get_dir_text(enum dma_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM:
		return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV:
		return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM:
		return "MEM_TO_MEM";
	case DMA_DEV_TO_DEV:
		return "DEV_TO_DEV";
	default:
		break;
	}

	return "invalid";
}

static inline bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, rrt_ctl,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, trt_ctl,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		break;
	default:
		break;
	}

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_nav_ring *ring = NULL;
	int ret = -ENOENT;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring && k3_nav_ringacc_ring_get_occ(ring))
		ret = k3_nav_ringacc_ring_pop(ring, addr);

	return ret;
}

static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_nav_ring *ring1 = NULL;
	struct k3_nav_ring *ring2 = NULL;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring1 = uc->rchan->fd_ring;
		ring2 = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring1)
		k3_nav_ringacc_ring_reset_dma(ring1, 0);
	if (ring2)
		k3_nav_ringacc_ring_reset(ring2);
}

static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
	}

	uc->bcnt = 0;
}

static inline int udma_stop_hard(struct udma_chan *uc)
{
	pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

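/*
 * Bring up a channel: if it is not already running, clear any pending
 * teardown via udma_stop_hard(), reset the real-time counters and then set
 * the channel and peer RT enable bits in the order required by the
 * transfer direction.
 */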
static int udma_start(struct udma_chan *uc)
{
	/* Channel is already running, no need to proceed further */
	if (udma_is_chan_running(uc))
		goto out;

	pr_debug("%s: chan:%d dir:%s (static_tr_type: %d)\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir),
		 uc->static_tr_type);

	/* Make sure that we clear the teardown bit, if it is set */
	udma_stop_hard(uc);

	/* Reset all counters */
	udma_reset_counters(uc);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_CTL_REG),
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		/* Enable remote */
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_CTL_REG),
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	default:
		return -EINVAL;
	}

	pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
out:
	return 0;
}

static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
			   UDMA_CHAN_RT_CTL_EN |
			   UDMA_CHAN_RT_CTL_TDOWN);

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf(" %s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
			   UDMA_PEER_RT_EN_ENABLE |
			   UDMA_PEER_RT_EN_TEARDOWN);

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf("%s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline int udma_stop(struct udma_chan *uc)
{
	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	udma_reset_counters(uc);
	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_stop_dev2mem(uc, true);
		break;
	case DMA_MEM_TO_DEV:
		udma_stop_mem2dev(uc, true);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
{
	int i = 1;

	while (udma_pop_from_ring(uc, paddr)) {
		udelay(1);
		if (!(i % 1000000))
			printf(".");
		i++;
	}
}

static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
{
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	if (id >= 0) {
		if (test_bit(id, ud->rflow_map)) {
			dev_err(ud->dev, "rflow%d is in use\n", id);
			return ERR_PTR(-ENOENT);
		}
	} else {
		bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
			  ud->rflow_cnt);

		id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
		if (id >= ud->rflow_cnt)
			return ERR_PTR(-ENOENT);
	}

	__set_bit(id, ud->rflow_map);
	return &ud->rflows[id];
}

#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, "res##%d is in use\n", id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		id = find_first_zero_bit(ud->res##_map, ud->res##_cnt);	\
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	__set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}

UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);

static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	uc->tchan = __udma_reserve_tchan(ud, -1);
	if (IS_ERR(uc->tchan))
		return PTR_ERR(uc->tchan);

	pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	uc->rchan = __udma_reserve_rchan(ud, -1);
	if (IS_ERR(uc->rchan))
		return PTR_ERR(uc->rchan);

	pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);

	return 0;
}

static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	for (chan_id = 0; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	__set_bit(chan_id, ud->tchan_map);
	__set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);

	return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	if (!uc->rchan)
		dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);

	uc->rflow = __udma_reserve_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow))
		return PTR_ERR(uc->rflow);

	pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
	return 0;
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		__clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		__clear_bit(uc->tchan->id, ud->tchan_map);
		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__clear_bit(uc->rflow->id, ud->rflow_map);
		uc->rflow = NULL;
	}
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	uc->tchan->t_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, uc->tchan->id,
				RINGACC_RING_USE_PROXY);
	if (!uc->tchan->t_ring) {
		ret = -EBUSY;
		goto err_tx_ring;
	}

	uc->tchan->tc_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->tchan->tc_ring) {
		ret = -EBUSY;
		goto err_txc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
err_txc_ring:
	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_tx_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->fd_ring = NULL;
	uc->rchan->r_ring = NULL;

	udma_put_rflow(uc);
	udma_put_rchan(uc);
}

static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->dir == DMA_MEM_TO_MEM)
		return 0;

	ret = udma_get_rflow(uc, uc->rchan->id);
	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;

	uc->rchan->fd_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, fd_ring_id,
				RINGACC_RING_USE_PROXY);
	if (!uc->rchan->fd_ring) {
		ret = -EBUSY;
		goto err_rx_ring;
	}

	uc->rchan->r_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->rchan->r_ring) {
		ret = -EBUSY;
		goto err_rxc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->rchan->fd_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->rchan->r_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->r_ring = NULL;
err_rxc_ring:
	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	uc->rchan->fd_ring = NULL;
err_rx_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}

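/*
 * Channel configuration is not done by writing UDMAP config registers
 * directly: the TX/RX channel and the RX flow are set up through TISCI
 * (System Firmware) resource-management calls, using the parameters
 * collected for the channel (packet vs. TR mode, descriptor fetch size,
 * completion queue and free-descriptor queue ring ids).
 */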
static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->tchan->id;
	req.tx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM)
		req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	else
		req.tx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
	req.txcq_qnum = tc_ring;

	ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);

	return ret;
}

static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int fd_ring = k3_nav_ringacc_get_ring_id(uc->rchan->fd_ring);
	int rx_ring = k3_nav_ringacc_get_ring_id(uc->rchan->r_ring);
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->rchan->id;
	req.rx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM) {
		req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		req.rxcq_qnum = tc_ring;
	} else {
		req.rx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
		req.rxcq_qnum = rx_ring;
	}
	if (uc->rflow->id != uc->rchan->id && uc->dir != DMA_MEM_TO_MEM) {
		req.flowid_start = uc->rflow->id;
		req.flowid_cnt = 1;
	}

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
			uc->rchan->id, ret);
		return ret;
	}
	if (uc->dir == DMA_MEM_TO_MEM)
		return ret;

	flow_req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;

	if (uc->psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;

	flow_req.rx_error_handling = 0;
	flow_req.rx_desc_type = 0;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = 2;
	flow_req.rx_src_tag_lo_sel = 4;
	flow_req.rx_dest_tag_hi_sel = 5;
	flow_req.rx_dest_tag_lo_sel = 4;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;
	flow_req.rx_ps_location = 0;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
						     &flow_req);
	if (ret)
		dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
			uc->rchan->id, uc->rflow->id, ret);

	return ret;
}

static int udma_alloc_chan_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	pr_debug("%s: chan:%d as %s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	switch (uc->dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		ret = udma_get_chan_pair(uc);
		if (ret)
			return ret;

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = uc->slave_thread_id;
		if (!(uc->dst_thread & 0x8000))
			uc->dst_thread |= 0x8000;

		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = uc->slave_thread_id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;

		break;
	default:
		/* Can not happen */
		pr_debug("%s: chan:%d invalid direction (%u)\n",
			 __func__, uc->id, uc->dir);
		return -EINVAL;
	}

	/* We have channel indexes and rings */
	if (uc->dir == DMA_MEM_TO_MEM) {
		ret = udma_alloc_tchan_sci_req(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rchan_sci_req(uc);
		if (ret)
			goto err_free_res;
	} else {
		/* Slave transfer */
		if (uc->dir == DMA_MEM_TO_DEV) {
			ret = udma_alloc_tchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		} else {
			ret = udma_alloc_rchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		}
	}

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			goto err_free_res;
		}
	}

	/* PSI-L pairing */
	ret = udma_navss_psil_pair(ud, uc->src_thread, uc->dst_thread);
	if (ret) {
		dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
		goto err_free_res;
	}

	return 0;

err_free_res:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	uc->slave_thread_id = -1;
	return ret;
}

static void udma_free_chan_resources(struct udma_chan *uc)
{
	/* Some configuration to UDMA-P channel: disable, reset, whatever */

	/* Release PSI-L pairing */
	udma_navss_psil_unpair(uc->ud, uc->src_thread, uc->dst_thread);

	/* Reset the rings for a new start */
	udma_reset_rings(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	uc->slave_thread_id = -1;
	uc->dir = DMA_MEM_TO_MEM;
}

static int udma_get_mmrs(struct udevice *dev)
{
	struct udma_dev *ud = dev_get_priv(dev);
	int i;

	for (i = 0; i < MMR_LAST; i++) {
		ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
							       mmr_names[i]);
		if (!ud->mmrs[i])
			return -EINVAL;
	}

	return 0;
}

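/*
 * Read the channel/flow counts from the GCFG capability registers and build
 * the allocation bitmaps. Resources that System Firmware has not assigned to
 * this host (via the ti,sci-rm-range-* ranges) are left marked as in-use so
 * the reservation helpers above never pick them.
 */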
static int udma_setup_resources(struct udma_dev *ud)
{
	struct udevice *dev = ud->dev;
	int ch_count, i;
	u32 cap2, cap3;
	struct ti_sci_resource_desc *rm_desc;
	struct ti_sci_resource *rm_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	static const char * const range_names[] = { "ti,sci-rm-range-tchan",
						    "ti,sci-rm-range-rchan",
						    "ti,sci-rm-range-rflow" };

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

	ud->rflow_cnt = cap3 & 0x3fff;
	ud->tchan_cnt = cap2 & 0x1ff;
	ud->echan_cnt = (cap2 >> 9) & 0x1ff;
	ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
	ch_count = ud->tchan_cnt + ud->rchan_cnt;

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					      sizeof(unsigned long),
					      GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
	    !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/*
	 * RX flows with the same Ids as RX channels are reserved to be used
	 * as default flows if remote HW can't generate flow_ids. Those
	 * RX flows can be requested only explicitly by id.
	 */
	bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++)
		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		bitmap_zero(ud->rflow_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		bitmap_fill(ud->rflow_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* GP rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		bitmap_clear(ud->rflow_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		bitmap_set(ud->rflow_map, ud->rchan_cnt,
			   ud->rflow_cnt - ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	dev_info(dev,
		 "Channels: %d (tchan: %u, echan: %u, rchan: %u, rflow: %u)\n",
		 ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
		 ud->rflow_cnt);

	return ch_count;
}

static int udma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct udma_dev *ud = dev_get_priv(dev);
	int i, ret;
	struct udevice *tmp;
	struct udevice *tisci_dev = NULL;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));

	ret = udma_get_mmrs(dev);
	if (ret)
		return ret;

	ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
					   "ti,ringacc", &tmp);
	ud->ringacc = dev_get_priv(tmp);
	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	ud->psil_base = dev_read_u32_default(dev, "ti,psil-base", 0);
	if (!ud->psil_base) {
		dev_info(dev, "Missing ti,psil-base property\n");
		return -EINVAL;
	}

	ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
					   "ti,sci", &tisci_dev);
	if (ret) {
		debug("Failed to get TISCI phandle (%d)\n", ret);
		tisci_rm->tisci = NULL;
		return -EINVAL;
	}
	tisci_rm->tisci = (struct ti_sci_handle *)
			  (ti_sci_get_handle_from_sysfw(tisci_dev));

	tisci_rm->tisci_dev_id = -1;
	ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_navss_dev_id = -1;
	ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
			      &tisci_rm->tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
	tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;

	ud->dev = dev;
	ud->ch_count = udma_setup_resources(ud);
	if (ud->ch_count <= 0)
		return ud->ch_count;

	dev_info(dev,
		 "Number of channels: %u (tchan: %u, echan: %u, rchan: %u dev-id %u)\n",
		 ud->ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
		 tisci_rm->tisci_dev_id);
	dev_info(dev, "Number of rflows: %u\n", ud->rflow_cnt);

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ud->ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->id = i;
		uc->slave_thread_id = -1;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->dir = DMA_MEM_TO_MEM;
		sprintf(uc->name, "UDMA chan%d", i);
		if (!i)
			uc->in_use = true;
	}

	pr_debug("UDMA(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
		 udma_read(ud->mmrs[MMR_GCFG], 0),
		 udma_read(ud->mmrs[MMR_GCFG], 0x20),
		 udma_read(ud->mmrs[MMR_GCFG], 0x24),
		 udma_read(ud->mmrs[MMR_GCFG], 0x28),
		 udma_read(ud->mmrs[MMR_GCFG], 0x2c));

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;

	return ret;
}

static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
{
	u64 addr = 0;

	memcpy(&addr, &elem, sizeof(elem));
	return k3_nav_ringacc_ring_push(ring, &addr);
}

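/*
 * Build a type 15 TR descriptor for a memcpy. Transfers shorter than 64K fit
 * into a single TR; larger ones are split into a block-sized TR plus a
 * remainder TR, since the per-TR element count (icnt0) is held in a u16.
 */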
static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
				 dma_addr_t src, size_t len)
{
	u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct cppi5_tr_type15_t *tr_req;
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned long dummy;
	void *tr_desc;
	size_t desc_size;

	if (len < SZ_64K) {
		num_tr = 1;
		tr0_cnt0 = len;
		tr0_cnt1 = 1;
	} else {
		unsigned long align_to = __ffs(src | dest);

		if (align_to > 3)
			align_to = 3;
		/*
		 * Keep simple: tr0: SZ_64K-alignment blocks,
		 * tr1: the remaining
		 */
		num_tr = 2;
		tr0_cnt0 = (SZ_64K - BIT(align_to));
		if (len / tr0_cnt0 >= SZ_64K) {
			dev_err(uc->ud->dev, "size %zu is not supported\n",
				len);
			return NULL;
		}

		tr0_cnt1 = len / tr0_cnt0;
		tr1_cnt0 = len % tr0_cnt0;
	}

	desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
	tr_desc = dma_alloc_coherent(desc_size, &dummy);
	if (!tr_desc)
		return NULL;
	memset(tr_desc, 0, desc_size);

	cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);

	tr_req = tr_desc + tr_size;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;
	tr_req[0].icnt1 = tr0_cnt1;
	tr_req[0].icnt2 = 1;
	tr_req[0].icnt3 = 1;
	tr_req[0].dim1 = tr0_cnt0;

	tr_req[0].daddr = dest;
	tr_req[0].dicnt0 = tr0_cnt0;
	tr_req[0].dicnt1 = tr0_cnt1;
	tr_req[0].dicnt2 = 1;
	tr_req[0].dicnt3 = 1;
	tr_req[0].ddim1 = tr0_cnt0;

	if (num_tr == 2) {
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;
		tr_req[1].icnt1 = 1;
		tr_req[1].icnt2 = 1;
		tr_req[1].icnt3 = 1;

		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
		tr_req[1].dicnt0 = tr1_cnt0;
		tr_req[1].dicnt1 = 1;
		tr_req[1].dicnt2 = 1;
		tr_req[1].dicnt3 = 1;
	}

	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);

	flush_dcache_range((unsigned long)tr_desc,
			   ALIGN((unsigned long)tr_desc + desc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->tchan->t_ring, tr_desc);

	return 0;
}

static int udma_transfer(struct udevice *dev, int direction,
			 void *dst, void *src, size_t len)
{
	struct udma_dev *ud = dev_get_priv(dev);
	/* Channel0 is reserved for memcpy */
	struct udma_chan *uc = &ud->channels[0];
	dma_addr_t paddr = 0;
	int ret;

	ret = udma_alloc_chan_resources(uc);
	if (ret)
		return ret;

	udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);
	udma_start(uc);
	udma_poll_completion(uc, &paddr);
	udma_stop(uc);

	udma_free_chan_resources(uc);
	return 0;
}

static int udma_request(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	unsigned long dummy;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	uc = &ud->channels[dma->id];
	ret = udma_alloc_chan_resources(uc);
	if (ret) {
		dev_err(dma->dev, "alloc dma res failed %d\n", ret);
		return -EINVAL;
	}

	uc->hdesc_size = cppi5_hdesc_calc_size(uc->needs_epib,
					       uc->psd_size, 0);
	uc->hdesc_size = ALIGN(uc->hdesc_size, ARCH_DMA_MINALIGN);

	if (uc->dir == DMA_MEM_TO_DEV) {
		uc->desc_tx = dma_alloc_coherent(uc->hdesc_size, &dummy);
		memset(uc->desc_tx, 0, uc->hdesc_size);
	} else {
		uc->desc_rx = dma_alloc_coherent(
				uc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
		memset(uc->desc_rx, 0, uc->hdesc_size * UDMA_RX_DESC_NUM);
	}

	uc->in_use = true;
	uc->desc_rx_cur = 0;
	uc->num_rx_bufs = 0;

	if (uc->dir == DMA_DEV_TO_MEM) {
		uc->cfg_data.flow_id_base = uc->rflow->id;
		uc->cfg_data.flow_id_cnt = 1;
	}

	return 0;
}

static int udma_rfree(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		udma_stop(uc);
	udma_free_chan_resources(uc);

	uc->in_use = false;

	return 0;
}

static int udma_enable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	ret = udma_start(uc);

	return ret;
}

static int udma_disable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret = 0;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		ret = udma_stop(uc);
	else
		dev_err(dma->dev, "%s not running\n", __func__);

	return ret;
}

static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_tx;
	dma_addr_t dma_src = (dma_addr_t)src;
	struct ti_udma_drv_packet_data packet_data = { 0 };
	dma_addr_t paddr;
	struct udma_chan *uc;
	u32 tc_ring_id;
	int ret;

	if (metadata)
		packet_data = *((struct ti_udma_drv_packet_data *)metadata);

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_MEM_TO_DEV)
		return -EINVAL;

	tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);

	desc_tx = uc->desc_tx;

	cppi5_hdesc_reset_hbdesc(desc_tx);

	cppi5_hdesc_init(desc_tx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_tx, len);
	cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
	cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
	/* pass below information from caller */
	cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
	cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);

	flush_dcache_range((unsigned long)dma_src,
			   ALIGN((unsigned long)dma_src + len,
				 ARCH_DMA_MINALIGN));
	flush_dcache_range((unsigned long)desc_tx,
			   ALIGN((unsigned long)desc_tx + uc->hdesc_size,
				 ARCH_DMA_MINALIGN));

	ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
	if (ret) {
		dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
			dma->id, ret);
		return ret;
	}

	udma_poll_completion(uc, &paddr);

	return 0;
}

static int udma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t buf_dma;
	struct udma_chan *uc;
	u32 buf_dma_len, pkt_len;
	u32 port_id = 0;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;
	if (!uc->num_rx_bufs)
		return -EINVAL;

	ret = k3_nav_ringacc_ring_pop(uc->rchan->r_ring, &desc_rx);
	if (ret && ret != -ENODATA) {
		dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
		return ret;
	} else if (ret == -ENODATA) {
		return 0;
	}

	/* invalidate cache data */
	invalidate_dcache_range((ulong)desc_rx,
				(ulong)(desc_rx + uc->hdesc_size));

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);

	/* invalidate cache data */
	invalidate_dcache_range((ulong)buf_dma,
				(ulong)(buf_dma + buf_dma_len));

	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	*dst = (void *)buf_dma;
	uc->num_rx_bufs--;

	return pkt_len;
}

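/*
 * of_xlate: pick the first unused channel and configure it from the DT
 * arguments - args[0] is the slave node phandle, args[1] the PSI-L thread
 * offset (also used to select the matching ti,psil-configN subnode) and
 * args[2] the direction (UDMA_DIR_TX for MEM_TO_DEV, otherwise DEV_TO_MEM).
 */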
static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc = &ud->channels[0];
	ofnode chconf_node, slave_node;
	char prop[50];
	u32 val;

	for (val = 0; val < ud->ch_count; val++) {
		uc = &ud->channels[val];
		if (!uc->in_use)
			break;
	}

	if (val == ud->ch_count)
		return -EBUSY;

	uc->dir = DMA_DEV_TO_MEM;
	if (args->args[2] == UDMA_DIR_TX)
		uc->dir = DMA_MEM_TO_DEV;

	slave_node = ofnode_get_by_phandle(args->args[0]);
	if (!ofnode_valid(slave_node)) {
		dev_err(ud->dev, "slave node is missing\n");
		return -EINVAL;
	}

	snprintf(prop, sizeof(prop), "ti,psil-config%u", args->args[1]);
	chconf_node = ofnode_find_subnode(slave_node, prop);
	if (!ofnode_valid(chconf_node)) {
		dev_err(ud->dev, "Channel configuration node is missing\n");
		return -EINVAL;
	}

	if (!ofnode_read_u32(chconf_node, "linux,udma-mode", &val)) {
		if (val == UDMA_PKT_MODE)
			uc->pkt_mode = true;
	}

	if (!ofnode_read_u32(chconf_node, "statictr-type", &val))
		uc->static_tr_type = val;

	uc->needs_epib = ofnode_read_bool(chconf_node, "ti,needs-epib");
	if (!ofnode_read_u32(chconf_node, "ti,psd-size", &val))
		uc->psd_size = val;
	uc->metadata_size = (uc->needs_epib ? 16 : 0) + uc->psd_size;

	if (ofnode_read_u32(slave_node, "ti,psil-base", &val)) {
		dev_err(ud->dev, "ti,psil-base is missing\n");
		return -EINVAL;
	}

	uc->slave_thread_id = val + args->args[1];

	dma->id = uc->id;
	pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
		 dma->id, uc->needs_epib,
		 uc->psd_size, uc->metadata_size,
		 uc->slave_thread_id);

	return 0;
}

int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t dma_dst;
	struct udma_chan *uc;
	u32 desc_num;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;

	if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
		return -EINVAL;

	desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
	desc_rx = uc->desc_rx + (desc_num * uc->hdesc_size);
	dma_dst = (dma_addr_t)dst;

	cppi5_hdesc_reset_hbdesc(desc_rx);

	cppi5_hdesc_init(desc_rx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_rx, size);
	cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);

	flush_dcache_range((unsigned long)desc_rx,
			   ALIGN((unsigned long)desc_rx + uc->hdesc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->rchan->fd_ring, desc_rx);

	uc->num_rx_bufs++;
	uc->desc_rx_cur++;

	return 0;
}

static int udma_get_cfg(struct dma *dma, u32 id, void **data)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	switch (id) {
	case TI_UDMA_CHAN_PRIV_INFO:
		uc = &ud->channels[dma->id];
		*data = &uc->cfg_data;
		return 0;
	}

	return -EINVAL;
}

static const struct dma_ops udma_ops = {
	.transfer	= udma_transfer,
	.of_xlate	= udma_of_xlate,
	.request	= udma_request,
	.rfree		= udma_rfree,
	.enable		= udma_enable,
	.disable	= udma_disable,
	.send		= udma_send,
	.receive	= udma_receive,
	.prepare_rcv_buf = udma_prepare_rcv_buf,
	.get_cfg	= udma_get_cfg,
};

static const struct udevice_id udma_ids[] = {
	{ .compatible = "ti,k3-navss-udmap" },
	{ .compatible = "ti,j721e-navss-mcu-udmap" },
	{ }
};

U_BOOT_DRIVER(ti_edma3) = {
	.name	= "ti-udma",
	.id	= UCLASS_DMA,
	.of_match = udma_ids,
	.ops	= &udma_ops,
	.probe	= udma_probe,
	.priv_auto_alloc_size = sizeof(struct udma_dev),
};