blob: 375e904fa9a15c69c6f25c1263569897773f9223 [file] [log] [blame]
Vignesh R3a9dbf32019-02-05 17:31:24 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 */
6#define pr_fmt(fmt) "udma: " fmt
7
8#include <common.h>
Simon Glass63334482019-11-14 12:57:39 -07009#include <cpu_func.h>
Simon Glass0f2af882020-05-10 11:40:05 -060010#include <log.h>
Simon Glass274e0b02020-05-10 11:39:56 -060011#include <asm/cache.h>
Vignesh R3a9dbf32019-02-05 17:31:24 +053012#include <asm/io.h>
13#include <asm/bitops.h>
14#include <malloc.h>
Simon Glass4dcacfc2020-05-10 11:40:13 -060015#include <linux/bitops.h>
Masahiro Yamada6373a172020-02-14 16:40:19 +090016#include <linux/dma-mapping.h>
Vignesh R3a9dbf32019-02-05 17:31:24 +053017#include <dm.h>
Simon Glass9bc15642020-02-03 07:36:16 -070018#include <dm/device_compat.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070019#include <dm/devres.h>
Vignesh R3a9dbf32019-02-05 17:31:24 +053020#include <dm/read.h>
21#include <dm/of_access.h>
22#include <dma.h>
23#include <dma-uclass.h>
24#include <linux/delay.h>
Vignesh Raghavendrac4106862019-12-09 10:25:32 +053025#include <linux/bitmap.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070026#include <linux/err.h>
Vignesh R3a9dbf32019-02-05 17:31:24 +053027#include <linux/soc/ti/k3-navss-ringacc.h>
28#include <linux/soc/ti/cppi5.h>
29#include <linux/soc/ti/ti-udma.h>
30#include <linux/soc/ti/ti_sci_protocol.h>
31
32#include "k3-udma-hwdef.h"
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +053033#include "k3-psil-priv.h"
Vignesh R3a9dbf32019-02-05 17:31:24 +053034
35#if BITS_PER_LONG == 64
36#define RINGACC_RING_USE_PROXY (0)
37#else
38#define RINGACC_RING_USE_PROXY (1)
39#endif
40
Vignesh Raghavendrac4106862019-12-09 10:25:32 +053041#define K3_UDMA_MAX_RFLOWS 1024
42
Vignesh R3a9dbf32019-02-05 17:31:24 +053043struct udma_chan;
44
45enum udma_mmr {
46 MMR_GCFG = 0,
47 MMR_RCHANRT,
48 MMR_TCHANRT,
49 MMR_LAST,
50};
51
52static const char * const mmr_names[] = {
53 "gcfg", "rchanrt", "tchanrt"
54};
55
56struct udma_tchan {
57 void __iomem *reg_rt;
58
59 int id;
60 struct k3_nav_ring *t_ring; /* Transmit ring */
61 struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
62};
63
64struct udma_rchan {
65 void __iomem *reg_rt;
66
67 int id;
68 struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
69 struct k3_nav_ring *r_ring; /* Receive ring*/
70};
71
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +053072#define UDMA_FLAG_PDMA_ACC32 BIT(0)
73#define UDMA_FLAG_PDMA_BURST BIT(1)
74#define UDMA_FLAG_TDTYPE BIT(2)
75
76struct udma_match_data {
77 u32 psil_base;
78 bool enable_memcpy_support;
79 u32 flags;
80 u32 statictr_z_mask;
81 u32 rchan_oes_offset;
82
83 u8 tpl_levels;
84 u32 level_start_idx[];
85};
86
Vignesh R3a9dbf32019-02-05 17:31:24 +053087struct udma_rflow {
88 int id;
89};
90
Vignesh Raghavendrac4106862019-12-09 10:25:32 +053091enum udma_rm_range {
92 RM_RANGE_TCHAN = 0,
93 RM_RANGE_RCHAN,
94 RM_RANGE_RFLOW,
95 RM_RANGE_LAST,
96};
97
98struct udma_tisci_rm {
99 const struct ti_sci_handle *tisci;
100 const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
101 u32 tisci_dev_id;
102
103 /* tisci information for PSI-L thread pairing/unpairing */
104 const struct ti_sci_rm_psil_ops *tisci_psil_ops;
105 u32 tisci_navss_dev_id;
106
107 struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
108};
109
Vignesh R3a9dbf32019-02-05 17:31:24 +0530110struct udma_dev {
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530111 struct udevice *dev;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530112 void __iomem *mmrs[MMR_LAST];
113
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530114 struct udma_tisci_rm tisci_rm;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530115 struct k3_nav_ringacc *ringacc;
116
117 u32 features;
118
119 int tchan_cnt;
120 int echan_cnt;
121 int rchan_cnt;
122 int rflow_cnt;
123 unsigned long *tchan_map;
124 unsigned long *rchan_map;
125 unsigned long *rflow_map;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530126 unsigned long *rflow_map_reserved;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530127
128 struct udma_tchan *tchans;
129 struct udma_rchan *rchans;
130 struct udma_rflow *rflows;
131
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +0530132 struct udma_match_data *match_data;
133
Vignesh R3a9dbf32019-02-05 17:31:24 +0530134 struct udma_chan *channels;
135 u32 psil_base;
136
137 u32 ch_count;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530138};
139
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530140struct udma_chan_config {
141 u32 psd_size; /* size of Protocol Specific Data */
142 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
143 u32 hdesc_size; /* Size of a packet descriptor in packet mode */
144 int remote_thread_id;
145 u32 atype;
146 u32 src_thread;
147 u32 dst_thread;
148 enum psil_endpoint_type ep_type;
149 enum udma_tp_level channel_tpl; /* Channel Throughput Level */
150
151 enum dma_direction dir;
152
153 unsigned int pkt_mode:1; /* TR or packet */
154 unsigned int needs_epib:1; /* EPIB is needed for the communication or not */
155 unsigned int enable_acc32:1;
156 unsigned int enable_burst:1;
157 unsigned int notdpkt:1; /* Suppress sending TDC packet */
158};
159
Vignesh R3a9dbf32019-02-05 17:31:24 +0530160struct udma_chan {
161 struct udma_dev *ud;
162 char name[20];
163
164 struct udma_tchan *tchan;
165 struct udma_rchan *rchan;
166 struct udma_rflow *rflow;
167
Vignesh Raghavendra39349892019-12-04 22:17:21 +0530168 struct ti_udma_drv_chan_cfg_data cfg_data;
169
Vignesh R3a9dbf32019-02-05 17:31:24 +0530170 u32 bcnt; /* number of bytes completed since the start of the channel */
171
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530172 struct udma_chan_config config;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530173
174 u32 id;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530175
176 struct cppi5_host_desc_t *desc_tx;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530177 bool in_use;
178 void *desc_rx;
179 u32 num_rx_bufs;
180 u32 desc_rx_cur;
181
182};
183
184#define UDMA_CH_1000(ch) (ch * 0x1000)
185#define UDMA_CH_100(ch) (ch * 0x100)
186#define UDMA_CH_40(ch) (ch * 0x40)
187
188#ifdef PKTBUFSRX
189#define UDMA_RX_DESC_NUM PKTBUFSRX
190#else
191#define UDMA_RX_DESC_NUM 4
192#endif
193
194/* Generic register access functions */
195static inline u32 udma_read(void __iomem *base, int reg)
196{
197 u32 v;
198
199 v = __raw_readl(base + reg);
200 pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
201 return v;
202}
203
204static inline void udma_write(void __iomem *base, int reg, u32 val)
205{
206 pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
207 __raw_writel(val, base + reg);
208}
209
210static inline void udma_update_bits(void __iomem *base, int reg,
211 u32 mask, u32 val)
212{
213 u32 tmp, orig;
214
215 orig = udma_read(base, reg);
216 tmp = orig & ~mask;
217 tmp |= (val & mask);
218
219 if (tmp != orig)
220 udma_write(base, reg, tmp);
221}
222
223/* TCHANRT */
224static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
225{
226 if (!tchan)
227 return 0;
228 return udma_read(tchan->reg_rt, reg);
229}
230
231static inline void udma_tchanrt_write(struct udma_tchan *tchan,
232 int reg, u32 val)
233{
234 if (!tchan)
235 return;
236 udma_write(tchan->reg_rt, reg, val);
237}
238
239/* RCHANRT */
240static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
241{
242 if (!rchan)
243 return 0;
244 return udma_read(rchan->reg_rt, reg);
245}
246
247static inline void udma_rchanrt_write(struct udma_rchan *rchan,
248 int reg, u32 val)
249{
250 if (!rchan)
251 return;
252 udma_write(rchan->reg_rt, reg, val);
253}
254
255static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
256 u32 dst_thread)
257{
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530258 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
259
Vignesh R3a9dbf32019-02-05 17:31:24 +0530260 dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530261
262 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
263 tisci_rm->tisci_navss_dev_id,
264 src_thread, dst_thread);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530265}
266
267static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
268 u32 dst_thread)
269{
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530270 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
271
Vignesh R3a9dbf32019-02-05 17:31:24 +0530272 dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530273
274 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
275 tisci_rm->tisci_navss_dev_id,
276 src_thread, dst_thread);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530277}
278
279static inline char *udma_get_dir_text(enum dma_direction dir)
280{
281 switch (dir) {
282 case DMA_DEV_TO_MEM:
283 return "DEV_TO_MEM";
284 case DMA_MEM_TO_DEV:
285 return "MEM_TO_DEV";
286 case DMA_MEM_TO_MEM:
287 return "MEM_TO_MEM";
288 case DMA_DEV_TO_DEV:
289 return "DEV_TO_DEV";
290 default:
291 break;
292 }
293
294 return "invalid";
295}
296
297static inline bool udma_is_chan_running(struct udma_chan *uc)
298{
299 u32 trt_ctl = 0;
300 u32 rrt_ctl = 0;
301
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530302 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530303 case DMA_DEV_TO_MEM:
304 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
305 pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
306 __func__, rrt_ctl,
307 udma_rchanrt_read(uc->rchan,
308 UDMA_RCHAN_RT_PEER_RT_EN_REG));
309 break;
310 case DMA_MEM_TO_DEV:
311 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
312 pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
313 __func__, trt_ctl,
314 udma_tchanrt_read(uc->tchan,
315 UDMA_TCHAN_RT_PEER_RT_EN_REG));
316 break;
317 case DMA_MEM_TO_MEM:
318 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
319 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
320 break;
321 default:
322 break;
323 }
324
325 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
326 return true;
327
328 return false;
329}
330
Vignesh R3a9dbf32019-02-05 17:31:24 +0530331static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
332{
333 struct k3_nav_ring *ring = NULL;
334 int ret = -ENOENT;
335
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530336 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530337 case DMA_DEV_TO_MEM:
338 ring = uc->rchan->r_ring;
339 break;
340 case DMA_MEM_TO_DEV:
341 ring = uc->tchan->tc_ring;
342 break;
343 case DMA_MEM_TO_MEM:
344 ring = uc->tchan->tc_ring;
345 break;
346 default:
347 break;
348 }
349
350 if (ring && k3_nav_ringacc_ring_get_occ(ring))
351 ret = k3_nav_ringacc_ring_pop(ring, addr);
352
353 return ret;
354}
355
356static void udma_reset_rings(struct udma_chan *uc)
357{
358 struct k3_nav_ring *ring1 = NULL;
359 struct k3_nav_ring *ring2 = NULL;
360
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530361 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530362 case DMA_DEV_TO_MEM:
363 ring1 = uc->rchan->fd_ring;
364 ring2 = uc->rchan->r_ring;
365 break;
366 case DMA_MEM_TO_DEV:
367 ring1 = uc->tchan->t_ring;
368 ring2 = uc->tchan->tc_ring;
369 break;
370 case DMA_MEM_TO_MEM:
371 ring1 = uc->tchan->t_ring;
372 ring2 = uc->tchan->tc_ring;
373 break;
374 default:
375 break;
376 }
377
378 if (ring1)
379 k3_nav_ringacc_ring_reset_dma(ring1, 0);
380 if (ring2)
381 k3_nav_ringacc_ring_reset(ring2);
382}
383
384static void udma_reset_counters(struct udma_chan *uc)
385{
386 u32 val;
387
388 if (uc->tchan) {
389 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
390 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
391
392 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
393 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
394
395 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
396 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
397
398 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
399 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
400 }
401
402 if (uc->rchan) {
403 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
404 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
405
406 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
407 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
408
409 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
410 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
411
412 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
413 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
414 }
415
416 uc->bcnt = 0;
417}
418
419static inline int udma_stop_hard(struct udma_chan *uc)
420{
421 pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);
422
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530423 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530424 case DMA_DEV_TO_MEM:
425 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
426 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
427 break;
428 case DMA_MEM_TO_DEV:
429 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
430 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
431 break;
432 case DMA_MEM_TO_MEM:
433 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
434 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
435 break;
436 default:
437 return -EINVAL;
438 }
439
440 return 0;
441}
442
443static int udma_start(struct udma_chan *uc)
444{
445 /* Channel is already running, no need to proceed further */
446 if (udma_is_chan_running(uc))
447 goto out;
448
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530449 pr_debug("%s: chan:%d dir:%s\n",
450 __func__, uc->id, udma_get_dir_text(uc->config.dir));
Vignesh R3a9dbf32019-02-05 17:31:24 +0530451
452 /* Make sure that we clear the teardown bit, if it is set */
453 udma_stop_hard(uc);
454
455 /* Reset all counters */
456 udma_reset_counters(uc);
457
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530458 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530459 case DMA_DEV_TO_MEM:
460 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
461 UDMA_CHAN_RT_CTL_EN);
462
463 /* Enable remote */
464 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
465 UDMA_PEER_RT_EN_ENABLE);
466
467 pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
468 __func__,
469 udma_rchanrt_read(uc->rchan,
470 UDMA_RCHAN_RT_CTL_REG),
471 udma_rchanrt_read(uc->rchan,
472 UDMA_RCHAN_RT_PEER_RT_EN_REG));
473 break;
474 case DMA_MEM_TO_DEV:
475 /* Enable remote */
476 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
477 UDMA_PEER_RT_EN_ENABLE);
478
479 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
480 UDMA_CHAN_RT_CTL_EN);
481
482 pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
483 __func__,
Vignesh Raghavendrac2237992019-12-09 10:25:36 +0530484 udma_tchanrt_read(uc->tchan,
Vignesh R3a9dbf32019-02-05 17:31:24 +0530485 UDMA_TCHAN_RT_CTL_REG),
Vignesh Raghavendrac2237992019-12-09 10:25:36 +0530486 udma_tchanrt_read(uc->tchan,
Vignesh R3a9dbf32019-02-05 17:31:24 +0530487 UDMA_TCHAN_RT_PEER_RT_EN_REG));
488 break;
489 case DMA_MEM_TO_MEM:
490 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
491 UDMA_CHAN_RT_CTL_EN);
492 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
493 UDMA_CHAN_RT_CTL_EN);
494
495 break;
496 default:
497 return -EINVAL;
498 }
499
500 pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
501out:
502 return 0;
503}
504
505static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
506{
507 int i = 0;
508 u32 val;
509
510 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
511 UDMA_CHAN_RT_CTL_EN |
512 UDMA_CHAN_RT_CTL_TDOWN);
513
514 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
515
516 while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
517 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
518 udelay(1);
519 if (i > 1000) {
520 printf(" %s TIMEOUT !\n", __func__);
521 break;
522 }
523 i++;
524 }
525
526 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
527 if (val & UDMA_PEER_RT_EN_ENABLE)
528 printf("%s: peer not stopped TIMEOUT !\n", __func__);
529}
530
531static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
532{
533 int i = 0;
534 u32 val;
535
536 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
537 UDMA_PEER_RT_EN_ENABLE |
538 UDMA_PEER_RT_EN_TEARDOWN);
539
540 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
541
542 while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
543 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
544 udelay(1);
545 if (i > 1000) {
546 printf("%s TIMEOUT !\n", __func__);
547 break;
548 }
549 i++;
550 }
551
552 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
553 if (val & UDMA_PEER_RT_EN_ENABLE)
554 printf("%s: peer not stopped TIMEOUT !\n", __func__);
555}
556
557static inline int udma_stop(struct udma_chan *uc)
558{
559 pr_debug("%s: chan:%d dir:%s\n",
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530560 __func__, uc->id, udma_get_dir_text(uc->config.dir));
Vignesh R3a9dbf32019-02-05 17:31:24 +0530561
562 udma_reset_counters(uc);
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530563 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530564 case DMA_DEV_TO_MEM:
565 udma_stop_dev2mem(uc, true);
566 break;
567 case DMA_MEM_TO_DEV:
568 udma_stop_mem2dev(uc, true);
569 break;
570 case DMA_MEM_TO_MEM:
571 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
572 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
573 break;
574 default:
575 return -EINVAL;
576 }
577
578 return 0;
579}
580
581static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
582{
583 int i = 1;
584
585 while (udma_pop_from_ring(uc, paddr)) {
586 udelay(1);
587 if (!(i % 1000000))
588 printf(".");
589 i++;
590 }
591}
592
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530593static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
594{
595 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
596
597 if (id >= 0) {
598 if (test_bit(id, ud->rflow_map)) {
599 dev_err(ud->dev, "rflow%d is in use\n", id);
600 return ERR_PTR(-ENOENT);
601 }
602 } else {
603 bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
604 ud->rflow_cnt);
605
606 id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
607 if (id >= ud->rflow_cnt)
608 return ERR_PTR(-ENOENT);
609 }
610
611 __set_bit(id, ud->rflow_map);
612 return &ud->rflows[id];
613}
614
Vignesh R3a9dbf32019-02-05 17:31:24 +0530615#define UDMA_RESERVE_RESOURCE(res) \
616static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
617 int id) \
618{ \
619 if (id >= 0) { \
620 if (test_bit(id, ud->res##_map)) { \
621 dev_err(ud->dev, "res##%d is in use\n", id); \
622 return ERR_PTR(-ENOENT); \
623 } \
624 } else { \
625 id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \
626 if (id == ud->res##_cnt) { \
627 return ERR_PTR(-ENOENT); \
628 } \
629 } \
630 \
631 __set_bit(id, ud->res##_map); \
632 return &ud->res##s[id]; \
633}
634
635UDMA_RESERVE_RESOURCE(tchan);
636UDMA_RESERVE_RESOURCE(rchan);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530637
638static int udma_get_tchan(struct udma_chan *uc)
639{
640 struct udma_dev *ud = uc->ud;
641
642 if (uc->tchan) {
643 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
644 uc->id, uc->tchan->id);
645 return 0;
646 }
647
648 uc->tchan = __udma_reserve_tchan(ud, -1);
649 if (IS_ERR(uc->tchan))
650 return PTR_ERR(uc->tchan);
651
652 pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);
653
Vignesh R3a9dbf32019-02-05 17:31:24 +0530654 return 0;
655}
656
657static int udma_get_rchan(struct udma_chan *uc)
658{
659 struct udma_dev *ud = uc->ud;
660
661 if (uc->rchan) {
662 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
663 uc->id, uc->rchan->id);
664 return 0;
665 }
666
667 uc->rchan = __udma_reserve_rchan(ud, -1);
668 if (IS_ERR(uc->rchan))
669 return PTR_ERR(uc->rchan);
670
671 pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);
672
Vignesh R3a9dbf32019-02-05 17:31:24 +0530673 return 0;
674}
675
676static int udma_get_chan_pair(struct udma_chan *uc)
677{
678 struct udma_dev *ud = uc->ud;
679 int chan_id, end;
680
681 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
682 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
683 uc->id, uc->tchan->id);
684 return 0;
685 }
686
687 if (uc->tchan) {
688 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
689 uc->id, uc->tchan->id);
690 return -EBUSY;
691 } else if (uc->rchan) {
692 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
693 uc->id, uc->rchan->id);
694 return -EBUSY;
695 }
696
697 /* Can be optimized, but let's have it like this for now */
698 end = min(ud->tchan_cnt, ud->rchan_cnt);
699 for (chan_id = 0; chan_id < end; chan_id++) {
700 if (!test_bit(chan_id, ud->tchan_map) &&
701 !test_bit(chan_id, ud->rchan_map))
702 break;
703 }
704
705 if (chan_id == end)
706 return -ENOENT;
707
708 __set_bit(chan_id, ud->tchan_map);
709 __set_bit(chan_id, ud->rchan_map);
710 uc->tchan = &ud->tchans[chan_id];
711 uc->rchan = &ud->rchans[chan_id];
712
713 pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);
714
Vignesh R3a9dbf32019-02-05 17:31:24 +0530715 return 0;
716}
717
718static int udma_get_rflow(struct udma_chan *uc, int flow_id)
719{
720 struct udma_dev *ud = uc->ud;
721
722 if (uc->rflow) {
723 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
724 uc->id, uc->rflow->id);
725 return 0;
726 }
727
728 if (!uc->rchan)
729 dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);
730
731 uc->rflow = __udma_reserve_rflow(ud, flow_id);
732 if (IS_ERR(uc->rflow))
733 return PTR_ERR(uc->rflow);
734
735 pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
736 return 0;
737}
738
739static void udma_put_rchan(struct udma_chan *uc)
740{
741 struct udma_dev *ud = uc->ud;
742
743 if (uc->rchan) {
744 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
745 uc->rchan->id);
746 __clear_bit(uc->rchan->id, ud->rchan_map);
747 uc->rchan = NULL;
748 }
749}
750
751static void udma_put_tchan(struct udma_chan *uc)
752{
753 struct udma_dev *ud = uc->ud;
754
755 if (uc->tchan) {
756 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
757 uc->tchan->id);
758 __clear_bit(uc->tchan->id, ud->tchan_map);
759 uc->tchan = NULL;
760 }
761}
762
763static void udma_put_rflow(struct udma_chan *uc)
764{
765 struct udma_dev *ud = uc->ud;
766
767 if (uc->rflow) {
768 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
769 uc->rflow->id);
770 __clear_bit(uc->rflow->id, ud->rflow_map);
771 uc->rflow = NULL;
772 }
773}
774
775static void udma_free_tx_resources(struct udma_chan *uc)
776{
777 if (!uc->tchan)
778 return;
779
780 k3_nav_ringacc_ring_free(uc->tchan->t_ring);
781 k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
782 uc->tchan->t_ring = NULL;
783 uc->tchan->tc_ring = NULL;
784
785 udma_put_tchan(uc);
786}
787
788static int udma_alloc_tx_resources(struct udma_chan *uc)
789{
790 struct k3_nav_ring_cfg ring_cfg;
791 struct udma_dev *ud = uc->ud;
792 int ret;
793
794 ret = udma_get_tchan(uc);
795 if (ret)
796 return ret;
797
798 uc->tchan->t_ring = k3_nav_ringacc_request_ring(
799 ud->ringacc, uc->tchan->id,
800 RINGACC_RING_USE_PROXY);
801 if (!uc->tchan->t_ring) {
802 ret = -EBUSY;
803 goto err_tx_ring;
804 }
805
806 uc->tchan->tc_ring = k3_nav_ringacc_request_ring(
807 ud->ringacc, -1, RINGACC_RING_USE_PROXY);
808 if (!uc->tchan->tc_ring) {
809 ret = -EBUSY;
810 goto err_txc_ring;
811 }
812
813 memset(&ring_cfg, 0, sizeof(ring_cfg));
814 ring_cfg.size = 16;
815 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
Vignesh Raghavendra0fe24d32019-12-09 10:25:37 +0530816 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530817
818 ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
819 ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
820
821 if (ret)
822 goto err_ringcfg;
823
824 return 0;
825
826err_ringcfg:
827 k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
828 uc->tchan->tc_ring = NULL;
829err_txc_ring:
830 k3_nav_ringacc_ring_free(uc->tchan->t_ring);
831 uc->tchan->t_ring = NULL;
832err_tx_ring:
833 udma_put_tchan(uc);
834
835 return ret;
836}
837
838static void udma_free_rx_resources(struct udma_chan *uc)
839{
840 if (!uc->rchan)
841 return;
842
843 k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
844 k3_nav_ringacc_ring_free(uc->rchan->r_ring);
845 uc->rchan->fd_ring = NULL;
846 uc->rchan->r_ring = NULL;
847
848 udma_put_rflow(uc);
849 udma_put_rchan(uc);
850}
851
852static int udma_alloc_rx_resources(struct udma_chan *uc)
853{
854 struct k3_nav_ring_cfg ring_cfg;
855 struct udma_dev *ud = uc->ud;
856 int fd_ring_id;
857 int ret;
858
859 ret = udma_get_rchan(uc);
860 if (ret)
861 return ret;
862
863 /* For MEM_TO_MEM we don't need rflow or rings */
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530864 if (uc->config.dir == DMA_MEM_TO_MEM)
Vignesh R3a9dbf32019-02-05 17:31:24 +0530865 return 0;
866
867 ret = udma_get_rflow(uc, uc->rchan->id);
868 if (ret) {
869 ret = -EBUSY;
870 goto err_rflow;
871 }
872
873 fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
874
875 uc->rchan->fd_ring = k3_nav_ringacc_request_ring(
876 ud->ringacc, fd_ring_id,
877 RINGACC_RING_USE_PROXY);
878 if (!uc->rchan->fd_ring) {
879 ret = -EBUSY;
880 goto err_rx_ring;
881 }
882
883 uc->rchan->r_ring = k3_nav_ringacc_request_ring(
884 ud->ringacc, -1, RINGACC_RING_USE_PROXY);
885 if (!uc->rchan->r_ring) {
886 ret = -EBUSY;
887 goto err_rxc_ring;
888 }
889
890 memset(&ring_cfg, 0, sizeof(ring_cfg));
891 ring_cfg.size = 16;
892 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
Vignesh Raghavendra0fe24d32019-12-09 10:25:37 +0530893 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530894
895 ret = k3_nav_ringacc_ring_cfg(uc->rchan->fd_ring, &ring_cfg);
896 ret |= k3_nav_ringacc_ring_cfg(uc->rchan->r_ring, &ring_cfg);
897
898 if (ret)
899 goto err_ringcfg;
900
901 return 0;
902
903err_ringcfg:
904 k3_nav_ringacc_ring_free(uc->rchan->r_ring);
905 uc->rchan->r_ring = NULL;
906err_rxc_ring:
907 k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
908 uc->rchan->fd_ring = NULL;
909err_rx_ring:
910 udma_put_rflow(uc);
911err_rflow:
912 udma_put_rchan(uc);
913
914 return ret;
915}
916
917static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
918{
919 struct udma_dev *ud = uc->ud;
920 int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
921 struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530922 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530923 u32 mode;
924 int ret;
925
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530926 if (uc->config.pkt_mode)
Vignesh R3a9dbf32019-02-05 17:31:24 +0530927 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
928 else
929 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
930
931 req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
932 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
933 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530934 req.nav_id = tisci_rm->tisci_dev_id;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530935 req.index = uc->tchan->id;
936 req.tx_chan_type = mode;
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530937 if (uc->config.dir == DMA_MEM_TO_MEM)
Vignesh R3a9dbf32019-02-05 17:31:24 +0530938 req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
939 else
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530940 req.tx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
941 uc->config.psd_size,
Vignesh R3a9dbf32019-02-05 17:31:24 +0530942 0) >> 2;
943 req.txcq_qnum = tc_ring;
944
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530945 ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530946 if (ret)
947 dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);
948
949 return ret;
950}
951
952static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
953{
954 struct udma_dev *ud = uc->ud;
955 int fd_ring = k3_nav_ringacc_get_ring_id(uc->rchan->fd_ring);
956 int rx_ring = k3_nav_ringacc_get_ring_id(uc->rchan->r_ring);
957 int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
958 struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
959 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530960 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530961 u32 mode;
962 int ret;
963
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530964 if (uc->config.pkt_mode)
Vignesh R3a9dbf32019-02-05 17:31:24 +0530965 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
966 else
967 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
968
969 req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
970 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
Lokesh Vutla9eae8622020-02-28 17:56:20 +0530971 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
972 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
973 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530974 req.nav_id = tisci_rm->tisci_dev_id;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530975 req.index = uc->rchan->id;
976 req.rx_chan_type = mode;
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530977 if (uc->config.dir == DMA_MEM_TO_MEM) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530978 req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
979 req.rxcq_qnum = tc_ring;
980 } else {
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530981 req.rx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
982 uc->config.psd_size,
Vignesh R3a9dbf32019-02-05 17:31:24 +0530983 0) >> 2;
984 req.rxcq_qnum = rx_ring;
985 }
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530986 if (uc->rflow->id != uc->rchan->id && uc->config.dir != DMA_MEM_TO_MEM) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530987 req.flowid_start = uc->rflow->id;
988 req.flowid_cnt = 1;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530989 }
990
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530991 ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530992 if (ret) {
993 dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
994 uc->rchan->id, ret);
995 return ret;
996 }
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530997 if (uc->config.dir == DMA_MEM_TO_MEM)
Vignesh R3a9dbf32019-02-05 17:31:24 +0530998 return ret;
999
1000 flow_req.valid_params =
1001 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
1002 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
1003 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
1004 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
1005 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1006 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
1007 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
1008 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
1009 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
1010 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1011 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1012 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1013 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
1014 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;
1015
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301016 flow_req.nav_id = tisci_rm->tisci_dev_id;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301017 flow_req.flow_index = uc->rflow->id;
1018
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301019 if (uc->config.needs_epib)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301020 flow_req.rx_einfo_present = 1;
1021 else
1022 flow_req.rx_einfo_present = 0;
1023
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301024 if (uc->config.psd_size)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301025 flow_req.rx_psinfo_present = 1;
1026 else
1027 flow_req.rx_psinfo_present = 0;
1028
1029 flow_req.rx_error_handling = 0;
1030 flow_req.rx_desc_type = 0;
1031 flow_req.rx_dest_qnum = rx_ring;
1032 flow_req.rx_src_tag_hi_sel = 2;
1033 flow_req.rx_src_tag_lo_sel = 4;
1034 flow_req.rx_dest_tag_hi_sel = 5;
1035 flow_req.rx_dest_tag_lo_sel = 4;
1036 flow_req.rx_fdq0_sz0_qnum = fd_ring;
1037 flow_req.rx_fdq1_qnum = fd_ring;
1038 flow_req.rx_fdq2_qnum = fd_ring;
1039 flow_req.rx_fdq3_qnum = fd_ring;
1040 flow_req.rx_ps_location = 0;
1041
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301042 ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
1043 &flow_req);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301044 if (ret)
1045 dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
1046 uc->rchan->id, uc->rflow->id, ret);
1047
1048 return ret;
1049}
1050
1051static int udma_alloc_chan_resources(struct udma_chan *uc)
1052{
1053 struct udma_dev *ud = uc->ud;
1054 int ret;
1055
1056 pr_debug("%s: chan:%d as %s\n",
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301057 __func__, uc->id, udma_get_dir_text(uc->config.dir));
Vignesh R3a9dbf32019-02-05 17:31:24 +05301058
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301059 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +05301060 case DMA_MEM_TO_MEM:
1061 /* Non synchronized - mem to mem type of transfer */
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301062 uc->config.pkt_mode = false;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301063 ret = udma_get_chan_pair(uc);
1064 if (ret)
1065 return ret;
1066
1067 ret = udma_alloc_tx_resources(uc);
1068 if (ret)
1069 goto err_free_res;
1070
1071 ret = udma_alloc_rx_resources(uc);
1072 if (ret)
1073 goto err_free_res;
1074
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301075 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1076 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301077 break;
1078 case DMA_MEM_TO_DEV:
1079 /* Slave transfer synchronized - mem to dev (TX) trasnfer */
1080 ret = udma_alloc_tx_resources(uc);
1081 if (ret)
1082 goto err_free_res;
1083
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301084 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1085 uc->config.dst_thread = uc->config.remote_thread_id;
1086 uc->config.dst_thread |= 0x8000;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301087
1088 break;
1089 case DMA_DEV_TO_MEM:
1090 /* Slave transfer synchronized - dev to mem (RX) trasnfer */
1091 ret = udma_alloc_rx_resources(uc);
1092 if (ret)
1093 goto err_free_res;
1094
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301095 uc->config.src_thread = uc->config.remote_thread_id;
1096 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301097
1098 break;
1099 default:
1100 /* Can not happen */
1101 pr_debug("%s: chan:%d invalid direction (%u)\n",
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301102 __func__, uc->id, uc->config.dir);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301103 return -EINVAL;
1104 }
1105
1106 /* We have channel indexes and rings */
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301107 if (uc->config.dir == DMA_MEM_TO_MEM) {
Vignesh R3a9dbf32019-02-05 17:31:24 +05301108 ret = udma_alloc_tchan_sci_req(uc);
1109 if (ret)
1110 goto err_free_res;
1111
1112 ret = udma_alloc_rchan_sci_req(uc);
1113 if (ret)
1114 goto err_free_res;
1115 } else {
1116 /* Slave transfer */
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301117 if (uc->config.dir == DMA_MEM_TO_DEV) {
Vignesh R3a9dbf32019-02-05 17:31:24 +05301118 ret = udma_alloc_tchan_sci_req(uc);
1119 if (ret)
1120 goto err_free_res;
1121 } else {
1122 ret = udma_alloc_rchan_sci_req(uc);
1123 if (ret)
1124 goto err_free_res;
1125 }
1126 }
1127
Peter Ujfalusid15f8652019-04-25 12:08:15 +05301128 if (udma_is_chan_running(uc)) {
1129 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1130 udma_stop(uc);
1131 if (udma_is_chan_running(uc)) {
1132 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1133 goto err_free_res;
1134 }
1135 }
1136
Vignesh R3a9dbf32019-02-05 17:31:24 +05301137 /* PSI-L pairing */
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301138 ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301139 if (ret) {
1140 dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
1141 goto err_free_res;
1142 }
1143
1144 return 0;
1145
1146err_free_res:
1147 udma_free_tx_resources(uc);
1148 udma_free_rx_resources(uc);
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301149 uc->config.remote_thread_id = -1;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301150 return ret;
1151}
1152
1153static void udma_free_chan_resources(struct udma_chan *uc)
1154{
1155 /* Some configuration to UDMA-P channel: disable, reset, whatever */
1156
1157 /* Release PSI-L pairing */
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301158 udma_navss_psil_unpair(uc->ud, uc->config.src_thread, uc->config.dst_thread);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301159
1160 /* Reset the rings for a new start */
1161 udma_reset_rings(uc);
1162 udma_free_tx_resources(uc);
1163 udma_free_rx_resources(uc);
1164
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301165 uc->config.remote_thread_id = -1;
1166 uc->config.dir = DMA_MEM_TO_MEM;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301167}
1168
1169static int udma_get_mmrs(struct udevice *dev)
1170{
1171 struct udma_dev *ud = dev_get_priv(dev);
1172 int i;
1173
1174 for (i = 0; i < MMR_LAST; i++) {
1175 ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
1176 mmr_names[i]);
1177 if (!ud->mmrs[i])
1178 return -EINVAL;
1179 }
1180
1181 return 0;
1182}
1183
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301184static int udma_setup_resources(struct udma_dev *ud)
1185{
1186 struct udevice *dev = ud->dev;
1187 int ch_count, i;
1188 u32 cap2, cap3;
1189 struct ti_sci_resource_desc *rm_desc;
1190 struct ti_sci_resource *rm_res;
1191 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1192 static const char * const range_names[] = { "ti,sci-rm-range-tchan",
1193 "ti,sci-rm-range-rchan",
1194 "ti,sci-rm-range-rflow" };
1195
1196 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
1197 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
1198
1199 ud->rflow_cnt = cap3 & 0x3fff;
1200 ud->tchan_cnt = cap2 & 0x1ff;
1201 ud->echan_cnt = (cap2 >> 9) & 0x1ff;
1202 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1203 ch_count = ud->tchan_cnt + ud->rchan_cnt;
1204
1205 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1206 sizeof(unsigned long), GFP_KERNEL);
1207 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1208 GFP_KERNEL);
1209 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1210 sizeof(unsigned long), GFP_KERNEL);
1211 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1212 GFP_KERNEL);
1213 ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
1214 sizeof(unsigned long), GFP_KERNEL);
1215 ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
1216 sizeof(unsigned long),
1217 GFP_KERNEL);
1218 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
1219 GFP_KERNEL);
1220
1221 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
1222 !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
1223 !ud->rflows)
1224 return -ENOMEM;
1225
1226 /*
1227 * RX flows with the same Ids as RX channels are reserved to be used
1228 * as default flows if remote HW can't generate flow_ids. Those
1229 * RX flows can be requested only explicitly by id.
1230 */
1231 bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);
1232
1233 /* Get resource ranges from tisci */
1234 for (i = 0; i < RM_RANGE_LAST; i++)
1235 tisci_rm->rm_ranges[i] =
1236 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1237 tisci_rm->tisci_dev_id,
1238 (char *)range_names[i]);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301239
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301240 /* tchan ranges */
1241 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1242 if (IS_ERR(rm_res)) {
1243 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1244 } else {
1245 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1246 for (i = 0; i < rm_res->sets; i++) {
1247 rm_desc = &rm_res->desc[i];
1248 bitmap_clear(ud->tchan_map, rm_desc->start,
1249 rm_desc->num);
1250 }
1251 }
1252
1253 /* rchan and matching default flow ranges */
1254 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1255 if (IS_ERR(rm_res)) {
1256 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1257 bitmap_zero(ud->rflow_map, ud->rchan_cnt);
1258 } else {
1259 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1260 bitmap_fill(ud->rflow_map, ud->rchan_cnt);
1261 for (i = 0; i < rm_res->sets; i++) {
1262 rm_desc = &rm_res->desc[i];
1263 bitmap_clear(ud->rchan_map, rm_desc->start,
1264 rm_desc->num);
1265 bitmap_clear(ud->rflow_map, rm_desc->start,
1266 rm_desc->num);
1267 }
1268 }
1269
1270 /* GP rflow ranges */
1271 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
1272 if (IS_ERR(rm_res)) {
1273 bitmap_clear(ud->rflow_map, ud->rchan_cnt,
1274 ud->rflow_cnt - ud->rchan_cnt);
1275 } else {
1276 bitmap_set(ud->rflow_map, ud->rchan_cnt,
1277 ud->rflow_cnt - ud->rchan_cnt);
1278 for (i = 0; i < rm_res->sets; i++) {
1279 rm_desc = &rm_res->desc[i];
1280 bitmap_clear(ud->rflow_map, rm_desc->start,
1281 rm_desc->num);
1282 }
1283 }
1284
1285 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
1286 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
1287 if (!ch_count)
1288 return -ENODEV;
1289
1290 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
1291 GFP_KERNEL);
1292 if (!ud->channels)
1293 return -ENOMEM;
1294
1295 dev_info(dev,
1296 "Channels: %d (tchan: %u, echan: %u, rchan: %u, rflow: %u)\n",
1297 ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
1298 ud->rflow_cnt);
1299
1300 return ch_count;
1301}
Vignesh R3a9dbf32019-02-05 17:31:24 +05301302static int udma_probe(struct udevice *dev)
1303{
1304 struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
1305 struct udma_dev *ud = dev_get_priv(dev);
1306 int i, ret;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301307 struct udevice *tmp;
1308 struct udevice *tisci_dev = NULL;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301309 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1310 ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));
1311
Vignesh R3a9dbf32019-02-05 17:31:24 +05301312
1313 ret = udma_get_mmrs(dev);
1314 if (ret)
1315 return ret;
1316
1317 ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
1318 "ti,ringacc", &tmp);
1319 ud->ringacc = dev_get_priv(tmp);
1320 if (IS_ERR(ud->ringacc))
1321 return PTR_ERR(ud->ringacc);
1322
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05301323 ud->match_data = (void *)dev_get_driver_data(dev);
1324 ud->psil_base = ud->match_data->psil_base;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301325
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301326 ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
1327 "ti,sci", &tisci_dev);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301328 if (ret) {
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301329 debug("Failed to get TISCI phandle (%d)\n", ret);
1330 tisci_rm->tisci = NULL;
1331 return -EINVAL;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301332 }
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301333 tisci_rm->tisci = (struct ti_sci_handle *)
1334 (ti_sci_get_handle_from_sysfw(tisci_dev));
Vignesh R3a9dbf32019-02-05 17:31:24 +05301335
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301336 tisci_rm->tisci_dev_id = -1;
1337 ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
1338 if (ret) {
1339 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
1340 return ret;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301341 }
1342
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301343 tisci_rm->tisci_navss_dev_id = -1;
1344 ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
1345 &tisci_rm->tisci_navss_dev_id);
1346 if (ret) {
1347 dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
1348 return ret;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301349 }
1350
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301351 tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
1352 tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301353
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301354 ud->dev = dev;
1355 ud->ch_count = udma_setup_resources(ud);
1356 if (ud->ch_count <= 0)
1357 return ud->ch_count;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301358
1359 dev_info(dev,
1360 "Number of channels: %u (tchan: %u, echan: %u, rchan: %u dev-id %u)\n",
1361 ud->ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301362 tisci_rm->tisci_dev_id);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301363 dev_info(dev, "Number of rflows: %u\n", ud->rflow_cnt);
1364
Vignesh R3a9dbf32019-02-05 17:31:24 +05301365 for (i = 0; i < ud->tchan_cnt; i++) {
1366 struct udma_tchan *tchan = &ud->tchans[i];
1367
1368 tchan->id = i;
1369 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
1370 }
1371
1372 for (i = 0; i < ud->rchan_cnt; i++) {
1373 struct udma_rchan *rchan = &ud->rchans[i];
1374
1375 rchan->id = i;
1376 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
1377 }
1378
1379 for (i = 0; i < ud->rflow_cnt; i++) {
1380 struct udma_rflow *rflow = &ud->rflows[i];
1381
1382 rflow->id = i;
1383 }
1384
1385 for (i = 0; i < ud->ch_count; i++) {
1386 struct udma_chan *uc = &ud->channels[i];
1387
1388 uc->ud = ud;
1389 uc->id = i;
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301390 uc->config.remote_thread_id = -1;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301391 uc->tchan = NULL;
1392 uc->rchan = NULL;
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301393 uc->config.dir = DMA_MEM_TO_MEM;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301394 sprintf(uc->name, "UDMA chan%d\n", i);
1395 if (!i)
1396 uc->in_use = true;
1397 }
1398
1399 pr_debug("UDMA(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
1400 udma_read(ud->mmrs[MMR_GCFG], 0),
1401 udma_read(ud->mmrs[MMR_GCFG], 0x20),
1402 udma_read(ud->mmrs[MMR_GCFG], 0x24),
1403 udma_read(ud->mmrs[MMR_GCFG], 0x28),
1404 udma_read(ud->mmrs[MMR_GCFG], 0x2c));
1405
1406 uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;
1407
1408 return ret;
1409}
1410
Vignesh Raghavendrafc7a33f2019-12-09 10:25:38 +05301411static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
1412{
1413 u64 addr = 0;
1414
1415 memcpy(&addr, &elem, sizeof(elem));
1416 return k3_nav_ringacc_ring_push(ring, &addr);
1417}
1418
Vignesh R3a9dbf32019-02-05 17:31:24 +05301419static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
1420 dma_addr_t src, size_t len)
1421{
1422 u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1423 struct cppi5_tr_type15_t *tr_req;
1424 int num_tr;
1425 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
1426 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
1427 unsigned long dummy;
1428 void *tr_desc;
1429 size_t desc_size;
1430
1431 if (len < SZ_64K) {
1432 num_tr = 1;
1433 tr0_cnt0 = len;
1434 tr0_cnt1 = 1;
1435 } else {
1436 unsigned long align_to = __ffs(src | dest);
1437
1438 if (align_to > 3)
1439 align_to = 3;
1440 /*
1441 * Keep simple: tr0: SZ_64K-alignment blocks,
1442 * tr1: the remaining
1443 */
1444 num_tr = 2;
1445 tr0_cnt0 = (SZ_64K - BIT(align_to));
1446 if (len / tr0_cnt0 >= SZ_64K) {
1447 dev_err(uc->ud->dev, "size %zu is not supported\n",
1448 len);
1449 return NULL;
1450 }
1451
1452 tr0_cnt1 = len / tr0_cnt0;
1453 tr1_cnt0 = len % tr0_cnt0;
1454 }
1455
1456 desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
1457 tr_desc = dma_alloc_coherent(desc_size, &dummy);
1458 if (!tr_desc)
1459 return NULL;
1460 memset(tr_desc, 0, desc_size);
1461
1462 cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
1463 cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
1464 cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);
1465
1466 tr_req = tr_desc + tr_size;
1467
1468 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
1469 CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
1470 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
1471
1472 tr_req[0].addr = src;
1473 tr_req[0].icnt0 = tr0_cnt0;
1474 tr_req[0].icnt1 = tr0_cnt1;
1475 tr_req[0].icnt2 = 1;
1476 tr_req[0].icnt3 = 1;
1477 tr_req[0].dim1 = tr0_cnt0;
1478
1479 tr_req[0].daddr = dest;
1480 tr_req[0].dicnt0 = tr0_cnt0;
1481 tr_req[0].dicnt1 = tr0_cnt1;
1482 tr_req[0].dicnt2 = 1;
1483 tr_req[0].dicnt3 = 1;
1484 tr_req[0].ddim1 = tr0_cnt0;
1485
1486 if (num_tr == 2) {
1487 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
1488 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
1489 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
1490
1491 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
1492 tr_req[1].icnt0 = tr1_cnt0;
1493 tr_req[1].icnt1 = 1;
1494 tr_req[1].icnt2 = 1;
1495 tr_req[1].icnt3 = 1;
1496
1497 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
1498 tr_req[1].dicnt0 = tr1_cnt0;
1499 tr_req[1].dicnt1 = 1;
1500 tr_req[1].dicnt2 = 1;
1501 tr_req[1].dicnt3 = 1;
1502 }
1503
1504 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
1505
Vignesh Raghavendrace431412019-12-09 10:25:39 +05301506 flush_dcache_range((unsigned long)tr_desc,
1507 ALIGN((unsigned long)tr_desc + desc_size,
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05301508 ARCH_DMA_MINALIGN));
Vignesh R3a9dbf32019-02-05 17:31:24 +05301509
Vignesh Raghavendrafc7a33f2019-12-09 10:25:38 +05301510 udma_push_to_ring(uc->tchan->t_ring, tr_desc);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301511
1512 return 0;
1513}
1514
1515static int udma_transfer(struct udevice *dev, int direction,
1516 void *dst, void *src, size_t len)
1517{
1518 struct udma_dev *ud = dev_get_priv(dev);
1519 /* Channel0 is reserved for memcpy */
1520 struct udma_chan *uc = &ud->channels[0];
1521 dma_addr_t paddr = 0;
1522 int ret;
1523
1524 ret = udma_alloc_chan_resources(uc);
1525 if (ret)
1526 return ret;
1527
1528 udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);
1529 udma_start(uc);
1530 udma_poll_completion(uc, &paddr);
1531 udma_stop(uc);
1532
1533 udma_free_chan_resources(uc);
1534 return 0;
1535}
1536
1537static int udma_request(struct dma *dma)
1538{
1539 struct udma_dev *ud = dev_get_priv(dma->dev);
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301540 struct udma_chan_config *ucc;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301541 struct udma_chan *uc;
1542 unsigned long dummy;
1543 int ret;
1544
1545 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1546 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1547 return -EINVAL;
1548 }
1549
1550 uc = &ud->channels[dma->id];
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301551 ucc = &uc->config;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301552 ret = udma_alloc_chan_resources(uc);
1553 if (ret) {
1554 dev_err(dma->dev, "alloc dma res failed %d\n", ret);
1555 return -EINVAL;
1556 }
1557
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301558 if (uc->config.dir == DMA_MEM_TO_DEV) {
1559 uc->desc_tx = dma_alloc_coherent(ucc->hdesc_size, &dummy);
1560 memset(uc->desc_tx, 0, ucc->hdesc_size);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301561 } else {
1562 uc->desc_rx = dma_alloc_coherent(
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301563 ucc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
1564 memset(uc->desc_rx, 0, ucc->hdesc_size * UDMA_RX_DESC_NUM);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301565 }
1566
1567 uc->in_use = true;
1568 uc->desc_rx_cur = 0;
1569 uc->num_rx_bufs = 0;
1570
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301571 if (uc->config.dir == DMA_DEV_TO_MEM) {
Vignesh Raghavendra39349892019-12-04 22:17:21 +05301572 uc->cfg_data.flow_id_base = uc->rflow->id;
1573 uc->cfg_data.flow_id_cnt = 1;
1574 }
1575
Vignesh R3a9dbf32019-02-05 17:31:24 +05301576 return 0;
1577}
1578
Simon Glass75c0ad62020-02-03 07:35:55 -07001579static int udma_rfree(struct dma *dma)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301580{
1581 struct udma_dev *ud = dev_get_priv(dma->dev);
1582 struct udma_chan *uc;
1583
1584 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1585 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1586 return -EINVAL;
1587 }
1588 uc = &ud->channels[dma->id];
1589
1590 if (udma_is_chan_running(uc))
1591 udma_stop(uc);
1592 udma_free_chan_resources(uc);
1593
1594 uc->in_use = false;
1595
1596 return 0;
1597}
1598
1599static int udma_enable(struct dma *dma)
1600{
1601 struct udma_dev *ud = dev_get_priv(dma->dev);
1602 struct udma_chan *uc;
1603 int ret;
1604
1605 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1606 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1607 return -EINVAL;
1608 }
1609 uc = &ud->channels[dma->id];
1610
1611 ret = udma_start(uc);
1612
1613 return ret;
1614}
1615
1616static int udma_disable(struct dma *dma)
1617{
1618 struct udma_dev *ud = dev_get_priv(dma->dev);
1619 struct udma_chan *uc;
1620 int ret = 0;
1621
1622 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1623 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1624 return -EINVAL;
1625 }
1626 uc = &ud->channels[dma->id];
1627
1628 if (udma_is_chan_running(uc))
1629 ret = udma_stop(uc);
1630 else
1631 dev_err(dma->dev, "%s not running\n", __func__);
1632
1633 return ret;
1634}
1635
1636static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
1637{
1638 struct udma_dev *ud = dev_get_priv(dma->dev);
1639 struct cppi5_host_desc_t *desc_tx;
1640 dma_addr_t dma_src = (dma_addr_t)src;
1641 struct ti_udma_drv_packet_data packet_data = { 0 };
1642 dma_addr_t paddr;
1643 struct udma_chan *uc;
1644 u32 tc_ring_id;
1645 int ret;
1646
Keerthya3c8bb12019-04-24 16:33:54 +05301647 if (metadata)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301648 packet_data = *((struct ti_udma_drv_packet_data *)metadata);
1649
1650 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1651 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1652 return -EINVAL;
1653 }
1654 uc = &ud->channels[dma->id];
1655
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301656 if (uc->config.dir != DMA_MEM_TO_DEV)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301657 return -EINVAL;
1658
1659 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1660
1661 desc_tx = uc->desc_tx;
1662
1663 cppi5_hdesc_reset_hbdesc(desc_tx);
1664
1665 cppi5_hdesc_init(desc_tx,
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301666 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
1667 uc->config.psd_size);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301668 cppi5_hdesc_set_pktlen(desc_tx, len);
1669 cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
1670 cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
1671 cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
1672 /* pass below information from caller */
1673 cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
1674 cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);
1675
Vignesh Raghavendrace431412019-12-09 10:25:39 +05301676 flush_dcache_range((unsigned long)dma_src,
1677 ALIGN((unsigned long)dma_src + len,
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05301678 ARCH_DMA_MINALIGN));
Vignesh Raghavendrace431412019-12-09 10:25:39 +05301679 flush_dcache_range((unsigned long)desc_tx,
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301680 ALIGN((unsigned long)desc_tx + uc->config.hdesc_size,
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05301681 ARCH_DMA_MINALIGN));
Vignesh R3a9dbf32019-02-05 17:31:24 +05301682
Vignesh Raghavendrafc7a33f2019-12-09 10:25:38 +05301683 ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301684 if (ret) {
1685 dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
1686 dma->id, ret);
1687 return ret;
1688 }
1689
1690 udma_poll_completion(uc, &paddr);
1691
1692 return 0;
1693}
1694
static int udma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan_config *ucc;
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t buf_dma;
	struct udma_chan *uc;
	u32 buf_dma_len, pkt_len;
	u32 port_id = 0;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];
	ucc = &uc->config;

	if (uc->config.dir != DMA_DEV_TO_MEM)
		return -EINVAL;
	if (!uc->num_rx_bufs)
		return -EINVAL;

	ret = k3_nav_ringacc_ring_pop(uc->rchan->r_ring, &desc_rx);
	if (ret && ret != -ENODATA) {
		dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
		return ret;
	} else if (ret == -ENODATA) {
		return 0;
	}

	/* Invalidate the descriptor so the CPU sees what the hardware wrote */
	invalidate_dcache_range((ulong)desc_rx,
				(ulong)desc_rx + ucc->hdesc_size);

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);

	/* Invalidate the receive buffer before handing it to the caller */
	invalidate_dcache_range((ulong)buf_dma,
				(ulong)(buf_dma + buf_dma_len));

	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	*dst = (void *)buf_dma;
	uc->num_rx_bufs--;

	return pkt_len;
}

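/*
 * dma_ops .of_xlate callback: translate a client's device tree phandle
 * argument (a PSI-L thread ID) into a free channel. The transfer direction
 * is inferred from the thread ID and the descriptor sizing is derived from
 * the PSI-L endpoint configuration for that thread.
 */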
static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
{
	struct udma_chan_config *ucc;
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc = &ud->channels[0];
	struct psil_endpoint_config *ep_config;
	u32 val;

	for (val = 0; val < ud->ch_count; val++) {
		uc = &ud->channels[val];
		if (!uc->in_use)
			break;
	}

	if (val == ud->ch_count)
		return -EBUSY;

	ucc = &uc->config;
	ucc->remote_thread_id = args->args[0];
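	/*
	 * PSI-L destination threads carry the DST offset bit: those endpoints
	 * consume data, so the channel runs memory-to-device.
	 */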
	if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
		ucc->dir = DMA_MEM_TO_DEV;
	else
		ucc->dir = DMA_DEV_TO_MEM;

	ep_config = psil_get_ep_config(ucc->remote_thread_id);
	if (IS_ERR(ep_config)) {
		dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
			ucc->remote_thread_id);
		ucc->dir = DMA_MEM_TO_MEM;
		ucc->remote_thread_id = -1;
		return -EINVAL;
	}

	ucc->pkt_mode = ep_config->pkt_mode;
	ucc->channel_tpl = ep_config->channel_tpl;
	ucc->notdpkt = ep_config->notdpkt;
	ucc->ep_type = ep_config->ep_type;

	ucc->needs_epib = ep_config->needs_epib;
	ucc->psd_size = ep_config->psd_size;
	ucc->metadata_size = (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
			     ucc->psd_size;

	ucc->hdesc_size = cppi5_hdesc_calc_size(ucc->needs_epib,
						ucc->psd_size, 0);
	ucc->hdesc_size = ALIGN(ucc->hdesc_size, ARCH_DMA_MINALIGN);

	dma->id = uc->id;
	pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
		 dma->id, ucc->needs_epib,
		 ucc->psd_size, ucc->metadata_size,
		 ucc->remote_thread_id);

	return 0;
}

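/*
 * dma_ops .prepare_rcv_buf callback: hand one receive buffer to the
 * hardware. Picks the next of the UDMA_RX_DESC_NUM RX host descriptors,
 * attaches the buffer to it and pushes it onto the free-descriptor ring.
 */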
int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t dma_dst;
	struct udma_chan *uc;
	u32 desc_num;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->config.dir != DMA_DEV_TO_MEM)
		return -EINVAL;

	if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
		return -EINVAL;

	desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
	desc_rx = uc->desc_rx + (desc_num * uc->config.hdesc_size);
	dma_dst = (dma_addr_t)dst;

	cppi5_hdesc_reset_hbdesc(desc_rx);

	cppi5_hdesc_init(desc_rx,
			 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->config.psd_size);
	cppi5_hdesc_set_pktlen(desc_rx, size);
	cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);

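	/* Write back the descriptor before the DMA engine picks it up */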
	flush_dcache_range((unsigned long)desc_rx,
			   ALIGN((unsigned long)desc_rx + uc->config.hdesc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->rchan->fd_ring, desc_rx);

	uc->num_rx_bufs++;
	uc->desc_rx_cur++;

	return 0;
}

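/*
 * dma_ops .get_cfg callback: expose channel private data to client drivers.
 * Only TI_UDMA_CHAN_PRIV_INFO is supported.
 */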
static int udma_get_cfg(struct dma *dma, u32 id, void **data)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	switch (id) {
	case TI_UDMA_CHAN_PRIV_INFO:
		uc = &ud->channels[dma->id];
		*data = &uc->cfg_data;
		return 0;
	}

	return -EINVAL;
}

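/* DMA uclass operations: the generic dma_*() helpers dispatch to these hooks */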
static const struct dma_ops udma_ops = {
	.transfer = udma_transfer,
	.of_xlate = udma_of_xlate,
	.request = udma_request,
	.rfree = udma_rfree,
	.enable = udma_enable,
	.disable = udma_disable,
	.send = udma_send,
	.receive = udma_receive,
	.prepare_rcv_buf = udma_prepare_rcv_buf,
	.get_cfg = udma_get_cfg,
};
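
/*
 * A minimal usage sketch (not part of this driver) of how a client driver
 * might drive a pair of channels through the generic DMA uclass helpers
 * declared in include/dma.h, assuming its DT node names them "tx" and "rx"
 * (error handling omitted for brevity):
 *
 *	struct ti_udma_drv_packet_data packet_data = { 0 };
 *	struct dma dma_tx, dma_rx;
 *	u8 txbuf[64], rxbuf[1536];
 *	void *pkt;
 *	int len;
 *
 *	dma_get_by_name(dev, "tx", &dma_tx);
 *	dma_get_by_name(dev, "rx", &dma_rx);
 *	dma_enable(&dma_tx);
 *	dma_enable(&dma_rx);
 *	dma_prepare_rcv_buf(&dma_rx, rxbuf, sizeof(rxbuf));
 *	dma_send(&dma_tx, txbuf, sizeof(txbuf), &packet_data);
 *	len = dma_receive(&dma_rx, &pkt, NULL);
 */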
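/*
 * Per-SoC, per-instance configuration referenced from udma_ids[] below: the
 * PSI-L thread base of the NAVSS instance, whether MEM_TO_MEM (memcpy)
 * channels may be used, and how the channel range is split into throughput
 * levels (level_start_idx[] holds the first channel index of each level).
 */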
static struct udma_match_data am654_main_data = {
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.statictr_z_mask = GENMASK(11, 0),
	.rchan_oes_offset = 0x200,
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 8, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static struct udma_match_data am654_mcu_data = {
	.psil_base = 0x6000,
	.enable_memcpy_support = true,
	.statictr_z_mask = GENMASK(11, 0),
	.rchan_oes_offset = 0x200,
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 2, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static struct udma_match_data j721e_main_data = {
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
	.rchan_oes_offset = 0x400,
	.tpl_levels = 3,
	.level_start_idx = {
		[0] = 16, /* Normal channels */
		[1] = 4, /* High Throughput channels */
		[2] = 0, /* Ultra High Throughput channels */
	},
};

static struct udma_match_data j721e_mcu_data = {
	.psil_base = 0x6000,
	.enable_memcpy_support = true,
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
	.rchan_oes_offset = 0x400,
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 2, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

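/* Device tree match table: one entry per AM654/J721e main and MCU NAVSS UDMA-P instance */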
static const struct udevice_id udma_ids[] = {
	{
		.compatible = "ti,am654-navss-main-udmap",
		.data = (ulong)&am654_main_data,
	}, {
		.compatible = "ti,am654-navss-mcu-udmap",
		.data = (ulong)&am654_mcu_data,
	}, {
		.compatible = "ti,j721e-navss-main-udmap",
		.data = (ulong)&j721e_main_data,
	}, {
		.compatible = "ti,j721e-navss-mcu-udmap",
		.data = (ulong)&j721e_mcu_data,
	},
	{ /* Sentinel */ },
};

U_BOOT_DRIVER(ti_edma3) = {
	.name = "ti-udma",
	.id = UCLASS_DMA,
	.of_match = udma_ids,
	.ops = &udma_ops,
	.probe = udma_probe,
	.priv_auto_alloc_size = sizeof(struct udma_dev),
};