Vignesh R3a9dbf32019-02-05 17:31:24 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
Nishanth Menoneaa39c62023-11-01 15:56:03 -05003 * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com
Vignesh R3a9dbf32019-02-05 17:31:24 +05304 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 */
6#define pr_fmt(fmt) "udma: " fmt
7
Simon Glass63334482019-11-14 12:57:39 -07008#include <cpu_func.h>
Simon Glass0f2af882020-05-10 11:40:05 -06009#include <log.h>
Simon Glass274e0b02020-05-10 11:39:56 -060010#include <asm/cache.h>
Vignesh R3a9dbf32019-02-05 17:31:24 +053011#include <asm/io.h>
12#include <asm/bitops.h>
13#include <malloc.h>
Simon Glass4dcacfc2020-05-10 11:40:13 -060014#include <linux/bitops.h>
Masahiro Yamada6373a172020-02-14 16:40:19 +090015#include <linux/dma-mapping.h>
Dhruva Golee6b42392022-09-20 10:56:02 +053016#include <linux/sizes.h>
Vignesh R3a9dbf32019-02-05 17:31:24 +053017#include <dm.h>
Simon Glass9bc15642020-02-03 07:36:16 -070018#include <dm/device_compat.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070019#include <dm/devres.h>
Vignesh R3a9dbf32019-02-05 17:31:24 +053020#include <dm/read.h>
21#include <dm/of_access.h>
22#include <dma.h>
23#include <dma-uclass.h>
24#include <linux/delay.h>
Vignesh Raghavendrac4106862019-12-09 10:25:32 +053025#include <linux/bitmap.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070026#include <linux/err.h>
Simon Glassbdd5f812023-09-14 18:21:46 -060027#include <linux/printk.h>
Vignesh R3a9dbf32019-02-05 17:31:24 +053028#include <linux/soc/ti/k3-navss-ringacc.h>
29#include <linux/soc/ti/cppi5.h>
30#include <linux/soc/ti/ti-udma.h>
31#include <linux/soc/ti/ti_sci_protocol.h>
Vignesh R3a9dbf32019-02-05 17:31:24 +053033
34#include "k3-udma-hwdef.h"
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +053035#include "k3-psil-priv.h"
Vignesh R3a9dbf32019-02-05 17:31:24 +053036
Vignesh Raghavendrac4106862019-12-09 10:25:32 +053037#define K3_UDMA_MAX_RFLOWS 1024
38
Vignesh R3a9dbf32019-02-05 17:31:24 +053039struct udma_chan;
40
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +053041enum k3_dma_type {
42 DMA_TYPE_UDMA = 0,
43 DMA_TYPE_BCDMA,
44 DMA_TYPE_PKTDMA,
45};
46
Vignesh R3a9dbf32019-02-05 17:31:24 +053047enum udma_mmr {
48 MMR_GCFG = 0,
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +053049 MMR_BCHANRT,
Vignesh R3a9dbf32019-02-05 17:31:24 +053050 MMR_RCHANRT,
51 MMR_TCHANRT,
Vignesh Raghavendra27e72502021-06-07 19:47:53 +053052 MMR_RCHAN,
53 MMR_TCHAN,
54 MMR_RFLOW,
Vignesh R3a9dbf32019-02-05 17:31:24 +053055 MMR_LAST,
56};
57
58static const char * const mmr_names[] = {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +053059 [MMR_GCFG] = "gcfg",
60 [MMR_BCHANRT] = "bchanrt",
61 [MMR_RCHANRT] = "rchanrt",
62 [MMR_TCHANRT] = "tchanrt",
Vignesh Raghavendra27e72502021-06-07 19:47:53 +053063 [MMR_RCHAN] = "rchan",
64 [MMR_TCHAN] = "tchan",
65 [MMR_RFLOW] = "rflow",
Vignesh R3a9dbf32019-02-05 17:31:24 +053066};
67
68struct udma_tchan {
Vignesh Raghavendra27e72502021-06-07 19:47:53 +053069 void __iomem *reg_chan;
Vignesh R3a9dbf32019-02-05 17:31:24 +053070 void __iomem *reg_rt;
71
72 int id;
73 struct k3_nav_ring *t_ring; /* Transmit ring */
74 struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +053075 int tflow_id; /* applicable only for PKTDMA */
76
77};
78
79#define udma_bchan udma_tchan
80
81struct udma_rflow {
Vignesh Raghavendra27e72502021-06-07 19:47:53 +053082 void __iomem *reg_rflow;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +053083 int id;
84 struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
85 struct k3_nav_ring *r_ring; /* Receive ring */
Vignesh R3a9dbf32019-02-05 17:31:24 +053086};
87
88struct udma_rchan {
Vignesh Raghavendra27e72502021-06-07 19:47:53 +053089 void __iomem *reg_chan;
Vignesh R3a9dbf32019-02-05 17:31:24 +053090 void __iomem *reg_rt;
91
92 int id;
Vignesh R3a9dbf32019-02-05 17:31:24 +053093};
94
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +053095struct udma_oes_offsets {
96 /* K3 UDMA Output Event Offset */
97 u32 udma_rchan;
98
99 /* BCDMA Output Event Offsets */
100 u32 bcdma_bchan_data;
101 u32 bcdma_bchan_ring;
102 u32 bcdma_tchan_data;
103 u32 bcdma_tchan_ring;
104 u32 bcdma_rchan_data;
105 u32 bcdma_rchan_ring;
106
107 /* PKTDMA Output Event Offsets */
108 u32 pktdma_tchan_flow;
109 u32 pktdma_rchan_flow;
110};
111
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +0530112#define UDMA_FLAG_PDMA_ACC32 BIT(0)
113#define UDMA_FLAG_PDMA_BURST BIT(1)
114#define UDMA_FLAG_TDTYPE BIT(2)
115
116struct udma_match_data {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530117 enum k3_dma_type type;
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +0530118 u32 psil_base;
119 bool enable_memcpy_support;
120 u32 flags;
121 u32 statictr_z_mask;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530122 struct udma_oes_offsets oes;
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +0530123
124 u8 tpl_levels;
125 u32 level_start_idx[];
126};
127
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530128enum udma_rm_range {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530129 RM_RANGE_BCHAN = 0,
130 RM_RANGE_TCHAN,
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530131 RM_RANGE_RCHAN,
132 RM_RANGE_RFLOW,
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530133 RM_RANGE_TFLOW,
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530134 RM_RANGE_LAST,
135};
136
137struct udma_tisci_rm {
138 const struct ti_sci_handle *tisci;
139 const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
140 u32 tisci_dev_id;
141
142 /* tisci information for PSI-L thread pairing/unpairing */
143 const struct ti_sci_rm_psil_ops *tisci_psil_ops;
144 u32 tisci_navss_dev_id;
145
146 struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
147};
148
Vignesh R3a9dbf32019-02-05 17:31:24 +0530149struct udma_dev {
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530150 struct udevice *dev;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530151 void __iomem *mmrs[MMR_LAST];
152
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530153 struct udma_tisci_rm tisci_rm;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530154 struct k3_nav_ringacc *ringacc;
155
156 u32 features;
157
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530158 int bchan_cnt;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530159 int tchan_cnt;
160 int echan_cnt;
161 int rchan_cnt;
162 int rflow_cnt;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530163 int tflow_cnt;
164 unsigned long *bchan_map;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530165 unsigned long *tchan_map;
166 unsigned long *rchan_map;
167 unsigned long *rflow_map;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530168 unsigned long *rflow_map_reserved;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530169 unsigned long *tflow_map;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530170
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530171 struct udma_bchan *bchans;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530172 struct udma_tchan *tchans;
173 struct udma_rchan *rchans;
174 struct udma_rflow *rflows;
175
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +0530176 struct udma_match_data *match_data;
177
Vignesh R3a9dbf32019-02-05 17:31:24 +0530178 struct udma_chan *channels;
179 u32 psil_base;
180
181 u32 ch_count;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530182};
183
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530184struct udma_chan_config {
185 u32 psd_size; /* size of Protocol Specific Data */
186 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
187 u32 hdesc_size; /* Size of a packet descriptor in packet mode */
188 int remote_thread_id;
189 u32 atype;
190 u32 src_thread;
191 u32 dst_thread;
192 enum psil_endpoint_type ep_type;
193 enum udma_tp_level channel_tpl; /* Channel Throughput Level */
194
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530195 /* PKTDMA mapped channel */
196 int mapped_channel_id;
197 /* PKTDMA default tflow or rflow for mapped channel */
198 int default_flow_id;
199
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530200 enum dma_direction dir;
201
202 unsigned int pkt_mode:1; /* TR or packet */
203 unsigned int needs_epib:1; /* whether EPIB is needed for the communication */
204 unsigned int enable_acc32:1;
205 unsigned int enable_burst:1;
206 unsigned int notdpkt:1; /* Suppress sending TDC packet */
207};
208
Vignesh R3a9dbf32019-02-05 17:31:24 +0530209struct udma_chan {
210 struct udma_dev *ud;
211 char name[20];
212
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530213 struct udma_bchan *bchan;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530214 struct udma_tchan *tchan;
215 struct udma_rchan *rchan;
216 struct udma_rflow *rflow;
217
Vignesh Raghavendra39349892019-12-04 22:17:21 +0530218 struct ti_udma_drv_chan_cfg_data cfg_data;
219
Vignesh R3a9dbf32019-02-05 17:31:24 +0530220 u32 bcnt; /* number of bytes completed since the start of the channel */
221
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530222 struct udma_chan_config config;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530223
224 u32 id;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530225
226 struct cppi5_host_desc_t *desc_tx;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530227 bool in_use;
228 void *desc_rx;
229 u32 num_rx_bufs;
230 u32 desc_rx_cur;
231
232};
233
234#define UDMA_CH_1000(ch) ((ch) * 0x1000)
235#define UDMA_CH_100(ch) ((ch) * 0x100)
236#define UDMA_CH_40(ch) ((ch) * 0x40)
237
238#ifdef PKTBUFSRX
239#define UDMA_RX_DESC_NUM PKTBUFSRX
240#else
241#define UDMA_RX_DESC_NUM 4
242#endif
243
244/* Generic register access functions */
245static inline u32 udma_read(void __iomem *base, int reg)
246{
247 u32 v;
248
249 v = __raw_readl(base + reg);
250 pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
251 return v;
252}
253
254static inline void udma_write(void __iomem *base, int reg, u32 val)
255{
256 pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
257 __raw_writel(val, base + reg);
258}
259
260static inline void udma_update_bits(void __iomem *base, int reg,
261 u32 mask, u32 val)
262{
263 u32 tmp, orig;
264
265 orig = udma_read(base, reg);
266 tmp = orig & ~mask;
267 tmp |= (val & mask);
268
269 if (tmp != orig)
270 udma_write(base, reg, tmp);
271}
272
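/*
 * The helpers above wrap __raw_readl()/__raw_writel() with debug tracing;
 * udma_update_bits() performs a read-modify-write and skips the write when
 * the masked bits are already at the requested value. Illustrative use
 * (not taken from this file):
 *
 *	udma_update_bits(uc->tchan->reg_rt, UDMA_TCHAN_RT_CTL_REG,
 *			 UDMA_CHAN_RT_CTL_EN, UDMA_CHAN_RT_CTL_EN);
 *
 * sets the enable bit while leaving the remaining CTL bits untouched.
 */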
273/* TCHANRT */
274static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
275{
276 if (!tchan)
277 return 0;
278 return udma_read(tchan->reg_rt, reg);
279}
280
281static inline void udma_tchanrt_write(struct udma_tchan *tchan,
282 int reg, u32 val)
283{
284 if (!tchan)
285 return;
286 udma_write(tchan->reg_rt, reg, val);
287}
288
289/* RCHANRT */
290static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
291{
292 if (!rchan)
293 return 0;
294 return udma_read(rchan->reg_rt, reg);
295}
296
297static inline void udma_rchanrt_write(struct udma_rchan *rchan,
298 int reg, u32 val)
299{
300 if (!rchan)
301 return;
302 udma_write(rchan->reg_rt, reg, val);
303}
304
305static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
306 u32 dst_thread)
307{
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530308 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
309
Vignesh R3a9dbf32019-02-05 17:31:24 +0530310 dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530311
312 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
313 tisci_rm->tisci_navss_dev_id,
314 src_thread, dst_thread);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530315}
316
317static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
318 u32 dst_thread)
319{
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530320 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
321
Vignesh R3a9dbf32019-02-05 17:31:24 +0530322 dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530323
324 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
325 tisci_rm->tisci_navss_dev_id,
326 src_thread, dst_thread);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530327}
328
329static inline char *udma_get_dir_text(enum dma_direction dir)
330{
331 switch (dir) {
332 case DMA_DEV_TO_MEM:
333 return "DEV_TO_MEM";
334 case DMA_MEM_TO_DEV:
335 return "MEM_TO_DEV";
336 case DMA_MEM_TO_MEM:
337 return "MEM_TO_MEM";
338 case DMA_DEV_TO_DEV:
339 return "DEV_TO_DEV";
340 default:
341 break;
342 }
343
344 return "invalid";
345}
346
Vignesh Raghavendra27e72502021-06-07 19:47:53 +0530347#include "k3-udma-u-boot.c"
348
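/*
 * The U-Boot specific helpers pulled in above (udma_alloc_tchan_raw() and
 * udma_alloc_rchan_raw(), called below under IS_ENABLED(CONFIG_K3_DM_FW))
 * program the channel cfg registers directly when TI SCI RM services are
 * not available.
 */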
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530349static void udma_reset_uchan(struct udma_chan *uc)
350{
351 memset(&uc->config, 0, sizeof(uc->config));
352 uc->config.remote_thread_id = -1;
353 uc->config.mapped_channel_id = -1;
354 uc->config.default_flow_id = -1;
355}
356
Vignesh R3a9dbf32019-02-05 17:31:24 +0530357static inline bool udma_is_chan_running(struct udma_chan *uc)
358{
359 u32 trt_ctl = 0;
360 u32 rrt_ctl = 0;
361
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530362 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530363 case DMA_DEV_TO_MEM:
364 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
365 pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
366 __func__, rrt_ctl,
367 udma_rchanrt_read(uc->rchan,
368 UDMA_RCHAN_RT_PEER_RT_EN_REG));
369 break;
370 case DMA_MEM_TO_DEV:
371 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
372 pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
373 __func__, trt_ctl,
374 udma_tchanrt_read(uc->tchan,
375 UDMA_TCHAN_RT_PEER_RT_EN_REG));
376 break;
377 case DMA_MEM_TO_MEM:
378 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
379 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
380 break;
381 default:
382 break;
383 }
384
385 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
386 return true;
387
388 return false;
389}
390
Vignesh R3a9dbf32019-02-05 17:31:24 +0530391static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
392{
393 struct k3_nav_ring *ring = NULL;
394 int ret = -ENOENT;
395
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530396 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530397 case DMA_DEV_TO_MEM:
Vignesh Raghavendra2db3b282020-07-06 13:26:26 +0530398 ring = uc->rflow->r_ring;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530399 break;
400 case DMA_MEM_TO_DEV:
401 ring = uc->tchan->tc_ring;
402 break;
403 case DMA_MEM_TO_MEM:
404 ring = uc->tchan->tc_ring;
405 break;
406 default:
407 break;
408 }
409
410 if (ring && k3_nav_ringacc_ring_get_occ(ring))
411 ret = k3_nav_ringacc_ring_pop(ring, addr);
412
413 return ret;
414}
415
416static void udma_reset_rings(struct udma_chan *uc)
417{
418 struct k3_nav_ring *ring1 = NULL;
419 struct k3_nav_ring *ring2 = NULL;
420
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530421 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530422 case DMA_DEV_TO_MEM:
Vignesh Raghavendra2db3b282020-07-06 13:26:26 +0530423 ring1 = uc->rflow->fd_ring;
424 ring2 = uc->rflow->r_ring;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530425 break;
426 case DMA_MEM_TO_DEV:
427 ring1 = uc->tchan->t_ring;
428 ring2 = uc->tchan->tc_ring;
429 break;
430 case DMA_MEM_TO_MEM:
431 ring1 = uc->tchan->t_ring;
432 ring2 = uc->tchan->tc_ring;
433 break;
434 default:
435 break;
436 }
437
438 if (ring1)
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530439 k3_nav_ringacc_ring_reset_dma(ring1, k3_nav_ringacc_ring_get_occ(ring1));
Vignesh R3a9dbf32019-02-05 17:31:24 +0530440 if (ring2)
441 k3_nav_ringacc_ring_reset(ring2);
442}
443
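/*
 * The RT byte/packet counters below are cleared by writing back the value
 * that was just read; the assumption is that the hardware decrements the
 * counter by the written amount, so read-then-write of the same value
 * zeroes it.
 */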
444static void udma_reset_counters(struct udma_chan *uc)
445{
446 u32 val;
447
448 if (uc->tchan) {
449 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
450 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
451
452 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
453 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
454
455 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
456 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
457
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530458 if (!uc->bchan) {
459 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
460 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
461 }
Vignesh R3a9dbf32019-02-05 17:31:24 +0530462 }
463
464 if (uc->rchan) {
465 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
466 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
467
468 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
469 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
470
471 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
472 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
473
474 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
475 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
476 }
477
478 uc->bcnt = 0;
479}
480
481static inline int udma_stop_hard(struct udma_chan *uc)
482{
483 pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);
484
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530485 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530486 case DMA_DEV_TO_MEM:
487 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
488 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
489 break;
490 case DMA_MEM_TO_DEV:
491 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
492 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
493 break;
494 case DMA_MEM_TO_MEM:
495 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
496 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
497 break;
498 default:
499 return -EINVAL;
500 }
501
502 return 0;
503}
504
505static int udma_start(struct udma_chan *uc)
506{
507 /* Channel is already running, no need to proceed further */
508 if (udma_is_chan_running(uc))
509 goto out;
510
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530511 pr_debug("%s: chan:%d dir:%s\n",
512 __func__, uc->id, udma_get_dir_text(uc->config.dir));
Vignesh R3a9dbf32019-02-05 17:31:24 +0530513
514 /* Make sure that we clear the teardown bit, if it is set */
515 udma_stop_hard(uc);
516
517 /* Reset all counters */
518 udma_reset_counters(uc);
519
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530520 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530521 case DMA_DEV_TO_MEM:
522 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
523 UDMA_CHAN_RT_CTL_EN);
524
525 /* Enable remote */
526 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
527 UDMA_PEER_RT_EN_ENABLE);
528
529 pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
530 __func__,
531 udma_rchanrt_read(uc->rchan,
532 UDMA_RCHAN_RT_CTL_REG),
533 udma_rchanrt_read(uc->rchan,
534 UDMA_RCHAN_RT_PEER_RT_EN_REG));
535 break;
536 case DMA_MEM_TO_DEV:
537 /* Enable remote */
538 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
539 UDMA_PEER_RT_EN_ENABLE);
540
541 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
542 UDMA_CHAN_RT_CTL_EN);
543
544 pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
545 __func__,
Vignesh Raghavendrac2237992019-12-09 10:25:36 +0530546 udma_tchanrt_read(uc->tchan,
Vignesh R3a9dbf32019-02-05 17:31:24 +0530547 UDMA_TCHAN_RT_CTL_REG),
Vignesh Raghavendrac2237992019-12-09 10:25:36 +0530548 udma_tchanrt_read(uc->tchan,
Vignesh R3a9dbf32019-02-05 17:31:24 +0530549 UDMA_TCHAN_RT_PEER_RT_EN_REG));
550 break;
551 case DMA_MEM_TO_MEM:
552 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
553 UDMA_CHAN_RT_CTL_EN);
554 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
555 UDMA_CHAN_RT_CTL_EN);
556
557 break;
558 default:
559 return -EINVAL;
560 }
561
562 pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
563out:
564 return 0;
565}
566
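/*
 * Teardown helpers: for MEM_TO_DEV the TDOWN bit is set in the local
 * TCHAN RT_CTL register, for DEV_TO_MEM teardown is requested from the
 * peer via PEER_RT_EN. With sync == true the functions poll (roughly
 * 1000 x 1us) for the channel enable bit to clear and warn if the peer
 * is still enabled afterwards.
 */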
567static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
568{
569 int i = 0;
570 u32 val;
571
572 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
573 UDMA_CHAN_RT_CTL_EN |
574 UDMA_CHAN_RT_CTL_TDOWN);
575
576 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
577
578 while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
579 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
580 udelay(1);
581 if (i > 1000) {
582 printf(" %s TIMEOUT !\n", __func__);
583 break;
584 }
585 i++;
586 }
587
588 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
589 if (val & UDMA_PEER_RT_EN_ENABLE)
590 printf("%s: peer not stopped TIMEOUT !\n", __func__);
591}
592
593static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
594{
595 int i = 0;
596 u32 val;
597
598 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
599 UDMA_PEER_RT_EN_ENABLE |
600 UDMA_PEER_RT_EN_TEARDOWN);
601
602 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
603
604 while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
605 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
606 udelay(1);
607 if (i > 1000) {
608 printf("%s TIMEOUT !\n", __func__);
609 break;
610 }
611 i++;
612 }
613
614 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
615 if (val & UDMA_PEER_RT_EN_ENABLE)
616 printf("%s: peer not stopped TIMEOUT !\n", __func__);
617}
618
619static inline int udma_stop(struct udma_chan *uc)
620{
621 pr_debug("%s: chan:%d dir:%s\n",
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530622 __func__, uc->id, udma_get_dir_text(uc->config.dir));
Vignesh R3a9dbf32019-02-05 17:31:24 +0530623
624 udma_reset_counters(uc);
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530625 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530626 case DMA_DEV_TO_MEM:
627 udma_stop_dev2mem(uc, true);
628 break;
629 case DMA_MEM_TO_DEV:
630 udma_stop_mem2dev(uc, true);
631 break;
632 case DMA_MEM_TO_MEM:
633 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
634 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
635 break;
636 default:
637 return -EINVAL;
638 }
639
640 return 0;
641}
642
643static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
644{
645 int i = 1;
646
647 while (udma_pop_from_ring(uc, paddr)) {
648 udelay(1);
649 if (!(i % 1000000))
650 printf(".");
651 i++;
652 }
653}
654
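/*
 * rflow allocation: flows [0..rchan_cnt) are the per-channel default flows
 * (see the rflow_map_reserved setup in udma_setup_resources()), so the
 * dynamic search below starts at rchan_cnt to pick a free GP flow, while a
 * specific flow can still be requested explicitly by id.
 */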
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530655static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
656{
657 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
658
659 if (id >= 0) {
660 if (test_bit(id, ud->rflow_map)) {
661 dev_err(ud->dev, "rflow%d is in use\n", id);
662 return ERR_PTR(-ENOENT);
663 }
664 } else {
665 bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
666 ud->rflow_cnt);
667
668 id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
669 if (id >= ud->rflow_cnt)
670 return ERR_PTR(-ENOENT);
671 }
672
673 __set_bit(id, ud->rflow_map);
674 return &ud->rflows[id];
675}
676
Vignesh R3a9dbf32019-02-05 17:31:24 +0530677#define UDMA_RESERVE_RESOURCE(res) \
678static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
679 int id) \
680{ \
681 if (id >= 0) { \
682 if (test_bit(id, ud->res##_map)) { \
683 dev_err(ud->dev, "%s%d is in use\n", #res, id); \
684 return ERR_PTR(-ENOENT); \
685 } \
686 } else { \
687 id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \
688 if (id == ud->res##_cnt) { \
689 return ERR_PTR(-ENOENT); \
690 } \
691 } \
692 \
693 __set_bit(id, ud->res##_map); \
694 return &ud->res##s[id]; \
695}
696
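/*
 * Expand the generic reservation helper for the channel types that only
 * need the plain bitmap allocator: __udma_reserve_tchan() and
 * __udma_reserve_rchan().
 */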
697UDMA_RESERVE_RESOURCE(tchan);
698UDMA_RESERVE_RESOURCE(rchan);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530699
700static int udma_get_tchan(struct udma_chan *uc)
701{
702 struct udma_dev *ud = uc->ud;
703
704 if (uc->tchan) {
705 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
706 uc->id, uc->tchan->id);
707 return 0;
708 }
709
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530710 uc->tchan = __udma_reserve_tchan(ud, uc->config.mapped_channel_id);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530711 if (IS_ERR(uc->tchan))
712 return PTR_ERR(uc->tchan);
713
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530714 if (ud->tflow_cnt) {
715 int tflow_id;
716
717 /* Only PKTDMA have support for tx flows */
718 if (uc->config.default_flow_id >= 0)
719 tflow_id = uc->config.default_flow_id;
720 else
721 tflow_id = uc->tchan->id;
722
723 if (test_bit(tflow_id, ud->tflow_map)) {
724 dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
725 __clear_bit(uc->tchan->id, ud->tchan_map);
726 uc->tchan = NULL;
727 return -ENOENT;
728 }
729
730 uc->tchan->tflow_id = tflow_id;
731 __set_bit(tflow_id, ud->tflow_map);
732 } else {
733 uc->tchan->tflow_id = -1;
734 }
735
Vignesh R3a9dbf32019-02-05 17:31:24 +0530736 pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);
737
Vignesh R3a9dbf32019-02-05 17:31:24 +0530738 return 0;
739}
740
741static int udma_get_rchan(struct udma_chan *uc)
742{
743 struct udma_dev *ud = uc->ud;
744
745 if (uc->rchan) {
746 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
747 uc->id, uc->rchan->id);
748 return 0;
749 }
750
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530751 uc->rchan = __udma_reserve_rchan(ud, uc->config.mapped_channel_id);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530752 if (IS_ERR(uc->rchan))
753 return PTR_ERR(uc->rchan);
754
755 pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);
756
Vignesh R3a9dbf32019-02-05 17:31:24 +0530757 return 0;
758}
759
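/*
 * MEM_TO_MEM transfers need a tchan/rchan pair that shares the same index;
 * the search below simply takes the first index that is free in both maps.
 */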
760static int udma_get_chan_pair(struct udma_chan *uc)
761{
762 struct udma_dev *ud = uc->ud;
763 int chan_id, end;
764
765 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
766 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
767 uc->id, uc->tchan->id);
768 return 0;
769 }
770
771 if (uc->tchan) {
772 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
773 uc->id, uc->tchan->id);
774 return -EBUSY;
775 } else if (uc->rchan) {
776 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
777 uc->id, uc->rchan->id);
778 return -EBUSY;
779 }
780
781 /* Can be optimized, but let's have it like this for now */
782 end = min(ud->tchan_cnt, ud->rchan_cnt);
783 for (chan_id = 0; chan_id < end; chan_id++) {
784 if (!test_bit(chan_id, ud->tchan_map) &&
785 !test_bit(chan_id, ud->rchan_map))
786 break;
787 }
788
789 if (chan_id == end)
790 return -ENOENT;
791
792 __set_bit(chan_id, ud->tchan_map);
793 __set_bit(chan_id, ud->rchan_map);
794 uc->tchan = &ud->tchans[chan_id];
795 uc->rchan = &ud->rchans[chan_id];
796
797 pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);
798
Vignesh R3a9dbf32019-02-05 17:31:24 +0530799 return 0;
800}
801
802static int udma_get_rflow(struct udma_chan *uc, int flow_id)
803{
804 struct udma_dev *ud = uc->ud;
805
806 if (uc->rflow) {
807 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
808 uc->id, uc->rflow->id);
809 return 0;
810 }
811
812 if (!uc->rchan)
813 dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);
814
815 uc->rflow = __udma_reserve_rflow(ud, flow_id);
816 if (IS_ERR(uc->rflow))
817 return PTR_ERR(uc->rflow);
818
819 pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
820 return 0;
821}
822
823static void udma_put_rchan(struct udma_chan *uc)
824{
825 struct udma_dev *ud = uc->ud;
826
827 if (uc->rchan) {
828 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
829 uc->rchan->id);
830 __clear_bit(uc->rchan->id, ud->rchan_map);
831 uc->rchan = NULL;
832 }
833}
834
835static void udma_put_tchan(struct udma_chan *uc)
836{
837 struct udma_dev *ud = uc->ud;
838
839 if (uc->tchan) {
840 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
841 uc->tchan->id);
842 __clear_bit(uc->tchan->id, ud->tchan_map);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530843 if (uc->tchan->tflow_id >= 0)
844 __clear_bit(uc->tchan->tflow_id, ud->tflow_map);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530845 uc->tchan = NULL;
846 }
847}
848
849static void udma_put_rflow(struct udma_chan *uc)
850{
851 struct udma_dev *ud = uc->ud;
852
853 if (uc->rflow) {
854 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
855 uc->rflow->id);
856 __clear_bit(uc->rflow->id, ud->rflow_map);
857 uc->rflow = NULL;
858 }
859}
860
861static void udma_free_tx_resources(struct udma_chan *uc)
862{
863 if (!uc->tchan)
864 return;
865
866 k3_nav_ringacc_ring_free(uc->tchan->t_ring);
867 k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
868 uc->tchan->t_ring = NULL;
869 uc->tchan->tc_ring = NULL;
870
871 udma_put_tchan(uc);
872}
873
874static int udma_alloc_tx_resources(struct udma_chan *uc)
875{
876 struct k3_nav_ring_cfg ring_cfg;
877 struct udma_dev *ud = uc->ud;
MD Danish Anwar25abdb32024-01-30 11:48:04 +0530878 struct udma_tchan *tchan;
879 int ring_idx, ret;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530880
881 ret = udma_get_tchan(uc);
882 if (ret)
883 return ret;
884
MD Danish Anwar25abdb32024-01-30 11:48:04 +0530885 tchan = uc->tchan;
Udit Kumarf084e402024-02-21 19:53:44 +0530886 if (tchan->tflow_id > 0)
MD Danish Anwar25abdb32024-01-30 11:48:04 +0530887 ring_idx = tchan->tflow_id;
888 else
Udit Kumarf084e402024-02-21 19:53:44 +0530889 ring_idx = tchan->id;
MD Danish Anwar25abdb32024-01-30 11:48:04 +0530890
891 ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
Vignesh Raghavendrad7c3eb02020-07-06 13:26:27 +0530892 &uc->tchan->t_ring,
893 &uc->tchan->tc_ring);
894 if (ret) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530895 ret = -EBUSY;
896 goto err_tx_ring;
897 }
898
Vignesh R3a9dbf32019-02-05 17:31:24 +0530899 memset(&ring_cfg, 0, sizeof(ring_cfg));
900 ring_cfg.size = 16;
901 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
Vignesh Raghavendra0fe24d32019-12-09 10:25:37 +0530902 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530903
904 ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
905 ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
906
907 if (ret)
908 goto err_ringcfg;
909
910 return 0;
911
912err_ringcfg:
913 k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
914 uc->tchan->tc_ring = NULL;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530915 k3_nav_ringacc_ring_free(uc->tchan->t_ring);
916 uc->tchan->t_ring = NULL;
917err_tx_ring:
918 udma_put_tchan(uc);
919
920 return ret;
921}
922
923static void udma_free_rx_resources(struct udma_chan *uc)
924{
925 if (!uc->rchan)
926 return;
927
Vignesh Raghavendra2db3b282020-07-06 13:26:26 +0530928 if (uc->rflow) {
929 k3_nav_ringacc_ring_free(uc->rflow->fd_ring);
930 k3_nav_ringacc_ring_free(uc->rflow->r_ring);
931 uc->rflow->fd_ring = NULL;
932 uc->rflow->r_ring = NULL;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530933
Vignesh Raghavendra2db3b282020-07-06 13:26:26 +0530934 udma_put_rflow(uc);
935 }
936
Vignesh R3a9dbf32019-02-05 17:31:24 +0530937 udma_put_rchan(uc);
938}
939
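/*
 * The free-descriptor ring index depends on the DMA type: PKTDMA rings are
 * offset by the number of tx flows, while UDMA/BCDMA rings sit behind the
 * bchan/tchan/echan ranges (see the fd_ring_id computation below).
 */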
940static int udma_alloc_rx_resources(struct udma_chan *uc)
941{
942 struct k3_nav_ring_cfg ring_cfg;
943 struct udma_dev *ud = uc->ud;
Vignesh Raghavendrad7c3eb02020-07-06 13:26:27 +0530944 struct udma_rflow *rflow;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530945 int fd_ring_id;
946 int ret;
947
948 ret = udma_get_rchan(uc);
949 if (ret)
950 return ret;
951
952 /* For MEM_TO_MEM we don't need rflow or rings */
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530953 if (uc->config.dir == DMA_MEM_TO_MEM)
Vignesh R3a9dbf32019-02-05 17:31:24 +0530954 return 0;
955
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530956 if (uc->config.default_flow_id >= 0)
957 ret = udma_get_rflow(uc, uc->config.default_flow_id);
958 else
959 ret = udma_get_rflow(uc, uc->rchan->id);
960
Vignesh R3a9dbf32019-02-05 17:31:24 +0530961 if (ret) {
962 ret = -EBUSY;
963 goto err_rflow;
964 }
965
Vignesh Raghavendrad7c3eb02020-07-06 13:26:27 +0530966 rflow = uc->rflow;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530967 if (ud->tflow_cnt) {
968 fd_ring_id = ud->tflow_cnt + rflow->id;
969 } else {
970 fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
971 uc->rchan->id;
972 }
973
Vignesh Raghavendrad7c3eb02020-07-06 13:26:27 +0530974 ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
975 &rflow->fd_ring, &rflow->r_ring);
976 if (ret) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530977 ret = -EBUSY;
978 goto err_rx_ring;
979 }
980
Vignesh R3a9dbf32019-02-05 17:31:24 +0530981 memset(&ring_cfg, 0, sizeof(ring_cfg));
982 ring_cfg.size = 16;
983 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
Vignesh Raghavendra0fe24d32019-12-09 10:25:37 +0530984 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530985
Vignesh Raghavendrad7c3eb02020-07-06 13:26:27 +0530986 ret = k3_nav_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
987 ret |= k3_nav_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530988 if (ret)
989 goto err_ringcfg;
990
991 return 0;
992
993err_ringcfg:
Vignesh Raghavendrad7c3eb02020-07-06 13:26:27 +0530994 k3_nav_ringacc_ring_free(rflow->r_ring);
995 rflow->r_ring = NULL;
996 k3_nav_ringacc_ring_free(rflow->fd_ring);
997 rflow->fd_ring = NULL;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530998err_rx_ring:
999 udma_put_rflow(uc);
1000err_rflow:
1001 udma_put_rchan(uc);
1002
1003 return ret;
1004}
1005
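/*
 * Configure the TX channel through TI SCI: the channel type selects packet
 * mode vs. third-party block copy, tx_fetch_size is the descriptor (header)
 * size in 32-bit words and txcq_qnum points at the completion ring.
 */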
1006static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
1007{
1008 struct udma_dev *ud = uc->ud;
1009 int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1010 struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301011 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301012 u32 mode;
1013 int ret;
1014
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301015 if (uc->config.pkt_mode)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301016 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1017 else
1018 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1019
1020 req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
1021 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
1022 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301023 req.nav_id = tisci_rm->tisci_dev_id;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301024 req.index = uc->tchan->id;
1025 req.tx_chan_type = mode;
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301026 if (uc->config.dir == DMA_MEM_TO_MEM)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301027 req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1028 else
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301029 req.tx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1030 uc->config.psd_size,
Vignesh R3a9dbf32019-02-05 17:31:24 +05301031 0) >> 2;
1032 req.txcq_qnum = tc_ring;
1033
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301034 ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
Vignesh Raghavendra27e72502021-06-07 19:47:53 +05301035 if (ret) {
Vignesh R3a9dbf32019-02-05 17:31:24 +05301036 dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);
Vignesh Raghavendra27e72502021-06-07 19:47:53 +05301037 return ret;
1038 }
Vignesh R3a9dbf32019-02-05 17:31:24 +05301039
1040 /*
1041 * The TI SCI call above handles firewall configuration; the cfg
1042 * register configuration still has to be done locally in the
1043 * absence of RM services.
1044 */
1045 if (IS_ENABLED(CONFIG_K3_DM_FW))
1046 udma_alloc_tchan_raw(uc);
1047
1048 return 0;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301049}
1050
1051static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
1052{
1053 struct udma_dev *ud = uc->ud;
Vignesh Raghavendra2db3b282020-07-06 13:26:26 +05301054 int fd_ring = k3_nav_ringacc_get_ring_id(uc->rflow->fd_ring);
1055 int rx_ring = k3_nav_ringacc_get_ring_id(uc->rflow->r_ring);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301056 int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1057 struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
1058 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301059 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301060 u32 mode;
1061 int ret;
1062
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301063 if (uc->config.pkt_mode)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301064 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1065 else
1066 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1067
1068 req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
1069 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301070 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301071 req.nav_id = tisci_rm->tisci_dev_id;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301072 req.index = uc->rchan->id;
1073 req.rx_chan_type = mode;
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301074 if (uc->config.dir == DMA_MEM_TO_MEM) {
Vignesh R3a9dbf32019-02-05 17:31:24 +05301075 req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1076 req.rxcq_qnum = tc_ring;
1077 } else {
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301078 req.rx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1079 uc->config.psd_size,
Vignesh R3a9dbf32019-02-05 17:31:24 +05301080 0) >> 2;
1081 req.rxcq_qnum = rx_ring;
1082 }
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301083 if (ud->match_data->type == DMA_TYPE_UDMA &&
1084 uc->rflow->id != uc->rchan->id &&
1085 uc->config.dir != DMA_MEM_TO_MEM) {
Vignesh R3a9dbf32019-02-05 17:31:24 +05301086 req.flowid_start = uc->rflow->id;
1087 req.flowid_cnt = 1;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301088 req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
1089 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301090 }
1091
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301092 ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301093 if (ret) {
1094 dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
1095 uc->rchan->id, ret);
1096 return ret;
1097 }
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301098 if (uc->config.dir == DMA_MEM_TO_MEM)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301099 return ret;
1100
1101 flow_req.valid_params =
1102 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
1103 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
1104 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
1105 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
1106 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1107 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
1108 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
1109 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
1110 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
1111 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1112 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1113 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1114 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
1115 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;
1116
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301117 flow_req.nav_id = tisci_rm->tisci_dev_id;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301118 flow_req.flow_index = uc->rflow->id;
1119
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301120 if (uc->config.needs_epib)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301121 flow_req.rx_einfo_present = 1;
1122 else
1123 flow_req.rx_einfo_present = 0;
1124
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301125 if (uc->config.psd_size)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301126 flow_req.rx_psinfo_present = 1;
1127 else
1128 flow_req.rx_psinfo_present = 0;
1129
1130 flow_req.rx_error_handling = 0;
1131 flow_req.rx_desc_type = 0;
1132 flow_req.rx_dest_qnum = rx_ring;
1133 flow_req.rx_src_tag_hi_sel = 2;
1134 flow_req.rx_src_tag_lo_sel = 4;
1135 flow_req.rx_dest_tag_hi_sel = 5;
1136 flow_req.rx_dest_tag_lo_sel = 4;
1137 flow_req.rx_fdq0_sz0_qnum = fd_ring;
1138 flow_req.rx_fdq1_qnum = fd_ring;
1139 flow_req.rx_fdq2_qnum = fd_ring;
1140 flow_req.rx_fdq3_qnum = fd_ring;
1141 flow_req.rx_ps_location = 0;
1142
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301143 ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
1144 &flow_req);
Vignesh Raghavendra27e72502021-06-07 19:47:53 +05301145 if (ret) {
Vignesh R3a9dbf32019-02-05 17:31:24 +05301146 dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
1147 uc->rchan->id, uc->rflow->id, ret);
Vignesh Raghavendra27e72502021-06-07 19:47:53 +05301148 return ret;
1149 }
Vignesh R3a9dbf32019-02-05 17:31:24 +05301150
1151 /*
1152 * The TI SCI call above handles firewall configuration; the cfg
1153 * register configuration still has to be done locally in the
1154 * absence of RM services.
1155 */
1156 if (IS_ENABLED(CONFIG_K3_DM_FW))
1157 udma_alloc_rchan_raw(uc);
1158
1159 return 0;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301160}
1161
1162static int udma_alloc_chan_resources(struct udma_chan *uc)
1163{
1164 struct udma_dev *ud = uc->ud;
1165 int ret;
1166
1167 pr_debug("%s: chan:%d as %s\n",
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301168 __func__, uc->id, udma_get_dir_text(uc->config.dir));
Vignesh R3a9dbf32019-02-05 17:31:24 +05301169
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301170 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +05301171 case DMA_MEM_TO_MEM:
1172 /* Non synchronized - mem to mem type of transfer */
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301173 uc->config.pkt_mode = false;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301174 ret = udma_get_chan_pair(uc);
1175 if (ret)
1176 return ret;
1177
1178 ret = udma_alloc_tx_resources(uc);
1179 if (ret)
1180 goto err_free_res;
1181
1182 ret = udma_alloc_rx_resources(uc);
1183 if (ret)
1184 goto err_free_res;
1185
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301186 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1187 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301188 break;
1189 case DMA_MEM_TO_DEV:
1190 /* Slave transfer synchronized - mem to dev (TX) transfer */
1191 ret = udma_alloc_tx_resources(uc);
1192 if (ret)
1193 goto err_free_res;
1194
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301195 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1196 uc->config.dst_thread = uc->config.remote_thread_id;
1197 uc->config.dst_thread |= 0x8000;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301198
1199 break;
1200 case DMA_DEV_TO_MEM:
1201 /* Slave transfer synchronized - dev to mem (RX) transfer */
1202 ret = udma_alloc_rx_resources(uc);
1203 if (ret)
1204 goto err_free_res;
1205
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301206 uc->config.src_thread = uc->config.remote_thread_id;
1207 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301208
1209 break;
1210 default:
1211 /* Cannot happen */
1212 pr_debug("%s: chan:%d invalid direction (%u)\n",
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301213 __func__, uc->id, uc->config.dir);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301214 return -EINVAL;
1215 }
1216
1217 /* We have channel indexes and rings */
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301218 if (uc->config.dir == DMA_MEM_TO_MEM) {
Vignesh R3a9dbf32019-02-05 17:31:24 +05301219 ret = udma_alloc_tchan_sci_req(uc);
1220 if (ret)
1221 goto err_free_res;
1222
1223 ret = udma_alloc_rchan_sci_req(uc);
1224 if (ret)
1225 goto err_free_res;
1226 } else {
1227 /* Slave transfer */
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301228 if (uc->config.dir == DMA_MEM_TO_DEV) {
Vignesh R3a9dbf32019-02-05 17:31:24 +05301229 ret = udma_alloc_tchan_sci_req(uc);
1230 if (ret)
1231 goto err_free_res;
1232 } else {
1233 ret = udma_alloc_rchan_sci_req(uc);
1234 if (ret)
1235 goto err_free_res;
1236 }
1237 }
1238
Peter Ujfalusid15f8652019-04-25 12:08:15 +05301239 if (udma_is_chan_running(uc)) {
1240 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1241 udma_stop(uc);
1242 if (udma_is_chan_running(uc)) {
1243 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1244 goto err_free_res;
1245 }
1246 }
1247
Vignesh R3a9dbf32019-02-05 17:31:24 +05301248 /* PSI-L pairing */
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301249 ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301250 if (ret) {
1251 dev_err(ud->dev, "PSI-L pairing failed: %d\n", ret);
1252 goto err_free_res;
1253 }
1254
1255 return 0;
1256
1257err_free_res:
1258 udma_free_tx_resources(uc);
1259 udma_free_rx_resources(uc);
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301260 uc->config.remote_thread_id = -1;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301261 return ret;
1262}
1263
1264static void udma_free_chan_resources(struct udma_chan *uc)
1265{
Vignesh Raghavendrabe7bdcc2020-09-17 20:11:22 +05301266 /* Hard reset UDMA channel */
1267 udma_stop_hard(uc);
1268 udma_reset_counters(uc);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301269
1270 /* Release PSI-L pairing */
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301271 udma_navss_psil_unpair(uc->ud, uc->config.src_thread, uc->config.dst_thread);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301272
1273 /* Reset the rings for a new start */
1274 udma_reset_rings(uc);
1275 udma_free_tx_resources(uc);
1276 udma_free_rx_resources(uc);
1277
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301278 uc->config.remote_thread_id = -1;
1279 uc->config.dir = DMA_MEM_TO_MEM;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301280}
1281
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301282static const char * const range_names[] = {
1283 [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
1284 [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
1285 [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
1286 [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
1287 [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
1288};
1289
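/*
 * Channel counts are discovered from the GCFG capability registers: CAP2
 * (offset 0x28) packs the bchan/tchan/echan/rchan counts depending on the
 * DMA type, CAP3 (0x2c) holds the rflow count and, for PKTDMA only, CAP4
 * (0x30) holds the tflow count.
 */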
Vignesh R3a9dbf32019-02-05 17:31:24 +05301290static int udma_get_mmrs(struct udevice *dev)
1291{
1292 struct udma_dev *ud = dev_get_priv(dev);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301293 u32 cap2, cap3, cap4;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301294 int i;
1295
Matthias Schiffer47331932023-09-27 15:33:34 +02001296 ud->mmrs[MMR_GCFG] = dev_read_addr_name_ptr(dev, mmr_names[MMR_GCFG]);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301297 if (!ud->mmrs[MMR_GCFG])
1298 return -EINVAL;
1299
1300 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
1301 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
1302
1303 switch (ud->match_data->type) {
1304 case DMA_TYPE_UDMA:
1305 ud->rflow_cnt = cap3 & 0x3fff;
1306 ud->tchan_cnt = cap2 & 0x1ff;
1307 ud->echan_cnt = (cap2 >> 9) & 0x1ff;
1308 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1309 break;
1310 case DMA_TYPE_BCDMA:
1311 ud->bchan_cnt = cap2 & 0x1ff;
1312 ud->tchan_cnt = (cap2 >> 9) & 0x1ff;
1313 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1314 break;
1315 case DMA_TYPE_PKTDMA:
1316 cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
1317 ud->tchan_cnt = cap2 & 0x1ff;
1318 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1319 ud->rflow_cnt = cap3 & 0x3fff;
1320 ud->tflow_cnt = cap4 & 0x3fff;
1321 break;
1322 default:
1323 return -EINVAL;
1324 }
1325
1326 for (i = 1; i < MMR_LAST; i++) {
1327 if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
1328 continue;
1329 if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
1330 continue;
1331 if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
1332 continue;
1333
Matthias Schiffer47331932023-09-27 15:33:34 +02001334 ud->mmrs[i] = dev_read_addr_name_ptr(dev, mmr_names[i]);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301335 if (!ud->mmrs[i])
1336 return -EINVAL;
1337 }
1338
1339 return 0;
1340}
1341
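/*
 * Resource setup: the ti,sci-rm-range-* properties describe which channel
 * and flow ranges this host owns. The allocation bitmaps are first filled
 * (everything "in use") and the owned ranges are then cleared, so a set
 * bit means the resource is not available to this driver.
 */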
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301342static int udma_setup_resources(struct udma_dev *ud)
1343{
1344 struct udevice *dev = ud->dev;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301345 int i;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301346 struct ti_sci_resource_desc *rm_desc;
1347 struct ti_sci_resource *rm_res;
1348 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301349
1350 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1351 sizeof(unsigned long), GFP_KERNEL);
1352 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1353 GFP_KERNEL);
1354 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1355 sizeof(unsigned long), GFP_KERNEL);
1356 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1357 GFP_KERNEL);
1358 ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
1359 sizeof(unsigned long), GFP_KERNEL);
1360 ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
1361 sizeof(unsigned long),
1362 GFP_KERNEL);
1363 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
1364 GFP_KERNEL);
1365
1366 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
1367 !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
1368 !ud->rflows)
1369 return -ENOMEM;
1370
1371 /*
1372 * RX flows with the same IDs as RX channels are reserved to be used
1373 * as default flows if the remote HW can't generate flow_ids. Those
1374 * RX flows can only be requested explicitly by ID.
1375 */
1376 bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);
1377
1378 /* Get resource ranges from tisci */
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301379 for (i = 0; i < RM_RANGE_LAST; i++) {
1380 if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
1381 continue;
1382
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301383 tisci_rm->rm_ranges[i] =
1384 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1385 tisci_rm->tisci_dev_id,
1386 (char *)range_names[i]);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301387 }
Vignesh R3a9dbf32019-02-05 17:31:24 +05301388
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301389 /* tchan ranges */
1390 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1391 if (IS_ERR(rm_res)) {
1392 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1393 } else {
1394 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1395 for (i = 0; i < rm_res->sets; i++) {
1396 rm_desc = &rm_res->desc[i];
1397 bitmap_clear(ud->tchan_map, rm_desc->start,
1398 rm_desc->num);
1399 }
1400 }
1401
1402 /* rchan and matching default flow ranges */
1403 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1404 if (IS_ERR(rm_res)) {
1405 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1406 bitmap_zero(ud->rflow_map, ud->rchan_cnt);
1407 } else {
1408 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1409 bitmap_fill(ud->rflow_map, ud->rchan_cnt);
1410 for (i = 0; i < rm_res->sets; i++) {
1411 rm_desc = &rm_res->desc[i];
1412 bitmap_clear(ud->rchan_map, rm_desc->start,
1413 rm_desc->num);
1414 bitmap_clear(ud->rflow_map, rm_desc->start,
1415 rm_desc->num);
1416 }
1417 }
1418
1419 /* GP rflow ranges */
1420 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
1421 if (IS_ERR(rm_res)) {
1422 bitmap_clear(ud->rflow_map, ud->rchan_cnt,
1423 ud->rflow_cnt - ud->rchan_cnt);
1424 } else {
1425 bitmap_set(ud->rflow_map, ud->rchan_cnt,
1426 ud->rflow_cnt - ud->rchan_cnt);
1427 for (i = 0; i < rm_res->sets; i++) {
1428 rm_desc = &rm_res->desc[i];
1429 bitmap_clear(ud->rflow_map, rm_desc->start,
1430 rm_desc->num);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301431 }
1432 }
1433
1434 return 0;
1435}
1436
1437static int bcdma_setup_resources(struct udma_dev *ud)
1438{
1439 int i;
1440 struct udevice *dev = ud->dev;
1441 struct ti_sci_resource_desc *rm_desc;
1442 struct ti_sci_resource *rm_res;
1443 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1444
1445 ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
1446 sizeof(unsigned long), GFP_KERNEL);
1447 ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
1448 GFP_KERNEL);
1449 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1450 sizeof(unsigned long), GFP_KERNEL);
1451 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1452 GFP_KERNEL);
1453 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1454 sizeof(unsigned long), GFP_KERNEL);
1455 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1456 GFP_KERNEL);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301457 ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
1458 GFP_KERNEL);
1459
1460 if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301461 !ud->bchans || !ud->tchans || !ud->rchans ||
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301462 !ud->rflows)
1463 return -ENOMEM;
1464
1465 /* Get resource ranges from tisci */
1466 for (i = 0; i < RM_RANGE_LAST; i++) {
1467 if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
1468 continue;
1469
1470 tisci_rm->rm_ranges[i] =
1471 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1472 tisci_rm->tisci_dev_id,
1473 (char *)range_names[i]);
1474 }
1475
1476 /* bchan ranges */
1477 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
1478 if (IS_ERR(rm_res)) {
1479 bitmap_zero(ud->bchan_map, ud->bchan_cnt);
1480 } else {
1481 bitmap_fill(ud->bchan_map, ud->bchan_cnt);
1482 for (i = 0; i < rm_res->sets; i++) {
1483 rm_desc = &rm_res->desc[i];
1484 bitmap_clear(ud->bchan_map, rm_desc->start,
1485 rm_desc->num);
1486 dev_dbg(dev, "ti-sci-res: bchan: %d:%d\n",
1487 rm_desc->start, rm_desc->num);
1488 }
1489 }
1490
1491 /* tchan ranges */
1492 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1493 if (IS_ERR(rm_res)) {
1494 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1495 } else {
1496 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1497 for (i = 0; i < rm_res->sets; i++) {
1498 rm_desc = &rm_res->desc[i];
1499 bitmap_clear(ud->tchan_map, rm_desc->start,
1500 rm_desc->num);
1501 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
1502 rm_desc->start, rm_desc->num);
1503 }
1504 }
1505
1506 /* rchan ranges */
1507 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1508 if (IS_ERR(rm_res)) {
1509 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1510 } else {
1511 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1512 for (i = 0; i < rm_res->sets; i++) {
1513 rm_desc = &rm_res->desc[i];
1514 bitmap_clear(ud->rchan_map, rm_desc->start,
1515 rm_desc->num);
1516 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
1517 rm_desc->start, rm_desc->num);
1518 }
1519 }
1520
1521 return 0;
1522}
1523
1524static int pktdma_setup_resources(struct udma_dev *ud)
1525{
1526 int i;
1527 struct udevice *dev = ud->dev;
1528 struct ti_sci_resource *rm_res;
1529 struct ti_sci_resource_desc *rm_desc;
1530 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1531
1532 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1533 sizeof(unsigned long), GFP_KERNEL);
1534 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1535 GFP_KERNEL);
1536 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1537 sizeof(unsigned long), GFP_KERNEL);
1538 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1539 GFP_KERNEL);
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301540 ud->rflow_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
1541 sizeof(unsigned long),
1542 GFP_KERNEL);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301543 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
1544 GFP_KERNEL);
1545 ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
1546 sizeof(unsigned long), GFP_KERNEL);
1547
1548 if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301549 !ud->rchans || !ud->rflows || !ud->rflow_map)
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301550 return -ENOMEM;
1551
1552 /* Get resource ranges from tisci */
1553 for (i = 0; i < RM_RANGE_LAST; i++) {
1554 if (i == RM_RANGE_BCHAN)
1555 continue;
1556
1557 tisci_rm->rm_ranges[i] =
1558 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1559 tisci_rm->tisci_dev_id,
1560 (char *)range_names[i]);
1561 }
1562
1563 /* tchan ranges */
1564 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1565 if (IS_ERR(rm_res)) {
1566 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1567 } else {
1568 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1569 for (i = 0; i < rm_res->sets; i++) {
1570 rm_desc = &rm_res->desc[i];
1571 bitmap_clear(ud->tchan_map, rm_desc->start,
1572 rm_desc->num);
1573 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
1574 rm_desc->start, rm_desc->num);
1575 }
1576 }
1577
1578 /* rchan ranges */
1579 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1580 if (IS_ERR(rm_res)) {
1581 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1582 } else {
1583 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1584 for (i = 0; i < rm_res->sets; i++) {
1585 rm_desc = &rm_res->desc[i];
1586 bitmap_clear(ud->rchan_map, rm_desc->start,
1587 rm_desc->num);
1588 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
1589 rm_desc->start, rm_desc->num);
1590 }
1591 }
1592
1593 /* rflow ranges */
1594 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
1595 if (IS_ERR(rm_res)) {
1596 /* all rflows are assigned exclusively to Linux */
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301597 bitmap_zero(ud->rflow_map, ud->rflow_cnt);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301598 } else {
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301599 bitmap_fill(ud->rflow_map, ud->rflow_cnt);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301600 for (i = 0; i < rm_res->sets; i++) {
1601 rm_desc = &rm_res->desc[i];
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301602 bitmap_clear(ud->rflow_map, rm_desc->start,
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301603 rm_desc->num);
1604 dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
1605 rm_desc->start, rm_desc->num);
1606 }
1607 }
1608
1609 /* tflow ranges */
1610 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
1611 if (IS_ERR(rm_res)) {
1612 /* all tflows are assigned exclusively to Linux */
1613 bitmap_zero(ud->tflow_map, ud->tflow_cnt);
1614 } else {
1615 bitmap_fill(ud->tflow_map, ud->tflow_cnt);
1616 for (i = 0; i < rm_res->sets; i++) {
1617 rm_desc = &rm_res->desc[i];
1618 bitmap_clear(ud->tflow_map, rm_desc->start,
1619 rm_desc->num);
1620 dev_dbg(dev, "ti-sci-res: tflow: %d:%d\n",
1621 rm_desc->start, rm_desc->num);
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301622 }
1623 }
1624
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301625 return 0;
1626}
1627
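/*
 * setup_resources() - run the DMA-type specific resource setup and work
 * out how many channel structures to allocate.
 *
 * The returned count is the number of bchans + tchans + rchans usable
 * by this host (bits still set in the maps are reserved and subtracted
 * via bitmap_weight()); zero usable channels yields -ENODEV.
 */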
1628static int setup_resources(struct udma_dev *ud)
1629{
1630 struct udevice *dev = ud->dev;
1631 int ch_count, ret;
1632
1633 switch (ud->match_data->type) {
1634 case DMA_TYPE_UDMA:
1635 ret = udma_setup_resources(ud);
1636 break;
1637 case DMA_TYPE_BCDMA:
1638 ret = bcdma_setup_resources(ud);
1639 break;
1640 case DMA_TYPE_PKTDMA:
1641 ret = pktdma_setup_resources(ud);
1642 break;
1643 default:
1644 return -EINVAL;
1645 }
1646
1647 if (ret)
1648 return ret;
1649
1650 ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
1651 if (ud->bchan_cnt)
1652 ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301653 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
1654 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
1655 if (!ch_count)
1656 return -ENODEV;
1657
1658 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
1659 GFP_KERNEL);
1660 if (!ud->channels)
1661 return -ENOMEM;
1662
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301663 switch (ud->match_data->type) {
1664 case DMA_TYPE_UDMA:
1665 dev_dbg(dev,
1666 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
1667 ch_count,
1668 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1669 ud->tchan_cnt),
1670 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1671 ud->rchan_cnt),
1672 ud->rflow_cnt - bitmap_weight(ud->rflow_map,
1673 ud->rflow_cnt));
1674 break;
1675 case DMA_TYPE_BCDMA:
1676 dev_dbg(dev,
1677 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
1678 ch_count,
1679 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
1680 ud->bchan_cnt),
1681 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1682 ud->tchan_cnt),
1683 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1684 ud->rchan_cnt));
1685 break;
1686 case DMA_TYPE_PKTDMA:
1687 dev_dbg(dev,
1688 "Channels: %d (tchan: %u, rchan: %u)\n",
1689 ch_count,
1690 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1691 ud->tchan_cnt),
1692 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1693 ud->rchan_cnt));
1694 break;
1695 default:
1696 break;
1697 }
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301698
1699 return ch_count;
1700}
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301701
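/*
 * udma_probe() - driver model probe hook for UDMA/BCDMA/PKTDMA.
 *
 * Maps the MMR regions, looks up the TISCI handle and device IDs from
 * the device tree, obtains (UDMA) or creates (BCDMA/PKTDMA) the ring
 * accelerator instance, sets up the resource bitmaps and initialises
 * the per-bchan/tchan/rchan/rflow and per-channel bookkeeping.
 * Channel 0 is marked in_use since it is reserved for memcpy.
 */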
Vignesh R3a9dbf32019-02-05 17:31:24 +05301702static int udma_probe(struct udevice *dev)
1703{
1704 struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
1705 struct udma_dev *ud = dev_get_priv(dev);
1706 int i, ret;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301707 struct udevice *tmp;
1708 struct udevice *tisci_dev = NULL;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301709 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1710 ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));
1711
Vignesh R3a9dbf32019-02-05 17:31:24 +05301712
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301713 ud->match_data = (void *)dev_get_driver_data(dev);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301714 ret = udma_get_mmrs(dev);
1715 if (ret)
1716 return ret;
1717
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05301718 ud->psil_base = ud->match_data->psil_base;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301719
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301720 ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
1721 "ti,sci", &tisci_dev);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301722 if (ret) {
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301723 debug("Failed to get TISCI phandle (%d)\n", ret);
1724 tisci_rm->tisci = NULL;
1725 return -EINVAL;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301726 }
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301727 tisci_rm->tisci = (struct ti_sci_handle *)
1728 (ti_sci_get_handle_from_sysfw(tisci_dev));
Vignesh R3a9dbf32019-02-05 17:31:24 +05301729
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301730 tisci_rm->tisci_dev_id = -1;
1731 ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
1732 if (ret) {
1733 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
1734 return ret;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301735 }
1736
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301737 tisci_rm->tisci_navss_dev_id = -1;
1738 ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
1739 &tisci_rm->tisci_navss_dev_id);
1740 if (ret) {
1741 dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
1742 return ret;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301743 }
1744
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301745 tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
1746 tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301747
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301748 if (ud->match_data->type == DMA_TYPE_UDMA) {
1749 ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
1750 "ti,ringacc", &tmp);
1751 ud->ringacc = dev_get_priv(tmp);
1752 } else {
1753 struct k3_ringacc_init_data ring_init_data;
1754
1755 ring_init_data.tisci = ud->tisci_rm.tisci;
1756 ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
1757 if (ud->match_data->type == DMA_TYPE_BCDMA) {
1758 ring_init_data.num_rings = ud->bchan_cnt +
1759 ud->tchan_cnt +
1760 ud->rchan_cnt;
1761 } else {
1762 ring_init_data.num_rings = ud->rflow_cnt +
1763 ud->tflow_cnt;
1764 }
1765
1766 ud->ringacc = k3_ringacc_dmarings_init(dev, &ring_init_data);
1767 }
1768 if (IS_ERR(ud->ringacc))
1769 return PTR_ERR(ud->ringacc);
1770
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301771 ud->dev = dev;
Siddharth Vadapalli679bc0f2024-02-20 15:34:51 +05301772 ret = setup_resources(ud);
1773 if (ret < 0)
1774 return ret;
1775
1776 ud->ch_count = ret;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301777
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301778 for (i = 0; i < ud->bchan_cnt; i++) {
1779 struct udma_bchan *bchan = &ud->bchans[i];
1780
1781 bchan->id = i;
1782 bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
1783 }
Vignesh R3a9dbf32019-02-05 17:31:24 +05301784
Vignesh R3a9dbf32019-02-05 17:31:24 +05301785 for (i = 0; i < ud->tchan_cnt; i++) {
1786 struct udma_tchan *tchan = &ud->tchans[i];
1787
1788 tchan->id = i;
Vignesh Raghavendra27e72502021-06-07 19:47:53 +05301789 tchan->reg_chan = ud->mmrs[MMR_TCHAN] + UDMA_CH_100(i);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301790 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
1791 }
1792
1793 for (i = 0; i < ud->rchan_cnt; i++) {
1794 struct udma_rchan *rchan = &ud->rchans[i];
1795
1796 rchan->id = i;
Vignesh Raghavendra27e72502021-06-07 19:47:53 +05301797 rchan->reg_chan = ud->mmrs[MMR_RCHAN] + UDMA_CH_100(i);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301798 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
1799 }
1800
1801 for (i = 0; i < ud->rflow_cnt; i++) {
1802 struct udma_rflow *rflow = &ud->rflows[i];
1803
1804 rflow->id = i;
Vignesh Raghavendra27e72502021-06-07 19:47:53 +05301805 rflow->reg_rflow = ud->mmrs[MMR_RFLOW] + UDMA_CH_40(i);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301806 }
1807
1808 for (i = 0; i < ud->ch_count; i++) {
1809 struct udma_chan *uc = &ud->channels[i];
1810
1811 uc->ud = ud;
1812 uc->id = i;
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301813 uc->config.remote_thread_id = -1;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301814 uc->bchan = NULL;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301815 uc->tchan = NULL;
1816 uc->rchan = NULL;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301817 uc->config.mapped_channel_id = -1;
1818 uc->config.default_flow_id = -1;
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301819 uc->config.dir = DMA_MEM_TO_MEM;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301820		sprintf(uc->name, "UDMA chan%d", i);
1821 if (!i)
1822 uc->in_use = true;
1823 }
1824
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301825 pr_debug("%s(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
1826 dev->name,
Vignesh R3a9dbf32019-02-05 17:31:24 +05301827 udma_read(ud->mmrs[MMR_GCFG], 0),
1828 udma_read(ud->mmrs[MMR_GCFG], 0x20),
1829 udma_read(ud->mmrs[MMR_GCFG], 0x24),
1830 udma_read(ud->mmrs[MMR_GCFG], 0x28),
1831 udma_read(ud->mmrs[MMR_GCFG], 0x2c));
1832
1833 uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;
1834
Siddharth Vadapalli679bc0f2024-02-20 15:34:51 +05301835 return 0;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301836}
1837
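/*
 * Push one descriptor pointer to a ring: the CPU pointer is copied into
 * a 64-bit value and handed to the ring accelerator (the rings used
 * here are configured with 8-byte elements).
 */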
Vignesh Raghavendrafc7a33f2019-12-09 10:25:38 +05301838static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
1839{
1840 u64 addr = 0;
1841
1842 memcpy(&addr, &elem, sizeof(elem));
1843 return k3_nav_ringacc_ring_push(ring, &addr);
1844}
1845
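/*
 * udma_prep_dma_memcpy() - build a type 15 TR descriptor for a memcpy.
 *
 * Transfers below 64K fit in a single TR (icnt0 = len, icnt1 = 1).
 * Larger transfers are split in two: TR0 moves tr0_cnt1 blocks of
 * tr0_cnt0 bytes (just under 64K, trimmed so the block size keeps the
 * common src/dest alignment) and TR1 moves the remaining len % tr0_cnt0
 * bytes.  The finished descriptor is flushed from the data cache and
 * pushed to the tchan transmit ring.
 */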
Vignesh R3a9dbf32019-02-05 17:31:24 +05301846static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
1847 dma_addr_t src, size_t len)
1848{
1849 u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1850 struct cppi5_tr_type15_t *tr_req;
1851 int num_tr;
1852 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
1853 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
1854 unsigned long dummy;
1855 void *tr_desc;
1856 size_t desc_size;
1857
1858 if (len < SZ_64K) {
1859 num_tr = 1;
1860 tr0_cnt0 = len;
1861 tr0_cnt1 = 1;
1862 } else {
1863 unsigned long align_to = __ffs(src | dest);
1864
1865 if (align_to > 3)
1866 align_to = 3;
1867 /*
1868 * Keep simple: tr0: SZ_64K-alignment blocks,
1869 * tr1: the remaining
1870 */
1871 num_tr = 2;
1872 tr0_cnt0 = (SZ_64K - BIT(align_to));
1873 if (len / tr0_cnt0 >= SZ_64K) {
1874 dev_err(uc->ud->dev, "size %zu is not supported\n",
1875 len);
1876 return NULL;
1877 }
1878
1879 tr0_cnt1 = len / tr0_cnt0;
1880 tr1_cnt0 = len % tr0_cnt0;
1881 }
1882
1883 desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
1884 tr_desc = dma_alloc_coherent(desc_size, &dummy);
1885 if (!tr_desc)
1886 return NULL;
1887 memset(tr_desc, 0, desc_size);
1888
1889 cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
1890 cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
1891 cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);
1892
1893 tr_req = tr_desc + tr_size;
1894
1895 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
1896 CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
1897 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
1898
1899 tr_req[0].addr = src;
1900 tr_req[0].icnt0 = tr0_cnt0;
1901 tr_req[0].icnt1 = tr0_cnt1;
1902 tr_req[0].icnt2 = 1;
1903 tr_req[0].icnt3 = 1;
1904 tr_req[0].dim1 = tr0_cnt0;
1905
1906 tr_req[0].daddr = dest;
1907 tr_req[0].dicnt0 = tr0_cnt0;
1908 tr_req[0].dicnt1 = tr0_cnt1;
1909 tr_req[0].dicnt2 = 1;
1910 tr_req[0].dicnt3 = 1;
1911 tr_req[0].ddim1 = tr0_cnt0;
1912
1913 if (num_tr == 2) {
1914 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
1915 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
1916 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
1917
1918 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
1919 tr_req[1].icnt0 = tr1_cnt0;
1920 tr_req[1].icnt1 = 1;
1921 tr_req[1].icnt2 = 1;
1922 tr_req[1].icnt3 = 1;
1923
1924 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
1925 tr_req[1].dicnt0 = tr1_cnt0;
1926 tr_req[1].dicnt1 = 1;
1927 tr_req[1].dicnt2 = 1;
1928 tr_req[1].dicnt3 = 1;
1929 }
1930
1931 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
1932
Vignesh Raghavendrace431412019-12-09 10:25:39 +05301933 flush_dcache_range((unsigned long)tr_desc,
1934 ALIGN((unsigned long)tr_desc + desc_size,
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05301935 ARCH_DMA_MINALIGN));
Vignesh R3a9dbf32019-02-05 17:31:24 +05301936
Vignesh Raghavendrafc7a33f2019-12-09 10:25:38 +05301937 udma_push_to_ring(uc->tchan->t_ring, tr_desc);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301938
1939 return 0;
1940}
1941
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301942#define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \
1943 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1944 TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
1945
1946#define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \
1947 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1948 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
1949
1950#define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \
1951 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
1952
1953#define TISCI_UDMA_TCHAN_VALID_PARAMS ( \
1954 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1955 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1956 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1957 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1958 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1959 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1960 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1961 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1962
1963#define TISCI_UDMA_RCHAN_VALID_PARAMS ( \
1964 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1965 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1966 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1967 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1968 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1969 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1970 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1971 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
1972 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1973
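/*
 * Configure a BCDMA block-copy channel (bchan) through TISCI.  Only the
 * pause-on-error and extended-channel-type parameters apply to bchans;
 * the extended type selects TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN.
 */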
1974static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
1975{
1976 struct udma_dev *ud = uc->ud;
1977 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1978 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1979 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1980 struct udma_bchan *bchan = uc->bchan;
1981 int ret = 0;
1982
1983 req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
1984 req_tx.nav_id = tisci_rm->tisci_dev_id;
1985 req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
1986 req_tx.index = bchan->id;
1987
1988 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1989 if (ret)
1990 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
1991
1992 return ret;
1993}
1994
1995static struct udma_bchan *__bcdma_reserve_bchan(struct udma_dev *ud, int id)
1996{
1997 if (id >= 0) {
1998 if (test_bit(id, ud->bchan_map)) {
1999 dev_err(ud->dev, "bchan%d is in use\n", id);
2000 return ERR_PTR(-ENOENT);
2001 }
2002 } else {
2003 id = find_next_zero_bit(ud->bchan_map, ud->bchan_cnt, 0);
2004 if (id == ud->bchan_cnt)
2005 return ERR_PTR(-ENOENT);
2006 }
2007 __set_bit(id, ud->bchan_map);
2008 return &ud->bchans[id];
2009}
2010
2011static int bcdma_get_bchan(struct udma_chan *uc)
2012{
2013 struct udma_dev *ud = uc->ud;
2014
2015 if (uc->bchan) {
2016 dev_err(ud->dev, "chan%d: already have bchan%d allocated\n",
2017 uc->id, uc->bchan->id);
2018 return 0;
2019 }
2020
2021 uc->bchan = __bcdma_reserve_bchan(ud, -1);
2022 if (IS_ERR(uc->bchan))
2023 return PTR_ERR(uc->bchan);
2024
2025 uc->tchan = uc->bchan;
2026
2027 return 0;
2028}
2029
2030static void bcdma_put_bchan(struct udma_chan *uc)
2031{
2032 struct udma_dev *ud = uc->ud;
2033
2034 if (uc->bchan) {
2035 dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
2036 uc->bchan->id);
2037 __clear_bit(uc->bchan->id, ud->bchan_map);
2038 uc->bchan = NULL;
2039 uc->tchan = NULL;
2040 }
2041}
2042
2043static void bcdma_free_bchan_resources(struct udma_chan *uc)
2044{
2045 if (!uc->bchan)
2046 return;
2047
2048 k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
2049 k3_nav_ringacc_ring_free(uc->bchan->t_ring);
2050 uc->bchan->tc_ring = NULL;
2051 uc->bchan->t_ring = NULL;
2052
2053 bcdma_put_bchan(uc);
2054}
2055
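/*
 * Reserve a bchan and request its ring pair from the ring accelerator.
 * Only the forward ring (t_ring) is configured here, as a 16-element,
 * 8-byte, RING-mode ring; on failure both rings and the bchan
 * reservation are released again.
 */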
2056static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
2057{
2058 struct k3_nav_ring_cfg ring_cfg;
2059 struct udma_dev *ud = uc->ud;
2060 int ret;
2061
2062 ret = bcdma_get_bchan(uc);
2063 if (ret)
2064 return ret;
2065
2066 ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
2067 &uc->bchan->t_ring,
2068 &uc->bchan->tc_ring);
2069 if (ret) {
2070 ret = -EBUSY;
2071 goto err_ring;
2072 }
2073
2074 memset(&ring_cfg, 0, sizeof(ring_cfg));
2075 ring_cfg.size = 16;
2076 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
2077 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
2078
2079 ret = k3_nav_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
2080 if (ret)
2081 goto err_ringcfg;
2082
2083 return 0;
2084
2085err_ringcfg:
2086 k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
2087 uc->bchan->tc_ring = NULL;
2088 k3_nav_ringacc_ring_free(uc->bchan->t_ring);
2089 uc->bchan->t_ring = NULL;
2090err_ring:
2091 bcdma_put_bchan(uc);
2092
2093 return ret;
2094}
2095
2096static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
2097{
2098 struct udma_dev *ud = uc->ud;
2099 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2100 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2101 struct udma_tchan *tchan = uc->tchan;
2102 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
2103 int ret = 0;
2104
2105 req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
2106 req_tx.nav_id = tisci_rm->tisci_dev_id;
2107 req_tx.index = tchan->id;
2108 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
2109 if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
2110 ud->match_data->flags & UDMA_FLAG_TDTYPE) {
2111 /* wait for peer to complete the teardown for PDMAs */
2112 req_tx.valid_params |=
2113 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
2114 req_tx.tx_tdtype = 1;
2115 }
2116
2117 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
2118 if (ret)
2119 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
2120
2121 return ret;
2122}
2123
2124#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
2125
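/*
 * Configure a PKTDMA rx channel and its default flow through TISCI.
 * The rchan itself only takes the pause-on-error parameter; the flow is
 * programmed with EPIB/psdata presence taken from the channel config
 * and with rx error handling disabled.
 */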
2126static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
2127{
2128 struct udma_dev *ud = uc->ud;
2129 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2130 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2131 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2132 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2133 int ret = 0;
2134
2135 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2136 req_rx.nav_id = tisci_rm->tisci_dev_id;
2137 req_rx.index = uc->rchan->id;
2138
2139 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2140 if (ret) {
2141 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2142 return ret;
2143 }
2144
2145 flow_req.valid_params =
2146 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2147 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2148 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
2149
2150 flow_req.nav_id = tisci_rm->tisci_dev_id;
2151 flow_req.flow_index = uc->rflow->id;
2152
2153 if (uc->config.needs_epib)
2154 flow_req.rx_einfo_present = 1;
2155 else
2156 flow_req.rx_einfo_present = 0;
2157 if (uc->config.psd_size)
2158 flow_req.rx_psinfo_present = 1;
2159 else
2160 flow_req.rx_psinfo_present = 0;
Vignesh Raghavendra87fa0d62023-03-08 09:42:57 +05302161 flow_req.rx_error_handling = 0;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302162
2163 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2164
2165 if (ret)
2166 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
2167 ret);
2168
2169 return ret;
2170}
2171
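/*
 * Allocate resources for a BCDMA channel.  This driver only uses BCDMA
 * for MEM_TO_MEM transfers, so a bchan plus its rings is all that is
 * needed; the channel is then configured via TISCI, stopped if it was
 * left running, and its rings are reset.
 */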
2172static int bcdma_alloc_chan_resources(struct udma_chan *uc)
2173{
2174 int ret;
2175
2176 uc->config.pkt_mode = false;
2177
2178 switch (uc->config.dir) {
2179 case DMA_MEM_TO_MEM:
2180 /* Non synchronized - mem to mem type of transfer */
2181 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2182 uc->id);
2183
2184 ret = bcdma_alloc_bchan_resources(uc);
2185 if (ret)
2186 return ret;
2187
2188 ret = bcdma_tisci_m2m_channel_config(uc);
2189 break;
2190 default:
2191		/* Cannot happen */
2192 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2193 __func__, uc->id, uc->config.dir);
2194 return -EINVAL;
2195 }
2196
2197 /* check if the channel configuration was successful */
2198 if (ret)
2199 goto err_res_free;
2200
2201 if (udma_is_chan_running(uc)) {
2202 dev_warn(uc->ud->dev, "chan%d: is running!\n", uc->id);
2203 udma_stop(uc);
2204 if (udma_is_chan_running(uc)) {
2205 dev_err(uc->ud->dev, "chan%d: won't stop!\n", uc->id);
2206 goto err_res_free;
2207 }
2208 }
2209
2210 udma_reset_rings(uc);
2211
2212 return 0;
2213
2214err_res_free:
2215 bcdma_free_bchan_resources(uc);
2216 udma_free_tx_resources(uc);
2217 udma_free_rx_resources(uc);
2218
2219 udma_reset_uchan(uc);
2220
2221 return ret;
2222}
2223
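/*
 * Allocate resources for a PKTDMA channel.  TX (MEM_TO_DEV) gets a
 * tchan, RX (DEV_TO_MEM) gets an rchan and an rflow.  The PSI-L source
 * and destination thread IDs are derived from the channel and remote
 * thread numbers, the channel is configured through TISCI and the
 * PSI-L threads are paired before the rings are reset.
 */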
2224static int pktdma_alloc_chan_resources(struct udma_chan *uc)
2225{
2226 struct udma_dev *ud = uc->ud;
2227 int ret;
2228
2229 switch (uc->config.dir) {
2230 case DMA_MEM_TO_DEV:
2231		/* Slave transfer synchronized - mem to dev (TX) transfer */
2232 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2233 uc->id);
2234
2235 ret = udma_alloc_tx_resources(uc);
2236 if (ret) {
2237 uc->config.remote_thread_id = -1;
2238 return ret;
2239 }
2240
2241 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2242 uc->config.dst_thread = uc->config.remote_thread_id;
2243 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2244
2245 ret = pktdma_tisci_tx_channel_config(uc);
2246 break;
2247 case DMA_DEV_TO_MEM:
2248		/* Slave transfer synchronized - dev to mem (RX) transfer */
2249 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2250 uc->id);
2251
2252 ret = udma_alloc_rx_resources(uc);
2253 if (ret) {
2254 uc->config.remote_thread_id = -1;
2255 return ret;
2256 }
2257
2258 uc->config.src_thread = uc->config.remote_thread_id;
2259 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2260 K3_PSIL_DST_THREAD_ID_OFFSET;
2261
2262 ret = pktdma_tisci_rx_channel_config(uc);
2263 break;
2264 default:
2265		/* Cannot happen */
2266 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2267 __func__, uc->id, uc->config.dir);
2268 return -EINVAL;
2269 }
2270
2271 /* check if the channel configuration was successful */
2272 if (ret)
2273 goto err_res_free;
2274
2275 /* PSI-L pairing */
2276 ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2277 if (ret) {
2278 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2279 uc->config.src_thread, uc->config.dst_thread);
2280 goto err_res_free;
2281 }
2282
2283 if (udma_is_chan_running(uc)) {
2284 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2285 udma_stop(uc);
2286 if (udma_is_chan_running(uc)) {
2287 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2288 goto err_res_free;
2289 }
2290 }
2291
2292 udma_reset_rings(uc);
2293
2294 if (uc->tchan)
2295 dev_dbg(ud->dev,
2296 "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2297 uc->id, uc->tchan->id, uc->tchan->tflow_id,
2298 uc->config.remote_thread_id);
2299 else if (uc->rchan)
2300 dev_dbg(ud->dev,
2301 "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2302 uc->id, uc->rchan->id, uc->rflow->id,
2303 uc->config.remote_thread_id);
2304 return 0;
2305
2306err_res_free:
2307 udma_free_tx_resources(uc);
2308 udma_free_rx_resources(uc);
2309
2310 udma_reset_uchan(uc);
2311
2312 return ret;
2313}
2314
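/*
 * udma_transfer() - DMA uclass .transfer op: a simple blocking memcpy.
 *
 * Channel 0 is reserved for this path: its resources are allocated, a
 * single TR descriptor is prepared and pushed, completion is polled and
 * the resources are freed again before returning.
 */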
Vignesh R3a9dbf32019-02-05 17:31:24 +05302315static int udma_transfer(struct udevice *dev, int direction,
Andrew Davisd2da2842022-10-07 12:11:13 -05002316 dma_addr_t dst, dma_addr_t src, size_t len)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302317{
2318 struct udma_dev *ud = dev_get_priv(dev);
2319	/* Channel 0 is reserved for memcpy */
2320 struct udma_chan *uc = &ud->channels[0];
2321 dma_addr_t paddr = 0;
2322 int ret;
2323
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302324 switch (ud->match_data->type) {
2325 case DMA_TYPE_UDMA:
2326 ret = udma_alloc_chan_resources(uc);
2327 break;
2328 case DMA_TYPE_BCDMA:
2329 ret = bcdma_alloc_chan_resources(uc);
2330 break;
2331 default:
2332 return -EINVAL;
2333	}
Vignesh R3a9dbf32019-02-05 17:31:24 +05302334 if (ret)
2335 return ret;
2336
Andrew Davisd2da2842022-10-07 12:11:13 -05002337 udma_prep_dma_memcpy(uc, dst, src, len);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302338 udma_start(uc);
2339 udma_poll_completion(uc, &paddr);
2340 udma_stop(uc);
2341
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302342 switch (ud->match_data->type) {
2343 case DMA_TYPE_UDMA:
2344 udma_free_chan_resources(uc);
2345 break;
2346 case DMA_TYPE_BCDMA:
2347 bcdma_free_bchan_resources(uc);
2348 break;
2349 default:
2350 return -EINVAL;
2351	}
2352
Vignesh R3a9dbf32019-02-05 17:31:24 +05302353 return 0;
2354}
2355
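/*
 * udma_request() - DMA uclass .request op.
 *
 * Allocates the hardware resources for the channel selected earlier by
 * udma_of_xlate() plus the host descriptor memory: one descriptor for a
 * TX channel, UDMA_RX_DESC_NUM descriptors for an RX channel.  For RX
 * channels the flow id base/count is recorded in cfg_data so clients
 * can query it via TI_UDMA_CHAN_PRIV_INFO.
 */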
2356static int udma_request(struct dma *dma)
2357{
2358 struct udma_dev *ud = dev_get_priv(dma->dev);
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302359 struct udma_chan_config *ucc;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302360 struct udma_chan *uc;
2361 unsigned long dummy;
2362 int ret;
2363
2364 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2365 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2366 return -EINVAL;
2367 }
2368
2369 uc = &ud->channels[dma->id];
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302370 ucc = &uc->config;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302371 switch (ud->match_data->type) {
2372 case DMA_TYPE_UDMA:
2373 ret = udma_alloc_chan_resources(uc);
2374 break;
2375 case DMA_TYPE_BCDMA:
2376 ret = bcdma_alloc_chan_resources(uc);
2377 break;
2378 case DMA_TYPE_PKTDMA:
2379 ret = pktdma_alloc_chan_resources(uc);
2380 break;
2381 default:
2382 return -EINVAL;
2383 }
Vignesh R3a9dbf32019-02-05 17:31:24 +05302384 if (ret) {
2385 dev_err(dma->dev, "alloc dma res failed %d\n", ret);
2386 return -EINVAL;
2387 }
2388
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302389 if (uc->config.dir == DMA_MEM_TO_DEV) {
2390 uc->desc_tx = dma_alloc_coherent(ucc->hdesc_size, &dummy);
2391 memset(uc->desc_tx, 0, ucc->hdesc_size);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302392 } else {
2393 uc->desc_rx = dma_alloc_coherent(
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302394 ucc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
2395 memset(uc->desc_rx, 0, ucc->hdesc_size * UDMA_RX_DESC_NUM);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302396 }
2397
2398 uc->in_use = true;
2399 uc->desc_rx_cur = 0;
2400 uc->num_rx_bufs = 0;
2401
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302402 if (uc->config.dir == DMA_DEV_TO_MEM) {
Vignesh Raghavendra39349892019-12-04 22:17:21 +05302403 uc->cfg_data.flow_id_base = uc->rflow->id;
2404 uc->cfg_data.flow_id_cnt = 1;
2405 }
2406
Vignesh R3a9dbf32019-02-05 17:31:24 +05302407 return 0;
2408}
2409
Simon Glass75c0ad62020-02-03 07:35:55 -07002410static int udma_rfree(struct dma *dma)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302411{
2412 struct udma_dev *ud = dev_get_priv(dma->dev);
2413 struct udma_chan *uc;
2414
2415 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2416 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2417 return -EINVAL;
2418 }
2419 uc = &ud->channels[dma->id];
2420
2421 if (udma_is_chan_running(uc))
2422 udma_stop(uc);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302423
2424 udma_navss_psil_unpair(ud, uc->config.src_thread,
2425 uc->config.dst_thread);
2426
2427 bcdma_free_bchan_resources(uc);
2428 udma_free_tx_resources(uc);
2429 udma_free_rx_resources(uc);
2430 udma_reset_uchan(uc);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302431
2432 uc->in_use = false;
2433
2434 return 0;
2435}
2436
2437static int udma_enable(struct dma *dma)
2438{
2439 struct udma_dev *ud = dev_get_priv(dma->dev);
2440 struct udma_chan *uc;
2441 int ret;
2442
2443 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2444 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2445 return -EINVAL;
2446 }
2447 uc = &ud->channels[dma->id];
2448
2449 ret = udma_start(uc);
2450
2451 return ret;
2452}
2453
2454static int udma_disable(struct dma *dma)
2455{
2456 struct udma_dev *ud = dev_get_priv(dma->dev);
2457 struct udma_chan *uc;
2458 int ret = 0;
2459
2460 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2461 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2462 return -EINVAL;
2463 }
2464 uc = &ud->channels[dma->id];
2465
2466 if (udma_is_chan_running(uc))
2467 ret = udma_stop(uc);
2468 else
2469 dev_err(dma->dev, "%s not running\n", __func__);
2470
2471 return ret;
2472}
2473
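/*
 * udma_send() - DMA uclass .send op (TX, MEM_TO_DEV only).
 *
 * Fills the preallocated host descriptor for the buffer (packet type
 * and destination tag come from the caller's metadata), flushes both
 * the buffer and the descriptor from the data cache, pushes the
 * descriptor to the transmit ring and polls for completion.
 */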
2474static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
2475{
2476 struct udma_dev *ud = dev_get_priv(dma->dev);
2477 struct cppi5_host_desc_t *desc_tx;
2478 dma_addr_t dma_src = (dma_addr_t)src;
2479 struct ti_udma_drv_packet_data packet_data = { 0 };
2480 dma_addr_t paddr;
2481 struct udma_chan *uc;
2482 u32 tc_ring_id;
2483 int ret;
2484
Keerthya3c8bb12019-04-24 16:33:54 +05302485 if (metadata)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302486 packet_data = *((struct ti_udma_drv_packet_data *)metadata);
2487
2488 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2489 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2490 return -EINVAL;
2491 }
2492 uc = &ud->channels[dma->id];
2493
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302494 if (uc->config.dir != DMA_MEM_TO_DEV)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302495 return -EINVAL;
2496
2497 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
2498
2499 desc_tx = uc->desc_tx;
2500
2501 cppi5_hdesc_reset_hbdesc(desc_tx);
2502
2503 cppi5_hdesc_init(desc_tx,
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302504 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
2505 uc->config.psd_size);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302506 cppi5_hdesc_set_pktlen(desc_tx, len);
2507 cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
2508 cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
2509 cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
2510	/* packet type and destination tag are passed in by the caller */
2511 cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
2512 cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);
2513
Vignesh Raghavendrace431412019-12-09 10:25:39 +05302514 flush_dcache_range((unsigned long)dma_src,
2515 ALIGN((unsigned long)dma_src + len,
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05302516 ARCH_DMA_MINALIGN));
Vignesh Raghavendrace431412019-12-09 10:25:39 +05302517 flush_dcache_range((unsigned long)desc_tx,
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302518 ALIGN((unsigned long)desc_tx + uc->config.hdesc_size,
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05302519 ARCH_DMA_MINALIGN));
Vignesh R3a9dbf32019-02-05 17:31:24 +05302520
Vignesh Raghavendrafc7a33f2019-12-09 10:25:38 +05302521 ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302522 if (ret) {
2523 dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
2524 dma->id, ret);
2525 return ret;
2526 }
2527
2528 udma_poll_completion(uc, &paddr);
2529
2530 return 0;
2531}
2532
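/*
 * udma_receive() - DMA uclass .receive op (RX, DEV_TO_MEM only).
 *
 * Pops a completed host descriptor from the receive ring (returns 0 if
 * nothing is pending), invalidates the descriptor and the data buffer
 * in the cache, hands the buffer pointer back to the caller and returns
 * the packet length.
 */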
2533static int udma_receive(struct dma *dma, void **dst, void *metadata)
2534{
2535 struct udma_dev *ud = dev_get_priv(dma->dev);
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302536 struct udma_chan_config *ucc;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302537 struct cppi5_host_desc_t *desc_rx;
2538 dma_addr_t buf_dma;
2539 struct udma_chan *uc;
2540 u32 buf_dma_len, pkt_len;
2541 u32 port_id = 0;
2542 int ret;
2543
2544 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2545 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2546 return -EINVAL;
2547 }
2548 uc = &ud->channels[dma->id];
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302549 ucc = &uc->config;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302550
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302551 if (uc->config.dir != DMA_DEV_TO_MEM)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302552 return -EINVAL;
2553 if (!uc->num_rx_bufs)
2554 return -EINVAL;
2555
Vignesh Raghavendra2db3b282020-07-06 13:26:26 +05302556 ret = k3_nav_ringacc_ring_pop(uc->rflow->r_ring, &desc_rx);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302557 if (ret && ret != -ENODATA) {
2558 dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
2559 return ret;
2560 } else if (ret == -ENODATA) {
2561 return 0;
2562 }
2563
2564 /* invalidate cache data */
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05302565 invalidate_dcache_range((ulong)desc_rx,
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302566 (ulong)(desc_rx + ucc->hdesc_size));
Vignesh R3a9dbf32019-02-05 17:31:24 +05302567
2568 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
2569 pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
2570
2571 /* invalidate cache data */
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05302572 invalidate_dcache_range((ulong)buf_dma,
2573 (ulong)(buf_dma + buf_dma_len));
Vignesh R3a9dbf32019-02-05 17:31:24 +05302574
2575 cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
2576
2577 *dst = (void *)buf_dma;
2578 uc->num_rx_bufs--;
2579
2580 return pkt_len;
2581}
2582
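/*
 * udma_of_xlate() - translate a client's DMA phandle arguments.
 *
 * The single argument is the remote PSI-L thread ID; its destination
 * bit selects the direction (MEM_TO_DEV vs DEV_TO_MEM).  The endpoint
 * configuration from the PSI-L database then provides packet mode,
 * EPIB/psdata sizes and, for PKTDMA, the mapped channel and default
 * flow; the first unused channel is assigned to the client.
 */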
2583static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
2584{
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302585 struct udma_chan_config *ucc;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302586 struct udma_dev *ud = dev_get_priv(dma->dev);
2587 struct udma_chan *uc = &ud->channels[0];
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302588 struct psil_endpoint_config *ep_config;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302589 u32 val;
2590
2591 for (val = 0; val < ud->ch_count; val++) {
2592 uc = &ud->channels[val];
2593 if (!uc->in_use)
2594 break;
2595 }
2596
2597 if (val == ud->ch_count)
2598 return -EBUSY;
2599
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302600 ucc = &uc->config;
2601 ucc->remote_thread_id = args->args[0];
2602 if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
2603 ucc->dir = DMA_MEM_TO_DEV;
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302604 else
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302605 ucc->dir = DMA_DEV_TO_MEM;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302606
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302607 ep_config = psil_get_ep_config(ucc->remote_thread_id);
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302608 if (IS_ERR(ep_config)) {
2609 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302610 uc->config.remote_thread_id);
2611 ucc->dir = DMA_MEM_TO_MEM;
2612 ucc->remote_thread_id = -1;
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302613 return false;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302614 }
2615
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302616 ucc->pkt_mode = ep_config->pkt_mode;
2617 ucc->channel_tpl = ep_config->channel_tpl;
2618 ucc->notdpkt = ep_config->notdpkt;
2619 ucc->ep_type = ep_config->ep_type;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302620
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302621 if (ud->match_data->type == DMA_TYPE_PKTDMA &&
2622 ep_config->mapped_channel_id >= 0) {
2623 ucc->mapped_channel_id = ep_config->mapped_channel_id;
2624 ucc->default_flow_id = ep_config->default_flow_id;
2625 } else {
2626 ucc->mapped_channel_id = -1;
2627 ucc->default_flow_id = -1;
2628 }
2629
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302630 ucc->needs_epib = ep_config->needs_epib;
2631 ucc->psd_size = ep_config->psd_size;
2632 ucc->metadata_size = (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + ucc->psd_size;
2633
2634 ucc->hdesc_size = cppi5_hdesc_calc_size(ucc->needs_epib,
2635 ucc->psd_size, 0);
2636 ucc->hdesc_size = ALIGN(ucc->hdesc_size, ARCH_DMA_MINALIGN);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302637
2638 dma->id = uc->id;
2639 pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302640 dma->id, ucc->needs_epib,
2641 ucc->psd_size, ucc->metadata_size,
2642 ucc->remote_thread_id);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302643
2644 return 0;
2645}
2646
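/*
 * udma_prepare_rcv_buf() - DMA uclass .prepare_rcv_buf op.
 *
 * Initialises the next free RX host descriptor for the supplied buffer,
 * flushes it from the data cache and pushes it to the free descriptor
 * ring so the hardware can fill it.  At most UDMA_RX_DESC_NUM buffers
 * may be outstanding at any time.
 */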
2647int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
2648{
2649 struct udma_dev *ud = dev_get_priv(dma->dev);
2650 struct cppi5_host_desc_t *desc_rx;
2651 dma_addr_t dma_dst;
2652 struct udma_chan *uc;
2653 u32 desc_num;
2654
2655 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2656 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2657 return -EINVAL;
2658 }
2659 uc = &ud->channels[dma->id];
2660
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302661 if (uc->config.dir != DMA_DEV_TO_MEM)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302662 return -EINVAL;
2663
2664 if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
2665 return -EINVAL;
2666
2667 desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302668 desc_rx = uc->desc_rx + (desc_num * uc->config.hdesc_size);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302669 dma_dst = (dma_addr_t)dst;
2670
2671 cppi5_hdesc_reset_hbdesc(desc_rx);
2672
2673 cppi5_hdesc_init(desc_rx,
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302674 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
2675 uc->config.psd_size);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302676 cppi5_hdesc_set_pktlen(desc_rx, size);
2677 cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);
2678
Vignesh Raghavendrace431412019-12-09 10:25:39 +05302679 flush_dcache_range((unsigned long)desc_rx,
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302680 ALIGN((unsigned long)desc_rx + uc->config.hdesc_size,
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05302681 ARCH_DMA_MINALIGN));
Vignesh R3a9dbf32019-02-05 17:31:24 +05302682
Vignesh Raghavendra2db3b282020-07-06 13:26:26 +05302683 udma_push_to_ring(uc->rflow->fd_ring, desc_rx);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302684
2685 uc->num_rx_bufs++;
2686 uc->desc_rx_cur++;
2687
2688 return 0;
2689}
2690
Vignesh Raghavendra39349892019-12-04 22:17:21 +05302691static int udma_get_cfg(struct dma *dma, u32 id, void **data)
2692{
2693 struct udma_dev *ud = dev_get_priv(dma->dev);
2694 struct udma_chan *uc;
2695
2696 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2697 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2698 return -EINVAL;
2699 }
2700
2701 switch (id) {
2702 case TI_UDMA_CHAN_PRIV_INFO:
2703 uc = &ud->channels[dma->id];
2704 *data = &uc->cfg_data;
2705 return 0;
2706 }
2707
2708 return -EINVAL;
2709}
2710
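/*
 * DMA uclass operations exported by this driver.  A rough usage sketch
 * (helper names from include/dma.h, channel name for illustration only):
 *
 *	struct dma dma_tx;
 *
 *	dma_get_by_name(dev, "tx0", &dma_tx);
 *	dma_enable(&dma_tx);
 *	dma_send(&dma_tx, buf, len, &packet_data);
 *	...
 *	dma_disable(&dma_tx);
 *	dma_free(&dma_tx);
 */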
Vignesh R3a9dbf32019-02-05 17:31:24 +05302711static const struct dma_ops udma_ops = {
2712 .transfer = udma_transfer,
2713 .of_xlate = udma_of_xlate,
2714 .request = udma_request,
Simon Glass75c0ad62020-02-03 07:35:55 -07002715 .rfree = udma_rfree,
Vignesh R3a9dbf32019-02-05 17:31:24 +05302716 .enable = udma_enable,
2717 .disable = udma_disable,
2718 .send = udma_send,
2719 .receive = udma_receive,
2720 .prepare_rcv_buf = udma_prepare_rcv_buf,
Vignesh Raghavendra39349892019-12-04 22:17:21 +05302721 .get_cfg = udma_get_cfg,
Vignesh R3a9dbf32019-02-05 17:31:24 +05302722};
2723
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302724static struct udma_match_data am654_main_data = {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302725 .type = DMA_TYPE_UDMA,
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302726 .psil_base = 0x1000,
2727 .enable_memcpy_support = true,
2728 .statictr_z_mask = GENMASK(11, 0),
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302729 .oes = {
2730 .udma_rchan = 0x200,
2731 },
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302732 .tpl_levels = 2,
2733 .level_start_idx = {
2734 [0] = 8, /* Normal channels */
2735 [1] = 0, /* High Throughput channels */
2736 },
2737};
2738
2739static struct udma_match_data am654_mcu_data = {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302740 .type = DMA_TYPE_UDMA,
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302741 .psil_base = 0x6000,
2742 .enable_memcpy_support = true,
2743 .statictr_z_mask = GENMASK(11, 0),
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302744 .oes = {
2745 .udma_rchan = 0x200,
2746 },
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302747 .tpl_levels = 2,
2748 .level_start_idx = {
2749 [0] = 2, /* Normal channels */
2750 [1] = 0, /* High Throughput channels */
2751 },
2752};
2753
2754static struct udma_match_data j721e_main_data = {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302755 .type = DMA_TYPE_UDMA,
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302756 .psil_base = 0x1000,
2757 .enable_memcpy_support = true,
2758 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2759 .statictr_z_mask = GENMASK(23, 0),
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302760 .oes = {
2761 .udma_rchan = 0x400,
2762 },
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302763 .tpl_levels = 3,
2764 .level_start_idx = {
2765 [0] = 16, /* Normal channels */
2766 [1] = 4, /* High Throughput channels */
2767 [2] = 0, /* Ultra High Throughput channels */
2768 },
2769};
2770
2771static struct udma_match_data j721e_mcu_data = {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302772 .type = DMA_TYPE_UDMA,
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302773 .psil_base = 0x6000,
2774 .enable_memcpy_support = true,
2775 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2776 .statictr_z_mask = GENMASK(23, 0),
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302777 .oes = {
2778 .udma_rchan = 0x400,
2779 },
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302780 .tpl_levels = 2,
2781 .level_start_idx = {
2782 [0] = 2, /* Normal channels */
2783 [1] = 0, /* High Throughput channels */
2784 },
2785};
2786
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302787static struct udma_match_data am64_bcdma_data = {
2788 .type = DMA_TYPE_BCDMA,
2789 .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
2790 .enable_memcpy_support = true, /* Supported via bchan */
2791 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2792 .statictr_z_mask = GENMASK(23, 0),
2793 .oes = {
2794 .bcdma_bchan_data = 0x2200,
2795 .bcdma_bchan_ring = 0x2400,
2796 .bcdma_tchan_data = 0x2800,
2797 .bcdma_tchan_ring = 0x2a00,
2798 .bcdma_rchan_data = 0x2e00,
2799 .bcdma_rchan_ring = 0x3000,
2800 },
2801 /* No throughput levels */
2802};
2803
2804static struct udma_match_data am64_pktdma_data = {
2805 .type = DMA_TYPE_PKTDMA,
2806 .psil_base = 0x1000,
2807 .enable_memcpy_support = false,
2808 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2809 .statictr_z_mask = GENMASK(23, 0),
2810 .oes = {
2811 .pktdma_tchan_flow = 0x1200,
2812 .pktdma_rchan_flow = 0x1600,
2813 },
2814 /* No throughput levels */
2815};
2816
Vignesh R3a9dbf32019-02-05 17:31:24 +05302817static const struct udevice_id udma_ids[] = {
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302818 {
2819 .compatible = "ti,am654-navss-main-udmap",
2820 .data = (ulong)&am654_main_data,
2821 },
2822 {
2823 .compatible = "ti,am654-navss-mcu-udmap",
2824 .data = (ulong)&am654_mcu_data,
2825 }, {
2826 .compatible = "ti,j721e-navss-main-udmap",
2827 .data = (ulong)&j721e_main_data,
2828 }, {
2829 .compatible = "ti,j721e-navss-mcu-udmap",
2830 .data = (ulong)&j721e_mcu_data,
2831 },
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302832 {
2833 .compatible = "ti,am64-dmss-bcdma",
2834 .data = (ulong)&am64_bcdma_data,
2835 },
2836 {
2837 .compatible = "ti,am64-dmss-pktdma",
2838 .data = (ulong)&am64_pktdma_data,
2839 },
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302840 { /* Sentinel */ },
Vignesh R3a9dbf32019-02-05 17:31:24 +05302841};
2842
2843U_BOOT_DRIVER(ti_edma3) = {
2844 .name = "ti-udma",
2845 .id = UCLASS_DMA,
2846 .of_match = udma_ids,
2847 .ops = &udma_ops,
2848 .probe = udma_probe,
Simon Glass8a2b47f2020-12-03 16:55:17 -07002849 .priv_auto = sizeof(struct udma_dev),
Vignesh R3a9dbf32019-02-05 17:31:24 +05302850};