// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#define pr_fmt(fmt) "udma: " fmt

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <malloc.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <dm/of_access.h>
#include <dma.h>
#include <dma-uclass.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/printk.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/cppi5.h>
#include <linux/soc/ti/ti-udma.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "k3-udma-hwdef.h"
#include "k3-psil-priv.h"

#define K3_UDMA_MAX_RFLOWS 1024

struct udma_chan;

enum k3_dma_type {
	DMA_TYPE_UDMA = 0,
	DMA_TYPE_BCDMA,
	DMA_TYPE_PKTDMA,
};

enum udma_mmr {
	MMR_GCFG = 0,
	MMR_BCHANRT,
	MMR_RCHANRT,
	MMR_TCHANRT,
	MMR_RCHAN,
	MMR_TCHAN,
	MMR_RFLOW,
	MMR_LAST,
};

static const char * const mmr_names[] = {
	[MMR_GCFG] = "gcfg",
	[MMR_BCHANRT] = "bchanrt",
	[MMR_RCHANRT] = "rchanrt",
	[MMR_TCHANRT] = "tchanrt",
	[MMR_RCHAN] = "rchan",
	[MMR_TCHAN] = "tchan",
	[MMR_RFLOW] = "rflow",
};

struct udma_tchan {
	void __iomem *reg_chan;
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *t_ring; /* Transmit ring */
	struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
	int tflow_id; /* applicable only for PKTDMA */

};

#define udma_bchan udma_tchan

struct udma_rflow {
	void __iomem *reg_rflow;
	int id;
	struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
	struct k3_nav_ring *r_ring; /* Receive ring */
};

struct udma_rchan {
	void __iomem *reg_chan;
	void __iomem *reg_rt;

	int id;
};

struct udma_oes_offsets {
	/* K3 UDMA Output Event Offset */
	u32 udma_rchan;

	/* BCDMA Output Event Offsets */
	u32 bcdma_bchan_data;
	u32 bcdma_bchan_ring;
	u32 bcdma_tchan_data;
	u32 bcdma_tchan_ring;
	u32 bcdma_rchan_data;
	u32 bcdma_rchan_ring;

	/* PKTDMA Output Event Offsets */
	u32 pktdma_tchan_flow;
	u32 pktdma_rchan_flow;
};

#define UDMA_FLAG_PDMA_ACC32	BIT(0)
#define UDMA_FLAG_PDMA_BURST	BIT(1)
#define UDMA_FLAG_TDTYPE	BIT(2)

struct udma_match_data {
	enum k3_dma_type type;
	u32 psil_base;
	bool enable_memcpy_support;
	u32 flags;
	u32 statictr_z_mask;
	struct udma_oes_offsets oes;

	u8 tpl_levels;
	u32 level_start_idx[];
};

enum udma_rm_range {
	RM_RANGE_BCHAN = 0,
	RM_RANGE_TCHAN,
	RM_RANGE_RCHAN,
	RM_RANGE_RFLOW,
	RM_RANGE_TFLOW,
	RM_RANGE_LAST,
};

struct udma_tisci_rm {
	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
	u32 tisci_dev_id;

	/* tisci information for PSI-L thread pairing/unpairing */
	const struct ti_sci_rm_psil_ops *tisci_psil_ops;
	u32 tisci_navss_dev_id;

	struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
};

struct udma_dev {
	struct udevice *dev;
	void __iomem *mmrs[MMR_LAST];

	struct udma_tisci_rm tisci_rm;
	struct k3_nav_ringacc *ringacc;

	u32 features;

	int bchan_cnt;
	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	int tflow_cnt;
	unsigned long *bchan_map;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_map;
	unsigned long *rflow_map_reserved;
	unsigned long *tflow_map;

	struct udma_bchan *bchans;
	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_match_data *match_data;

	struct udma_chan *channels;
	u32 psil_base;

	u32 ch_count;
};

struct udma_chan_config {
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
	int remote_thread_id;
	u32 atype;
	u32 src_thread;
	u32 dst_thread;
	enum psil_endpoint_type ep_type;
	enum udma_tp_level channel_tpl; /* Channel Throughput Level */

	/* PKTDMA mapped channel */
	int mapped_channel_id;
	/* PKTDMA default tflow or rflow for mapped channel */
	int default_flow_id;

	enum dma_direction dir;

	unsigned int pkt_mode:1; /* TR or packet */
	unsigned int needs_epib:1; /* EPIB is needed for the communication or not */
	unsigned int enable_acc32:1;
	unsigned int enable_burst:1;
	unsigned int notdpkt:1; /* Suppress sending TDC packet */
};

struct udma_chan {
	struct udma_dev *ud;
	char name[20];

	struct udma_bchan *bchan;
	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	struct ti_udma_drv_chan_cfg_data cfg_data;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	struct udma_chan_config config;

	u32 id;

	struct cppi5_host_desc_t *desc_tx;
	bool in_use;
	void *desc_rx;
	u32 num_rx_bufs;
	u32 desc_rx_cur;

};

#define UDMA_CH_1000(ch)	(ch * 0x1000)
#define UDMA_CH_100(ch)		(ch * 0x100)
#define UDMA_CH_40(ch)		(ch * 0x40)

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM	PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM	4
#endif

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	u32 v;

	v = __raw_readl(base + reg);
	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
	return v;
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
	__raw_writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = udma_read(base, reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		udma_write(base, reg, tmp);
}

/* TCHANRT */
static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
{
	if (!tchan)
		return 0;
	return udma_read(tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_tchan *tchan,
				      int reg, u32 val)
{
	if (!tchan)
		return;
	udma_write(tchan->reg_rt, reg, val);
}

/* RCHANRT */
static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
{
	if (!rchan)
		return 0;
	return udma_read(rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_rchan *rchan,
				      int reg, u32 val)
{
	if (!rchan)
		return;
	udma_write(rchan->reg_rt, reg, val);
}

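/*
 * PSI-L thread pairing/unpairing is delegated to system firmware through
 * the TI SCI PSI-L ops: the destination thread ID is tagged with
 * UDMA_PSIL_DST_THREAD_ID_OFFSET before the request is sent, so firmware
 * can tell source and destination threads apart.
 */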
static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
				       u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
					 u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}

static inline char *udma_get_dir_text(enum dma_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM:
		return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV:
		return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM:
		return "MEM_TO_MEM";
	case DMA_DEV_TO_DEV:
		return "DEV_TO_DEV";
	default:
		break;
	}

	return "invalid";
}

#include "k3-udma-u-boot.c"

static void udma_reset_uchan(struct udma_chan *uc)
{
	memset(&uc->config, 0, sizeof(uc->config));
	uc->config.remote_thread_id = -1;
	uc->config.mapped_channel_id = -1;
	uc->config.default_flow_id = -1;
}

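/*
 * A channel counts as running if the enable bit is set in the real-time
 * control register of the half (TX and/or RX) that its direction uses.
 */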
static inline bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, rrt_ctl,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, trt_ctl,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		break;
	default:
		break;
	}

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_nav_ring *ring = NULL;
	int ret = -ENOENT;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring && k3_nav_ringacc_ring_get_occ(ring))
		ret = k3_nav_ringacc_ring_pop(ring, addr);

	return ret;
}

static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_nav_ring *ring1 = NULL;
	struct k3_nav_ring *ring2 = NULL;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring1 = uc->rflow->fd_ring;
		ring2 = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring1)
		k3_nav_ringacc_ring_reset_dma(ring1, k3_nav_ringacc_ring_get_occ(ring1));
	if (ring2)
		k3_nav_ringacc_ring_reset(ring2);
}

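/*
 * Clear the channel's real-time counters by writing back the values that
 * were just read; the software byte counter (uc->bcnt) is reset along
 * with them.
 */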
static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

		if (!uc->bchan) {
			val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
			udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
		}
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
	}

	uc->bcnt = 0;
}

static inline int udma_stop_hard(struct udma_chan *uc)
{
	pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

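/*
 * Bring a channel up cleanly: clear any pending teardown, zero the
 * counters, then enable the local real-time control register and, for
 * slave transfers, the remote peer as well.
 */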
static int udma_start(struct udma_chan *uc)
{
	/* Channel is already running, no need to proceed further */
	if (udma_is_chan_running(uc))
		goto out;

	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->config.dir));

	/* Make sure that we clear the teardown bit, if it is set */
	udma_stop_hard(uc);

	/* Reset all counters */
	udma_reset_counters(uc);

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_CTL_REG),
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		/* Enable remote */
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_CTL_REG),
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	default:
		return -EINVAL;
	}

	pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
out:
	return 0;
}

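/*
 * Graceful stop helpers: teardown is requested through the RT control
 * (TX) or peer RT enable (RX) register and, when sync is requested, the
 * enable bit is polled for roughly a thousand 1us iterations before
 * giving up with a console warning.
 */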
static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
			   UDMA_CHAN_RT_CTL_EN |
			   UDMA_CHAN_RT_CTL_TDOWN);

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf(" %s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
			   UDMA_PEER_RT_EN_ENABLE |
			   UDMA_PEER_RT_EN_TEARDOWN);

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf("%s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline int udma_stop(struct udma_chan *uc)
{
	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->config.dir));

	udma_reset_counters(uc);
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_stop_dev2mem(uc, true);
		break;
	case DMA_MEM_TO_DEV:
		udma_stop_mem2dev(uc, true);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

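/*
 * Busy-wait until a completed descriptor can be popped from the channel's
 * completion ring; a dot is printed roughly every million polls as a
 * liveness indicator.
 */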
static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
{
	int i = 1;

	while (udma_pop_from_ring(uc, paddr)) {
		udelay(1);
		if (!(i % 1000000))
			printf(".");
		i++;
	}
}

static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
{
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	if (id >= 0) {
		if (test_bit(id, ud->rflow_map)) {
			dev_err(ud->dev, "rflow%d is in use\n", id);
			return ERR_PTR(-ENOENT);
		}
	} else {
		bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
			  ud->rflow_cnt);

		id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
		if (id >= ud->rflow_cnt)
			return ERR_PTR(-ENOENT);
	}

	__set_bit(id, ud->rflow_map);
	return &ud->rflows[id];
}

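/*
 * UDMA_RESERVE_RESOURCE() expands to __udma_reserve_tchan() and
 * __udma_reserve_rchan(): reserve a specific channel when a valid id is
 * passed, otherwise grab the first free one from the corresponding bitmap.
 */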
#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, #res "%d is in use\n", id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		id = find_first_zero_bit(ud->res##_map, ud->res##_cnt);	\
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	__set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}

UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);

static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	uc->tchan = __udma_reserve_tchan(ud, uc->config.mapped_channel_id);
	if (IS_ERR(uc->tchan))
		return PTR_ERR(uc->tchan);

	if (ud->tflow_cnt) {
		int tflow_id;

		/* Only PKTDMA has support for tx flows */
		if (uc->config.default_flow_id >= 0)
			tflow_id = uc->config.default_flow_id;
		else
			tflow_id = uc->tchan->id;

		if (test_bit(tflow_id, ud->tflow_map)) {
			dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
			__clear_bit(uc->tchan->id, ud->tchan_map);
			uc->tchan = NULL;
			return -ENOENT;
		}

		uc->tchan->tflow_id = tflow_id;
		__set_bit(tflow_id, ud->tflow_map);
	} else {
		uc->tchan->tflow_id = -1;
	}

	pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	uc->rchan = __udma_reserve_rchan(ud, uc->config.mapped_channel_id);
	if (IS_ERR(uc->rchan))
		return PTR_ERR(uc->rchan);

	pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);

	return 0;
}

static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	for (chan_id = 0; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	__set_bit(chan_id, ud->tchan_map);
	__set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);

	return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	if (!uc->rchan)
		dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);

	uc->rflow = __udma_reserve_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow))
		return PTR_ERR(uc->rflow);

	pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
	return 0;
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		__clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		__clear_bit(uc->tchan->id, ud->tchan_map);
		if (uc->tchan->tflow_id >= 0)
			__clear_bit(uc->tchan->tflow_id, ud->tflow_map);
		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__clear_bit(uc->rflow->id, ud->rflow_map);
		uc->rflow = NULL;
	}
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
						&uc->tchan->t_ring,
						&uc->tchan->tc_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_tx_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_tx_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	if (uc->rflow) {
		k3_nav_ringacc_ring_free(uc->rflow->fd_ring);
		k3_nav_ringacc_ring_free(uc->rflow->r_ring);
		uc->rflow->fd_ring = NULL;
		uc->rflow->r_ring = NULL;

		udma_put_rflow(uc);
	}

	udma_put_rchan(uc);
}

static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	struct udma_rflow *rflow;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return 0;

	if (uc->config.default_flow_id >= 0)
		ret = udma_get_rflow(uc, uc->config.default_flow_id);
	else
		ret = udma_get_rflow(uc, uc->rchan->id);

	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	rflow = uc->rflow;
	if (ud->tflow_cnt) {
		fd_ring_id = ud->tflow_cnt + rflow->id;
	} else {
		fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
			     uc->rchan->id;
	}

	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
						&rflow->fd_ring, &rflow->r_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_rx_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(rflow->r_ring);
	rflow->r_ring = NULL;
	k3_nav_ringacc_ring_free(rflow->fd_ring);
	rflow->fd_ring = NULL;
err_rx_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}

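/*
 * Channel configuration is requested from system firmware over TI SCI
 * (UDMAP tx/rx channel and rx flow config messages). With CONFIG_K3_DM_FW
 * the firewall setup still comes from firmware, but the channel cfg
 * registers are additionally programmed locally via
 * udma_alloc_tchan_raw()/udma_alloc_rchan_raw().
 */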
static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->config.pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->tchan->id;
	req.tx_chan_type = mode;
	if (uc->config.dir == DMA_MEM_TO_MEM)
		req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	else
		req.tx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
							  uc->config.psd_size,
							  0) >> 2;
	req.txcq_qnum = tc_ring;

	ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);
		return ret;
	}

	/*
	 * Above TI SCI call handles firewall configuration, cfg
	 * register configuration still has to be done locally in
	 * absence of RM services.
	 */
	if (IS_ENABLED(CONFIG_K3_DM_FW))
		udma_alloc_tchan_raw(uc);

	return 0;
}

static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int fd_ring = k3_nav_ringacc_get_ring_id(uc->rflow->fd_ring);
	int rx_ring = k3_nav_ringacc_get_ring_id(uc->rflow->r_ring);
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->config.pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->rchan->id;
	req.rx_chan_type = mode;
	if (uc->config.dir == DMA_MEM_TO_MEM) {
		req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		req.rxcq_qnum = tc_ring;
	} else {
		req.rx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
							  uc->config.psd_size,
							  0) >> 2;
		req.rxcq_qnum = rx_ring;
	}
	if (ud->match_data->type == DMA_TYPE_UDMA &&
	    uc->rflow->id != uc->rchan->id &&
	    uc->config.dir != DMA_MEM_TO_MEM) {
		req.flowid_start = uc->rflow->id;
		req.flowid_cnt = 1;
		req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
				    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
	}

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
			uc->rchan->id, ret);
		return ret;
	}
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return ret;

	flow_req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->config.needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;

	if (uc->config.psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;

	flow_req.rx_error_handling = 0;
	flow_req.rx_desc_type = 0;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = 2;
	flow_req.rx_src_tag_lo_sel = 4;
	flow_req.rx_dest_tag_hi_sel = 5;
	flow_req.rx_dest_tag_lo_sel = 4;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;
	flow_req.rx_ps_location = 0;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
						     &flow_req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
			uc->rchan->id, uc->rflow->id, ret);
		return ret;
	}

	/*
	 * Above TI SCI call handles firewall configuration, cfg
	 * register configuration still has to be done locally in
	 * absence of RM services.
	 */
	if (IS_ENABLED(CONFIG_K3_DM_FW))
		udma_alloc_rchan_raw(uc);

	return 0;
}

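/*
 * Allocate everything a channel needs for its direction (channel pair and
 * rings for MEM_TO_MEM, a tchan for MEM_TO_DEV, an rchan plus rflow for
 * DEV_TO_MEM), configure the channel(s) through TI SCI and finally pair
 * the PSI-L threads.
 */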
static int udma_alloc_chan_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	pr_debug("%s: chan:%d as %s\n",
		 __func__, uc->id, udma_get_dir_text(uc->config.dir));

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		uc->config.pkt_mode = false;
		ret = udma_get_chan_pair(uc);
		if (ret)
			return ret;

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= 0x8000;

		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;

		break;
	default:
		/* Cannot happen */
		pr_debug("%s: chan:%d invalid direction (%u)\n",
			 __func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* We have channel indexes and rings */
	if (uc->config.dir == DMA_MEM_TO_MEM) {
		ret = udma_alloc_tchan_sci_req(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rchan_sci_req(uc);
		if (ret)
			goto err_free_res;
	} else {
		/* Slave transfer */
		if (uc->config.dir == DMA_MEM_TO_DEV) {
			ret = udma_alloc_tchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		} else {
			ret = udma_alloc_rchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		}
	}

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			goto err_free_res;
		}
	}

	/* PSI-L pairing */
	ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
	if (ret) {
		dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
		goto err_free_res;
	}

	return 0;

err_free_res:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	uc->config.remote_thread_id = -1;
	return ret;
}

static void udma_free_chan_resources(struct udma_chan *uc)
{
	/* Hard reset UDMA channel */
	udma_stop_hard(uc);
	udma_reset_counters(uc);

	/* Release PSI-L pairing */
	udma_navss_psil_unpair(uc->ud, uc->config.src_thread, uc->config.dst_thread);

	/* Reset the rings for a new start */
	udma_reset_rings(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	uc->config.remote_thread_id = -1;
	uc->config.dir = DMA_MEM_TO_MEM;
}

static const char * const range_names[] = {
	[RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
	[RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
	[RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
	[RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
	[RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
};

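/*
 * Discover channel/flow counts from the capability registers in the gcfg
 * region (cap2 at 0x28, cap3 at 0x2c and, for PKTDMA, cap4 at 0x30), then
 * map the remaining register regions that are actually present.
 */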
static int udma_get_mmrs(struct udevice *dev)
{
	struct udma_dev *ud = dev_get_priv(dev);
	u32 cap2, cap3, cap4;
	int i;

	ud->mmrs[MMR_GCFG] = (uint32_t *)devfdt_get_addr_name(dev, mmr_names[MMR_GCFG]);
	if (!ud->mmrs[MMR_GCFG])
		return -EINVAL;

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ud->rflow_cnt = cap3 & 0x3fff;
		ud->tchan_cnt = cap2 & 0x1ff;
		ud->echan_cnt = (cap2 >> 9) & 0x1ff;
		ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
		break;
	case DMA_TYPE_BCDMA:
		ud->bchan_cnt = cap2 & 0x1ff;
		ud->tchan_cnt = (cap2 >> 9) & 0x1ff;
		ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
		break;
	case DMA_TYPE_PKTDMA:
		cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
		ud->tchan_cnt = cap2 & 0x1ff;
		ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
		ud->rflow_cnt = cap3 & 0x3fff;
		ud->tflow_cnt = cap4 & 0x3fff;
		break;
	default:
		return -EINVAL;
	}

	for (i = 1; i < MMR_LAST; i++) {
		if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
			continue;
		if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
			continue;
		if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
			continue;

		ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
							       mmr_names[i]);
		if (!ud->mmrs[i])
			return -EINVAL;
	}

	return 0;
}

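/*
 * Resource maps use a set bit for "not available": each map is filled and
 * then the ranges that TI SCI reports as assigned to this host are
 * cleared. When no range is provided the whole map is left clear, i.e.
 * every resource is usable.
 */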
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301337static int udma_setup_resources(struct udma_dev *ud)
1338{
1339 struct udevice *dev = ud->dev;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301340 int i;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301341 struct ti_sci_resource_desc *rm_desc;
1342 struct ti_sci_resource *rm_res;
1343 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301344
1345 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1346 sizeof(unsigned long), GFP_KERNEL);
1347 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1348 GFP_KERNEL);
1349 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1350 sizeof(unsigned long), GFP_KERNEL);
1351 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1352 GFP_KERNEL);
1353 ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
1354 sizeof(unsigned long), GFP_KERNEL);
1355 ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
1356 sizeof(unsigned long),
1357 GFP_KERNEL);
1358 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
1359 GFP_KERNEL);
1360
1361 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
1362 !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
1363 !ud->rflows)
1364 return -ENOMEM;
1365
1366 /*
1367 * RX flows with the same Ids as RX channels are reserved to be used
1368 * as default flows if remote HW can't generate flow_ids. Those
1369 * RX flows can be requested only explicitly by id.
1370 */
1371 bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);
1372
1373 /* Get resource ranges from tisci */
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301374 for (i = 0; i < RM_RANGE_LAST; i++) {
1375 if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
1376 continue;
1377
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301378 tisci_rm->rm_ranges[i] =
1379 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1380 tisci_rm->tisci_dev_id,
1381 (char *)range_names[i]);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301382 }
Vignesh R3a9dbf32019-02-05 17:31:24 +05301383
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301384 /* tchan ranges */
1385 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1386 if (IS_ERR(rm_res)) {
1387 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1388 } else {
1389 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1390 for (i = 0; i < rm_res->sets; i++) {
1391 rm_desc = &rm_res->desc[i];
1392 bitmap_clear(ud->tchan_map, rm_desc->start,
1393 rm_desc->num);
1394 }
1395 }
1396
1397 /* rchan and matching default flow ranges */
1398 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1399 if (IS_ERR(rm_res)) {
1400 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1401 bitmap_zero(ud->rflow_map, ud->rchan_cnt);
1402 } else {
1403 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1404 bitmap_fill(ud->rflow_map, ud->rchan_cnt);
1405 for (i = 0; i < rm_res->sets; i++) {
1406 rm_desc = &rm_res->desc[i];
1407 bitmap_clear(ud->rchan_map, rm_desc->start,
1408 rm_desc->num);
1409 bitmap_clear(ud->rflow_map, rm_desc->start,
1410 rm_desc->num);
1411 }
1412 }
1413
1414 /* GP rflow ranges */
1415 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
1416 if (IS_ERR(rm_res)) {
1417 bitmap_clear(ud->rflow_map, ud->rchan_cnt,
1418 ud->rflow_cnt - ud->rchan_cnt);
1419 } else {
1420 bitmap_set(ud->rflow_map, ud->rchan_cnt,
1421 ud->rflow_cnt - ud->rchan_cnt);
1422 for (i = 0; i < rm_res->sets; i++) {
1423 rm_desc = &rm_res->desc[i];
1424 bitmap_clear(ud->rflow_map, rm_desc->start,
1425 rm_desc->num);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301426 }
1427 }
1428
1429 return 0;
1430}
1431
1432static int bcdma_setup_resources(struct udma_dev *ud)
1433{
1434 int i;
1435 struct udevice *dev = ud->dev;
1436 struct ti_sci_resource_desc *rm_desc;
1437 struct ti_sci_resource *rm_res;
1438 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1439
1440 ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
1441 sizeof(unsigned long), GFP_KERNEL);
1442 ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
1443 GFP_KERNEL);
1444 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1445 sizeof(unsigned long), GFP_KERNEL);
1446 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1447 GFP_KERNEL);
1448 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1449 sizeof(unsigned long), GFP_KERNEL);
1450 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1451 GFP_KERNEL);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301452 ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
1453 GFP_KERNEL);
1454
1455 if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301456 !ud->bchans || !ud->tchans || !ud->rchans ||
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301457 !ud->rflows)
1458 return -ENOMEM;
1459
1460 /* Get resource ranges from tisci */
1461 for (i = 0; i < RM_RANGE_LAST; i++) {
1462 if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
1463 continue;
1464
1465 tisci_rm->rm_ranges[i] =
1466 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1467 tisci_rm->tisci_dev_id,
1468 (char *)range_names[i]);
1469 }
1470
1471 /* bchan ranges */
1472 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
1473 if (IS_ERR(rm_res)) {
1474 bitmap_zero(ud->bchan_map, ud->bchan_cnt);
1475 } else {
1476 bitmap_fill(ud->bchan_map, ud->bchan_cnt);
1477 for (i = 0; i < rm_res->sets; i++) {
1478 rm_desc = &rm_res->desc[i];
1479 bitmap_clear(ud->bchan_map, rm_desc->start,
1480 rm_desc->num);
1481 dev_dbg(dev, "ti-sci-res: bchan: %d:%d\n",
1482 rm_desc->start, rm_desc->num);
1483 }
1484 }
1485
1486 /* tchan ranges */
1487 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1488 if (IS_ERR(rm_res)) {
1489 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1490 } else {
1491 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1492 for (i = 0; i < rm_res->sets; i++) {
1493 rm_desc = &rm_res->desc[i];
1494 bitmap_clear(ud->tchan_map, rm_desc->start,
1495 rm_desc->num);
1496 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
1497 rm_desc->start, rm_desc->num);
1498 }
1499 }
1500
1501 /* rchan ranges */
1502 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1503 if (IS_ERR(rm_res)) {
1504 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1505 } else {
1506 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1507 for (i = 0; i < rm_res->sets; i++) {
1508 rm_desc = &rm_res->desc[i];
1509 bitmap_clear(ud->rchan_map, rm_desc->start,
1510 rm_desc->num);
1511 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
1512 rm_desc->start, rm_desc->num);
1513 }
1514 }
1515
1516 return 0;
1517}
1518
1519static int pktdma_setup_resources(struct udma_dev *ud)
1520{
1521 int i;
1522 struct udevice *dev = ud->dev;
1523 struct ti_sci_resource *rm_res;
1524 struct ti_sci_resource_desc *rm_desc;
1525 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1526
1527 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1528 sizeof(unsigned long), GFP_KERNEL);
1529 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1530 GFP_KERNEL);
1531 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1532 sizeof(unsigned long), GFP_KERNEL);
1533 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1534 GFP_KERNEL);
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301535 ud->rflow_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
1536 sizeof(unsigned long),
1537 GFP_KERNEL);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301538 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
1539 GFP_KERNEL);
1540 ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
1541 sizeof(unsigned long), GFP_KERNEL);
1542
1543 if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301544 !ud->rchans || !ud->rflows || !ud->rflow_map)
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301545 return -ENOMEM;
1546
1547 /* Get resource ranges from tisci */
1548 for (i = 0; i < RM_RANGE_LAST; i++) {
1549 if (i == RM_RANGE_BCHAN)
1550 continue;
1551
1552 tisci_rm->rm_ranges[i] =
1553 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1554 tisci_rm->tisci_dev_id,
1555 (char *)range_names[i]);
1556 }
1557
1558 /* tchan ranges */
1559 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1560 if (IS_ERR(rm_res)) {
1561 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1562 } else {
1563 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1564 for (i = 0; i < rm_res->sets; i++) {
1565 rm_desc = &rm_res->desc[i];
1566 bitmap_clear(ud->tchan_map, rm_desc->start,
1567 rm_desc->num);
1568 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
1569 rm_desc->start, rm_desc->num);
1570 }
1571 }
1572
1573 /* rchan ranges */
1574 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1575 if (IS_ERR(rm_res)) {
1576 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1577 } else {
1578 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1579 for (i = 0; i < rm_res->sets; i++) {
1580 rm_desc = &rm_res->desc[i];
1581 bitmap_clear(ud->rchan_map, rm_desc->start,
1582 rm_desc->num);
1583 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
1584 rm_desc->start, rm_desc->num);
1585 }
1586 }
1587
1588 /* rflow ranges */
1589 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
1590 if (IS_ERR(rm_res)) {
1591 /* all rflows are assigned exclusively to Linux */
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301592 bitmap_zero(ud->rflow_map, ud->rflow_cnt);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301593 } else {
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301594 bitmap_fill(ud->rflow_map, ud->rflow_cnt);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301595 for (i = 0; i < rm_res->sets; i++) {
1596 rm_desc = &rm_res->desc[i];
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301597 bitmap_clear(ud->rflow_map, rm_desc->start,
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301598 rm_desc->num);
1599 dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
1600 rm_desc->start, rm_desc->num);
1601 }
1602 }
1603
1604 /* tflow ranges */
1605 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
1606 if (IS_ERR(rm_res)) {
1607 /* all tflows are assigned exclusively to Linux */
1608 bitmap_zero(ud->tflow_map, ud->tflow_cnt);
1609 } else {
1610 bitmap_fill(ud->tflow_map, ud->tflow_cnt);
1611 for (i = 0; i < rm_res->sets; i++) {
1612 rm_desc = &rm_res->desc[i];
1613 bitmap_clear(ud->tflow_map, rm_desc->start,
1614 rm_desc->num);
1615 dev_dbg(dev, "ti-sci-res: tflow: %d:%d\n",
1616 rm_desc->start, rm_desc->num);
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301617 }
1618 }
1619
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301620 return 0;
1621}
1622
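/*
 * setup_resources() - dispatch to the UDMA/BCDMA/PKTDMA specific resource
 * setup and return the number of channels this instance can use (total
 * bchan + tchan + rchan count minus the entries still marked reserved in
 * the bitmaps), or a negative error code if nothing is available.
 */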
1623static int setup_resources(struct udma_dev *ud)
1624{
1625 struct udevice *dev = ud->dev;
1626 int ch_count, ret;
1627
1628 switch (ud->match_data->type) {
1629 case DMA_TYPE_UDMA:
1630 ret = udma_setup_resources(ud);
1631 break;
1632 case DMA_TYPE_BCDMA:
1633 ret = bcdma_setup_resources(ud);
1634 break;
1635 case DMA_TYPE_PKTDMA:
1636 ret = pktdma_setup_resources(ud);
1637 break;
1638 default:
1639 return -EINVAL;
1640 }
1641
1642 if (ret)
1643 return ret;
1644
1645 ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
1646 if (ud->bchan_cnt)
1647 ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301648 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
1649 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
1650 if (!ch_count)
1651 return -ENODEV;
1652
1653 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
1654 GFP_KERNEL);
1655 if (!ud->channels)
1656 return -ENOMEM;
1657
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301658 switch (ud->match_data->type) {
1659 case DMA_TYPE_UDMA:
1660 dev_dbg(dev,
1661 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
1662 ch_count,
1663 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1664 ud->tchan_cnt),
1665 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1666 ud->rchan_cnt),
1667 ud->rflow_cnt - bitmap_weight(ud->rflow_map,
1668 ud->rflow_cnt));
1669 break;
1670 case DMA_TYPE_BCDMA:
1671 dev_dbg(dev,
1672 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
1673 ch_count,
1674 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
1675 ud->bchan_cnt),
1676 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1677 ud->tchan_cnt),
1678 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1679 ud->rchan_cnt));
1680 break;
1681 case DMA_TYPE_PKTDMA:
1682 dev_dbg(dev,
1683 "Channels: %d (tchan: %u, rchan: %u)\n",
1684 ch_count,
1685 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1686 ud->tchan_cnt),
1687 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1688 ud->rchan_cnt));
1689 break;
1690 default:
1691 break;
1692 }
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301693
1694 return ch_count;
1695}
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301696
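/*
 * udma_probe() - driver probe: map the MMR regions, look up the TISCI
 * handle and device IDs from the DT, set up the ring accelerator (the
 * shared NAVSS ringacc for UDMA, dedicated DMA rings for BCDMA/PKTDMA),
 * enumerate the channel resources and initialise the per-channel state.
 * Channel 0 is marked in_use so it stays reserved for the memcpy path
 * used by udma_transfer().
 */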
Vignesh R3a9dbf32019-02-05 17:31:24 +05301697static int udma_probe(struct udevice *dev)
1698{
1699 struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
1700 struct udma_dev *ud = dev_get_priv(dev);
1701 int i, ret;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301702 struct udevice *tmp;
1703 struct udevice *tisci_dev = NULL;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301704 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1705 ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));
1706
Vignesh R3a9dbf32019-02-05 17:31:24 +05301707
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301708 ud->match_data = (void *)dev_get_driver_data(dev);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301709 ret = udma_get_mmrs(dev);
1710 if (ret)
1711 return ret;
1712
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05301713 ud->psil_base = ud->match_data->psil_base;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301714
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301715 ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
1716 "ti,sci", &tisci_dev);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301717 if (ret) {
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301718 debug("Failed to get TISCI phandle (%d)\n", ret);
1719 tisci_rm->tisci = NULL;
1720 return -EINVAL;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301721 }
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301722 tisci_rm->tisci = (struct ti_sci_handle *)
1723 (ti_sci_get_handle_from_sysfw(tisci_dev));
Vignesh R3a9dbf32019-02-05 17:31:24 +05301724
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301725 tisci_rm->tisci_dev_id = -1;
1726 ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
1727 if (ret) {
1728 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
1729 return ret;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301730 }
1731
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301732 tisci_rm->tisci_navss_dev_id = -1;
1733 ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
1734 &tisci_rm->tisci_navss_dev_id);
1735 if (ret) {
1736 dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
1737 return ret;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301738 }
1739
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301740 tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
1741 tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301742
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301743 if (ud->match_data->type == DMA_TYPE_UDMA) {
1744 ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
1745 "ti,ringacc", &tmp);
1746 ud->ringacc = dev_get_priv(tmp);
1747 } else {
1748 struct k3_ringacc_init_data ring_init_data;
1749
1750 ring_init_data.tisci = ud->tisci_rm.tisci;
1751 ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
1752 if (ud->match_data->type == DMA_TYPE_BCDMA) {
1753 ring_init_data.num_rings = ud->bchan_cnt +
1754 ud->tchan_cnt +
1755 ud->rchan_cnt;
1756 } else {
1757 ring_init_data.num_rings = ud->rflow_cnt +
1758 ud->tflow_cnt;
1759 }
1760
1761 ud->ringacc = k3_ringacc_dmarings_init(dev, &ring_init_data);
1762 }
1763 if (IS_ERR(ud->ringacc))
1764 return PTR_ERR(ud->ringacc);
1765
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301766 ud->dev = dev;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301767 ud->ch_count = setup_resources(ud);
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301768 if (ud->ch_count <= 0)
1769 return ud->ch_count;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301770
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301771 for (i = 0; i < ud->bchan_cnt; i++) {
1772 struct udma_bchan *bchan = &ud->bchans[i];
1773
1774 bchan->id = i;
1775 bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
1776 }
Vignesh R3a9dbf32019-02-05 17:31:24 +05301777
Vignesh R3a9dbf32019-02-05 17:31:24 +05301778 for (i = 0; i < ud->tchan_cnt; i++) {
1779 struct udma_tchan *tchan = &ud->tchans[i];
1780
1781 tchan->id = i;
Vignesh Raghavendra27e72502021-06-07 19:47:53 +05301782 tchan->reg_chan = ud->mmrs[MMR_TCHAN] + UDMA_CH_100(i);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301783 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
1784 }
1785
1786 for (i = 0; i < ud->rchan_cnt; i++) {
1787 struct udma_rchan *rchan = &ud->rchans[i];
1788
1789 rchan->id = i;
Vignesh Raghavendra27e72502021-06-07 19:47:53 +05301790 rchan->reg_chan = ud->mmrs[MMR_RCHAN] + UDMA_CH_100(i);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301791 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
1792 }
1793
1794 for (i = 0; i < ud->rflow_cnt; i++) {
1795 struct udma_rflow *rflow = &ud->rflows[i];
1796
1797 rflow->id = i;
Vignesh Raghavendra27e72502021-06-07 19:47:53 +05301798 rflow->reg_rflow = ud->mmrs[MMR_RFLOW] + UDMA_CH_40(i);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301799 }
1800
1801 for (i = 0; i < ud->ch_count; i++) {
1802 struct udma_chan *uc = &ud->channels[i];
1803
1804 uc->ud = ud;
1805 uc->id = i;
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301806 uc->config.remote_thread_id = -1;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301807 uc->bchan = NULL;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301808 uc->tchan = NULL;
1809 uc->rchan = NULL;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301810 uc->config.mapped_channel_id = -1;
1811 uc->config.default_flow_id = -1;
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301812 uc->config.dir = DMA_MEM_TO_MEM;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301813		sprintf(uc->name, "UDMA chan%d", i);
1814 if (!i)
1815 uc->in_use = true;
1816 }
1817
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301818 pr_debug("%s(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
1819 dev->name,
Vignesh R3a9dbf32019-02-05 17:31:24 +05301820 udma_read(ud->mmrs[MMR_GCFG], 0),
1821 udma_read(ud->mmrs[MMR_GCFG], 0x20),
1822 udma_read(ud->mmrs[MMR_GCFG], 0x24),
1823 udma_read(ud->mmrs[MMR_GCFG], 0x28),
1824 udma_read(ud->mmrs[MMR_GCFG], 0x2c));
1825
1826 uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;
1827
1828 return ret;
1829}
1830
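/* Push a descriptor pointer, widened to a 64-bit ring element, onto a ring. */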
Vignesh Raghavendrafc7a33f2019-12-09 10:25:38 +05301831static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
1832{
1833 u64 addr = 0;
1834
1835 memcpy(&addr, &elem, sizeof(elem));
1836 return k3_nav_ringacc_ring_push(ring, &addr);
1837}
1838
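/*
 * udma_prep_dma_memcpy() - build a type-15 TR descriptor for a memcpy.
 * The per-TR counts are 16-bit, so transfers of SZ_64K or more are split
 * in two: tr0 moves tr0_cnt1 blocks of tr0_cnt0 bytes (SZ_64K minus a
 * power of two derived from the common src/dest alignment) and tr1 moves
 * the remaining tr1_cnt0 bytes. The finished descriptor is flushed from
 * the D-cache and pushed to the tchan transmit ring, with completion
 * routed back on the tc_ring.
 */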
Vignesh R3a9dbf32019-02-05 17:31:24 +05301839static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
1840 dma_addr_t src, size_t len)
1841{
1842 u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1843 struct cppi5_tr_type15_t *tr_req;
1844 int num_tr;
1845 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
1846 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
1847 unsigned long dummy;
1848 void *tr_desc;
1849 size_t desc_size;
1850
1851 if (len < SZ_64K) {
1852 num_tr = 1;
1853 tr0_cnt0 = len;
1854 tr0_cnt1 = 1;
1855 } else {
1856 unsigned long align_to = __ffs(src | dest);
1857
1858 if (align_to > 3)
1859 align_to = 3;
1860 /*
1861 * Keep simple: tr0: SZ_64K-alignment blocks,
1862 * tr1: the remaining
1863 */
1864 num_tr = 2;
1865 tr0_cnt0 = (SZ_64K - BIT(align_to));
1866 if (len / tr0_cnt0 >= SZ_64K) {
1867 dev_err(uc->ud->dev, "size %zu is not supported\n",
1868 len);
1869 return NULL;
1870 }
1871
1872 tr0_cnt1 = len / tr0_cnt0;
1873 tr1_cnt0 = len % tr0_cnt0;
1874 }
1875
1876 desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
1877 tr_desc = dma_alloc_coherent(desc_size, &dummy);
1878 if (!tr_desc)
1879 return NULL;
1880 memset(tr_desc, 0, desc_size);
1881
1882 cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
1883 cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
1884 cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);
1885
1886 tr_req = tr_desc + tr_size;
1887
1888 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
1889 CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
1890 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
1891
1892 tr_req[0].addr = src;
1893 tr_req[0].icnt0 = tr0_cnt0;
1894 tr_req[0].icnt1 = tr0_cnt1;
1895 tr_req[0].icnt2 = 1;
1896 tr_req[0].icnt3 = 1;
1897 tr_req[0].dim1 = tr0_cnt0;
1898
1899 tr_req[0].daddr = dest;
1900 tr_req[0].dicnt0 = tr0_cnt0;
1901 tr_req[0].dicnt1 = tr0_cnt1;
1902 tr_req[0].dicnt2 = 1;
1903 tr_req[0].dicnt3 = 1;
1904 tr_req[0].ddim1 = tr0_cnt0;
1905
1906 if (num_tr == 2) {
1907 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
1908 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
1909 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
1910
1911 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
1912 tr_req[1].icnt0 = tr1_cnt0;
1913 tr_req[1].icnt1 = 1;
1914 tr_req[1].icnt2 = 1;
1915 tr_req[1].icnt3 = 1;
1916
1917 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
1918 tr_req[1].dicnt0 = tr1_cnt0;
1919 tr_req[1].dicnt1 = 1;
1920 tr_req[1].dicnt2 = 1;
1921 tr_req[1].dicnt3 = 1;
1922 }
1923
1924 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
1925
Vignesh Raghavendrace431412019-12-09 10:25:39 +05301926 flush_dcache_range((unsigned long)tr_desc,
1927 ALIGN((unsigned long)tr_desc + desc_size,
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05301928 ARCH_DMA_MINALIGN));
Vignesh R3a9dbf32019-02-05 17:31:24 +05301929
Vignesh Raghavendrafc7a33f2019-12-09 10:25:38 +05301930 udma_push_to_ring(uc->tchan->t_ring, tr_desc);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301931
1932 return 0;
1933}
1934
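/*
 * Valid-parameter masks for the TISCI rm_udmap channel configuration
 * requests used below; they select which fields of the tx/rx channel
 * config messages the firmware should act on.
 */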
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301935#define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \
1936 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1937 TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
1938
1939#define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \
1940 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1941 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
1942
1943#define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \
1944 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
1945
1946#define TISCI_UDMA_TCHAN_VALID_PARAMS ( \
1947 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1948 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1949 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1950 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1951 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1952 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1953 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1954 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1955
1956#define TISCI_UDMA_RCHAN_VALID_PARAMS ( \
1957 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1958 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1959 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1960 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1961 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1962 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1963 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1964 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
1965 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1966
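/*
 * Configure a BCDMA block-copy channel through TISCI: the bchan is
 * programmed via the tx_ch_cfg call with the extended channel type set
 * to BCHAN.
 */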
1967static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
1968{
1969 struct udma_dev *ud = uc->ud;
1970 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1971 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1972 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1973 struct udma_bchan *bchan = uc->bchan;
1974 int ret = 0;
1975
1976 req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
1977 req_tx.nav_id = tisci_rm->tisci_dev_id;
1978 req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
1979 req_tx.index = bchan->id;
1980
1981 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1982 if (ret)
1983 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
1984
1985 return ret;
1986}
1987
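/*
 * Reserve a specific bchan (id >= 0) or the first free one (id < 0) in
 * bchan_map; returns ERR_PTR(-ENOENT) if the requested channel is busy or
 * none is left.
 */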
1988static struct udma_bchan *__bcdma_reserve_bchan(struct udma_dev *ud, int id)
1989{
1990 if (id >= 0) {
1991 if (test_bit(id, ud->bchan_map)) {
1992 dev_err(ud->dev, "bchan%d is in use\n", id);
1993 return ERR_PTR(-ENOENT);
1994 }
1995 } else {
1996 id = find_next_zero_bit(ud->bchan_map, ud->bchan_cnt, 0);
1997 if (id == ud->bchan_cnt)
1998 return ERR_PTR(-ENOENT);
1999 }
2000 __set_bit(id, ud->bchan_map);
2001 return &ud->bchans[id];
2002}
2003
2004static int bcdma_get_bchan(struct udma_chan *uc)
2005{
2006 struct udma_dev *ud = uc->ud;
2007
2008 if (uc->bchan) {
2009 dev_err(ud->dev, "chan%d: already have bchan%d allocated\n",
2010 uc->id, uc->bchan->id);
2011 return 0;
2012 }
2013
2014 uc->bchan = __bcdma_reserve_bchan(ud, -1);
2015 if (IS_ERR(uc->bchan))
2016 return PTR_ERR(uc->bchan);
2017
2018 uc->tchan = uc->bchan;
2019
2020 return 0;
2021}
2022
2023static void bcdma_put_bchan(struct udma_chan *uc)
2024{
2025 struct udma_dev *ud = uc->ud;
2026
2027 if (uc->bchan) {
2028 dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
2029 uc->bchan->id);
2030 __clear_bit(uc->bchan->id, ud->bchan_map);
2031 uc->bchan = NULL;
2032 uc->tchan = NULL;
2033 }
2034}
2035
2036static void bcdma_free_bchan_resources(struct udma_chan *uc)
2037{
2038 if (!uc->bchan)
2039 return;
2040
2041 k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
2042 k3_nav_ringacc_ring_free(uc->bchan->t_ring);
2043 uc->bchan->tc_ring = NULL;
2044 uc->bchan->t_ring = NULL;
2045
2046 bcdma_put_bchan(uc);
2047}
2048
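/*
 * Allocate a bchan for a MEM_TO_MEM channel, request its t_ring/tc_ring
 * pair and configure the transmit ring (16 elements of 8 bytes, RING mode).
 */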
2049static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
2050{
2051 struct k3_nav_ring_cfg ring_cfg;
2052 struct udma_dev *ud = uc->ud;
2053 int ret;
2054
2055 ret = bcdma_get_bchan(uc);
2056 if (ret)
2057 return ret;
2058
2059 ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
2060 &uc->bchan->t_ring,
2061 &uc->bchan->tc_ring);
2062 if (ret) {
2063 ret = -EBUSY;
2064 goto err_ring;
2065 }
2066
2067 memset(&ring_cfg, 0, sizeof(ring_cfg));
2068 ring_cfg.size = 16;
2069 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
2070 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
2071
2072 ret = k3_nav_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
2073 if (ret)
2074 goto err_ringcfg;
2075
2076 return 0;
2077
2078err_ringcfg:
2079 k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
2080 uc->bchan->tc_ring = NULL;
2081 k3_nav_ringacc_ring_free(uc->bchan->t_ring);
2082 uc->bchan->t_ring = NULL;
2083err_ring:
2084 bcdma_put_bchan(uc);
2085
2086 return ret;
2087}
2088
2089static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
2090{
2091 struct udma_dev *ud = uc->ud;
2092 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2093 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2094 struct udma_tchan *tchan = uc->tchan;
2095 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
2096 int ret = 0;
2097
2098 req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
2099 req_tx.nav_id = tisci_rm->tisci_dev_id;
2100 req_tx.index = tchan->id;
2101 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
2102 if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
2103 ud->match_data->flags & UDMA_FLAG_TDTYPE) {
2104 /* wait for peer to complete the teardown for PDMAs */
2105 req_tx.valid_params |=
2106 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
2107 req_tx.tx_tdtype = 1;
2108 }
2109
2110 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
2111 if (ret)
2112 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
2113
2114 return ret;
2115}
2116
2117#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
2118
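/*
 * Configure a PKTDMA RX channel and its default flow through TISCI. EPIB
 * and psinfo presence in the flow mirror the channel configuration; RX
 * error handling is left disabled.
 */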
2119static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
2120{
2121 struct udma_dev *ud = uc->ud;
2122 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2123 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2124 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2125 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2126 int ret = 0;
2127
2128 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2129 req_rx.nav_id = tisci_rm->tisci_dev_id;
2130 req_rx.index = uc->rchan->id;
2131
2132 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2133 if (ret) {
2134 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2135 return ret;
2136 }
2137
2138 flow_req.valid_params =
2139 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2140 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2141 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
2142
2143 flow_req.nav_id = tisci_rm->tisci_dev_id;
2144 flow_req.flow_index = uc->rflow->id;
2145
2146 if (uc->config.needs_epib)
2147 flow_req.rx_einfo_present = 1;
2148 else
2149 flow_req.rx_einfo_present = 0;
2150 if (uc->config.psd_size)
2151 flow_req.rx_psinfo_present = 1;
2152 else
2153 flow_req.rx_psinfo_present = 0;
Vignesh Raghavendra87fa0d62023-03-08 09:42:57 +05302154 flow_req.rx_error_handling = 0;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302155
2156 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2157
2158 if (ret)
2159 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
2160 ret);
2161
2162 return ret;
2163}
2164
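/*
 * bcdma_alloc_chan_resources() - only MEM_TO_MEM is supported here: grab
 * a bchan, configure it through TISCI and make sure the channel is stopped
 * before the rings are reset.
 */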
2165static int bcdma_alloc_chan_resources(struct udma_chan *uc)
2166{
2167 int ret;
2168
2169 uc->config.pkt_mode = false;
2170
2171 switch (uc->config.dir) {
2172 case DMA_MEM_TO_MEM:
2173 /* Non synchronized - mem to mem type of transfer */
2174 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2175 uc->id);
2176
2177 ret = bcdma_alloc_bchan_resources(uc);
2178 if (ret)
2179 return ret;
2180
2181 ret = bcdma_tisci_m2m_channel_config(uc);
2182 break;
2183 default:
2184		/* Cannot happen */
2185 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2186 __func__, uc->id, uc->config.dir);
2187 return -EINVAL;
2188 }
2189
2190 /* check if the channel configuration was successful */
2191 if (ret)
2192 goto err_res_free;
2193
2194 if (udma_is_chan_running(uc)) {
2195 dev_warn(uc->ud->dev, "chan%d: is running!\n", uc->id);
2196 udma_stop(uc);
2197 if (udma_is_chan_running(uc)) {
2198 dev_err(uc->ud->dev, "chan%d: won't stop!\n", uc->id);
2199 goto err_res_free;
2200 }
2201 }
2202
2203 udma_reset_rings(uc);
2204
2205 return 0;
2206
2207err_res_free:
2208 bcdma_free_bchan_resources(uc);
2209 udma_free_tx_resources(uc);
2210 udma_free_rx_resources(uc);
2211
2212 udma_reset_uchan(uc);
2213
2214 return ret;
2215}
2216
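/*
 * pktdma_alloc_chan_resources() - set up a PKTDMA slave channel: allocate
 * tchan (MEM_TO_DEV) or rchan (DEV_TO_MEM) resources, derive the PSI-L
 * source/destination thread IDs, apply the TISCI channel/flow configuration
 * and pair the PSI-L threads before resetting the rings.
 */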
2217static int pktdma_alloc_chan_resources(struct udma_chan *uc)
2218{
2219 struct udma_dev *ud = uc->ud;
2220 int ret;
2221
2222 switch (uc->config.dir) {
2223 case DMA_MEM_TO_DEV:
2224		/* Slave transfer synchronized - mem to dev (TX) transfer */
2225 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2226 uc->id);
2227
2228 ret = udma_alloc_tx_resources(uc);
2229 if (ret) {
2230 uc->config.remote_thread_id = -1;
2231 return ret;
2232 }
2233
2234 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2235 uc->config.dst_thread = uc->config.remote_thread_id;
2236 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2237
2238 ret = pktdma_tisci_tx_channel_config(uc);
2239 break;
2240 case DMA_DEV_TO_MEM:
2241		/* Slave transfer synchronized - dev to mem (RX) transfer */
2242 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2243 uc->id);
2244
2245 ret = udma_alloc_rx_resources(uc);
2246 if (ret) {
2247 uc->config.remote_thread_id = -1;
2248 return ret;
2249 }
2250
2251 uc->config.src_thread = uc->config.remote_thread_id;
2252 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2253 K3_PSIL_DST_THREAD_ID_OFFSET;
2254
2255 ret = pktdma_tisci_rx_channel_config(uc);
2256 break;
2257 default:
2258		/* Cannot happen */
2259 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2260 __func__, uc->id, uc->config.dir);
2261 return -EINVAL;
2262 }
2263
2264 /* check if the channel configuration was successful */
2265 if (ret)
2266 goto err_res_free;
2267
2268 /* PSI-L pairing */
2269 ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2270 if (ret) {
2271 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2272 uc->config.src_thread, uc->config.dst_thread);
2273 goto err_res_free;
2274 }
2275
2276 if (udma_is_chan_running(uc)) {
2277 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2278 udma_stop(uc);
2279 if (udma_is_chan_running(uc)) {
2280 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2281 goto err_res_free;
2282 }
2283 }
2284
2285 udma_reset_rings(uc);
2286
2287 if (uc->tchan)
2288 dev_dbg(ud->dev,
2289 "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2290 uc->id, uc->tchan->id, uc->tchan->tflow_id,
2291 uc->config.remote_thread_id);
2292 else if (uc->rchan)
2293 dev_dbg(ud->dev,
2294 "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2295 uc->id, uc->rchan->id, uc->rflow->id,
2296 uc->config.remote_thread_id);
2297 return 0;
2298
2299err_res_free:
2300 udma_free_tx_resources(uc);
2301 udma_free_rx_resources(uc);
2302
2303 udma_reset_uchan(uc);
2304
2305 return ret;
2306}
2307
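/*
 * udma_transfer() - DMA uclass .transfer callback. Uses the reserved
 * channel 0 for a blocking memcpy: allocate the channel (UDMA or BCDMA),
 * submit one descriptor via udma_prep_dma_memcpy(), poll for completion,
 * then stop and release the channel. PKTDMA has no memcpy support and is
 * rejected with -EINVAL.
 */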
Vignesh R3a9dbf32019-02-05 17:31:24 +05302308static int udma_transfer(struct udevice *dev, int direction,
Andrew Davisd2da2842022-10-07 12:11:13 -05002309 dma_addr_t dst, dma_addr_t src, size_t len)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302310{
2311 struct udma_dev *ud = dev_get_priv(dev);
2312 /* Channel0 is reserved for memcpy */
2313 struct udma_chan *uc = &ud->channels[0];
2314 dma_addr_t paddr = 0;
2315 int ret;
2316
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302317 switch (ud->match_data->type) {
2318 case DMA_TYPE_UDMA:
2319 ret = udma_alloc_chan_resources(uc);
2320 break;
2321 case DMA_TYPE_BCDMA:
2322 ret = bcdma_alloc_chan_resources(uc);
2323 break;
2324 default:
2325 return -EINVAL;
2326	}
Vignesh R3a9dbf32019-02-05 17:31:24 +05302327 if (ret)
2328 return ret;
2329
Andrew Davisd2da2842022-10-07 12:11:13 -05002330 udma_prep_dma_memcpy(uc, dst, src, len);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302331 udma_start(uc);
2332 udma_poll_completion(uc, &paddr);
2333 udma_stop(uc);
2334
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302335 switch (ud->match_data->type) {
2336 case DMA_TYPE_UDMA:
2337 udma_free_chan_resources(uc);
2338 break;
2339 case DMA_TYPE_BCDMA:
2340 bcdma_free_bchan_resources(uc);
2341 break;
2342 default:
2343 return -EINVAL;
2344	}
2345
Vignesh R3a9dbf32019-02-05 17:31:24 +05302346 return 0;
2347}
2348
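/*
 * udma_request() - claim the channel picked by udma_of_xlate(): allocate
 * the hardware resources matching the channel direction and a coherent
 * host descriptor area (one descriptor for TX, UDMA_RX_DESC_NUM for RX),
 * and expose the RX flow id range through cfg_data for the client driver.
 */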
2349static int udma_request(struct dma *dma)
2350{
2351 struct udma_dev *ud = dev_get_priv(dma->dev);
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302352 struct udma_chan_config *ucc;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302353 struct udma_chan *uc;
2354 unsigned long dummy;
2355 int ret;
2356
2357 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2358 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2359 return -EINVAL;
2360 }
2361
2362 uc = &ud->channels[dma->id];
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302363 ucc = &uc->config;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302364 switch (ud->match_data->type) {
2365 case DMA_TYPE_UDMA:
2366 ret = udma_alloc_chan_resources(uc);
2367 break;
2368 case DMA_TYPE_BCDMA:
2369 ret = bcdma_alloc_chan_resources(uc);
2370 break;
2371 case DMA_TYPE_PKTDMA:
2372 ret = pktdma_alloc_chan_resources(uc);
2373 break;
2374 default:
2375 return -EINVAL;
2376 }
Vignesh R3a9dbf32019-02-05 17:31:24 +05302377 if (ret) {
2378 dev_err(dma->dev, "alloc dma res failed %d\n", ret);
2379 return -EINVAL;
2380 }
2381
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302382 if (uc->config.dir == DMA_MEM_TO_DEV) {
2383 uc->desc_tx = dma_alloc_coherent(ucc->hdesc_size, &dummy);
2384 memset(uc->desc_tx, 0, ucc->hdesc_size);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302385 } else {
2386 uc->desc_rx = dma_alloc_coherent(
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302387 ucc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
2388 memset(uc->desc_rx, 0, ucc->hdesc_size * UDMA_RX_DESC_NUM);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302389 }
2390
2391 uc->in_use = true;
2392 uc->desc_rx_cur = 0;
2393 uc->num_rx_bufs = 0;
2394
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302395 if (uc->config.dir == DMA_DEV_TO_MEM) {
Vignesh Raghavendra39349892019-12-04 22:17:21 +05302396 uc->cfg_data.flow_id_base = uc->rflow->id;
2397 uc->cfg_data.flow_id_cnt = 1;
2398 }
2399
Vignesh R3a9dbf32019-02-05 17:31:24 +05302400 return 0;
2401}
2402
Simon Glass75c0ad62020-02-03 07:35:55 -07002403static int udma_rfree(struct dma *dma)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302404{
2405 struct udma_dev *ud = dev_get_priv(dma->dev);
2406 struct udma_chan *uc;
2407
2408 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2409 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2410 return -EINVAL;
2411 }
2412 uc = &ud->channels[dma->id];
2413
2414 if (udma_is_chan_running(uc))
2415 udma_stop(uc);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302416
2417 udma_navss_psil_unpair(ud, uc->config.src_thread,
2418 uc->config.dst_thread);
2419
2420 bcdma_free_bchan_resources(uc);
2421 udma_free_tx_resources(uc);
2422 udma_free_rx_resources(uc);
2423 udma_reset_uchan(uc);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302424
2425 uc->in_use = false;
2426
2427 return 0;
2428}
2429
2430static int udma_enable(struct dma *dma)
2431{
2432 struct udma_dev *ud = dev_get_priv(dma->dev);
2433 struct udma_chan *uc;
2434 int ret;
2435
2436 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2437 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2438 return -EINVAL;
2439 }
2440 uc = &ud->channels[dma->id];
2441
2442 ret = udma_start(uc);
2443
2444 return ret;
2445}
2446
2447static int udma_disable(struct dma *dma)
2448{
2449 struct udma_dev *ud = dev_get_priv(dma->dev);
2450 struct udma_chan *uc;
2451 int ret = 0;
2452
2453 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2454 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2455 return -EINVAL;
2456 }
2457 uc = &ud->channels[dma->id];
2458
2459 if (udma_is_chan_running(uc))
2460 ret = udma_stop(uc);
2461 else
2462 dev_err(dma->dev, "%s not running\n", __func__);
2463
2464 return ret;
2465}
2466
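/*
 * udma_send() - synchronous TX: build a host packet descriptor around the
 * caller's buffer, flush both buffer and descriptor from the D-cache, push
 * the descriptor to the transmit ring and poll until it comes back on the
 * completion ring.
 */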
2467static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
2468{
2469 struct udma_dev *ud = dev_get_priv(dma->dev);
2470 struct cppi5_host_desc_t *desc_tx;
2471 dma_addr_t dma_src = (dma_addr_t)src;
2472 struct ti_udma_drv_packet_data packet_data = { 0 };
2473 dma_addr_t paddr;
2474 struct udma_chan *uc;
2475 u32 tc_ring_id;
2476 int ret;
2477
Keerthya3c8bb12019-04-24 16:33:54 +05302478 if (metadata)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302479 packet_data = *((struct ti_udma_drv_packet_data *)metadata);
2480
2481 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2482 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2483 return -EINVAL;
2484 }
2485 uc = &ud->channels[dma->id];
2486
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302487 if (uc->config.dir != DMA_MEM_TO_DEV)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302488 return -EINVAL;
2489
2490 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
2491
2492 desc_tx = uc->desc_tx;
2493
2494 cppi5_hdesc_reset_hbdesc(desc_tx);
2495
2496 cppi5_hdesc_init(desc_tx,
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302497 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
2498 uc->config.psd_size);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302499 cppi5_hdesc_set_pktlen(desc_tx, len);
2500 cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
2501 cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
2502 cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
2503 /* pass below information from caller */
2504 cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
2505 cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);
2506
Vignesh Raghavendrace431412019-12-09 10:25:39 +05302507 flush_dcache_range((unsigned long)dma_src,
2508 ALIGN((unsigned long)dma_src + len,
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05302509 ARCH_DMA_MINALIGN));
Vignesh Raghavendrace431412019-12-09 10:25:39 +05302510 flush_dcache_range((unsigned long)desc_tx,
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302511 ALIGN((unsigned long)desc_tx + uc->config.hdesc_size,
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05302512 ARCH_DMA_MINALIGN));
Vignesh R3a9dbf32019-02-05 17:31:24 +05302513
Vignesh Raghavendrafc7a33f2019-12-09 10:25:38 +05302514 ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302515 if (ret) {
2516 dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
2517 dma->id, ret);
2518 return ret;
2519 }
2520
2521 udma_poll_completion(uc, &paddr);
2522
2523 return 0;
2524}
2525
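/*
 * udma_receive() - pop one completed RX descriptor from the receive ring,
 * invalidate descriptor and buffer from the D-cache and return the buffer
 * pointer to the caller; returns the packet length, 0 if nothing is
 * pending, or a negative error code.
 */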
2526static int udma_receive(struct dma *dma, void **dst, void *metadata)
2527{
2528 struct udma_dev *ud = dev_get_priv(dma->dev);
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302529 struct udma_chan_config *ucc;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302530 struct cppi5_host_desc_t *desc_rx;
2531 dma_addr_t buf_dma;
2532 struct udma_chan *uc;
2533 u32 buf_dma_len, pkt_len;
2534 u32 port_id = 0;
2535 int ret;
2536
2537 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2538 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2539 return -EINVAL;
2540 }
2541 uc = &ud->channels[dma->id];
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302542 ucc = &uc->config;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302543
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302544 if (uc->config.dir != DMA_DEV_TO_MEM)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302545 return -EINVAL;
2546 if (!uc->num_rx_bufs)
2547 return -EINVAL;
2548
Vignesh Raghavendra2db3b282020-07-06 13:26:26 +05302549 ret = k3_nav_ringacc_ring_pop(uc->rflow->r_ring, &desc_rx);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302550 if (ret && ret != -ENODATA) {
2551 dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
2552 return ret;
2553 } else if (ret == -ENODATA) {
2554 return 0;
2555 }
2556
2557 /* invalidate cache data */
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05302558 invalidate_dcache_range((ulong)desc_rx,
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302559 (ulong)(desc_rx + ucc->hdesc_size));
Vignesh R3a9dbf32019-02-05 17:31:24 +05302560
2561 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
2562 pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
2563
2564 /* invalidate cache data */
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05302565 invalidate_dcache_range((ulong)buf_dma,
2566 (ulong)(buf_dma + buf_dma_len));
Vignesh R3a9dbf32019-02-05 17:31:24 +05302567
2568 cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
2569
2570 *dst = (void *)buf_dma;
2571 uc->num_rx_bufs--;
2572
2573 return pkt_len;
2574}
2575
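/*
 * udma_of_xlate() - translate the DT phandle argument (the remote PSI-L
 * thread id) into a free channel: the direction is derived from the
 * destination thread offset bit, and the rest of the channel configuration
 * (packet mode, EPIB/psdata sizes, mapped channel/flow for PKTDMA) comes
 * from the static PSI-L endpoint database.
 */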
2576static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
2577{
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302578 struct udma_chan_config *ucc;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302579 struct udma_dev *ud = dev_get_priv(dma->dev);
2580 struct udma_chan *uc = &ud->channels[0];
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302581 struct psil_endpoint_config *ep_config;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302582 u32 val;
2583
2584 for (val = 0; val < ud->ch_count; val++) {
2585 uc = &ud->channels[val];
2586 if (!uc->in_use)
2587 break;
2588 }
2589
2590 if (val == ud->ch_count)
2591 return -EBUSY;
2592
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302593 ucc = &uc->config;
2594 ucc->remote_thread_id = args->args[0];
2595 if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
2596 ucc->dir = DMA_MEM_TO_DEV;
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302597 else
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302598 ucc->dir = DMA_DEV_TO_MEM;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302599
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302600 ep_config = psil_get_ep_config(ucc->remote_thread_id);
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302601 if (IS_ERR(ep_config)) {
2602 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302603 uc->config.remote_thread_id);
2604 ucc->dir = DMA_MEM_TO_MEM;
2605 ucc->remote_thread_id = -1;
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302606		return -EINVAL;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302607 }
2608
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302609 ucc->pkt_mode = ep_config->pkt_mode;
2610 ucc->channel_tpl = ep_config->channel_tpl;
2611 ucc->notdpkt = ep_config->notdpkt;
2612 ucc->ep_type = ep_config->ep_type;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302613
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302614 if (ud->match_data->type == DMA_TYPE_PKTDMA &&
2615 ep_config->mapped_channel_id >= 0) {
2616 ucc->mapped_channel_id = ep_config->mapped_channel_id;
2617 ucc->default_flow_id = ep_config->default_flow_id;
2618 } else {
2619 ucc->mapped_channel_id = -1;
2620 ucc->default_flow_id = -1;
2621 }
2622
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302623 ucc->needs_epib = ep_config->needs_epib;
2624 ucc->psd_size = ep_config->psd_size;
2625 ucc->metadata_size = (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + ucc->psd_size;
2626
2627 ucc->hdesc_size = cppi5_hdesc_calc_size(ucc->needs_epib,
2628 ucc->psd_size, 0);
2629 ucc->hdesc_size = ALIGN(ucc->hdesc_size, ARCH_DMA_MINALIGN);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302630
2631 dma->id = uc->id;
2632 pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302633 dma->id, ucc->needs_epib,
2634 ucc->psd_size, ucc->metadata_size,
2635 ucc->remote_thread_id);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302636
2637 return 0;
2638}
2639
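/*
 * udma_prepare_rcv_buf() - queue an RX buffer: initialise the next free
 * host descriptor, attach the buffer, flush the descriptor to memory and
 * push it onto the free descriptor ring so the hardware can fill it.
 */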
2640int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
2641{
2642 struct udma_dev *ud = dev_get_priv(dma->dev);
2643 struct cppi5_host_desc_t *desc_rx;
2644 dma_addr_t dma_dst;
2645 struct udma_chan *uc;
2646 u32 desc_num;
2647
2648 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2649 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2650 return -EINVAL;
2651 }
2652 uc = &ud->channels[dma->id];
2653
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302654 if (uc->config.dir != DMA_DEV_TO_MEM)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302655 return -EINVAL;
2656
2657 if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
2658 return -EINVAL;
2659
2660 desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302661 desc_rx = uc->desc_rx + (desc_num * uc->config.hdesc_size);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302662 dma_dst = (dma_addr_t)dst;
2663
2664 cppi5_hdesc_reset_hbdesc(desc_rx);
2665
2666 cppi5_hdesc_init(desc_rx,
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302667 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
2668 uc->config.psd_size);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302669 cppi5_hdesc_set_pktlen(desc_rx, size);
2670 cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);
2671
Vignesh Raghavendrace431412019-12-09 10:25:39 +05302672 flush_dcache_range((unsigned long)desc_rx,
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302673 ALIGN((unsigned long)desc_rx + uc->config.hdesc_size,
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05302674 ARCH_DMA_MINALIGN));
Vignesh R3a9dbf32019-02-05 17:31:24 +05302675
Vignesh Raghavendra2db3b282020-07-06 13:26:26 +05302676 udma_push_to_ring(uc->rflow->fd_ring, desc_rx);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302677
2678 uc->num_rx_bufs++;
2679 uc->desc_rx_cur++;
2680
2681 return 0;
2682}
2683
Vignesh Raghavendra39349892019-12-04 22:17:21 +05302684static int udma_get_cfg(struct dma *dma, u32 id, void **data)
2685{
2686 struct udma_dev *ud = dev_get_priv(dma->dev);
2687 struct udma_chan *uc;
2688
2689 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2690 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2691 return -EINVAL;
2692 }
2693
2694 switch (id) {
2695 case TI_UDMA_CHAN_PRIV_INFO:
2696 uc = &ud->channels[dma->id];
2697 *data = &uc->cfg_data;
2698 return 0;
2699 }
2700
2701 return -EINVAL;
2702}
2703
Vignesh R3a9dbf32019-02-05 17:31:24 +05302704static const struct dma_ops udma_ops = {
2705 .transfer = udma_transfer,
2706 .of_xlate = udma_of_xlate,
2707 .request = udma_request,
Simon Glass75c0ad62020-02-03 07:35:55 -07002708 .rfree = udma_rfree,
Vignesh R3a9dbf32019-02-05 17:31:24 +05302709 .enable = udma_enable,
2710 .disable = udma_disable,
2711 .send = udma_send,
2712 .receive = udma_receive,
2713 .prepare_rcv_buf = udma_prepare_rcv_buf,
Vignesh Raghavendra39349892019-12-04 22:17:21 +05302714 .get_cfg = udma_get_cfg,
Vignesh R3a9dbf32019-02-05 17:31:24 +05302715};
2716
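/*
 * Per-SoC match data: DMA type, PSI-L thread base, memcpy capability,
 * static TR Z mask, OES register offsets and throughput-level layout.
 * These are hooked up to the compatible strings in udma_ids[] below.
 */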
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302717static struct udma_match_data am654_main_data = {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302718 .type = DMA_TYPE_UDMA,
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302719 .psil_base = 0x1000,
2720 .enable_memcpy_support = true,
2721 .statictr_z_mask = GENMASK(11, 0),
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302722 .oes = {
2723 .udma_rchan = 0x200,
2724 },
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302725 .tpl_levels = 2,
2726 .level_start_idx = {
2727 [0] = 8, /* Normal channels */
2728 [1] = 0, /* High Throughput channels */
2729 },
2730};
2731
2732static struct udma_match_data am654_mcu_data = {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302733 .type = DMA_TYPE_UDMA,
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302734 .psil_base = 0x6000,
2735 .enable_memcpy_support = true,
2736 .statictr_z_mask = GENMASK(11, 0),
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302737 .oes = {
2738 .udma_rchan = 0x200,
2739 },
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302740 .tpl_levels = 2,
2741 .level_start_idx = {
2742 [0] = 2, /* Normal channels */
2743 [1] = 0, /* High Throughput channels */
2744 },
2745};
2746
2747static struct udma_match_data j721e_main_data = {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302748 .type = DMA_TYPE_UDMA,
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302749 .psil_base = 0x1000,
2750 .enable_memcpy_support = true,
2751 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2752 .statictr_z_mask = GENMASK(23, 0),
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302753 .oes = {
2754 .udma_rchan = 0x400,
2755 },
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302756 .tpl_levels = 3,
2757 .level_start_idx = {
2758 [0] = 16, /* Normal channels */
2759 [1] = 4, /* High Throughput channels */
2760 [2] = 0, /* Ultra High Throughput channels */
2761 },
2762};
2763
2764static struct udma_match_data j721e_mcu_data = {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302765 .type = DMA_TYPE_UDMA,
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302766 .psil_base = 0x6000,
2767 .enable_memcpy_support = true,
2768 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2769 .statictr_z_mask = GENMASK(23, 0),
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302770 .oes = {
2771 .udma_rchan = 0x400,
2772 },
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302773 .tpl_levels = 2,
2774 .level_start_idx = {
2775 [0] = 2, /* Normal channels */
2776 [1] = 0, /* High Throughput channels */
2777 },
2778};
2779
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302780static struct udma_match_data am64_bcdma_data = {
2781 .type = DMA_TYPE_BCDMA,
2782 .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
2783 .enable_memcpy_support = true, /* Supported via bchan */
2784 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2785 .statictr_z_mask = GENMASK(23, 0),
2786 .oes = {
2787 .bcdma_bchan_data = 0x2200,
2788 .bcdma_bchan_ring = 0x2400,
2789 .bcdma_tchan_data = 0x2800,
2790 .bcdma_tchan_ring = 0x2a00,
2791 .bcdma_rchan_data = 0x2e00,
2792 .bcdma_rchan_ring = 0x3000,
2793 },
2794 /* No throughput levels */
2795};
2796
2797static struct udma_match_data am64_pktdma_data = {
2798 .type = DMA_TYPE_PKTDMA,
2799 .psil_base = 0x1000,
2800 .enable_memcpy_support = false,
2801 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2802 .statictr_z_mask = GENMASK(23, 0),
2803 .oes = {
2804 .pktdma_tchan_flow = 0x1200,
2805 .pktdma_rchan_flow = 0x1600,
2806 },
2807 /* No throughput levels */
2808};
2809
Vignesh R3a9dbf32019-02-05 17:31:24 +05302810static const struct udevice_id udma_ids[] = {
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302811 {
2812 .compatible = "ti,am654-navss-main-udmap",
2813 .data = (ulong)&am654_main_data,
2814 },
2815 {
2816 .compatible = "ti,am654-navss-mcu-udmap",
2817 .data = (ulong)&am654_mcu_data,
2818 }, {
2819 .compatible = "ti,j721e-navss-main-udmap",
2820 .data = (ulong)&j721e_main_data,
2821 }, {
2822 .compatible = "ti,j721e-navss-mcu-udmap",
2823 .data = (ulong)&j721e_mcu_data,
2824 },
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302825 {
2826 .compatible = "ti,am64-dmss-bcdma",
2827 .data = (ulong)&am64_bcdma_data,
2828 },
2829 {
2830 .compatible = "ti,am64-dmss-pktdma",
2831 .data = (ulong)&am64_pktdma_data,
2832 },
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302833 { /* Sentinel */ },
Vignesh R3a9dbf32019-02-05 17:31:24 +05302834};
2835
2836U_BOOT_DRIVER(ti_edma3) = {
2837 .name = "ti-udma",
2838 .id = UCLASS_DMA,
2839 .of_match = udma_ids,
2840 .ops = &udma_ops,
2841 .probe = udma_probe,
Simon Glass8a2b47f2020-12-03 16:55:17 -07002842 .priv_auto = sizeof(struct udma_dev),
Vignesh R3a9dbf32019-02-05 17:31:24 +05302843};