// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#define pr_fmt(fmt) "udma: " fmt
7
#include <cpu_func.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <malloc.h>
#include <net.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <dm/of_access.h>
#include <dma.h>
#include <dma-uclass.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/printk.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/cppi5.h>
#include <linux/soc/ti/ti-udma.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "k3-udma-hwdef.h"
#include "k3-psil-priv.h"
Vignesh R3a9dbf32019-02-05 17:31:24 +053037
Vignesh Raghavendrac4106862019-12-09 10:25:32 +053038#define K3_UDMA_MAX_RFLOWS 1024
Prasanth Babu Mantena246231f2025-02-20 18:48:27 +053039#define K3_UDMA_MAX_TR 2
Vignesh Raghavendrac4106862019-12-09 10:25:32 +053040
Vignesh R3a9dbf32019-02-05 17:31:24 +053041struct udma_chan;
42
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +053043enum k3_dma_type {
44 DMA_TYPE_UDMA = 0,
45 DMA_TYPE_BCDMA,
46 DMA_TYPE_PKTDMA,
47};
48
Vignesh R3a9dbf32019-02-05 17:31:24 +053049enum udma_mmr {
50 MMR_GCFG = 0,
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +053051 MMR_BCHANRT,
Vignesh R3a9dbf32019-02-05 17:31:24 +053052 MMR_RCHANRT,
53 MMR_TCHANRT,
Vignesh Raghavendra27e72502021-06-07 19:47:53 +053054 MMR_RCHAN,
55 MMR_TCHAN,
56 MMR_RFLOW,
Vignesh R3a9dbf32019-02-05 17:31:24 +053057 MMR_LAST,
58};
59
60static const char * const mmr_names[] = {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +053061 [MMR_GCFG] = "gcfg",
62 [MMR_BCHANRT] = "bchanrt",
63 [MMR_RCHANRT] = "rchanrt",
64 [MMR_TCHANRT] = "tchanrt",
Vignesh Raghavendra27e72502021-06-07 19:47:53 +053065 [MMR_RCHAN] = "rchan",
66 [MMR_TCHAN] = "tchan",
67 [MMR_RFLOW] = "rflow",
Vignesh R3a9dbf32019-02-05 17:31:24 +053068};
69
70struct udma_tchan {
Vignesh Raghavendra27e72502021-06-07 19:47:53 +053071 void __iomem *reg_chan;
Vignesh R3a9dbf32019-02-05 17:31:24 +053072 void __iomem *reg_rt;
73
74 int id;
75 struct k3_nav_ring *t_ring; /* Transmit ring */
76 struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +053077 int tflow_id; /* applicable only for PKTDMA */
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +053078};
79
80#define udma_bchan udma_tchan
81
82struct udma_rflow {
Vignesh Raghavendra27e72502021-06-07 19:47:53 +053083 void __iomem *reg_rflow;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +053084 int id;
85 struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
86 struct k3_nav_ring *r_ring; /* Receive ring */
Vignesh R3a9dbf32019-02-05 17:31:24 +053087};
88
89struct udma_rchan {
Vignesh Raghavendra27e72502021-06-07 19:47:53 +053090 void __iomem *reg_chan;
Vignesh R3a9dbf32019-02-05 17:31:24 +053091 void __iomem *reg_rt;
92
93 int id;
Vignesh R3a9dbf32019-02-05 17:31:24 +053094};
95
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +053096struct udma_oes_offsets {
97 /* K3 UDMA Output Event Offset */
98 u32 udma_rchan;
99
100 /* BCDMA Output Event Offsets */
101 u32 bcdma_bchan_data;
102 u32 bcdma_bchan_ring;
103 u32 bcdma_tchan_data;
104 u32 bcdma_tchan_ring;
105 u32 bcdma_rchan_data;
106 u32 bcdma_rchan_ring;
107
108 /* PKTDMA Output Event Offsets */
109 u32 pktdma_tchan_flow;
110 u32 pktdma_rchan_flow;
111};
112
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +0530113#define UDMA_FLAG_PDMA_ACC32 BIT(0)
114#define UDMA_FLAG_PDMA_BURST BIT(1)
115#define UDMA_FLAG_TDTYPE BIT(2)
116
117struct udma_match_data {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530118 enum k3_dma_type type;
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +0530119 u32 psil_base;
120 bool enable_memcpy_support;
121 u32 flags;
122 u32 statictr_z_mask;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530123 struct udma_oes_offsets oes;
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +0530124
125 u8 tpl_levels;
126 u32 level_start_idx[];
127};
128
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530129enum udma_rm_range {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530130 RM_RANGE_BCHAN = 0,
131 RM_RANGE_TCHAN,
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530132 RM_RANGE_RCHAN,
133 RM_RANGE_RFLOW,
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530134 RM_RANGE_TFLOW,
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530135 RM_RANGE_LAST,
136};
137
138struct udma_tisci_rm {
139 const struct ti_sci_handle *tisci;
140 const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
141 u32 tisci_dev_id;
142
143 /* tisci information for PSI-L thread pairing/unpairing */
144 const struct ti_sci_rm_psil_ops *tisci_psil_ops;
145 u32 tisci_navss_dev_id;
146
147 struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
148};
149
Vignesh R3a9dbf32019-02-05 17:31:24 +0530150struct udma_dev {
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530151 struct udevice *dev;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530152 void __iomem *mmrs[MMR_LAST];
153
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530154 struct udma_tisci_rm tisci_rm;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530155 struct k3_nav_ringacc *ringacc;
156
157 u32 features;
158
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530159 int bchan_cnt;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530160 int tchan_cnt;
161 int echan_cnt;
162 int rchan_cnt;
163 int rflow_cnt;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530164 int tflow_cnt;
165 unsigned long *bchan_map;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530166 unsigned long *tchan_map;
167 unsigned long *rchan_map;
168 unsigned long *rflow_map;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530169 unsigned long *rflow_map_reserved;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530170 unsigned long *tflow_map;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530171
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530172 struct udma_bchan *bchans;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530173 struct udma_tchan *tchans;
174 struct udma_rchan *rchans;
175 struct udma_rflow *rflows;
176
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +0530177 struct udma_match_data *match_data;
Prasanth Babu Mantena246231f2025-02-20 18:48:27 +0530178 void *bc_desc;
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +0530179
Vignesh R3a9dbf32019-02-05 17:31:24 +0530180 struct udma_chan *channels;
181 u32 psil_base;
182
183 u32 ch_count;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530184};
185
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530186struct udma_chan_config {
187 u32 psd_size; /* size of Protocol Specific Data */
188 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
189 u32 hdesc_size; /* Size of a packet descriptor in packet mode */
190 int remote_thread_id;
191 u32 atype;
192 u32 src_thread;
193 u32 dst_thread;
194 enum psil_endpoint_type ep_type;
195 enum udma_tp_level channel_tpl; /* Channel Throughput Level */
196
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530197 /* PKTDMA mapped channel */
198 int mapped_channel_id;
199 /* PKTDMA default tflow or rflow for mapped channel */
200 int default_flow_id;
201
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530202 enum dma_direction dir;
203
204 unsigned int pkt_mode:1; /* TR or packet */
205 unsigned int needs_epib:1; /* EPIB is needed for the communication or not */
206 unsigned int enable_acc32:1;
207 unsigned int enable_burst:1;
208 unsigned int notdpkt:1; /* Suppress sending TDC packet */
209};
210
Vignesh R3a9dbf32019-02-05 17:31:24 +0530211struct udma_chan {
212 struct udma_dev *ud;
213 char name[20];
214
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530215 struct udma_bchan *bchan;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530216 struct udma_tchan *tchan;
217 struct udma_rchan *rchan;
218 struct udma_rflow *rflow;
219
Vignesh Raghavendra39349892019-12-04 22:17:21 +0530220 struct ti_udma_drv_chan_cfg_data cfg_data;
221
Vignesh R3a9dbf32019-02-05 17:31:24 +0530222 u32 bcnt; /* number of bytes completed since the start of the channel */
223
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530224 struct udma_chan_config config;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530225
226 u32 id;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530227
228 struct cppi5_host_desc_t *desc_tx;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530229 bool in_use;
230 void *desc_rx;
231 u32 num_rx_bufs;
232 u32 desc_rx_cur;
233
234};
235
#define UDMA_CH_1000(ch) ((ch) * 0x1000)
#define UDMA_CH_100(ch) ((ch) * 0x100)
#define UDMA_CH_40(ch) ((ch) * 0x40)
239
240#ifdef PKTBUFSRX
241#define UDMA_RX_DESC_NUM PKTBUFSRX
242#else
243#define UDMA_RX_DESC_NUM 4
244#endif
245
246/* Generic register access functions */
247static inline u32 udma_read(void __iomem *base, int reg)
248{
249 u32 v;
250
251 v = __raw_readl(base + reg);
252 pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
253 return v;
254}
255
256static inline void udma_write(void __iomem *base, int reg, u32 val)
257{
258 pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
259 __raw_writel(val, base + reg);
260}
261
262static inline void udma_update_bits(void __iomem *base, int reg,
263 u32 mask, u32 val)
264{
265 u32 tmp, orig;
266
267 orig = udma_read(base, reg);
268 tmp = orig & ~mask;
269 tmp |= (val & mask);
270
271 if (tmp != orig)
272 udma_write(base, reg, tmp);
273}
274
275/* TCHANRT */
276static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
277{
278 if (!tchan)
279 return 0;
280 return udma_read(tchan->reg_rt, reg);
281}
282
283static inline void udma_tchanrt_write(struct udma_tchan *tchan,
284 int reg, u32 val)
285{
286 if (!tchan)
287 return;
288 udma_write(tchan->reg_rt, reg, val);
289}
290
291/* RCHANRT */
292static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
293{
294 if (!rchan)
295 return 0;
296 return udma_read(rchan->reg_rt, reg);
297}
298
299static inline void udma_rchanrt_write(struct udma_rchan *rchan,
300 int reg, u32 val)
301{
302 if (!rchan)
303 return;
304 udma_write(rchan->reg_rt, reg, val);
305}
306
307static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
308 u32 dst_thread)
309{
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530310 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
311
Vignesh R3a9dbf32019-02-05 17:31:24 +0530312 dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530313
314 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
315 tisci_rm->tisci_navss_dev_id,
316 src_thread, dst_thread);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530317}
318
319static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
320 u32 dst_thread)
321{
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530322 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
323
Vignesh R3a9dbf32019-02-05 17:31:24 +0530324 dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530325
326 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
327 tisci_rm->tisci_navss_dev_id,
328 src_thread, dst_thread);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530329}
330
331static inline char *udma_get_dir_text(enum dma_direction dir)
332{
333 switch (dir) {
334 case DMA_DEV_TO_MEM:
335 return "DEV_TO_MEM";
336 case DMA_MEM_TO_DEV:
337 return "MEM_TO_DEV";
338 case DMA_MEM_TO_MEM:
339 return "MEM_TO_MEM";
340 case DMA_DEV_TO_DEV:
341 return "DEV_TO_DEV";
342 default:
343 break;
344 }
345
346 return "invalid";
347}
348
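/*
 * k3-udma-u-boot.c is built as part of this translation unit; it provides the
 * raw register configuration helpers (udma_alloc_tchan_raw()/
 * udma_alloc_rchan_raw()) used further below when CONFIG_K3_DM_FW is enabled.
 */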
Vignesh Raghavendra27e72502021-06-07 19:47:53 +0530349#include "k3-udma-u-boot.c"
350
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530351static void udma_reset_uchan(struct udma_chan *uc)
352{
353 memset(&uc->config, 0, sizeof(uc->config));
354 uc->config.remote_thread_id = -1;
355 uc->config.mapped_channel_id = -1;
356 uc->config.default_flow_id = -1;
357}
358
Vignesh R3a9dbf32019-02-05 17:31:24 +0530359static inline bool udma_is_chan_running(struct udma_chan *uc)
360{
361 u32 trt_ctl = 0;
362 u32 rrt_ctl = 0;
363
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530364 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530365 case DMA_DEV_TO_MEM:
366 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
367 pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
368 __func__, rrt_ctl,
369 udma_rchanrt_read(uc->rchan,
370 UDMA_RCHAN_RT_PEER_RT_EN_REG));
371 break;
372 case DMA_MEM_TO_DEV:
373 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
374 pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
375 __func__, trt_ctl,
376 udma_tchanrt_read(uc->tchan,
377 UDMA_TCHAN_RT_PEER_RT_EN_REG));
378 break;
379 case DMA_MEM_TO_MEM:
380 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
381 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
382 break;
383 default:
384 break;
385 }
386
387 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
388 return true;
389
390 return false;
391}
392
Vignesh R3a9dbf32019-02-05 17:31:24 +0530393static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
394{
395 struct k3_nav_ring *ring = NULL;
396 int ret = -ENOENT;
397
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530398 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530399 case DMA_DEV_TO_MEM:
Vignesh Raghavendra2db3b282020-07-06 13:26:26 +0530400 ring = uc->rflow->r_ring;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530401 break;
402 case DMA_MEM_TO_DEV:
403 ring = uc->tchan->tc_ring;
404 break;
405 case DMA_MEM_TO_MEM:
406 ring = uc->tchan->tc_ring;
407 break;
408 default:
409 break;
410 }
411
412 if (ring && k3_nav_ringacc_ring_get_occ(ring))
413 ret = k3_nav_ringacc_ring_pop(ring, addr);
414
415 return ret;
416}
417
418static void udma_reset_rings(struct udma_chan *uc)
419{
420 struct k3_nav_ring *ring1 = NULL;
421 struct k3_nav_ring *ring2 = NULL;
422
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530423 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530424 case DMA_DEV_TO_MEM:
Vignesh Raghavendra2db3b282020-07-06 13:26:26 +0530425 ring1 = uc->rflow->fd_ring;
426 ring2 = uc->rflow->r_ring;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530427 break;
428 case DMA_MEM_TO_DEV:
429 ring1 = uc->tchan->t_ring;
430 ring2 = uc->tchan->tc_ring;
431 break;
432 case DMA_MEM_TO_MEM:
433 ring1 = uc->tchan->t_ring;
434 ring2 = uc->tchan->tc_ring;
435 break;
436 default:
437 break;
438 }
439
440 if (ring1)
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530441 k3_nav_ringacc_ring_reset_dma(ring1, k3_nav_ringacc_ring_get_occ(ring1));
Vignesh R3a9dbf32019-02-05 17:31:24 +0530442 if (ring2)
443 k3_nav_ringacc_ring_reset(ring2);
444}
445
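/*
 * The channel real-time counters decrement by the value written to them, so
 * reading each counter and writing the same value back clears it (assumed
 * K3 UDMAP RT counter semantics, inferred from the read-then-write pattern
 * below).
 */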
446static void udma_reset_counters(struct udma_chan *uc)
447{
448 u32 val;
449
450 if (uc->tchan) {
451 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
452 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
453
454 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
455 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
456
457 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
458 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
459
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530460 if (!uc->bchan) {
461 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
462 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
463 }
Vignesh R3a9dbf32019-02-05 17:31:24 +0530464 }
465
466 if (uc->rchan) {
467 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
468 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
469
470 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
471 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
472
473 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
474 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
475
476 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
477 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
478 }
479
480 uc->bcnt = 0;
481}
482
483static inline int udma_stop_hard(struct udma_chan *uc)
484{
485 pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);
486
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530487 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530488 case DMA_DEV_TO_MEM:
489 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
490 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
491 break;
492 case DMA_MEM_TO_DEV:
493 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
494 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
495 break;
496 case DMA_MEM_TO_MEM:
497 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
498 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
499 break;
500 default:
501 return -EINVAL;
502 }
503
504 return 0;
505}
506
507static int udma_start(struct udma_chan *uc)
508{
509 /* Channel is already running, no need to proceed further */
510 if (udma_is_chan_running(uc))
511 goto out;
512
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530513 pr_debug("%s: chan:%d dir:%s\n",
514 __func__, uc->id, udma_get_dir_text(uc->config.dir));
Vignesh R3a9dbf32019-02-05 17:31:24 +0530515
516 /* Make sure that we clear the teardown bit, if it is set */
517 udma_stop_hard(uc);
518
519 /* Reset all counters */
520 udma_reset_counters(uc);
521
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530522 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530523 case DMA_DEV_TO_MEM:
524 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
525 UDMA_CHAN_RT_CTL_EN);
526
527 /* Enable remote */
528 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
529 UDMA_PEER_RT_EN_ENABLE);
530
531 pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
532 __func__,
533 udma_rchanrt_read(uc->rchan,
534 UDMA_RCHAN_RT_CTL_REG),
535 udma_rchanrt_read(uc->rchan,
536 UDMA_RCHAN_RT_PEER_RT_EN_REG));
537 break;
538 case DMA_MEM_TO_DEV:
539 /* Enable remote */
540 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
541 UDMA_PEER_RT_EN_ENABLE);
542
543 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
544 UDMA_CHAN_RT_CTL_EN);
545
546 pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
547 __func__,
Vignesh Raghavendrac2237992019-12-09 10:25:36 +0530548 udma_tchanrt_read(uc->tchan,
Vignesh R3a9dbf32019-02-05 17:31:24 +0530549 UDMA_TCHAN_RT_CTL_REG),
Vignesh Raghavendrac2237992019-12-09 10:25:36 +0530550 udma_tchanrt_read(uc->tchan,
Vignesh R3a9dbf32019-02-05 17:31:24 +0530551 UDMA_TCHAN_RT_PEER_RT_EN_REG));
552 break;
553 case DMA_MEM_TO_MEM:
554 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
555 UDMA_CHAN_RT_CTL_EN);
556 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
557 UDMA_CHAN_RT_CTL_EN);
558
559 break;
560 default:
561 return -EINVAL;
562 }
563
564 pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
565out:
566 return 0;
567}
568
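/*
 * Request a TX channel teardown and, when sync is set, busy-wait (up to
 * roughly 1000 us) for the channel enable bit to clear before checking that
 * the peer has also stopped.
 */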
569static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
570{
571 int i = 0;
572 u32 val;
573
574 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
575 UDMA_CHAN_RT_CTL_EN |
576 UDMA_CHAN_RT_CTL_TDOWN);
577
578 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
579
580 while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
581 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
582 udelay(1);
583 if (i > 1000) {
584 printf(" %s TIMEOUT !\n", __func__);
585 break;
586 }
587 i++;
588 }
589
590 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
591 if (val & UDMA_PEER_RT_EN_ENABLE)
592 printf("%s: peer not stopped TIMEOUT !\n", __func__);
593}
594
595static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
596{
597 int i = 0;
598 u32 val;
599
600 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
601 UDMA_PEER_RT_EN_ENABLE |
602 UDMA_PEER_RT_EN_TEARDOWN);
603
604 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
605
606 while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
607 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
608 udelay(1);
609 if (i > 1000) {
610 printf("%s TIMEOUT !\n", __func__);
611 break;
612 }
613 i++;
614 }
615
616 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
617 if (val & UDMA_PEER_RT_EN_ENABLE)
618 printf("%s: peer not stopped TIMEOUT !\n", __func__);
619}
620
621static inline int udma_stop(struct udma_chan *uc)
622{
623 pr_debug("%s: chan:%d dir:%s\n",
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530624 __func__, uc->id, udma_get_dir_text(uc->config.dir));
Vignesh R3a9dbf32019-02-05 17:31:24 +0530625
626 udma_reset_counters(uc);
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530627 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530628 case DMA_DEV_TO_MEM:
629 udma_stop_dev2mem(uc, true);
630 break;
631 case DMA_MEM_TO_DEV:
632 udma_stop_mem2dev(uc, true);
633 break;
634 case DMA_MEM_TO_MEM:
635 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
636 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
637 break;
638 default:
639 return -EINVAL;
640 }
641
642 return 0;
643}
644
645static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
646{
647 int i = 1;
648
649 while (udma_pop_from_ring(uc, paddr)) {
650 udelay(1);
651 if (!(i % 1000000))
652 printf(".");
653 i++;
654 }
655}
656
Vignesh Raghavendrac4106862019-12-09 10:25:32 +0530657static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
658{
659 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
660
661 if (id >= 0) {
662 if (test_bit(id, ud->rflow_map)) {
663 dev_err(ud->dev, "rflow%d is in use\n", id);
664 return ERR_PTR(-ENOENT);
665 }
666 } else {
667 bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
668 ud->rflow_cnt);
669
670 id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
671 if (id >= ud->rflow_cnt)
672 return ERR_PTR(-ENOENT);
673 }
674
675 __set_bit(id, ud->rflow_map);
676 return &ud->rflows[id];
677}
678
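/*
 * Template for the resource reservation helpers; expanded below into
 * __udma_reserve_tchan() and __udma_reserve_rchan().
 */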
Vignesh R3a9dbf32019-02-05 17:31:24 +0530679#define UDMA_RESERVE_RESOURCE(res) \
680static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
681 int id) \
682{ \
683 if (id >= 0) { \
684 if (test_bit(id, ud->res##_map)) { \
			dev_err(ud->dev, #res "%d is in use\n", id); \
686 return ERR_PTR(-ENOENT); \
687 } \
688 } else { \
689 id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \
690 if (id == ud->res##_cnt) { \
691 return ERR_PTR(-ENOENT); \
692 } \
693 } \
694 \
695 __set_bit(id, ud->res##_map); \
696 return &ud->res##s[id]; \
697}
698
699UDMA_RESERVE_RESOURCE(tchan);
700UDMA_RESERVE_RESOURCE(rchan);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530701
702static int udma_get_tchan(struct udma_chan *uc)
703{
704 struct udma_dev *ud = uc->ud;
705
706 if (uc->tchan) {
707 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
708 uc->id, uc->tchan->id);
709 return 0;
710 }
711
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530712 uc->tchan = __udma_reserve_tchan(ud, uc->config.mapped_channel_id);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530713 if (IS_ERR(uc->tchan))
714 return PTR_ERR(uc->tchan);
715
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530716 if (ud->tflow_cnt) {
717 int tflow_id;
718
719 /* Only PKTDMA have support for tx flows */
720 if (uc->config.default_flow_id >= 0)
721 tflow_id = uc->config.default_flow_id;
722 else
723 tflow_id = uc->tchan->id;
724
725 if (test_bit(tflow_id, ud->tflow_map)) {
726 dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
727 __clear_bit(uc->tchan->id, ud->tchan_map);
728 uc->tchan = NULL;
729 return -ENOENT;
730 }
731
732 uc->tchan->tflow_id = tflow_id;
733 __set_bit(tflow_id, ud->tflow_map);
734 } else {
735 uc->tchan->tflow_id = -1;
736 }
737
Vignesh R3a9dbf32019-02-05 17:31:24 +0530738 pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);
739
Vignesh R3a9dbf32019-02-05 17:31:24 +0530740 return 0;
741}
742
743static int udma_get_rchan(struct udma_chan *uc)
744{
745 struct udma_dev *ud = uc->ud;
746
747 if (uc->rchan) {
748 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
749 uc->id, uc->rchan->id);
750 return 0;
751 }
752
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530753 uc->rchan = __udma_reserve_rchan(ud, uc->config.mapped_channel_id);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530754 if (IS_ERR(uc->rchan))
755 return PTR_ERR(uc->rchan);
756
757 pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);
758
Vignesh R3a9dbf32019-02-05 17:31:24 +0530759 return 0;
760}
761
762static int udma_get_chan_pair(struct udma_chan *uc)
763{
764 struct udma_dev *ud = uc->ud;
765 int chan_id, end;
766
767 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
768 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
769 uc->id, uc->tchan->id);
770 return 0;
771 }
772
773 if (uc->tchan) {
774 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
775 uc->id, uc->tchan->id);
776 return -EBUSY;
777 } else if (uc->rchan) {
778 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
779 uc->id, uc->rchan->id);
780 return -EBUSY;
781 }
782
783 /* Can be optimized, but let's have it like this for now */
784 end = min(ud->tchan_cnt, ud->rchan_cnt);
785 for (chan_id = 0; chan_id < end; chan_id++) {
786 if (!test_bit(chan_id, ud->tchan_map) &&
787 !test_bit(chan_id, ud->rchan_map))
788 break;
789 }
790
791 if (chan_id == end)
792 return -ENOENT;
793
794 __set_bit(chan_id, ud->tchan_map);
795 __set_bit(chan_id, ud->rchan_map);
796 uc->tchan = &ud->tchans[chan_id];
797 uc->rchan = &ud->rchans[chan_id];
798
799 pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);
800
Vignesh R3a9dbf32019-02-05 17:31:24 +0530801 return 0;
802}
803
804static int udma_get_rflow(struct udma_chan *uc, int flow_id)
805{
806 struct udma_dev *ud = uc->ud;
807
808 if (uc->rflow) {
809 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
810 uc->id, uc->rflow->id);
811 return 0;
812 }
813
814 if (!uc->rchan)
815 dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);
816
817 uc->rflow = __udma_reserve_rflow(ud, flow_id);
818 if (IS_ERR(uc->rflow))
819 return PTR_ERR(uc->rflow);
820
821 pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
822 return 0;
823}
824
825static void udma_put_rchan(struct udma_chan *uc)
826{
827 struct udma_dev *ud = uc->ud;
828
829 if (uc->rchan) {
830 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
831 uc->rchan->id);
832 __clear_bit(uc->rchan->id, ud->rchan_map);
833 uc->rchan = NULL;
834 }
835}
836
837static void udma_put_tchan(struct udma_chan *uc)
838{
839 struct udma_dev *ud = uc->ud;
840
841 if (uc->tchan) {
842 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
843 uc->tchan->id);
844 __clear_bit(uc->tchan->id, ud->tchan_map);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530845 if (uc->tchan->tflow_id >= 0)
846 __clear_bit(uc->tchan->tflow_id, ud->tflow_map);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530847 uc->tchan = NULL;
848 }
849}
850
851static void udma_put_rflow(struct udma_chan *uc)
852{
853 struct udma_dev *ud = uc->ud;
854
855 if (uc->rflow) {
856 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
857 uc->rflow->id);
858 __clear_bit(uc->rflow->id, ud->rflow_map);
859 uc->rflow = NULL;
860 }
861}
862
863static void udma_free_tx_resources(struct udma_chan *uc)
864{
865 if (!uc->tchan)
866 return;
867
868 k3_nav_ringacc_ring_free(uc->tchan->t_ring);
869 k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
870 uc->tchan->t_ring = NULL;
871 uc->tchan->tc_ring = NULL;
872
873 udma_put_tchan(uc);
874}
875
876static int udma_alloc_tx_resources(struct udma_chan *uc)
877{
878 struct k3_nav_ring_cfg ring_cfg;
879 struct udma_dev *ud = uc->ud;
MD Danish Anwar25abdb32024-01-30 11:48:04 +0530880 struct udma_tchan *tchan;
881 int ring_idx, ret;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530882
883 ret = udma_get_tchan(uc);
884 if (ret)
885 return ret;
886
MD Danish Anwar25abdb32024-01-30 11:48:04 +0530887 tchan = uc->tchan;
Udit Kumarf084e402024-02-21 19:53:44 +0530888 if (tchan->tflow_id > 0)
MD Danish Anwar25abdb32024-01-30 11:48:04 +0530889 ring_idx = tchan->tflow_id;
890 else
Udit Kumarf084e402024-02-21 19:53:44 +0530891 ring_idx = tchan->id;
MD Danish Anwar25abdb32024-01-30 11:48:04 +0530892
893 ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
Vignesh Raghavendrad7c3eb02020-07-06 13:26:27 +0530894 &uc->tchan->t_ring,
895 &uc->tchan->tc_ring);
896 if (ret) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530897 ret = -EBUSY;
898 goto err_tx_ring;
899 }
900
Vignesh R3a9dbf32019-02-05 17:31:24 +0530901 memset(&ring_cfg, 0, sizeof(ring_cfg));
902 ring_cfg.size = 16;
903 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
Vignesh Raghavendra0fe24d32019-12-09 10:25:37 +0530904 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530905
906 ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
907 ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
908
909 if (ret)
910 goto err_ringcfg;
911
912 return 0;
913
914err_ringcfg:
915 k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
916 uc->tchan->tc_ring = NULL;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530917 k3_nav_ringacc_ring_free(uc->tchan->t_ring);
918 uc->tchan->t_ring = NULL;
919err_tx_ring:
920 udma_put_tchan(uc);
921
922 return ret;
923}
924
925static void udma_free_rx_resources(struct udma_chan *uc)
926{
927 if (!uc->rchan)
928 return;
929
Vignesh Raghavendra2db3b282020-07-06 13:26:26 +0530930 if (uc->rflow) {
931 k3_nav_ringacc_ring_free(uc->rflow->fd_ring);
932 k3_nav_ringacc_ring_free(uc->rflow->r_ring);
933 uc->rflow->fd_ring = NULL;
934 uc->rflow->r_ring = NULL;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530935
Vignesh Raghavendra2db3b282020-07-06 13:26:26 +0530936 udma_put_rflow(uc);
937 }
938
Vignesh R3a9dbf32019-02-05 17:31:24 +0530939 udma_put_rchan(uc);
940}
941
942static int udma_alloc_rx_resources(struct udma_chan *uc)
943{
944 struct k3_nav_ring_cfg ring_cfg;
945 struct udma_dev *ud = uc->ud;
Vignesh Raghavendrad7c3eb02020-07-06 13:26:27 +0530946 struct udma_rflow *rflow;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530947 int fd_ring_id;
948 int ret;
949
950 ret = udma_get_rchan(uc);
951 if (ret)
952 return ret;
953
954 /* For MEM_TO_MEM we don't need rflow or rings */
Vignesh Raghavendra07826212020-07-06 13:26:25 +0530955 if (uc->config.dir == DMA_MEM_TO_MEM)
Vignesh R3a9dbf32019-02-05 17:31:24 +0530956 return 0;
957
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530958 if (uc->config.default_flow_id >= 0)
959 ret = udma_get_rflow(uc, uc->config.default_flow_id);
960 else
961 ret = udma_get_rflow(uc, uc->rchan->id);
962
Vignesh R3a9dbf32019-02-05 17:31:24 +0530963 if (ret) {
964 ret = -EBUSY;
965 goto err_rflow;
966 }
967
Vignesh Raghavendrad7c3eb02020-07-06 13:26:27 +0530968 rflow = uc->rflow;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +0530969 if (ud->tflow_cnt) {
970 fd_ring_id = ud->tflow_cnt + rflow->id;
971 } else {
972 fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
973 uc->rchan->id;
974 }
975
Vignesh Raghavendrad7c3eb02020-07-06 13:26:27 +0530976 ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
977 &rflow->fd_ring, &rflow->r_ring);
978 if (ret) {
Vignesh R3a9dbf32019-02-05 17:31:24 +0530979 ret = -EBUSY;
980 goto err_rx_ring;
981 }
982
Vignesh R3a9dbf32019-02-05 17:31:24 +0530983 memset(&ring_cfg, 0, sizeof(ring_cfg));
984 ring_cfg.size = 16;
985 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
Vignesh Raghavendra0fe24d32019-12-09 10:25:37 +0530986 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
Vignesh R3a9dbf32019-02-05 17:31:24 +0530987
Vignesh Raghavendrad7c3eb02020-07-06 13:26:27 +0530988 ret = k3_nav_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
989 ret |= k3_nav_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
Vignesh R3a9dbf32019-02-05 17:31:24 +0530990 if (ret)
991 goto err_ringcfg;
992
993 return 0;
994
995err_ringcfg:
Vignesh Raghavendrad7c3eb02020-07-06 13:26:27 +0530996 k3_nav_ringacc_ring_free(rflow->r_ring);
997 rflow->r_ring = NULL;
998 k3_nav_ringacc_ring_free(rflow->fd_ring);
999 rflow->fd_ring = NULL;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301000err_rx_ring:
1001 udma_put_rflow(uc);
1002err_rflow:
1003 udma_put_rchan(uc);
1004
1005 return ret;
1006}
1007
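/*
 * Configure the TX channel (channel type, fetch size, completion queue)
 * through the TISCI resource management interface.
 */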
1008static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
1009{
1010 struct udma_dev *ud = uc->ud;
1011 int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1012 struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301013 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301014 u32 mode;
1015 int ret;
1016
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301017 if (uc->config.pkt_mode)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301018 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1019 else
1020 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1021
1022 req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
1023 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
1024 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301025 req.nav_id = tisci_rm->tisci_dev_id;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301026 req.index = uc->tchan->id;
1027 req.tx_chan_type = mode;
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301028 if (uc->config.dir == DMA_MEM_TO_MEM)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301029 req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1030 else
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301031 req.tx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1032 uc->config.psd_size,
Vignesh R3a9dbf32019-02-05 17:31:24 +05301033 0) >> 2;
1034 req.txcq_qnum = tc_ring;
1035
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301036 ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
Vignesh Raghavendra27e72502021-06-07 19:47:53 +05301037 if (ret) {
Vignesh R3a9dbf32019-02-05 17:31:24 +05301038 dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);
Vignesh Raghavendra27e72502021-06-07 19:47:53 +05301039 return ret;
1040 }
Vignesh R3a9dbf32019-02-05 17:31:24 +05301041
	/*
	 * The above TI SCI call handles the firewall configuration; the cfg
	 * register configuration still has to be done locally in the
	 * absence of RM services.
	 */
1047 if (IS_ENABLED(CONFIG_K3_DM_FW))
1048 udma_alloc_tchan_raw(uc);
1049
1050 return 0;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301051}
1052
1053static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
1054{
1055 struct udma_dev *ud = uc->ud;
Vignesh Raghavendra2db3b282020-07-06 13:26:26 +05301056 int fd_ring = k3_nav_ringacc_get_ring_id(uc->rflow->fd_ring);
1057 int rx_ring = k3_nav_ringacc_get_ring_id(uc->rflow->r_ring);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301058 int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1059 struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
1060 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301061 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301062 u32 mode;
1063 int ret;
1064
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301065 if (uc->config.pkt_mode)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301066 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1067 else
1068 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1069
1070 req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
1071 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301072 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301073 req.nav_id = tisci_rm->tisci_dev_id;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301074 req.index = uc->rchan->id;
1075 req.rx_chan_type = mode;
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301076 if (uc->config.dir == DMA_MEM_TO_MEM) {
Vignesh R3a9dbf32019-02-05 17:31:24 +05301077 req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1078 req.rxcq_qnum = tc_ring;
1079 } else {
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301080 req.rx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1081 uc->config.psd_size,
Vignesh R3a9dbf32019-02-05 17:31:24 +05301082 0) >> 2;
1083 req.rxcq_qnum = rx_ring;
1084 }
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301085 if (ud->match_data->type == DMA_TYPE_UDMA &&
1086 uc->rflow->id != uc->rchan->id &&
1087 uc->config.dir != DMA_MEM_TO_MEM) {
Vignesh R3a9dbf32019-02-05 17:31:24 +05301088 req.flowid_start = uc->rflow->id;
1089 req.flowid_cnt = 1;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301090 req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
1091 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301092 }
1093
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301094 ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301095 if (ret) {
1096 dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
1097 uc->rchan->id, ret);
1098 return ret;
1099 }
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301100 if (uc->config.dir == DMA_MEM_TO_MEM)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301101 return ret;
1102
1103 flow_req.valid_params =
1104 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
1105 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
1106 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
1107 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
1108 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1109 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
1110 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
1111 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
1112 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
1113 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1114 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1115 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1116 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
1117 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;
1118
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301119 flow_req.nav_id = tisci_rm->tisci_dev_id;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301120 flow_req.flow_index = uc->rflow->id;
1121
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301122 if (uc->config.needs_epib)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301123 flow_req.rx_einfo_present = 1;
1124 else
1125 flow_req.rx_einfo_present = 0;
1126
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301127 if (uc->config.psd_size)
Vignesh R3a9dbf32019-02-05 17:31:24 +05301128 flow_req.rx_psinfo_present = 1;
1129 else
1130 flow_req.rx_psinfo_present = 0;
1131
1132 flow_req.rx_error_handling = 0;
1133 flow_req.rx_desc_type = 0;
1134 flow_req.rx_dest_qnum = rx_ring;
1135 flow_req.rx_src_tag_hi_sel = 2;
1136 flow_req.rx_src_tag_lo_sel = 4;
1137 flow_req.rx_dest_tag_hi_sel = 5;
1138 flow_req.rx_dest_tag_lo_sel = 4;
1139 flow_req.rx_fdq0_sz0_qnum = fd_ring;
1140 flow_req.rx_fdq1_qnum = fd_ring;
1141 flow_req.rx_fdq2_qnum = fd_ring;
1142 flow_req.rx_fdq3_qnum = fd_ring;
1143 flow_req.rx_ps_location = 0;
1144
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301145 ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
1146 &flow_req);
Vignesh Raghavendra27e72502021-06-07 19:47:53 +05301147 if (ret) {
Vignesh R3a9dbf32019-02-05 17:31:24 +05301148 dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
1149 uc->rchan->id, uc->rflow->id, ret);
Vignesh Raghavendra27e72502021-06-07 19:47:53 +05301150 return ret;
1151 }
Vignesh R3a9dbf32019-02-05 17:31:24 +05301152
	/*
	 * The above TI SCI call handles the firewall configuration; the cfg
	 * register configuration still has to be done locally in the
	 * absence of RM services.
	 */
1158 if (IS_ENABLED(CONFIG_K3_DM_FW))
1159 udma_alloc_rchan_raw(uc);
1160
1161 return 0;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301162}
1163
1164static int udma_alloc_chan_resources(struct udma_chan *uc)
1165{
1166 struct udma_dev *ud = uc->ud;
1167 int ret;
1168
1169 pr_debug("%s: chan:%d as %s\n",
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301170 __func__, uc->id, udma_get_dir_text(uc->config.dir));
Vignesh R3a9dbf32019-02-05 17:31:24 +05301171
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301172 switch (uc->config.dir) {
Vignesh R3a9dbf32019-02-05 17:31:24 +05301173 case DMA_MEM_TO_MEM:
1174 /* Non synchronized - mem to mem type of transfer */
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301175 uc->config.pkt_mode = false;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301176 ret = udma_get_chan_pair(uc);
1177 if (ret)
1178 return ret;
1179
1180 ret = udma_alloc_tx_resources(uc);
1181 if (ret)
1182 goto err_free_res;
1183
1184 ret = udma_alloc_rx_resources(uc);
1185 if (ret)
1186 goto err_free_res;
1187
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301188 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1189 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301190 break;
1191 case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
1193 ret = udma_alloc_tx_resources(uc);
1194 if (ret)
1195 goto err_free_res;
1196
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301197 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1198 uc->config.dst_thread = uc->config.remote_thread_id;
1199 uc->config.dst_thread |= 0x8000;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301200
1201 break;
1202 case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
1204 ret = udma_alloc_rx_resources(uc);
1205 if (ret)
1206 goto err_free_res;
1207
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301208 uc->config.src_thread = uc->config.remote_thread_id;
1209 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301210
1211 break;
1212 default:
1213 /* Can not happen */
1214 pr_debug("%s: chan:%d invalid direction (%u)\n",
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301215 __func__, uc->id, uc->config.dir);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301216 return -EINVAL;
1217 }
1218
1219 /* We have channel indexes and rings */
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301220 if (uc->config.dir == DMA_MEM_TO_MEM) {
Vignesh R3a9dbf32019-02-05 17:31:24 +05301221 ret = udma_alloc_tchan_sci_req(uc);
1222 if (ret)
1223 goto err_free_res;
1224
1225 ret = udma_alloc_rchan_sci_req(uc);
1226 if (ret)
1227 goto err_free_res;
1228 } else {
1229 /* Slave transfer */
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301230 if (uc->config.dir == DMA_MEM_TO_DEV) {
Vignesh R3a9dbf32019-02-05 17:31:24 +05301231 ret = udma_alloc_tchan_sci_req(uc);
1232 if (ret)
1233 goto err_free_res;
1234 } else {
1235 ret = udma_alloc_rchan_sci_req(uc);
1236 if (ret)
1237 goto err_free_res;
1238 }
1239 }
1240
Peter Ujfalusid15f8652019-04-25 12:08:15 +05301241 if (udma_is_chan_running(uc)) {
1242 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1243 udma_stop(uc);
1244 if (udma_is_chan_running(uc)) {
1245 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1246 goto err_free_res;
1247 }
1248 }
1249
Vignesh R3a9dbf32019-02-05 17:31:24 +05301250 /* PSI-L pairing */
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301251 ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301252 if (ret) {
1253 dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
1254 goto err_free_res;
1255 }
1256
1257 return 0;
1258
1259err_free_res:
1260 udma_free_tx_resources(uc);
1261 udma_free_rx_resources(uc);
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301262 uc->config.remote_thread_id = -1;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301263 return ret;
1264}
1265
1266static void udma_free_chan_resources(struct udma_chan *uc)
1267{
Vignesh Raghavendrabe7bdcc2020-09-17 20:11:22 +05301268 /* Hard reset UDMA channel */
1269 udma_stop_hard(uc);
1270 udma_reset_counters(uc);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301271
1272 /* Release PSI-L pairing */
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301273 udma_navss_psil_unpair(uc->ud, uc->config.src_thread, uc->config.dst_thread);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301274
1275 /* Reset the rings for a new start */
1276 udma_reset_rings(uc);
1277 udma_free_tx_resources(uc);
1278 udma_free_rx_resources(uc);
1279
Vignesh Raghavendra07826212020-07-06 13:26:25 +05301280 uc->config.remote_thread_id = -1;
1281 uc->config.dir = DMA_MEM_TO_MEM;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301282}
1283
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301284static const char * const range_names[] = {
1285 [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
1286 [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
1287 [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
1288 [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
1289 [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
1290};
1291
Vignesh R3a9dbf32019-02-05 17:31:24 +05301292static int udma_get_mmrs(struct udevice *dev)
1293{
1294 struct udma_dev *ud = dev_get_priv(dev);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301295 u32 cap2, cap3, cap4;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301296 int i;
1297
Matthias Schiffer47331932023-09-27 15:33:34 +02001298 ud->mmrs[MMR_GCFG] = dev_read_addr_name_ptr(dev, mmr_names[MMR_GCFG]);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301299 if (!ud->mmrs[MMR_GCFG])
1300 return -EINVAL;
1301
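	/*
	 * The capability registers at GCFG offsets 0x28/0x2c (and 0x30 for
	 * PKTDMA) report the channel and flow counts parsed below.
	 */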
1302 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
1303 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
1304
1305 switch (ud->match_data->type) {
1306 case DMA_TYPE_UDMA:
1307 ud->rflow_cnt = cap3 & 0x3fff;
1308 ud->tchan_cnt = cap2 & 0x1ff;
1309 ud->echan_cnt = (cap2 >> 9) & 0x1ff;
1310 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1311 break;
1312 case DMA_TYPE_BCDMA:
1313 ud->bchan_cnt = cap2 & 0x1ff;
1314 ud->tchan_cnt = (cap2 >> 9) & 0x1ff;
1315 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1316 break;
1317 case DMA_TYPE_PKTDMA:
1318 cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
1319 ud->tchan_cnt = cap2 & 0x1ff;
1320 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1321 ud->rflow_cnt = cap3 & 0x3fff;
1322 ud->tflow_cnt = cap4 & 0x3fff;
1323 break;
1324 default:
1325 return -EINVAL;
1326 }
1327
1328 for (i = 1; i < MMR_LAST; i++) {
1329 if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
1330 continue;
1331 if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
1332 continue;
1333 if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
1334 continue;
Prasanth Babu Mantena32911232024-12-18 18:30:45 +05301335 if (i == MMR_RFLOW && ud->match_data->type == DMA_TYPE_BCDMA)
1336 continue;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301337
Matthias Schiffer47331932023-09-27 15:33:34 +02001338 ud->mmrs[i] = dev_read_addr_name_ptr(dev, mmr_names[i]);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301339 if (!ud->mmrs[i])
1340 return -EINVAL;
1341 }
1342
1343 return 0;
1344}
1345
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301346static int udma_setup_resources(struct udma_dev *ud)
1347{
1348 struct udevice *dev = ud->dev;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301349 int i;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301350 struct ti_sci_resource_desc *rm_desc;
1351 struct ti_sci_resource *rm_res;
1352 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
Prasanth Babu Mantena246231f2025-02-20 18:48:27 +05301353 size_t desc_size;
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301354
1355 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1356 sizeof(unsigned long), GFP_KERNEL);
1357 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1358 GFP_KERNEL);
1359 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1360 sizeof(unsigned long), GFP_KERNEL);
1361 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1362 GFP_KERNEL);
1363 ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
1364 sizeof(unsigned long), GFP_KERNEL);
1365 ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
1366 sizeof(unsigned long),
1367 GFP_KERNEL);
1368 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
1369 GFP_KERNEL);
1370
Prasanth Babu Mantena246231f2025-02-20 18:48:27 +05301371 desc_size = cppi5_trdesc_calc_size(K3_UDMA_MAX_TR, sizeof(struct cppi5_tr_type15_t));
1372 ud->bc_desc = devm_kzalloc(dev, ALIGN(desc_size, ARCH_DMA_MINALIGN), GFP_KERNEL);
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301373 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
1374 !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
Prasanth Babu Mantena246231f2025-02-20 18:48:27 +05301375 !ud->rflows || !ud->bc_desc)
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301376 return -ENOMEM;
1377
1378 /*
1379 * RX flows with the same Ids as RX channels are reserved to be used
1380 * as default flows if remote HW can't generate flow_ids. Those
1381 * RX flows can be requested only explicitly by id.
1382 */
1383 bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);
1384
1385 /* Get resource ranges from tisci */
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301386 for (i = 0; i < RM_RANGE_LAST; i++) {
1387 if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
1388 continue;
1389
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301390 tisci_rm->rm_ranges[i] =
1391 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1392 tisci_rm->tisci_dev_id,
1393 (char *)range_names[i]);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301394 }
Vignesh R3a9dbf32019-02-05 17:31:24 +05301395
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301396 /* tchan ranges */
1397 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1398 if (IS_ERR(rm_res)) {
1399 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1400 } else {
1401 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1402 for (i = 0; i < rm_res->sets; i++) {
1403 rm_desc = &rm_res->desc[i];
1404 bitmap_clear(ud->tchan_map, rm_desc->start,
1405 rm_desc->num);
1406 }
1407 }
1408
1409 /* rchan and matching default flow ranges */
1410 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1411 if (IS_ERR(rm_res)) {
1412 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1413 bitmap_zero(ud->rflow_map, ud->rchan_cnt);
1414 } else {
1415 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1416 bitmap_fill(ud->rflow_map, ud->rchan_cnt);
1417 for (i = 0; i < rm_res->sets; i++) {
1418 rm_desc = &rm_res->desc[i];
1419 bitmap_clear(ud->rchan_map, rm_desc->start,
1420 rm_desc->num);
1421 bitmap_clear(ud->rflow_map, rm_desc->start,
1422 rm_desc->num);
1423 }
1424 }
1425
1426 /* GP rflow ranges */
1427 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
1428 if (IS_ERR(rm_res)) {
1429 bitmap_clear(ud->rflow_map, ud->rchan_cnt,
1430 ud->rflow_cnt - ud->rchan_cnt);
1431 } else {
1432 bitmap_set(ud->rflow_map, ud->rchan_cnt,
1433 ud->rflow_cnt - ud->rchan_cnt);
1434 for (i = 0; i < rm_res->sets; i++) {
1435 rm_desc = &rm_res->desc[i];
1436 bitmap_clear(ud->rflow_map, rm_desc->start,
1437 rm_desc->num);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301438 }
1439 }
1440
1441 return 0;
1442}
1443
1444static int bcdma_setup_resources(struct udma_dev *ud)
1445{
1446 int i;
1447 struct udevice *dev = ud->dev;
1448 struct ti_sci_resource_desc *rm_desc;
1449 struct ti_sci_resource *rm_res;
1450 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
Prasanth Babu Mantena246231f2025-02-20 18:48:27 +05301451 size_t desc_size;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301452
1453 ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
1454 sizeof(unsigned long), GFP_KERNEL);
1455 ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
1456 GFP_KERNEL);
1457 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1458 sizeof(unsigned long), GFP_KERNEL);
1459 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1460 GFP_KERNEL);
1461 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1462 sizeof(unsigned long), GFP_KERNEL);
1463 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1464 GFP_KERNEL);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301465 ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
1466 GFP_KERNEL);
1467
Prasanth Babu Mantena246231f2025-02-20 18:48:27 +05301468 desc_size = cppi5_trdesc_calc_size(K3_UDMA_MAX_TR, sizeof(struct cppi5_tr_type15_t));
1469 ud->bc_desc = devm_kzalloc(dev, ALIGN(desc_size, ARCH_DMA_MINALIGN), GFP_KERNEL);
1470
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301471 if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301472 !ud->bchans || !ud->tchans || !ud->rchans ||
Prasanth Babu Mantena246231f2025-02-20 18:48:27 +05301473 !ud->rflows || !ud->bc_desc)
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301474 return -ENOMEM;
1475
1476 /* Get resource ranges from tisci */
1477 for (i = 0; i < RM_RANGE_LAST; i++) {
1478 if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
1479 continue;
1480
1481 tisci_rm->rm_ranges[i] =
1482 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1483 tisci_rm->tisci_dev_id,
1484 (char *)range_names[i]);
1485 }
1486
1487 /* bchan ranges */
1488 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
1489 if (IS_ERR(rm_res)) {
1490 bitmap_zero(ud->bchan_map, ud->bchan_cnt);
1491 } else {
1492 bitmap_fill(ud->bchan_map, ud->bchan_cnt);
1493 for (i = 0; i < rm_res->sets; i++) {
1494 rm_desc = &rm_res->desc[i];
1495 bitmap_clear(ud->bchan_map, rm_desc->start,
1496 rm_desc->num);
1497 dev_dbg(dev, "ti-sci-res: bchan: %d:%d\n",
1498 rm_desc->start, rm_desc->num);
1499 }
1500 }
1501
1502 /* tchan ranges */
1503 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1504 if (IS_ERR(rm_res)) {
1505 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1506 } else {
1507 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1508 for (i = 0; i < rm_res->sets; i++) {
1509 rm_desc = &rm_res->desc[i];
1510 bitmap_clear(ud->tchan_map, rm_desc->start,
1511 rm_desc->num);
1512 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
1513 rm_desc->start, rm_desc->num);
1514 }
1515 }
1516
1517 /* rchan ranges */
1518 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1519 if (IS_ERR(rm_res)) {
1520 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1521 } else {
1522 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1523 for (i = 0; i < rm_res->sets; i++) {
1524 rm_desc = &rm_res->desc[i];
1525 bitmap_clear(ud->rchan_map, rm_desc->start,
1526 rm_desc->num);
1527 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
1528 rm_desc->start, rm_desc->num);
1529 }
1530 }
1531
1532 return 0;
1533}
1534
1535static int pktdma_setup_resources(struct udma_dev *ud)
1536{
1537 int i;
1538 struct udevice *dev = ud->dev;
1539 struct ti_sci_resource *rm_res;
1540 struct ti_sci_resource_desc *rm_desc;
1541 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1542
1543 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1544 sizeof(unsigned long), GFP_KERNEL);
1545 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1546 GFP_KERNEL);
1547 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1548 sizeof(unsigned long), GFP_KERNEL);
1549 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1550 GFP_KERNEL);
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301551 ud->rflow_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
1552 sizeof(unsigned long),
1553 GFP_KERNEL);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301554 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
1555 GFP_KERNEL);
1556 ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
1557 sizeof(unsigned long), GFP_KERNEL);
1558
1559 if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301560 !ud->rchans || !ud->rflows || !ud->rflow_map)
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301561 return -ENOMEM;
1562
1563 /* Get resource ranges from tisci */
1564 for (i = 0; i < RM_RANGE_LAST; i++) {
1565 if (i == RM_RANGE_BCHAN)
1566 continue;
1567
1568 tisci_rm->rm_ranges[i] =
1569 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1570 tisci_rm->tisci_dev_id,
1571 (char *)range_names[i]);
1572 }
1573
1574 /* tchan ranges */
1575 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1576 if (IS_ERR(rm_res)) {
1577 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1578 } else {
1579 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1580 for (i = 0; i < rm_res->sets; i++) {
1581 rm_desc = &rm_res->desc[i];
1582 bitmap_clear(ud->tchan_map, rm_desc->start,
1583 rm_desc->num);
1584 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
1585 rm_desc->start, rm_desc->num);
1586 }
1587 }
1588
1589 /* rchan ranges */
1590 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1591 if (IS_ERR(rm_res)) {
1592 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1593 } else {
1594 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1595 for (i = 0; i < rm_res->sets; i++) {
1596 rm_desc = &rm_res->desc[i];
1597 bitmap_clear(ud->rchan_map, rm_desc->start,
1598 rm_desc->num);
1599 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
1600 rm_desc->start, rm_desc->num);
1601 }
1602 }
1603
1604 /* rflow ranges */
1605 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
1606 if (IS_ERR(rm_res)) {
1607 /* all rflows are assigned exclusively to Linux */
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301608 bitmap_zero(ud->rflow_map, ud->rflow_cnt);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301609 } else {
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301610 bitmap_fill(ud->rflow_map, ud->rflow_cnt);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301611 for (i = 0; i < rm_res->sets; i++) {
1612 rm_desc = &rm_res->desc[i];
Vignesh Raghavendra8ade6b02021-12-23 19:27:30 +05301613 bitmap_clear(ud->rflow_map, rm_desc->start,
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301614 rm_desc->num);
1615 dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
1616 rm_desc->start, rm_desc->num);
1617 }
1618 }
1619
1620 /* tflow ranges */
1621 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
1622 if (IS_ERR(rm_res)) {
1623 /* all tflows are assigned exclusively to Linux */
1624 bitmap_zero(ud->tflow_map, ud->tflow_cnt);
1625 } else {
1626 bitmap_fill(ud->tflow_map, ud->tflow_cnt);
1627 for (i = 0; i < rm_res->sets; i++) {
1628 rm_desc = &rm_res->desc[i];
1629 bitmap_clear(ud->tflow_map, rm_desc->start,
1630 rm_desc->num);
1631 dev_dbg(dev, "ti-sci-res: tflow: %d:%d\n",
1632 rm_desc->start, rm_desc->num);
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301633 }
1634 }
1635
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301636 return 0;
1637}
1638
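/*
 * Descriptive note (added): setup_resources() dispatches to the per-DMA-type
 * resource setup (UDMA/BCDMA/PKTDMA) and returns the number of channels this
 * host can actually use: the total channel count minus the channels the
 * bitmaps mark as unavailable.
 */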
1639static int setup_resources(struct udma_dev *ud)
1640{
1641 struct udevice *dev = ud->dev;
1642 int ch_count, ret;
1643
1644 switch (ud->match_data->type) {
1645 case DMA_TYPE_UDMA:
1646 ret = udma_setup_resources(ud);
1647 break;
1648 case DMA_TYPE_BCDMA:
1649 ret = bcdma_setup_resources(ud);
1650 break;
1651 case DMA_TYPE_PKTDMA:
1652 ret = pktdma_setup_resources(ud);
1653 break;
1654 default:
1655 return -EINVAL;
1656 }
1657
1658 if (ret)
1659 return ret;
1660
1661 ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
1662 if (ud->bchan_cnt)
1663 ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301664 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
1665 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
1666 if (!ch_count)
1667 return -ENODEV;
1668
1669 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
1670 GFP_KERNEL);
1671 if (!ud->channels)
1672 return -ENOMEM;
1673
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301674 switch (ud->match_data->type) {
1675 case DMA_TYPE_UDMA:
1676 dev_dbg(dev,
1677 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
1678 ch_count,
1679 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1680 ud->tchan_cnt),
1681 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1682 ud->rchan_cnt),
1683 ud->rflow_cnt - bitmap_weight(ud->rflow_map,
1684 ud->rflow_cnt));
1685 break;
1686 case DMA_TYPE_BCDMA:
1687 dev_dbg(dev,
1688 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
1689 ch_count,
1690 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
1691 ud->bchan_cnt),
1692 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1693 ud->tchan_cnt),
1694 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1695 ud->rchan_cnt));
1696 break;
1697 case DMA_TYPE_PKTDMA:
1698 dev_dbg(dev,
1699 "Channels: %d (tchan: %u, rchan: %u)\n",
1700 ch_count,
1701 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1702 ud->tchan_cnt),
1703 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1704 ud->rchan_cnt));
1705 break;
1706 default:
1707 break;
1708 }
Vignesh Raghavendrac4106862019-12-09 10:25:32 +05301709
1710 return ch_count;
1711}
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301712
Vignesh Raghavendrafc7a33f2019-12-09 10:25:38 +05301713static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
1714{
1715 u64 addr = 0;
1716
1717 memcpy(&addr, &elem, sizeof(elem));
1718 return k3_nav_ringacc_ring_push(ring, &addr);
1719}
1720
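/*
 * Descriptive note (added): udma_prep_dma_memcpy() builds a type 15 TR
 * descriptor for a blocking memcpy. Lengths below 64K use a single TR;
 * larger transfers are split into a bulk TR (blocks just under 64K, sized to
 * keep the source/destination alignment) and a remainder TR. The descriptor
 * is built in the preallocated ud->bc_desc buffer, flushed from the data
 * cache and pushed to the transmit ring.
 */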
Vignesh R3a9dbf32019-02-05 17:31:24 +05301721static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
1722 dma_addr_t src, size_t len)
1723{
1724 u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1725 struct cppi5_tr_type15_t *tr_req;
1726 int num_tr;
1727 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
1728 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
Prasanth Babu Mantena246231f2025-02-20 18:48:27 +05301729 void *tr_desc = uc->ud->bc_desc;
Vignesh R3a9dbf32019-02-05 17:31:24 +05301730 size_t desc_size;
1731
1732 if (len < SZ_64K) {
1733 num_tr = 1;
1734 tr0_cnt0 = len;
1735 tr0_cnt1 = 1;
1736 } else {
1737 unsigned long align_to = __ffs(src | dest);
1738
1739 if (align_to > 3)
1740 align_to = 3;
1741 /*
1742		 * Keep it simple: tr0 moves the bulk in blocks just under SZ_64K
1743		 * (sized to preserve src/dest alignment), tr1 moves the remainder
1744 */
1745 num_tr = 2;
1746 tr0_cnt0 = (SZ_64K - BIT(align_to));
1747 if (len / tr0_cnt0 >= SZ_64K) {
1748 dev_err(uc->ud->dev, "size %zu is not supported\n",
1749 len);
1750 return NULL;
1751 }
1752
1753 tr0_cnt1 = len / tr0_cnt0;
1754 tr1_cnt0 = len % tr0_cnt0;
1755 }
1756
1757 desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301758 memset(tr_desc, 0, desc_size);
1759
1760 cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
1761 cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
1762 cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);
1763
1764 tr_req = tr_desc + tr_size;
1765
1766 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
1767 CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
1768 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
1769
1770 tr_req[0].addr = src;
1771 tr_req[0].icnt0 = tr0_cnt0;
1772 tr_req[0].icnt1 = tr0_cnt1;
1773 tr_req[0].icnt2 = 1;
1774 tr_req[0].icnt3 = 1;
1775 tr_req[0].dim1 = tr0_cnt0;
1776
1777 tr_req[0].daddr = dest;
1778 tr_req[0].dicnt0 = tr0_cnt0;
1779 tr_req[0].dicnt1 = tr0_cnt1;
1780 tr_req[0].dicnt2 = 1;
1781 tr_req[0].dicnt3 = 1;
1782 tr_req[0].ddim1 = tr0_cnt0;
1783
1784 if (num_tr == 2) {
1785 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
1786 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
1787 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
1788
1789 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
1790 tr_req[1].icnt0 = tr1_cnt0;
1791 tr_req[1].icnt1 = 1;
1792 tr_req[1].icnt2 = 1;
1793 tr_req[1].icnt3 = 1;
1794
1795 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
1796 tr_req[1].dicnt0 = tr1_cnt0;
1797 tr_req[1].dicnt1 = 1;
1798 tr_req[1].dicnt2 = 1;
1799 tr_req[1].dicnt3 = 1;
1800 }
1801
1802 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
1803
Vignesh Raghavendrace431412019-12-09 10:25:39 +05301804 flush_dcache_range((unsigned long)tr_desc,
1805 ALIGN((unsigned long)tr_desc + desc_size,
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05301806 ARCH_DMA_MINALIGN));
Vignesh R3a9dbf32019-02-05 17:31:24 +05301807
Vignesh Raghavendrafc7a33f2019-12-09 10:25:38 +05301808 udma_push_to_ring(uc->tchan->t_ring, tr_desc);
Vignesh R3a9dbf32019-02-05 17:31:24 +05301809
1810 return 0;
1811}
1812
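/*
 * Descriptive note (added): the masks below list which fields are filled in
 * (and therefore marked valid) in the TISCI channel-configuration requests
 * used for BCDMA and UDMA channels further down.
 */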
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301813#define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \
1814 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1815 TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
1816
1817#define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \
1818 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1819 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
1820
1821#define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \
1822 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
1823
1824#define TISCI_UDMA_TCHAN_VALID_PARAMS ( \
1825 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1826 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1827 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1828 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1829 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1830 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1831 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1832 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1833
1834#define TISCI_UDMA_RCHAN_VALID_PARAMS ( \
1835 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1836 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1837 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1838 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1839 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1840 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1841 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1842 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
1843 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1844
1845static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
1846{
1847 struct udma_dev *ud = uc->ud;
1848 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1849 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1850 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1851 struct udma_bchan *bchan = uc->bchan;
1852 int ret = 0;
1853
1854 req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
1855 req_tx.nav_id = tisci_rm->tisci_dev_id;
1856 req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
1857 req_tx.index = bchan->id;
1858
1859 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1860 if (ret)
1861 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
1862
1863 return ret;
1864}
1865
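/*
 * Descriptive note (added): __bcdma_reserve_bchan() reserves a block-copy
 * channel - a specific one when id >= 0 (failing if it is already taken),
 * otherwise the first free entry in the bitmap. Setting the bit marks the
 * bchan as in use.
 */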
1866static struct udma_bchan *__bcdma_reserve_bchan(struct udma_dev *ud, int id)
1867{
1868 if (id >= 0) {
1869 if (test_bit(id, ud->bchan_map)) {
1870 dev_err(ud->dev, "bchan%d is in use\n", id);
1871 return ERR_PTR(-ENOENT);
1872 }
1873 } else {
1874 id = find_next_zero_bit(ud->bchan_map, ud->bchan_cnt, 0);
1875 if (id == ud->bchan_cnt)
1876 return ERR_PTR(-ENOENT);
1877 }
1878 __set_bit(id, ud->bchan_map);
1879 return &ud->bchans[id];
1880}
1881
1882static int bcdma_get_bchan(struct udma_chan *uc)
1883{
1884 struct udma_dev *ud = uc->ud;
1885
1886 if (uc->bchan) {
1887 dev_err(ud->dev, "chan%d: already have bchan%d allocated\n",
1888 uc->id, uc->bchan->id);
1889 return 0;
1890 }
1891
1892 uc->bchan = __bcdma_reserve_bchan(ud, -1);
1893 if (IS_ERR(uc->bchan))
1894 return PTR_ERR(uc->bchan);
1895
1896 uc->tchan = uc->bchan;
1897
1898 return 0;
1899}
1900
1901static void bcdma_put_bchan(struct udma_chan *uc)
1902{
1903 struct udma_dev *ud = uc->ud;
1904
1905 if (uc->bchan) {
1906 dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
1907 uc->bchan->id);
1908 __clear_bit(uc->bchan->id, ud->bchan_map);
1909 uc->bchan = NULL;
1910 uc->tchan = NULL;
1911 }
1912}
1913
1914static void bcdma_free_bchan_resources(struct udma_chan *uc)
1915{
1916 if (!uc->bchan)
1917 return;
1918
1919 k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
1920 k3_nav_ringacc_ring_free(uc->bchan->t_ring);
1921 uc->bchan->tc_ring = NULL;
1922 uc->bchan->t_ring = NULL;
1923
1924 bcdma_put_bchan(uc);
1925}
1926
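/*
 * Descriptive note (added): bcdma_alloc_bchan_resources() reserves a bchan
 * and requests the ring pair matching its id, then configures the transmit
 * ring as a 16-element ring with 8-byte elements in RING mode. On failure
 * the rings are freed and the bchan is released again.
 */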
1927static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
1928{
1929 struct k3_nav_ring_cfg ring_cfg;
1930 struct udma_dev *ud = uc->ud;
1931 int ret;
1932
1933 ret = bcdma_get_bchan(uc);
1934 if (ret)
1935 return ret;
1936
1937 ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
1938 &uc->bchan->t_ring,
1939 &uc->bchan->tc_ring);
1940 if (ret) {
1941 ret = -EBUSY;
1942 goto err_ring;
1943 }
1944
1945 memset(&ring_cfg, 0, sizeof(ring_cfg));
1946 ring_cfg.size = 16;
1947 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
1948 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
1949
1950 ret = k3_nav_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
1951 if (ret)
1952 goto err_ringcfg;
1953
1954 return 0;
1955
1956err_ringcfg:
1957 k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
1958 uc->bchan->tc_ring = NULL;
1959 k3_nav_ringacc_ring_free(uc->bchan->t_ring);
1960 uc->bchan->t_ring = NULL;
1961err_ring:
1962 bcdma_put_bchan(uc);
1963
1964 return ret;
1965}
1966
1967static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
1968{
1969 struct udma_dev *ud = uc->ud;
1970 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1971 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1972 struct udma_tchan *tchan = uc->tchan;
1973 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1974 int ret = 0;
1975
1976 req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
1977 req_tx.nav_id = tisci_rm->tisci_dev_id;
1978 req_tx.index = tchan->id;
1979 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1980 if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
1981 ud->match_data->flags & UDMA_FLAG_TDTYPE) {
1982 /* wait for peer to complete the teardown for PDMAs */
1983 req_tx.valid_params |=
1984 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
1985 req_tx.tx_tdtype = 1;
1986 }
1987
1988 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1989 if (ret)
1990 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1991
Kishon Vijay Abraham I8878dad2024-08-26 15:55:10 +05301992 if (IS_ENABLED(CONFIG_K3_DM_FW))
1993 udma_alloc_tchan_raw(uc);
1994
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05301995 return ret;
1996}
1997
1998#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
1999
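/*
 * Descriptive note (added): pktdma_tisci_rx_channel_config() configures a
 * PKTDMA RX channel over TISCI and then its default flow; EPIB/PS-data
 * presence comes from the channel configuration and RX error handling is
 * left disabled. When CONFIG_K3_DM_FW is enabled the raw rchan allocation
 * helper is invoked as well.
 */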
2000static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
2001{
2002 struct udma_dev *ud = uc->ud;
2003 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2004 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2005 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2006 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2007 int ret = 0;
2008
2009 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2010 req_rx.nav_id = tisci_rm->tisci_dev_id;
2011 req_rx.index = uc->rchan->id;
2012
2013 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2014 if (ret) {
2015 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2016 return ret;
2017 }
2018
2019 flow_req.valid_params =
2020 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2021 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2022 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
2023
2024 flow_req.nav_id = tisci_rm->tisci_dev_id;
2025 flow_req.flow_index = uc->rflow->id;
2026
2027 if (uc->config.needs_epib)
2028 flow_req.rx_einfo_present = 1;
2029 else
2030 flow_req.rx_einfo_present = 0;
2031 if (uc->config.psd_size)
2032 flow_req.rx_psinfo_present = 1;
2033 else
2034 flow_req.rx_psinfo_present = 0;
Vignesh Raghavendra87fa0d62023-03-08 09:42:57 +05302035 flow_req.rx_error_handling = 0;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302036
2037 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2038
2039 if (ret)
2040 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
2041 ret);
2042
Kishon Vijay Abraham I8878dad2024-08-26 15:55:10 +05302043 if (IS_ENABLED(CONFIG_K3_DM_FW))
2044 udma_alloc_rchan_raw(uc);
2045
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302046 return ret;
2047}
2048
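/*
 * Descriptive note (added): bcdma_alloc_chan_resources() only handles
 * MEM_TO_MEM channels, backed by a bchan with uc->config.pkt_mode cleared.
 * The channel is configured through TISCI, stopped if it is unexpectedly
 * still running, and its rings are reset.
 */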
2049static int bcdma_alloc_chan_resources(struct udma_chan *uc)
2050{
2051 int ret;
2052
2053 uc->config.pkt_mode = false;
2054
2055 switch (uc->config.dir) {
2056 case DMA_MEM_TO_MEM:
2057		/* Non-synchronized - mem-to-mem transfer */
2058 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2059 uc->id);
2060
2061 ret = bcdma_alloc_bchan_resources(uc);
2062 if (ret)
2063 return ret;
2064
2065 ret = bcdma_tisci_m2m_channel_config(uc);
2066 break;
2067 default:
2068		/* Cannot happen */
2069 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2070 __func__, uc->id, uc->config.dir);
2071 return -EINVAL;
2072 }
2073
2074 /* check if the channel configuration was successful */
2075 if (ret)
2076 goto err_res_free;
2077
2078 if (udma_is_chan_running(uc)) {
2079 dev_warn(uc->ud->dev, "chan%d: is running!\n", uc->id);
2080 udma_stop(uc);
2081 if (udma_is_chan_running(uc)) {
2082 dev_err(uc->ud->dev, "chan%d: won't stop!\n", uc->id);
2083 goto err_res_free;
2084 }
2085 }
2086
2087 udma_reset_rings(uc);
2088
2089 return 0;
2090
2091err_res_free:
2092 bcdma_free_bchan_resources(uc);
2093 udma_free_tx_resources(uc);
2094 udma_free_rx_resources(uc);
2095
2096 udma_reset_uchan(uc);
2097
2098 return ret;
2099}
2100
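/*
 * Descriptive note (added): pktdma_alloc_chan_resources() allocates a PKTDMA
 * slave channel. TX (MEM_TO_DEV) uses a tchan, RX (DEV_TO_MEM) an rchan plus
 * rflow. The PSI-L source/destination threads are derived from psil_base +
 * channel id and the remote thread id (destination threads carry
 * K3_PSIL_DST_THREAD_ID_OFFSET), the channel is configured via TISCI, the
 * two threads are paired and the rings are reset.
 */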
2101static int pktdma_alloc_chan_resources(struct udma_chan *uc)
2102{
2103 struct udma_dev *ud = uc->ud;
2104 int ret;
2105
2106 switch (uc->config.dir) {
2107 case DMA_MEM_TO_DEV:
2108		/* Slave transfer synchronized - mem to dev (TX) transfer */
2109 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2110 uc->id);
2111
2112 ret = udma_alloc_tx_resources(uc);
2113 if (ret) {
2114 uc->config.remote_thread_id = -1;
2115 return ret;
2116 }
2117
2118 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2119 uc->config.dst_thread = uc->config.remote_thread_id;
2120 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2121
2122 ret = pktdma_tisci_tx_channel_config(uc);
2123 break;
2124 case DMA_DEV_TO_MEM:
2125		/* Slave transfer synchronized - dev to mem (RX) transfer */
2126 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2127 uc->id);
2128
2129 ret = udma_alloc_rx_resources(uc);
2130 if (ret) {
2131 uc->config.remote_thread_id = -1;
2132 return ret;
2133 }
2134
2135 uc->config.src_thread = uc->config.remote_thread_id;
2136 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2137 K3_PSIL_DST_THREAD_ID_OFFSET;
2138
2139 ret = pktdma_tisci_rx_channel_config(uc);
2140 break;
2141 default:
2142		/* Cannot happen */
2143 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2144 __func__, uc->id, uc->config.dir);
2145 return -EINVAL;
2146 }
2147
2148 /* check if the channel configuration was successful */
2149 if (ret)
2150 goto err_res_free;
2151
2152 /* PSI-L pairing */
2153 ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2154 if (ret) {
2155 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2156 uc->config.src_thread, uc->config.dst_thread);
2157 goto err_res_free;
2158 }
2159
2160 if (udma_is_chan_running(uc)) {
2161 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2162 udma_stop(uc);
2163 if (udma_is_chan_running(uc)) {
2164 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2165 goto err_res_free;
2166 }
2167 }
2168
2169 udma_reset_rings(uc);
2170
2171 if (uc->tchan)
2172 dev_dbg(ud->dev,
2173 "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2174 uc->id, uc->tchan->id, uc->tchan->tflow_id,
2175 uc->config.remote_thread_id);
2176 else if (uc->rchan)
2177 dev_dbg(ud->dev,
2178 "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2179 uc->id, uc->rchan->id, uc->rflow->id,
2180 uc->config.remote_thread_id);
2181 return 0;
2182
2183err_res_free:
2184 udma_free_tx_resources(uc);
2185 udma_free_rx_resources(uc);
2186
2187 udma_reset_uchan(uc);
2188
2189 return ret;
2190}
2191
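/*
 * Descriptive note (added): the DMA uclass .transfer callback performs a
 * fully blocking memcpy on channel 0, which is reserved for this purpose at
 * probe time: prepare the TR descriptor, start the channel, poll for
 * completion, then stop it again.
 */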
Vignesh R3a9dbf32019-02-05 17:31:24 +05302192static int udma_transfer(struct udevice *dev, int direction,
Andrew Davisd2da2842022-10-07 12:11:13 -05002193 dma_addr_t dst, dma_addr_t src, size_t len)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302194{
2195 struct udma_dev *ud = dev_get_priv(dev);
2196	/* Channel 0 is reserved for memcpy */
2197 struct udma_chan *uc = &ud->channels[0];
2198 dma_addr_t paddr = 0;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302199
Andrew Davisd2da2842022-10-07 12:11:13 -05002200 udma_prep_dma_memcpy(uc, dst, src, len);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302201 udma_start(uc);
2202 udma_poll_completion(uc, &paddr);
2203 udma_stop(uc);
2204
Vignesh R3a9dbf32019-02-05 17:31:24 +05302205 return 0;
2206}
2207
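/*
 * Descriptive note (added): the .request callback allocates channel
 * resources for the matching DMA type plus the coherent descriptor memory -
 * one host descriptor for MEM_TO_DEV channels, UDMA_RX_DESC_NUM descriptors
 * for DEV_TO_MEM. For RX channels the rflow id is recorded in cfg_data so
 * clients can retrieve it through udma_get_cfg().
 */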
2208static int udma_request(struct dma *dma)
2209{
2210 struct udma_dev *ud = dev_get_priv(dma->dev);
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302211 struct udma_chan_config *ucc;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302212 struct udma_chan *uc;
2213 unsigned long dummy;
2214 int ret;
2215
2216 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2217 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2218 return -EINVAL;
2219 }
2220
2221 uc = &ud->channels[dma->id];
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302222 ucc = &uc->config;
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302223 switch (ud->match_data->type) {
2224 case DMA_TYPE_UDMA:
2225 ret = udma_alloc_chan_resources(uc);
2226 break;
2227 case DMA_TYPE_BCDMA:
2228 ret = bcdma_alloc_chan_resources(uc);
2229 break;
2230 case DMA_TYPE_PKTDMA:
2231 ret = pktdma_alloc_chan_resources(uc);
2232 break;
2233 default:
2234 return -EINVAL;
2235 }
Vignesh R3a9dbf32019-02-05 17:31:24 +05302236 if (ret) {
2237 dev_err(dma->dev, "alloc dma res failed %d\n", ret);
2238 return -EINVAL;
2239 }
2240
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302241 if (uc->config.dir == DMA_MEM_TO_DEV) {
2242 uc->desc_tx = dma_alloc_coherent(ucc->hdesc_size, &dummy);
2243 memset(uc->desc_tx, 0, ucc->hdesc_size);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302244 } else {
2245 uc->desc_rx = dma_alloc_coherent(
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302246 ucc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
2247 memset(uc->desc_rx, 0, ucc->hdesc_size * UDMA_RX_DESC_NUM);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302248 }
2249
2250 uc->in_use = true;
2251 uc->desc_rx_cur = 0;
2252 uc->num_rx_bufs = 0;
2253
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302254 if (uc->config.dir == DMA_DEV_TO_MEM) {
Vignesh Raghavendra39349892019-12-04 22:17:21 +05302255 uc->cfg_data.flow_id_base = uc->rflow->id;
2256 uc->cfg_data.flow_id_cnt = 1;
2257 }
2258
Vignesh R3a9dbf32019-02-05 17:31:24 +05302259 return 0;
2260}
2261
Simon Glass75c0ad62020-02-03 07:35:55 -07002262static int udma_rfree(struct dma *dma)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302263{
2264 struct udma_dev *ud = dev_get_priv(dma->dev);
2265 struct udma_chan *uc;
2266
2267 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2268 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2269 return -EINVAL;
2270 }
2271 uc = &ud->channels[dma->id];
2272
2273 if (udma_is_chan_running(uc))
2274 udma_stop(uc);
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302275
2276 udma_navss_psil_unpair(ud, uc->config.src_thread,
2277 uc->config.dst_thread);
2278
2279 bcdma_free_bchan_resources(uc);
2280 udma_free_tx_resources(uc);
2281 udma_free_rx_resources(uc);
2282 udma_reset_uchan(uc);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302283
2284 uc->in_use = false;
2285
2286 return 0;
2287}
2288
2289static int udma_enable(struct dma *dma)
2290{
2291 struct udma_dev *ud = dev_get_priv(dma->dev);
2292 struct udma_chan *uc;
2293 int ret;
2294
2295 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2296 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2297 return -EINVAL;
2298 }
2299 uc = &ud->channels[dma->id];
2300
2301 ret = udma_start(uc);
2302
2303 return ret;
2304}
2305
2306static int udma_disable(struct dma *dma)
2307{
2308 struct udma_dev *ud = dev_get_priv(dma->dev);
2309 struct udma_chan *uc;
2310 int ret = 0;
2311
2312 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2313 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2314 return -EINVAL;
2315 }
2316 uc = &ud->channels[dma->id];
2317
2318 if (udma_is_chan_running(uc))
2319 ret = udma_stop(uc);
2320 else
2321 dev_err(dma->dev, "%s not running\n", __func__);
2322
2323 return ret;
2324}
2325
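/*
 * Descriptive note (added): udma_send() transmits one packet - it builds the
 * host-mode CPPI5 descriptor (packet length, buffer, packet type and
 * destination tag from the caller's metadata), flushes both the payload and
 * the descriptor from the data cache, pushes the descriptor to the transmit
 * ring and busy-waits for completion.
 */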
2326static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
2327{
2328 struct udma_dev *ud = dev_get_priv(dma->dev);
2329 struct cppi5_host_desc_t *desc_tx;
2330 dma_addr_t dma_src = (dma_addr_t)src;
2331 struct ti_udma_drv_packet_data packet_data = { 0 };
2332 dma_addr_t paddr;
2333 struct udma_chan *uc;
2334 u32 tc_ring_id;
2335 int ret;
2336
Keerthya3c8bb12019-04-24 16:33:54 +05302337 if (metadata)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302338 packet_data = *((struct ti_udma_drv_packet_data *)metadata);
2339
2340 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2341 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2342 return -EINVAL;
2343 }
2344 uc = &ud->channels[dma->id];
2345
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302346 if (uc->config.dir != DMA_MEM_TO_DEV)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302347 return -EINVAL;
2348
2349 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
2350
2351 desc_tx = uc->desc_tx;
2352
2353 cppi5_hdesc_reset_hbdesc(desc_tx);
2354
2355 cppi5_hdesc_init(desc_tx,
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302356 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
2357 uc->config.psd_size);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302358 cppi5_hdesc_set_pktlen(desc_tx, len);
2359 cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
2360 cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
2361 cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
2362	/* information below is passed in by the caller */
2363 cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
2364 cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);
2365
Vignesh Raghavendrace431412019-12-09 10:25:39 +05302366 flush_dcache_range((unsigned long)dma_src,
2367 ALIGN((unsigned long)dma_src + len,
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05302368 ARCH_DMA_MINALIGN));
Vignesh Raghavendrace431412019-12-09 10:25:39 +05302369 flush_dcache_range((unsigned long)desc_tx,
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302370 ALIGN((unsigned long)desc_tx + uc->config.hdesc_size,
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05302371 ARCH_DMA_MINALIGN));
Vignesh R3a9dbf32019-02-05 17:31:24 +05302372
Vignesh Raghavendrafc7a33f2019-12-09 10:25:38 +05302373 ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302374 if (ret) {
2375 dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
2376 dma->id, ret);
2377 return ret;
2378 }
2379
2380 udma_poll_completion(uc, &paddr);
2381
2382 return 0;
2383}
2384
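/*
 * Descriptive note (added): udma_receive() pops a completed descriptor from
 * the receive ring, invalidates the descriptor and payload cache lines and
 * hands the buffer back to the caller. It returns the packet length, 0 when
 * nothing is pending, or a negative error code.
 */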
2385static int udma_receive(struct dma *dma, void **dst, void *metadata)
2386{
2387 struct udma_dev *ud = dev_get_priv(dma->dev);
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302388 struct udma_chan_config *ucc;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302389 struct cppi5_host_desc_t *desc_rx;
2390 dma_addr_t buf_dma;
2391 struct udma_chan *uc;
2392 u32 buf_dma_len, pkt_len;
2393 u32 port_id = 0;
2394 int ret;
2395
2396 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2397 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2398 return -EINVAL;
2399 }
2400 uc = &ud->channels[dma->id];
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302401 ucc = &uc->config;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302402
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302403 if (uc->config.dir != DMA_DEV_TO_MEM)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302404 return -EINVAL;
2405 if (!uc->num_rx_bufs)
2406 return -EINVAL;
2407
Vignesh Raghavendra2db3b282020-07-06 13:26:26 +05302408 ret = k3_nav_ringacc_ring_pop(uc->rflow->r_ring, &desc_rx);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302409 if (ret && ret != -ENODATA) {
2410 dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
2411 return ret;
2412 } else if (ret == -ENODATA) {
2413 return 0;
2414 }
2415
2416 /* invalidate cache data */
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05302417 invalidate_dcache_range((ulong)desc_rx,
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302418 (ulong)(desc_rx + ucc->hdesc_size));
Vignesh R3a9dbf32019-02-05 17:31:24 +05302419
2420 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
2421 pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
2422
2423 /* invalidate cache data */
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05302424 invalidate_dcache_range((ulong)buf_dma,
2425 (ulong)(buf_dma + buf_dma_len));
Vignesh R3a9dbf32019-02-05 17:31:24 +05302426
2427 cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
2428
2429 *dst = (void *)buf_dma;
2430 uc->num_rx_bufs--;
2431
2432 return pkt_len;
2433}
2434
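/*
 * Descriptive note (added): udma_of_xlate() translates a device tree DMA
 * spec into a channel - it picks the first unused channel, derives the
 * direction from the PSI-L thread id (destination threads have
 * K3_PSIL_DST_THREAD_ID_OFFSET set), looks up the PSI-L endpoint
 * configuration and sizes the host descriptors accordingly.
 */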
2435static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
2436{
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302437 struct udma_chan_config *ucc;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302438 struct udma_dev *ud = dev_get_priv(dma->dev);
2439 struct udma_chan *uc = &ud->channels[0];
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302440 struct psil_endpoint_config *ep_config;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302441 u32 val;
2442
2443 for (val = 0; val < ud->ch_count; val++) {
2444 uc = &ud->channels[val];
2445 if (!uc->in_use)
2446 break;
2447 }
2448
2449 if (val == ud->ch_count)
2450 return -EBUSY;
2451
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302452 ucc = &uc->config;
2453 ucc->remote_thread_id = args->args[0];
2454 if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
2455 ucc->dir = DMA_MEM_TO_DEV;
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302456 else
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302457 ucc->dir = DMA_DEV_TO_MEM;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302458
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302459 ep_config = psil_get_ep_config(ucc->remote_thread_id);
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302460 if (IS_ERR(ep_config)) {
2461 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302462 uc->config.remote_thread_id);
2463 ucc->dir = DMA_MEM_TO_MEM;
2464 ucc->remote_thread_id = -1;
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302465 return false;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302466 }
2467
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302468 ucc->pkt_mode = ep_config->pkt_mode;
2469 ucc->channel_tpl = ep_config->channel_tpl;
2470 ucc->notdpkt = ep_config->notdpkt;
2471 ucc->ep_type = ep_config->ep_type;
Vignesh R3a9dbf32019-02-05 17:31:24 +05302472
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302473 if (ud->match_data->type == DMA_TYPE_PKTDMA &&
2474 ep_config->mapped_channel_id >= 0) {
2475 ucc->mapped_channel_id = ep_config->mapped_channel_id;
2476 ucc->default_flow_id = ep_config->default_flow_id;
2477 } else {
2478 ucc->mapped_channel_id = -1;
2479 ucc->default_flow_id = -1;
2480 }
2481
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302482 ucc->needs_epib = ep_config->needs_epib;
2483 ucc->psd_size = ep_config->psd_size;
2484 ucc->metadata_size = (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + ucc->psd_size;
2485
2486 ucc->hdesc_size = cppi5_hdesc_calc_size(ucc->needs_epib,
2487 ucc->psd_size, 0);
2488 ucc->hdesc_size = ALIGN(ucc->hdesc_size, ARCH_DMA_MINALIGN);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302489
2490 dma->id = uc->id;
2491 pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302492 dma->id, ucc->needs_epib,
2493 ucc->psd_size, ucc->metadata_size,
2494 ucc->remote_thread_id);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302495
2496 return 0;
2497}
2498
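/*
 * Descriptive note (added): udma_prepare_rcv_buf() queues one RX buffer -
 * it initialises the next free host descriptor for the buffer, invalidates
 * the destination buffer from the cache, flushes the descriptor and pushes
 * it to the free-descriptor ring.
 */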
2499int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
2500{
2501 struct udma_dev *ud = dev_get_priv(dma->dev);
2502 struct cppi5_host_desc_t *desc_rx;
2503 dma_addr_t dma_dst;
2504 struct udma_chan *uc;
2505 u32 desc_num;
2506
2507 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2508 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2509 return -EINVAL;
2510 }
2511 uc = &ud->channels[dma->id];
2512
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302513 if (uc->config.dir != DMA_DEV_TO_MEM)
Vignesh R3a9dbf32019-02-05 17:31:24 +05302514 return -EINVAL;
2515
2516 if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
2517 return -EINVAL;
2518
2519 desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302520 desc_rx = uc->desc_rx + (desc_num * uc->config.hdesc_size);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302521 dma_dst = (dma_addr_t)dst;
2522
2523 cppi5_hdesc_reset_hbdesc(desc_rx);
2524
2525 cppi5_hdesc_init(desc_rx,
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302526 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
2527 uc->config.psd_size);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302528 cppi5_hdesc_set_pktlen(desc_rx, size);
2529 cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);
2530
Matthias Schiffer65aef702024-04-26 10:02:28 +02002531 invalidate_dcache_range((unsigned long)dma_dst,
2532 (unsigned long)(dma_dst + size));
2533
Vignesh Raghavendrace431412019-12-09 10:25:39 +05302534 flush_dcache_range((unsigned long)desc_rx,
Vignesh Raghavendra07826212020-07-06 13:26:25 +05302535 ALIGN((unsigned long)desc_rx + uc->config.hdesc_size,
Vignesh Raghavendra05b711f2019-12-09 10:25:35 +05302536 ARCH_DMA_MINALIGN));
Vignesh R3a9dbf32019-02-05 17:31:24 +05302537
Vignesh Raghavendra2db3b282020-07-06 13:26:26 +05302538 udma_push_to_ring(uc->rflow->fd_ring, desc_rx);
Vignesh R3a9dbf32019-02-05 17:31:24 +05302539
2540 uc->num_rx_bufs++;
2541 uc->desc_rx_cur++;
2542
2543 return 0;
2544}
2545
Vignesh Raghavendra39349892019-12-04 22:17:21 +05302546static int udma_get_cfg(struct dma *dma, u32 id, void **data)
2547{
2548 struct udma_dev *ud = dev_get_priv(dma->dev);
2549 struct udma_chan *uc;
2550
2551 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2552 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2553 return -EINVAL;
2554 }
2555
2556 switch (id) {
2557 case TI_UDMA_CHAN_PRIV_INFO:
2558 uc = &ud->channels[dma->id];
2559 *data = &uc->cfg_data;
2560 return 0;
2561 }
2562
2563 return -EINVAL;
2564}
2565
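/*
 * Descriptive note (added): udma_probe() looks up the TISCI handle and
 * device ids, obtains the ring accelerator (the shared NAVSS instance for
 * UDMA, DMSS-local rings for BCDMA/PKTDMA), sets up the resource bitmaps,
 * initialises the per-resource register pointers and pre-allocates channel 0
 * for memcpy on the DMA types that support it.
 */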
Santhosh Kumar K976edc62024-10-09 20:27:02 +05302566static int udma_probe(struct udevice *dev)
2567{
2568 struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
2569 struct udma_dev *ud = dev_get_priv(dev);
2570 int i, ret;
2571 struct udevice *tmp;
2572 struct udevice *tisci_dev = NULL;
2573 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
Santhosh Kumar K16e3d042024-10-09 20:27:03 +05302574 struct udma_chan *uc;
Santhosh Kumar K976edc62024-10-09 20:27:02 +05302575 ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));
2576
2577 ud->match_data = (void *)dev_get_driver_data(dev);
2578 ret = udma_get_mmrs(dev);
2579 if (ret)
2580 return ret;
2581
2582 ud->psil_base = ud->match_data->psil_base;
2583
2584 ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
2585 "ti,sci", &tisci_dev);
2586 if (ret) {
2587 debug("Failed to get TISCI phandle (%d)\n", ret);
2588 tisci_rm->tisci = NULL;
2589 return -EINVAL;
2590 }
2591 tisci_rm->tisci = (struct ti_sci_handle *)
2592 (ti_sci_get_handle_from_sysfw(tisci_dev));
2593
2594 tisci_rm->tisci_dev_id = -1;
2595 ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
2596 if (ret) {
2597 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
2598 return ret;
2599 }
2600
2601 tisci_rm->tisci_navss_dev_id = -1;
2602 ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
2603 &tisci_rm->tisci_navss_dev_id);
2604 if (ret) {
2605 dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
2606 return ret;
2607 }
2608
2609 tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
2610 tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;
2611
2612 if (ud->match_data->type == DMA_TYPE_UDMA) {
2613 ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
2614 "ti,ringacc", &tmp);
2615 ud->ringacc = dev_get_priv(tmp);
2616 } else {
2617 struct k3_ringacc_init_data ring_init_data;
2618
2619 ring_init_data.tisci = ud->tisci_rm.tisci;
2620 ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
2621 if (ud->match_data->type == DMA_TYPE_BCDMA) {
2622 ring_init_data.num_rings = ud->bchan_cnt +
2623 ud->tchan_cnt +
2624 ud->rchan_cnt;
2625 } else {
2626 ring_init_data.num_rings = ud->rflow_cnt +
2627 ud->tflow_cnt;
2628 }
2629
2630 ud->ringacc = k3_ringacc_dmarings_init(dev, &ring_init_data);
2631 }
2632 if (IS_ERR(ud->ringacc))
2633 return PTR_ERR(ud->ringacc);
2634
2635 ud->dev = dev;
2636 ret = setup_resources(ud);
2637 if (ret < 0)
2638 return ret;
2639
2640 ud->ch_count = ret;
2641
2642 for (i = 0; i < ud->bchan_cnt; i++) {
2643 struct udma_bchan *bchan = &ud->bchans[i];
2644
2645 bchan->id = i;
2646 bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
2647 }
2648
2649 for (i = 0; i < ud->tchan_cnt; i++) {
2650 struct udma_tchan *tchan = &ud->tchans[i];
2651
2652 tchan->id = i;
2653 tchan->reg_chan = ud->mmrs[MMR_TCHAN] + UDMA_CH_100(i);
2654 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
2655 }
2656
2657 for (i = 0; i < ud->rchan_cnt; i++) {
2658 struct udma_rchan *rchan = &ud->rchans[i];
2659
2660 rchan->id = i;
2661 rchan->reg_chan = ud->mmrs[MMR_RCHAN] + UDMA_CH_100(i);
2662 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
2663 }
2664
2665 for (i = 0; i < ud->rflow_cnt; i++) {
2666 struct udma_rflow *rflow = &ud->rflows[i];
2667
2668 rflow->id = i;
2669 rflow->reg_rflow = ud->mmrs[MMR_RFLOW] + UDMA_CH_40(i);
2670 }
2671
2672 for (i = 0; i < ud->ch_count; i++) {
2673 struct udma_chan *uc = &ud->channels[i];
2674
2675 uc->ud = ud;
2676 uc->id = i;
2677 uc->config.remote_thread_id = -1;
2678 uc->bchan = NULL;
2679 uc->tchan = NULL;
2680 uc->rchan = NULL;
2681 uc->config.mapped_channel_id = -1;
2682 uc->config.default_flow_id = -1;
2683 uc->config.dir = DMA_MEM_TO_MEM;
2684		sprintf(uc->name, "UDMA chan%d", i);
2685 if (!i)
2686 uc->in_use = true;
2687 }
2688
2689 pr_debug("%s(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
2690 dev->name,
2691 udma_read(ud->mmrs[MMR_GCFG], 0),
2692 udma_read(ud->mmrs[MMR_GCFG], 0x20),
2693 udma_read(ud->mmrs[MMR_GCFG], 0x24),
2694 udma_read(ud->mmrs[MMR_GCFG], 0x28),
2695 udma_read(ud->mmrs[MMR_GCFG], 0x2c));
2696
2697 uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;
2698
Santhosh Kumar K16e3d042024-10-09 20:27:03 +05302699 uc = &ud->channels[0];
2700 ret = 0;
2701 switch (ud->match_data->type) {
2702 case DMA_TYPE_UDMA:
2703 ret = udma_alloc_chan_resources(uc);
2704 break;
2705 case DMA_TYPE_BCDMA:
2706 ret = bcdma_alloc_chan_resources(uc);
2707 break;
2708 default:
2709 break; /* Do nothing in any other case */
2710	}
2711
2712 if (ret)
2713		dev_err(dev, "Channel 0 allocation failure %d\n", ret);
2714
2715 return ret;
2716}
2717
2718static int udma_remove(struct udevice *dev)
2719{
2720 struct udma_dev *ud = dev_get_priv(dev);
2721 struct udma_chan *uc = &ud->channels[0];
2722
2723 switch (ud->match_data->type) {
2724 case DMA_TYPE_UDMA:
2725 udma_free_chan_resources(uc);
2726 break;
2727 case DMA_TYPE_BCDMA:
2728 bcdma_free_bchan_resources(uc);
2729 break;
2730 default:
2731 break;
2732	}
2733
Santhosh Kumar K976edc62024-10-09 20:27:02 +05302734 return 0;
2735}
2736
Vignesh R3a9dbf32019-02-05 17:31:24 +05302737static const struct dma_ops udma_ops = {
2738 .transfer = udma_transfer,
2739 .of_xlate = udma_of_xlate,
2740 .request = udma_request,
Simon Glass75c0ad62020-02-03 07:35:55 -07002741 .rfree = udma_rfree,
Vignesh R3a9dbf32019-02-05 17:31:24 +05302742 .enable = udma_enable,
2743 .disable = udma_disable,
2744 .send = udma_send,
2745 .receive = udma_receive,
2746 .prepare_rcv_buf = udma_prepare_rcv_buf,
Vignesh Raghavendra39349892019-12-04 22:17:21 +05302747 .get_cfg = udma_get_cfg,
Vignesh R3a9dbf32019-02-05 17:31:24 +05302748};
2749
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302750static struct udma_match_data am654_main_data = {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302751 .type = DMA_TYPE_UDMA,
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302752 .psil_base = 0x1000,
2753 .enable_memcpy_support = true,
2754 .statictr_z_mask = GENMASK(11, 0),
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302755 .oes = {
2756 .udma_rchan = 0x200,
2757 },
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302758 .tpl_levels = 2,
2759 .level_start_idx = {
2760 [0] = 8, /* Normal channels */
2761 [1] = 0, /* High Throughput channels */
2762 },
2763};
2764
2765static struct udma_match_data am654_mcu_data = {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302766 .type = DMA_TYPE_UDMA,
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302767 .psil_base = 0x6000,
2768 .enable_memcpy_support = true,
2769 .statictr_z_mask = GENMASK(11, 0),
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302770 .oes = {
2771 .udma_rchan = 0x200,
2772 },
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302773 .tpl_levels = 2,
2774 .level_start_idx = {
2775 [0] = 2, /* Normal channels */
2776 [1] = 0, /* High Throughput channels */
2777 },
2778};
2779
2780static struct udma_match_data j721e_main_data = {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302781 .type = DMA_TYPE_UDMA,
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302782 .psil_base = 0x1000,
2783 .enable_memcpy_support = true,
2784 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2785 .statictr_z_mask = GENMASK(23, 0),
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302786 .oes = {
2787 .udma_rchan = 0x400,
2788 },
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302789 .tpl_levels = 3,
2790 .level_start_idx = {
2791 [0] = 16, /* Normal channels */
2792 [1] = 4, /* High Throughput channels */
2793 [2] = 0, /* Ultra High Throughput channels */
2794 },
2795};
2796
2797static struct udma_match_data j721e_mcu_data = {
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302798 .type = DMA_TYPE_UDMA,
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302799 .psil_base = 0x6000,
2800 .enable_memcpy_support = true,
2801 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2802 .statictr_z_mask = GENMASK(23, 0),
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302803 .oes = {
2804 .udma_rchan = 0x400,
2805 },
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302806 .tpl_levels = 2,
2807 .level_start_idx = {
2808 [0] = 2, /* Normal channels */
2809 [1] = 0, /* High Throughput channels */
2810 },
2811};
2812
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302813static struct udma_match_data am64_bcdma_data = {
2814 .type = DMA_TYPE_BCDMA,
2815 .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
2816 .enable_memcpy_support = true, /* Supported via bchan */
2817 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2818 .statictr_z_mask = GENMASK(23, 0),
2819 .oes = {
2820 .bcdma_bchan_data = 0x2200,
2821 .bcdma_bchan_ring = 0x2400,
2822 .bcdma_tchan_data = 0x2800,
2823 .bcdma_tchan_ring = 0x2a00,
2824 .bcdma_rchan_data = 0x2e00,
2825 .bcdma_rchan_ring = 0x3000,
2826 },
2827 /* No throughput levels */
2828};
2829
2830static struct udma_match_data am64_pktdma_data = {
2831 .type = DMA_TYPE_PKTDMA,
2832 .psil_base = 0x1000,
2833 .enable_memcpy_support = false,
2834 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2835 .statictr_z_mask = GENMASK(23, 0),
2836 .oes = {
2837 .pktdma_tchan_flow = 0x1200,
2838 .pktdma_rchan_flow = 0x1600,
2839 },
2840 /* No throughput levels */
2841};
2842
Vignesh R3a9dbf32019-02-05 17:31:24 +05302843static const struct udevice_id udma_ids[] = {
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302844 {
2845 .compatible = "ti,am654-navss-main-udmap",
2846 .data = (ulong)&am654_main_data,
2847 },
2848 {
2849 .compatible = "ti,am654-navss-mcu-udmap",
2850 .data = (ulong)&am654_mcu_data,
2851 }, {
2852 .compatible = "ti,j721e-navss-main-udmap",
2853 .data = (ulong)&j721e_main_data,
2854 }, {
2855 .compatible = "ti,j721e-navss-mcu-udmap",
2856 .data = (ulong)&j721e_mcu_data,
2857 },
Vignesh Raghavendra5a7589c2021-05-10 20:06:08 +05302858 {
2859 .compatible = "ti,am64-dmss-bcdma",
2860 .data = (ulong)&am64_bcdma_data,
2861 },
2862 {
2863 .compatible = "ti,am64-dmss-pktdma",
2864 .data = (ulong)&am64_pktdma_data,
2865 },
Vignesh Raghavendra222f5d82020-07-07 13:43:34 +05302866 { /* Sentinel */ },
Vignesh R3a9dbf32019-02-05 17:31:24 +05302867};
2868
2869U_BOOT_DRIVER(ti_edma3) = {
2870 .name = "ti-udma",
2871 .id = UCLASS_DMA,
2872 .of_match = udma_ids,
2873 .ops = &udma_ops,
2874 .probe = udma_probe,
Santhosh Kumar K16e3d042024-10-09 20:27:03 +05302875 .remove = udma_remove,
Simon Glass8a2b47f2020-12-03 16:55:17 -07002876 .priv_auto = sizeof(struct udma_dev),
Santhosh Kumar K16e3d042024-10-09 20:27:03 +05302877 .flags = DM_FLAG_OS_PREPARE,
Vignesh R3a9dbf32019-02-05 17:31:24 +05302878};