// SPDX-License-Identifier: GPL-2.0+
/*
 * Direct Memory Access U-Class driver
 *
 * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com>
 * Copyright (C) 2015 - 2018 Texas Instruments Incorporated <www.ti.com>
 * Written by Mugunthan V N <mugunthanvnm@ti.com>
 *
 * Author: Mugunthan V N <mugunthanvnm@ti.com>
 */

#define LOG_CATEGORY UCLASS_DMA

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <malloc.h>
#include <asm/cache.h>
#include <dm/read.h>
#include <dma-uclass.h>
#include <linux/dma-mapping.h>
#include <dt-structs.h>
#include <errno.h>

#ifdef CONFIG_DMA_CHANNELS
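/* Fetch the driver's DMA operations for a DMA controller device */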
static inline struct dma_ops *dma_dev_ops(struct udevice *dev)
{
	return (struct dma_ops *)dev->driver->ops;
}

# if CONFIG_IS_ENABLED(OF_CONTROL)
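/*
 * Default #dma-cells translation: at most one argument cell is accepted
 * and, when present, it selects the channel id (0 if omitted).
 */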
static int dma_of_xlate_default(struct dma *dma,
				struct ofnode_phandle_args *args)
{
	debug("%s(dma=%p)\n", __func__, dma);

	if (args->args_count > 1) {
		pr_err("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		dma->id = args->args[0];
	else
		dma->id = 0;

	return 0;
}

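/*
 * Resolve entry 'index' of the client's "dmas" property: find the
 * controller device, translate the phandle args into dma->id (via the
 * driver's of_xlate() or the default above) and request the channel.
 */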
int dma_get_by_index(struct udevice *dev, int index, struct dma *dma)
{
	int ret;
	struct ofnode_phandle_args args;
	struct udevice *dev_dma;
	const struct dma_ops *ops;

	debug("%s(dev=%p, index=%d, dma=%p)\n", __func__, dev, index, dma);

	assert(dma);
	dma->dev = NULL;

	ret = dev_read_phandle_with_args(dev, "dmas", "#dma-cells", 0, index,
					 &args);
	if (ret) {
		pr_err("%s: dev_read_phandle_with_args failed: err=%d\n",
		       __func__, ret);
		return ret;
	}

	ret = uclass_get_device_by_ofnode(UCLASS_DMA, args.node, &dev_dma);
	if (ret) {
		pr_err("%s: uclass_get_device_by_ofnode failed: err=%d\n",
		       __func__, ret);
		return ret;
	}

	dma->dev = dev_dma;

	ops = dma_dev_ops(dev_dma);

	if (ops->of_xlate)
		ret = ops->of_xlate(dma, &args);
	else
		ret = dma_of_xlate_default(dma, &args);
	if (ret) {
		pr_err("of_xlate() failed: %d\n", ret);
		return ret;
	}

	return dma_request(dev_dma, dma);
}

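/* Look up 'name' in the client's "dma-names" list and get that channel */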
int dma_get_by_name(struct udevice *dev, const char *name, struct dma *dma)
{
	int index;

	debug("%s(dev=%p, name=%s, dma=%p)\n", __func__, dev, name, dma);
	dma->dev = NULL;

	index = dev_read_stringlist_search(dev, "dma-names", name);
	if (index < 0) {
		pr_err("dev_read_stringlist_search() failed: %d\n", index);
		return index;
	}

	return dma_get_by_index(dev, index, dma);
}
# endif /* OF_CONTROL */

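/* Claim a channel on 'dev'; the driver's request() op is optional */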
int dma_request(struct udevice *dev, struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dev);

	debug("%s(dev=%p, dma=%p)\n", __func__, dev, dma);

	dma->dev = dev;

	if (!ops->request)
		return 0;

	return ops->request(dma);
}

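/* Release a channel; the driver's rfree() op is optional */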
int dma_free(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->rfree)
		return 0;

	return ops->rfree(dma);
}

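/* Enable/disable transfers on the channel; -ENOSYS if the driver lacks the op */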
int dma_enable(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->enable)
		return -ENOSYS;

	return ops->enable(dma);
}

int dma_disable(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->disable)
		return -ENOSYS;

	return ops->disable(dma);
}

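/* Hand a receive buffer to the driver ahead of an incoming transfer */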
int dma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->prepare_rcv_buf)
		return -1;

	return ops->prepare_rcv_buf(dma, dst, size);
}

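/* Receive data; the driver stores the finished buffer's address in *dst */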
int dma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->receive)
		return -ENOSYS;

	return ops->receive(dma, dst, metadata);
}

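/* Send 'len' bytes from 'src' over the channel */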
int dma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->send)
		return -ENOSYS;

	return ops->send(dma, src, len, metadata);
}

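/* Query driver-specific channel configuration data for 'cfg_id' */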
int dma_get_cfg(struct dma *dma, u32 cfg_id, void **cfg_data)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->get_cfg)
		return -ENOSYS;

	return ops->get_cfg(dma, cfg_id, cfg_data);
}
#endif /* CONFIG_DMA_CHANNELS */

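/* Find the first controller whose 'supported' mask matches transfer_type */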
int dma_get_device(u32 transfer_type, struct udevice **devp)
{
	struct udevice *dev;
	int ret;

	for (ret = uclass_first_device(UCLASS_DMA, &dev); dev && !ret;
	     ret = uclass_next_device(&dev)) {
		struct dma_dev_priv *uc_priv;

		uc_priv = dev_get_uclass_priv(dev);
		if (uc_priv->supported & transfer_type)
			break;
	}

	if (!dev) {
		pr_debug("No DMA device found that supports %x type\n",
			 transfer_type);
		return -EPROTONOSUPPORT;
	}

	*devp = dev;

	return ret;
}

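/*
 * Copy 'len' bytes from 'src' to 'dst' using the first mem-to-mem capable
 * controller; the buffers are DMA-mapped around the transfer to keep the
 * caches coherent with what the hardware reads and writes.
 */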
int dma_memcpy(void *dst, void *src, size_t len)
{
	struct udevice *dev;
	const struct dma_ops *ops;
	dma_addr_t destination;
	dma_addr_t source;
	int ret;

	ret = dma_get_device(DMA_SUPPORTS_MEM_TO_MEM, &dev);
	if (ret < 0)
		return ret;

	ops = device_get_ops(dev);
	if (!ops->transfer)
		return -ENOSYS;

	/* Clean the areas, so no writeback into the RAM races with DMA */
	destination = dma_map_single(dst, len, DMA_FROM_DEVICE);
	source = dma_map_single(src, len, DMA_TO_DEVICE);

	ret = ops->transfer(dev, DMA_MEM_TO_MEM, destination, source, len);

	/* Clean+Invalidate the areas after, so we can see DMA'd data */
	dma_unmap_single(destination, len, DMA_FROM_DEVICE);
	dma_unmap_single(source, len, DMA_TO_DEVICE);

	return ret;
}

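/* Each DMA device gets a struct dma_dev_priv holding its 'supported' mask */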
UCLASS_DRIVER(dma) = {
	.id = UCLASS_DMA,
	.name = "dma",
	.flags = DM_UC_FLAG_SEQ_ALIAS,
	.per_device_auto = sizeof(struct dma_dev_priv),
};