blob: 9d5a7fc796c488e708cfba2f05ed74b18edf7880 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Mugunthan V N8c3c9182016-02-15 15:31:37 +05302/*
3 * Direct Memory Access U-Class driver
4 *
Álvaro Fernández Rojasd8cedab2018-11-28 19:17:50 +01005 * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com>
6 * Copyright (C) 2015 - 2018 Texas Instruments Incorporated <www.ti.com>
7 * Written by Mugunthan V N <mugunthanvnm@ti.com>
Mugunthan V N8c3c9182016-02-15 15:31:37 +05308 *
9 * Author: Mugunthan V N <mugunthanvnm@ti.com>
Mugunthan V N8c3c9182016-02-15 15:31:37 +053010 */
11
12#include <common.h>
Simon Glass63334482019-11-14 12:57:39 -070013#include <cpu_func.h>
Mugunthan V N8c3c9182016-02-15 15:31:37 +053014#include <dm.h>
Simon Glass9bc15642020-02-03 07:36:16 -070015#include <malloc.h>
Álvaro Fernández Rojasd8cedab2018-11-28 19:17:50 +010016#include <dm/read.h>
Álvaro Fernández Rojas308cd6f2018-11-28 19:17:49 +010017#include <dma-uclass.h>
Álvaro Fernández Rojasd8cedab2018-11-28 19:17:50 +010018#include <dt-structs.h>
Mugunthan V N8c3c9182016-02-15 15:31:37 +053019#include <errno.h>
20
Álvaro Fernández Rojasd8cedab2018-11-28 19:17:50 +010021#ifdef CONFIG_DMA_CHANNELS
22static inline struct dma_ops *dma_dev_ops(struct udevice *dev)
23{
24 return (struct dma_ops *)dev->driver->ops;
25}
26
27# if CONFIG_IS_ENABLED(OF_CONTROL)
28static int dma_of_xlate_default(struct dma *dma,
29 struct ofnode_phandle_args *args)
30{
31 debug("%s(dma=%p)\n", __func__, dma);
32
33 if (args->args_count > 1) {
34 pr_err("Invaild args_count: %d\n", args->args_count);
35 return -EINVAL;
36 }
37
38 if (args->args_count)
39 dma->id = args->args[0];
40 else
41 dma->id = 0;
42
43 return 0;
44}
45
46int dma_get_by_index(struct udevice *dev, int index, struct dma *dma)
47{
48 int ret;
49 struct ofnode_phandle_args args;
50 struct udevice *dev_dma;
51 const struct dma_ops *ops;
52
53 debug("%s(dev=%p, index=%d, dma=%p)\n", __func__, dev, index, dma);
54
55 assert(dma);
56 dma->dev = NULL;
57
58 ret = dev_read_phandle_with_args(dev, "dmas", "#dma-cells", 0, index,
59 &args);
60 if (ret) {
61 pr_err("%s: dev_read_phandle_with_args failed: err=%d\n",
62 __func__, ret);
63 return ret;
64 }
65
66 ret = uclass_get_device_by_ofnode(UCLASS_DMA, args.node, &dev_dma);
67 if (ret) {
68 pr_err("%s: uclass_get_device_by_ofnode failed: err=%d\n",
69 __func__, ret);
70 return ret;
71 }
72
73 dma->dev = dev_dma;
74
75 ops = dma_dev_ops(dev_dma);
76
77 if (ops->of_xlate)
78 ret = ops->of_xlate(dma, &args);
79 else
80 ret = dma_of_xlate_default(dma, &args);
81 if (ret) {
82 pr_err("of_xlate() failed: %d\n", ret);
83 return ret;
84 }
85
86 return dma_request(dev_dma, dma);
87}
88
89int dma_get_by_name(struct udevice *dev, const char *name, struct dma *dma)
90{
91 int index;
92
93 debug("%s(dev=%p, name=%s, dma=%p)\n", __func__, dev, name, dma);
94 dma->dev = NULL;
95
96 index = dev_read_stringlist_search(dev, "dma-names", name);
97 if (index < 0) {
98 pr_err("dev_read_stringlist_search() failed: %d\n", index);
99 return index;
100 }
101
102 return dma_get_by_index(dev, index, dma);
103}
104# endif /* OF_CONTROL */
105
106int dma_request(struct udevice *dev, struct dma *dma)
107{
108 struct dma_ops *ops = dma_dev_ops(dev);
109
110 debug("%s(dev=%p, dma=%p)\n", __func__, dev, dma);
111
112 dma->dev = dev;
113
114 if (!ops->request)
115 return 0;
116
117 return ops->request(dma);
118}
119
120int dma_free(struct dma *dma)
121{
122 struct dma_ops *ops = dma_dev_ops(dma->dev);
123
124 debug("%s(dma=%p)\n", __func__, dma);
125
Simon Glass75c0ad62020-02-03 07:35:55 -0700126 if (!ops->rfree)
Álvaro Fernández Rojasd8cedab2018-11-28 19:17:50 +0100127 return 0;
128
Simon Glass75c0ad62020-02-03 07:35:55 -0700129 return ops->rfree(dma);
Álvaro Fernández Rojasd8cedab2018-11-28 19:17:50 +0100130}
131
132int dma_enable(struct dma *dma)
133{
134 struct dma_ops *ops = dma_dev_ops(dma->dev);
135
136 debug("%s(dma=%p)\n", __func__, dma);
137
138 if (!ops->enable)
139 return -ENOSYS;
140
141 return ops->enable(dma);
142}
143
144int dma_disable(struct dma *dma)
145{
146 struct dma_ops *ops = dma_dev_ops(dma->dev);
147
148 debug("%s(dma=%p)\n", __func__, dma);
149
150 if (!ops->disable)
151 return -ENOSYS;
152
153 return ops->disable(dma);
154}
155
156int dma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
157{
158 struct dma_ops *ops = dma_dev_ops(dma->dev);
159
160 debug("%s(dma=%p)\n", __func__, dma);
161
162 if (!ops->prepare_rcv_buf)
163 return -1;
164
165 return ops->prepare_rcv_buf(dma, dst, size);
166}
167
168int dma_receive(struct dma *dma, void **dst, void *metadata)
169{
170 struct dma_ops *ops = dma_dev_ops(dma->dev);
171
172 debug("%s(dma=%p)\n", __func__, dma);
173
174 if (!ops->receive)
175 return -ENOSYS;
176
177 return ops->receive(dma, dst, metadata);
178}
179
180int dma_send(struct dma *dma, void *src, size_t len, void *metadata)
181{
182 struct dma_ops *ops = dma_dev_ops(dma->dev);
183
184 debug("%s(dma=%p)\n", __func__, dma);
185
186 if (!ops->send)
187 return -ENOSYS;
188
189 return ops->send(dma, src, len, metadata);
190}
Vignesh Raghavendrab18fb7e2019-12-04 22:17:20 +0530191
192int dma_get_cfg(struct dma *dma, u32 cfg_id, void **cfg_data)
193{
194 struct dma_ops *ops = dma_dev_ops(dma->dev);
195
196 debug("%s(dma=%p)\n", __func__, dma);
197
198 if (!ops->get_cfg)
199 return -ENOSYS;
200
201 return ops->get_cfg(dma, cfg_id, cfg_data);
202}
Álvaro Fernández Rojasd8cedab2018-11-28 19:17:50 +0100203#endif /* CONFIG_DMA_CHANNELS */
204
Mugunthan V N8c3c9182016-02-15 15:31:37 +0530205int dma_get_device(u32 transfer_type, struct udevice **devp)
206{
207 struct udevice *dev;
208 int ret;
209
210 for (ret = uclass_first_device(UCLASS_DMA, &dev); dev && !ret;
211 ret = uclass_next_device(&dev)) {
212 struct dma_dev_priv *uc_priv;
213
214 uc_priv = dev_get_uclass_priv(dev);
215 if (uc_priv->supported & transfer_type)
216 break;
217 }
218
219 if (!dev) {
Masahiro Yamada81e10422017-09-16 14:10:41 +0900220 pr_err("No DMA device found that supports %x type\n",
Mugunthan V N8c3c9182016-02-15 15:31:37 +0530221 transfer_type);
222 return -EPROTONOSUPPORT;
223 }
224
225 *devp = dev;
226
227 return ret;
228}
229
230int dma_memcpy(void *dst, void *src, size_t len)
231{
232 struct udevice *dev;
233 const struct dma_ops *ops;
234 int ret;
235
236 ret = dma_get_device(DMA_SUPPORTS_MEM_TO_MEM, &dev);
237 if (ret < 0)
238 return ret;
239
240 ops = device_get_ops(dev);
241 if (!ops->transfer)
242 return -ENOSYS;
243
244 /* Invalidate the area, so no writeback into the RAM races with DMA */
245 invalidate_dcache_range((unsigned long)dst, (unsigned long)dst +
246 roundup(len, ARCH_DMA_MINALIGN));
247
248 return ops->transfer(dev, DMA_MEM_TO_MEM, dst, src, len);
249}
250
/* Registration of the DMA uclass with the driver model core. */
UCLASS_DRIVER(dma) = {
	.id = UCLASS_DMA,
	.name = "dma",
	/* honor "dmaN" device-tree aliases when assigning sequence numbers */
	.flags = DM_UC_FLAG_SEQ_ALIAS,
	/* per-device uclass-private data; holds the "supported" type mask */
	.per_device_auto_alloc_size = sizeof(struct dma_dev_priv),
};