// SPDX-License-Identifier: GPL-2.0+
/*
 * Direct Memory Access U-Class driver
 *
 * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com>
 * Copyright (C) 2015 - 2018 Texas Instruments Incorporated <www.ti.com>
 * Written by Mugunthan V N <mugunthanvnm@ti.com>
 *
 * Author: Mugunthan V N <mugunthanvnm@ti.com>
 */

#define LOG_CATEGORY UCLASS_DMA

#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <malloc.h>
#include <asm/cache.h>
#include <dm/read.h>
#include <dma-uclass.h>
#include <linux/dma-mapping.h>
#include <dt-structs.h>
#include <errno.h>
#include <linux/printk.h>

#ifdef CONFIG_DMA_CHANNELS
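/* Fetch the DMA operations implemented by the controller device's driver */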
static inline struct dma_ops *dma_dev_ops(struct udevice *dev)
{
	return (struct dma_ops *)dev->driver->ops;
}

# if CONFIG_IS_ENABLED(OF_CONTROL)
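/*
 * Default DT translation: the "dmas" specifier carries at most one cell,
 * which (when present) selects the channel ID.
 */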
static int dma_of_xlate_default(struct dma *dma,
				struct ofnode_phandle_args *args)
{
	debug("%s(dma=%p)\n", __func__, dma);

	if (args->args_count > 1) {
		pr_err("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		dma->id = args->args[0];
	else
		dma->id = 0;

	return 0;
}

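/*
 * Resolve the client's "dmas"/"#dma-cells" entry at @index, translate the
 * specifier and request the channel from the controller driver.
 */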
int dma_get_by_index(struct udevice *dev, int index, struct dma *dma)
{
	int ret;
	struct ofnode_phandle_args args;
	struct udevice *dev_dma;
	const struct dma_ops *ops;

	debug("%s(dev=%p, index=%d, dma=%p)\n", __func__, dev, index, dma);

	assert(dma);
	dma->dev = NULL;

	ret = dev_read_phandle_with_args(dev, "dmas", "#dma-cells", 0, index,
					 &args);
	if (ret) {
		pr_err("%s: dev_read_phandle_with_args failed: err=%d\n",
		       __func__, ret);
		return ret;
	}

	ret = uclass_get_device_by_ofnode(UCLASS_DMA, args.node, &dev_dma);
	if (ret) {
		pr_err("%s: uclass_get_device_by_ofnode failed: err=%d\n",
		       __func__, ret);
		return ret;
	}

	dma->dev = dev_dma;

	ops = dma_dev_ops(dev_dma);

	if (ops->of_xlate)
		ret = ops->of_xlate(dma, &args);
	else
		ret = dma_of_xlate_default(dma, &args);
	if (ret) {
		pr_err("of_xlate() failed: %d\n", ret);
		return ret;
	}

	return dma_request(dev_dma, dma);
}

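/* Map @name to an index via "dma-names", then resolve the channel by index */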
int dma_get_by_name(struct udevice *dev, const char *name, struct dma *dma)
{
	int index;

	debug("%s(dev=%p, name=%s, dma=%p)\n", __func__, dev, name, dma);
	dma->dev = NULL;

	index = dev_read_stringlist_search(dev, "dma-names", name);
	if (index < 0) {
		pr_err("dev_read_stringlist_search() failed: %d\n", index);
		return index;
	}

	return dma_get_by_index(dev, index, dma);
}
# endif /* OF_CONTROL */

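/*
 * The channel operations below are thin wrappers that dispatch to the
 * controller driver's dma_ops.
 */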
int dma_request(struct udevice *dev, struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dev);

	debug("%s(dev=%p, dma=%p)\n", __func__, dev, dma);

	dma->dev = dev;

	if (!ops->request)
		return 0;

	return ops->request(dma);
}

int dma_free(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->rfree)
		return 0;

	return ops->rfree(dma);
}

int dma_enable(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->enable)
		return -ENOSYS;

	return ops->enable(dma);
}

int dma_disable(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->disable)
		return -ENOSYS;

	return ops->disable(dma);
}

int dma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->prepare_rcv_buf)
		return -1;

	return ops->prepare_rcv_buf(dma, dst, size);
}

int dma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->receive)
		return -ENOSYS;

	return ops->receive(dma, dst, metadata);
}

int dma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->send)
		return -ENOSYS;

	return ops->send(dma, src, len, metadata);
}

int dma_get_cfg(struct dma *dma, u32 cfg_id, void **cfg_data)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->get_cfg)
		return -ENOSYS;

	return ops->get_cfg(dma, cfg_id, cfg_data);
}
#endif /* CONFIG_DMA_CHANNELS */

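/* Find the first DMA controller whose 'supported' mask covers @transfer_type */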
int dma_get_device(u32 transfer_type, struct udevice **devp)
{
	struct udevice *dev;

	for (uclass_first_device(UCLASS_DMA, &dev); dev;
	     uclass_next_device(&dev)) {
		struct dma_dev_priv *uc_priv;

		uc_priv = dev_get_uclass_priv(dev);
		if (uc_priv->supported & transfer_type)
			break;
	}

	if (!dev) {
		pr_debug("No DMA device found that supports %x type\n",
			 transfer_type);
		return -EPROTONOSUPPORT;
	}

	*devp = dev;

	return 0;
}

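/*
 * Copy @len bytes using the first mem-to-mem capable DMA controller: map both
 * buffers for DMA, run the transfer and unmap so the CPU sees the result.
 */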
int dma_memcpy(void *dst, void *src, size_t len)
{
	struct udevice *dev;
	const struct dma_ops *ops;
	dma_addr_t destination;
	dma_addr_t source;
	int ret;

	ret = dma_get_device(DMA_SUPPORTS_MEM_TO_MEM, &dev);
	if (ret < 0)
		return ret;

	ops = device_get_ops(dev);
	if (!ops->transfer)
		return -ENOSYS;

	/* Clean the areas, so no writeback into the RAM races with DMA */
	destination = dma_map_single(dst, len, DMA_FROM_DEVICE);
	source = dma_map_single(src, len, DMA_TO_DEVICE);

	ret = ops->transfer(dev, DMA_MEM_TO_MEM, destination, source, len);

	/* Clean+Invalidate the areas after, so we can see DMA'd data */
	dma_unmap_single(destination, len, DMA_FROM_DEVICE);
	dma_unmap_single(source, len, DMA_TO_DEVICE);

	return ret;
}

UCLASS_DRIVER(dma) = {
	.id = UCLASS_DMA,
	.name = "dma",
	.flags = DM_UC_FLAG_SEQ_ALIAS,
	.per_device_auto = sizeof(struct dma_dev_priv),
};