/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com>
 * Copyright (C) 2015 - 2018 Texas Instruments Incorporated <www.ti.com>
 * Written by Mugunthan V N <mugunthanvnm@ti.com>
 */
8
9#ifndef _DMA_H_
10#define _DMA_H_
11
Simon Glass4dcacfc2020-05-10 11:40:13 -060012#include <linux/bitops.h>
Álvaro Fernández Rojasd8cedab2018-11-28 19:17:50 +010013#include <linux/errno.h>
14#include <linux/types.h>
15
Simon Glass3ba929a2020-10-30 21:38:53 -060016struct udevice;
17
Mugunthan V N8c3c9182016-02-15 15:31:37 +053018/*
19 * enum dma_direction - dma transfer direction indicator
20 * @DMA_MEM_TO_MEM: Memcpy mode
21 * @DMA_MEM_TO_DEV: From Memory to Device
22 * @DMA_DEV_TO_MEM: From Device to Memory
23 * @DMA_DEV_TO_DEV: From Device to Device
24 */
25enum dma_direction {
26 DMA_MEM_TO_MEM,
27 DMA_MEM_TO_DEV,
28 DMA_DEV_TO_MEM,
29 DMA_DEV_TO_DEV,
30};
31
32#define DMA_SUPPORTS_MEM_TO_MEM BIT(0)
33#define DMA_SUPPORTS_MEM_TO_DEV BIT(1)
34#define DMA_SUPPORTS_DEV_TO_MEM BIT(2)
35#define DMA_SUPPORTS_DEV_TO_DEV BIT(3)
36
37/*
Mugunthan V N8c3c9182016-02-15 15:31:37 +053038 * struct dma_dev_priv - information about a device used by the uclass
39 *
40 * @supported: mode of transfers that DMA can support, should be
41 * one/multiple of DMA_SUPPORTS_*
42 */
43struct dma_dev_priv {
44 u32 supported;
45};
46
Álvaro Fernández Rojasd8cedab2018-11-28 19:17:50 +010047#ifdef CONFIG_DMA_CHANNELS
48/**
49 * A DMA is a feature of computer systems that allows certain hardware
50 * subsystems to access main system memory, independent of the CPU.
51 * DMA channels are typically generated externally to the HW module
52 * consuming them, by an entity this API calls a DMA provider. This API
53 * provides a standard means for drivers to enable and disable DMAs, and to
54 * copy, send and receive data using DMA.
55 *
56 * A driver that implements UCLASS_DMA is a DMA provider. A provider will
57 * often implement multiple separate DMAs, since the hardware it manages
58 * often has this capability. dma_uclass.h describes the interface which
59 * DMA providers must implement.
60 *
61 * DMA consumers/clients are the HW modules driven by the DMA channels. This
62 * header file describes the API used by drivers for those HW modules.
63 *
64 * DMA consumer DMA_MEM_TO_DEV (transmit) usage example (based on networking).
65 * Note. dma_send() is sync operation always - it'll start transfer and will
66 * poll for it to complete:
67 * - get/request dma channel
68 * struct dma dma_tx;
69 * ret = dma_get_by_name(common->dev, "tx0", &dma_tx);
70 * if (ret) ...
71 *
72 * - enable dma channel
73 * ret = dma_enable(&dma_tx);
74 * if (ret) ...
75 *
76 * - dma transmit DMA_MEM_TO_DEV.
77 * struct ti_drv_packet_data packet_data;
78 *
79 * packet_data.opt1 = val1;
80 * packet_data.opt2 = val2;
81 * ret = dma_send(&dma_tx, packet, length, &packet_data);
82 * if (ret) ..
83 *
84 * DMA consumer DMA_DEV_TO_MEM (receive) usage example (based on networking).
85 * Note. dma_receive() is sync operation always - it'll start transfer
86 * (if required) and will poll for it to complete (or for any previously
87 * configured dev2mem transfer to complete):
88 * - get/request dma channel
89 * struct dma dma_rx;
90 * ret = dma_get_by_name(common->dev, "rx0", &dma_rx);
91 * if (ret) ...
92 *
93 * - enable dma channel
94 * ret = dma_enable(&dma_rx);
95 * if (ret) ...
96 *
97 * - dma receive DMA_DEV_TO_MEM.
98 * struct ti_drv_packet_data packet_data;
99 *
100 * len = dma_receive(&dma_rx, (void **)packet, &packet_data);
101 * if (ret < 0) ...
102 *
103 * DMA consumer DMA_DEV_TO_MEM (receive) zero-copy usage example (based on
104 * networking). Networking subsystem allows to configure and use few receive
105 * buffers (dev2mem), as Networking RX DMA channels usually implemented
106 * as streaming interface
107 * - get/request dma channel
108 * struct dma dma_rx;
109 * ret = dma_get_by_name(common->dev, "rx0", &dma_rx);
110 * if (ret) ...
111 *
112 * for (i = 0; i < RX_DESC_NUM; i++) {
113 * ret = dma_prepare_rcv_buf(&dma_rx,
114 * net_rx_packets[i],
115 * RX_BUF_SIZE);
116 * if (ret) ...
117 * }
118 *
119 * - enable dma channel
120 * ret = dma_enable(&dma_rx);
121 * if (ret) ...
122 *
123 * - dma receive DMA_DEV_TO_MEM.
124 * struct ti_drv_packet_data packet_data;
125 *
126 * len = dma_receive(&dma_rx, (void **)packet, &packet_data);
127 * if (ret < 0) ..
128 *
129 * -- process packet --
130 *
131 * - return buffer back to DAM channel
132 * ret = dma_prepare_rcv_buf(&dma_rx,
133 * net_rx_packets[rx_next],
134 * RX_BUF_SIZE);
135 */
136
137struct udevice;
138
139/**
140 * struct dma - A handle to (allowing control of) a single DMA.
141 *
142 * Clients provide storage for DMA handles. The content of the structure is
143 * managed solely by the DMA API and DMA drivers. A DMA struct is
144 * initialized by "get"ing the DMA struct. The DMA struct is passed to all
145 * other DMA APIs to identify which DMA channel to operate upon.
146 *
147 * @dev: The device which implements the DMA channel.
148 * @id: The DMA channel ID within the provider.
149 *
150 * Currently, the DMA API assumes that a single integer ID is enough to
151 * identify and configure any DMA channel for any DMA provider. If this
152 * assumption becomes invalid in the future, the struct could be expanded to
153 * either (a) add more fields to allow DMA providers to store additional
154 * information, or (b) replace the id field with an opaque pointer, which the
155 * provider would dynamically allocated during its .of_xlate op, and process
156 * during is .request op. This may require the addition of an extra op to clean
157 * up the allocation.
158 */
159struct dma {
160 struct udevice *dev;
161 /*
162 * Written by of_xlate. We assume a single id is enough for now. In the
163 * future, we might add more fields here.
164 */
165 unsigned long id;
166};
167
168# if CONFIG_IS_ENABLED(OF_CONTROL) && CONFIG_IS_ENABLED(DMA)
169/**
170 * dma_get_by_index - Get/request a DMA by integer index.
171 *
172 * This looks up and requests a DMA. The index is relative to the client
173 * device; each device is assumed to have n DMAs associated with it somehow,
174 * and this function finds and requests one of them. The mapping of client
175 * device DMA indices to provider DMAs may be via device-tree properties,
176 * board-provided mapping tables, or some other mechanism.
177 *
178 * @dev: The client device.
179 * @index: The index of the DMA to request, within the client's list of
180 * DMA channels.
181 * @dma: A pointer to a DMA struct to initialize.
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +0100182 * Return: 0 if OK, or a negative error code.
Álvaro Fernández Rojasd8cedab2018-11-28 19:17:50 +0100183 */
184int dma_get_by_index(struct udevice *dev, int index, struct dma *dma);
185
186/**
187 * dma_get_by_name - Get/request a DMA by name.
188 *
189 * This looks up and requests a DMA. The name is relative to the client
190 * device; each device is assumed to have n DMAs associated with it somehow,
191 * and this function finds and requests one of them. The mapping of client
192 * device DMA names to provider DMAs may be via device-tree properties,
193 * board-provided mapping tables, or some other mechanism.
194 *
195 * @dev: The client device.
196 * @name: The name of the DMA to request, within the client's list of
197 * DMA channels.
198 * @dma: A pointer to a DMA struct to initialize.
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +0100199 * Return: 0 if OK, or a negative error code.
Álvaro Fernández Rojasd8cedab2018-11-28 19:17:50 +0100200 */
201int dma_get_by_name(struct udevice *dev, const char *name, struct dma *dma);
202# else
203static inline int dma_get_by_index(struct udevice *dev, int index,
204 struct dma *dma)
205{
206 return -ENOSYS;
207}
208
209static inline int dma_get_by_name(struct udevice *dev, const char *name,
210 struct dma *dma)
211{
212 return -ENOSYS;
213}
214# endif
215
/**
 * dma_request - Request a DMA by provider-specific ID.
 *
 * This requests a DMA using a provider-specific ID. Generally, this function
 * should not be used, since dma_get_by_index/name() provide an interface that
 * better separates clients from intimate knowledge of DMA providers.
 * However, this function may be useful in core SoC-specific code.
 *
 * @dev:	The DMA provider device.
 * @dma:	A pointer to a DMA struct to initialize. The caller must
 *		have already initialized any field in this struct which the
 *		DMA provider uses to identify the DMA channel.
 * Return: 0 if OK, or a negative error code.
 */
int dma_request(struct udevice *dev, struct dma *dma);
231
/**
 * dma_free - Free a previously requested DMA.
 *
 * @dma:	A DMA struct that was previously successfully requested by
 *		dma_request/get_by_*().
 * Return: 0 if OK, or a negative error code.
 */
int dma_free(struct dma *dma);
240
/**
 * dma_enable() - Enable (turn on) a DMA channel.
 *
 * @dma:	A DMA struct that was previously successfully requested by
 *		dma_request/get_by_*().
 * Return: zero on success, or -ve error code.
 */
int dma_enable(struct dma *dma);
249
/**
 * dma_disable() - Disable (turn off) a DMA channel.
 *
 * @dma:	A DMA struct that was previously successfully requested by
 *		dma_request/get_by_*().
 * Return: zero on success, or -ve error code.
 */
int dma_disable(struct dma *dma);
258
259/**
260 * dma_prepare_rcv_buf() - Prepare/add receive DMA buffer.
261 *
262 * It allows to implement zero-copy async DMA_DEV_TO_MEM (receive) transactions
263 * if supported by DMA providers.
264 *
265 * @dma: A DMA struct that was previously successfully requested by
266 * dma_request/get_by_*().
267 * @dst: The receive buffer pointer.
268 * @size: The receive buffer size
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +0100269 * Return: zero on success, or -ve error code.
Álvaro Fernández Rojasd8cedab2018-11-28 19:17:50 +0100270 */
271int dma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size);
272
/**
 * dma_receive() - Receive a DMA transfer.
 *
 * @dma:	A DMA struct that was previously successfully requested by
 *		dma_request/get_by_*().
 * @dst:	The destination pointer.
 * @metadata:	DMA driver's channel specific data
 * Return: length of received data on success, or zero - no data,
 * or -ve error code.
 */
int dma_receive(struct dma *dma, void **dst, void *metadata);
284
285/**
286 * dma_send() - Send a DMA transfer.
287 *
288 * @dma: A DMA struct that was previously successfully requested by
289 * dma_request/get_by_*().
290 * @src: The source pointer.
291 * @len: Length of the data to be sent (number of bytes).
292 * @metadata: DMA driver's channel specific data
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +0100293 * Return: zero on success, or -ve error code.
Álvaro Fernández Rojasd8cedab2018-11-28 19:17:50 +0100294 */
295int dma_send(struct dma *dma, void *src, size_t len, void *metadata);
Vignesh Raghavendrab18fb7e2019-12-04 22:17:20 +0530296
297/**
298 * dma_get_cfg() - Get DMA channel configuration for client's use
299 *
300 * @dma: The DMA Channel to manipulate
301 * @cfg_id: DMA provider specific ID to identify what
302 * configuration data client needs
303 * @cfg_data: Pointer to store pointer to DMA driver specific
304 * configuration data for the given cfg_id (output param)
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +0100305 * Return: zero on success, or -ve error code.
Vignesh Raghavendrab18fb7e2019-12-04 22:17:20 +0530306 */
307int dma_get_cfg(struct dma *dma, u32 cfg_id, void **cfg_data);
Álvaro Fernández Rojasd8cedab2018-11-28 19:17:50 +0100308#endif /* CONFIG_DMA_CHANNELS */
309
Vignesh Raghavendraf4c27872019-11-15 17:00:42 +0530310#if CONFIG_IS_ENABLED(DMA)
Mugunthan V N8c3c9182016-02-15 15:31:37 +0530311/*
312 * dma_get_device - get a DMA device which supports transfer
313 * type of transfer_type
314 *
315 * @transfer_type - transfer type should be one/multiple of
316 * DMA_SUPPORTS_*
317 * @devp - udevice pointer to return the found device
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +0100318 * Return: - will return on success and devp will hold the
Mugunthan V N8c3c9182016-02-15 15:31:37 +0530319 * pointer to the device
320 */
321int dma_get_device(u32 transfer_type, struct udevice **devp);
322
323/*
324 * dma_memcpy - try to use DMA to do a mem copy which will be
325 * much faster than CPU mem copy
326 *
327 * @dst - destination pointer
328 * @src - souce pointer
329 * @len - data length to be copied
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +0100330 * Return: - on successful transfer returns no of bytes
Mugunthan V N8c3c9182016-02-15 15:31:37 +0530331 transferred and on failure return error code.
332 */
333int dma_memcpy(void *dst, void *src, size_t len);
Vignesh Raghavendraf4c27872019-11-15 17:00:42 +0530334#else
335static inline int dma_get_device(u32 transfer_type, struct udevice **devp)
336{
337 return -ENOSYS;
338}
Mugunthan V N8c3c9182016-02-15 15:31:37 +0530339
Vignesh Raghavendraf4c27872019-11-15 17:00:42 +0530340static inline int dma_memcpy(void *dst, void *src, size_t len)
341{
342 return -ENOSYS;
343}
344#endif /* CONFIG_DMA */
Mugunthan V N8c3c9182016-02-15 15:31:37 +0530345#endif /* _DMA_H_ */