/*
 * Freescale i.MX28 APBH DMA driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/list.h>

#include <common.h>
#include <malloc.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/arch/dma.h>

static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS];
/*
 * Test if the DMA channel is a valid channel
 */
int mxs_dma_validate_chan(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if (!(pchan->flags & MXS_DMA_FLAGS_ALLOCATED))
		return -EINVAL;

	return 0;
}

/*
 * Return the address of the command within a descriptor.
 */
static unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc)
{
	return desc->address + offsetof(struct mxs_dma_desc, cmd);
}

/*
 * Read a DMA channel's hardware semaphore.
 *
 * As used by the MXS platform's DMA software, the DMA channel's hardware
 * semaphore reflects the number of DMA commands the hardware will process, but
 * has not yet finished. This is a volatile value read directly from hardware,
 * so it must be viewed as immediately stale.
 *
 * If the channel is not marked busy, or has finished processing all its
 * commands, this value should be zero.
 *
 * See mxs_dma_desc_append() for details on how DMA command blocks must be
 * configured to maintain the expected behavior of the semaphore's value.
 */
static int mxs_dma_read_semaphore(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	uint32_t tmp;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	tmp = readl(&apbh_regs->ch[channel].hw_apbh_ch_sema);

	tmp &= APBH_CHn_SEMA_PHORE_MASK;
	tmp >>= APBH_CHn_SEMA_PHORE_OFFSET;

	return tmp;
}
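
/*
 * Illustrative sketch (not part of the original driver): because every
 * command appended via mxs_dma_desc_append() is expected to carry the
 * "decrement semaphore" flag, a channel kicked off with N commands reads
 * back N, N-1, ... and finally 0 here as the hardware retires them, so a
 * completion poll could in principle be as simple as:
 *
 *	while (mxs_dma_read_semaphore(channel) > 0)
 *		;
 *
 * The driver itself instead waits for the per-channel IRQ status bit in
 * mxs_dma_wait_complete() and only uses this value for descriptor
 * bookkeeping in mxs_dma_finish().
 */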
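
/*
 * Flush a DMA descriptor from the CPU's data cache back to DRAM so that the
 * APBH engine reads up-to-date descriptor contents. The size is rounded up
 * to MXS_DMA_ALIGNMENT so that only whole cache lines are flushed.
 */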
#ifndef CONFIG_SYS_DCACHE_OFF
void mxs_dma_flush_desc(struct mxs_dma_desc *desc)
{
	uint32_t addr;
	uint32_t size;

	addr = (uint32_t)desc;
	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);

	flush_dcache_range(addr, addr + size);
}
#else
inline void mxs_dma_flush_desc(struct mxs_dma_desc *desc) {}
#endif

/*
 * Enable a DMA channel.
 *
 * If the given channel has any DMA descriptors on its active list, this
 * function causes the DMA hardware to begin processing them.
 *
 * This function marks the DMA channel as "busy," whether or not there are any
 * descriptors to process.
 */
static int mxs_dma_enable(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	unsigned int sem;
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->pending_num == 0) {
		pchan->flags |= MXS_DMA_FLAGS_BUSY;
		return 0;
	}

	pdesc = list_first_entry(&pchan->active, struct mxs_dma_desc, node);
	if (pdesc == NULL)
		return -EFAULT;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY) {
		if (!(pdesc->cmd.data & MXS_DMA_DESC_CHAIN))
			return 0;

		sem = mxs_dma_read_semaphore(channel);
		if (sem == 0)
			return 0;

		if (sem == 1) {
			pdesc = list_entry(pdesc->node.next,
					   struct mxs_dma_desc, node);
			writel(mxs_dma_cmd_address(pdesc),
				&apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		}
		writel(pchan->pending_num,
			&apbh_regs->ch[channel].hw_apbh_ch_sema);
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
	} else {
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
		writel(mxs_dma_cmd_address(pdesc),
			&apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		writel(pchan->active_num,
			&apbh_regs->ch[channel].hw_apbh_ch_sema);
		writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
			&apbh_regs->hw_apbh_ctrl0_clr);
	}

	pchan->flags |= MXS_DMA_FLAGS_BUSY;
	return 0;
}

/*
 * Disable a DMA channel.
 *
 * This function shuts down a DMA channel and marks it as "not busy." Any
 * descriptors on the active list are immediately moved to the head of the
 * "done" list, whether or not they have actually been processed by the
 * hardware. The "ready" flags of these descriptors are NOT cleared, so they
 * still appear to be active.
 *
 * This function immediately shuts down a DMA channel's hardware, aborting any
 * I/O that may be in progress, potentially leaving I/O hardware in an undefined
 * state. It is unwise to call this function if there is ANY chance the hardware
 * is still processing a command.
 */
static int mxs_dma_disable(int channel)
{
	struct mxs_dma_chan *pchan;
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (!(pchan->flags & MXS_DMA_FLAGS_BUSY))
		return -EINVAL;

	writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
		&apbh_regs->hw_apbh_ctrl0_set);

	pchan->flags &= ~MXS_DMA_FLAGS_BUSY;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	list_splice_init(&pchan->active, &pchan->done);

	return 0;
}

/*
 * Resets the DMA channel hardware.
 */
static int mxs_dma_reset(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;
#if defined(CONFIG_MX23)
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_ctrl0_set);
	uint32_t offset = APBH_CTRL0_RESET_CHANNEL_OFFSET;
#elif defined(CONFIG_MX28)
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_channel_ctrl_set);
	uint32_t offset = APBH_CHANNEL_CTRL_RESET_CHANNEL_OFFSET;
#endif

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << (channel + offset), setreg);

	return 0;
}

/*
 * Enable or disable a DMA channel's interrupt.
 *
 * This function enables or disables the given DMA channel's ability to
 * interrupt the CPU.
 */
static int mxs_dma_enable_irq(int channel, int enable)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	if (enable)
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
			&apbh_regs->hw_apbh_ctrl1_set);
	else
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
			&apbh_regs->hw_apbh_ctrl1_clr);

	return 0;
}

/*
 * Clear DMA interrupt.
 *
 * The software that is using the DMA channel must register to receive its
 * interrupts and, when they arrive, must call this function to clear them.
 */
static int mxs_dma_ack_irq(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << channel, &apbh_regs->hw_apbh_ctrl1_clr);
	writel(1 << channel, &apbh_regs->hw_apbh_ctrl2_clr);

	return 0;
}

/*
 * Request to reserve a DMA channel
 */
static int mxs_dma_request(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if ((pchan->flags & MXS_DMA_FLAGS_VALID) != MXS_DMA_FLAGS_VALID)
		return -ENODEV;

	if (pchan->flags & MXS_DMA_FLAGS_ALLOCATED)
		return -EBUSY;

	pchan->flags |= MXS_DMA_FLAGS_ALLOCATED;
	pchan->active_num = 0;
	pchan->pending_num = 0;

	INIT_LIST_HEAD(&pchan->active);
	INIT_LIST_HEAD(&pchan->done);

	return 0;
}

/*
 * Release a DMA channel.
 *
 * This function releases a DMA channel from its current owner.
 *
 * The channel will NOT be released if it's marked "busy" (see
 * mxs_dma_enable()).
 */
int mxs_dma_release(int channel)
{
	struct mxs_dma_chan *pchan;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY)
		return -EBUSY;

	pchan->dev = 0;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	pchan->flags &= ~MXS_DMA_FLAGS_ALLOCATED;

	return 0;
}

/*
 * Allocate DMA descriptor
 */
struct mxs_dma_desc *mxs_dma_desc_alloc(void)
{
	struct mxs_dma_desc *pdesc;
	uint32_t size;

	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);
	pdesc = memalign(MXS_DMA_ALIGNMENT, size);

	if (pdesc == NULL)
		return NULL;

	memset(pdesc, 0, sizeof(*pdesc));
	pdesc->address = (dma_addr_t)pdesc;

	return pdesc;
}

/*
 * Free DMA descriptor
 */
void mxs_dma_desc_free(struct mxs_dma_desc *pdesc)
{
	if (pdesc == NULL)
		return;

	free(pdesc);
}

/*
 * Add a DMA descriptor to a channel.
 *
 * If the descriptor list for this channel is not empty, this function sets the
 * CHAIN bit and the NEXTCMD_ADDR fields in the last descriptor's DMA command so
 * it will chain to the new descriptor's command.
 *
 * Then, this function marks the new descriptor as "ready," adds it to the end
 * of the active descriptor list, and increments the count of pending
 * descriptors.
 *
 * The MXS platform DMA software imposes some rules on DMA commands to maintain
 * important invariants. These rules are NOT checked, but they must be carefully
 * applied by software that uses MXS DMA channels.
 *
 * Invariant:
 *     The DMA channel's hardware semaphore must reflect the number of DMA
 *     commands the hardware will process, but has not yet finished.
 *
 * Explanation:
 *     A DMA channel begins processing commands when its hardware semaphore is
 *     written with a value greater than zero, and it stops processing commands
 *     when the semaphore returns to zero.
 *
 *     When a channel finishes a DMA command, it will decrement its semaphore if
 *     the DECREMENT_SEMAPHORE bit is set in that command's flag bits.
 *
 *     In principle, it's not necessary for the DECREMENT_SEMAPHORE bit to be
 *     set, unless it suits the purposes of the software. For example, one could
 *     construct a series of five DMA commands, with the DECREMENT_SEMAPHORE
 *     bit set only in the last one. Then, setting the DMA channel's hardware
 *     semaphore to one would cause the entire series of five commands to be
 *     processed. However, this example would violate the invariant given above.
 *
 * Rule:
 *     ALL DMA commands MUST have the DECREMENT_SEMAPHORE bit set so that the
 *     DMA channel's hardware semaphore will be decremented EVERY time a command
 *     is processed.
 */
int mxs_dma_desc_append(int channel, struct mxs_dma_desc *pdesc)
{
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *last;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	pdesc->cmd.next = mxs_dma_cmd_address(pdesc);
	pdesc->flags |= MXS_DMA_DESC_FIRST | MXS_DMA_DESC_LAST;

	if (!list_empty(&pchan->active)) {
		last = list_entry(pchan->active.prev, struct mxs_dma_desc,
					node);

		pdesc->flags &= ~MXS_DMA_DESC_FIRST;
		last->flags &= ~MXS_DMA_DESC_LAST;

		last->cmd.next = mxs_dma_cmd_address(pdesc);
		last->cmd.data |= MXS_DMA_DESC_CHAIN;

		mxs_dma_flush_desc(last);
	}
	pdesc->flags |= MXS_DMA_DESC_READY;
	if (pdesc->flags & MXS_DMA_DESC_FIRST)
		pchan->pending_num++;
	list_add_tail(&pdesc->node, &pchan->active);

	mxs_dma_flush_desc(pdesc);

	return ret;
}
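
/*
 * A minimal sketch of the rule above (illustrative, not code from this
 * driver): every command handed to mxs_dma_desc_append() should have the
 * decrement-semaphore bit from <asm/arch/dma.h> set in its command word,
 *
 *	pdesc->cmd.data |= MXS_DMA_DESC_DEC_SEM;
 *
 * so the channel's hardware semaphore drops by one for each command the
 * hardware retires and always equals the number of commands still
 * outstanding.
 */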

/*
 * Clean up processed DMA descriptors.
 *
 * This function removes processed DMA descriptors from the "active" list. Pass
 * in a non-NULL list head to get the descriptors moved to your list. Pass NULL
 * to get the descriptors moved to the channel's "done" list instead.
 *
 * This function marks the DMA channel as "not busy" if no unprocessed
 * descriptors remain on the "active" list.
 */
static int mxs_dma_finish(int channel, struct list_head *head)
{
	int sem;
	struct mxs_dma_chan *pchan;
	struct list_head *p, *q;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	sem = mxs_dma_read_semaphore(channel);
	if (sem < 0)
		return sem;

	if (sem == pchan->active_num)
		return 0;

	list_for_each_safe(p, q, &pchan->active) {
		if ((pchan->active_num) <= sem)
			break;

		pdesc = list_entry(p, struct mxs_dma_desc, node);
		pdesc->flags &= ~MXS_DMA_DESC_READY;

		if (head)
			list_move_tail(p, head);
		else
			list_move_tail(p, &pchan->done);

		if (pdesc->flags & MXS_DMA_DESC_LAST)
			pchan->active_num--;
	}

	if (sem == 0)
		pchan->flags &= ~MXS_DMA_FLAGS_BUSY;

	return 0;
}

/*
 * Wait for DMA channel to complete
 */
static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(chan);
	if (ret)
		return ret;

	if (mxs_wait_mask_set(&apbh_regs->hw_apbh_ctrl1_reg,
				1 << chan, timeout)) {
		ret = -ETIMEDOUT;
		mxs_dma_reset(chan);
	}

	return ret;
}

/*
 * Execute the DMA channel
 */
int mxs_dma_go(int chan)
{
	uint32_t timeout = 10000000;
	int ret;

	LIST_HEAD(tmp_desc_list);

	mxs_dma_enable_irq(chan, 1);
	mxs_dma_enable(chan);

	/* Wait for DMA to finish. */
	ret = mxs_dma_wait_complete(timeout, chan);

	/* Clear out the descriptors we just ran. */
	mxs_dma_finish(chan, &tmp_desc_list);

	/* Shut the DMA channel down. */
	mxs_dma_ack_irq(chan);
	mxs_dma_reset(chan);
	mxs_dma_enable_irq(chan, 0);
	mxs_dma_disable(chan);

	return ret;
}
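
/*
 * Putting it together, a rough sketch of how a peripheral driver typically
 * uses this API (channel, buffer and length are placeholders here; the
 * command flags and any PIO words depend on the peripheral being driven):
 *
 *	struct mxs_dma_desc *d = mxs_dma_desc_alloc();
 *
 *	d->cmd.data = MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
 *		      MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
 *		      (length << MXS_DMA_DESC_BYTES_OFFSET);
 *	d->cmd.address = (dma_addr_t)buffer;
 *
 *	mxs_dma_desc_append(channel, d);
 *	if (mxs_dma_go(channel))
 *		printf("DMA transfer timed out\n");
 *	mxs_dma_desc_free(d);
 */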

/*
 * Initialize the DMA hardware
 */
void mxs_dma_init(void)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_reset_block(&apbh_regs->hw_apbh_ctrl0_reg);

#ifdef CONFIG_APBH_DMA_BURST8
	writel(APBH_CTRL0_AHB_BURST8_EN,
		&apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_AHB_BURST8_EN,
		&apbh_regs->hw_apbh_ctrl0_clr);
#endif

#ifdef CONFIG_APBH_DMA_BURST
	writel(APBH_CTRL0_APB_BURST_EN,
		&apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_APB_BURST_EN,
		&apbh_regs->hw_apbh_ctrl0_clr);
#endif
}

int mxs_dma_init_channel(int channel)
{
	struct mxs_dma_chan *pchan;
	int ret;

	pchan = mxs_dma_channels + channel;
	pchan->flags = MXS_DMA_FLAGS_VALID;

	ret = mxs_dma_request(channel);

	if (ret) {
		printf("MXS DMA: Can't acquire DMA channel %i\n",
			channel);
		return ret;
	}

	mxs_dma_reset(channel);
	mxs_dma_ack_irq(channel);

	return 0;
}
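
/*
 * One-time setup is done by board or peripheral init code, roughly like this
 * (the GPMI channel below is only an example; channel IDs come from
 * <asm/arch/dma.h>):
 *
 *	mxs_dma_init();
 *	ret = mxs_dma_init_channel(MXS_DMA_CHANNEL_AHB_APBH_GPMI0);
 *
 * Afterwards the channel can be fed descriptors with mxs_dma_desc_append()
 * and run with mxs_dma_go().
 */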