/*
 * Generic bounce buffer implementation
 *
 * Copyright (C) 2012 Marek Vasut <marex@denx.de>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <malloc.h>
#include <errno.h>
#include <bouncebuf.h>

static int addr_aligned(struct bounce_buffer *state)
{
	const ulong align_mask = ARCH_DMA_MINALIGN - 1;

	/* Check if start is aligned */
	if ((ulong)state->user_buffer & align_mask) {
		debug("Unaligned buffer address %p\n", state->user_buffer);
		return 0;
	}

	/* Check if length is aligned */
	if (state->len != state->len_aligned) {
		debug("Unaligned buffer length %zu\n", state->len);
		return 0;
	}

	/* Aligned */
	return 1;
}

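/*
 * Prepare a DMA transfer to/from @data of @len bytes. If the user
 * buffer already satisfies ARCH_DMA_MINALIGN, it is used directly;
 * otherwise an aligned bounce buffer is allocated and, for GEN_BB_READ
 * (memory-to-device) transfers, pre-filled with the user data.
 */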
int bounce_buffer_start(struct bounce_buffer *state, void *data,
			size_t len, unsigned int flags)
{
	state->user_buffer = data;
	state->bounce_buffer = data;
	state->len = len;
	state->len_aligned = roundup(len, ARCH_DMA_MINALIGN);
	state->flags = flags;

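	/*
	 * If the user buffer is unaligned, substitute an aligned bounce
	 * buffer so the cache maintenance below operates on whole cache
	 * lines and cannot corrupt adjacent data.
	 */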
	if (!addr_aligned(state)) {
		state->bounce_buffer = memalign(ARCH_DMA_MINALIGN,
						state->len_aligned);
		if (!state->bounce_buffer)
			return -ENOMEM;

		if (state->flags & GEN_BB_READ)
			memcpy(state->bounce_buffer, state->user_buffer,
			       state->len);
	}

	/*
	 * Flush data to RAM so DMA reads can pick it up,
	 * and any CPU writebacks don't race with DMA writes
	 */
	flush_dcache_range((unsigned long)state->bounce_buffer,
			   (unsigned long)(state->bounce_buffer) +
				state->len_aligned);

	return 0;
}

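/*
 * Complete a transfer started with bounce_buffer_start(). For
 * GEN_BB_WRITE (device-to-memory) transfers, invalidate the cache and,
 * if a bounce buffer was substituted, copy the DMA'd data back to the
 * user buffer before freeing it.
 */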
int bounce_buffer_stop(struct bounce_buffer *state)
{
	if (state->flags & GEN_BB_WRITE) {
		/* Invalidate cache so that CPU can see any newly DMA'd data */
		invalidate_dcache_range((unsigned long)state->bounce_buffer,
					(unsigned long)(state->bounce_buffer) +
						state->len_aligned);
	}

	if (state->bounce_buffer == state->user_buffer)
		return 0;

	if (state->flags & GEN_BB_WRITE)
		memcpy(state->user_buffer, state->bounce_buffer, state->len);

	free(state->bounce_buffer);

	return 0;
}
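
/*
 * Minimal usage sketch (hypothetical driver code, compiled out): wrap
 * the transfer between start and stop, and program the hardware with
 * state.bounce_buffer, which may differ from the caller's pointer.
 * The dma_read_to() call is an assumed placeholder, not a real API.
 */
#if 0
static int example_dma_read(void *buf, size_t len)
{
	struct bounce_buffer bbstate;
	int ret;

	/* The device writes to memory, so ask for a copy-back at stop */
	ret = bounce_buffer_start(&bbstate, buf, len, GEN_BB_WRITE);
	if (ret)
		return ret;

	/* Placeholder: start DMA into the aligned buffer and wait */
	ret = dma_read_to(bbstate.bounce_buffer, bbstate.len_aligned);

	bounce_buffer_stop(&bbstate);

	return ret;
}
#endif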