// SPDX-License-Identifier: GPL-2.0+
/*
 * Generic bounce buffer implementation
 *
 * Copyright (C) 2012 Marek Vasut <marex@denx.de>
 */

#include <common.h>
#include <malloc.h>
#include <errno.h>
#include <bouncebuf.h>

Stephen Warren4a8629e2012-11-06 11:27:29 +000013static int addr_aligned(struct bounce_buffer *state)
Marek Vasutae723e12012-08-26 15:19:06 +000014{
15 const ulong align_mask = ARCH_DMA_MINALIGN - 1;
16
17 /* Check if start is aligned */
Stephen Warren4a8629e2012-11-06 11:27:29 +000018 if ((ulong)state->user_buffer & align_mask) {
19 debug("Unaligned buffer address %p\n", state->user_buffer);
Marek Vasutae723e12012-08-26 15:19:06 +000020 return 0;
21 }
22
Stephen Warren4a8629e2012-11-06 11:27:29 +000023 /* Check if length is aligned */
24 if (state->len != state->len_aligned) {
Vasili Galka38c3fff2014-08-26 13:45:48 +030025 debug("Unaligned buffer length %zu\n", state->len);
Marek Vasutae723e12012-08-26 15:19:06 +000026 return 0;
27 }
28
29 /* Aligned */
30 return 1;
31}
32
Stephen Warren4a8629e2012-11-06 11:27:29 +000033int bounce_buffer_start(struct bounce_buffer *state, void *data,
34 size_t len, unsigned int flags)
Marek Vasutae723e12012-08-26 15:19:06 +000035{
Stephen Warren4a8629e2012-11-06 11:27:29 +000036 state->user_buffer = data;
37 state->bounce_buffer = data;
38 state->len = len;
39 state->len_aligned = roundup(len, ARCH_DMA_MINALIGN);
40 state->flags = flags;
Marek Vasutae723e12012-08-26 15:19:06 +000041
Stephen Warren4a8629e2012-11-06 11:27:29 +000042 if (!addr_aligned(state)) {
43 state->bounce_buffer = memalign(ARCH_DMA_MINALIGN,
44 state->len_aligned);
45 if (!state->bounce_buffer)
46 return -ENOMEM;
Marek Vasutae723e12012-08-26 15:19:06 +000047
Stephen Warren4a8629e2012-11-06 11:27:29 +000048 if (state->flags & GEN_BB_READ)
49 memcpy(state->bounce_buffer, state->user_buffer,
50 state->len);
51 }
Marek Vasutae723e12012-08-26 15:19:06 +000052
Stephen Warren4a8629e2012-11-06 11:27:29 +000053 /*
54 * Flush data to RAM so DMA reads can pick it up,
55 * and any CPU writebacks don't race with DMA writes
56 */
57 flush_dcache_range((unsigned long)state->bounce_buffer,
58 (unsigned long)(state->bounce_buffer) +
59 state->len_aligned);
Marek Vasutae723e12012-08-26 15:19:06 +000060
61 return 0;
62}
63
Stephen Warren4a8629e2012-11-06 11:27:29 +000064int bounce_buffer_stop(struct bounce_buffer *state)
Marek Vasutae723e12012-08-26 15:19:06 +000065{
Stephen Warren4a8629e2012-11-06 11:27:29 +000066 if (state->flags & GEN_BB_WRITE) {
67 /* Invalidate cache so that CPU can see any newly DMA'd data */
68 invalidate_dcache_range((unsigned long)state->bounce_buffer,
69 (unsigned long)(state->bounce_buffer) +
70 state->len_aligned);
71 }
Marek Vasutae723e12012-08-26 15:19:06 +000072
Stephen Warren4a8629e2012-11-06 11:27:29 +000073 if (state->bounce_buffer == state->user_buffer)
Marek Vasutae723e12012-08-26 15:19:06 +000074 return 0;
75
Stephen Warren4a8629e2012-11-06 11:27:29 +000076 if (state->flags & GEN_BB_WRITE)
77 memcpy(state->user_buffer, state->bounce_buffer, state->len);
Marek Vasutae723e12012-08-26 15:19:06 +000078
Stephen Warren4a8629e2012-11-06 11:27:29 +000079 free(state->bounce_buffer);
Marek Vasutae723e12012-08-26 15:19:06 +000080
81 return 0;
82}