// SPDX-License-Identifier: GPL-2.0+
/*
 * Generic bounce buffer implementation
 *
 * Copyright (C) 2012 Marek Vasut <marex@denx.de>
 */

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <malloc.h>
#include <errno.h>
#include <bouncebuf.h>
#include <asm/cache.h>
#include <linux/dma-mapping.h>

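/*
 * Default alignment test: the user buffer is usable for DMA as-is only
 * if both its start address and its length are aligned to
 * ARCH_DMA_MINALIGN.
 */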
static int addr_aligned(struct bounce_buffer *state)
{
	const ulong align_mask = ARCH_DMA_MINALIGN - 1;

	/* Check if start is aligned */
	if ((ulong)state->user_buffer & align_mask) {
		debug("Unaligned buffer address %p\n", state->user_buffer);
		return 0;
	}

	/* Check if length is aligned */
	if (state->len != state->len_aligned) {
		debug("Unaligned buffer length %zu\n", state->len);
		return 0;
	}

	/* Aligned */
	return 1;
}

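/*
 * Start a bounce buffer session with a caller-supplied alignment and
 * alignment test. If @addr_is_aligned accepts the user buffer, it is
 * used for DMA directly; otherwise a bounce buffer of the
 * @alignment-rounded length is allocated and, for GEN_BB_READ
 * transfers, pre-filled with the user data.
 */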
int bounce_buffer_start_extalign(struct bounce_buffer *state, void *data,
				 size_t len, unsigned int flags,
				 size_t alignment,
				 int (*addr_is_aligned)(struct bounce_buffer *state))
{
	state->user_buffer = data;
	state->bounce_buffer = data;
	state->len = len;
	state->len_aligned = roundup(len, alignment);
	state->flags = flags;

	if (!addr_is_aligned(state)) {
		state->bounce_buffer = memalign(alignment,
						state->len_aligned);
		if (!state->bounce_buffer)
			return -ENOMEM;

		if (state->flags & GEN_BB_READ)
			memcpy(state->bounce_buffer, state->user_buffer,
			       state->len);
	}

	/*
	 * Flush data to RAM so DMA reads can pick it up,
	 * and any CPU writebacks don't race with DMA writes
	 */
	dma_map_single(state->bounce_buffer,
		       state->len_aligned,
		       DMA_BIDIRECTIONAL);

	return 0;
}

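/*
 * Start a bounce buffer session using the default ARCH_DMA_MINALIGN
 * alignment and the addr_aligned() test above.
 */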
int bounce_buffer_start(struct bounce_buffer *state, void *data,
			size_t len, unsigned int flags)
{
	return bounce_buffer_start_extalign(state, data, len, flags,
					    ARCH_DMA_MINALIGN,
					    addr_aligned);
}

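/*
 * End a bounce buffer session. For GEN_BB_WRITE transfers the cache is
 * invalidated over the DMA'd area and, if a bounce buffer was actually
 * allocated, its contents are copied back to the user buffer before the
 * bounce buffer is freed.
 */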
int bounce_buffer_stop(struct bounce_buffer *state)
{
	if (state->flags & GEN_BB_WRITE) {
		/* Invalidate cache so that CPU can see any newly DMA'd data */
		dma_unmap_single((dma_addr_t)(uintptr_t)state->bounce_buffer,
				 state->len_aligned,
				 DMA_BIDIRECTIONAL);
	}

	if (state->bounce_buffer == state->user_buffer)
		return 0;

	if (state->flags & GEN_BB_WRITE)
		memcpy(state->user_buffer, state->bounce_buffer, state->len);

	free(state->bounce_buffer);

	return 0;
}
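
/*
 * Typical call sequence from a DMA-capable driver (an illustrative
 * sketch; "buf", "len" and the DMA step are hypothetical placeholders,
 * not part of this file):
 *
 *	struct bounce_buffer bbstate;
 *	int ret;
 *
 *	ret = bounce_buffer_start(&bbstate, buf, len, GEN_BB_WRITE);
 *	if (ret)
 *		return ret;
 *	... program the controller to DMA into bbstate.bounce_buffer
 *	    for bbstate.len_aligned bytes, then wait for completion ...
 *	bounce_buffer_stop(&bbstate);
 */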