/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (c) 2015 Google, Inc
 */

#ifndef __ALIGNMEM_H
#define __ALIGNMEM_H

/*
 * ARCH_DMA_MINALIGN is defined in asm/cache.h for each architecture. It
 * is used to align DMA buffers.
 */
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/cache.h>
#include <malloc.h>

/*
 * The ALLOC_CACHE_ALIGN_BUFFER macro is used to allocate a buffer on the
 * stack that meets the minimum architecture alignment requirements for DMA.
 * Such a buffer is useful for DMA operations where flushing and invalidating
 * the cache before and after a read and/or write operation is required for
 * correct operation.
 *
 * When called, the macro creates an array on the stack that is sized such
 * that:
 *
 * 1) The beginning of the array can be advanced enough to be aligned.
 *
 * 2) The size of the aligned portion of the array is a multiple of the minimum
 *    architecture alignment required for DMA.
 *
 * 3) The aligned portion contains enough space for the original number of
 *    elements requested.
 *
 * The macro then creates a pointer to the aligned portion of this array and
 * assigns to the pointer the address of the first element in the aligned
 * portion of the array.
 *
 * Calling the macro as:
 *
 *	ALLOC_CACHE_ALIGN_BUFFER(uint32_t, buffer, 1024);
 *
 * will result in something similar to writing:
 *
 *	uint32_t buffer[1024];
 *
 * The following differences exist:
 *
 * 1) The resulting buffer is guaranteed to be aligned to the value of
 *    ARCH_DMA_MINALIGN.
 *
 * 2) The buffer variable created by the macro is a pointer to the specified
 *    type, and NOT an array of the specified type. This can be very important
 *    if you want the address of the buffer, which you probably do, to pass it
 *    to the DMA hardware. The value of &buffer is different in the two cases.
 *    In the macro case it will be the address of the pointer, not the address
 *    of the space reserved for the buffer. However, in the second case it
 *    would be the address of the buffer. So if you are replacing hard-coded
 *    stack buffers with this macro you need to make sure you remove the & from
 *    the locations where you are taking the address of the buffer.
 *
 * Note that the size parameter is the number of array elements to allocate,
 * not the number of bytes.
 *
 * This macro cannot be used outside of function scope, or for the creation
 * of a function-scoped static buffer. It cannot be used to create a
 * cache-line-aligned global buffer.
 */
#define PAD_COUNT(s, pad)	(((s) - 1) / (pad) + 1)
#define PAD_SIZE(s, pad)	(PAD_COUNT(s, pad) * pad)
#define ALLOC_ALIGN_BUFFER_PAD(type, name, size, align, pad)		\
	char __##name[ROUND(PAD_SIZE((size) * sizeof(type), pad), align) \
		      + (align - 1)];					\
									\
	type *name = (type *)ALIGN((uintptr_t)__##name, align)
#define ALLOC_ALIGN_BUFFER(type, name, size, align)			\
	ALLOC_ALIGN_BUFFER_PAD(type, name, size, align, 1)
#define ALLOC_CACHE_ALIGN_BUFFER_PAD(type, name, size, pad)		\
	ALLOC_ALIGN_BUFFER_PAD(type, name, size, ARCH_DMA_MINALIGN, pad)
#define ALLOC_CACHE_ALIGN_BUFFER(type, name, size)			\
	ALLOC_ALIGN_BUFFER(type, name, size, ARCH_DMA_MINALIGN)
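
/*
 * Illustrative usage sketch (not part of this header): dma_read(),
 * struct dma_dev and the 512-byte transfer size are hypothetical stand-ins
 * for a DMA-capable driver call. Note that the macro-created buffer is
 * already a pointer, so it is passed directly, without taking its address:
 *
 *	static int read_one_block(struct dma_dev *dev)
 *	{
 *		ALLOC_CACHE_ALIGN_BUFFER(u8, buf, 512);
 *
 *		return dma_read(dev, buf, 512);
 *	}
 */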

/*
 * DEFINE_CACHE_ALIGN_BUFFER() is similar to ALLOC_CACHE_ALIGN_BUFFER, but its
 * purpose is to allow allocating aligned buffers outside of function scope.
 * Usage of this macro shall be avoided or used with extreme care!
 */
#define DEFINE_ALIGN_BUFFER(type, name, size, align)			\
	static char __##name[ALIGN(size * sizeof(type), align)]	\
			__aligned(align);				\
									\
	static type *name = (type *)__##name
#define DEFINE_CACHE_ALIGN_BUFFER(type, name, size)			\
	DEFINE_ALIGN_BUFFER(type, name, size, ARCH_DMA_MINALIGN)
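
/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * header): a file-scope aligned buffer shared by several functions in one
 * file.
 *
 *	DEFINE_CACHE_ALIGN_BUFFER(u8, scratch_buf, 512);
 *
 *	static void clear_scratch(void)
 *	{
 *		memset(scratch_buf, 0, 512);
 *	}
 */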

/**
 * malloc_cache_aligned() - allocate a memory region aligned to cache line size
 *
 * This allocates memory at a cache-line boundary. The amount allocated may
 * be larger than requested as it is rounded up to the nearest multiple of the
 * cache-line size. This ensures that subsequent cache operations on this
 * memory (flush, invalidate) will not affect subsequently allocated regions.
 *
 * @size: Minimum number of bytes to allocate
 *
 * Return: pointer to new memory region, or NULL if there is no more memory
 * available.
 */
static inline void *malloc_cache_aligned(size_t size)
{
	return memalign(ARCH_DMA_MINALIGN, ALIGN(size, ARCH_DMA_MINALIGN));
}
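
/*
 * Illustrative usage sketch (hypothetical caller, not part of this header).
 * As with memalign(), the returned region is released with free():
 *
 *	void *buf = malloc_cache_aligned(64);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free(buf);
 */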
#endif

#endif /* __ALIGNMEM_H */