/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section("_ftrace_annotated_branch")	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section("_ftrace_branch")		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
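
/*
 * Illustrative usage only (not part of the original header): likely() and
 * unlikely() wrap __builtin_expect(), so a caller would typically hint an
 * error path as rare, e.g. in a hypothetical helper:
 *
 *	int copy_item(struct item *dst, const struct item *src)
 *	{
 *		if (unlikely(!src))
 *			return -EINVAL;
 *		*dst = *src;
 *		return 0;
 *	}
 *
 * The hint only influences code layout and branch prediction; behaviour is
 * the same whether or not branch profiling is enabled.
 */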

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".rodata..c_jump_table")

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
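
/*
 * Illustrative sketch (not part of the original header): unreachable() marks
 * paths the programmer knows cannot be taken, e.g. after an exhaustive
 * switch in a hypothetical function:
 *
 *	int vector_len(enum vec_kind k)
 *	{
 *		switch (k) {
 *		case VEC2: return 2;
 *		case VEC3: return 3;
 *		}
 *		unreachable();
 *	}
 *
 * With CONFIG_STACK_VALIDATION the annotate_unreachable() marker records the
 * location in .discard.unreachable so objtool can see the fall-through is
 * intentional; otherwise the macro reduces to __builtin_unreachable() alone.
 */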

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__section("___kentry" "+" #sym )			\
	= (unsigned long)&sym;
#endif
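
/*
 * Illustrative sketch (not part of the original header), assuming a
 * hypothetical table that is only reached through its linker symbol:
 *
 *	const struct boot_vector my_boot_vectors[4];
 *	KENTRY(my_boot_vectors)
 *
 * The generated __kentry_my_boot_vectors entry gives the compiler and linker
 * a visible reference to my_boot_vectors even though no C code uses it
 * directly (the ___kentry section still needs KEEP() in the linker script).
 */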

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
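
/*
 * Illustrative usage only (not part of the original header): hiding a value
 * from the optimizer keeps the compiler from reasoning about its contents,
 * a pattern commonly used in constant-time comparisons:
 *
 *	unsigned long diff = a ^ b;
 *	OPTIMIZER_HIDE_VAR(diff);
 *	return diff == 0;
 *
 * The empty asm with a "=r"/"0" constraint pair typically emits no
 * instructions but forces GCC to treat 'diff' as if it may have changed.
 */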

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

#include <linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining. Attempting to inline it may cause a build failure.
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and one for the macro's copy through the '__u' union
 * allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	return *(unsigned long *)addr;
}

#define WRITE_ONCE(x, val)					\
({								\
	union { typeof(x) __val; char __c[1]; } __u =		\
		{ .__val = (__force typeof(x)) (val) };		\
	__write_once_size(&(x), __u.__c, sizeof(x));		\
	__u.__val;						\
})
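
/*
 * Illustrative usage only (not part of the original header): a flag shared
 * between process context and an interrupt handler on the same CPU might be
 * accessed as, in a hypothetical driver:
 *
 *	// in the IRQ handler
 *	WRITE_ONCE(dev->irq_pending, 1);
 *
 *	// in process context
 *	while (!READ_ONCE(dev->irq_pending))
 *		;
 *
 * READ_ONCE()/WRITE_ONCE() only constrain the compiler (no tearing, merging
 * or refetching); they do not order accesses across CPUs, so pair them with
 * explicit barriers where inter-CPU ordering is needed.
 */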

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(".discard.addressable") __used \
		__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
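
/*
 * Illustrative usage only (not part of the original header): given a
 * hypothetical table of 32-bit self-relative offsets, where each entry
 * stores "target - &entry", an entry is turned back into a pointer with
 *
 *	const int *entry = &offset_table[i];
 *	void *target = offset_to_ptr(entry);
 *
 * since the function simply adds the stored offset to the entry's own
 * address.
 */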

#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert(), this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")
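
/*
 * Illustrative usage only (not part of the original header): the check costs
 * nothing at run time and must sit in statement context, e.g. inside a
 * hypothetical setup function guarding a fixed ABI layout:
 *
 *	static void check_abi(void)
 *	{
 *		compiletime_assert(sizeof(struct boot_params) == 64,
 *				   "struct boot_params must stay 64 bytes");
 *	}
 *
 * When the condition is false and the compiler is optimizing, the call to
 * the never-defined __compiletime_assert_N() function survives and the build
 * fails with the supplied message via the __compiletime_error() attribute.
 */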

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))

#endif /* __LINUX_COMPILER_H */