Scott Branden | bf404c0 | 2017-04-10 11:45:52 -0700 | [diff] [blame] | 1 | /* |
Boyan Karatotev | a439dfd | 2023-12-04 16:09:14 +0000 | [diff] [blame] | 2 | * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved. |
Varun Wadekar | 0a176e3 | 2020-02-13 13:07:12 -0800 | [diff] [blame] | 3 | * Copyright (c) 2020, NVIDIA Corporation. All rights reserved. |
Scott Branden | bf404c0 | 2017-04-10 11:45:52 -0700 | [diff] [blame] | 4 | * |
dp-arm | fa3cf0b | 2017-05-03 09:38:09 +0100 | [diff] [blame] | 5 | * SPDX-License-Identifier: BSD-3-Clause |
Scott Branden | bf404c0 | 2017-04-10 11:45:52 -0700 | [diff] [blame] | 6 | */ |
| 7 | |
Antonio Nino Diaz | bb77f88 | 2018-07-18 13:23:07 +0100 | [diff] [blame] | 8 | #ifndef UTILS_DEF_H |
| 9 | #define UTILS_DEF_H |
Scott Branden | bf404c0 | 2017-04-10 11:45:52 -0700 | [diff] [blame] | 10 | |
Julius Werner | 2a231e3 | 2019-05-28 21:03:58 -0700 | [diff] [blame] | 11 | #include <export/lib/utils_def_exp.h> |
| 12 | |
/*
 * Number of elements in the array 'a'.  Valid for true arrays only: a
 * pointer parameter would yield sizeof(pointer)/sizeof(element) instead.
 */
#define ARRAY_SIZE(a)	\
	(sizeof(a) / sizeof(*(a)))
| 16 | |
/*
 * Evaluates to a non-zero value when 'x' is a power of two.
 * NOTE(review): x == 0 also evaluates as "true" under this definition;
 * callers appear to use it only on non-zero sizes/alignments -- confirm
 * before relying on it for 0.
 */
#define IS_POWER_OF_TWO(x)			\
	(((x) & ((x) - 1)) == 0)

/* Byte size of 2^n 32-bit words: 4 << n. */
#define SIZE_FROM_LOG2_WORDS(n)		(U(4) << (n))
Scott Branden | bf404c0 | 2017-04-10 11:45:52 -0700 | [diff] [blame] | 21 | |
/*
 * Single-bit masks.  Linker scripts and assembler have no C types, so that
 * branch uses the U()/ULL() literal helpers (from utils_def_exp.h); the C
 * branch casts to the exact fixed-width type instead.
 */
#if defined(__LINKER__) || defined(__ASSEMBLER__)
#define BIT_32(nr)			(U(1) << (nr))
#define BIT_64(nr)			(ULL(1) << (nr))
#else
#define BIT_32(nr)			(((uint32_t)(1U)) << (nr))
#define BIT_64(nr)			(((uint64_t)(1ULL)) << (nr))
#endif

/* BIT() uses the native register width of the current architecture. */
#ifdef __aarch64__
#define BIT				BIT_64
#else
#define BIT				BIT_32
#endif
Scott Branden | bf404c0 | 2017-04-10 11:45:52 -0700 | [diff] [blame] | 35 | |
/*
 * Create a contiguous bitmask starting at bit position @low and ending at
 * position @high (both inclusive). For example
 * GENMASK_64(39, 21) gives us the 64bit vector 0x000000ffffe00000.
 */
#if defined(__LINKER__) || defined(__ASSEMBLER__)
/*
 * NOTE(review): linker-script/assembler expression arithmetic is performed
 * on wide (64-bit) values, which is presumably why the plain ~0 and
 * unsuffixed literal forms below are acceptable there -- in C they could
 * shift out of range. Confirm against the linker documentation.
 */
#define GENMASK_32(high, low) \
	(((0xFFFFFFFF) << (low)) & (0xFFFFFFFF >> (32 - 1 - (high))))

#define GENMASK_64(high, low) \
	((~0 << (low)) & (~0 >> (64 - 1 - (high))))
#else
/*
 * C variants: build an all-ones value of the exact width, shift right to
 * keep bits 0..high, then XOR away bits 0..low-1, leaving bits low..high.
 */
#define GENMASK_32(high, low) \
	((~UINT32_C(0) >> (32U - 1U - (high))) ^ ((BIT_32(low) - 1U)))

#define GENMASK_64(high, low) \
	((~UINT64_C(0) >> (64U - 1U - (high))) ^ ((BIT_64(low) - 1U)))
#endif

/* GENMASK uses the native register width of the current architecture. */
#ifdef __aarch64__
#define GENMASK				GENMASK_64
#else
#define GENMASK				GENMASK_32
#endif
| 60 | |
/*
 * Extract the high and low 32 bits of a 64-bit value.  The argument is
 * fully parenthesised: previously HI(base + off) expanded to
 * (base + off >> 32), applying the shift before the addition, and
 * LO(base + off) masked only 'off'.
 */
#define HI(addr)	((addr) >> 32)
#define LO(addr)	((addr) & 0xffffffff)
| 63 | |
/*
 * This variant of div_round_up can be used in macro definition but should not
 * be used in C code as the `div` parameter is evaluated twice.
 */
#define DIV_ROUND_UP_2EVAL(n, d)	(((n) + (d) - 1) / (d))

/*
 * Round-up division with single evaluation of 'div' (stored in a
 * temporary of the same type via a GNU statement expression); use this
 * form in C code.
 */
#define div_round_up(val, div) __extension__ ({	\
	__typeof__(div) _div = (div);			\
	((val) + _div - (__typeof__(div)) 1) / _div;	\
})
| 74 | |
/*
 * Type-safe MIN/MAX: each argument is evaluated exactly once.  The
 * discarded comparison `(void)(&_x == &_y)` has no run-time effect but
 * makes the compiler diagnose a pointer comparison between incompatible
 * types, i.e. it rejects calls whose two arguments differ in type.
 */
#define MIN(x, y) __extension__ ({	\
	__typeof__(x) _x = (x);		\
	__typeof__(y) _y = (y);		\
	(void)(&_x == &_y);		\
	(_x < _y) ? _x : _y;		\
})

#define MAX(x, y) __extension__ ({	\
	__typeof__(x) _x = (x);		\
	__typeof__(y) _y = (y);		\
	(void)(&_x == &_y);		\
	(_x > _y) ? _x : _y;		\
})

/*
 * Clamp _x to the inclusive range [min, max]; all three arguments must
 * have the same type (enforced by the same address-comparison trick).
 */
#define CLAMP(x, min, max) __extension__ ({	\
	__typeof__(x) _x = (x);			\
	__typeof__(min) _min = (min);		\
	__typeof__(max) _max = (max);		\
	(void)(&_x == &_min);			\
	(void)(&_x == &_max);			\
	((_x > _max) ? _max : ((_x < _min) ? _min : _x)); \
})
| 97 | |
/*
 * The round_up() macro rounds up a value to the given boundary in a
 * type-agnostic yet type-safe manner. The boundary must be a power of two.
 * In other words, it computes the smallest multiple of boundary which is
 * greater than or equal to value.
 *
 * round_down() is similar but rounds the value down instead.
 */
/* Helper: (boundary - 1) in value's type, i.e. a mask of the low bits. */
#define round_boundary(value, boundary)		\
	((__typeof__(value))((boundary) - 1))

/* Set all mask bits of (value - 1), then +1: next multiple of boundary. */
#define round_up(value, boundary)		\
	((((value) - 1) | round_boundary(value, boundary)) + 1)

/* Clear the mask bits: previous (or equal) multiple of boundary. */
#define round_down(value, boundary)		\
	((value) & ~round_boundary(value, boundary))
| 114 | |
/*
 * Add 'a' and 'b' and check whether the addition overflowed.
 * The sum is stored in '*res'.
 * Returns 0 on success and 1 on overflow.
 */
#define add_overflow(a, b, res) __builtin_add_overflow((a), (b), (res))

/*
 * Round 'v' up to a multiple of 'size' (a power of two) and check whether
 * the operation overflowed.
 * The rounded value is stored in '*res'.
 * Returns 0 on success and 1 on overflow.
 *
 * The comma expression on the success path is parenthesised so that it
 * forms the whole third operand of the ternary.  Without the parentheses
 * the third operand is only the (void) cast and the trailing ", 0"
 * applies to the entire ternary, so the macro evaluated to 0 (success)
 * even when the addition overflowed and '*res' was left unwritten.
 * __typeof__ is used (not typeof) for consistency with the other macros
 * in this header and for strict-ISO compilation modes.
 */
#define round_up_overflow(v, size, res) (__extension__({ \
	__typeof__(res) __res = res; \
	__typeof__(*(__res)) __roundup_tmp = 0; \
	__typeof__(v) __roundup_mask = (__typeof__(v))(size) - 1; \
	\
	add_overflow((v), __roundup_mask, &__roundup_tmp) ? 1 : \
		((void)(*(__res) = __roundup_tmp & ~__roundup_mask), 0); \
}))

/*
 * Add 'a' and 'b', then round the sum up to align with 'size', checking
 * both steps for overflow.
 * The rounded value is stored in '*res'.
 * Returns 0 on success and 1 on overflow.
 */
#define add_with_round_up_overflow(a, b, size, res) (__extension__({ \
	__typeof__(a) __a = (a); \
	__typeof__(__a) __add_res = 0; \
	\
	add_overflow((__a), (b), &__add_res) ? 1 : \
		(round_up_overflow(__add_res, (size), (res)) ? 1 : 0); \
}))
| 149 | |
/**
 * Helper macro to ensure a value lies on a given boundary.
 * Evaluates to true when 'value' is a multiple of 'boundary' (which must
 * be a power of two, as required by round_up()/round_down()).
 *
 * The argument is parenthesised before the cast: previously an expression
 * argument such as is_aligned(p + 1, 8) expanded to (uintptr_t) p + 1,
 * casting only 'p' and then adding 1 to the integer — wrong for pointers
 * to types wider than one byte.  Note 'value' is still evaluated twice.
 */
#define is_aligned(value, boundary) \
	(round_up((uintptr_t)(value), boundary) == \
	 round_down((uintptr_t)(value), boundary))
| 156 | |
/*
 * Evaluates to 1 if (_ptr + _inc) overflows, 0 otherwise.
 * Both arguments must be unsigned pointer values (i.e. uintptr_t).
 */
#define check_uptr_overflow(_ptr, _inc) \
	(((UINTPTR_MAX - (_inc)) < (_ptr)) ? 1 : 0)

/*
 * Evaluates to 1 if (_u32 + _inc) overflows, 0 otherwise.
 * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
 */
#define check_u32_overflow(_u32, _inc) \
	(((UINT32_MAX - (_inc)) < (_u32)) ? 1 : 0)
Jeenu Viswambharan | 19f6cf2 | 2017-12-07 08:43:05 +0000 | [diff] [blame] | 170 | |
/* Register size of the current architecture, in bytes. */
#ifdef __aarch64__
#define REGSZ		U(8)
#else
#define REGSZ		U(4)
#endif

/*
 * Test for the current architecture version to be at least the version
 * expected.  ARM_ARCH_MAJOR/ARM_ARCH_MINOR are presumably supplied by the
 * build system -- they are not defined in this header.
 */
#define ARM_ARCH_AT_LEAST(_maj, _min) \
	((ARM_ARCH_MAJOR > (_maj)) || \
	 ((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))
Jeenu Viswambharan | 0bc79d9 | 2017-08-16 11:44:25 +0100 | [diff] [blame] | 185 | |
/*
 * Import an assembly or linker symbol as a C expression with the specified
 * type.  The symbol is declared as a char array so that only its address
 * is meaningful; 'name' becomes an unused-tolerant constant of 'type'
 * holding that address.
 */
#define IMPORT_SYM(type, sym, name) \
	extern char sym[];\
	static const __attribute__((unused)) type name = (type) sym;

/*
 * When the symbol is used to hold a pointer, its alignment can be asserted
 * with this macro. For example, if there is a linker symbol that is going to
 * be used as a 64-bit pointer, the value of the linker symbol must also be
 * aligned to 64 bit. This macro makes sure this is the case.
 */
#define ASSERT_SYM_PTR_ALIGN(sym) assert(((size_t)(sym) % __alignof__(*(sym))) == 0)

/* Compiler-only barrier: blocks reordering across it; no CPU/memory fence. */
#define COMPILER_BARRIER() __asm__ volatile ("" ::: "memory")
Joel Hutton | 5cc3bc8 | 2018-03-21 11:40:57 +0000 | [diff] [blame] | 203 | |
/*
 * Compiler builtin of GCC >= 9 and planned in llvm: yields 'var' in a way
 * that inhibits speculative use of its value.  Falls back to a plain
 * expansion of 'var' when the builtin is unavailable.
 */
#ifdef __HAVE_SPECULATION_SAFE_VALUE
# define SPECULATION_SAFE_VALUE(var) __builtin_speculation_safe_value(var)
#else
# define SPECULATION_SAFE_VALUE(var) var
#endif
| 210 | |
/*
 * Ticks elapsed in one second with a signal of 1 MHz
 */
#define MHZ_TICKS_PER_SEC	U(1000000)

/*
 * Ticks elapsed in one second with a signal of 1 KHz
 */
#define KHZ_TICKS_PER_SEC	U(1000)
| 220 | |
Antonio Nino Diaz | bb77f88 | 2018-07-18 13:23:07 +0100 | [diff] [blame] | 221 | #endif /* UTILS_DEF_H */ |