/*
 * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef UTILS_DEF_H
#define UTILS_DEF_H

#include <export/lib/utils_def_exp.h>

/* Compute the number of elements in the given array */
#define ARRAY_SIZE(a)	\
	(sizeof(a) / sizeof((a)[0]))

#define IS_POWER_OF_TWO(x)	\
	(((x) & ((x) - 1)) == 0)

#define SIZE_FROM_LOG2_WORDS(n)	(U(4) << (n))

#define BIT_32(nr)	(U(1) << (nr))
#define BIT_64(nr)	(ULL(1) << (nr))

#ifdef __aarch64__
#define BIT	BIT_64
#else
#define BIT	BIT_32
#endif

/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h. For example,
 * GENMASK_64(39, 21) gives us the 64-bit vector 0x000000ffffe00000.
 */
#if defined(__LINKER__) || defined(__ASSEMBLER__)
#define GENMASK_32(h, l) \
	(((0xFFFFFFFF) << (l)) & (0xFFFFFFFF >> (32 - 1 - (h))))

#define GENMASK_64(h, l) \
	((~0 << (l)) & (~0 >> (64 - 1 - (h))))
#else
#define GENMASK_32(h, l) \
	(((~UINT32_C(0)) << (l)) & (~UINT32_C(0) >> (32 - 1 - (h))))

#define GENMASK_64(h, l) \
	(((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))
#endif

#ifdef __aarch64__
#define GENMASK	GENMASK_64
#else
#define GENMASK	GENMASK_32
#endif

/*
 * This variant of div_round_up can be used in macro definitions but should
 * not be used in C code, as the `div` parameter is evaluated twice.
 */
#define DIV_ROUND_UP_2EVAL(n, d)	(((n) + (d) - 1) / (d))

#define div_round_up(val, div) __extension__ ({	\
	__typeof__(div) _div = (div);		\
	((val) + _div - (__typeof__(div)) 1) / _div;	\
})
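
/*
 * Example (illustrative): both DIV_ROUND_UP_2EVAL(10, 3) and
 * div_round_up(10U, 3U) evaluate to 4. Prefer div_round_up() in C code:
 * with DIV_ROUND_UP_2EVAL, a divisor with side effects (e.g. *p++) would
 * be evaluated twice.
 */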

#define MIN(x, y) __extension__ ({	\
	__typeof__(x) _x = (x);		\
	__typeof__(y) _y = (y);		\
	(void)(&_x == &_y);		\
	(_x < _y) ? _x : _y;		\
})

#define MAX(x, y) __extension__ ({	\
	__typeof__(x) _x = (x);		\
	__typeof__(y) _y = (y);		\
	(void)(&_x == &_y);		\
	(_x > _y) ? _x : _y;		\
})

#define CLAMP(x, min, max) __extension__ ({	\
	__typeof__(x) _x = (x);			\
	__typeof__(min) _min = (min);		\
	__typeof__(max) _max = (max);		\
	(void)(&_x == &_min);			\
	(void)(&_x == &_max);			\
	((_x > _max) ? _max : ((_x < _min) ? _min : _x));	\
})

/*
 * The round_up() macro rounds up a value to the given boundary in a
 * type-agnostic yet type-safe manner. The boundary must be a power of two.
 * In other words, it computes the smallest multiple of boundary which is
 * greater than or equal to value.
 *
 * round_down() is similar but rounds the value down instead.
 */
#define round_boundary(value, boundary)		\
	((__typeof__(value))((boundary) - 1))

#define round_up(value, boundary)		\
	((((value) - 1) | round_boundary(value, boundary)) + 1)

#define round_down(value, boundary)		\
	((value) & ~round_boundary(value, boundary))
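
/*
 * Example (illustrative): with a 4KB (0x1000) boundary,
 *   round_up(0x1234, 0x1000)   == 0x2000
 *   round_down(0x1234, 0x1000) == 0x1000
 */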

/*
 * Add two values and check whether the addition overflowed.
 * The result is stored in '*res';
 * returns 0 on success and 1 on overflow.
 */
#define add_overflow(a, b, res) __builtin_add_overflow((a), (b), (res))
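
/*
 * Example (illustrative; 'sum' is a hypothetical uint32_t variable):
 *   add_overflow(UINT32_MAX, 1U, &sum) reports overflow,
 *   add_overflow(1U, 2U, &sum) succeeds and stores 3 in 'sum'.
 */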

/*
 * Round up a value to align with a given size and
 * check whether overflow happens.
 * The rounded-up value is stored in '*res';
 * returns 0 on success and 1 on overflow.
 */
#define round_up_overflow(v, size, res) (__extension__({ \
	typeof(res) __res = res; \
	typeof(*(__res)) __roundup_tmp = 0; \
	typeof(v) __roundup_mask = (typeof(v))(size) - 1; \
	\
	add_overflow((v), __roundup_mask, &__roundup_tmp) ? 1 : \
	(void)(*(__res) = __roundup_tmp & ~__roundup_mask), 0; \
}))
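
/*
 * Example usage (illustrative sketch; 'len' and 'aligned_len' are
 * hypothetical caller variables):
 *   size_t aligned_len;
 *   if (round_up_overflow(len, 4096U, &aligned_len) != 0) {
 *           handle the overflow error;
 *   }
 */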

/*
 * Add 'a' and 'b', then round up the result to align with a given size and
 * check whether overflow happens.
 * The rounded-up value is stored in '*res';
 * returns 0 on success and 1 on overflow.
 */
#define add_with_round_up_overflow(a, b, size, res) (__extension__({ \
	typeof(a) __a = (a); \
	typeof(__a) __add_res = 0; \
	\
	add_overflow((__a), (b), &__add_res) ? 1 : \
	round_up_overflow(__add_res, (size), (res)) ? 1 : 0; \
}))
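
/*
 * Example (illustrative; 'offset', 'len' and 'end' are hypothetical caller
 * variables): add_with_round_up_overflow(offset, len, 8U, &end) adds
 * 'offset' and 'len', rounds the sum up to a multiple of 8, and returns 1
 * if either step overflows.
 */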
141
Marc Bonnicid1907f02022-04-19 17:42:53 +0100142/**
143 * Helper macro to ensure a value lies on a given boundary.
144 */
145#define is_aligned(value, boundary) \
146 (round_up((uintptr_t) value, boundary) == \
147 round_down((uintptr_t) value, boundary))
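
/*
 * Example (illustrative): is_aligned(0x2000, 0x1000) is true, while
 * is_aligned(0x2004, 0x1000) is false.
 */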
148
Scott Brandenbf404c02017-04-10 11:45:52 -0700149/*
150 * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
151 * Both arguments must be unsigned pointer values (i.e. uintptr_t).
152 */
Antonio Nino Diazbb77f882018-07-18 13:23:07 +0100153#define check_uptr_overflow(_ptr, _inc) \
154 ((_ptr) > (UINTPTR_MAX - (_inc)))

/*
 * Evaluates to 1 if (u32 + inc) overflows, 0 otherwise.
 * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
 */
#define check_u32_overflow(_u32, _inc)	\
	((_u32) > (UINT32_MAX - (_inc)))
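
/*
 * Example (illustrative): check_u32_overflow(UINT32_MAX, 1U) evaluates to 1,
 * while check_u32_overflow(10U, 20U) evaluates to 0.
 */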

/* Register size of the current architecture. */
#ifdef __aarch64__
#define REGSZ	U(8)
#else
#define REGSZ	U(4)
#endif

/*
 * Test whether the current architecture version is at least the version
 * expected.
 */
#define ARM_ARCH_AT_LEAST(_maj, _min) \
	((ARM_ARCH_MAJOR > (_maj)) || \
	 ((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))
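
/*
 * Example (illustrative): for a build with ARM_ARCH_MAJOR == 8 and
 * ARM_ARCH_MINOR == 2, ARM_ARCH_AT_LEAST(8, 0) and ARM_ARCH_AT_LEAST(8, 2)
 * are both 1, while ARM_ARCH_AT_LEAST(8, 4) is 0.
 */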

/*
 * Import an assembly or linker symbol as a C expression with the specified
 * type.
 */
#define IMPORT_SYM(type, sym, name) \
	extern char sym[];\
	static const __attribute__((unused)) type name = (type) sym;
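
/*
 * Example usage (illustrative; '__SYMBOL_START__' is a hypothetical linker
 * symbol):
 *   IMPORT_SYM(uintptr_t, __SYMBOL_START__, SYMBOL_BASE);
 * SYMBOL_BASE can then be used as an ordinary C constant holding the
 * symbol's address.
 */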

/*
 * When the symbol is used to hold a pointer, its alignment can be asserted
 * with this macro. For example, if there is a linker symbol that is going to
 * be used as a 64-bit pointer, the value of the linker symbol must also be
 * aligned to 64 bits. This macro makes sure this is the case.
 */
#define ASSERT_SYM_PTR_ALIGN(sym) assert(((size_t)(sym) % __alignof__(*(sym))) == 0)
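
/*
 * Example usage (illustrative; 'ptr_sym' is a hypothetical symbol used as a
 * pointer):
 *   ASSERT_SYM_PTR_ALIGN(ptr_sym);
 * This asserts that the address held in 'ptr_sym' is aligned to
 * __alignof__(*ptr_sym).
 */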

#define COMPILER_BARRIER() __asm__ volatile ("" ::: "memory")

/* Compiler builtin available in GCC >= 9 and planned for LLVM */
#ifdef __HAVE_SPECULATION_SAFE_VALUE
# define SPECULATION_SAFE_VALUE(var) __builtin_speculation_safe_value(var)
#else
# define SPECULATION_SAFE_VALUE(var) var
#endif

/*
 * Ticks elapsed in one second with a signal of 1 MHz
 */
#define MHZ_TICKS_PER_SEC	U(1000000)

/*
 * Ticks elapsed in one second with a signal of 1 kHz
 */
#define KHZ_TICKS_PER_SEC	U(1000)

#endif /* UTILS_DEF_H */