/* plock - progressive locks
 *
 * Copyright (C) 2012-2017 Willy Tarreau <w@1wt.eu>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef PL_PLOCK_H
#define PL_PLOCK_H

#include "atomic-ops.h"
#ifdef _POSIX_PRIORITY_SCHEDULING
#include <sched.h>
#endif

/* 64 bit */
#define PLOCK64_RL_1   0x0000000000000004ULL
#define PLOCK64_RL_2PL 0x00000000FFFFFFF8ULL
#define PLOCK64_RL_ANY 0x00000000FFFFFFFCULL
#define PLOCK64_SL_1   0x0000000100000000ULL
#define PLOCK64_SL_ANY 0x0000000300000000ULL
#define PLOCK64_WL_1   0x0000000400000000ULL
#define PLOCK64_WL_2PL 0xFFFFFFF800000000ULL
#define PLOCK64_WL_ANY 0xFFFFFFFC00000000ULL

/* 32 bit */
#define PLOCK32_RL_1   0x00000004
#define PLOCK32_RL_2PL 0x0000FFF8
#define PLOCK32_RL_ANY 0x0000FFFC
#define PLOCK32_SL_1   0x00010000
#define PLOCK32_SL_ANY 0x00030000
#define PLOCK32_WL_1   0x00040000
#define PLOCK32_WL_2PL 0xFFF80000
#define PLOCK32_WL_ANY 0xFFFC0000
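
/* A hedged reading of the masks above, for orientation only (this note is
 * editorial, not from the original author): the lock word packs three
 * counters. With the 32-bit constants, R (readers) occupies bits 2-15,
 * S (seekers) bits 16-17 and W (writers) bits 18-31; the 64-bit layout is
 * the same shape widened (R in bits 2-31, S in bits 32-33, W in bits 34-63).
 * Bits 0-1 are not covered by any mask here. The *_1 constants appear to be
 * the increment for a single holder, *_ANY covers the whole field, and *_2PL
 * covers the field minus its lowest bit, i.e. it becomes non-zero once at
 * least two holders are counted.
 */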

/* dereferences <*p> as unsigned long without causing aliasing issues */
#define pl_deref_long(p) ({ volatile unsigned long *__pl_l = (unsigned long *)(p); *__pl_l; })

/* dereferences <*p> as unsigned int without causing aliasing issues */
#define pl_deref_int(p) ({ volatile unsigned int *__pl_i = (unsigned int *)(p); *__pl_i; })

/* This function waits for <lock> to release all bits covered by <mask>, and
 * enforces an exponential backoff using CPU pauses to limit the pollution to
 * the other threads' caches. The progression follows (1.5^N)-1, with the
 * backoff counter capped by the 0x3ffff mask below, which is more than
 * sufficient even for very large numbers of threads. It is possible to
 * disable exponential backoff (EBO) for debugging purposes by setting
 * PLOCK_DISABLE_EBO, in which case the function will be replaced with a
 * simpler macro. This may for example be useful to more easily track
 * callers' CPU usage. The macro was not designed to be used outside of the
 * functions defined here.
 */
#if defined(PLOCK_DISABLE_EBO)
#define pl_wait_unlock_long(lock, mask)               \
        ({                                            \
                unsigned long _r;                     \
                do {                                  \
                        pl_cpu_relax();               \
                        _r = pl_deref_long(lock);     \
                } while (_r & mask);                  \
                _r; /* return value */                \
        })
#else
__attribute__((unused,noinline,no_instrument_function))
static unsigned long pl_wait_unlock_long(const unsigned long *lock, const unsigned long mask)
{
        unsigned long ret;
        unsigned int m = 0;

        do {
                unsigned int loops = m;

#ifdef _POSIX_PRIORITY_SCHEDULING
                if (loops >= 65536) {
                        sched_yield();
                        loops -= 32768;
                }
#endif
                for (; loops >= 200; loops -= 10)
                        pl_cpu_relax();

                for (; loops >= 1; loops--)
                        pl_barrier();

                ret = pl_deref_long(lock);
                if (__builtin_expect(ret & mask, 0) == 0)
                        break;

                /* The expression below grows the counter exponentially (by
                 * roughly 1.5x per round) while still making low values
                 * grow. This allows competing threads to wait for different
                 * durations once the threshold is reached.
                 */
                m = ((m + (m >> 1)) + 2) & 0x3ffff;
        } while (1);

        return ret;
}
#endif /* PLOCK_DISABLE_EBO */
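
/* For reference (computed from the update rule above, editorial note): the
 * backoff counter m takes the values 0, 2, 5, 9, 15, 24, 38, 59, 90, 137,
 * 207, ... i.e. roughly 1.5x growth per failed round, until the 0x3ffff
 * mask wraps it.
 */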

/* This function waits for <lock> to release all bits covered by <mask>, and
 * enforces an exponential backoff using CPU pauses to limit the pollution to
 * the other threads' caches. As in pl_wait_unlock_long() above, the
 * progression follows (1.5^N)-1, with the backoff counter capped by the
 * 0x3ffff mask below, which is more than sufficient even for very large
 * numbers of threads. The function benefits slightly from size optimization
 * under gcc, but clang cannot do it, so it's not done here, as it doesn't
 * make a big difference. It is possible to disable exponential backoff (EBO)
 * for debugging purposes by setting PLOCK_DISABLE_EBO, in which case the
 * function will be replaced with a simpler macro. This may for example be
 * useful to more easily track callers' CPU usage. The macro was not designed
 * to be used outside of the functions defined here.
 */
#if defined(PLOCK_DISABLE_EBO)
#define pl_wait_unlock_int(lock, mask)                \
        ({                                            \
                unsigned int _r;                      \
                do {                                  \
                        pl_cpu_relax();               \
                        _r = pl_deref_int(lock);      \
                } while (_r & mask);                  \
                _r; /* return value */                \
        })
#else
# if defined(PLOCK_INLINE_EBO)
__attribute__((unused,always_inline,no_instrument_function)) inline
# else
__attribute__((unused,noinline,no_instrument_function))
# endif
static unsigned int pl_wait_unlock_int(const unsigned int *lock, const unsigned int mask)
{
        unsigned int ret;
        unsigned int m = 0;

        do {
                unsigned int loops = m;

#ifdef _POSIX_PRIORITY_SCHEDULING
                if (loops >= 65536) {
                        sched_yield();
                        loops -= 32768;
                }
#endif
                for (; loops >= 200; loops -= 10)
                        pl_cpu_relax();

                for (; loops >= 1; loops--)
                        pl_barrier();

                ret = pl_deref_int(lock);
                if (__builtin_expect(ret & mask, 0) == 0)
                        break;

                /* The expression below grows the counter exponentially (by
                 * roughly 1.5x per round) while still making low values
                 * grow. This allows competing threads to wait for different
                 * durations once the threshold is reached.
                 */
                m = ((m + (m >> 1)) + 2) & 0x3ffff;
        } while (1);

        return ret;
}
#endif /* PLOCK_DISABLE_EBO */

/* This function waits for <lock> to change from value <prev> and returns the
 * new value. It enforces an exponential backoff using CPU pauses to limit
 * the pollution to the other threads' caches. The progression follows
 * (2^N)-1, limited to 255 iterations, which is more than sufficient even for
 * very large numbers of threads. It is designed to be called after a first
 * test which retrieves the previous value, so it starts by waiting. The
 * function benefits slightly from size optimization under gcc, but clang
 * cannot do it, so it's not done here, as it doesn't make a big difference.
 */
__attribute__((unused,noinline,no_instrument_function))
static unsigned long pl_wait_new_long(const unsigned long *lock, const unsigned long prev)
{
        unsigned char m = 0;
        unsigned long curr;

        do {
                unsigned char loops = m + 1;
                m = (m << 1) + 1;
                do {
                        pl_cpu_relax();
                } while (__builtin_expect(--loops, 0));
                curr = pl_deref_long(lock);
        } while (__builtin_expect(curr == prev, 0));
        return curr;
}

/* This function waits for <lock> to change from value <prev> and returns the
 * new value. It enforces an exponential backoff using CPU pauses to limit
 * the pollution to the other threads' caches. The progression follows
 * (2^N)-1, limited to 255 iterations, which is more than sufficient even for
 * very large numbers of threads. It is designed to be called after a first
 * test which retrieves the previous value, so it starts by waiting. The
 * function benefits slightly from size optimization under gcc, but clang
 * cannot do it, so it's not done here, as it doesn't make a big difference.
 */
__attribute__((unused,noinline,no_instrument_function))
static unsigned int pl_wait_new_int(const unsigned int *lock, const unsigned int prev)
{
        unsigned char m = 0;
        unsigned int curr;

        do {
                unsigned char loops = m + 1;
                m = (m << 1) + 1;
                do {
                        pl_cpu_relax();
                } while (__builtin_expect(--loops, 0));
                curr = pl_deref_int(lock);
        } while (__builtin_expect(curr == prev, 0));
        return curr;
}
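
/* Illustrative sketch (editorial assumption: hypothetical caller-side code,
 * not part of this API, guarded so it never compiles into the header).
 * pl_wait_new_int() is meant to follow a first read, e.g. to watch a
 * generation counter bumped by another thread:
 */
#ifdef PLOCK_EXAMPLES
static unsigned int wait_for_next_generation(const unsigned int *gen)
{
        unsigned int cur = pl_deref_int(gen); /* first test retrieves current value */
        return pl_wait_new_int(gen, cur);     /* backs off until it changes */
}
#endif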

/* request shared read access (R), return non-zero on success, otherwise 0 */
#define pl_try_r(lock) (                                                      \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                register unsigned long __pl_r = pl_deref_long(lock) & PLOCK64_WL_ANY; \
                pl_barrier();                                                 \
                if (!__builtin_expect(__pl_r, 0)) {                           \
                        __pl_r = pl_xadd((lock), PLOCK64_RL_1) & PLOCK64_WL_ANY; \
                        if (__builtin_expect(__pl_r, 0))                      \
                                pl_sub((lock), PLOCK64_RL_1);                 \
                }                                                             \
                !__pl_r; /* return value */                                   \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                register unsigned int __pl_r = pl_deref_int(lock) & PLOCK32_WL_ANY; \
                pl_barrier();                                                 \
                if (!__builtin_expect(__pl_r, 0)) {                           \
                        __pl_r = pl_xadd((lock), PLOCK32_RL_1) & PLOCK32_WL_ANY; \
                        if (__builtin_expect(__pl_r, 0))                      \
                                pl_sub((lock), PLOCK32_RL_1);                 \
                }                                                             \
                !__pl_r; /* return value */                                   \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_try_r__(char *,int);  \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_try_r__(__FILE__,__LINE__); \
                0;                                                            \
        })                                                                    \
)

/* request shared read access (R) and wait for it. In order not to disturb a W
 * lock waiting for all readers to leave, we first check if a W lock is held
 * before trying to claim the R lock.
 */
#define pl_take_r(lock)                                                       \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                register unsigned long *__lk_r = (unsigned long *)(lock);     \
                register unsigned long __set_r = PLOCK64_RL_1;                \
                register unsigned long __msk_r = PLOCK64_WL_ANY;              \
                while (1) {                                                   \
                        if (__builtin_expect(pl_deref_long(__lk_r) & __msk_r, 0)) \
                                pl_wait_unlock_long(__lk_r, __msk_r);         \
                        if (!__builtin_expect(pl_xadd(__lk_r, __set_r) & __msk_r, 0)) \
                                break;                                        \
                        pl_sub(__lk_r, __set_r);                              \
                }                                                             \
                pl_barrier();                                                 \
                0;                                                            \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                register unsigned int *__lk_r = (unsigned int *)(lock);       \
                register unsigned int __set_r = PLOCK32_RL_1;                 \
                register unsigned int __msk_r = PLOCK32_WL_ANY;               \
                while (1) {                                                   \
                        if (__builtin_expect(pl_deref_int(__lk_r) & __msk_r, 0)) \
                                pl_wait_unlock_int(__lk_r, __msk_r);          \
                        if (!__builtin_expect(pl_xadd(__lk_r, __set_r) & __msk_r, 0)) \
                                break;                                        \
                        pl_sub(__lk_r, __set_r);                              \
                }                                                             \
                pl_barrier();                                                 \
                0;                                                            \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_take_r__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_take_r__(__FILE__,__LINE__); \
                0;                                                            \
        })

/* release the read access (R) lock */
#define pl_drop_r(lock) (                                                     \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                pl_barrier();                                                 \
                pl_sub(lock, PLOCK64_RL_1);                                   \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                pl_barrier();                                                 \
                pl_sub(lock, PLOCK32_RL_1);                                   \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_drop_r__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_drop_r__(__FILE__,__LINE__); \
        })                                                                    \
)
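
/* Illustrative sketch (editorial assumption: hypothetical caller-side code,
 * not part of this API, guarded so it never compiles into the header).
 * Shared read access follows a plain take/drop pattern:
 */
#ifdef PLOCK_EXAMPLES
static unsigned long map_lock; /* one plock word, zero-initialized */

static unsigned long lookup_under_r(const unsigned long *val)
{
        unsigned long v;

        pl_take_r(&map_lock);  /* waits while W is held */
        v = pl_deref_long(val); /* read-only work goes here */
        pl_drop_r(&map_lock);
        return v;
}
#endif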

/* request a seek access (S), return non-zero on success, otherwise 0 */
#define pl_try_s(lock) (                                                      \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                register unsigned long __pl_r = pl_deref_long(lock);          \
                pl_barrier();                                                 \
                if (!__builtin_expect(__pl_r & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
                        __pl_r = pl_xadd((lock), PLOCK64_SL_1 | PLOCK64_RL_1) & \
                                 (PLOCK64_WL_ANY | PLOCK64_SL_ANY);           \
                        if (__builtin_expect(__pl_r, 0))                      \
                                pl_sub((lock), PLOCK64_SL_1 | PLOCK64_RL_1);  \
                }                                                             \
                !__pl_r; /* return value */                                   \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                register unsigned int __pl_r = pl_deref_int(lock);            \
                pl_barrier();                                                 \
                if (!__builtin_expect(__pl_r & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
                        __pl_r = pl_xadd((lock), PLOCK32_SL_1 | PLOCK32_RL_1) & \
                                 (PLOCK32_WL_ANY | PLOCK32_SL_ANY);           \
                        if (__builtin_expect(__pl_r, 0))                      \
                                pl_sub((lock), PLOCK32_SL_1 | PLOCK32_RL_1);  \
                }                                                             \
                !__pl_r; /* return value */                                   \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_try_s__(char *,int);  \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_try_s__(__FILE__,__LINE__); \
                0;                                                            \
        })                                                                    \
)

/* request a seek access (S) and wait for it. The lock is immediately claimed,
 * and only upon failure an exponential backoff is used. S locks rarely compete
 * with W locks so S will generally not disturb W. As the S lock may be used as
 * a spinlock, it's important to grab it as fast as possible.
 */
#define pl_take_s(lock)                                                       \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                register unsigned long *__lk_r = (unsigned long *)(lock);     \
                register unsigned long __set_r = PLOCK64_SL_1 | PLOCK64_RL_1; \
                register unsigned long __msk_r = PLOCK64_WL_ANY | PLOCK64_SL_ANY; \
                while (1) {                                                   \
                        if (!__builtin_expect(pl_xadd(__lk_r, __set_r) & __msk_r, 0)) \
                                break;                                        \
                        pl_sub(__lk_r, __set_r);                              \
                        pl_wait_unlock_long(__lk_r, __msk_r);                 \
                }                                                             \
                pl_barrier();                                                 \
                0;                                                            \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                register unsigned int *__lk_r = (unsigned int *)(lock);       \
                register unsigned int __set_r = PLOCK32_SL_1 | PLOCK32_RL_1;  \
                register unsigned int __msk_r = PLOCK32_WL_ANY | PLOCK32_SL_ANY; \
                while (1) {                                                   \
                        if (!__builtin_expect(pl_xadd(__lk_r, __set_r) & __msk_r, 0)) \
                                break;                                        \
                        pl_sub(__lk_r, __set_r);                              \
                        pl_wait_unlock_int(__lk_r, __msk_r);                  \
                }                                                             \
                pl_barrier();                                                 \
                0;                                                            \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_take_s__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_take_s__(__FILE__,__LINE__); \
                0;                                                            \
        })

/* release the seek access (S) lock */
#define pl_drop_s(lock) (                                                     \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                pl_barrier();                                                 \
                pl_sub(lock, PLOCK64_SL_1 + PLOCK64_RL_1);                    \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                pl_barrier();                                                 \
                pl_sub(lock, PLOCK32_SL_1 + PLOCK32_RL_1);                    \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_drop_s__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_drop_s__(__FILE__,__LINE__); \
        })                                                                    \
)

/* drop the S lock and go back to the R lock */
#define pl_stor(lock) (                                                       \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                pl_barrier();                                                 \
                pl_sub(lock, PLOCK64_SL_1);                                   \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                pl_barrier();                                                 \
                pl_sub(lock, PLOCK32_SL_1);                                   \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_stor__(char *,int);   \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_stor__(__FILE__,__LINE__); \
        })                                                                    \
)

/* take the W lock under the S lock */
#define pl_stow(lock) (                                                       \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                register unsigned long __pl_r = pl_xadd((lock), PLOCK64_WL_1); \
                while ((__pl_r & PLOCK64_RL_ANY) != PLOCK64_RL_1)             \
                        __pl_r = pl_deref_long(lock);                         \
                pl_barrier();                                                 \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                register unsigned int __pl_r = pl_xadd((lock), PLOCK32_WL_1); \
                while ((__pl_r & PLOCK32_RL_ANY) != PLOCK32_RL_1)             \
                        __pl_r = pl_deref_int(lock);                          \
                pl_barrier();                                                 \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_stow__(char *,int);   \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_stow__(__FILE__,__LINE__); \
        })                                                                    \
)

/* drop the W lock and go back to the S lock */
#define pl_wtos(lock) (                                                       \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                pl_barrier();                                                 \
                pl_sub(lock, PLOCK64_WL_1);                                   \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                pl_barrier();                                                 \
                pl_sub(lock, PLOCK32_WL_1);                                   \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_wtos__(char *,int);   \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_wtos__(__FILE__,__LINE__); \
        })                                                                    \
)

/* drop the W lock and go back to the R lock */
#define pl_wtor(lock) (                                                       \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                pl_barrier();                                                 \
                pl_sub(lock, PLOCK64_WL_1 | PLOCK64_SL_1);                    \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                pl_barrier();                                                 \
                pl_sub(lock, PLOCK32_WL_1 | PLOCK32_SL_1);                    \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_wtor__(char *,int);   \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_wtor__(__FILE__,__LINE__); \
        })                                                                    \
)
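
/* Illustrative sketch (editorial assumption: hypothetical caller-side code,
 * not part of this API, guarded so it never compiles into the header). A
 * seeker typically takes S, inspects the data, then either upgrades to W to
 * modify it or simply releases:
 */
#ifdef PLOCK_EXAMPLES
static unsigned long tree_lock;

static void update_if_needed(int *node, int key)
{
        pl_take_s(&tree_lock);       /* S also counts as one R */
        if (*node != key) {
                pl_stow(&tree_lock); /* S -> W: waits for other readers */
                *node = key;         /* exclusive section */
                pl_wtos(&tree_lock); /* W -> S again */
        }
        pl_drop_s(&tree_lock);
}
#endif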

/* request a write access (W), return non-zero on success, otherwise 0.
 *
 * An important note: by taking both W and S, we will cause an overflow of W
 * at 4/5 of the maximum value that can be stored into W, due to the fact
 * that S is 2 bits, so we're effectively adding 5 to the word composed by
 * W:S. But for all words multiple of 4 bits, the maximum value is a multiple
 * of 15, thus of 5. So the largest value with all bits set to one is reached
 * by adding 5, and adding 5 once more places value 1 in W and value 0 in S,
 * so W never wraps to 0. Also, even upon such an overflow, there's no risk
 * of confusing it with an atomic lock because R is not null, since it will
 * not have overflowed. For 32-bit locks, this situation happens when exactly
 * 13108 threads try to grab the lock at once, with W=1, S=0 and R=13108. For
 * 64-bit locks, it happens at 858993460 concurrent writers, where W=1, S=0
 * and R=858993460.
 */
#define pl_try_w(lock) (                                                      \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                register unsigned long __pl_r = pl_deref_long(lock);          \
                pl_barrier();                                                 \
                if (!__builtin_expect(__pl_r & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
                        __pl_r = pl_xadd((lock), PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1); \
                        if (__builtin_expect(__pl_r & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
                                /* a writer, seeker or atomic is present, let's leave */ \
                                pl_sub((lock), PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1); \
                                __pl_r &= (PLOCK64_WL_ANY | PLOCK64_SL_ANY); /* return value */ \
                        } else {                                              \
                                /* wait for all other readers to leave */     \
                                while (__pl_r)                                \
                                        __pl_r = pl_deref_long(lock) -        \
                                                 (PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1); \
                        }                                                     \
                }                                                             \
                !__pl_r; /* return value */                                   \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                register unsigned int __pl_r = pl_deref_int(lock);            \
                pl_barrier();                                                 \
                if (!__builtin_expect(__pl_r & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
                        __pl_r = pl_xadd((lock), PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1); \
                        if (__builtin_expect(__pl_r & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
                                /* a writer, seeker or atomic is present, let's leave */ \
                                pl_sub((lock), PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1); \
                                __pl_r &= (PLOCK32_WL_ANY | PLOCK32_SL_ANY); /* return value */ \
                        } else {                                              \
                                /* wait for all other readers to leave */     \
                                while (__pl_r)                                \
                                        __pl_r = pl_deref_int(lock) -         \
                                                 (PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1); \
                        }                                                     \
                }                                                             \
                !__pl_r; /* return value */                                   \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_try_w__(char *,int);  \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_try_w__(__FILE__,__LINE__); \
                0;                                                            \
        })                                                                    \
)

/* request a write access (W) and wait for it. The lock is immediately claimed,
 * and only upon failure an exponential backoff is used.
 */
#define pl_take_w(lock)                                                       \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                register unsigned long *__lk_r = (unsigned long *)(lock);     \
                register unsigned long __set_r = PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1; \
                register unsigned long __msk_r = PLOCK64_WL_ANY | PLOCK64_SL_ANY; \
                register unsigned long __pl_r;                                \
                while (1) {                                                   \
                        __pl_r = pl_xadd(__lk_r, __set_r);                    \
                        if (!__builtin_expect(__pl_r & __msk_r, 0))           \
                                break;                                        \
                        pl_sub(__lk_r, __set_r);                              \
                        __pl_r = pl_wait_unlock_long(__lk_r, __msk_r);        \
                }                                                             \
                /* wait for all other readers to leave */                     \
                while (__builtin_expect(__pl_r, 0))                           \
                        __pl_r = pl_deref_long(__lk_r) - __set_r;             \
                pl_barrier();                                                 \
                0;                                                            \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                register unsigned int *__lk_r = (unsigned int *)(lock);       \
                register unsigned int __set_r = PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1; \
                register unsigned int __msk_r = PLOCK32_WL_ANY | PLOCK32_SL_ANY; \
                register unsigned int __pl_r;                                 \
                while (1) {                                                   \
                        __pl_r = pl_xadd(__lk_r, __set_r);                    \
                        if (!__builtin_expect(__pl_r & __msk_r, 0))           \
                                break;                                        \
                        pl_sub(__lk_r, __set_r);                              \
                        __pl_r = pl_wait_unlock_int(__lk_r, __msk_r);         \
                }                                                             \
                /* wait for all other readers to leave */                     \
                while (__builtin_expect(__pl_r, 0))                           \
                        __pl_r = pl_deref_int(__lk_r) - __set_r;              \
                pl_barrier();                                                 \
                0;                                                            \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_take_w__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_take_w__(__FILE__,__LINE__); \
                0;                                                            \
        })

/* drop the write (W) lock entirely */
#define pl_drop_w(lock) (                                                     \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                pl_barrier();                                                 \
                pl_sub(lock, PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1);     \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                pl_barrier();                                                 \
                pl_sub(lock, PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1);     \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_drop_w__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_drop_w__(__FILE__,__LINE__); \
        })                                                                    \
)
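
/* Illustrative sketch (editorial assumption: hypothetical caller-side code,
 * not part of this API, guarded so it never compiles into the header).
 * Exclusive write access is simply bracketed by take/drop:
 */
#ifdef PLOCK_EXAMPLES
static unsigned int cfg_lock;

static void set_config(int *cfg, int v)
{
        pl_take_w(&cfg_lock); /* claims W+S+R, waits for readers */
        *cfg = v;             /* exclusive section */
        pl_drop_w(&cfg_lock);
}
#endif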

/* Try to upgrade from R to S, return non-zero on success, otherwise 0.
 * This lock will fail if S or W are already held. In case of failure to grab
 * the lock, it MUST NOT be retried without first dropping R, or it may never
 * complete due to S waiting for R to leave before upgrading to W.
 */
#define pl_try_rtos(lock) (                                                   \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                register unsigned long __pl_r = pl_deref_long(lock);          \
                pl_barrier();                                                 \
                if (!__builtin_expect(__pl_r & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
                        __pl_r = pl_xadd((lock), PLOCK64_SL_1) &              \
                                 (PLOCK64_WL_ANY | PLOCK64_SL_ANY);           \
                        if (__builtin_expect(__pl_r, 0))                      \
                                pl_sub((lock), PLOCK64_SL_1);                 \
                }                                                             \
                !__pl_r; /* return value */                                   \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                register unsigned int __pl_r = pl_deref_int(lock);            \
                pl_barrier();                                                 \
                if (!__builtin_expect(__pl_r & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
                        __pl_r = pl_xadd((lock), PLOCK32_SL_1) &              \
                                 (PLOCK32_WL_ANY | PLOCK32_SL_ANY);           \
                        if (__builtin_expect(__pl_r, 0))                      \
                                pl_sub((lock), PLOCK32_SL_1);                 \
                }                                                             \
                !__pl_r; /* return value */                                   \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_try_rtos__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_try_rtos__(__FILE__,__LINE__); \
                0;                                                            \
        })                                                                    \
)
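
/* Illustrative sketch (editorial assumption: hypothetical caller-side code,
 * not part of this API, guarded so it never compiles into the header). As
 * warned above, a failed R->S upgrade must not be retried while still
 * holding R; the usual fallback is to drop R and take S from scratch:
 */
#ifdef PLOCK_EXAMPLES
static unsigned long idx_lock;

static void reader_needs_seek(void)
{
        pl_take_r(&idx_lock);
        /* ... read-side work discovers it needs S ... */
        if (!pl_try_rtos(&idx_lock)) {
                pl_drop_r(&idx_lock); /* mandatory before waiting */
                pl_take_s(&idx_lock); /* may block; state must be re-checked */
        }
        /* now holding S (plus its implicit R) in both paths */
        pl_drop_s(&idx_lock);
}
#endif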


/* Try to upgrade from R to W, return non-zero on success, otherwise 0.
 * This lock will fail if S or W are already held. In case of failure to grab
 * the lock, it MUST NOT be retried without first dropping R, or it may never
 * complete due to S waiting for R to leave before upgrading to W. It waits for
 * the last readers to leave.
 */
#define pl_try_rtow(lock) (                                                   \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                register unsigned long *__lk_r = (unsigned long *)(lock);     \
                register unsigned long __set_r = PLOCK64_WL_1 | PLOCK64_SL_1; \
                register unsigned long __msk_r = PLOCK64_WL_ANY | PLOCK64_SL_ANY; \
                register unsigned long __pl_r;                                \
                pl_barrier();                                                 \
                while (1) {                                                   \
                        __pl_r = pl_xadd(__lk_r, __set_r);                    \
                        if (__builtin_expect(__pl_r & __msk_r, 0)) {          \
                                if (pl_xadd(__lk_r, - __set_r))               \
                                        break; /* the caller needs to drop the lock now */ \
                                continue; /* lock was released, try again */  \
                        }                                                     \
                        /* ok we're the only writer, wait for readers to leave */ \
                        while (__builtin_expect(__pl_r, 0))                   \
                                __pl_r = pl_deref_long(__lk_r) - (PLOCK64_WL_1|PLOCK64_SL_1|PLOCK64_RL_1); \
                        /* now return with __pl_r = 0 */                      \
                        break;                                                \
                }                                                             \
                !__pl_r; /* return value */                                   \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                register unsigned int *__lk_r = (unsigned int *)(lock);       \
                register unsigned int __set_r = PLOCK32_WL_1 | PLOCK32_SL_1;  \
                register unsigned int __msk_r = PLOCK32_WL_ANY | PLOCK32_SL_ANY; \
                register unsigned int __pl_r;                                 \
                pl_barrier();                                                 \
                while (1) {                                                   \
                        __pl_r = pl_xadd(__lk_r, __set_r);                    \
                        if (__builtin_expect(__pl_r & __msk_r, 0)) {          \
                                if (pl_xadd(__lk_r, - __set_r))               \
                                        break; /* the caller needs to drop the lock now */ \
                                continue; /* lock was released, try again */  \
                        }                                                     \
                        /* ok we're the only writer, wait for readers to leave */ \
                        while (__builtin_expect(__pl_r, 0))                   \
                                __pl_r = pl_deref_int(__lk_r) - (PLOCK32_WL_1|PLOCK32_SL_1|PLOCK32_RL_1); \
                        /* now return with __pl_r = 0 */                      \
                        break;                                                \
                }                                                             \
                !__pl_r; /* return value */                                   \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_try_rtow__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_try_rtow__(__FILE__,__LINE__); \
                0;                                                            \
        })                                                                    \
)


/* request atomic write access (A), return non-zero on success, otherwise 0.
 * It's a bit tricky as we only use the W bits for this and want to distinguish
 * between other atomic users and regular lock users. We have to give up if an
 * S lock appears. It's possible that such a lock stays hidden in the W bits
 * after an overflow, but in this case R is still held, ensuring we stay in the
 * loop until we discover the conflict. The lock only returns successfully if
 * all readers are gone (or converted to A).
 */
#define pl_try_a(lock) (                                                      \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                register unsigned long __pl_r = pl_deref_long(lock) & PLOCK64_SL_ANY; \
                pl_barrier();                                                 \
                if (!__builtin_expect(__pl_r, 0)) {                           \
                        __pl_r = pl_xadd((lock), PLOCK64_WL_1);               \
                        while (1) {                                           \
                                if (__builtin_expect(__pl_r & PLOCK64_SL_ANY, 0)) { \
                                        pl_sub((lock), PLOCK64_WL_1);         \
                                        break; /* return !__pl_r */           \
                                }                                             \
                                __pl_r &= PLOCK64_RL_ANY;                     \
                                if (!__builtin_expect(__pl_r, 0))             \
                                        break; /* return !__pl_r */           \
                                __pl_r = pl_deref_long(lock);                 \
                        }                                                     \
                }                                                             \
                !__pl_r; /* return value */                                   \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                register unsigned int __pl_r = pl_deref_int(lock) & PLOCK32_SL_ANY; \
                pl_barrier();                                                 \
                if (!__builtin_expect(__pl_r, 0)) {                           \
                        __pl_r = pl_xadd((lock), PLOCK32_WL_1);               \
                        while (1) {                                           \
                                if (__builtin_expect(__pl_r & PLOCK32_SL_ANY, 0)) { \
                                        pl_sub((lock), PLOCK32_WL_1);         \
                                        break; /* return !__pl_r */           \
                                }                                             \
                                __pl_r &= PLOCK32_RL_ANY;                     \
                                if (!__builtin_expect(__pl_r, 0))             \
                                        break; /* return !__pl_r */           \
                                __pl_r = pl_deref_int(lock);                  \
                        }                                                     \
                }                                                             \
                !__pl_r; /* return value */                                   \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_try_a__(char *,int);  \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_try_a__(__FILE__,__LINE__); \
                0;                                                            \
        })                                                                    \
)

/* request atomic write access (A) and wait for it. See comments in pl_try_a() for
 * explanations.
 */
#define pl_take_a(lock)                                                       \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                register unsigned long *__lk_r = (unsigned long *)(lock);     \
                register unsigned long __set_r = PLOCK64_WL_1;                \
                register unsigned long __msk_r = PLOCK64_SL_ANY;              \
                register unsigned long __pl_r;                                \
                __pl_r = pl_xadd(__lk_r, __set_r);                            \
                while (__builtin_expect(__pl_r & PLOCK64_RL_ANY, 0)) {        \
                        if (__builtin_expect(__pl_r & __msk_r, 0)) {          \
                                pl_sub(__lk_r, __set_r);                      \
                                pl_wait_unlock_long(__lk_r, __msk_r);         \
                                __pl_r = pl_xadd(__lk_r, __set_r);            \
                                continue;                                     \
                        }                                                     \
                        /* wait for all readers to leave or upgrade */        \
                        pl_cpu_relax(); pl_cpu_relax(); pl_cpu_relax();       \
                        __pl_r = pl_deref_long(lock);                         \
                }                                                             \
                pl_barrier();                                                 \
                0;                                                            \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                register unsigned int *__lk_r = (unsigned int *)(lock);       \
                register unsigned int __set_r = PLOCK32_WL_1;                 \
                register unsigned int __msk_r = PLOCK32_SL_ANY;               \
                register unsigned int __pl_r;                                 \
                __pl_r = pl_xadd(__lk_r, __set_r);                            \
                while (__builtin_expect(__pl_r & PLOCK32_RL_ANY, 0)) {        \
                        if (__builtin_expect(__pl_r & __msk_r, 0)) {          \
                                pl_sub(__lk_r, __set_r);                      \
                                pl_wait_unlock_int(__lk_r, __msk_r);          \
                                __pl_r = pl_xadd(__lk_r, __set_r);            \
                                continue;                                     \
                        }                                                     \
                        /* wait for all readers to leave or upgrade */        \
                        pl_cpu_relax(); pl_cpu_relax(); pl_cpu_relax();       \
                        __pl_r = pl_deref_int(lock);                          \
                }                                                             \
                pl_barrier();                                                 \
                0;                                                            \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_take_a__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_take_a__(__FILE__,__LINE__); \
                0;                                                            \
        })

/* release atomic write access (A) lock */
#define pl_drop_a(lock) (                                                     \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                pl_barrier();                                                 \
                pl_sub(lock, PLOCK64_WL_1);                                   \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                pl_barrier();                                                 \
                pl_sub(lock, PLOCK32_WL_1);                                   \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_drop_a__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_drop_a__(__FILE__,__LINE__); \
        })                                                                    \
)
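
/* Illustrative sketch (editorial assumption: hypothetical caller-side code,
 * not part of this API, guarded so it never compiles into the header). The
 * A lock serializes against R/S/W users but, per the comments above, not
 * against other A holders, so the protected section must itself use atomic
 * operations:
 */
#ifdef PLOCK_EXAMPLES
static unsigned long stats_lock;

static void bump_counter(unsigned long *counter)
{
        pl_take_a(&stats_lock); /* excludes R/S/W, not other A users */
        pl_xadd(counter, 1);    /* hence atomic ops inside the section */
        pl_drop_a(&stats_lock);
}
#endif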

/* Downgrade A to R. Inc(R), dec(W) then wait for W==0 */
#define pl_ator(lock) (                                                       \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                register unsigned long *__lk_r = (unsigned long *)(lock);     \
                register unsigned long __set_r = PLOCK64_RL_1 - PLOCK64_WL_1; \
                register unsigned long __msk_r = PLOCK64_WL_ANY;              \
                register unsigned long __pl_r = pl_xadd(__lk_r, __set_r) + __set_r; \
                while (__builtin_expect(__pl_r & __msk_r, 0)) {               \
                        __pl_r = pl_wait_unlock_long(__lk_r, __msk_r);        \
                }                                                             \
                pl_barrier();                                                 \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                register unsigned int *__lk_r = (unsigned int *)(lock);       \
                register unsigned int __set_r = PLOCK32_RL_1 - PLOCK32_WL_1;  \
                register unsigned int __msk_r = PLOCK32_WL_ANY;               \
                register unsigned int __pl_r = pl_xadd(__lk_r, __set_r) + __set_r; \
                while (__builtin_expect(__pl_r & __msk_r, 0)) {               \
                        __pl_r = pl_wait_unlock_int(__lk_r, __msk_r);         \
                }                                                             \
                pl_barrier();                                                 \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_ator__(char *,int);   \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_ator__(__FILE__,__LINE__); \
        })                                                                    \
)

/* Try to upgrade from R to A, return non-zero on success, otherwise 0.
 * This lock will fail if S is held or appears while waiting (typically due to
 * a previous grab that was disguised as a W due to an overflow). In case of
 * failure to grab the lock, it MUST NOT be retried without first dropping R,
 * or it may never complete due to S waiting for R to leave before upgrading
 * to W. The lock succeeds once there's no more R (i.e. all of them have either
 * completed or were turned to A).
 */
#define pl_try_rtoa(lock) (                                                   \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
                register unsigned long __pl_r = pl_deref_long(lock) & PLOCK64_SL_ANY; \
                pl_barrier();                                                 \
                if (!__builtin_expect(__pl_r, 0)) {                           \
                        __pl_r = pl_xadd((lock), PLOCK64_WL_1 - PLOCK64_RL_1); \
                        while (1) {                                           \
                                if (__builtin_expect(__pl_r & PLOCK64_SL_ANY, 0)) { \
                                        pl_sub((lock), PLOCK64_WL_1 - PLOCK64_RL_1); \
                                        break; /* return !__pl_r */           \
                                }                                             \
                                __pl_r &= PLOCK64_RL_ANY;                     \
                                if (!__builtin_expect(__pl_r, 0))             \
                                        break; /* return !__pl_r */           \
                                __pl_r = pl_deref_long(lock);                 \
                        }                                                     \
                }                                                             \
                !__pl_r; /* return value */                                   \
        }) : (sizeof(*(lock)) == 4) ? ({                                      \
                register unsigned int __pl_r = pl_deref_int(lock) & PLOCK32_SL_ANY; \
                pl_barrier();                                                 \
                if (!__builtin_expect(__pl_r, 0)) {                           \
                        __pl_r = pl_xadd((lock), PLOCK32_WL_1 - PLOCK32_RL_1); \
                        while (1) {                                           \
                                if (__builtin_expect(__pl_r & PLOCK32_SL_ANY, 0)) { \
                                        pl_sub((lock), PLOCK32_WL_1 - PLOCK32_RL_1); \
                                        break; /* return !__pl_r */           \
                                }                                             \
                                __pl_r &= PLOCK32_RL_ANY;                     \
                                if (!__builtin_expect(__pl_r, 0))             \
                                        break; /* return !__pl_r */           \
                                __pl_r = pl_deref_int(lock);                  \
                        }                                                     \
                }                                                             \
                !__pl_r; /* return value */                                   \
        }) : ({                                                               \
                void __unsupported_argument_size_for_pl_try_rtoa__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_try_rtoa__(__FILE__,__LINE__); \
                0;                                                            \
        })                                                                    \
)
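
/* Illustrative sketch (editorial assumption: hypothetical caller-side code,
 * not part of this API, guarded so it never compiles into the header).
 * Several readers may turn their R into A concurrently; each successful
 * pl_try_rtoa() removes one R and adds one A, and the last conversion
 * unblocks them all:
 */
#ifdef PLOCK_EXAMPLES
static unsigned long ring_lock;

static int reader_turns_atomic(void)
{
        pl_take_r(&ring_lock);
        if (!pl_try_rtoa(&ring_lock)) {
                pl_drop_r(&ring_lock); /* mandatory: do not retry under R */
                return 0;
        }
        /* now holding A */
        pl_drop_a(&ring_lock);
        return 1;
}
#endif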
Willy Tarreau | 688709d | 2022-07-29 17:53:31 +0200 | [diff] [blame] | 860 | |
| 861 | |
| 862 | /* |
| 863 | * The following operations cover the multiple writers model : U->R->J->C->A |
| 864 | */ |
| 865 | |
| 866 | |
| 867 | /* Upgrade R to J. Inc(W) then wait for R==W or S != 0 */ |
| 868 | #define pl_rtoj(lock) ( \ |
| 869 | (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \ |
| 870 | register unsigned long *__lk_r = (unsigned long *)(lock); \ |
| 871 | register unsigned long __pl_r = pl_xadd(__lk_r, PLOCK64_WL_1) + PLOCK64_WL_1; \ |
| 872 | register unsigned char __m = 0; \ |
| 873 | while (!(__pl_r & PLOCK64_SL_ANY) && \ |
| 874 | (__pl_r / PLOCK64_WL_1 != (__pl_r & PLOCK64_RL_ANY) / PLOCK64_RL_1)) { \ |
| 875 | unsigned char __loops = __m + 1; \ |
| 876 | __m = (__m << 1) + 1; \ |
| 877 | do { \ |
| 878 | pl_cpu_relax(); \ |
| 879 | pl_cpu_relax(); \ |
| 880 | } while (--__loops); \ |
| 881 | __pl_r = pl_deref_long(__lk_r); \ |
| 882 | } \ |
| 883 | pl_barrier(); \ |
| 884 | }) : (sizeof(*(lock)) == 4) ? ({ \ |
| 885 | register unsigned int *__lk_r = (unsigned int *)(lock); \ |
| 886 | register unsigned int __pl_r = pl_xadd(__lk_r, PLOCK32_WL_1) + PLOCK32_WL_1; \ |
| 887 | register unsigned char __m = 0; \ |
| 888 | while (!(__pl_r & PLOCK32_SL_ANY) && \ |
| 889 | (__pl_r / PLOCK32_WL_1 != (__pl_r & PLOCK32_RL_ANY) / PLOCK32_RL_1)) { \ |
| 890 | unsigned char __loops = __m + 1; \ |
| 891 | __m = (__m << 1) + 1; \ |
| 892 | do { \ |
| 893 | pl_cpu_relax(); \ |
| 894 | pl_cpu_relax(); \ |
| 895 | } while (--__loops); \ |
| 896 | __pl_r = pl_deref_int(__lk_r); \ |
| 897 | } \ |
| 898 | pl_barrier(); \ |
| 899 | }) : ({ \ |
| 900 | void __unsupported_argument_size_for_pl_rtoj__(char *,int); \ |
| 901 | if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \ |
| 902 | __unsupported_argument_size_for_pl_rtoj__(__FILE__,__LINE__); \ |
| 903 | }) \ |
| 904 | ) |
| 905 | |
/* Upgrade J to C. Set S. Only one thread needs to do it, though it's idempotent */
#define pl_jtoc(lock) (                                                        \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
		register unsigned long *__lk_r = (unsigned long *)(lock);     \
		register unsigned long __pl_r = pl_deref_long(__lk_r);        \
		if (!(__pl_r & PLOCK64_SL_ANY))                                \
			pl_or(__lk_r, PLOCK64_SL_1);                           \
		pl_barrier();                                                  \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		register unsigned int *__lk_r = (unsigned int *)(lock);       \
		register unsigned int __pl_r = pl_deref_int(__lk_r);          \
		if (!(__pl_r & PLOCK32_SL_ANY))                                \
			pl_or(__lk_r, PLOCK32_SL_1);                           \
		pl_barrier();                                                  \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_jtoc__(char *,int);    \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_jtoc__(__FILE__,__LINE__); \
	})                                                                     \
)

/* Upgrade R to C. Inc(W) then wait for R==W or S != 0 */
#define pl_rtoc(lock) (                                                        \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
		register unsigned long *__lk_r = (unsigned long *)(lock);     \
		register unsigned long __pl_r = pl_xadd(__lk_r, PLOCK64_WL_1) + PLOCK64_WL_1; \
		register unsigned char __m = 0;                                \
		while (__builtin_expect(!(__pl_r & PLOCK64_SL_ANY), 0)) {      \
			unsigned char __loops;                                 \
			if (__pl_r / PLOCK64_WL_1 == (__pl_r & PLOCK64_RL_ANY) / PLOCK64_RL_1) { \
				pl_or(__lk_r, PLOCK64_SL_1);                   \
				break;                                         \
			}                                                      \
			__loops = __m + 1;                                     \
			__m = (__m << 1) + 1;                                  \
			do {                                                   \
				pl_cpu_relax();                                \
				pl_cpu_relax();                                \
			} while (--__loops);                                   \
			__pl_r = pl_deref_long(__lk_r);                        \
		}                                                              \
		pl_barrier();                                                  \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		register unsigned int *__lk_r = (unsigned int *)(lock);       \
		register unsigned int __pl_r = pl_xadd(__lk_r, PLOCK32_WL_1) + PLOCK32_WL_1; \
		register unsigned char __m = 0;                                \
		while (__builtin_expect(!(__pl_r & PLOCK32_SL_ANY), 0)) {      \
			unsigned char __loops;                                 \
			if (__pl_r / PLOCK32_WL_1 == (__pl_r & PLOCK32_RL_ANY) / PLOCK32_RL_1) { \
				pl_or(__lk_r, PLOCK32_SL_1);                   \
				break;                                         \
			}                                                      \
			__loops = __m + 1;                                     \
			__m = (__m << 1) + 1;                                  \
			do {                                                   \
				pl_cpu_relax();                                \
				pl_cpu_relax();                                \
			} while (--__loops);                                   \
			__pl_r = pl_deref_int(__lk_r);                         \
		}                                                              \
		pl_barrier();                                                  \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_rtoc__(char *,int);    \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_rtoc__(__FILE__,__LINE__); \
	})                                                                     \
)
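
/* Note: pl_rtoc() behaves like pl_rtoj() immediately followed by pl_jtoc():
 * W is incremented, then the thread waits until either S was already set by
 * another claimer, or only writers remain (R == W), in which case it sets S
 * itself.
 */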

/* Drop the claim (C) lock: R--, W-- then clear S if !R */
#define pl_drop_c(lock) (                                                      \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
		register unsigned long *__lk_r = (unsigned long *)(lock);     \
		register unsigned long __set_r = - PLOCK64_RL_1 - PLOCK64_WL_1; \
		register unsigned long __pl_r = pl_xadd(__lk_r, __set_r) + __set_r; \
		if (!(__pl_r & PLOCK64_RL_ANY))                                \
			pl_and(__lk_r, ~PLOCK64_SL_1);                         \
		pl_barrier();                                                  \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		register unsigned int *__lk_r = (unsigned int *)(lock);       \
		register unsigned int __set_r = - PLOCK32_RL_1 - PLOCK32_WL_1; \
		register unsigned int __pl_r = pl_xadd(__lk_r, __set_r) + __set_r; \
		if (!(__pl_r & PLOCK32_RL_ANY))                                \
			pl_and(__lk_r, ~PLOCK32_SL_1);                         \
		pl_barrier();                                                  \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_drop_c__(char *,int);  \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_drop_c__(__FILE__,__LINE__); \
	})                                                                     \
)

/* Upgrade C to A. R-- then wait for !S or clear S if !R */
#define pl_ctoa(lock) (                                                        \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
		register unsigned long *__lk_r = (unsigned long *)(lock);     \
		register unsigned long __pl_r = pl_xadd(__lk_r, -PLOCK64_RL_1) - PLOCK64_RL_1; \
		while (__pl_r & PLOCK64_SL_ANY) {                              \
			if (!(__pl_r & PLOCK64_RL_ANY)) {                      \
				pl_and(__lk_r, ~PLOCK64_SL_1);                 \
				break;                                         \
			}                                                      \
			pl_cpu_relax();                                        \
			pl_cpu_relax();                                        \
			__pl_r = pl_deref_long(__lk_r);                        \
		}                                                              \
		pl_barrier();                                                  \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		register unsigned int *__lk_r = (unsigned int *)(lock);       \
		register unsigned int __pl_r = pl_xadd(__lk_r, -PLOCK32_RL_1) - PLOCK32_RL_1; \
		while (__pl_r & PLOCK32_SL_ANY) {                              \
			if (!(__pl_r & PLOCK32_RL_ANY)) {                      \
				pl_and(__lk_r, ~PLOCK32_SL_1);                 \
				break;                                         \
			}                                                      \
			pl_cpu_relax();                                        \
			pl_cpu_relax();                                        \
			__pl_r = pl_deref_int(__lk_r);                         \
		}                                                              \
		pl_barrier();                                                  \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_ctoa__(char *,int);    \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_ctoa__(__FILE__,__LINE__); \
	})                                                                     \
)

/* downgrade the atomic write access lock (A) to join (J) */
#define pl_atoj(lock) (                                                        \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
		pl_barrier();                                                  \
		pl_add(lock, PLOCK64_RL_1);                                    \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		pl_barrier();                                                  \
		pl_add(lock, PLOCK32_RL_1);                                    \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_atoj__(char *,int);    \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_atoj__(__FILE__,__LINE__); \
	})                                                                     \
)

/* Returns non-zero if the thread calling it is the last writer, otherwise
 * zero. It is designed to be called before pl_drop_j(), pl_drop_c() or
 * pl_drop_a() for operations which need to be performed only once (see the
 * usage sketch after pl_drop_j() below).
 */
#define pl_last_writer(lock) (                                                 \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
		!(pl_deref_long(lock) & PLOCK64_WL_2PL);                       \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		!(pl_deref_int(lock) & PLOCK32_WL_2PL);                        \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_last_writer__(char *,int); \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_last_writer__(__FILE__,__LINE__); \
		0;                                                             \
	})                                                                     \
)

/* attempt to get an exclusive write access via the J lock and wait for it.
 * Only one thread may succeed in this operation. It will not conflict with
 * other users and will first wait for all writers to leave, then for all
 * readers to leave before starting. This offers a solution to obtain an
 * exclusive access to a shared resource in the R/J/C/A model. A concurrent
 * take_a() will wait for this one to finish first. Using a CAS instead of
 * XADD should make the operation converge slightly faster. Returns non-zero
 * on success, otherwise zero (see the usage sketch after pl_drop_j() below).
 */
#define pl_try_j(lock) (                                                       \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
		register unsigned long *__lk_r = (unsigned long *)(lock);     \
		register unsigned long __set_r = PLOCK64_WL_1 | PLOCK64_RL_1; \
		register unsigned long __msk_r = PLOCK64_WL_ANY;              \
		register unsigned long __pl_r;                                 \
		register unsigned char __m;                                    \
		pl_wait_unlock_long(__lk_r, __msk_r);                          \
		__pl_r = pl_xadd(__lk_r, __set_r) + __set_r;                   \
		/* wait for all other readers to leave */                      \
		__m = 0;                                                       \
		while (__builtin_expect(__pl_r & PLOCK64_RL_2PL, 0)) {         \
			unsigned char __loops;                                 \
			/* give up on other writers */                         \
			if (__builtin_expect(__pl_r & PLOCK64_WL_2PL, 0)) {    \
				pl_sub(__lk_r, __set_r);                       \
				__pl_r = 0; /* failed to get the lock */       \
				break;                                         \
			}                                                      \
			__loops = __m + 1;                                     \
			__m = (__m << 1) + 1;                                  \
			do {                                                   \
				pl_cpu_relax();                                \
				pl_cpu_relax();                                \
			} while (--__loops);                                   \
			__pl_r = pl_deref_long(__lk_r);                        \
		}                                                              \
		pl_barrier();                                                  \
		__pl_r; /* return value, cannot be null on success */          \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		register unsigned int *__lk_r = (unsigned int *)(lock);       \
		register unsigned int __set_r = PLOCK32_WL_1 | PLOCK32_RL_1;  \
		register unsigned int __msk_r = PLOCK32_WL_ANY;               \
		register unsigned int __pl_r;                                  \
		register unsigned char __m;                                    \
		pl_wait_unlock_int(__lk_r, __msk_r);                           \
		__pl_r = pl_xadd(__lk_r, __set_r) + __set_r;                   \
		/* wait for all other readers to leave */                      \
		__m = 0;                                                       \
		while (__builtin_expect(__pl_r & PLOCK32_RL_2PL, 0)) {         \
			unsigned char __loops;                                 \
			/* give up on other writers */                         \
			if (__builtin_expect(__pl_r & PLOCK32_WL_2PL, 0)) {    \
				pl_sub(__lk_r, __set_r);                       \
				__pl_r = 0; /* failed to get the lock */       \
				break;                                         \
			}                                                      \
			__loops = __m + 1;                                     \
			__m = (__m << 1) + 1;                                  \
			do {                                                   \
				pl_cpu_relax();                                \
				pl_cpu_relax();                                \
			} while (--__loops);                                   \
			__pl_r = pl_deref_int(__lk_r);                         \
		}                                                              \
		pl_barrier();                                                  \
		__pl_r; /* return value, cannot be null on success */          \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_try_j__(char *,int);   \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_try_j__(__FILE__,__LINE__); \
		0;                                                             \
	})                                                                     \
)

/* request an exclusive write access via the J lock and wait for it. Only one
 * thread may succeed in this operation. It will not conflict with other users
 * and will first wait for all writers to leave, then for all readers to leave
 * before starting. This offers a solution to obtain an exclusive access to a
 * shared resource in the R/J/C/A model. A concurrent take_a() will wait for
 * this one to finish first. Using a CAS instead of XADD should make the
 * operation converge slightly faster.
 */
#define pl_take_j(lock) (                                                      \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
		__label__ __retry;                                             \
		register unsigned long *__lk_r = (unsigned long *)(lock);     \
		register unsigned long __set_r = PLOCK64_WL_1 | PLOCK64_RL_1; \
		register unsigned long __msk_r = PLOCK64_WL_ANY;              \
		register unsigned long __pl_r;                                 \
		register unsigned char __m;                                    \
	__retry:                                                               \
		pl_wait_unlock_long(__lk_r, __msk_r);                          \
		__pl_r = pl_xadd(__lk_r, __set_r) + __set_r;                   \
		/* wait for all other readers to leave */                      \
		__m = 0;                                                       \
		while (__builtin_expect(__pl_r & PLOCK64_RL_2PL, 0)) {         \
			unsigned char __loops;                                 \
			/* but rollback on other writers */                    \
			if (__builtin_expect(__pl_r & PLOCK64_WL_2PL, 0)) {    \
				pl_sub(__lk_r, __set_r);                       \
				goto __retry;                                  \
			}                                                      \
			__loops = __m + 1;                                     \
			__m = (__m << 1) + 1;                                  \
			do {                                                   \
				pl_cpu_relax();                                \
				pl_cpu_relax();                                \
			} while (--__loops);                                   \
			__pl_r = pl_deref_long(__lk_r);                        \
		}                                                              \
		pl_barrier();                                                  \
		0;                                                             \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		__label__ __retry;                                             \
		register unsigned int *__lk_r = (unsigned int *)(lock);       \
		register unsigned int __set_r = PLOCK32_WL_1 | PLOCK32_RL_1;  \
		register unsigned int __msk_r = PLOCK32_WL_ANY;               \
		register unsigned int __pl_r;                                  \
		register unsigned char __m;                                    \
	__retry:                                                               \
		pl_wait_unlock_int(__lk_r, __msk_r);                           \
		__pl_r = pl_xadd(__lk_r, __set_r) + __set_r;                   \
		/* wait for all other readers to leave */                      \
		__m = 0;                                                       \
		while (__builtin_expect(__pl_r & PLOCK32_RL_2PL, 0)) {         \
			unsigned char __loops;                                 \
			/* but rollback on other writers */                    \
			if (__builtin_expect(__pl_r & PLOCK32_WL_2PL, 0)) {    \
				pl_sub(__lk_r, __set_r);                       \
				goto __retry;                                  \
			}                                                      \
			__loops = __m + 1;                                     \
			__m = (__m << 1) + 1;                                  \
			do {                                                   \
				pl_cpu_relax();                                \
				pl_cpu_relax();                                \
			} while (--__loops);                                   \
			__pl_r = pl_deref_int(__lk_r);                         \
		}                                                              \
		pl_barrier();                                                  \
		0;                                                             \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_take_j__(char *,int);  \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_take_j__(__FILE__,__LINE__); \
		0;                                                             \
	})                                                                     \
)
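
/* Note: pl_take_j() above is the blocking counterpart of pl_try_j(): where
 * pl_try_j() releases its registration and returns zero upon meeting another
 * writer, pl_take_j() rolls back and retries from the initial wait.
 */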

/* drop the join (J) lock entirely */
#define pl_drop_j(lock) (                                                      \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                      \
		pl_barrier();                                                  \
		pl_sub(lock, PLOCK64_WL_1 | PLOCK64_RL_1);                     \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		pl_barrier();                                                  \
		pl_sub(lock, PLOCK32_WL_1 | PLOCK32_RL_1);                     \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_drop_j__(char *,int);  \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_drop_j__(__FILE__,__LINE__); \
	})                                                                     \
)
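
/* Hedged usage sketch of the full multiple-writers sequence referenced in
 * the comments above, shown here once all the primitives it needs are
 * defined. It assumes the R-lock primitive pl_take_r() is defined earlier
 * in this file; the pl_example_* names are illustrative only, not part of
 * the API.
 */
__attribute__((unused,always_inline,no_instrument_function))
static inline void pl_example_rjca_sequence(unsigned long *lock)
{
	pl_take_r(lock);   /* R: shared read access */
	pl_rtoj(lock);     /* J: announce ourselves as a writer, still shared */
	pl_jtoc(lock);     /* C: set S to stop later upgrade attempts (idempotent) */
	pl_ctoa(lock);     /* A: wait until we are the only visitor left */
	/* ... exclusive write access to the shared resource here ... */
	pl_atoj(lock);     /* downgrade back to J */
	if (pl_last_writer(lock)) {
		/* once-only cleanup for the last writer would go here */
	}
	pl_drop_j(lock);   /* release W and R entirely */
}

/* And a hedged sketch of the non-blocking variant built on pl_try_j():
 * attempt to become the exclusive J writer, and give up if another writer
 * raced us.
 */
__attribute__((unused,always_inline,no_instrument_function))
static inline int pl_example_try_exclusive(unsigned long *lock)
{
	if (!pl_try_j(lock))
		return 0;  /* lost the race, the caller may retry later */
	/* ... exclusive write access here ... */
	pl_drop_j(lock);
	return 1;
}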

/*
 * The part below is for Low Overhead R/W locks (LORW). These ones are not
 * upgradable and not necessarily fair, but they try to be fast when
 * uncontended and to limit the cost and perturbation during contention.
 * Writers always have precedence over readers to preserve latency as much
 * as possible.
 *
 * The principle is to offer a fast no-contention path and a limited total
 * number of writes for the contended path. Since R/W locks are expected to
 * be used in situations where there is a benefit in separating reads from
 * writes, it is expected that reads are common (typically >= 50%) and that
 * there is often at least one reader (otherwise a spinlock wouldn't be a
 * problem). As such, a reader will try to pass instantly, and upon
 * detecting contention will immediately retract and wait in the queue. A
 * writer will also first try to pass instantly, and if it fails due to
 * pending readers, it will mark that it's waiting so that readers stop
 * entering. This leaves the writer waiting as close as possible to the
 * point of being granted access. New writers will also notice this previous
 * contention and will wait outside. This means that a successful access for
 * a reader or a writer requires a single CAS, while a contended attempt
 * requires one failed CAS and one successful XADD for a reader, or an
 * optional OR and N+1 CAS for the writer.
 *
 * A counter of shared users indicates the number of active readers, while a
 * (single-bit) counter of exclusive writers indicates whether the lock is
 * currently held for writes. This distinction also makes it possible to use
 * a single function to release the lock if desired, since the exclusive bit
 * indicates the state of the caller of unlock(). The WRQ bit is cleared
 * during the unlock.
 *
 * Layout (32/64 bit):
 *               31             2     1     0
 *   +-----------+--------------+-----+-----+
 *   |           |     SHR      | WRQ | EXC |
 *   +-----------+--------------+-----+-----+
 *
 * In order to minimize operations, the WRQ bit is held during EXC so that
 * the write waiter that had to fight for EXC doesn't have to release WRQ
 * during its operations, and will just drop it along with EXC upon unlock.
 *
 * This means the following costs:
 *   reader:
 *     success: 1 CAS
 *     failure: 1 CAS + 1 XADD
 *     unlock:  1 SUB
 *   writer:
 *     success: 1 RD + 1 CAS
 *     failure: 1 RD + 1 CAS + 0/1 OR + N CAS
 *     unlock:  1 AND
 */

#define PLOCK_LORW_EXC_BIT   ((sizeof(long) == 8) ? 0 : 0)
#define PLOCK_LORW_EXC_SIZE  ((sizeof(long) == 8) ? 1 : 1)
#define PLOCK_LORW_EXC_BASE  (1UL << PLOCK_LORW_EXC_BIT)
#define PLOCK_LORW_EXC_MASK  (((1UL << PLOCK_LORW_EXC_SIZE) - 1UL) << PLOCK_LORW_EXC_BIT)

#define PLOCK_LORW_WRQ_BIT   ((sizeof(long) == 8) ? 1 : 1)
#define PLOCK_LORW_WRQ_SIZE  ((sizeof(long) == 8) ? 1 : 1)
#define PLOCK_LORW_WRQ_BASE  (1UL << PLOCK_LORW_WRQ_BIT)
#define PLOCK_LORW_WRQ_MASK  (((1UL << PLOCK_LORW_WRQ_SIZE) - 1UL) << PLOCK_LORW_WRQ_BIT)

#define PLOCK_LORW_SHR_BIT   ((sizeof(long) == 8) ? 2 : 2)
#define PLOCK_LORW_SHR_SIZE  ((sizeof(long) == 8) ? 30 : 30)
#define PLOCK_LORW_SHR_BASE  (1UL << PLOCK_LORW_SHR_BIT)
#define PLOCK_LORW_SHR_MASK  (((1UL << PLOCK_LORW_SHR_SIZE) - 1UL) << PLOCK_LORW_SHR_BIT)
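
/* For reference, with the values above these constants currently evaluate
 * the same way on 32- and 64-bit targets: EXC_BASE = EXC_MASK = 0x1,
 * WRQ_BASE = WRQ_MASK = 0x2, SHR_BASE = 0x4 and SHR_MASK = 0xFFFFFFFC,
 * matching the layout drawing above.
 */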

__attribute__((unused,always_inline,no_instrument_function))
static inline void pl_lorw_rdlock(unsigned long *lock)
{
	unsigned long lk = 0;

	/* First, assume we're alone and try to get the read lock (fast path).
	 * It often works because read locks are often used on low-contention
	 * structs.
	 */
	lk = pl_cmpxchg(lock, 0, PLOCK_LORW_SHR_BASE);
	if (!lk)
		return;

	/* so we were not alone, make sure there's no writer waiting for the
	 * lock to be empty of visitors.
	 */
	if (lk & PLOCK_LORW_WRQ_MASK)
		lk = pl_wait_unlock_long(lock, PLOCK_LORW_WRQ_MASK);

	/* count us as visitor among others */
	lk = pl_xadd(lock, PLOCK_LORW_SHR_BASE);

	/* wait for end of exclusive access if any */
	if (lk & PLOCK_LORW_EXC_MASK)
		lk = pl_wait_unlock_long(lock, PLOCK_LORW_EXC_MASK);
}


__attribute__((unused,always_inline,no_instrument_function))
static inline void pl_lorw_wrlock(unsigned long *lock)
{
	unsigned long lk = 0;
	unsigned long old = 0;

	/* first, make sure another writer is not already blocked waiting for
	 * readers to leave. Note that tests have shown that it can be even
	 * faster to avoid the first check and to unconditionally wait.
	 */
	lk = pl_deref_long(lock);
	if (__builtin_expect(lk & PLOCK_LORW_WRQ_MASK, 1))
		lk = pl_wait_unlock_long(lock, PLOCK_LORW_WRQ_MASK);

	do {
		/* let's check for the two sources of contention at once */

		if (__builtin_expect(lk & (PLOCK_LORW_SHR_MASK | PLOCK_LORW_EXC_MASK), 1)) {
			/* check if there are still readers coming. If so, close
			 * the door and wait for them to leave.
			 */
			if (lk & PLOCK_LORW_SHR_MASK) {
				/* note below, an OR is significantly cheaper than BTS or XADD */
				if (!(lk & PLOCK_LORW_WRQ_MASK))
					pl_or(lock, PLOCK_LORW_WRQ_BASE);
				lk = pl_wait_unlock_long(lock, PLOCK_LORW_SHR_MASK);
			}

			/* And also wait for a previous writer to finish. */
			if (lk & PLOCK_LORW_EXC_MASK)
				lk = pl_wait_unlock_long(lock, PLOCK_LORW_EXC_MASK);
		}

		/* A fresh new reader may appear right now if there were none
		 * above and we didn't close the door.
		 */
		old = lk & ~PLOCK_LORW_SHR_MASK & ~PLOCK_LORW_EXC_MASK;
		lk = pl_cmpxchg(lock, old, old | PLOCK_LORW_EXC_BASE);
	} while (lk != old);

	/* done; we're not waiting anymore. The WRQ bit, if any, will be
	 * dropped by the unlock.
	 */
}


__attribute__((unused,always_inline,no_instrument_function))
static inline void pl_lorw_rdunlock(unsigned long *lock)
{
	pl_sub(lock, PLOCK_LORW_SHR_BASE);
}

__attribute__((unused,always_inline,no_instrument_function))
static inline void pl_lorw_wrunlock(unsigned long *lock)
{
	pl_and(lock, ~(PLOCK_LORW_WRQ_MASK | PLOCK_LORW_EXC_MASK));
}

__attribute__((unused,always_inline,no_instrument_function))
static inline void pl_lorw_unlock(unsigned long *lock)
{
	if (pl_deref_long(lock) & PLOCK_LORW_EXC_MASK)
		pl_lorw_wrunlock(lock);
	else
		pl_lorw_rdunlock(lock);
}
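
/* A hedged usage sketch for the LORW primitives above; the pl_example_*
 * names are illustrative only. pl_lorw_unlock() picks the right release
 * path by testing the EXC bit, so both sides may share a single unlock
 * call.
 */
__attribute__((unused,always_inline,no_instrument_function))
static inline void pl_example_lorw_reader(unsigned long *lock)
{
	pl_lorw_rdlock(lock);   /* shared access; a single CAS when uncontended */
	/* ... read the protected structure here ... */
	pl_lorw_unlock(lock);   /* EXC bit clear: releases as a reader (SHR--) */
}

__attribute__((unused,always_inline,no_instrument_function))
static inline void pl_example_lorw_writer(unsigned long *lock)
{
	pl_lorw_wrlock(lock);   /* exclusive access; may set WRQ to stop readers */
	/* ... modify the protected structure here ... */
	pl_lorw_unlock(lock);   /* EXC bit set: drops EXC and WRQ together */
}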

#endif /* PL_PLOCK_H */