blob: 22244c5a64b86cfee459ac7494b87127d07bd2ee [file] [log] [blame]
/* plock - progressive locks
 *
 * Copyright (C) 2012-2017 Willy Tarreau <w@1wt.eu>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
25
26#include "atomic-ops.h"
27
/* 64-bit lock word layout (from the masks below):
 * bits 63..34 = W (writers), bits 33..32 = S (seekers), bits 31..2 = R
 * (readers), bits 1..0 unused.
 */
#define PLOCK64_RL_1    0x0000000000000004ULL  /* increment for one R holder */
#define PLOCK64_RL_ANY  0x00000000FFFFFFFCULL  /* mask of the whole R field */
#define PLOCK64_SL_1    0x0000000100000000ULL  /* increment for one S holder */
#define PLOCK64_SL_ANY  0x0000000300000000ULL  /* mask of the whole S field */
#define PLOCK64_WL_1    0x0000000400000000ULL  /* increment for one W holder */
#define PLOCK64_WL_ANY  0xFFFFFFFC00000000ULL  /* mask of the whole W field */

/* 32-bit lock word layout: bits 31..18 = W, bits 17..16 = S, bits 15..2 = R,
 * bits 1..0 unused.
 */
#define PLOCK32_RL_1    0x00000004  /* increment for one R holder */
#define PLOCK32_RL_ANY  0x0000FFFC  /* mask of the whole R field */
#define PLOCK32_SL_1    0x00010000  /* increment for one S holder */
#define PLOCK32_SL_ANY  0x00030000  /* mask of the whole S field */
#define PLOCK32_WL_1    0x00040000  /* increment for one W holder */
#define PLOCK32_WL_ANY  0xFFFC0000  /* mask of the whole W field */
43
/* reads <*p> as an unsigned long through a volatile pointer, avoiding
 * strict-aliasing trouble and forcing a real memory access
 */
#define pl_deref_long(p) ({ volatile unsigned long *__pl_lp = (void *)(p); *__pl_lp; })

/* reads <*p> as an unsigned int through a volatile pointer, avoiding
 * strict-aliasing trouble and forcing a real memory access
 */
#define pl_deref_int(p) ({ volatile unsigned int *__pl_ip = (void *)(p); *__pl_ip; })
Emeric Brun7122ab32017-07-07 10:26:46 +020049
/* request shared read access (R), return non-zero on success, otherwise 0.
 * First peeks at the lock word and gives up for free if any writer is
 * present; otherwise registers the reader with an atomic add and rolls
 * back if a writer showed up in between.
 */
#define pl_try_r(lock) (                                                       \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                       \
		unsigned long __pl_v = pl_deref_long(lock) & PLOCK64_WL_ANY;   \
		pl_barrier();                                                  \
		if (!__builtin_expect(__pl_v, 0)) {                            \
			__pl_v = pl_xadd((lock), PLOCK64_RL_1) & PLOCK64_WL_ANY; \
			if (__builtin_expect(__pl_v, 0))                       \
				pl_sub((lock), PLOCK64_RL_1); /* writer raced us, undo */ \
		}                                                              \
		!__pl_v; /* return value */                                    \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		unsigned int __pl_v = pl_deref_int(lock) & PLOCK32_WL_ANY;     \
		pl_barrier();                                                  \
		if (!__builtin_expect(__pl_v, 0)) {                            \
			__pl_v = pl_xadd((lock), PLOCK32_RL_1) & PLOCK32_WL_ANY; \
			if (__builtin_expect(__pl_v, 0))                       \
				pl_sub((lock), PLOCK32_RL_1); /* writer raced us, undo */ \
		}                                                              \
		!__pl_v; /* return value */                                    \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_try_r__(char *,int);   \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_try_r__(__FILE__,__LINE__); \
		0;                                                             \
	})                                                                     \
)
77
/* request shared read access (R) and spin until it is granted */
#define pl_take_r(lock)                                            \
	do {                                                       \
		while (__builtin_expect(pl_try_r(lock), 1) == 0)   \
			pl_cpu_relax();                            \
	} while (0)
84
/* release the read access (R) lock by removing our single reader count */
#define pl_drop_r(lock) (                                                      \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                       \
		pl_sub(lock, PLOCK64_RL_1);                                    \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		pl_sub(lock, PLOCK32_RL_1);                                    \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_drop_r__(char *,int);  \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_drop_r__(__FILE__,__LINE__); \
	})                                                                     \
)
97
/* request a seek access (S), return non-zero on success, otherwise 0.
 * A seeker also counts as a reader (S and R are taken together), and is
 * refused if any writer or other seeker is present.
 */
#define pl_try_s(lock) (                                                       \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                       \
		unsigned long __pl_v = pl_deref_long(lock);                    \
		pl_barrier();                                                  \
		if (!__builtin_expect(__pl_v & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
			__pl_v = pl_xadd((lock), PLOCK64_SL_1 | PLOCK64_RL_1) & \
				 (PLOCK64_WL_ANY | PLOCK64_SL_ANY);            \
			if (__builtin_expect(__pl_v, 0))                       \
				pl_sub((lock), PLOCK64_SL_1 | PLOCK64_RL_1); /* conflict, undo */ \
		}                                                              \
		!__pl_v; /* return value */                                    \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		unsigned int __pl_v = pl_deref_int(lock);                      \
		pl_barrier();                                                  \
		if (!__builtin_expect(__pl_v & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
			__pl_v = pl_xadd((lock), PLOCK32_SL_1 | PLOCK32_RL_1) & \
				 (PLOCK32_WL_ANY | PLOCK32_SL_ANY);            \
			if (__builtin_expect(__pl_v, 0))                       \
				pl_sub((lock), PLOCK32_SL_1 | PLOCK32_RL_1); /* conflict, undo */ \
		}                                                              \
		!__pl_v; /* return value */                                    \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_try_s__(char *,int);   \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_try_s__(__FILE__,__LINE__); \
		0;                                                             \
	})                                                                     \
)
127
/* request a seek access (S) and spin until it is granted */
#define pl_take_s(lock)                                            \
	do {                                                       \
		while (__builtin_expect(pl_try_s(lock), 0) == 0)   \
			pl_cpu_relax();                            \
	} while (0)
134
/* release the seek access (S) lock; both the S and the R parts taken by
 * pl_try_s() are released at once
 */
#define pl_drop_s(lock) (                                                      \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                       \
		pl_sub(lock, PLOCK64_SL_1 + PLOCK64_RL_1);                     \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		pl_sub(lock, PLOCK32_SL_1 + PLOCK32_RL_1);                     \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_drop_s__(char *,int);  \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_drop_s__(__FILE__,__LINE__); \
	})                                                                     \
)
147
/* drop the S lock and go back to the R lock: only the S part is released,
 * the R count taken with it is kept
 */
#define pl_stor(lock) (                                                        \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                       \
		pl_sub(lock, PLOCK64_SL_1);                                    \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		pl_sub(lock, PLOCK32_SL_1);                                    \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_stor__(char *,int);    \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_stor__(__FILE__,__LINE__); \
	})                                                                     \
)
160
/* take the W lock under the S lock: add the W bit, then spin until the R
 * field drops to exactly one reader count (the one carried by our own S)
 */
#define pl_stow(lock) (                                                        \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                       \
		unsigned long __pl_v = pl_xadd((lock), PLOCK64_WL_1);          \
		pl_barrier();                                                  \
		while ((__pl_v & PLOCK64_RL_ANY) != PLOCK64_RL_1)              \
			__pl_v = pl_deref_long(lock);                          \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		unsigned int __pl_v = pl_xadd((lock), PLOCK32_WL_1);           \
		pl_barrier();                                                  \
		while ((__pl_v & PLOCK32_RL_ANY) != PLOCK32_RL_1)              \
			__pl_v = pl_deref_int(lock);                           \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_stow__(char *,int);    \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_stow__(__FILE__,__LINE__); \
	})                                                                     \
)
179
/* drop the W lock and go back to the S lock by removing only the W part */
#define pl_wtos(lock) (                                                        \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                       \
		pl_sub(lock, PLOCK64_WL_1);                                    \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		pl_sub(lock, PLOCK32_WL_1);                                    \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_wtos__(char *,int);    \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_wtos__(__FILE__,__LINE__); \
	})                                                                     \
)
192
/* drop the W lock and go back to the R lock: both W and S are released,
 * keeping only the R count
 */
#define pl_wtor(lock) (                                                        \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                       \
		pl_sub(lock, PLOCK64_WL_1 | PLOCK64_SL_1);                     \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		pl_sub(lock, PLOCK32_WL_1 | PLOCK32_SL_1);                     \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_wtor__(char *,int);    \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_wtor__(__FILE__,__LINE__); \
	})                                                                     \
)
205
/* request a write access (W), return non-zero on success, otherwise 0.
 *
 * Something important happens below: by taking both W and S, we effectively
 * add 5 to the word formed by W:S, so W overflows at 4/5 of the maximum value
 * storable there (S being 2 bits). But for any word whose width is a multiple
 * of 4 bits, the maximum value is a multiple of 15 and thus of 5, so the
 * all-bits-set value is reached exactly by adding 5, and adding 5 once more
 * yields W=1, S=0 — W never ends up null. Even on such an overflow there is
 * no risk of confusion with an atomic (A) lock, because R has not overflown
 * and is non-null. On 32-bit locks this happens when exactly 13108 threads
 * grab the lock at once (W=1, S=0, R=13108); on 64-bit locks it takes
 * 858993460 concurrent writers (W=1, S=0, R=858993460).
 */
#define pl_try_w(lock) (                                                       \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                       \
		unsigned long __pl_v = pl_deref_long(lock);                    \
		pl_barrier();                                                  \
		if (!__builtin_expect(__pl_v & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
			__pl_v = pl_xadd((lock), PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1); \
			if (__builtin_expect(__pl_v & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
				/* a writer, seeker or atomic is present, let's leave */ \
				pl_sub((lock), PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1); \
				__pl_v &= (PLOCK64_WL_ANY | PLOCK64_SL_ANY); /* return value */ \
			} else {                                               \
				/* wait for all other readers to leave */      \
				while (__pl_v)                                 \
					__pl_v = pl_deref_long(lock) -         \
						(PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1); \
				__pl_v = 0;                                    \
			}                                                      \
		}                                                              \
		!__pl_v; /* return value */                                    \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		unsigned int __pl_v = pl_deref_int(lock);                      \
		pl_barrier();                                                  \
		if (!__builtin_expect(__pl_v & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
			__pl_v = pl_xadd((lock), PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1); \
			if (__builtin_expect(__pl_v & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
				/* a writer, seeker or atomic is present, let's leave */ \
				pl_sub((lock), PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1); \
				__pl_v &= (PLOCK32_WL_ANY | PLOCK32_SL_ANY); /* return value */ \
			} else {                                               \
				/* wait for all other readers to leave */      \
				while (__pl_v)                                 \
					__pl_v = pl_deref_int(lock) -          \
						(PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1); \
				__pl_v = 0;                                    \
			}                                                      \
		}                                                              \
		!__pl_v; /* return value */                                    \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_try_w__(char *,int);   \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_try_w__(__FILE__,__LINE__); \
		0;                                                             \
	})                                                                     \
)
265
/* request a write access (W) and spin until it is granted */
#define pl_take_w(lock)                                            \
	do {                                                       \
		while (__builtin_expect(pl_try_w(lock), 0) == 0)   \
			pl_cpu_relax();                            \
	} while (0)
272
/* drop the write (W) lock entirely, releasing the W, S and R parts taken
 * together by pl_try_w()
 */
#define pl_drop_w(lock) (                                                      \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                       \
		pl_sub(lock, PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1);      \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		pl_sub(lock, PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1);      \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_drop_w__(char *,int);  \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_drop_w__(__FILE__,__LINE__); \
	})                                                                     \
)
285
/* Try to upgrade from R to S, return non-zero on success, otherwise 0.
 * This lock will fail if S or W are already held. In case of failure to grab
 * the lock, it MUST NOT be retried without first dropping R, or it may never
 * complete due to S waiting for R to leave before upgrading to W.
 */
#define pl_try_rtos(lock) (                                                    \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                       \
		unsigned long __pl_v = pl_deref_long(lock);                    \
		pl_barrier();                                                  \
		if (!__builtin_expect(__pl_v & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
			__pl_v = pl_xadd((lock), PLOCK64_SL_1) &               \
				 (PLOCK64_WL_ANY | PLOCK64_SL_ANY);            \
			if (__builtin_expect(__pl_v, 0))                       \
				pl_sub((lock), PLOCK64_SL_1); /* conflict, undo */ \
		}                                                              \
		!__pl_v; /* return value */                                    \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		unsigned int __pl_v = pl_deref_int(lock);                      \
		pl_barrier();                                                  \
		if (!__builtin_expect(__pl_v & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
			__pl_v = pl_xadd((lock), PLOCK32_SL_1) &               \
				 (PLOCK32_WL_ANY | PLOCK32_SL_ANY);            \
			if (__builtin_expect(__pl_v, 0))                       \
				pl_sub((lock), PLOCK32_SL_1); /* conflict, undo */ \
		}                                                              \
		!__pl_v; /* return value */                                    \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_try_rtos__(char *,int); \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_try_rtos__(__FILE__,__LINE__); \
		0;                                                             \
	})                                                                     \
)
319
320
/* request atomic write access (A), return non-zero on success, otherwise 0.
 * It's a bit tricky as we only use the W bits for this and want to distinguish
 * between other atomic users and regular lock users. We have to give up if an
 * S lock appears. It's possible that such a lock stays hidden in the W bits
 * after an overflow, but in this case R is still held, ensuring we stay in the
 * loop until we discover the conflict. The lock only returns successfully if
 * all readers are gone (or converted to A).
 */
#define pl_try_a(lock) (                                                       \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                       \
		unsigned long __pl_v = pl_deref_long(lock) & PLOCK64_SL_ANY;   \
		pl_barrier();                                                  \
		if (!__builtin_expect(__pl_v, 0)) {                            \
			__pl_v = pl_xadd((lock), PLOCK64_WL_1);                \
			while (1) {                                            \
				if (__builtin_expect(__pl_v & PLOCK64_SL_ANY, 0)) { \
					pl_sub((lock), PLOCK64_WL_1);          \
					break; /* seeker present: fail, return !__pl_v */ \
				}                                              \
				__pl_v &= PLOCK64_RL_ANY;                      \
				if (!__builtin_expect(__pl_v, 0))              \
					break; /* no readers left: success */  \
				__pl_v = pl_deref_long(lock);                  \
			}                                                      \
		}                                                              \
		!__pl_v; /* return value */                                    \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		unsigned int __pl_v = pl_deref_int(lock) & PLOCK32_SL_ANY;     \
		pl_barrier();                                                  \
		if (!__builtin_expect(__pl_v, 0)) {                            \
			__pl_v = pl_xadd((lock), PLOCK32_WL_1);                \
			while (1) {                                            \
				if (__builtin_expect(__pl_v & PLOCK32_SL_ANY, 0)) { \
					pl_sub((lock), PLOCK32_WL_1);          \
					break; /* seeker present: fail, return !__pl_v */ \
				}                                              \
				__pl_v &= PLOCK32_RL_ANY;                      \
				if (!__builtin_expect(__pl_v, 0))              \
					break; /* no readers left: success */  \
				__pl_v = pl_deref_int(lock);                   \
			}                                                      \
		}                                                              \
		!__pl_v; /* return value */                                    \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_try_a__(char *,int);   \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_try_a__(__FILE__,__LINE__); \
		0;                                                             \
	})                                                                     \
)
371
/* request atomic write access (A) and spin until it is granted */
#define pl_take_a(lock)                                            \
	do {                                                       \
		while (__builtin_expect(pl_try_a(lock), 1) == 0)   \
			pl_cpu_relax();                            \
	} while (0)
378
/* release atomic write access (A) lock by removing our W count */
#define pl_drop_a(lock) (                                                      \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                       \
		pl_sub(lock, PLOCK64_WL_1);                                    \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		pl_sub(lock, PLOCK32_WL_1);                                    \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_drop_a__(char *,int);  \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_drop_a__(__FILE__,__LINE__); \
	})                                                                     \
)
391
/* Try to upgrade from R to A, return non-zero on success, otherwise 0.
 * This lock will fail if S is held or appears while waiting (typically due to
 * a previous grab that was disguised as a W due to an overflow). In case of
 * failure to grab the lock, it MUST NOT be retried without first dropping R,
 * or it may never complete due to S waiting for R to leave before upgrading
 * to W. The lock succeeds once there's no more R (ie all of them have either
 * completed or were turned to A). Note that our own R is converted in the
 * same atomic step (W is added while R is removed).
 */
#define pl_try_rtoa(lock) (                                                    \
	(sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({                       \
		unsigned long __pl_v = pl_deref_long(lock) & PLOCK64_SL_ANY;   \
		pl_barrier();                                                  \
		if (!__builtin_expect(__pl_v, 0)) {                            \
			__pl_v = pl_xadd((lock), PLOCK64_WL_1 - PLOCK64_RL_1); \
			while (1) {                                            \
				if (__builtin_expect(__pl_v & PLOCK64_SL_ANY, 0)) { \
					pl_sub((lock), PLOCK64_WL_1 - PLOCK64_RL_1); \
					break; /* seeker present: fail, return !__pl_v */ \
				}                                              \
				__pl_v &= PLOCK64_RL_ANY;                      \
				if (!__builtin_expect(__pl_v, 0))              \
					break; /* no readers left: success */  \
				__pl_v = pl_deref_long(lock);                  \
			}                                                      \
		}                                                              \
		!__pl_v; /* return value */                                    \
	}) : (sizeof(*(lock)) == 4) ? ({                                       \
		unsigned int __pl_v = pl_deref_int(lock) & PLOCK32_SL_ANY;     \
		pl_barrier();                                                  \
		if (!__builtin_expect(__pl_v, 0)) {                            \
			__pl_v = pl_xadd((lock), PLOCK32_WL_1 - PLOCK32_RL_1); \
			while (1) {                                            \
				if (__builtin_expect(__pl_v & PLOCK32_SL_ANY, 0)) { \
					pl_sub((lock), PLOCK32_WL_1 - PLOCK32_RL_1); \
					break; /* seeker present: fail, return !__pl_v */ \
				}                                              \
				__pl_v &= PLOCK32_RL_ANY;                      \
				if (!__builtin_expect(__pl_v, 0))              \
					break; /* no readers left: success */  \
				__pl_v = pl_deref_int(lock);                   \
			}                                                      \
		}                                                              \
		!__pl_v; /* return value */                                    \
	}) : ({                                                                \
		void __unsupported_argument_size_for_pl_try_rtoa__(char *,int); \
		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
			__unsupported_argument_size_for_pl_try_rtoa__(__FILE__,__LINE__); \
		0;                                                             \
	})                                                                     \
)