/* plock - progressive locks
 *
 * Copyright (C) 2012-2017 Willy Tarreau <w@1wt.eu>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "atomic-ops.h"

/* 64 bit */
#define PLOCK64_RL_1   0x0000000000000004ULL
#define PLOCK64_RL_ANY 0x00000000FFFFFFFCULL
#define PLOCK64_SL_1   0x0000000100000000ULL
#define PLOCK64_SL_ANY 0x0000000300000000ULL
#define PLOCK64_WL_1   0x0000000400000000ULL
#define PLOCK64_WL_ANY 0xFFFFFFFC00000000ULL

/* 32 bit */
#define PLOCK32_RL_1   0x00000004
#define PLOCK32_RL_ANY 0x0000FFFC
#define PLOCK32_SL_1   0x00010000
#define PLOCK32_SL_ANY 0x00030000
#define PLOCK32_WL_1   0x00040000
#define PLOCK32_WL_ANY 0xFFFC0000

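/* The masks above pack three counters into a single lock word; the two
 * lowest bits are not used by any of the masks:
 *   64-bit: W = bits 63..34, S = bits 33..32, R = bits 31..2
 *   32-bit: W = bits 31..18, S = bits 17..16, R = bits 15..2
 * Each *_1 constant represents one holder of the corresponding class, and
 * each *_ANY mask tests whether any holder of that class is present.
 */
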
/* dereferences <*p> as unsigned long without causing aliasing issues */
#define pl_deref_long(p) ({ volatile unsigned long *__pl_l = (void *)(p); *__pl_l; })

/* dereferences <*p> as unsigned int without causing aliasing issues */
#define pl_deref_int(p) ({ volatile unsigned int *__pl_i = (void *)(p); *__pl_i; })

/* request shared read access (R), return non-zero on success, otherwise 0 */
#define pl_try_r(lock) (                                               \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({               \
                unsigned long __pl_r = pl_deref_long(lock) & PLOCK64_WL_ANY; \
                pl_barrier();                                          \
                if (!__builtin_expect(__pl_r, 0)) {                    \
                        __pl_r = pl_xadd((lock), PLOCK64_RL_1) & PLOCK64_WL_ANY; \
                        if (__builtin_expect(__pl_r, 0))               \
                                pl_sub((lock), PLOCK64_RL_1);          \
                }                                                      \
                !__pl_r; /* return value */                            \
        }) : (sizeof(*(lock)) == 4) ? ({                               \
                unsigned int __pl_r = pl_deref_int(lock) & PLOCK32_WL_ANY; \
                pl_barrier();                                          \
                if (!__builtin_expect(__pl_r, 0)) {                    \
                        __pl_r = pl_xadd((lock), PLOCK32_RL_1) & PLOCK32_WL_ANY; \
                        if (__builtin_expect(__pl_r, 0))               \
                                pl_sub((lock), PLOCK32_RL_1);          \
                }                                                      \
                !__pl_r; /* return value */                            \
        }) : ({                                                        \
                void __unsupported_argument_size_for_pl_try_r__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_try_r__(__FILE__,__LINE__); \
                0;                                                     \
        })                                                             \
)

/* request shared read access (R) and wait for it */
#define pl_take_r(lock)                                                \
        do {                                                           \
                while (__builtin_expect(pl_try_r(lock), 1) == 0)       \
                        pl_cpu_relax();                                \
        } while (0)

/* release the read access (R) lock */
#define pl_drop_r(lock) (                                              \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({               \
                pl_sub(lock, PLOCK64_RL_1);                            \
        }) : (sizeof(*(lock)) == 4) ? ({                               \
                pl_sub(lock, PLOCK32_RL_1);                            \
        }) : ({                                                        \
                void __unsupported_argument_size_for_pl_drop_r__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_drop_r__(__FILE__,__LINE__); \
        })                                                             \
)

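/* Usage sketch for the R lock: a read-side critical section. Assumes a
 * shared lock word of type unsigned long, zero-initialized; the body is a
 * placeholder for any read-only work.
 */
static inline void pl_example_reader(unsigned long *lock)
{
        pl_take_r(lock);        /* spins until no writer (W) is present */
        /* ... read shared data ... */
        pl_drop_r(lock);        /* remove our R count */
}
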
/* request a seek access (S), return non-zero on success, otherwise 0 */
#define pl_try_s(lock) (                                               \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({               \
                unsigned long __pl_r = pl_deref_long(lock);            \
                pl_barrier();                                          \
                if (!__builtin_expect(__pl_r & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
                        __pl_r = pl_xadd((lock), PLOCK64_SL_1 | PLOCK64_RL_1) & \
                                (PLOCK64_WL_ANY | PLOCK64_SL_ANY);     \
                        if (__builtin_expect(__pl_r, 0))               \
                                pl_sub((lock), PLOCK64_SL_1 | PLOCK64_RL_1); \
                }                                                      \
                !__pl_r; /* return value */                            \
        }) : (sizeof(*(lock)) == 4) ? ({                               \
                unsigned int __pl_r = pl_deref_int(lock);              \
                pl_barrier();                                          \
                if (!__builtin_expect(__pl_r & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
                        __pl_r = pl_xadd((lock), PLOCK32_SL_1 | PLOCK32_RL_1) & \
                                (PLOCK32_WL_ANY | PLOCK32_SL_ANY);     \
                        if (__builtin_expect(__pl_r, 0))               \
                                pl_sub((lock), PLOCK32_SL_1 | PLOCK32_RL_1); \
                }                                                      \
                !__pl_r; /* return value */                            \
        }) : ({                                                        \
                void __unsupported_argument_size_for_pl_try_s__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_try_s__(__FILE__,__LINE__); \
                0;                                                     \
        })                                                             \
)

/* request a seek access (S) and wait for it */
#define pl_take_s(lock)                                                \
        do {                                                           \
                while (__builtin_expect(pl_try_s(lock), 0) == 0)       \
                        pl_cpu_relax();                                \
        } while (0)

/* release the seek access (S) lock */
#define pl_drop_s(lock) (                                              \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({               \
                pl_sub(lock, PLOCK64_SL_1 + PLOCK64_RL_1);             \
        }) : (sizeof(*(lock)) == 4) ? ({                               \
                pl_sub(lock, PLOCK32_SL_1 + PLOCK32_RL_1);             \
        }) : ({                                                        \
                void __unsupported_argument_size_for_pl_drop_s__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_drop_s__(__FILE__,__LINE__); \
        })                                                             \
)

/* drop the S lock and go back to the R lock */
#define pl_stor(lock) (                                                \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({               \
                pl_sub(lock, PLOCK64_SL_1);                            \
        }) : (sizeof(*(lock)) == 4) ? ({                               \
                pl_sub(lock, PLOCK32_SL_1);                            \
        }) : ({                                                        \
                void __unsupported_argument_size_for_pl_stor__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_stor__(__FILE__,__LINE__); \
        })                                                             \
)

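/* Usage sketch for the S lock: a seeker inspects the data and, deciding not
 * to write, steps down to a plain read lock with pl_stor(). Assumes a
 * zero-initialized unsigned long lock word.
 */
static inline void pl_example_seek_downgrade(unsigned long *lock)
{
        pl_take_s(lock);        /* S: excludes other seekers and writers */
        /* ... inspect shared data; suppose no modification is needed ... */
        pl_stor(lock);          /* drop the S part, keep our R count */
        /* ... finish reading under the plain R lock ... */
        pl_drop_r(lock);
}
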
/* take the W lock under the S lock */
#define pl_stow(lock) (                                                \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({               \
                unsigned long __pl_r = pl_xadd((lock), PLOCK64_WL_1);  \
                pl_barrier();                                          \
                while ((__pl_r & PLOCK64_RL_ANY) != PLOCK64_RL_1)      \
                        __pl_r = pl_deref_long(lock);                  \
        }) : (sizeof(*(lock)) == 4) ? ({                               \
                unsigned int __pl_r = pl_xadd((lock), PLOCK32_WL_1);   \
                pl_barrier();                                          \
                while ((__pl_r & PLOCK32_RL_ANY) != PLOCK32_RL_1)      \
                        __pl_r = pl_deref_int(lock);                   \
        }) : ({                                                        \
                void __unsupported_argument_size_for_pl_stow__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_stow__(__FILE__,__LINE__); \
        })                                                             \
)

/* drop the W lock and go back to the S lock */
#define pl_wtos(lock) (                                                \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({               \
                pl_sub(lock, PLOCK64_WL_1);                            \
        }) : (sizeof(*(lock)) == 4) ? ({                               \
                pl_sub(lock, PLOCK32_WL_1);                            \
        }) : ({                                                        \
                void __unsupported_argument_size_for_pl_wtos__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_wtos__(__FILE__,__LINE__); \
        })                                                             \
)

/* drop the W lock and go back to the R lock */
#define pl_wtor(lock) (                                                \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({               \
                pl_sub(lock, PLOCK64_WL_1 | PLOCK64_SL_1);             \
        }) : (sizeof(*(lock)) == 4) ? ({                               \
                pl_sub(lock, PLOCK32_WL_1 | PLOCK32_SL_1);             \
        }) : ({                                                        \
                void __unsupported_argument_size_for_pl_wtor__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_wtor__(__FILE__,__LINE__); \
        })                                                             \
)

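/* Usage sketch for the seek-then-upgrade pattern: inspect under S, upgrade
 * to W only once a modification is needed, then step back down. Assumes a
 * zero-initialized unsigned long lock word.
 */
static inline void pl_example_seek_upgrade(unsigned long *lock)
{
        pl_take_s(lock);        /* S: read with the exclusive right to upgrade */
        /* ... inspect shared data; suppose a modification is needed ... */
        pl_stow(lock);          /* wait for remaining readers to leave, take W */
        /* ... modify shared data ... */
        pl_wtos(lock);          /* back down to S (pl_wtor() would go to R) */
        pl_drop_s(lock);        /* release S and its R part */
}
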
/* request a write access (W), return non-zero on success, otherwise 0.
 *
 * Below there is something important: by taking both W and S, we will cause
 * an overflow of W at 4/5 of the maximum value that can be stored into W due
 * to the fact that S is 2 bits, so we're effectively adding 5 to the word
 * composed by W:S. But for any word width that is a multiple of 4 bits, the
 * maximum value is a multiple of 15, thus of 5. So the largest value we can
 * store with all bits set to one will be met exactly by adding 5, and then
 * adding 5 again will place value 1 in W and value 0 in S, so we never leave
 * W with 0. Also, even upon such an overflow, there is no risk of confusing
 * it with an atomic lock because R is not null since it will not have
 * overflowed. For 32-bit locks, this situation happens when exactly 13108
 * threads try to grab the lock at once, with W=1, S=0 and R=13108. For 64-bit
 * locks, it happens at 858993460 concurrent writers, where W=1, S=0 and
 * R=858993460.
 */
#define pl_try_w(lock) (                                               \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({               \
                unsigned long __pl_r = pl_deref_long(lock);            \
                pl_barrier();                                          \
                if (!__builtin_expect(__pl_r & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
                        __pl_r = pl_xadd((lock), PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1); \
                        if (__builtin_expect(__pl_r & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
                                /* a writer, seeker or atomic is present, let's leave */ \
                                pl_sub((lock), PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1); \
                                __pl_r &= (PLOCK64_WL_ANY | PLOCK64_SL_ANY); /* return value */ \
                        } else {                                       \
                                /* wait for all other readers to leave */ \
                                while (__pl_r)                         \
                                        __pl_r = pl_deref_long(lock) - \
                                                (PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1); \
                        }                                              \
                }                                                      \
                !__pl_r; /* return value */                            \
        }) : (sizeof(*(lock)) == 4) ? ({                               \
                unsigned int __pl_r = pl_deref_int(lock);              \
                pl_barrier();                                          \
                if (!__builtin_expect(__pl_r & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
                        __pl_r = pl_xadd((lock), PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1); \
                        if (__builtin_expect(__pl_r & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
                                /* a writer, seeker or atomic is present, let's leave */ \
                                pl_sub((lock), PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1); \
                                __pl_r &= (PLOCK32_WL_ANY | PLOCK32_SL_ANY); /* return value */ \
                        } else {                                       \
                                /* wait for all other readers to leave */ \
                                while (__pl_r)                         \
                                        __pl_r = pl_deref_int(lock) -  \
                                                (PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1); \
                        }                                              \
                }                                                      \
                !__pl_r; /* return value */                            \
        }) : ({                                                        \
                void __unsupported_argument_size_for_pl_try_w__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_try_w__(__FILE__,__LINE__); \
                0;                                                     \
        })                                                             \
)

/* request a write access (W) and wait for it */
#define pl_take_w(lock)                                                \
        do {                                                           \
                while (__builtin_expect(pl_try_w(lock), 0) == 0)       \
                        pl_cpu_relax();                                \
        } while (0)

/* drop the write (W) lock entirely */
#define pl_drop_w(lock) (                                              \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({               \
                pl_sub(lock, PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1); \
        }) : (sizeof(*(lock)) == 4) ? ({                               \
                pl_sub(lock, PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1); \
        }) : ({                                                        \
                void __unsupported_argument_size_for_pl_drop_w__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_drop_w__(__FILE__,__LINE__); \
        })                                                             \
)

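/* Usage sketch for the W lock: a plain exclusive writer. pl_take_w() spins
 * until no seeker, writer or other reader remains. Assumes a zero-initialized
 * unsigned long lock word shared by all participants.
 */
static inline void pl_example_writer(unsigned long *lock)
{
        pl_take_w(lock);        /* fully exclusive once it returns */
        /* ... modify shared data ... */
        pl_drop_w(lock);        /* drops W, S and R in one atomic subtraction */
}
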
/* Try to upgrade from R to S, return non-zero on success, otherwise 0.
 * This lock will fail if S or W are already held. In case of failure to grab
 * the lock, it MUST NOT be retried without first dropping R, or it may never
 * complete due to S waiting for R to leave before upgrading to W.
 */
#define pl_try_rtos(lock) (                                            \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({               \
                unsigned long __pl_r = pl_deref_long(lock);            \
                pl_barrier();                                          \
                if (!__builtin_expect(__pl_r & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
                        __pl_r = pl_xadd((lock), PLOCK64_SL_1) &       \
                                (PLOCK64_WL_ANY | PLOCK64_SL_ANY);     \
                        if (__builtin_expect(__pl_r, 0))               \
                                pl_sub((lock), PLOCK64_SL_1);          \
                }                                                      \
                !__pl_r; /* return value */                            \
        }) : (sizeof(*(lock)) == 4) ? ({                               \
                unsigned int __pl_r = pl_deref_int(lock);              \
                pl_barrier();                                          \
                if (!__builtin_expect(__pl_r & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
                        __pl_r = pl_xadd((lock), PLOCK32_SL_1) &       \
                                (PLOCK32_WL_ANY | PLOCK32_SL_ANY);     \
                        if (__builtin_expect(__pl_r, 0))               \
                                pl_sub((lock), PLOCK32_SL_1);          \
                }                                                      \
                !__pl_r; /* return value */                            \
        }) : ({                                                        \
                void __unsupported_argument_size_for_pl_try_rtos__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_try_rtos__(__FILE__,__LINE__); \
                0;                                                     \
        })                                                             \
)


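/* Usage sketch for pl_try_rtos(): a reader that tries to upgrade in place.
 * As stated above, a failed attempt requires dropping R before retrying, so
 * the fallback path restarts from scratch with a plain seek access. Assumes
 * a zero-initialized unsigned long lock word.
 */
static inline void pl_example_upgrade(unsigned long *lock)
{
        pl_take_r(lock);
        /* ... read shared data, then decide an update is needed ... */
        if (!pl_try_rtos(lock)) {
                pl_drop_r(lock);        /* mandatory before retrying */
                pl_take_s(lock);        /* start over with a seek access */
                /* ... re-inspect the data, it may have changed ... */
        }
        pl_stow(lock);                  /* S -> W once other readers are gone */
        /* ... modify shared data ... */
        pl_drop_w(lock);                /* releases W, S and R at once */
}
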
/* request atomic write access (A), return non-zero on success, otherwise 0.
 * It's a bit tricky as we only use the W bits for this and want to distinguish
 * between other atomic users and regular lock users. We have to give up if an
 * S lock appears. It's possible that such a lock stays hidden in the W bits
 * after an overflow, but in this case R is still held, ensuring we stay in the
 * loop until we discover the conflict. The lock only returns successfully if
 * all readers are gone (or converted to A).
 */
#define pl_try_a(lock) (                                               \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({               \
                unsigned long __pl_r = pl_deref_long(lock) & PLOCK64_SL_ANY; \
                pl_barrier();                                          \
                if (!__builtin_expect(__pl_r, 0)) {                    \
                        __pl_r = pl_xadd((lock), PLOCK64_WL_1);        \
                        while (1) {                                    \
                                if (__builtin_expect(__pl_r & PLOCK64_SL_ANY, 0)) { \
                                        pl_sub((lock), PLOCK64_WL_1);  \
                                        break;  /* return !__pl_r */   \
                                }                                      \
                                __pl_r &= PLOCK64_RL_ANY;              \
                                if (!__builtin_expect(__pl_r, 0))      \
                                        break;  /* return !__pl_r */   \
                                __pl_r = pl_deref_long(lock);          \
                        }                                              \
                }                                                      \
                !__pl_r; /* return value */                            \
        }) : (sizeof(*(lock)) == 4) ? ({                               \
                unsigned int __pl_r = pl_deref_int(lock) & PLOCK32_SL_ANY; \
                pl_barrier();                                          \
                if (!__builtin_expect(__pl_r, 0)) {                    \
                        __pl_r = pl_xadd((lock), PLOCK32_WL_1);        \
                        while (1) {                                    \
                                if (__builtin_expect(__pl_r & PLOCK32_SL_ANY, 0)) { \
                                        pl_sub((lock), PLOCK32_WL_1);  \
                                        break;  /* return !__pl_r */   \
                                }                                      \
                                __pl_r &= PLOCK32_RL_ANY;              \
                                if (!__builtin_expect(__pl_r, 0))      \
                                        break;  /* return !__pl_r */   \
                                __pl_r = pl_deref_int(lock);           \
                        }                                              \
                }                                                      \
                !__pl_r; /* return value */                            \
        }) : ({                                                        \
                void __unsupported_argument_size_for_pl_try_a__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_try_a__(__FILE__,__LINE__); \
                0;                                                     \
        })                                                             \
)

/* request atomic write access (A) and wait for it */
#define pl_take_a(lock)                                                \
        do {                                                           \
                while (__builtin_expect(pl_try_a(lock), 1) == 0)       \
                        pl_cpu_relax();                                \
        } while (0)

/* release atomic write access (A) lock */
#define pl_drop_a(lock) (                                              \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({               \
                pl_sub(lock, PLOCK64_WL_1);                            \
        }) : (sizeof(*(lock)) == 4) ? ({                               \
                pl_sub(lock, PLOCK32_WL_1);                            \
        }) : ({                                                        \
                void __unsupported_argument_size_for_pl_drop_a__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_drop_a__(__FILE__,__LINE__); \
        })                                                             \
)

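/* Usage sketch for the A lock: since atomic users all sit in the W bits,
 * several A holders may coexist, so the protected data must only be touched
 * with atomic operations. Assumes a zero-initialized unsigned long lock word.
 */
static inline void pl_example_atomic_writer(unsigned long *lock)
{
        pl_take_a(lock);        /* waits for plain readers and seekers to leave */
        /* ... update shared data using atomic operations only ... */
        pl_drop_a(lock);        /* remove our W unit */
}
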
/* Try to upgrade from R to A, return non-zero on success, otherwise 0.
 * This lock will fail if S is held or appears while waiting (typically due to
 * a previous grab that was disguised as a W due to an overflow). In case of
 * failure to grab the lock, it MUST NOT be retried without first dropping R,
 * or it may never complete due to S waiting for R to leave before upgrading
 * to W. The lock succeeds once there's no more R (i.e. all of them have
 * either completed or were turned to A).
 */
#define pl_try_rtoa(lock) (                                            \
        (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({               \
                unsigned long __pl_r = pl_deref_long(lock) & PLOCK64_SL_ANY; \
                pl_barrier();                                          \
                if (!__builtin_expect(__pl_r, 0)) {                    \
                        __pl_r = pl_xadd((lock), PLOCK64_WL_1 - PLOCK64_RL_1); \
                        while (1) {                                    \
                                if (__builtin_expect(__pl_r & PLOCK64_SL_ANY, 0)) { \
                                        pl_sub((lock), PLOCK64_WL_1 - PLOCK64_RL_1); \
                                        break;  /* return !__pl_r */   \
                                }                                      \
                                __pl_r &= PLOCK64_RL_ANY;              \
                                if (!__builtin_expect(__pl_r, 0))      \
                                        break;  /* return !__pl_r */   \
                                __pl_r = pl_deref_long(lock);          \
                        }                                              \
                }                                                      \
                !__pl_r; /* return value */                            \
        }) : (sizeof(*(lock)) == 4) ? ({                               \
                unsigned int __pl_r = pl_deref_int(lock) & PLOCK32_SL_ANY; \
                pl_barrier();                                          \
                if (!__builtin_expect(__pl_r, 0)) {                    \
                        __pl_r = pl_xadd((lock), PLOCK32_WL_1 - PLOCK32_RL_1); \
                        while (1) {                                    \
                                if (__builtin_expect(__pl_r & PLOCK32_SL_ANY, 0)) { \
                                        pl_sub((lock), PLOCK32_WL_1 - PLOCK32_RL_1); \
                                        break;  /* return !__pl_r */   \
                                }                                      \
                                __pl_r &= PLOCK32_RL_ANY;              \
                                if (!__builtin_expect(__pl_r, 0))      \
                                        break;  /* return !__pl_r */   \
                                __pl_r = pl_deref_int(lock);           \
                        }                                              \
                }                                                      \
                !__pl_r; /* return value */                            \
        }) : ({                                                        \
                void __unsupported_argument_size_for_pl_try_rtoa__(char *,int); \
                if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
                        __unsupported_argument_size_for_pl_try_rtoa__(__FILE__,__LINE__); \
                0;                                                     \
        })                                                             \
)
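
/* Usage sketch for pl_try_rtoa(): a reader converting itself to an atomic
 * writer. As with pl_try_rtos(), a failed attempt requires dropping R before
 * starting over. Assumes a zero-initialized unsigned long lock word.
 */
static inline void pl_example_rtoa(unsigned long *lock)
{
        pl_take_r(lock);
        /* ... read shared data ... */
        if (pl_try_rtoa(lock)) {
                /* our R was converted to A */
                /* ... update shared data using atomic operations only ... */
                pl_drop_a(lock);
        } else {
                pl_drop_r(lock);        /* mandatory before any retry */
                pl_take_a(lock);        /* start over with a plain A request */
                /* ... re-inspect and update with atomic operations ... */
                pl_drop_a(lock);
        }
}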