/*
 * include/common/hathreads.h
 * definitions, macros and inline functions about threads.
 *
 * Copyright (C) 2017 Christopher Faulet - cfaulet@haproxy.com
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _COMMON_HATHREADS_H
#define _COMMON_HATHREADS_H

#include <common/config.h>
#include <common/initcall.h>

/* Note about all_threads_mask:
 * - it holds between 1 and LONGBITS set bits.
 * - with thread support disabled, this symbol is defined as the constant 1UL.
 * - with threads enabled, it contains the mask of enabled threads. Thus if
 *   only one thread is enabled, it equals 1.
 */
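
/* For example, code that must visit every started thread can scan the mask
 * bit by bit (illustrative sketch only; my_ffsl() stands in for whatever
 * find-first-set helper is available, returning 1 for the lowest bit):
 *
 *      unsigned long m;
 *      int t;
 *
 *      for (m = all_threads_mask; m; m &= m - 1) {
 *              t = my_ffsl(m) - 1;
 *              ...  // act on thread number <t>
 *      }
 */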

#ifndef USE_THREAD

#define MAX_THREADS 1
#define MAX_THREADS_MASK 1

/* Only way found to replace variables with constants that are optimized away
 * at build time.
 */
enum { all_threads_mask = 1UL };
enum { tid_bit = 1UL };
enum { tid = 0 };
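
/* Note: unlike a "static const" variable, an enumerator is a true
 * compile-time constant in C, so a test such as "if (mask & tid_bit)"
 * can be folded away entirely in the non-threaded build.
 */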

#define __decl_hathreads(decl)
#define __decl_spinlock(lock)
#define __decl_aligned_spinlock(lock)
#define __decl_rwlock(lock)
#define __decl_aligned_rwlock(lock)

#define HA_ATOMIC_CAS(val, old, new) ({((*val) == (*old)) ? (*(val) = (new) , 1) : (*(old) = *(val), 0);})
#define HA_ATOMIC_ADD(val, i) ({*(val) += (i);})
#define HA_ATOMIC_SUB(val, i) ({*(val) -= (i);})
#define HA_ATOMIC_XADD(val, i) \
        ({ \
                typeof((val)) __p_xadd = (val); \
                typeof(*(val)) __old_xadd = *__p_xadd; \
                *__p_xadd += i; \
                __old_xadd; \
        })
#define HA_ATOMIC_AND(val, flags) ({*(val) &= (flags);})
#define HA_ATOMIC_OR(val, flags) ({*(val) |= (flags);})
#define HA_ATOMIC_XCHG(val, new) \
        ({ \
                typeof(*(val)) __old_xchg = *(val); \
                *(val) = new; \
                __old_xchg; \
        })
#define HA_ATOMIC_BTS(val, bit) \
        ({ \
                typeof((val)) __p_bts = (val); \
                typeof(*__p_bts) __b_bts = (1UL << (bit)); \
                typeof(*__p_bts) __t_bts = *__p_bts & __b_bts; \
                if (!__t_bts) \
                        *__p_bts |= __b_bts; \
                __t_bts; \
        })
#define HA_ATOMIC_BTR(val, bit) \
        ({ \
                typeof((val)) __p_btr = (val); \
                typeof(*__p_btr) __b_btr = (1UL << (bit)); \
                typeof(*__p_btr) __t_btr = *__p_btr & __b_btr; \
                if (__t_btr) \
                        *__p_btr &= ~__b_btr; \
                __t_btr; \
        })
#define HA_ATOMIC_LOAD(val) *(val)
#define HA_ATOMIC_STORE(val, new) ({*(val) = new;})
#define HA_ATOMIC_UPDATE_MAX(val, new) \
        ({ \
                typeof(*(val)) __new_max = (new); \
                \
                if (*(val) < __new_max) \
                        *(val) = __new_max; \
                *(val); \
        })

#define HA_ATOMIC_UPDATE_MIN(val, new) \
        ({ \
                typeof(*(val)) __new_min = (new); \
                \
                if (*(val) > __new_min) \
                        *(val) = __new_min; \
                *(val); \
        })

#define HA_BARRIER() do { } while (0)

#define HA_SPIN_INIT(l)         do { /* do nothing */ } while(0)
#define HA_SPIN_DESTROY(l)      do { /* do nothing */ } while(0)
#define HA_SPIN_LOCK(lbl, l)    do { /* do nothing */ } while(0)
#define HA_SPIN_TRYLOCK(lbl, l) ({ 0; })
#define HA_SPIN_UNLOCK(lbl, l)  do { /* do nothing */ } while(0)

#define HA_RWLOCK_INIT(l)          do { /* do nothing */ } while(0)
#define HA_RWLOCK_DESTROY(l)       do { /* do nothing */ } while(0)
#define HA_RWLOCK_WRLOCK(lbl, l)   do { /* do nothing */ } while(0)
#define HA_RWLOCK_TRYWRLOCK(lbl, l) ({ 0; })
#define HA_RWLOCK_WRUNLOCK(lbl, l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_RDLOCK(lbl, l)   do { /* do nothing */ } while(0)
#define HA_RWLOCK_TRYRDLOCK(lbl, l) ({ 0; })
#define HA_RWLOCK_RDUNLOCK(lbl, l) do { /* do nothing */ } while(0)

#define ha_sigmask(how, set, oldset) sigprocmask(how, set, oldset)

static inline void ha_set_tid(unsigned int tid)
{
}

static inline void __ha_barrier_atomic_load(void)
{
}

static inline void __ha_barrier_atomic_store(void)
{
}

static inline void __ha_barrier_atomic_full(void)
{
}

static inline void __ha_barrier_load(void)
{
}

static inline void __ha_barrier_store(void)
{
}

static inline void __ha_barrier_full(void)
{
}

static inline int __ha_cas_dw(void *target, void *compare, void *set)
{
        return HA_ATOMIC_CAS(target, compare, set);
}

static inline void thread_harmless_now()
{
}

static inline void thread_harmless_end()
{
}

static inline void thread_isolate()
{
}

static inline void thread_release()
{
}

static inline unsigned long thread_isolated()
{
        return 1;
}

#else /* USE_THREAD */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <import/plock.h>

#ifndef MAX_THREADS
#define MAX_THREADS LONGBITS
#endif

#define MAX_THREADS_MASK (~0UL >> (LONGBITS - MAX_THREADS))

#define __decl_hathreads(decl) decl

/* declare a self-initializing spinlock */
#define __decl_spinlock(lock) \
        HA_SPINLOCK_T (lock); \
        INITCALL1(STG_LOCK, ha_spin_init, &(lock))

/* declare a self-initializing spinlock, aligned on a cache line */
#define __decl_aligned_spinlock(lock) \
        HA_SPINLOCK_T (lock) __attribute__((aligned(64))); \
        INITCALL1(STG_LOCK, ha_spin_init, &(lock))

/* declare a self-initializing rwlock */
#define __decl_rwlock(lock) \
        HA_RWLOCK_T (lock); \
        INITCALL1(STG_LOCK, ha_rwlock_init, &(lock))

/* declare a self-initializing rwlock, aligned on a cache line */
#define __decl_aligned_rwlock(lock) \
        HA_RWLOCK_T (lock) __attribute__((aligned(64))); \
        INITCALL1(STG_LOCK, ha_rwlock_init, &(lock))
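
/* Example file-scope declaration (illustrative sketch; "my_lock" and "f" are
 * hypothetical names, and the lock label argument, here OTHER_LOCK, is only
 * used by the DEBUG_THREAD variants defined further down):
 *
 *      __decl_spinlock(my_lock);   // the INITCALL initializes it at boot
 *
 *      void f(void)
 *      {
 *              HA_SPIN_LOCK(OTHER_LOCK, &my_lock);
 *              ...  // touch the data protected by my_lock
 *              HA_SPIN_UNLOCK(OTHER_LOCK, &my_lock);
 *      }
 */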

/* TODO: thread: For now, we rely on GCC builtins but it could be a good idea to
 * have a header file regrouping all functions dealing with threads. */

#if defined(__GNUC__) && (__GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ < 7) && !defined(__clang__)
/* gcc < 4.7 */

#define HA_ATOMIC_ADD(val, i)        __sync_add_and_fetch(val, i)
#define HA_ATOMIC_SUB(val, i)        __sync_sub_and_fetch(val, i)
#define HA_ATOMIC_XADD(val, i)       __sync_fetch_and_add(val, i)
#define HA_ATOMIC_AND(val, flags)    __sync_and_and_fetch(val, flags)
#define HA_ATOMIC_OR(val, flags)     __sync_or_and_fetch(val, flags)

/* the CAS is a bit complicated. The older API doesn't support returning the
 * value and the swap's result at the same time. So here we take what looks
 * like the safest route, consisting in using the boolean version guaranteeing
 * that the operation was performed or not, and we snoop a previous value. If
 * the compare succeeds, we return. If it fails, we return the previous value,
 * but only if it differs from the expected one. If it's the same it's a race
 * thus we try again to avoid confusing a possibly sensitive caller.
 */
#define HA_ATOMIC_CAS(val, old, new) \
        ({ \
                typeof((val)) __val_cas = (val); \
                typeof((old)) __oldp_cas = (old); \
                typeof(*(old)) __oldv_cas; \
                typeof((new)) __new_cas = (new); \
                int __ret_cas; \
                do { \
                        __oldv_cas = *__val_cas; \
                        __ret_cas = __sync_bool_compare_and_swap(__val_cas, *__oldp_cas, __new_cas); \
                } while (!__ret_cas && *__oldp_cas == __oldv_cas); \
                if (!__ret_cas) \
                        *__oldp_cas = __oldv_cas; \
                __ret_cas; \
        })
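
/* Typical caller pattern (illustrative sketch, "counter" being a hypothetical
 * shared variable), valid for both compiler families: on failure the expected
 * value is refreshed, so the loop simply retries with the updated value:
 *
 *      unsigned int old, new;
 *
 *      old = *counter;
 *      do {
 *              new = old + 1;
 *      } while (!HA_ATOMIC_CAS(counter, &old, new));
 */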

#define HA_ATOMIC_XCHG(val, new) \
        ({ \
                typeof((val)) __val_xchg = (val); \
                typeof(*(val)) __old_xchg; \
                typeof((new)) __new_xchg = (new); \
                do { __old_xchg = *__val_xchg; \
                } while (!__sync_bool_compare_and_swap(__val_xchg, __old_xchg, __new_xchg)); \
                __old_xchg; \
        })

#define HA_ATOMIC_BTS(val, bit) \
        ({ \
                typeof(*(val)) __b_bts = (1UL << (bit)); \
                __sync_fetch_and_or((val), __b_bts) & __b_bts; \
        })

#define HA_ATOMIC_BTR(val, bit) \
        ({ \
                typeof(*(val)) __b_btr = (1UL << (bit)); \
                __sync_fetch_and_and((val), ~__b_btr) & __b_btr; \
        })

#define HA_ATOMIC_LOAD(val) \
        ({ \
                typeof(*(val)) ret; \
                __sync_synchronize(); \
                ret = *(volatile typeof(val))val; \
                __sync_synchronize(); \
                ret; \
        })

#define HA_ATOMIC_STORE(val, new) \
        ({ \
                typeof((val)) __val_store = (val); \
                typeof(*(val)) __old_store; \
                typeof((new)) __new_store = (new); \
                do { __old_store = *__val_store; \
                } while (!__sync_bool_compare_and_swap(__val_store, __old_store, __new_store)); \
        })
#else
/* gcc >= 4.7 */
#define HA_ATOMIC_CAS(val, old, new) __atomic_compare_exchange_n(val, old, new, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_ADD(val, i)        __atomic_add_fetch(val, i, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_XADD(val, i)       __atomic_fetch_add(val, i, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_SUB(val, i)        __atomic_sub_fetch(val, i, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_AND(val, flags)    __atomic_and_fetch(val, flags, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_OR(val, flags)     __atomic_or_fetch(val, flags, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_BTS(val, bit) \
        ({ \
                typeof(*(val)) __b_bts = (1UL << (bit)); \
                __sync_fetch_and_or((val), __b_bts) & __b_bts; \
        })

#define HA_ATOMIC_BTR(val, bit) \
        ({ \
                typeof(*(val)) __b_btr = (1UL << (bit)); \
                __sync_fetch_and_and((val), ~__b_btr) & __b_btr; \
        })

#define HA_ATOMIC_XCHG(val, new)     __atomic_exchange_n(val, new, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_STORE(val, new)    __atomic_store_n(val, new, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_LOAD(val)          __atomic_load_n(val, __ATOMIC_SEQ_CST)

/* Variants that don't generate any memory barrier.
 * If you're unsure how to deal with barriers, just use the HA_ATOMIC_*
 * versions, which always generate correct code.
 * Usually it's fine to use these when updating data that have no dependency,
 * e.g. when updating a counter. Otherwise a barrier is required.
 */
#define _HA_ATOMIC_CAS(val, old, new) __atomic_compare_exchange_n(val, old, new, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
#define _HA_ATOMIC_ADD(val, i)        __atomic_add_fetch(val, i, __ATOMIC_RELAXED)
#define _HA_ATOMIC_XADD(val, i)       __atomic_fetch_add(val, i, __ATOMIC_RELAXED)
#define _HA_ATOMIC_SUB(val, i)        __atomic_sub_fetch(val, i, __ATOMIC_RELAXED)
#define _HA_ATOMIC_AND(val, flags)    __atomic_and_fetch(val, flags, __ATOMIC_RELAXED)
#define _HA_ATOMIC_OR(val, flags)     __atomic_or_fetch(val, flags, __ATOMIC_RELAXED)
#define _HA_ATOMIC_XCHG(val, new)     __atomic_exchange_n(val, new, __ATOMIC_RELAXED)
#define _HA_ATOMIC_STORE(val, new)    __atomic_store_n(val, new, __ATOMIC_RELAXED)
#define _HA_ATOMIC_LOAD(val)          __atomic_load_n(val, __ATOMIC_RELAXED)
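
/* Example (illustrative sketch; the names below are hypothetical): a purely
 * statistical counter can safely use the relaxed form:
 *
 *      _HA_ATOMIC_ADD(&accepted_conns, 1);
 *
 * while a flag that another thread tests to decide what to do next must keep
 * the ordered form so that prior writes are visible first:
 *
 *      HA_ATOMIC_OR(&shared_flags, FLAG_READY);
 */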

#endif /* gcc >= 4.7 */

#define HA_ATOMIC_UPDATE_MAX(val, new) \
        ({ \
                typeof(*(val)) __old_max = *(val); \
                typeof(*(val)) __new_max = (new); \
                \
                while (__old_max < __new_max && \
                       !HA_ATOMIC_CAS(val, &__old_max, __new_max)); \
                *(val); \
        })
#define HA_ATOMIC_UPDATE_MIN(val, new) \
        ({ \
                typeof(*(val)) __old_min = *(val); \
                typeof(*(val)) __new_min = (new); \
                \
                while (__old_min > __new_min && \
                       !HA_ATOMIC_CAS(val, &__old_min, __new_min)); \
                *(val); \
        })

#define HA_BARRIER() pl_barrier()

void thread_harmless_till_end();
void thread_isolate();
void thread_release();

extern THREAD_LOCAL unsigned int tid;      /* The thread id */
extern THREAD_LOCAL unsigned long tid_bit; /* The bit corresponding to the thread id */
extern volatile unsigned long all_threads_mask;
extern volatile unsigned long threads_want_rdv_mask;
extern volatile unsigned long threads_harmless_mask;

/* explanation for threads_want_rdv_mask and threads_harmless_mask:
 * - threads_want_rdv_mask is a bit field indicating all threads that have
 *   requested a rendez-vous of other threads using thread_isolate().
 * - threads_harmless_mask is a bit field indicating all threads that are
 *   currently harmless in that they promise not to access a shared resource.
 *
 * For a given thread, its bits in want_rdv and harmless can be translated like
 * this:
 *
 *  ----------+----------+----------------------------------------------------
 *   want_rdv | harmless | description
 *  ----------+----------+----------------------------------------------------
 *       0    |     0    | thread not interested in RDV, possibly harmful
 *       0    |     1    | thread not interested in RDV but harmless
 *       1    |     1    | thread interested in RDV and waiting for its turn
 *       1    |     0    | thread currently working isolated from others
 *  ----------+----------+----------------------------------------------------
 */
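
/* Typical use of the rendez-vous point (illustrative sketch): a thread that
 * needs to be alone to modify a structure shared with all other threads can
 * do it this way:
 *
 *      thread_isolate();   // returns once all other threads are harmless
 *      ...                 // safely rebuild the shared structure
 *      thread_release();   // lets the other threads resume their work
 */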

#define ha_sigmask(how, set, oldset) pthread_sigmask(how, set, oldset)

/* sets the thread ID and the TID bit for the current thread */
static inline void ha_set_tid(unsigned int data)
{
        tid     = data;
        tid_bit = (1UL << tid);
}

/* Marks the thread as harmless. Note: this must be true, i.e. the thread must
 * not be touching any unprotected shared resource during this period. Usually
 * this is called before poll(), but it may also be placed around very slow
 * calls (eg: some crypto operations). Needs to be terminated using
 * thread_harmless_end().
 */
static inline void thread_harmless_now()
{
        HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
}

/* Ends the harmless period started by thread_harmless_now(). Usually this is
 * placed after the poll() call. If it is discovered that a job was running and
 * is relying on the thread still being harmless, the thread waits for the
 * other one to finish.
 */
static inline void thread_harmless_end()
{
        while (1) {
                HA_ATOMIC_AND(&threads_harmless_mask, ~tid_bit);
                if (likely((threads_want_rdv_mask & all_threads_mask) == 0))
                        break;
                thread_harmless_till_end();
        }
}
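
/* Illustrative usage around a blocking call (sketch; fds/nbfd/wait_time are
 * hypothetical poll() arguments):
 *
 *      thread_harmless_now();                // promise: no shared access below
 *      status = poll(fds, nbfd, wait_time);  // may sleep for a while
 *      thread_harmless_end();                // may briefly wait if another
 *                                            // thread is currently isolated
 */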

/* an isolated thread has harmless cleared and want_rdv set */
static inline unsigned long thread_isolated()
{
        return threads_want_rdv_mask & ~threads_harmless_mask & tid_bit;
}


#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)

/* WARNING!!! if you update this enum, please also keep lock_label() up to date below */
enum lock_label {
        FD_LOCK,
        TASK_RQ_LOCK,
        TASK_WQ_LOCK,
        POOL_LOCK,
        LISTENER_LOCK,
        PROXY_LOCK,
        SERVER_LOCK,
        LBPRM_LOCK,
        SIGNALS_LOCK,
        STK_TABLE_LOCK,
        STK_SESS_LOCK,
        APPLETS_LOCK,
        PEER_LOCK,
        BUF_WQ_LOCK,
        STRMS_LOCK,
        SSL_LOCK,
        SSL_GEN_CERTS_LOCK,
        PATREF_LOCK,
        PATEXP_LOCK,
        PATLRU_LOCK,
        VARS_LOCK,
        COMP_POOL_LOCK,
        LUA_LOCK,
        NOTIF_LOCK,
        SPOE_APPLET_LOCK,
        DNS_LOCK,
        PID_LIST_LOCK,
        EMAIL_ALERTS_LOCK,
        PIPES_LOCK,
        START_LOCK,
        TLSKEYS_REF_LOCK,
        AUTH_LOCK,
        LOGSRV_LOCK,
        OTHER_LOCK,
        LOCK_LABELS
};
struct lock_stat {
        uint64_t nsec_wait_for_write;
        uint64_t nsec_wait_for_read;
        uint64_t num_write_locked;
        uint64_t num_write_unlocked;
        uint64_t num_read_locked;
        uint64_t num_read_unlocked;
};

extern struct lock_stat lock_stats[LOCK_LABELS];

#define __HA_SPINLOCK_T unsigned long

#define __SPIN_INIT(l)         ({ (*l) = 0; })
#define __SPIN_DESTROY(l)      ({ (*l) = 0; })
#define __SPIN_LOCK(l)         pl_take_s(l)
#define __SPIN_TRYLOCK(l)      !pl_try_s(l)
#define __SPIN_UNLOCK(l)       pl_drop_s(l)

#define __HA_RWLOCK_T unsigned long

#define __RWLOCK_INIT(l)       ({ (*l) = 0; })
#define __RWLOCK_DESTROY(l)    ({ (*l) = 0; })
#define __RWLOCK_WRLOCK(l)     pl_take_w(l)
#define __RWLOCK_TRYWRLOCK(l)  !pl_try_w(l)
#define __RWLOCK_WRUNLOCK(l)   pl_drop_w(l)
#define __RWLOCK_RDLOCK(l)     pl_take_r(l)
#define __RWLOCK_TRYRDLOCK(l)  !pl_try_r(l)
#define __RWLOCK_RDUNLOCK(l)   pl_drop_r(l)

#define HA_SPINLOCK_T struct ha_spinlock

#define HA_SPIN_INIT(l)         __spin_init(l)
#define HA_SPIN_DESTROY(l)      __spin_destroy(l)

#define HA_SPIN_LOCK(lbl, l)    __spin_lock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_SPIN_TRYLOCK(lbl, l) __spin_trylock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_SPIN_UNLOCK(lbl, l)  __spin_unlock(lbl, l, __func__, __FILE__, __LINE__)

#define HA_RWLOCK_T struct ha_rwlock

#define HA_RWLOCK_INIT(l)          __ha_rwlock_init((l))
#define HA_RWLOCK_DESTROY(l)       __ha_rwlock_destroy((l))
#define HA_RWLOCK_WRLOCK(lbl,l)    __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_TRYWRLOCK(lbl,l) __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_WRUNLOCK(lbl,l)  __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_RDLOCK(lbl,l)    __ha_rwlock_rdlock(lbl, l)
#define HA_RWLOCK_TRYRDLOCK(lbl,l) __ha_rwlock_tryrdlock(lbl, l)
#define HA_RWLOCK_RDUNLOCK(lbl,l)  __ha_rwlock_rdunlock(lbl, l)
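
/* Example (illustrative sketch; "tree_lock" is a hypothetical rwlock): every
 * locking call takes a label from enum lock_label, used only for debugging
 * and statistics:
 *
 *      HA_RWLOCK_RDLOCK(OTHER_LOCK, &tree_lock);
 *      ...  // read-only traversal of the protected structure
 *      HA_RWLOCK_RDUNLOCK(OTHER_LOCK, &tree_lock);
 */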

struct ha_spinlock {
        __HA_SPINLOCK_T lock;
        struct {
                unsigned long owner; /* a bit is set to 1 << tid for the lock owner */
                unsigned long waiters; /* a bit is set to 1 << tid for waiting threads */
                struct {
                        const char *function;
                        const char *file;
                        int line;
                } last_location; /* location of the last owner */
        } info;
};

struct ha_rwlock {
        __HA_RWLOCK_T lock;
        struct {
                unsigned long cur_writer; /* a bit is set to 1 << tid for the lock owner */
                unsigned long wait_writers; /* a bit is set to 1 << tid for waiting writers */
                unsigned long cur_readers; /* a bit is set to 1 << tid for current readers */
                unsigned long wait_readers; /* a bit is set to 1 << tid for waiting readers */
                struct {
                        const char *function;
                        const char *file;
                        int line;
                } last_location; /* location of the last write owner */
        } info;
};

static inline const char *lock_label(enum lock_label label)
{
        switch (label) {
        case FD_LOCK:            return "FD";
        case TASK_RQ_LOCK:       return "TASK_RQ";
        case TASK_WQ_LOCK:       return "TASK_WQ";
        case POOL_LOCK:          return "POOL";
        case LISTENER_LOCK:      return "LISTENER";
        case PROXY_LOCK:         return "PROXY";
        case SERVER_LOCK:        return "SERVER";
        case LBPRM_LOCK:         return "LBPRM";
        case SIGNALS_LOCK:       return "SIGNALS";
        case STK_TABLE_LOCK:     return "STK_TABLE";
        case STK_SESS_LOCK:      return "STK_SESS";
        case APPLETS_LOCK:       return "APPLETS";
        case PEER_LOCK:          return "PEER";
        case BUF_WQ_LOCK:        return "BUF_WQ";
        case STRMS_LOCK:         return "STRMS";
        case SSL_LOCK:           return "SSL";
        case SSL_GEN_CERTS_LOCK: return "SSL_GEN_CERTS";
        case PATREF_LOCK:        return "PATREF";
        case PATEXP_LOCK:        return "PATEXP";
        case PATLRU_LOCK:        return "PATLRU";
        case VARS_LOCK:          return "VARS";
        case COMP_POOL_LOCK:     return "COMP_POOL";
        case LUA_LOCK:           return "LUA";
        case NOTIF_LOCK:         return "NOTIF";
        case SPOE_APPLET_LOCK:   return "SPOE_APPLET";
        case DNS_LOCK:           return "DNS";
        case PID_LIST_LOCK:      return "PID_LIST";
        case EMAIL_ALERTS_LOCK:  return "EMAIL_ALERTS";
        case PIPES_LOCK:         return "PIPES";
        case START_LOCK:         return "START";
        case TLSKEYS_REF_LOCK:   return "TLSKEYS_REF";
        case AUTH_LOCK:          return "AUTH";
        case LOGSRV_LOCK:        return "LOGSRV";
        case OTHER_LOCK:         return "OTHER";
        case LOCK_LABELS: break; /* keep compiler happy */
        };
        /* the only way to get here is an internal bug */
        abort();
}

static inline void show_lock_stats()
{
        int lbl;

        for (lbl = 0; lbl < LOCK_LABELS; lbl++) {
                fprintf(stderr,
                        "Stats about Lock %s: \n"
                        "\t # write lock : %lu\n"
                        "\t # write unlock: %lu (%ld)\n"
                        "\t # wait time for write : %.3f msec\n"
                        "\t # wait time for write/lock: %.3f nsec\n"
                        "\t # read lock : %lu\n"
                        "\t # read unlock : %lu (%ld)\n"
                        "\t # wait time for read : %.3f msec\n"
                        "\t # wait time for read/lock : %.3f nsec\n",
                        lock_label(lbl),
                        lock_stats[lbl].num_write_locked,
                        lock_stats[lbl].num_write_unlocked,
                        lock_stats[lbl].num_write_unlocked - lock_stats[lbl].num_write_locked,
                        (double)lock_stats[lbl].nsec_wait_for_write / 1000000.0,
                        lock_stats[lbl].num_write_locked ? ((double)lock_stats[lbl].nsec_wait_for_write / (double)lock_stats[lbl].num_write_locked) : 0,
                        lock_stats[lbl].num_read_locked,
                        lock_stats[lbl].num_read_unlocked,
                        lock_stats[lbl].num_read_unlocked - lock_stats[lbl].num_read_locked,
                        (double)lock_stats[lbl].nsec_wait_for_read / 1000000.0,
                        lock_stats[lbl].num_read_locked ? ((double)lock_stats[lbl].nsec_wait_for_read / (double)lock_stats[lbl].num_read_locked) : 0);
        }
}

/* The following functions are used to collect some stats about locks. We
 * wrap pthread functions to know how much time we wait in a lock. */

static uint64_t nsec_now(void) {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ((uint64_t) ts.tv_sec * 1000000000ULL +
                (uint64_t) ts.tv_nsec);
}

static inline void __ha_rwlock_init(struct ha_rwlock *l)
{
        memset(l, 0, sizeof(struct ha_rwlock));
        __RWLOCK_INIT(&l->lock);
}

static inline void __ha_rwlock_destroy(struct ha_rwlock *l)
{
        __RWLOCK_DESTROY(&l->lock);
        memset(l, 0, sizeof(struct ha_rwlock));
}


static inline void __ha_rwlock_wrlock(enum lock_label lbl, struct ha_rwlock *l,
                                      const char *func, const char *file, int line)
{
        uint64_t start_time;

        if (unlikely(l->info.cur_writer & tid_bit)) {
                /* the thread is already owning the lock for write */
                abort();
        }

        if (unlikely(l->info.cur_readers & tid_bit)) {
                /* the thread is already owning the lock for read */
                abort();
        }

        HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);

        start_time = nsec_now();
        __RWLOCK_WRLOCK(&l->lock);
        HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));

        HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);

        l->info.cur_writer             = tid_bit;
        l->info.last_location.function = func;
        l->info.last_location.file     = file;
        l->info.last_location.line     = line;

        HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
}

static inline int __ha_rwlock_trywrlock(enum lock_label lbl, struct ha_rwlock *l,
                                        const char *func, const char *file, int line)
{
        uint64_t start_time;
        int r;

        if (unlikely(l->info.cur_writer & tid_bit)) {
                /* the thread is already owning the lock for write */
                abort();
        }

        if (unlikely(l->info.cur_readers & tid_bit)) {
                /* the thread is already owning the lock for read */
                abort();
        }

        /* We set waiting writer because trywrlock could wait for readers to quit */
        HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);

        start_time = nsec_now();
        r = __RWLOCK_TRYWRLOCK(&l->lock);
        HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
        if (unlikely(r)) {
                HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
                return r;
        }
        HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);

        l->info.cur_writer             = tid_bit;
        l->info.last_location.function = func;
        l->info.last_location.file     = file;
        l->info.last_location.line     = line;

        HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);

        return 0;
}

static inline void __ha_rwlock_wrunlock(enum lock_label lbl,struct ha_rwlock *l,
                                        const char *func, const char *file, int line)
{
        if (unlikely(!(l->info.cur_writer & tid_bit))) {
                /* the thread is not owning the lock for write */
                abort();
        }

        l->info.cur_writer             = 0;
        l->info.last_location.function = func;
        l->info.last_location.file     = file;
        l->info.last_location.line     = line;

        __RWLOCK_WRUNLOCK(&l->lock);

        HA_ATOMIC_ADD(&lock_stats[lbl].num_write_unlocked, 1);
}

static inline void __ha_rwlock_rdlock(enum lock_label lbl,struct ha_rwlock *l)
{
        uint64_t start_time;

        if (unlikely(l->info.cur_writer & tid_bit)) {
                /* the thread is already owning the lock for write */
                abort();
        }

        if (unlikely(l->info.cur_readers & tid_bit)) {
                /* the thread is already owning the lock for read */
                abort();
        }

        HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);

        start_time = nsec_now();
        __RWLOCK_RDLOCK(&l->lock);
        HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));
        HA_ATOMIC_ADD(&lock_stats[lbl].num_read_locked, 1);

        HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);

        HA_ATOMIC_AND(&l->info.wait_readers, ~tid_bit);
}

static inline int __ha_rwlock_tryrdlock(enum lock_label lbl,struct ha_rwlock *l)
{
        int r;

        if (unlikely(l->info.cur_writer & tid_bit)) {
                /* the thread is already owning the lock for write */
                abort();
        }

        if (unlikely(l->info.cur_readers & tid_bit)) {
                /* the thread is already owning the lock for read */
                abort();
        }

        /* try read should never wait */
        r = __RWLOCK_TRYRDLOCK(&l->lock);
        if (unlikely(r))
                return r;
        HA_ATOMIC_ADD(&lock_stats[lbl].num_read_locked, 1);

        HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);

        return 0;
}

static inline void __ha_rwlock_rdunlock(enum lock_label lbl,struct ha_rwlock *l)
{
        if (unlikely(!(l->info.cur_readers & tid_bit))) {
                /* the thread is not owning the lock for read */
                abort();
        }

        HA_ATOMIC_AND(&l->info.cur_readers, ~tid_bit);

        __RWLOCK_RDUNLOCK(&l->lock);

        HA_ATOMIC_ADD(&lock_stats[lbl].num_read_unlocked, 1);
}

static inline void __spin_init(struct ha_spinlock *l)
{
        memset(l, 0, sizeof(struct ha_spinlock));
        __SPIN_INIT(&l->lock);
}

static inline void __spin_destroy(struct ha_spinlock *l)
{
        __SPIN_DESTROY(&l->lock);
        memset(l, 0, sizeof(struct ha_spinlock));
}

static inline void __spin_lock(enum lock_label lbl, struct ha_spinlock *l,
                               const char *func, const char *file, int line)
{
        uint64_t start_time;

        if (unlikely(l->info.owner & tid_bit)) {
                /* the thread is already owning the lock */
                abort();
        }

        HA_ATOMIC_OR(&l->info.waiters, tid_bit);

        start_time = nsec_now();
        __SPIN_LOCK(&l->lock);
        HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));

        HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);


        l->info.owner                  = tid_bit;
        l->info.last_location.function = func;
        l->info.last_location.file     = file;
        l->info.last_location.line     = line;

        HA_ATOMIC_AND(&l->info.waiters, ~tid_bit);
}

static inline int __spin_trylock(enum lock_label lbl, struct ha_spinlock *l,
                                 const char *func, const char *file, int line)
{
        int r;

        if (unlikely(l->info.owner & tid_bit)) {
                /* the thread is already owning the lock */
                abort();
        }

        /* a trylock should never wait */
        r = __SPIN_TRYLOCK(&l->lock);
        if (unlikely(r))
                return r;
        HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);

        l->info.owner                  = tid_bit;
        l->info.last_location.function = func;
        l->info.last_location.file     = file;
        l->info.last_location.line     = line;

        return 0;
}

static inline void __spin_unlock(enum lock_label lbl, struct ha_spinlock *l,
                                 const char *func, const char *file, int line)
{
        if (unlikely(!(l->info.owner & tid_bit))) {
                /* the thread is not owning the lock */
                abort();
        }

        l->info.owner                  = 0;
        l->info.last_location.function = func;
        l->info.last_location.file     = file;
        l->info.last_location.line     = line;

        __SPIN_UNLOCK(&l->lock);
        HA_ATOMIC_ADD(&lock_stats[lbl].num_write_unlocked, 1);
}

#else /* DEBUG_THREAD */

#define HA_SPINLOCK_T unsigned long

#define HA_SPIN_INIT(l)         ({ (*l) = 0; })
#define HA_SPIN_DESTROY(l)      ({ (*l) = 0; })
#define HA_SPIN_LOCK(lbl, l)    pl_take_s(l)
#define HA_SPIN_TRYLOCK(lbl, l) !pl_try_s(l)
#define HA_SPIN_UNLOCK(lbl, l)  pl_drop_s(l)

#define HA_RWLOCK_T unsigned long

#define HA_RWLOCK_INIT(l)          ({ (*l) = 0; })
#define HA_RWLOCK_DESTROY(l)       ({ (*l) = 0; })
#define HA_RWLOCK_WRLOCK(lbl,l)    pl_take_w(l)
#define HA_RWLOCK_TRYWRLOCK(lbl,l) !pl_try_w(l)
#define HA_RWLOCK_WRUNLOCK(lbl,l)  pl_drop_w(l)
#define HA_RWLOCK_RDLOCK(lbl,l)    pl_take_r(l)
#define HA_RWLOCK_TRYRDLOCK(lbl,l) !pl_try_r(l)
#define HA_RWLOCK_RDUNLOCK(lbl,l)  pl_drop_r(l)

#endif /* DEBUG_THREAD */

#ifdef __x86_64__

static __inline int
__ha_cas_dw(void *target, void *compare, const void *set)
{
        char ret;

        __asm __volatile("lock cmpxchg16b %0; setz %3"
                         : "+m" (*(void **)target),
                           "=a" (((void **)compare)[0]),
                           "=d" (((void **)compare)[1]),
                           "=q" (ret)
                         : "a" (((void **)compare)[0]),
                           "d" (((void **)compare)[1]),
                           "b" (((const void **)set)[0]),
                           "c" (((const void **)set)[1])
                         : "memory", "cc");
        return (ret);
}
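
/* Illustrative use of the double-width CAS (sketch; "head" and "new_node"
 * are hypothetical): atomically update a {pointer, generation} pair so that
 * the generation counter defeats ABA issues. The operand must be 16-byte
 * aligned:
 *
 *      struct { void *ptr; unsigned long gen; } cur, next;
 *
 *      do {
 *              cur = *head;               // snapshot the current pair
 *              next.ptr = new_node;
 *              next.gen = cur.gen + 1;    // bump the generation counter
 *      } while (!__ha_cas_dw(head, &cur, &next));
 */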

/* Use __ha_barrier_atomic* when you're trying to protect data that are
 * modified using HA_ATOMIC* (except HA_ATOMIC_STORE).
 */
static __inline void
__ha_barrier_atomic_load(void)
{
        __asm __volatile("" ::: "memory");
}

static __inline void
__ha_barrier_atomic_store(void)
{
        __asm __volatile("" ::: "memory");
}

static __inline void
__ha_barrier_atomic_full(void)
{
        __asm __volatile("" ::: "memory");
}

static __inline void
__ha_barrier_load(void)
{
        __asm __volatile("lfence" ::: "memory");
}

static __inline void
__ha_barrier_store(void)
{
        __asm __volatile("sfence" ::: "memory");
}

static __inline void
__ha_barrier_full(void)
{
        __asm __volatile("mfence" ::: "memory");
}

#elif defined(__arm__) && (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__))

/* Use __ha_barrier_atomic* when you're trying to protect data that are
 * modified using HA_ATOMIC* (except HA_ATOMIC_STORE).
 */
static __inline void
__ha_barrier_atomic_load(void)
{
        __asm __volatile("dmb" ::: "memory");
}

static __inline void
__ha_barrier_atomic_store(void)
{
        __asm __volatile("dsb" ::: "memory");
}

static __inline void
__ha_barrier_atomic_full(void)
{
        __asm __volatile("dmb" ::: "memory");
}

static __inline void
__ha_barrier_load(void)
{
        __asm __volatile("dmb" ::: "memory");
}

static __inline void
__ha_barrier_store(void)
{
        __asm __volatile("dsb" ::: "memory");
}

static __inline void
__ha_barrier_full(void)
{
        __asm __volatile("dmb" ::: "memory");
}

static __inline int __ha_cas_dw(void *target, void *compare, const void *set)
{
        uint64_t previous;
        int tmp;

        __asm __volatile("1:"
                         "ldrexd %0, [%4];"
                         "cmp %Q0, %Q2;"
                         "ittt eq;"
                         "cmpeq %R0, %R2;"
                         "strexdeq %1, %3, [%4];"
                         "cmpeq %1, #1;"
                         "beq 1b;"
                         : "=&r" (previous), "=&r" (tmp)
                         : "r" (*(uint64_t *)compare), "r" (*(uint64_t *)set), "r" (target)
                         : "memory", "cc");
        tmp = (previous == *(uint64_t *)compare);
        *(uint64_t *)compare = previous;
        return (tmp);
}

#elif defined (__aarch64__)

/* Use __ha_barrier_atomic* when you're trying to protect data that are
 * modified using HA_ATOMIC* (except HA_ATOMIC_STORE).
 */
static __inline void
__ha_barrier_atomic_load(void)
{
        __asm __volatile("dmb ishld" ::: "memory");
}

static __inline void
__ha_barrier_atomic_store(void)
{
        __asm __volatile("dmb ishst" ::: "memory");
}

static __inline void
__ha_barrier_atomic_full(void)
{
        __asm __volatile("dmb ish" ::: "memory");
}

static __inline void
__ha_barrier_load(void)
{
        __asm __volatile("dmb ishld" ::: "memory");
}

static __inline void
__ha_barrier_store(void)
{
        __asm __volatile("dmb ishst" ::: "memory");
}

static __inline void
__ha_barrier_full(void)
{
        __asm __volatile("dmb ish" ::: "memory");
}

static __inline int __ha_cas_dw(void *target, void *compare, void *set)
{
        void *value[2];
        uint64_t tmp1, tmp2;

        __asm__ __volatile__("1:"
                             "ldxp %0, %1, [%4];"
                             "mov %2, %0;"
                             "mov %3, %1;"
                             "eor %0, %0, %5;"
                             "eor %1, %1, %6;"
                             "orr %1, %0, %1;"
                             "mov %w0, #0;"
                             "cbnz %1, 2f;"
                             "stxp %w0, %7, %8, [%4];"
                             "cbnz %w0, 1b;"
                             "mov %w0, #1;"
                             "2:"
                             : "=&r" (tmp1), "=&r" (tmp2), "=&r" (value[0]), "=&r" (value[1])
                             : "r" (target), "r" (((void **)(compare))[0]), "r" (((void **)(compare))[1]), "r" (((void **)(set))[0]), "r" (((void **)(set))[1])
                             : "cc", "memory");

        memcpy(compare, &value, sizeof(value));
        return (tmp1);
}

#else
#define __ha_barrier_atomic_load  __sync_synchronize
#define __ha_barrier_atomic_store __sync_synchronize
#define __ha_barrier_atomic_full  __sync_synchronize
#define __ha_barrier_load         __sync_synchronize
#define __ha_barrier_store        __sync_synchronize
#define __ha_barrier_full         __sync_synchronize
#endif

void ha_spin_init(HA_SPINLOCK_T *l);
void ha_rwlock_init(HA_RWLOCK_T *l);

#endif /* USE_THREAD */

extern int thread_cpus_enabled_at_boot;

static inline void __ha_compiler_barrier(void)
{
        __asm __volatile("" ::: "memory");
}

int parse_nbthread(const char *arg, char **err);
int thread_get_default_count();

#ifndef _HA_ATOMIC_CAS
#define _HA_ATOMIC_CAS HA_ATOMIC_CAS
#endif /* !_HA_ATOMIC_CAS */

#ifndef _HA_ATOMIC_ADD
#define _HA_ATOMIC_ADD HA_ATOMIC_ADD
#endif /* !_HA_ATOMIC_ADD */

#ifndef _HA_ATOMIC_XADD
#define _HA_ATOMIC_XADD HA_ATOMIC_XADD
#endif /* !_HA_ATOMIC_XADD */

#ifndef _HA_ATOMIC_SUB
#define _HA_ATOMIC_SUB HA_ATOMIC_SUB
#endif /* !_HA_ATOMIC_SUB */

#ifndef _HA_ATOMIC_AND
#define _HA_ATOMIC_AND HA_ATOMIC_AND
#endif /* !_HA_ATOMIC_AND */

#ifndef _HA_ATOMIC_OR
#define _HA_ATOMIC_OR HA_ATOMIC_OR
#endif /* !_HA_ATOMIC_OR */

#ifndef _HA_ATOMIC_XCHG
#define _HA_ATOMIC_XCHG HA_ATOMIC_XCHG
#endif /* !_HA_ATOMIC_XCHG */

#ifndef _HA_ATOMIC_STORE
#define _HA_ATOMIC_STORE HA_ATOMIC_STORE
#endif /* !_HA_ATOMIC_STORE */

#ifndef _HA_ATOMIC_LOAD
#define _HA_ATOMIC_LOAD HA_ATOMIC_LOAD
#endif /* !_HA_ATOMIC_LOAD */
#endif /* _COMMON_HATHREADS_H */