/*
 * include/common/hathreads.h
 * definitions, macros and inline functions about threads.
 *
 * Copyright (C) 2017 Christopher Faulet - cfaulet@haproxy.com
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _COMMON_HATHREADS_H
#define _COMMON_HATHREADS_H

#include <signal.h>
#include <unistd.h>
#ifdef _POSIX_PRIORITY_SCHEDULING
#include <sched.h>
#endif

#include <common/config.h>
#include <common/initcall.h>

/* Note about all_threads_mask:
 *    - its value ranges between 1 and the full mask of LONGBITS threads.
 *    - with thread support disabled, this symbol is defined as constant 1UL.
 *    - with threads enabled, it contains the mask of enabled threads. Thus if
 *      only one thread is enabled, it equals 1.
 */

/* thread info flags, for ha_thread_info[].flags */
#define TI_FL_STUCK             0x00000001


#ifndef USE_THREAD

#define MAX_THREADS 1
#define MAX_THREADS_MASK 1

/* Only way found to replace variables with constants that are optimized away
 * at build time.
 */
enum { all_threads_mask = 1UL };
enum { threads_harmless_mask = 0 };
enum { threads_want_rdv_mask = 0 };
enum { threads_sync_mask = 0 };
enum { tid_bit = 1UL };
enum { tid = 0 };

extern struct thread_info {
	clockid_t clock_id;
	timer_t wd_timer;          /* valid timer or TIMER_INVALID if not set */
	uint64_t prev_cpu_time;    /* previous per thread CPU time */
	uint64_t prev_mono_time;   /* previous system wide monotonic time */
	unsigned int idle_pct;     /* idle to total ratio over last sample (percent) */
	unsigned int flags;        /* thread info flags, TI_FL_* */
	/* pad to cache line (64B) */
	char __pad[0];             /* unused except to check remaining room */
	char __end[0] __attribute__((aligned(64)));
} ha_thread_info[MAX_THREADS];

extern THREAD_LOCAL struct thread_info *ti; /* thread_info for the current thread */

#define __decl_hathreads(decl)
#define __decl_spinlock(lock)
#define __decl_aligned_spinlock(lock)
#define __decl_rwlock(lock)
#define __decl_aligned_rwlock(lock)

#define HA_ATOMIC_CAS(val, old, new)                                    \
	({                                                              \
		typeof(val) _v = (val);                                 \
		typeof(old) _o = (old);                                 \
		(*_v == *_o) ? ((*_v = (new)), 1) : ((*_o = *_v), 0);   \
	})

/* warning, n is a pointer to the double-width value for dwcas */
#define HA_ATOMIC_DWCAS(val, o, n)                                     \
	({                                                             \
		long *_v = (long*)(val);                               \
		long *_o = (long*)(o);                                 \
		long *_n = (long*)(n);                                 \
		long _v0 = _v[0], _v1 = _v[1];                         \
		(_v0 == _o[0] && _v1 == _o[1]) ?                       \
			(_v[0] = _n[0], _v[1] = _n[1], 1) :            \
			(_o[0] = _v0, _o[1] = _v1, 0);                 \
	})

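/* Example (illustrative sketch, variable names made up): HA_ATOMIC_CAS()
 * returns 1 and stores <new> if *<val> still equals *<old>, otherwise it
 * returns 0 after refreshing *<old> with the current value, which permits
 * a plain retry loop:
 *
 *	unsigned int old = HA_ATOMIC_LOAD(&shared_counter);
 *	unsigned int new;
 *
 *	do {
 *		new = old * 2;     // recompute from the last observed value
 *	} while (!HA_ATOMIC_CAS(&shared_counter, &old, new));
 */
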
#define HA_ATOMIC_ADD(val, i)        ({*(val) += (i);})
#define HA_ATOMIC_SUB(val, i)        ({*(val) -= (i);})
#define HA_ATOMIC_XADD(val, i)                                          \
	({                                                              \
		typeof((val)) __p_xadd = (val);                         \
		typeof(*(val)) __old_xadd = *__p_xadd;                  \
		*__p_xadd += i;                                         \
		__old_xadd;                                             \
	})
#define HA_ATOMIC_AND(val, flags)    ({*(val) &= (flags);})
#define HA_ATOMIC_OR(val, flags)     ({*(val) |= (flags);})
#define HA_ATOMIC_XCHG(val, new)                                        \
	({                                                              \
		typeof(*(val)) __old_xchg = *(val);                     \
		*(val) = new;                                           \
		__old_xchg;                                             \
	})
#define HA_ATOMIC_BTS(val, bit)                                         \
	({                                                              \
		typeof((val)) __p_bts = (val);                          \
		typeof(*__p_bts)  __b_bts = (1UL << (bit));             \
		typeof(*__p_bts)  __t_bts = *__p_bts & __b_bts;         \
		if (!__t_bts)                                           \
			*__p_bts |= __b_bts;                            \
		__t_bts;                                                \
	})
#define HA_ATOMIC_BTR(val, bit)                                         \
	({                                                              \
		typeof((val)) __p_btr = (val);                          \
		typeof(*__p_btr)  __b_btr = (1UL << (bit));             \
		typeof(*__p_btr)  __t_btr = *__p_btr & __b_btr;         \
		if (__t_btr)                                            \
			*__p_btr &= ~__b_btr;                           \
		__t_btr;                                                \
	})
#define HA_ATOMIC_LOAD(val)          *(val)
#define HA_ATOMIC_STORE(val, new)    ({*(val) = new;})
#define HA_ATOMIC_UPDATE_MAX(val, new)                                  \
	({                                                              \
		typeof(val) __val = (val);                              \
		typeof(*(val)) __new_max = (new);                       \
									\
		if (*__val < __new_max)                                 \
			*__val = __new_max;                             \
		*__val;                                                 \
	})

#define HA_ATOMIC_UPDATE_MIN(val, new)                                  \
	({                                                              \
		typeof(val) __val = (val);                              \
		typeof(*(val)) __new_min = (new);                       \
									\
		if (*__val > __new_min)                                 \
			*__val = __new_min;                             \
		*__val;                                                 \
	})

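/* Example (illustrative sketch, names made up): tracking a peak value with
 * HA_ATOMIC_UPDATE_MAX(). The macro only writes when the new value beats the
 * current one, and returns the resulting maximum:
 *
 *	unsigned int cur_conns = HA_ATOMIC_ADD(&nb_conns, 1);
 *
 *	HA_ATOMIC_UPDATE_MAX(&peak_conns, cur_conns);
 */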
#define HA_BARRIER() do { } while (0)

#define HA_SPIN_INIT(l)         do { /* do nothing */ } while(0)
#define HA_SPIN_DESTROY(l)      do { /* do nothing */ } while(0)
#define HA_SPIN_LOCK(lbl, l)    do { /* do nothing */ } while(0)
#define HA_SPIN_TRYLOCK(lbl, l) ({ 0; })
#define HA_SPIN_UNLOCK(lbl, l)  do { /* do nothing */ } while(0)

#define HA_RWLOCK_INIT(l)        do { /* do nothing */ } while(0)
#define HA_RWLOCK_DESTROY(l)     do { /* do nothing */ } while(0)
#define HA_RWLOCK_WRLOCK(lbl, l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_TRYWRLOCK(lbl, l) ({ 0; })
#define HA_RWLOCK_WRUNLOCK(lbl, l)  do { /* do nothing */ } while(0)
#define HA_RWLOCK_RDLOCK(lbl, l)    do { /* do nothing */ } while(0)
#define HA_RWLOCK_TRYRDLOCK(lbl, l) ({ 0; })
#define HA_RWLOCK_RDUNLOCK(lbl, l)  do { /* do nothing */ } while(0)

#define ha_sigmask(how, set, oldset)  sigprocmask(how, set, oldset)

static inline void ha_set_tid(unsigned int tid)
{
	ti = &ha_thread_info[tid];
}

static inline unsigned long long ha_get_pthread_id(unsigned int thr)
{
	return 0;
}

static inline void ha_thread_relax(void)
{
#if _POSIX_PRIORITY_SCHEDULING
	sched_yield();
#endif
}

/* send signal <sig> to thread <thr> */
static inline void ha_tkill(unsigned int thr, int sig)
{
	raise(sig);
}

/* send signal <sig> to all threads */
static inline void ha_tkillall(int sig)
{
	raise(sig);
}

static inline void __ha_barrier_atomic_load(void)
{
}

static inline void __ha_barrier_atomic_store(void)
{
}

static inline void __ha_barrier_atomic_full(void)
{
}

static inline void __ha_barrier_load(void)
{
}

static inline void __ha_barrier_store(void)
{
}

static inline void __ha_barrier_full(void)
{
}

static inline void thread_harmless_now()
{
}

static inline void thread_harmless_end()
{
}

static inline void thread_isolate()
{
}

static inline void thread_release()
{
}

static inline void thread_sync_release()
{
}

static inline unsigned long thread_isolated()
{
	return 1;
}

#else /* USE_THREAD */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <import/plock.h>

#ifndef MAX_THREADS
#define MAX_THREADS LONGBITS
#endif

#define MAX_THREADS_MASK (~0UL >> (LONGBITS - MAX_THREADS))

#define __decl_hathreads(decl) decl

/* declare a self-initializing spinlock */
#define __decl_spinlock(lock)                               \
	HA_SPINLOCK_T (lock);                               \
	INITCALL1(STG_LOCK, ha_spin_init, &(lock))

/* declare a self-initializing spinlock, aligned on a cache line */
#define __decl_aligned_spinlock(lock)                       \
	HA_SPINLOCK_T (lock) __attribute__((aligned(64)));  \
	INITCALL1(STG_LOCK, ha_spin_init, &(lock))

/* declare a self-initializing rwlock */
#define __decl_rwlock(lock)                                 \
	HA_RWLOCK_T   (lock);                               \
	INITCALL1(STG_LOCK, ha_rwlock_init, &(lock))

/* declare a self-initializing rwlock, aligned on a cache line */
#define __decl_aligned_rwlock(lock)                         \
	HA_RWLOCK_T   (lock) __attribute__((aligned(64)));  \
	INITCALL1(STG_LOCK, ha_rwlock_init, &(lock))

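/* Example (illustrative sketch, names made up): declaring a self-initializing
 * spinlock at file scope and using it to serialize access to some shared
 * state. The INITCALL runs ha_spin_init() at startup so no explicit init
 * call is needed:
 *
 *	__decl_spinlock(counters_lock);
 *
 *	static void bump_counter(void)
 *	{
 *		HA_SPIN_LOCK(OTHER_LOCK, &counters_lock);
 *		shared_counter++;       // hypothetical shared state
 *		HA_SPIN_UNLOCK(OTHER_LOCK, &counters_lock);
 *	}
 */
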
/* TODO: thread: For now, we rely on GCC builtins but it could be a good idea to
 * have a header file grouping all functions dealing with threads. */

#if defined(__GNUC__) && (__GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ < 7) && !defined(__clang__)
/* gcc < 4.7 */

#define HA_ATOMIC_ADD(val, i)        __sync_add_and_fetch(val, i)
#define HA_ATOMIC_SUB(val, i)        __sync_sub_and_fetch(val, i)
#define HA_ATOMIC_XADD(val, i)       __sync_fetch_and_add(val, i)
#define HA_ATOMIC_AND(val, flags)    __sync_and_and_fetch(val, flags)
#define HA_ATOMIC_OR(val, flags)     __sync_or_and_fetch(val, flags)

/* the CAS is a bit complicated. The older API doesn't support returning the
 * value and the swap's result at the same time. So here we take what looks
 * like the safest route, consisting in using the boolean version guaranteeing
 * that the operation was performed or not, and we snoop a previous value. If
 * the compare succeeds, we return. If it fails, we return the previous value,
 * but only if it differs from the expected one. If it's the same it's a race
 * thus we try again to avoid confusing a possibly sensitive caller.
 */
#define HA_ATOMIC_CAS(val, old, new)                                    \
	({                                                              \
		typeof((val)) __val_cas = (val);                        \
		typeof((old)) __oldp_cas = (old);                       \
		typeof(*(old)) __oldv_cas;                              \
		typeof((new)) __new_cas = (new);                        \
		int __ret_cas;                                          \
		do {                                                    \
			__oldv_cas = *__val_cas;                        \
			__ret_cas = __sync_bool_compare_and_swap(__val_cas, *__oldp_cas, __new_cas); \
		} while (!__ret_cas && *__oldp_cas == __oldv_cas);      \
		if (!__ret_cas)                                         \
			*__oldp_cas = __oldv_cas;                       \
		__ret_cas;                                              \
	})

/* warning, n is a pointer to the double-width value for dwcas */
#define HA_ATOMIC_DWCAS(val, o, n) __ha_cas_dw(val, o, n)

#define HA_ATOMIC_XCHG(val, new)                                        \
	({                                                              \
		typeof((val)) __val_xchg = (val);                       \
		typeof(*(val)) __old_xchg;                              \
		typeof((new)) __new_xchg = (new);                       \
		do { __old_xchg = *__val_xchg;                          \
		} while (!__sync_bool_compare_and_swap(__val_xchg, __old_xchg, __new_xchg)); \
		__old_xchg;                                             \
	})

#define HA_ATOMIC_BTS(val, bit)                                         \
	({                                                              \
		typeof(*(val)) __b_bts = (1UL << (bit));                \
		__sync_fetch_and_or((val), __b_bts) & __b_bts;          \
	})

#define HA_ATOMIC_BTR(val, bit)                                         \
	({                                                              \
		typeof(*(val)) __b_btr = (1UL << (bit));                \
		__sync_fetch_and_and((val), ~__b_btr) & __b_btr;        \
	})

#define HA_ATOMIC_LOAD(val)                                             \
	({                                                              \
		typeof(*(val)) ret;                                     \
		__sync_synchronize();                                   \
		ret = *(volatile typeof(val))val;                       \
		__sync_synchronize();                                   \
		ret;                                                    \
	})

#define HA_ATOMIC_STORE(val, new)                                       \
	({                                                              \
		typeof((val)) __val_store = (val);                      \
		typeof(*(val)) __old_store;                             \
		typeof((new)) __new_store = (new);                      \
		do { __old_store = *__val_store;                        \
		} while (!__sync_bool_compare_and_swap(__val_store, __old_store, __new_store)); \
	})
#else
/* gcc >= 4.7 */
#define HA_ATOMIC_CAS(val, old, new) __atomic_compare_exchange_n(val, old, new, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
/* warning, n is a pointer to the double-width value for dwcas */
#define HA_ATOMIC_DWCAS(val, o, n)   __ha_cas_dw(val, o, n)
#define HA_ATOMIC_ADD(val, i)        __atomic_add_fetch(val, i, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_XADD(val, i)       __atomic_fetch_add(val, i, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_SUB(val, i)        __atomic_sub_fetch(val, i, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_AND(val, flags)    __atomic_and_fetch(val, flags, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_OR(val, flags)     __atomic_or_fetch(val, flags, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_BTS(val, bit)                                         \
	({                                                              \
		typeof(*(val)) __b_bts = (1UL << (bit));                \
		__sync_fetch_and_or((val), __b_bts) & __b_bts;          \
	})

#define HA_ATOMIC_BTR(val, bit)                                         \
	({                                                              \
		typeof(*(val)) __b_btr = (1UL << (bit));                \
		__sync_fetch_and_and((val), ~__b_btr) & __b_btr;        \
	})

#define HA_ATOMIC_XCHG(val, new)     __atomic_exchange_n(val, new, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_STORE(val, new)    __atomic_store_n(val, new, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_LOAD(val)          __atomic_load_n(val, __ATOMIC_SEQ_CST)

/* Variants that don't generate any memory barrier.
 * If you're unsure how to deal with barriers, just use the HA_ATOMIC_* version,
 * that will always generate correct code.
 * Usually it's fine to use those when updating data that have no dependency,
 * i.e. updating a counter. Otherwise a barrier is required.
 */
#define _HA_ATOMIC_CAS(val, old, new) __atomic_compare_exchange_n(val, old, new, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
/* warning, n is a pointer to the double-width value for dwcas */
#define _HA_ATOMIC_DWCAS(val, o, n)   __ha_cas_dw(val, o, n)
#define _HA_ATOMIC_ADD(val, i)        __atomic_add_fetch(val, i, __ATOMIC_RELAXED)
#define _HA_ATOMIC_XADD(val, i)       __atomic_fetch_add(val, i, __ATOMIC_RELAXED)
#define _HA_ATOMIC_SUB(val, i)        __atomic_sub_fetch(val, i, __ATOMIC_RELAXED)
#define _HA_ATOMIC_AND(val, flags)    __atomic_and_fetch(val, flags, __ATOMIC_RELAXED)
#define _HA_ATOMIC_OR(val, flags)     __atomic_or_fetch(val, flags, __ATOMIC_RELAXED)
#define _HA_ATOMIC_XCHG(val, new)     __atomic_exchange_n(val, new, __ATOMIC_RELAXED)
#define _HA_ATOMIC_STORE(val, new)    __atomic_store_n(val, new, __ATOMIC_RELAXED)
#define _HA_ATOMIC_LOAD(val)          __atomic_load_n(val, __ATOMIC_RELAXED)

#endif /* gcc >= 4.7 */

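/* Example (illustrative sketch, counter names made up): a pure statistics
 * counter has no dependent data, so the relaxed variant is enough and is
 * cheaper on weakly ordered CPUs; the barrier-generating form is only needed
 * when other memory accesses must be ordered around the update. Note that on
 * gcc < 4.7 the _HA_ATOMIC_* names simply map back to the HA_ATOMIC_* ones
 * (see the fallback defines at the end of this file):
 *
 *	_HA_ATOMIC_ADD(&total_bytes, len);   // plain counter, no ordering needed
 *	HA_ATOMIC_ADD(&queue_len, 1);        // ordered, other data depends on it
 */
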
#define HA_ATOMIC_UPDATE_MAX(val, new)                                  \
	({                                                              \
		typeof(val) __val = (val);                              \
		typeof(*(val)) __old_max = *__val;                      \
		typeof(*(val)) __new_max = (new);                       \
									\
		while (__old_max < __new_max &&                         \
		       !HA_ATOMIC_CAS(__val, &__old_max, __new_max));   \
		*__val;                                                 \
	})
#define HA_ATOMIC_UPDATE_MIN(val, new)                                  \
	({                                                              \
		typeof(val) __val = (val);                              \
		typeof(*(val)) __old_min = *__val;                      \
		typeof(*(val)) __new_min = (new);                       \
									\
		while (__old_min > __new_min &&                         \
		       !HA_ATOMIC_CAS(__val, &__old_min, __new_min));   \
		*__val;                                                 \
	})

#define HA_BARRIER() pl_barrier()

void thread_harmless_till_end();
void thread_isolate();
void thread_release();
void thread_sync_release();
void ha_tkill(unsigned int thr, int sig);
void ha_tkillall(int sig);

extern struct thread_info {
	pthread_t pthread;
	clockid_t clock_id;
	timer_t wd_timer;          /* valid timer or TIMER_INVALID if not set */
	uint64_t prev_cpu_time;    /* previous per thread CPU time */
	uint64_t prev_mono_time;   /* previous system wide monotonic time */
	unsigned int idle_pct;     /* idle to total ratio over last sample (percent) */
	unsigned int flags;        /* thread info flags, TI_FL_* */
	/* pad to cache line (64B) */
	char __pad[0];             /* unused except to check remaining room */
	char __end[0] __attribute__((aligned(64)));
} ha_thread_info[MAX_THREADS];

extern THREAD_LOCAL unsigned int  tid;     /* The thread id */
extern THREAD_LOCAL unsigned long tid_bit; /* The bit corresponding to the thread id */
extern THREAD_LOCAL struct thread_info *ti; /* thread_info for the current thread */
extern volatile unsigned long all_threads_mask;
extern volatile unsigned long threads_want_rdv_mask;
extern volatile unsigned long threads_harmless_mask;
extern volatile unsigned long threads_sync_mask;

/* explanation for threads_want_rdv_mask, threads_harmless_mask, and
 * threads_sync_mask:
 * - threads_want_rdv_mask is a bit field indicating all threads that have
 *   requested a rendez-vous of other threads using thread_isolate().
 * - threads_harmless_mask is a bit field indicating all threads that are
 *   currently harmless in that they promise not to access a shared resource.
 * - threads_sync_mask is a bit field indicating that a thread waiting for
 *   others to finish wants to leave synchronized with others and as such
 *   promises to do so as well using thread_sync_release().
 *
 * For a given thread, its bits in want_rdv and harmless can be translated
 * like this:
 *
 *  ----------+----------+----------------------------------------------------
 *   want_rdv | harmless | description
 *  ----------+----------+----------------------------------------------------
 *       0    |     0    | thread not interested in RDV, possibly harmful
 *       0    |     1    | thread not interested in RDV but harmless
 *       1    |     1    | thread interested in RDV and waiting for its turn
 *       1    |     0    | thread currently working isolated from others
 *  ----------+----------+----------------------------------------------------
 *
 * thread_sync_mask only delays the leaving of threads_sync_release() to make
 * sure that each thread's harmless bit is cleared before leaving the function.
 */

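/* Example (illustrative sketch): the typical use of these masks is to mark the
 * thread harmless around a blocking call, and to isolate a thread that must
 * touch shared data alone. The function names below are the real API, the
 * rest is made up:
 *
 *	// around poll(): no shared resource is touched while sleeping
 *	thread_harmless_now();
 *	poll(fds, nbfd, timeout);
 *	thread_harmless_end();
 *
 *	// exclusive access: wait for all other threads to become harmless
 *	thread_isolate();
 *	rebuild_shared_table();    // hypothetical exclusive operation
 *	thread_release();
 */
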
#define ha_sigmask(how, set, oldset)  pthread_sigmask(how, set, oldset)

/* sets the thread ID and the TID bit for the current thread */
static inline void ha_set_tid(unsigned int data)
{
	tid     = data;
	tid_bit = (1UL << tid);
	ti      = &ha_thread_info[tid];
}

/* Retrieves the opaque pthread_t of thread <thr> cast to an unsigned long long
 * since POSIX took great care of not specifying its representation, making it
 * hard to export for post-mortem analysis. For this reason we copy it into a
 * union and will use the smallest scalar type at least as large as its size,
 * which will keep endianness and alignment for all regular sizes. As a last
 * resort we end up with a long long aligned to the first bytes in memory, which
 * will be endian-dependent if pthread_t is larger than a long long (not seen
 * yet).
 */
static inline unsigned long long ha_get_pthread_id(unsigned int thr)
{
	union {
		pthread_t t;
		unsigned long long ll;
		unsigned int i;
		unsigned short s;
		unsigned char c;
	} u;

	memset(&u, 0, sizeof(u));
	u.t = ha_thread_info[thr].pthread;

	if (sizeof(u.t) <= sizeof(u.c))
		return u.c;
	else if (sizeof(u.t) <= sizeof(u.s))
		return u.s;
	else if (sizeof(u.t) <= sizeof(u.i))
		return u.i;
	return u.ll;
}

static inline void ha_thread_relax(void)
{
#if _POSIX_PRIORITY_SCHEDULING
	sched_yield();
#else
	pl_cpu_relax();
#endif
}

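/* Example (illustrative sketch, flag name made up): ha_thread_relax() is meant
 * for spin-wait loops, so the CPU (or a sibling hyper-thread) can make
 * progress while we wait:
 *
 *	while (!HA_ATOMIC_LOAD(&job_done))
 *		ha_thread_relax();
 */
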
/* Marks the thread as harmless. Note: this must be true, i.e. the thread must
 * not be touching any unprotected shared resource during this period. Usually
 * this is called before poll(), but it may also be placed around very slow
 * calls (eg: some crypto operations). Needs to be terminated using
 * thread_harmless_end().
 */
static inline void thread_harmless_now()
{
	HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
}

/* Ends the harmless period started by thread_harmless_now(). Usually this is
 * placed after the poll() call. If it is discovered that a job was running and
 * is relying on the thread still being harmless, the thread waits for the
 * other one to finish.
 */
static inline void thread_harmless_end()
{
	while (1) {
		HA_ATOMIC_AND(&threads_harmless_mask, ~tid_bit);
		if (likely((threads_want_rdv_mask & all_threads_mask) == 0))
			break;
		thread_harmless_till_end();
	}
}

/* an isolated thread has harmless cleared and want_rdv set */
static inline unsigned long thread_isolated()
{
	return threads_want_rdv_mask & ~threads_harmless_mask & tid_bit;
}


#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)

/* WARNING!!! if you update this enum, please also keep lock_label() up to date below */
enum lock_label {
	TASK_RQ_LOCK,
	TASK_WQ_LOCK,
	POOL_LOCK,
	LISTENER_LOCK,
	PROXY_LOCK,
	SERVER_LOCK,
	LBPRM_LOCK,
	SIGNALS_LOCK,
	STK_TABLE_LOCK,
	STK_SESS_LOCK,
	APPLETS_LOCK,
	PEER_LOCK,
	STRMS_LOCK,
	SSL_LOCK,
	SSL_GEN_CERTS_LOCK,
	PATREF_LOCK,
	PATEXP_LOCK,
	VARS_LOCK,
	COMP_POOL_LOCK,
	LUA_LOCK,
	NOTIF_LOCK,
	SPOE_APPLET_LOCK,
	DNS_LOCK,
	PID_LIST_LOCK,
	EMAIL_ALERTS_LOCK,
	PIPES_LOCK,
	TLSKEYS_REF_LOCK,
	AUTH_LOCK,
	LOGSRV_LOCK,
	DICT_LOCK,
	PROTO_LOCK,
	CKCH_LOCK,
	SNI_LOCK,
	OTHER_LOCK,
	LOCK_LABELS
};
struct lock_stat {
	uint64_t nsec_wait_for_write;
	uint64_t nsec_wait_for_read;
	uint64_t num_write_locked;
	uint64_t num_write_unlocked;
	uint64_t num_read_locked;
	uint64_t num_read_unlocked;
};

extern struct lock_stat lock_stats[LOCK_LABELS];

#define __HA_SPINLOCK_T      unsigned long

#define __SPIN_INIT(l)         ({ (*l) = 0; })
#define __SPIN_DESTROY(l)      ({ (*l) = 0; })
#define __SPIN_LOCK(l)         pl_take_s(l)
#define __SPIN_TRYLOCK(l)      !pl_try_s(l)
#define __SPIN_UNLOCK(l)       pl_drop_s(l)

#define __HA_RWLOCK_T        unsigned long

#define __RWLOCK_INIT(l)          ({ (*l) = 0; })
#define __RWLOCK_DESTROY(l)       ({ (*l) = 0; })
#define __RWLOCK_WRLOCK(l)        pl_take_w(l)
#define __RWLOCK_TRYWRLOCK(l)     !pl_try_w(l)
#define __RWLOCK_WRUNLOCK(l)      pl_drop_w(l)
#define __RWLOCK_RDLOCK(l)        pl_take_r(l)
#define __RWLOCK_TRYRDLOCK(l)     !pl_try_r(l)
#define __RWLOCK_RDUNLOCK(l)      pl_drop_r(l)

#define HA_SPINLOCK_T        struct ha_spinlock

#define HA_SPIN_INIT(l)         __spin_init(l)
#define HA_SPIN_DESTROY(l)      __spin_destroy(l)

#define HA_SPIN_LOCK(lbl, l)    __spin_lock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_SPIN_TRYLOCK(lbl, l) __spin_trylock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_SPIN_UNLOCK(lbl, l)  __spin_unlock(lbl, l, __func__, __FILE__, __LINE__)

#define HA_RWLOCK_T          struct ha_rwlock

#define HA_RWLOCK_INIT(l)          __ha_rwlock_init((l))
#define HA_RWLOCK_DESTROY(l)       __ha_rwlock_destroy((l))
#define HA_RWLOCK_WRLOCK(lbl,l)    __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_TRYWRLOCK(lbl,l) __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_WRUNLOCK(lbl,l)  __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_RDLOCK(lbl,l)    __ha_rwlock_rdlock(lbl, l)
#define HA_RWLOCK_TRYRDLOCK(lbl,l) __ha_rwlock_tryrdlock(lbl, l)
#define HA_RWLOCK_RDUNLOCK(lbl,l)  __ha_rwlock_rdunlock(lbl, l)

struct ha_spinlock {
	__HA_SPINLOCK_T lock;
	struct {
		unsigned long owner; /* a bit is set to 1 << tid for the lock owner */
		unsigned long waiters; /* a bit is set to 1 << tid for waiting threads */
		struct {
			const char *function;
			const char *file;
			int line;
		} last_location; /* location of the last owner */
	} info;
};

struct ha_rwlock {
	__HA_RWLOCK_T lock;
	struct {
		unsigned long cur_writer; /* a bit is set to 1 << tid for the lock owner */
		unsigned long wait_writers; /* a bit is set to 1 << tid for waiting writers */
		unsigned long cur_readers; /* a bit is set to 1 << tid for current readers */
		unsigned long wait_readers; /* a bit is set to 1 << tid for waiting readers */
		struct {
			const char *function;
			const char *file;
			int line;
		} last_location; /* location of the last write owner */
	} info;
};

static inline const char *lock_label(enum lock_label label)
{
	switch (label) {
	case TASK_RQ_LOCK:         return "TASK_RQ";
	case TASK_WQ_LOCK:         return "TASK_WQ";
	case POOL_LOCK:            return "POOL";
	case LISTENER_LOCK:        return "LISTENER";
	case PROXY_LOCK:           return "PROXY";
	case SERVER_LOCK:          return "SERVER";
	case LBPRM_LOCK:           return "LBPRM";
	case SIGNALS_LOCK:         return "SIGNALS";
	case STK_TABLE_LOCK:       return "STK_TABLE";
	case STK_SESS_LOCK:        return "STK_SESS";
	case APPLETS_LOCK:         return "APPLETS";
	case PEER_LOCK:            return "PEER";
	case STRMS_LOCK:           return "STRMS";
	case SSL_LOCK:             return "SSL";
	case SSL_GEN_CERTS_LOCK:   return "SSL_GEN_CERTS";
	case PATREF_LOCK:          return "PATREF";
	case PATEXP_LOCK:          return "PATEXP";
	case VARS_LOCK:            return "VARS";
	case COMP_POOL_LOCK:       return "COMP_POOL";
	case LUA_LOCK:             return "LUA";
	case NOTIF_LOCK:           return "NOTIF";
	case SPOE_APPLET_LOCK:     return "SPOE_APPLET";
	case DNS_LOCK:             return "DNS";
	case PID_LIST_LOCK:        return "PID_LIST";
	case EMAIL_ALERTS_LOCK:    return "EMAIL_ALERTS";
	case PIPES_LOCK:           return "PIPES";
	case TLSKEYS_REF_LOCK:     return "TLSKEYS_REF";
	case AUTH_LOCK:            return "AUTH";
	case LOGSRV_LOCK:          return "LOGSRV";
	case DICT_LOCK:            return "DICT";
	case PROTO_LOCK:           return "PROTO";
	case CKCH_LOCK:            return "CKCH";
	case SNI_LOCK:             return "SNI";
	case OTHER_LOCK:           return "OTHER";
	case LOCK_LABELS:          break; /* keep compiler happy */
	};
	/* only way to come here is consecutive to an internal bug */
	abort();
}

static inline void show_lock_stats()
{
	int lbl;

	for (lbl = 0; lbl < LOCK_LABELS; lbl++) {
		fprintf(stderr,
			"Stats about Lock %s: \n"
			"\t # write lock  : %lu\n"
			"\t # write unlock: %lu (%ld)\n"
			"\t # wait time for write     : %.3f msec\n"
			"\t # wait time for write/lock: %.3f nsec\n"
			"\t # read lock   : %lu\n"
			"\t # read unlock : %lu (%ld)\n"
			"\t # wait time for read      : %.3f msec\n"
			"\t # wait time for read/lock : %.3f nsec\n",
			lock_label(lbl),
			lock_stats[lbl].num_write_locked,
			lock_stats[lbl].num_write_unlocked,
			lock_stats[lbl].num_write_unlocked - lock_stats[lbl].num_write_locked,
			(double)lock_stats[lbl].nsec_wait_for_write / 1000000.0,
			lock_stats[lbl].num_write_locked ? ((double)lock_stats[lbl].nsec_wait_for_write / (double)lock_stats[lbl].num_write_locked) : 0,
			lock_stats[lbl].num_read_locked,
			lock_stats[lbl].num_read_unlocked,
			lock_stats[lbl].num_read_unlocked - lock_stats[lbl].num_read_locked,
			(double)lock_stats[lbl].nsec_wait_for_read / 1000000.0,
			lock_stats[lbl].num_read_locked ? ((double)lock_stats[lbl].nsec_wait_for_read / (double)lock_stats[lbl].num_read_locked) : 0);
	}
}

/* Following functions are used to collect some stats about locks. We wrap
 * pthread functions to know how much time we wait in a lock. */

static uint64_t nsec_now(void) {
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((uint64_t) ts.tv_sec * 1000000000ULL +
		(uint64_t) ts.tv_nsec);
}

static inline void __ha_rwlock_init(struct ha_rwlock *l)
{
	memset(l, 0, sizeof(struct ha_rwlock));
	__RWLOCK_INIT(&l->lock);
}

static inline void __ha_rwlock_destroy(struct ha_rwlock *l)
{
	__RWLOCK_DESTROY(&l->lock);
	memset(l, 0, sizeof(struct ha_rwlock));
}


static inline void __ha_rwlock_wrlock(enum lock_label lbl, struct ha_rwlock *l,
				      const char *func, const char *file, int line)
{
	uint64_t start_time;

	if (unlikely(l->info.cur_writer & tid_bit)) {
		/* the thread is already owning the lock for write */
		abort();
	}

	if (unlikely(l->info.cur_readers & tid_bit)) {
		/* the thread is already owning the lock for read */
		abort();
	}

	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);

	start_time = nsec_now();
	__RWLOCK_WRLOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));

	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);

	l->info.cur_writer             = tid_bit;
	l->info.last_location.function = func;
	l->info.last_location.file     = file;
	l->info.last_location.line     = line;

	HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
}

static inline int __ha_rwlock_trywrlock(enum lock_label lbl, struct ha_rwlock *l,
					const char *func, const char *file, int line)
{
	uint64_t start_time;
	int r;

	if (unlikely(l->info.cur_writer & tid_bit)) {
		/* the thread is already owning the lock for write */
		abort();
	}

	if (unlikely(l->info.cur_readers & tid_bit)) {
		/* the thread is already owning the lock for read */
		abort();
	}

	/* We set the waiting writer flag because trywrlock may wait for readers to quit */
	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);

	start_time = nsec_now();
	r = __RWLOCK_TRYWRLOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
	if (unlikely(r)) {
		HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
		return r;
	}
	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);

	l->info.cur_writer             = tid_bit;
	l->info.last_location.function = func;
	l->info.last_location.file     = file;
	l->info.last_location.line     = line;

	HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);

	return 0;
}

static inline void __ha_rwlock_wrunlock(enum lock_label lbl,struct ha_rwlock *l,
				        const char *func, const char *file, int line)
{
	if (unlikely(!(l->info.cur_writer & tid_bit))) {
		/* the thread is not owning the lock for write */
		abort();
	}

	l->info.cur_writer             = 0;
	l->info.last_location.function = func;
	l->info.last_location.file     = file;
	l->info.last_location.line     = line;

	__RWLOCK_WRUNLOCK(&l->lock);

	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_unlocked, 1);
}

static inline void __ha_rwlock_rdlock(enum lock_label lbl,struct ha_rwlock *l)
{
	uint64_t start_time;

	if (unlikely(l->info.cur_writer & tid_bit)) {
		/* the thread is already owning the lock for write */
		abort();
	}

	if (unlikely(l->info.cur_readers & tid_bit)) {
		/* the thread is already owning the lock for read */
		abort();
	}

	HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);

	start_time = nsec_now();
	__RWLOCK_RDLOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));
	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_locked, 1);

	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);

	HA_ATOMIC_AND(&l->info.wait_readers, ~tid_bit);
}

static inline int __ha_rwlock_tryrdlock(enum lock_label lbl,struct ha_rwlock *l)
{
	int r;

	if (unlikely(l->info.cur_writer & tid_bit)) {
		/* the thread is already owning the lock for write */
		abort();
	}

	if (unlikely(l->info.cur_readers & tid_bit)) {
		/* the thread is already owning the lock for read */
		abort();
	}

	/* try read should never wait */
	r = __RWLOCK_TRYRDLOCK(&l->lock);
	if (unlikely(r))
		return r;
	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_locked, 1);

	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);

	return 0;
}

static inline void __ha_rwlock_rdunlock(enum lock_label lbl,struct ha_rwlock *l)
{
	if (unlikely(!(l->info.cur_readers & tid_bit))) {
		/* the thread is not owning the lock for read */
		abort();
	}

	HA_ATOMIC_AND(&l->info.cur_readers, ~tid_bit);

	__RWLOCK_RDUNLOCK(&l->lock);

	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_unlocked, 1);
}

static inline void __spin_init(struct ha_spinlock *l)
{
	memset(l, 0, sizeof(struct ha_spinlock));
	__SPIN_INIT(&l->lock);
}

static inline void __spin_destroy(struct ha_spinlock *l)
{
	__SPIN_DESTROY(&l->lock);
	memset(l, 0, sizeof(struct ha_spinlock));
}

static inline void __spin_lock(enum lock_label lbl, struct ha_spinlock *l,
			       const char *func, const char *file, int line)
{
	uint64_t start_time;

	if (unlikely(l->info.owner & tid_bit)) {
		/* the thread is already owning the lock */
		abort();
	}

	HA_ATOMIC_OR(&l->info.waiters, tid_bit);

	start_time = nsec_now();
	__SPIN_LOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));

	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);


	l->info.owner                  = tid_bit;
	l->info.last_location.function = func;
	l->info.last_location.file     = file;
	l->info.last_location.line     = line;

	HA_ATOMIC_AND(&l->info.waiters, ~tid_bit);
}

static inline int __spin_trylock(enum lock_label lbl, struct ha_spinlock *l,
				 const char *func, const char *file, int line)
{
	int r;

	if (unlikely(l->info.owner & tid_bit)) {
		/* the thread is already owning the lock */
		abort();
	}

	/* trylock should never wait */
	r = __SPIN_TRYLOCK(&l->lock);
	if (unlikely(r))
		return r;
	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);

	l->info.owner                  = tid_bit;
	l->info.last_location.function = func;
	l->info.last_location.file     = file;
	l->info.last_location.line     = line;

	return 0;
}

static inline void __spin_unlock(enum lock_label lbl, struct ha_spinlock *l,
				 const char *func, const char *file, int line)
{
	if (unlikely(!(l->info.owner & tid_bit))) {
		/* the thread is not owning the lock */
		abort();
	}

	l->info.owner                  = 0;
	l->info.last_location.function = func;
	l->info.last_location.file     = file;
	l->info.last_location.line     = line;

	__SPIN_UNLOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_unlocked, 1);
}

#else /* DEBUG_THREAD */

#define HA_SPINLOCK_T        unsigned long

#define HA_SPIN_INIT(l)         ({ (*l) = 0; })
#define HA_SPIN_DESTROY(l)      ({ (*l) = 0; })
#define HA_SPIN_LOCK(lbl, l)    pl_take_s(l)
#define HA_SPIN_TRYLOCK(lbl, l) !pl_try_s(l)
#define HA_SPIN_UNLOCK(lbl, l)  pl_drop_s(l)

#define HA_RWLOCK_T          unsigned long

#define HA_RWLOCK_INIT(l)          ({ (*l) = 0; })
#define HA_RWLOCK_DESTROY(l)       ({ (*l) = 0; })
#define HA_RWLOCK_WRLOCK(lbl,l)    pl_take_w(l)
#define HA_RWLOCK_TRYWRLOCK(lbl,l) !pl_try_w(l)
#define HA_RWLOCK_WRUNLOCK(lbl,l)  pl_drop_w(l)
#define HA_RWLOCK_RDLOCK(lbl,l)    pl_take_r(l)
#define HA_RWLOCK_TRYRDLOCK(lbl,l) !pl_try_r(l)
#define HA_RWLOCK_RDUNLOCK(lbl,l)  pl_drop_r(l)

#endif /* DEBUG_THREAD */

#ifdef __x86_64__

static __inline int
__ha_cas_dw(void *target, void *compare, const void *set)
{
	char ret;

	__asm __volatile("lock cmpxchg16b %0; setz %3"
			 : "+m" (*(void **)target),
			   "=a" (((void **)compare)[0]),
			   "=d" (((void **)compare)[1]),
			   "=q" (ret)
			 : "a" (((void **)compare)[0]),
			   "d" (((void **)compare)[1]),
			   "b" (((const void **)set)[0]),
			   "c" (((const void **)set)[1])
			 : "memory", "cc");
	return (ret);
}

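/* Example (illustrative sketch, names made up): the double-width CAS swaps a
 * pointer together with a tag in one atomic operation, e.g. to avoid the ABA
 * problem in a lock-free structure. Like the single-width CAS, <cur> is
 * refreshed with the observed value on failure. On x86_64 the pair must be
 * 16-byte aligned for cmpxchg16b:
 *
 *	struct { void *ptr; unsigned long tag; } cur, next;
 *
 *	cur = head;                 // snapshot, refreshed by DWCAS on failure
 *	do {
 *		next.ptr = new_node;
 *		next.tag = cur.tag + 1;
 *	} while (!HA_ATOMIC_DWCAS(&head, &cur, &next));
 */
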
/* Use __ha_barrier_atomic* when you're trying to protect data that are
 * modified using HA_ATOMIC* (except HA_ATOMIC_STORE)
 */
static __inline void
__ha_barrier_atomic_load(void)
{
	__asm __volatile("" ::: "memory");
}

static __inline void
__ha_barrier_atomic_store(void)
{
	__asm __volatile("" ::: "memory");
}

static __inline void
__ha_barrier_atomic_full(void)
{
	__asm __volatile("" ::: "memory");
}

static __inline void
__ha_barrier_load(void)
{
	__asm __volatile("lfence" ::: "memory");
}

static __inline void
__ha_barrier_store(void)
{
	__asm __volatile("sfence" ::: "memory");
}

static __inline void
__ha_barrier_full(void)
{
	__asm __volatile("mfence" ::: "memory");
}

#elif defined(__arm__) && (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__))

/* Use __ha_barrier_atomic* when you're trying to protect data that are
 * modified using HA_ATOMIC* (except HA_ATOMIC_STORE)
 */
static __inline void
__ha_barrier_atomic_load(void)
{
	__asm __volatile("dmb" ::: "memory");
}

static __inline void
__ha_barrier_atomic_store(void)
{
	__asm __volatile("dsb" ::: "memory");
}

static __inline void
__ha_barrier_atomic_full(void)
{
	__asm __volatile("dmb" ::: "memory");
}

static __inline void
__ha_barrier_load(void)
{
	__asm __volatile("dmb" ::: "memory");
}

static __inline void
__ha_barrier_store(void)
{
	__asm __volatile("dsb" ::: "memory");
}

static __inline void
__ha_barrier_full(void)
{
	__asm __volatile("dmb" ::: "memory");
}

static __inline int __ha_cas_dw(void *target, void *compare, const void *set)
{
	uint64_t previous;
	int tmp;

	__asm __volatile("1:"
			 "ldrexd %0, [%4];"
			 "cmp %Q0, %Q2;"
			 "ittt eq;"
			 "cmpeq %R0, %R2;"
			 "strexdeq %1, %3, [%4];"
			 "cmpeq %1, #1;"
			 "beq 1b;"
			 : "=&r" (previous), "=&r" (tmp)
			 : "r" (*(uint64_t *)compare), "r" (*(uint64_t *)set), "r" (target)
			 : "memory", "cc");
	tmp = (previous == *(uint64_t *)compare);
	*(uint64_t *)compare = previous;
	return (tmp);
}

#elif defined (__aarch64__)

/* Use __ha_barrier_atomic* when you're trying to protect data that are
 * modified using HA_ATOMIC* (except HA_ATOMIC_STORE)
 */
static __inline void
__ha_barrier_atomic_load(void)
{
	__asm __volatile("dmb ishld" ::: "memory");
}

static __inline void
__ha_barrier_atomic_store(void)
{
	__asm __volatile("dmb ishst" ::: "memory");
}

static __inline void
__ha_barrier_atomic_full(void)
{
	__asm __volatile("dmb ish" ::: "memory");
}

static __inline void
__ha_barrier_load(void)
{
	__asm __volatile("dmb ishld" ::: "memory");
}

static __inline void
__ha_barrier_store(void)
{
	__asm __volatile("dmb ishst" ::: "memory");
}

static __inline void
__ha_barrier_full(void)
{
	__asm __volatile("dmb ish" ::: "memory");
}

static __inline int __ha_cas_dw(void *target, void *compare, void *set)
{
	void *value[2];
	uint64_t tmp1, tmp2;

	__asm__ __volatile__("1:"
			     "ldxp %0, %1, [%4];"
			     "mov %2, %0;"
			     "mov %3, %1;"
			     "eor %0, %0, %5;"
			     "eor %1, %1, %6;"
			     "orr %1, %0, %1;"
			     "mov %w0, #0;"
			     "cbnz %1, 2f;"
			     "stxp %w0, %7, %8, [%4];"
			     "cbnz %w0, 1b;"
			     "mov %w0, #1;"
			     "2:"
			     : "=&r" (tmp1), "=&r" (tmp2), "=&r" (value[0]), "=&r" (value[1])
			     : "r" (target), "r" (((void **)(compare))[0]), "r" (((void **)(compare))[1]), "r" (((void **)(set))[0]), "r" (((void **)(set))[1])
			     : "cc", "memory");

	memcpy(compare, &value, sizeof(value));
	return (tmp1);
}

#else
#define __ha_barrier_atomic_load __sync_synchronize
#define __ha_barrier_atomic_store __sync_synchronize
#define __ha_barrier_atomic_full __sync_synchronize
#define __ha_barrier_load __sync_synchronize
#define __ha_barrier_store __sync_synchronize
#define __ha_barrier_full __sync_synchronize
#endif

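/* Example (illustrative sketch, names made up): classic message passing with
 * explicit barriers. The producer orders its data store before the flag
 * store, and the consumer orders the flag load before reading the data:
 *
 *	// producer
 *	shared_job = job;                  // plain store of the payload
 *	__ha_barrier_store();              // data must be visible before flag
 *	_HA_ATOMIC_STORE(&job_ready, 1);
 *
 *	// consumer
 *	while (!_HA_ATOMIC_LOAD(&job_ready))
 *		ha_thread_relax();
 *	__ha_barrier_load();               // flag must be read before payload
 *	process(shared_job);
 */
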
void ha_spin_init(HA_SPINLOCK_T *l);
void ha_rwlock_init(HA_RWLOCK_T *l);

#endif /* USE_THREAD */

extern int thread_cpus_enabled_at_boot;

static inline void __ha_compiler_barrier(void)
{
	__asm __volatile("" ::: "memory");
}

int parse_nbthread(const char *arg, char **err);
int thread_get_default_count();

#ifndef _HA_ATOMIC_CAS
#define _HA_ATOMIC_CAS HA_ATOMIC_CAS
#endif /* !_HA_ATOMIC_CAS */

#ifndef _HA_ATOMIC_DWCAS
#define _HA_ATOMIC_DWCAS HA_ATOMIC_DWCAS
#endif /* !_HA_ATOMIC_DWCAS */

#ifndef _HA_ATOMIC_ADD
#define _HA_ATOMIC_ADD HA_ATOMIC_ADD
#endif /* !_HA_ATOMIC_ADD */

#ifndef _HA_ATOMIC_XADD
#define _HA_ATOMIC_XADD HA_ATOMIC_XADD
#endif /* !_HA_ATOMIC_XADD */

#ifndef _HA_ATOMIC_SUB
#define _HA_ATOMIC_SUB HA_ATOMIC_SUB
#endif /* !_HA_ATOMIC_SUB */

#ifndef _HA_ATOMIC_AND
#define _HA_ATOMIC_AND HA_ATOMIC_AND
#endif /* !_HA_ATOMIC_AND */

#ifndef _HA_ATOMIC_OR
#define _HA_ATOMIC_OR HA_ATOMIC_OR
#endif /* !_HA_ATOMIC_OR */

#ifndef _HA_ATOMIC_XCHG
#define _HA_ATOMIC_XCHG HA_ATOMIC_XCHG
#endif /* !_HA_ATOMIC_XCHG */

#ifndef _HA_ATOMIC_STORE
#define _HA_ATOMIC_STORE HA_ATOMIC_STORE
#endif /* !_HA_ATOMIC_STORE */

#ifndef _HA_ATOMIC_LOAD
#define _HA_ATOMIC_LOAD HA_ATOMIC_LOAD
#endif /* !_HA_ATOMIC_LOAD */
#endif /* _COMMON_HATHREADS_H */