/*
 * functions about threads.
 *
 * Copyright (C) 2017 Christopher Faulet - cfaulet@haproxy.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#define _GNU_SOURCE
#include <unistd.h>
#include <stdlib.h>
#include <fcntl.h>

#ifdef USE_CPU_AFFINITY
#include <sched.h>
#endif

#ifdef __FreeBSD__
#include <sys/cpuset.h>
#endif

#include <haproxy/cfgparse.h>
#include <haproxy/fd.h>
#include <haproxy/global.h>
#include <haproxy/log.h>
#include <haproxy/thread.h>
#include <haproxy/tools.h>

struct thread_info ha_thread_info[MAX_THREADS] = { };
THREAD_LOCAL struct thread_info *ti = &ha_thread_info[0];

#ifdef USE_THREAD

volatile unsigned long threads_want_rdv_mask __read_mostly = 0;
volatile unsigned long threads_harmless_mask = 0;
volatile unsigned long threads_idle_mask = 0;
volatile unsigned long threads_sync_mask = 0;
volatile unsigned long all_threads_mask __read_mostly = 1; // nbthread 1 assumed by default
THREAD_LOCAL unsigned int tid = 0;
THREAD_LOCAL unsigned long tid_bit = (1UL << 0);
int thread_cpus_enabled_at_boot = 1;


#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
struct lock_stat lock_stats[LOCK_LABELS];
#endif

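/* Note (illustrative, not from the original file): each thread owns exactly
 * one bit, derived from its thread ID, in every mask above. For instance
 * thread 2 would have:
 *
 *     tid     = 2;
 *     tid_bit = 1UL << tid;               // 0x4
 *     (all_threads_mask & tid_bit) != 0   // true once nbthread >= 3
 */
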
/* Marks the thread as harmless until the last thread using the rendez-vous
 * point quits, excluding the current one. Thus an isolated thread may be safely
 * marked as harmless. Given that we can wait for a long time, sched_yield() is
 * used when available to offer the CPU resources to competing threads if
 * needed.
 */
void thread_harmless_till_end()
{
	_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
	while (threads_want_rdv_mask & all_threads_mask & ~tid_bit) {
		ha_thread_relax();
	}
}
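
/* Example (illustrative): a thread that is about to spin or block while other
 * threads may be requesting isolation can first declare itself harmless, so
 * that an isolation requester is not kept waiting on it:
 *
 *     thread_harmless_till_end();
 *     do_blocking_wait();       // hypothetical blocking call
 */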

/* Isolates the current thread : request the ability to work while all other
 * threads are harmless, as defined by thread_harmless_now() (i.e. they're not
 * going to touch any visible memory area). Only returns once all of them are
 * harmless, with the current thread's bit in threads_harmless_mask cleared.
 * Needs to be completed using thread_release().
 */
void thread_isolate()
{
	unsigned long old;

	_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
	__ha_barrier_atomic_store();
	_HA_ATOMIC_OR(&threads_want_rdv_mask, tid_bit);

	/* wait for all threads to become harmless */
	old = threads_harmless_mask;
	while (1) {
		if (unlikely((old & all_threads_mask) != all_threads_mask))
			old = threads_harmless_mask;
		else if (_HA_ATOMIC_CAS(&threads_harmless_mask, &old, old & ~tid_bit))
			break;

		ha_thread_relax();
	}
	/* one thread gets released at a time here, with its harmless bit off.
	 * The loss of this bit makes the other ones continue to spin while the
	 * thread is working alone.
	 */
}
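
/* Example (illustrative): the isolate/release pair is how a thread gets
 * exclusive access to data that other threads normally read without locks:
 *
 *     thread_isolate();
 *     // ... modify or tear down the shared structure safely ...
 *     thread_release();
 */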
95
Willy Tarreau88d1c5d2021-08-04 11:44:17 +020096/* Isolates the current thread : request the ability to work while all other
97 * threads are idle, as defined by thread_idle_now(). It only returns once
98 * all of them are both harmless and idle, with the current thread's bit in
99 * threads_harmless_mask and idle_mask cleared. Needs to be completed using
100 * thread_release(). By doing so the thread also engages in being safe against
101 * any actions that other threads might be about to start under the same
102 * conditions. This specifically targets destruction of any internal structure,
103 * which implies that the current thread may not hold references to any object.
104 *
105 * Note that a concurrent thread_isolate() will usually win against
106 * thread_isolate_full() as it doesn't consider the idle_mask, allowing it to
107 * get back to the poller or any other fully idle location, that will
108 * ultimately release this one.
109 */
110void thread_isolate_full()
111{
112 unsigned long old;
113
114 _HA_ATOMIC_OR(&threads_idle_mask, tid_bit);
115 _HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
116 __ha_barrier_atomic_store();
117 _HA_ATOMIC_OR(&threads_want_rdv_mask, tid_bit);
118
119 /* wait for all threads to become harmless */
120 old = threads_harmless_mask;
121 while (1) {
122 unsigned long idle = _HA_ATOMIC_LOAD(&threads_idle_mask);
123
124 if (unlikely((old & all_threads_mask) != all_threads_mask))
125 old = _HA_ATOMIC_LOAD(&threads_harmless_mask);
126 else if ((idle & all_threads_mask) == all_threads_mask &&
127 _HA_ATOMIC_CAS(&threads_harmless_mask, &old, old & ~tid_bit))
128 break;
129
130 ha_thread_relax();
131 }
132
133 /* we're not idle anymore at this point. Other threads waiting on this
134 * condition will need to wait until out next pass to the poller, or
135 * our next call to thread_isolate_full().
136 */
137 _HA_ATOMIC_AND(&threads_idle_mask, ~tid_bit);
138}
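
/* Example (illustrative): full isolation is the stronger form to use before
 * destroying a structure that idle threads might still hold references to:
 *
 *     thread_isolate_full();
 *     destroy_shared_object(obj);   // hypothetical teardown helper
 *     thread_release();
 */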

/* Cancels the effect of thread_isolate() by releasing the current thread's bit
 * in threads_want_rdv_mask. This immediately allows other threads to expect to
 * be executed, though they will first have to wait for this thread to become
 * harmless again (possibly by reaching the poller again).
 */
void thread_release()
{
	_HA_ATOMIC_AND(&threads_want_rdv_mask, ~tid_bit);
}

/* Cancels the effect of thread_isolate() by releasing the current thread's bit
 * in threads_want_rdv_mask and by marking this thread as harmless until the
 * last worker finishes. The difference with thread_release() is that this one
 * will not leave the function before others are notified to do the same, so it
 * guarantees that the current thread will not pass through a subsequent call
 * to thread_isolate() before others finish.
 */
void thread_sync_release()
{
	_HA_ATOMIC_OR(&threads_sync_mask, tid_bit);
	__ha_barrier_atomic_store();
	_HA_ATOMIC_AND(&threads_want_rdv_mask, ~tid_bit);

	while (threads_want_rdv_mask & all_threads_mask) {
		_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
		while (threads_want_rdv_mask & all_threads_mask)
			ha_thread_relax();
		HA_ATOMIC_AND(&threads_harmless_mask, ~tid_bit);
	}

	/* the current thread is not harmless anymore, thread_isolate()
	 * is forced to wait till all waiters finish.
	 */
	_HA_ATOMIC_AND(&threads_sync_mask, ~tid_bit);
	while (threads_sync_mask & all_threads_mask)
		ha_thread_relax();
}
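
/* Example (illustrative): when several threads perform the same isolated
 * operation in turn, ending each pass with thread_sync_release() instead of
 * thread_release() guarantees that none of them re-enters thread_isolate()
 * before all of them have been released:
 *
 *     thread_isolate();
 *     do_per_thread_step();     // hypothetical per-thread work
 *     thread_sync_release();
 */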

/* send signal <sig> to thread <thr> */
void ha_tkill(unsigned int thr, int sig)
{
	pthread_kill(ha_thread_info[thr].pthread, sig);
}

/* send signal <sig> to all threads. The calling thread is signaled last in
 * order to allow all threads to synchronize in the handler.
 */
void ha_tkillall(int sig)
{
	unsigned int thr;

	for (thr = 0; thr < global.nbthread; thr++) {
		if (!(all_threads_mask & (1UL << thr)))
			continue;
		if (thr == tid)
			continue;
		pthread_kill(ha_thread_info[thr].pthread, sig);
	}
	raise(sig);
}
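
/* Example (illustrative): a debugging or panic handler could use this to make
 * every thread run the same signal handler, with the caller going last:
 *
 *     ha_tkillall(SIGUSR2);     // hypothetical choice of signal
 */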

/* these calls are used as callbacks at init time when debugging is on */
void ha_spin_init(HA_SPINLOCK_T *l)
{
	HA_SPIN_INIT(l);
}

/* these calls are used as callbacks at init time when debugging is on */
void ha_rwlock_init(HA_RWLOCK_T *l)
{
	HA_RWLOCK_INIT(l);
}

/* returns the number of CPUs the current process is enabled to run on */
static int thread_cpus_enabled()
{
	int ret = 1;

#ifdef USE_CPU_AFFINITY
#if defined(__linux__) && defined(CPU_COUNT)
	cpu_set_t mask;

	if (sched_getaffinity(0, sizeof(mask), &mask) == 0)
		ret = CPU_COUNT(&mask);
#elif defined(__FreeBSD__) && defined(USE_CPU_AFFINITY)
	cpuset_t cpuset;
	if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
	                       sizeof(cpuset), &cpuset) == 0)
		ret = CPU_COUNT(&cpuset);
#elif defined(__APPLE__)
	ret = (int)sysconf(_SC_NPROCESSORS_ONLN);
#endif
#endif
	ret = MAX(ret, 1);
	ret = MIN(ret, MAX_THREADS);
	return ret;
}

/* Returns 1 if the cpu set is currently restricted for the process else 0.
 * Currently only implemented for the Linux platform.
 */
int thread_cpu_mask_forced()
{
#if defined(__linux__)
	const int cpus_avail = sysconf(_SC_NPROCESSORS_ONLN);
	return cpus_avail != thread_cpus_enabled();
#else
	return 0;
#endif
}
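
/* Example (illustrative): starting haproxy as "taskset -c 0,1 ./haproxy" on an
 * 8-CPU Linux machine makes thread_cpus_enabled() return 2 while
 * _SC_NPROCESSORS_ONLN still reports 8, so thread_cpu_mask_forced() returns 1.
 */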
250
Willy Tarreauf734ebf2020-09-09 17:07:54 +0200251/* Depending on the platform and how libpthread was built, pthread_exit() may
252 * involve some code in libgcc_s that would be loaded on exit for the first
253 * time, causing aborts if the process is chrooted. It's harmless bit very
254 * dirty. There isn't much we can do to make sure libgcc_s is loaded only if
255 * needed, so what we do here is that during early boot we create a dummy
256 * thread that immediately exits. This will lead to libgcc_s being loaded
257 * during boot on the platforms where it's required.
258 */
259static void *dummy_thread_function(void *data)
260{
261 pthread_exit(NULL);
262 return NULL;
263}
264
265static inline void preload_libgcc_s(void)
266{
267 pthread_t dummy_thread;
268 pthread_create(&dummy_thread, NULL, dummy_thread_function, NULL);
269 pthread_join(dummy_thread, NULL);
270}
271
__attribute__((constructor))
static void __thread_init(void)
{
	char *ptr = NULL;

	if (MAX_THREADS < 1 || MAX_THREADS > LONGBITS) {
		ha_alert("MAX_THREADS value must be between 1 and %d inclusive; "
		         "HAProxy was built with value %d, please fix it and rebuild.\n",
		         LONGBITS, MAX_THREADS);
		exit(1);
	}

	preload_libgcc_s();

	thread_cpus_enabled_at_boot = thread_cpus_enabled();

	memprintf(&ptr, "Built with multi-threading support (MAX_THREADS=%d, default=%d).",
	          MAX_THREADS, thread_cpus_enabled_at_boot);
	hap_register_build_opts(ptr, 1);

#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
	memset(lock_stats, 0, sizeof(lock_stats));
#endif
}

#else

REGISTER_BUILD_OPTS("Built without multi-threading support (USE_THREAD not set).");

#endif // USE_THREAD


/* Parse the number of threads in argument <arg>, returns it and adjusts a few
 * internal variables accordingly, or fails and returns zero with an error
 * reason in <err>. May be called multiple times while parsing.
 */
int parse_nbthread(const char *arg, char **err)
{
	long nbthread;
	char *errptr;

	nbthread = strtol(arg, &errptr, 10);
	if (!*arg || *errptr) {
		memprintf(err, "passed a missing or unparsable integer value in '%s'", arg);
		return 0;
	}

#ifndef USE_THREAD
	if (nbthread != 1) {
		memprintf(err, "specified with a value other than 1 while HAProxy is not compiled with threads support. Please check build options for USE_THREAD");
		return 0;
	}
#else
	if (nbthread < 1 || nbthread > MAX_THREADS) {
		memprintf(err, "value must be between 1 and %d (was %ld)", MAX_THREADS, nbthread);
		return 0;
	}

	all_threads_mask = nbits(nbthread);
#endif
	return nbthread;
}
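
/* Example (illustrative): parsing "nbthread 4" ends up calling
 * parse_nbthread("4", &err), which returns 4 and widens all_threads_mask to
 * nbits(4) == 0xf, i.e. one bit per expected thread.
 */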