blob: a3400010bc3a52bff0798834b10f35ba490a128c [file] [log] [blame]
Christopher Faulet1a2b56e2017-10-12 16:09:09 +02001/*
2 * functions about threads.
3 *
 * Copyright (C) 2017 Christopher Faulet - cfaulet@haproxy.com
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
Willy Tarreau149ab772019-01-26 14:27:06 +010013#define _GNU_SOURCE
Christopher Faulet339fff82017-10-19 11:59:15 +020014#include <unistd.h>
Willy Tarreau0ccd3222018-07-30 10:34:35 +020015#include <stdlib.h>
Christopher Faulet339fff82017-10-19 11:59:15 +020016#include <fcntl.h>
17
Willy Tarreauaa992762021-10-06 23:33:20 +020018#include <signal.h>
19#include <unistd.h>
20#ifdef _POSIX_PRIORITY_SCHEDULING
21#include <sched.h>
22#endif
23
Willy Tarreau5e03dfa2021-10-06 22:53:51 +020024#ifdef USE_THREAD
25# include <pthread.h>
26#endif
27
Willy Tarreau149ab772019-01-26 14:27:06 +010028#ifdef USE_CPU_AFFINITY
Willy Tarreaud10385a2021-10-06 22:22:40 +020029# include <sched.h>
30# if defined(__FreeBSD__) || defined(__DragonFly__)
31# include <sys/param.h>
32# ifdef __FreeBSD__
33# include <sys/cpuset.h>
34# endif
35# include <pthread_np.h>
36# endif
37# ifdef __APPLE__
38# include <mach/mach_types.h>
39# include <mach/thread_act.h>
40# include <mach/thread_policy.h>
41# endif
42# include <haproxy/cpuset.h>
Willy Tarreau149ab772019-01-26 14:27:06 +010043#endif
44
Willy Tarreau6be78492020-06-05 00:00:29 +020045#include <haproxy/cfgparse.h>
Willy Tarreaub2551052020-06-09 09:07:15 +020046#include <haproxy/fd.h>
47#include <haproxy/global.h>
Willy Tarreau11bd6f72021-05-08 20:33:02 +020048#include <haproxy/log.h>
Willy Tarreau3f567e42020-05-28 15:29:19 +020049#include <haproxy/thread.h>
Willy Tarreaudced3eb2021-10-05 18:48:23 +020050#include <haproxy/time.h>
Willy Tarreau48fbcae2020-06-03 18:09:46 +020051#include <haproxy/tools.h>
Christopher Faulet1a2b56e2017-10-12 16:09:09 +020052
/* Per-thread state, one slot per possible thread; slot 0 belongs to the main
 * thread. <ti> always points to the calling thread's own entry.
 */
struct thread_info ha_thread_info[MAX_THREADS] = { };
THREAD_LOCAL struct thread_info *ti = &ha_thread_info[0];
Christopher Faulet1a2b56e2017-10-12 16:09:09 +020055
#ifdef USE_THREAD

/* Thread coordination masks: each thread owns exactly one bit (tid_bit). */
volatile unsigned long threads_want_rdv_mask __read_mostly = 0; /* threads requesting a rendez-vous point */
volatile unsigned long threads_harmless_mask = 0; /* threads promising not to touch shared memory */
volatile unsigned long threads_idle_mask = 0; /* harmless threads that are also fully idle */
volatile unsigned long threads_sync_mask = 0; /* threads waiting in thread_sync_release() */
volatile unsigned long all_threads_mask __read_mostly = 1; // nbthread 1 assumed by default
THREAD_LOCAL unsigned int tid = 0; /* numeric ID of the current thread */
THREAD_LOCAL unsigned long tid_bit = (1UL << 0); /* cached (1UL << tid) */
int thread_cpus_enabled_at_boot = 1; /* number of CPUs the process could run on at boot */
static pthread_t ha_pthread[MAX_THREADS] = { }; /* pthread handle for each thread ID */
Willy Tarreau0c026f42018-08-01 19:12:20 +020067
/* Marks the thread as harmless until the last thread using the rendez-vous
 * point quits, excluding the current one. Thus an isolated thread may be safely
 * marked as harmless. Given that we can wait for a long time, sched_yield() is
 * used when available to offer the CPU resources to competing threads if
 * needed.
 */
void thread_harmless_till_end()
{
	_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
	while (threads_want_rdv_mask & all_threads_mask & ~tid_bit) {
		/* spin until no other thread requests the rendez-vous */
		ha_thread_relax();
	}
}
81
/* Isolates the current thread : request the ability to work while all other
 * threads are harmless, as defined by thread_harmless_now() (i.e. they're not
 * going to touch any visible memory area). Only returns once all of them are
 * harmless, with the current thread's bit in threads_harmless_mask cleared.
 * Needs to be completed using thread_release().
 */
void thread_isolate()
{
	unsigned long old;

	/* mark ourselves harmless first so that concurrent isolation
	 * requests can also be satisfied while we wait below.
	 */
	_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
	__ha_barrier_atomic_store();
	_HA_ATOMIC_OR(&threads_want_rdv_mask, tid_bit);

	/* wait for all threads to become harmless */
	old = threads_harmless_mask;
	while (1) {
		if (unlikely((old & all_threads_mask) != all_threads_mask))
			old = threads_harmless_mask;
		else if (_HA_ATOMIC_CAS(&threads_harmless_mask, &old, old & ~tid_bit))
			break;

		ha_thread_relax();
	}
	/* one thread gets released at a time here, with its harmless bit off.
	 * The loss of this bit makes the other one continue to spin while the
	 * thread is working alone.
	 */
}
111
/* Isolates the current thread : request the ability to work while all other
 * threads are idle, as defined by thread_idle_now(). It only returns once
 * all of them are both harmless and idle, with the current thread's bit in
 * threads_harmless_mask and idle_mask cleared. Needs to be completed using
 * thread_release(). By doing so the thread also engages in being safe against
 * any actions that other threads might be about to start under the same
 * conditions. This specifically targets destruction of any internal structure,
 * which implies that the current thread may not hold references to any object.
 *
 * Note that a concurrent thread_isolate() will usually win against
 * thread_isolate_full() as it doesn't consider the idle_mask, allowing it to
 * get back to the poller or any other fully idle location, that will
 * ultimately release this one.
 */
void thread_isolate_full()
{
	unsigned long old;

	_HA_ATOMIC_OR(&threads_idle_mask, tid_bit);
	_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
	__ha_barrier_atomic_store();
	_HA_ATOMIC_OR(&threads_want_rdv_mask, tid_bit);

	/* wait for all threads to become harmless */
	old = threads_harmless_mask;
	while (1) {
		unsigned long idle = _HA_ATOMIC_LOAD(&threads_idle_mask);

		if (unlikely((old & all_threads_mask) != all_threads_mask))
			old = _HA_ATOMIC_LOAD(&threads_harmless_mask);
		else if ((idle & all_threads_mask) == all_threads_mask &&
			 _HA_ATOMIC_CAS(&threads_harmless_mask, &old, old & ~tid_bit))
			break;

		ha_thread_relax();
	}

	/* we're not idle anymore at this point. Other threads waiting on this
	 * condition will need to wait until our next pass to the poller, or
	 * our next call to thread_isolate_full().
	 */
	_HA_ATOMIC_AND(&threads_idle_mask, ~tid_bit);
}
155
/* Cancels the effect of thread_isolate() by releasing the current thread's bit
 * in threads_want_rdv_mask. This immediately allows other threads to expect to
 * be executed, though they will first have to wait for this thread to become
 * harmless again (possibly by reaching the poller again).
 */
void thread_release()
{
	_HA_ATOMIC_AND(&threads_want_rdv_mask, ~tid_bit);
}
Christopher Faulet339fff82017-10-19 11:59:15 +0200165
/* Cancels the effect of thread_isolate() by releasing the current thread's bit
 * in threads_want_rdv_mask and by marking this thread as harmless until the
 * last worker finishes. The difference with thread_release() is that this one
 * will not leave the function before others are notified to do the same, so it
 * guarantees that the current thread will not pass through a subsequent call
 * to thread_isolate() before others finish.
 */
void thread_sync_release()
{
	_HA_ATOMIC_OR(&threads_sync_mask, tid_bit);
	__ha_barrier_atomic_store();
	_HA_ATOMIC_AND(&threads_want_rdv_mask, ~tid_bit);

	/* stay harmless while any rendez-vous request remains pending */
	while (threads_want_rdv_mask & all_threads_mask) {
		_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
		while (threads_want_rdv_mask & all_threads_mask)
			ha_thread_relax();
		/* NOTE(review): this is the non-underscore variant while the
		 * rest of the function uses _HA_ATOMIC_* — confirm intended.
		 */
		HA_ATOMIC_AND(&threads_harmless_mask, ~tid_bit);
	}

	/* the current thread is not harmless anymore, thread_isolate()
	 * is forced to wait till all waiters finish.
	 */
	_HA_ATOMIC_AND(&threads_sync_mask, ~tid_bit);
	while (threads_sync_mask & all_threads_mask)
		ha_thread_relax();
}
193
/* Sets up threads, signals and masks, and starts threads 2 and above.
 * Does nothing when threads are disabled.
 */
void setup_extra_threads(void *(*handler)(void *))
{
	sigset_t blocked_sig, old_sig;
	int i;

	/* ensure the signals will be blocked in every thread; only keep the
	 * synchronous fault signals (and SIGPROF) deliverable so crashes are
	 * still reported from the faulting thread.
	 */
	sigfillset(&blocked_sig);
	sigdelset(&blocked_sig, SIGPROF);
	sigdelset(&blocked_sig, SIGBUS);
	sigdelset(&blocked_sig, SIGFPE);
	sigdelset(&blocked_sig, SIGILL);
	sigdelset(&blocked_sig, SIGSEGV);
	pthread_sigmask(SIG_SETMASK, &blocked_sig, &old_sig);

	/* Create nbthread-1 thread. The first thread is the current process.
	 * NOTE(review): pthread_create()'s return value is unchecked; a failed
	 * creation leaves ha_pthread[i] unset — confirm acceptable at startup.
	 */
	ha_pthread[0] = pthread_self();
	for (i = 1; i < global.nbthread; i++)
		pthread_create(&ha_pthread[i], NULL, handler, (void *)(long)i);
}
216
/* waits for all threads to terminate. Does nothing when threads are
 * disabled. Also dumps lock statistics when lock debugging is built in.
 */
void wait_for_threads_completion()
{
	int i;

	/* Wait the end of other threads (thread 0 is the caller itself) */
	for (i = 1; i < global.nbthread; i++)
		pthread_join(ha_pthread[i], NULL);

#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
	show_lock_stats();
#endif
}
232
/* Tries to set the current thread's CPU affinity according to the cpu_map.
 * No-op unless built with USE_CPU_AFFINITY, and never applied to the master
 * process.
 */
void set_thread_cpu_affinity()
{
#if defined(USE_CPU_AFFINITY)
	/* no affinity setting for the master process */
	if (master)
		return;

	/* Now the CPU affinity for all threads: restrict the per-thread map
	 * to the process-wide map when one was configured.
	 */
	if (ha_cpuset_count(&cpu_map.proc))
		ha_cpuset_and(&cpu_map.thread[tid], &cpu_map.proc);

	if (ha_cpuset_count(&cpu_map.thread[tid])) {/* only do this if the thread has a THREAD map */
# if defined(__APPLE__)
		/* Note: this API is limited to the first 32/64 CPUs */
		unsigned long set = cpu_map.thread[tid].cpuset;
		int j;

		/* bind to each CPU of the set, one Mach policy call per CPU */
		while ((j = ffsl(set)) > 0) {
			thread_affinity_policy_data_t cpu_set = { j - 1 };
			thread_port_t mthread;

			mthread = pthread_mach_thread_np(ha_pthread[tid]);
			thread_policy_set(mthread, THREAD_AFFINITY_POLICY, (thread_policy_t)&cpu_set, 1);
			set &= ~(1UL << (j - 1));
		}
# else
		struct hap_cpuset *set = &cpu_map.thread[tid];

		pthread_setaffinity_np(ha_pthread[tid], sizeof(set->cpuset), &set->cpuset);
# endif
	}
#endif /* USE_CPU_AFFINITY */
}
267
/* Retrieves the opaque pthread_t of thread <thr> cast to an unsigned long long
 * since POSIX took great care of not specifying its representation, making it
 * hard to export for post-mortem analysis. For this reason we copy it into a
 * union and will use the smallest scalar type at least as large as its size,
 * which will keep endianness and alignment for all regular sizes. As a last
 * resort we end up with a long long aligned to the first bytes in memory, which
 * will be endian-dependent if pthread_t is larger than a long long (not seen
 * yet).
 */
unsigned long long ha_get_pthread_id(unsigned int thr)
{
	union {
		pthread_t t;
		unsigned long long ll;
		unsigned int i;
		unsigned short s;
		unsigned char c;
	} u = { 0 };

	u.t = ha_pthread[thr];

	/* pick the smallest member fully covering pthread_t; all the
	 * sizeof comparisons fold at compile time.
	 */
	if (sizeof(u.t) <= sizeof(u.c))
		return u.c;
	else if (sizeof(u.t) <= sizeof(u.s))
		return u.s;
	else if (sizeof(u.t) <= sizeof(u.i))
		return u.i;
	return u.ll;
}
297
/* send signal <sig> to thread <thr>, identified by its internal thread ID */
void ha_tkill(unsigned int thr, int sig)
{
	pthread_kill(ha_pthread[thr], sig);
}
303
304/* send signal <sig> to all threads. The calling thread is signaled last in
305 * order to allow all threads to synchronize in the handler.
306 */
307void ha_tkillall(int sig)
308{
309 unsigned int thr;
310
311 for (thr = 0; thr < global.nbthread; thr++) {
312 if (!(all_threads_mask & (1UL << thr)))
313 continue;
314 if (thr == tid)
315 continue;
Willy Tarreau5e03dfa2021-10-06 22:53:51 +0200316 pthread_kill(ha_pthread[thr], sig);
Willy Tarreau2beaaf72019-05-22 08:43:34 +0200317 }
318 raise(sig);
319}
320
/* Briefly gives up the CPU to competing threads: uses sched_yield() when
 * POSIX priority scheduling is available, otherwise a CPU relax instruction.
 */
void ha_thread_relax(void)
{
#ifdef _POSIX_PRIORITY_SCHEDULING
	sched_yield();
#else
	pl_cpu_relax();
#endif
}
329
/* these calls are used as callbacks at init time when debugging is on;
 * simply initializes spinlock <l> via the regular macro.
 */
void ha_spin_init(HA_SPINLOCK_T *l)
{
	HA_SPIN_INIT(l);
}
335
/* these calls are used as callbacks at init time when debugging is on;
 * simply initializes rwlock <l> via the regular macro.
 */
void ha_rwlock_init(HA_RWLOCK_T *l)
{
	HA_RWLOCK_INIT(l);
}
341
Willy Tarreau149ab772019-01-26 14:27:06 +0100342/* returns the number of CPUs the current process is enabled to run on */
343static int thread_cpus_enabled()
344{
345 int ret = 1;
346
347#ifdef USE_CPU_AFFINITY
348#if defined(__linux__) && defined(CPU_COUNT)
349 cpu_set_t mask;
350
351 if (sched_getaffinity(0, sizeof(mask), &mask) == 0)
352 ret = CPU_COUNT(&mask);
Olivier Houchard46453d32019-04-11 00:06:47 +0200353#elif defined(__FreeBSD__) && defined(USE_CPU_AFFINITY)
354 cpuset_t cpuset;
355 if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
356 sizeof(cpuset), &cpuset) == 0)
357 ret = CPU_COUNT(&cpuset);
David CARLIER6a906012021-01-15 08:09:56 +0000358#elif defined(__APPLE__)
359 ret = (int)sysconf(_SC_NPROCESSORS_ONLN);
Willy Tarreau149ab772019-01-26 14:27:06 +0100360#endif
361#endif
362 ret = MAX(ret, 1);
363 ret = MIN(ret, MAX_THREADS);
364 return ret;
365}
366
/* Returns 1 if the cpu set is currently restricted for the process else 0.
 * Currently only implemented for the Linux platform.
 */
int thread_cpu_mask_forced()
{
#if defined(__linux__)
	/* restricted whenever the affinity set differs from the online count */
	return thread_cpus_enabled() != (int)sysconf(_SC_NPROCESSORS_ONLN);
#else
	return 0;
#endif
}
379
/* Below come the lock-debugging functions */

#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)

/* Per-label contention counters, filled by the instrumented lock wrappers
 * below and dumped by show_lock_stats().
 */
struct lock_stat lock_stats[LOCK_LABELS];

/* this is only used below: maps a lock label to its printable name. Any
 * value outside the enum indicates memory corruption and aborts.
 */
static const char *lock_label(enum lock_label label)
{
	switch (label) {
	case TASK_RQ_LOCK: return "TASK_RQ";
	case TASK_WQ_LOCK: return "TASK_WQ";
	case LISTENER_LOCK: return "LISTENER";
	case PROXY_LOCK: return "PROXY";
	case SERVER_LOCK: return "SERVER";
	case LBPRM_LOCK: return "LBPRM";
	case SIGNALS_LOCK: return "SIGNALS";
	case STK_TABLE_LOCK: return "STK_TABLE";
	case STK_SESS_LOCK: return "STK_SESS";
	case APPLETS_LOCK: return "APPLETS";
	case PEER_LOCK: return "PEER";
	case SHCTX_LOCK: return "SHCTX";
	case SSL_LOCK: return "SSL";
	case SSL_GEN_CERTS_LOCK: return "SSL_GEN_CERTS";
	case PATREF_LOCK: return "PATREF";
	case PATEXP_LOCK: return "PATEXP";
	case VARS_LOCK: return "VARS";
	case COMP_POOL_LOCK: return "COMP_POOL";
	case LUA_LOCK: return "LUA";
	case NOTIF_LOCK: return "NOTIF";
	case SPOE_APPLET_LOCK: return "SPOE_APPLET";
	case DNS_LOCK: return "DNS";
	case PID_LIST_LOCK: return "PID_LIST";
	case EMAIL_ALERTS_LOCK: return "EMAIL_ALERTS";
	case PIPES_LOCK: return "PIPES";
	case TLSKEYS_REF_LOCK: return "TLSKEYS_REF";
	case AUTH_LOCK: return "AUTH";
	case LOGSRV_LOCK: return "LOGSRV";
	case DICT_LOCK: return "DICT";
	case PROTO_LOCK: return "PROTO";
	case QUEUE_LOCK: return "QUEUE";
	case CKCH_LOCK: return "CKCH";
	case SNI_LOCK: return "SNI";
	case SSL_SERVER_LOCK: return "SSL_SERVER";
	case SFT_LOCK: return "SFT";
	case IDLE_CONNS_LOCK: return "IDLE_CONNS";
	case QUIC_LOCK: return "QUIC";
	case OTHER_LOCK: return "OTHER";
	case DEBUG1_LOCK: return "DEBUG1";
	case DEBUG2_LOCK: return "DEBUG2";
	case DEBUG3_LOCK: return "DEBUG3";
	case DEBUG4_LOCK: return "DEBUG4";
	case DEBUG5_LOCK: return "DEBUG5";
	case LOCK_LABELS: break; /* keep compiler happy */
	};
	/* only way to come here is consecutive to an internal bug */
	abort();
}
438
/* Dumps the per-label lock statistics to stderr: lock/unlock counts and
 * cumulated/average wait times for write, seek and read operations.
 */
void show_lock_stats()
{
	int lbl;

	for (lbl = 0; lbl < LOCK_LABELS; lbl++) {
		/* unused locks get a single summary line */
		if (!lock_stats[lbl].num_write_locked &&
		    !lock_stats[lbl].num_seek_locked &&
		    !lock_stats[lbl].num_read_locked) {
			fprintf(stderr,
			        "Stats about Lock %s: not used\n",
			        lock_label(lbl));
			continue;
		}

		fprintf(stderr,
		        "Stats about Lock %s: \n",
		        lock_label(lbl));

		/* the (%ld) value is unlock-lock, i.e. non-zero reveals an imbalance */
		if (lock_stats[lbl].num_write_locked)
			fprintf(stderr,
			        "\t # write lock : %lu\n"
			        "\t # write unlock: %lu (%ld)\n"
			        "\t # wait time for write : %.3f msec\n"
			        "\t # wait time for write/lock: %.3f nsec\n",
			        lock_stats[lbl].num_write_locked,
			        lock_stats[lbl].num_write_unlocked,
			        lock_stats[lbl].num_write_unlocked - lock_stats[lbl].num_write_locked,
			        (double)lock_stats[lbl].nsec_wait_for_write / 1000000.0,
			        lock_stats[lbl].num_write_locked ? ((double)lock_stats[lbl].nsec_wait_for_write / (double)lock_stats[lbl].num_write_locked) : 0);

		if (lock_stats[lbl].num_seek_locked)
			fprintf(stderr,
			        "\t # seek lock : %lu\n"
			        "\t # seek unlock : %lu (%ld)\n"
			        "\t # wait time for seek : %.3f msec\n"
			        "\t # wait time for seek/lock : %.3f nsec\n",
			        lock_stats[lbl].num_seek_locked,
			        lock_stats[lbl].num_seek_unlocked,
			        lock_stats[lbl].num_seek_unlocked - lock_stats[lbl].num_seek_locked,
			        (double)lock_stats[lbl].nsec_wait_for_seek / 1000000.0,
			        lock_stats[lbl].num_seek_locked ? ((double)lock_stats[lbl].nsec_wait_for_seek / (double)lock_stats[lbl].num_seek_locked) : 0);

		if (lock_stats[lbl].num_read_locked)
			fprintf(stderr,
			        "\t # read lock : %lu\n"
			        "\t # read unlock : %lu (%ld)\n"
			        "\t # wait time for read : %.3f msec\n"
			        "\t # wait time for read/lock : %.3f nsec\n",
			        lock_stats[lbl].num_read_locked,
			        lock_stats[lbl].num_read_unlocked,
			        lock_stats[lbl].num_read_unlocked - lock_stats[lbl].num_read_locked,
			        (double)lock_stats[lbl].nsec_wait_for_read / 1000000.0,
			        lock_stats[lbl].num_read_locked ? ((double)lock_stats[lbl].nsec_wait_for_read / (double)lock_stats[lbl].num_read_locked) : 0);
	}
}
494
/* Initializes the debug rwlock <l>: zeroes the tracking info then sets up
 * the underlying lock.
 */
void __ha_rwlock_init(struct ha_rwlock *l)
{
	memset(l, 0, sizeof(struct ha_rwlock));
	__RWLOCK_INIT(&l->lock);
}
500
/* Destroys the debug rwlock <l> and clears its memory so that late accesses
 * are more likely to be caught.
 */
void __ha_rwlock_destroy(struct ha_rwlock *l)
{
	__RWLOCK_DESTROY(&l->lock);
	memset(l, 0, sizeof(struct ha_rwlock));
}
506
507
/* Takes rwlock <l> for writes, accounting the wait time under label <lbl> and
 * recording the call site. Aborts if the calling thread already holds the
 * lock in any mode (recursion is a bug).
 */
void __ha_rwlock_wrlock(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line)
{
	uint64_t start_time;

	if ((l->info.cur_readers | l->info.cur_seeker | l->info.cur_writer) & tid_bit)
		abort();

	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);

	/* measure time actually spent waiting for the lock */
	start_time = now_mono_time();
	__RWLOCK_WRLOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));

	HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);

	/* exclusive owner: plain stores are sufficient here */
	l->info.cur_writer = tid_bit;
	l->info.last_location.function = func;
	l->info.last_location.file = file;
	l->info.last_location.line = line;

	HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
}
531
/* Tries to take rwlock <l> for writes. Returns 0 on success and records the
 * call site, otherwise returns the underlying lock's non-zero error code.
 * Aborts if the calling thread already holds the lock in any mode.
 */
int __ha_rwlock_trywrlock(enum lock_label lbl, struct ha_rwlock *l,
                          const char *func, const char *file, int line)
{
	uint64_t start_time;
	int r;

	if ((l->info.cur_readers | l->info.cur_seeker | l->info.cur_writer) & tid_bit)
		abort();

	/* We set waiting writer because trywrlock could wait for readers to quit */
	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);

	start_time = now_mono_time();
	r = __RWLOCK_TRYWRLOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
	if (unlikely(r)) {
		/* not taken: withdraw the waiter bit and report the failure */
		HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
		return r;
	}
	HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);

	l->info.cur_writer = tid_bit;
	l->info.last_location.function = func;
	l->info.last_location.file = file;
	l->info.last_location.line = line;

	HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);

	return 0;
}
562
/* Releases the write lock on <l>, recording the call site. Aborts if the
 * calling thread does not hold it for writes.
 */
void __ha_rwlock_wrunlock(enum lock_label lbl,struct ha_rwlock *l,
                          const char *func, const char *file, int line)
{
	if (unlikely(!(l->info.cur_writer & tid_bit))) {
		/* the thread is not owning the lock for write */
		abort();
	}

	l->info.cur_writer = 0;
	l->info.last_location.function = func;
	l->info.last_location.file = file;
	l->info.last_location.line = line;

	__RWLOCK_WRUNLOCK(&l->lock);

	HA_ATOMIC_INC(&lock_stats[lbl].num_write_unlocked);
}
580
/* Takes rwlock <l> for reads, accounting the wait time under label <lbl>.
 * Aborts if the calling thread already holds the lock in any mode. Read
 * owners are tracked in a shared mask, so no call site is recorded.
 */
void __ha_rwlock_rdlock(enum lock_label lbl,struct ha_rwlock *l)
{
	uint64_t start_time;

	if ((l->info.cur_readers | l->info.cur_seeker | l->info.cur_writer) & tid_bit)
		abort();

	HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);

	start_time = now_mono_time();
	__RWLOCK_RDLOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));
	HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);

	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);

	HA_ATOMIC_AND(&l->info.wait_readers, ~tid_bit);
}
599
/* Tries to take rwlock <l> for reads; returns 0 on success, non-zero
 * otherwise. Aborts if the calling thread already holds the lock in any mode.
 */
int __ha_rwlock_tryrdlock(enum lock_label lbl,struct ha_rwlock *l)
{
	int r;

	if ((l->info.cur_readers | l->info.cur_seeker | l->info.cur_writer) & tid_bit)
		abort();

	/* try read should never wait */
	r = __RWLOCK_TRYRDLOCK(&l->lock);
	if (unlikely(r))
		return r;
	HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);

	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);

	return 0;
}
617
/* Releases the read lock on <l>. Aborts if the calling thread does not hold
 * it for reads.
 */
void __ha_rwlock_rdunlock(enum lock_label lbl,struct ha_rwlock *l)
{
	if (unlikely(!(l->info.cur_readers & tid_bit))) {
		/* the thread is not owning the lock for read */
		abort();
	}

	HA_ATOMIC_AND(&l->info.cur_readers, ~tid_bit);

	__RWLOCK_RDUNLOCK(&l->lock);

	HA_ATOMIC_INC(&lock_stats[lbl].num_read_unlocked);
}
631
/* Downgrades the write lock held on <l> to a read lock, accounting the wait
 * under label <lbl> and recording the call site. Aborts if the calling thread
 * does not hold the write lock, or already holds it for reads or seek.
 */
void __ha_rwlock_wrtord(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line)
{
	uint64_t start_time;

	if ((l->info.cur_readers | l->info.cur_seeker) & tid_bit)
		abort();

	if (!(l->info.cur_writer & tid_bit))
		abort();

	HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);

	start_time = now_mono_time();
	__RWLOCK_WRTORD(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));

	HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);

	/* switch ownership from writer to reader */
	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);
	HA_ATOMIC_AND(&l->info.cur_writer, ~tid_bit);
	l->info.last_location.function = func;
	l->info.last_location.file = file;
	l->info.last_location.line = line;

	HA_ATOMIC_AND(&l->info.wait_readers, ~tid_bit);
}
659
/* Downgrades the write lock held on <l> to a seek lock, accounting the wait
 * under label <lbl> and recording the call site. Aborts if the calling thread
 * does not hold the write lock, or already holds it for reads or seek.
 */
void __ha_rwlock_wrtosk(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line)
{
	uint64_t start_time;

	if ((l->info.cur_readers | l->info.cur_seeker) & tid_bit)
		abort();

	if (!(l->info.cur_writer & tid_bit))
		abort();

	HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);

	start_time = now_mono_time();
	__RWLOCK_WRTOSK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));

	HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);

	/* switch ownership from writer to seeker */
	HA_ATOMIC_OR(&l->info.cur_seeker, tid_bit);
	HA_ATOMIC_AND(&l->info.cur_writer, ~tid_bit);
	l->info.last_location.function = func;
	l->info.last_location.file = file;
	l->info.last_location.line = line;

	HA_ATOMIC_AND(&l->info.wait_seekers, ~tid_bit);
}
687
/* Takes rwlock <l> for seek, accounting the wait time under label <lbl> and
 * recording the call site. Aborts if the calling thread already holds the
 * lock in any mode.
 */
void __ha_rwlock_sklock(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line)
{
	uint64_t start_time;

	if ((l->info.cur_readers | l->info.cur_seeker | l->info.cur_writer) & tid_bit)
		abort();

	HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);

	start_time = now_mono_time();
	__RWLOCK_SKLOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));

	HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);

	HA_ATOMIC_OR(&l->info.cur_seeker, tid_bit);
	l->info.last_location.function = func;
	l->info.last_location.file = file;
	l->info.last_location.line = line;

	HA_ATOMIC_AND(&l->info.wait_seekers, ~tid_bit);
}
711
/* Upgrades the seek lock held on <l> to a write lock, accounting the wait
 * under label <lbl> and recording the call site. Aborts if the calling thread
 * does not hold the seek lock, or already holds it for reads or writes.
 */
void __ha_rwlock_sktowr(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line)
{
	uint64_t start_time;

	if ((l->info.cur_readers | l->info.cur_writer) & tid_bit)
		abort();

	if (!(l->info.cur_seeker & tid_bit))
		abort();

	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);

	start_time = now_mono_time();
	__RWLOCK_SKTOWR(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));

	HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);

	/* switch ownership from seeker to writer */
	HA_ATOMIC_OR(&l->info.cur_writer, tid_bit);
	HA_ATOMIC_AND(&l->info.cur_seeker, ~tid_bit);
	l->info.last_location.function = func;
	l->info.last_location.file = file;
	l->info.last_location.line = line;

	HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
}
739
/* Downgrades the seek lock held on <l> to a read lock, accounting the wait
 * under label <lbl> and recording the call site. Aborts if the calling thread
 * does not hold the seek lock, or already holds it for reads or writes.
 */
void __ha_rwlock_sktord(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line)
{
	uint64_t start_time;

	if ((l->info.cur_readers | l->info.cur_writer) & tid_bit)
		abort();

	if (!(l->info.cur_seeker & tid_bit))
		abort();

	HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);

	start_time = now_mono_time();
	__RWLOCK_SKTORD(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));

	HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);

	/* switch ownership from seeker to reader */
	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);
	HA_ATOMIC_AND(&l->info.cur_seeker, ~tid_bit);
	l->info.last_location.function = func;
	l->info.last_location.file = file;
	l->info.last_location.line = line;

	HA_ATOMIC_AND(&l->info.wait_readers, ~tid_bit);
}
767
/* Releases the seek lock held on <l>, recording the call site. Aborts if the
 * calling thread does not hold it for seek.
 */
void __ha_rwlock_skunlock(enum lock_label lbl,struct ha_rwlock *l,
                          const char *func, const char *file, int line)
{
	if (!(l->info.cur_seeker & tid_bit))
		abort();

	HA_ATOMIC_AND(&l->info.cur_seeker, ~tid_bit);
	l->info.last_location.function = func;
	l->info.last_location.file = file;
	l->info.last_location.line = line;

	__RWLOCK_SKUNLOCK(&l->lock);

	HA_ATOMIC_INC(&lock_stats[lbl].num_seek_unlocked);
}
783
/* Tries to take rwlock <l> for seek. Returns 0 on success and records the
 * call site, otherwise the underlying lock's non-zero error code. Aborts if
 * the calling thread already holds the lock in any mode.
 */
int __ha_rwlock_trysklock(enum lock_label lbl, struct ha_rwlock *l,
                          const char *func, const char *file, int line)
{
	uint64_t start_time;
	int r;

	if ((l->info.cur_readers | l->info.cur_seeker | l->info.cur_writer) & tid_bit)
		abort();

	HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);

	start_time = now_mono_time();
	r = __RWLOCK_TRYSKLOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));

	if (likely(!r)) {
		/* got the lock ! */
		HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
		HA_ATOMIC_OR(&l->info.cur_seeker, tid_bit);
		l->info.last_location.function = func;
		l->info.last_location.file = file;
		l->info.last_location.line = line;
	}

	HA_ATOMIC_AND(&l->info.wait_seekers, ~tid_bit);
	return r;
}
811
/* Tries to upgrade the read lock held on <l> to a seek lock. Returns 0 on
 * success (with ownership moved from reader to seeker and the call site
 * recorded), non-zero otherwise. Aborts if the calling thread does not hold
 * the read lock, or already holds it for writes or seek.
 */
int __ha_rwlock_tryrdtosk(enum lock_label lbl, struct ha_rwlock *l,
                          const char *func, const char *file, int line)
{
	uint64_t start_time;
	int r;

	if ((l->info.cur_writer | l->info.cur_seeker) & tid_bit)
		abort();

	if (!(l->info.cur_readers & tid_bit))
		abort();

	HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);

	start_time = now_mono_time();
	r = __RWLOCK_TRYRDTOSK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));

	if (likely(!r)) {
		/* got the lock ! */
		HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
		HA_ATOMIC_OR(&l->info.cur_seeker, tid_bit);
		HA_ATOMIC_AND(&l->info.cur_readers, ~tid_bit);
		l->info.last_location.function = func;
		l->info.last_location.file = file;
		l->info.last_location.line = line;
	}

	HA_ATOMIC_AND(&l->info.wait_seekers, ~tid_bit);
	return r;
}
843
844void __spin_init(struct ha_spinlock *l)
845{
846 memset(l, 0, sizeof(struct ha_spinlock));
847 __SPIN_INIT(&l->lock);
848}
849
850void __spin_destroy(struct ha_spinlock *l)
851{
852 __SPIN_DESTROY(&l->lock);
853 memset(l, 0, sizeof(struct ha_spinlock));
854}
855
856void __spin_lock(enum lock_label lbl, struct ha_spinlock *l,
857 const char *func, const char *file, int line)
858{
859 uint64_t start_time;
860
861 if (unlikely(l->info.owner & tid_bit)) {
862 /* the thread is already owning the lock */
863 abort();
864 }
865
866 HA_ATOMIC_OR(&l->info.waiters, tid_bit);
867
Willy Tarreaudced3eb2021-10-05 18:48:23 +0200868 start_time = now_mono_time();
Willy Tarreau407ef892021-10-05 18:39:27 +0200869 __SPIN_LOCK(&l->lock);
Willy Tarreaudced3eb2021-10-05 18:48:23 +0200870 HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
Willy Tarreau407ef892021-10-05 18:39:27 +0200871
872 HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
873
874
875 l->info.owner = tid_bit;
876 l->info.last_location.function = func;
877 l->info.last_location.file = file;
878 l->info.last_location.line = line;
879
880 HA_ATOMIC_AND(&l->info.waiters, ~tid_bit);
881}
882
883int __spin_trylock(enum lock_label lbl, struct ha_spinlock *l,
884 const char *func, const char *file, int line)
885{
886 int r;
887
888 if (unlikely(l->info.owner & tid_bit)) {
889 /* the thread is already owning the lock */
890 abort();
891 }
892
893 /* try read should never wait */
894 r = __SPIN_TRYLOCK(&l->lock);
895 if (unlikely(r))
896 return r;
897 HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
898
899 l->info.owner = tid_bit;
900 l->info.last_location.function = func;
901 l->info.last_location.file = file;
902 l->info.last_location.line = line;
903
904 return 0;
905}
906
907void __spin_unlock(enum lock_label lbl, struct ha_spinlock *l,
908 const char *func, const char *file, int line)
909{
910 if (unlikely(!(l->info.owner & tid_bit))) {
911 /* the thread is not owning the lock */
912 abort();
913 }
914
915 l->info.owner = 0;
916 l->info.last_location.function = func;
917 l->info.last_location.file = file;
918 l->info.last_location.line = line;
919
920 __SPIN_UNLOCK(&l->lock);
921 HA_ATOMIC_INC(&lock_stats[lbl].num_write_unlocked);
922}
923
924#endif // defined(DEBUG_THREAD) || defined(DEBUG_FULL)
925
Willy Tarreauf734ebf2020-09-09 17:07:54 +0200926/* Depending on the platform and how libpthread was built, pthread_exit() may
927 * involve some code in libgcc_s that would be loaded on exit for the first
 * time, causing aborts if the process is chrooted. It's harmless but very
929 * dirty. There isn't much we can do to make sure libgcc_s is loaded only if
930 * needed, so what we do here is that during early boot we create a dummy
931 * thread that immediately exits. This will lead to libgcc_s being loaded
932 * during boot on the platforms where it's required.
933 */
/* Thread entry point that terminates immediately via pthread_exit(). Its only
 * purpose is to force the thread-exit support code (libgcc_s) to be loaded at
 * boot time; see the comment above.
 */
static void *dummy_thread_function(void *unused)
{
	pthread_exit(NULL);
	return NULL;
}
939
940static inline void preload_libgcc_s(void)
941{
942 pthread_t dummy_thread;
943 pthread_create(&dummy_thread, NULL, dummy_thread_function, NULL);
944 pthread_join(dummy_thread, NULL);
945}
946
Christopher Faulet1a2b56e2017-10-12 16:09:09 +0200947__attribute__((constructor))
Willy Tarreau3f567e42020-05-28 15:29:19 +0200948static void __thread_init(void)
Christopher Faulet1a2b56e2017-10-12 16:09:09 +0200949{
Willy Tarreauf5809cd2019-01-26 13:35:03 +0100950 char *ptr = NULL;
951
952 if (MAX_THREADS < 1 || MAX_THREADS > LONGBITS) {
953 ha_alert("MAX_THREADS value must be between 1 and %d inclusive; "
954 "HAProxy was built with value %d, please fix it and rebuild.\n",
955 LONGBITS, MAX_THREADS);
956 exit(1);
957 }
Willy Tarreau149ab772019-01-26 14:27:06 +0100958
Willy Tarreauf734ebf2020-09-09 17:07:54 +0200959 preload_libgcc_s();
Willy Tarreau77b98222020-09-02 08:04:35 +0200960
Willy Tarreau149ab772019-01-26 14:27:06 +0100961 thread_cpus_enabled_at_boot = thread_cpus_enabled();
962
963 memprintf(&ptr, "Built with multi-threading support (MAX_THREADS=%d, default=%d).",
964 MAX_THREADS, thread_cpus_enabled_at_boot);
Willy Tarreauf5809cd2019-01-26 13:35:03 +0100965 hap_register_build_opts(ptr, 1);
966
Christopher Faulet1a2b56e2017-10-12 16:09:09 +0200967#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
968 memset(lock_stats, 0, sizeof(lock_stats));
969#endif
Christopher Faulet1a2b56e2017-10-12 16:09:09 +0200970}
971
Willy Tarreau8459f252018-12-15 16:48:14 +0100972#else
973
Willy Tarreauaa992762021-10-06 23:33:20 +0200974/* send signal <sig> to thread <thr> (send to process in fact) */
975void ha_tkill(unsigned int thr, int sig)
976{
977 raise(sig);
978}
979
/* send signal <sig> to all threads (send to process in fact) */
void ha_tkillall(int sig)
{
	/* single-threaded build: the process itself is the only "thread" */
	raise(sig);
}
985
/* Offer the CPU to other runnable tasks, typically while busy waiting. On
 * platforms without sched_yield() this is simply a no-op.
 */
void ha_thread_relax(void)
{
#if defined(_POSIX_PRIORITY_SCHEDULING)
	sched_yield();
#endif
}
992
Willy Tarreau8459f252018-12-15 16:48:14 +0100993REGISTER_BUILD_OPTS("Built without multi-threading support (USE_THREAD not set).");
994
Willy Tarreau0ccd3222018-07-30 10:34:35 +0200995#endif // USE_THREAD
996
997
Willy Tarreau51ec03a2021-09-22 11:55:22 +0200998/* Parse the "nbthread" global directive, which takes an integer argument that
999 * contains the desired number of threads.
Willy Tarreau0ccd3222018-07-30 10:34:35 +02001000 */
Willy Tarreau51ec03a2021-09-22 11:55:22 +02001001static int cfg_parse_nbthread(char **args, int section_type, struct proxy *curpx,
1002 const struct proxy *defpx, const char *file, int line,
1003 char **err)
Willy Tarreau0ccd3222018-07-30 10:34:35 +02001004{
1005 long nbthread;
1006 char *errptr;
1007
Willy Tarreau51ec03a2021-09-22 11:55:22 +02001008 if (too_many_args(1, args, err, NULL))
1009 return -1;
1010
1011 nbthread = strtol(args[1], &errptr, 10);
1012 if (!*args[1] || *errptr) {
1013 memprintf(err, "'%s' passed a missing or unparsable integer value in '%s'", args[0], args[1]);
1014 return -1;
Willy Tarreau0ccd3222018-07-30 10:34:35 +02001015 }
1016
1017#ifndef USE_THREAD
1018 if (nbthread != 1) {
Willy Tarreau51ec03a2021-09-22 11:55:22 +02001019 memprintf(err, "'%s' specified with a value other than 1 while HAProxy is not compiled with threads support. Please check build options for USE_THREAD", args[0]);
1020 return -1;
Willy Tarreau0ccd3222018-07-30 10:34:35 +02001021 }
1022#else
1023 if (nbthread < 1 || nbthread > MAX_THREADS) {
Willy Tarreau51ec03a2021-09-22 11:55:22 +02001024 memprintf(err, "'%s' value must be between 1 and %d (was %ld)", args[0], MAX_THREADS, nbthread);
1025 return -1;
Willy Tarreau0ccd3222018-07-30 10:34:35 +02001026 }
1027
Willy Tarreaufc647362019-02-02 17:05:03 +01001028 all_threads_mask = nbits(nbthread);
Christopher Faulet1a2b56e2017-10-12 16:09:09 +02001029#endif
Willy Tarreau51ec03a2021-09-22 11:55:22 +02001030
1031 HA_DIAG_WARNING_COND(global.nbthread,
1032 "parsing [%s:%d] : nbthread is already defined and will be overridden.\n",
1033 file, line);
1034
1035 global.nbthread = nbthread;
1036 return 0;
Willy Tarreau0ccd3222018-07-30 10:34:35 +02001037}
Willy Tarreau51ec03a2021-09-22 11:55:22 +02001038
/* config keyword parsers: "nbthread" is only valid in the global section */
static struct cfg_kw_list cfg_kws = {ILH, {
	{ CFG_GLOBAL, "nbthread", cfg_parse_nbthread, 0 },
	{ 0, NULL, NULL }
}};

/* register the keywords above during the STG_REGISTER init stage */
INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);