blob: 63209cecc085af3c876ae97edcedb8f5c130fcd3 [file] [log] [blame]
Christopher Faulet1a2b56e2017-10-12 16:09:09 +02001/*
2 * functions about threads.
3 *
4 * Copyright (C) 2017 Christopher Fauet - cfaulet@haproxy.com
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
Willy Tarreau149ab772019-01-26 14:27:06 +010013#define _GNU_SOURCE
Christopher Faulet339fff82017-10-19 11:59:15 +020014#include <unistd.h>
Willy Tarreau0ccd3222018-07-30 10:34:35 +020015#include <stdlib.h>
Christopher Faulet339fff82017-10-19 11:59:15 +020016#include <fcntl.h>
17
Willy Tarreau149ab772019-01-26 14:27:06 +010018#ifdef USE_CPU_AFFINITY
19#include <sched.h>
20#endif
21
Olivier Houchard46453d32019-04-11 00:06:47 +020022#ifdef __FreeBSD__
23#include <sys/cpuset.h>
24#endif
25
Willy Tarreau04931492017-11-03 23:39:25 +010026#include <common/cfgparse.h>
Christopher Faulet1a2b56e2017-10-12 16:09:09 +020027#include <common/hathreads.h>
Christopher Faulet339fff82017-10-19 11:59:15 +020028#include <common/standard.h>
Willy Tarreau80713382018-11-26 10:19:54 +010029#include <types/global.h>
Christopher Faulet339fff82017-10-19 11:59:15 +020030#include <proto/fd.h>
Christopher Faulet1a2b56e2017-10-12 16:09:09 +020031
/* Per-thread information, one slot per possible thread. "ti" always points to
 * the calling thread's own slot; it starts on slot 0 (the only thread that
 * exists before threads are created).
 */
struct thread_info thread_info[MAX_THREADS] = { };
THREAD_LOCAL struct thread_info *ti = &thread_info[0];
Christopher Faulet1a2b56e2017-10-12 16:09:09 +020034
35#ifdef USE_THREAD
36
Willy Tarreau60b639c2018-08-02 10:16:17 +020037volatile unsigned long threads_want_rdv_mask = 0;
38volatile unsigned long threads_harmless_mask = 0;
Willy Tarreau9a1f5732019-06-09 12:20:02 +020039volatile unsigned long threads_sync_mask = 0;
Willy Tarreau0ccd3222018-07-30 10:34:35 +020040volatile unsigned long all_threads_mask = 1; // nbthread 1 assumed by default
Willy Tarreau0c026f42018-08-01 19:12:20 +020041THREAD_LOCAL unsigned int tid = 0;
42THREAD_LOCAL unsigned long tid_bit = (1UL << 0);
Willy Tarreau149ab772019-01-26 14:27:06 +010043int thread_cpus_enabled_at_boot = 1;
Willy Tarreau0c026f42018-08-01 19:12:20 +020044
Christopher Faulet339fff82017-10-19 11:59:15 +020045
Christopher Faulet1a2b56e2017-10-12 16:09:09 +020046#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
47struct lock_stat lock_stats[LOCK_LABELS];
48#endif
49
/* Marks the thread as harmless until the last thread using the rendez-vous
 * point quits, excluding the current one. Thus an isolated thread may be safely
 * marked as harmless. Given that we can wait for a long time, sched_yield() is
 * used when available to offer the CPU resources to competing threads if
 * needed.
 */
void thread_harmless_till_end()
{
	_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
	/* spin as long as any OTHER thread still requests the rendez-vous
	 * point; our own bit is ignored so an isolated caller doesn't
	 * deadlock on itself.
	 */
	while (threads_want_rdv_mask & ~tid_bit) {
		ha_thread_relax();
	}
}
63
/* Isolates the current thread : request the ability to work while all other
 * threads are harmless. Only returns once all of them are harmless, with the
 * current thread's bit in threads_harmless_mask cleared. Needs to be completed
 * using thread_release().
 */
void thread_isolate()
{
	unsigned long old;

	/* mark ourselves harmless first, then (after the store barrier that
	 * orders the two writes) publish our rendez-vous request, so that two
	 * concurrent isolators always see each other as harmless.
	 */
	_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
	__ha_barrier_atomic_store();
	_HA_ATOMIC_OR(&threads_want_rdv_mask, tid_bit);

	/* wait for all threads to become harmless */
	old = threads_harmless_mask;
	while (1) {
		if (unlikely((old & all_threads_mask) != all_threads_mask))
			old = threads_harmless_mask;
		else if (_HA_ATOMIC_CAS(&threads_harmless_mask, &old, old & ~tid_bit))
			break;

		ha_thread_relax();
	}
	/* one thread gets released at a time here, with its harmless bit off.
	 * The loss of this bit makes the other one continue to spin while the
	 * thread is working alone.
	 */
}
92
/* Cancels the effect of thread_isolate() by releasing the current thread's bit
 * in threads_want_rdv_mask and by marking this thread as harmless until the
 * last worker finishes.
 */
void thread_release()
{
	_HA_ATOMIC_AND(&threads_want_rdv_mask, ~tid_bit);
	/* if other threads still request the rendez-vous point, become
	 * harmless again and wait for them so that they too get their turn
	 * to work alone.
	 */
	while (threads_want_rdv_mask & all_threads_mask) {
		_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
		while (threads_want_rdv_mask & all_threads_mask)
			ha_thread_relax();
		/* NOTE(review): this one uses HA_ATOMIC_AND while the others
		 * use _HA_ATOMIC_AND -- verify whether the barrier variant is
		 * intentional here.
		 */
		HA_ATOMIC_AND(&threads_harmless_mask, ~tid_bit);
	}
}
Christopher Faulet339fff82017-10-19 11:59:15 +0200107
/* Cancels the effect of thread_isolate() by releasing the current thread's bit
 * in threads_want_rdv_mask and by marking this thread as harmless until the
 * last worker finishes. The difference with thread_release() is that this one
 * will not leave the function before others are notified to do the same, so it
 * guarantees that the current thread will not pass through a subsequent call
 * to thread_isolate() before others finish.
 */
void thread_sync_release()
{
	/* announce that we are synchronizing before dropping our rendez-vous
	 * request; the store barrier orders the two writes.
	 */
	_HA_ATOMIC_OR(&threads_sync_mask, tid_bit);
	__ha_barrier_atomic_store();
	_HA_ATOMIC_AND(&threads_want_rdv_mask, ~tid_bit);

	/* same waiting loop as thread_release(): stay harmless while other
	 * isolation requests are pending.
	 */
	while (threads_want_rdv_mask & all_threads_mask) {
		_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
		while (threads_want_rdv_mask & all_threads_mask)
			ha_thread_relax();
		HA_ATOMIC_AND(&threads_harmless_mask, ~tid_bit);
	}

	/* the current thread is not harmless anymore, thread_isolate()
	 * is forced to wait till all waiters finish.
	 */
	_HA_ATOMIC_AND(&threads_sync_mask, ~tid_bit);
	while (threads_sync_mask & all_threads_mask)
		ha_thread_relax();
}
135
/* send signal <sig> to thread <thr>. <thr> must be a valid thread index;
 * no bounds checking is performed here.
 */
void ha_tkill(unsigned int thr, int sig)
{
	pthread_kill(thread_info[thr].pthread, sig);
}
141
142/* send signal <sig> to all threads. The calling thread is signaled last in
143 * order to allow all threads to synchronize in the handler.
144 */
145void ha_tkillall(int sig)
146{
147 unsigned int thr;
148
149 for (thr = 0; thr < global.nbthread; thr++) {
150 if (!(all_threads_mask & (1UL << thr)))
151 continue;
152 if (thr == tid)
153 continue;
154 pthread_kill(thread_info[thr].pthread, sig);
155 }
156 raise(sig);
157}
158
/* these calls are used as callbacks at init time: a real function wrapper
 * around the HA_SPIN_INIT() macro so it can be passed as a function pointer.
 */
void ha_spin_init(HA_SPINLOCK_T *l)
{
	HA_SPIN_INIT(l);
}
164
/* these calls are used as callbacks at init time: a real function wrapper
 * around the HA_RWLOCK_INIT() macro so it can be passed as a function pointer.
 */
void ha_rwlock_init(HA_RWLOCK_T *l)
{
	HA_RWLOCK_INIT(l);
}
170
Willy Tarreau149ab772019-01-26 14:27:06 +0100171/* returns the number of CPUs the current process is enabled to run on */
172static int thread_cpus_enabled()
173{
174 int ret = 1;
175
176#ifdef USE_CPU_AFFINITY
177#if defined(__linux__) && defined(CPU_COUNT)
178 cpu_set_t mask;
179
180 if (sched_getaffinity(0, sizeof(mask), &mask) == 0)
181 ret = CPU_COUNT(&mask);
Olivier Houchard46453d32019-04-11 00:06:47 +0200182#elif defined(__FreeBSD__) && defined(USE_CPU_AFFINITY)
183 cpuset_t cpuset;
184 if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
185 sizeof(cpuset), &cpuset) == 0)
186 ret = CPU_COUNT(&cpuset);
Willy Tarreau149ab772019-01-26 14:27:06 +0100187#endif
188#endif
189 ret = MAX(ret, 1);
190 ret = MIN(ret, MAX_THREADS);
191 return ret;
192}
193
/* Depending on the platform and how libpthread was built, pthread_exit() may
 * involve some code in libgcc_s that would be loaded on exit for the first
 * time, causing aborts if the process is chrooted. It's harmless but very
 * dirty. There isn't much we can do to make sure libgcc_s is loaded only if
 * needed, so what we do here is that during early boot we create a dummy
 * thread that immediately exits. This will lead to libgcc_s being loaded
 * during boot on the platforms where it's required.
 */
static void *dummy_thread_function(void *data)
{
	/* exit immediately: the only purpose of this thread is to force
	 * pthread_exit()'s support code to be loaded at boot time.
	 */
	pthread_exit(NULL);
	return NULL;
}
207
208static inline void preload_libgcc_s(void)
209{
210 pthread_t dummy_thread;
211 pthread_create(&dummy_thread, NULL, dummy_thread_function, NULL);
212 pthread_join(dummy_thread, NULL);
213}
214
/* Boot-time constructor, runs before main(): validates the build-time thread
 * limit, preloads libgcc_s, records how many CPUs were usable at boot, and
 * registers the build options string.
 */
__attribute__((constructor))
static void __hathreads_init(void)
{
	char *ptr = NULL;

	/* the thread masks are unsigned longs, so MAX_THREADS must fit in
	 * LONGBITS bits; abort the process otherwise since nothing can work.
	 */
	if (MAX_THREADS < 1 || MAX_THREADS > LONGBITS) {
		ha_alert("MAX_THREADS value must be between 1 and %d inclusive; "
		         "HAProxy was built with value %d, please fix it and rebuild.\n",
		         LONGBITS, MAX_THREADS);
		exit(1);
	}

	preload_libgcc_s();

	thread_cpus_enabled_at_boot = thread_cpus_enabled();

	/* ptr is allocated by memprintf(); ownership is transferred to the
	 * build options registry.
	 */
	memprintf(&ptr, "Built with multi-threading support (MAX_THREADS=%d, default=%d).",
	          MAX_THREADS, thread_cpus_enabled_at_boot);
	hap_register_build_opts(ptr, 1);

#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
	memset(lock_stats, 0, sizeof(lock_stats));
#endif
}
239
Willy Tarreau8459f252018-12-15 16:48:14 +0100240#else
241
242REGISTER_BUILD_OPTS("Built without multi-threading support (USE_THREAD not set).");
243
Willy Tarreau0ccd3222018-07-30 10:34:35 +0200244#endif // USE_THREAD
245
246
/* Parses the thread count found in string <arg>. On success the value is
 * returned and the global thread masks are updated accordingly. On failure
 * zero is returned and an error message is allocated into <err>. May be
 * called several times while the configuration is being parsed.
 */
int parse_nbthread(const char *arg, char **err)
{
	char *endptr;
	long value;

	value = strtol(arg, &endptr, 10);
	/* reject empty strings and trailing garbage */
	if (!*arg || *endptr) {
		memprintf(err, "passed a missing or unparsable integer value in '%s'", arg);
		return 0;
	}

#ifndef USE_THREAD
	/* single-threaded builds only accept exactly one thread */
	if (value != 1) {
		memprintf(err, "specified with a value other than 1 while HAProxy is not compiled with threads support. Please check build options for USE_THREAD");
		return 0;
	}
#else
	if (value < 1 || value > MAX_THREADS) {
		memprintf(err, "value must be between 1 and %d (was %ld)", MAX_THREADS, value);
		return 0;
	}

	/* one bit per configured thread */
	all_threads_mask = nbits(value);
#endif
	return value;
}