/*
 * include/haproxy/thread.h
 * definitions, macros and inline functions used by threads.
 *
 * Copyright (C) 2017 Christopher Faulet - cfaulet@haproxy.com
 * Copyright (C) 2020 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _HAPROXY_THREAD_H
#define _HAPROXY_THREAD_H

#include <signal.h>
#include <unistd.h>
#ifdef _POSIX_PRIORITY_SCHEDULING
#include <sched.h>
#endif

#include <haproxy/api.h>
#include <haproxy/thread-t.h>
#include <haproxy/tinfo.h>


/* Note: this file mainly contains 5 sections:
 * - a small common part, which also corresponds to the common API
 * - one used solely when USE_THREAD is *not* set
 * - one used solely when USE_THREAD is set
 * - one used solely when USE_THREAD is set WITHOUT debugging
 * - one used solely when USE_THREAD is set WITH debugging
 *
 */


/* Generic exports */
int parse_nbthread(const char *arg, char **err);
int thread_get_default_count();
extern int thread_cpus_enabled_at_boot;


#ifndef USE_THREAD

/********************** THREADS DISABLED ************************/

/* Only way found to replace variables with constants that are optimized away
 * at build time.
 */
enum { all_threads_mask = 1UL };
enum { threads_harmless_mask = 0 };
enum { threads_sync_mask = 0 };
enum { threads_want_rdv_mask = 0 };
enum { tid_bit = 1UL };
enum { tid = 0 };
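/* Illustration (added note, not from the original sources): because the
 * symbols above are enum constants rather than variables, thread-related
 * tests in common code constant-fold in this single-threaded build. For
 * instance a check such as:
 *
 *     if (!(threads_harmless_mask & tid_bit))
 *         do_something();              // do_something() is hypothetical
 *
 * reduces to an unconditional call, since the mask is the compile-time
 * constant 0 and the test disappears from the generated code.
 */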

#define HA_SPIN_INIT(l) do { /* do nothing */ } while(0)
#define HA_SPIN_DESTROY(l) do { /* do nothing */ } while(0)
#define HA_SPIN_LOCK(lbl, l) do { /* do nothing */ } while(0)
#define HA_SPIN_TRYLOCK(lbl, l) ({ 0; })
#define HA_SPIN_UNLOCK(lbl, l) do { /* do nothing */ } while(0)

#define HA_RWLOCK_INIT(l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_DESTROY(l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_WRLOCK(lbl, l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_TRYWRLOCK(lbl, l) ({ 0; })
#define HA_RWLOCK_WRUNLOCK(lbl, l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_RDLOCK(lbl, l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_TRYRDLOCK(lbl, l) ({ 0; })
#define HA_RWLOCK_RDUNLOCK(lbl, l) do { /* do nothing */ } while(0)

#define HA_RWLOCK_SKLOCK(lbl,l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_SKTOWR(lbl,l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_WRTOSK(lbl,l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_SKTORD(lbl,l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_WRTORD(lbl,l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_SKUNLOCK(lbl,l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_TRYSKLOCK(lbl,l) ({ 0; })
#define HA_RWLOCK_TRYRDTOSK(lbl,l) ({ 0; })

#define ha_sigmask(how, set, oldset) sigprocmask(how, set, oldset)

static inline void ha_set_tid(unsigned int tid)
{
        ti = &ha_thread_info[tid];
}

static inline unsigned long long ha_get_pthread_id(unsigned int thr)
{
        return 0;
}

static inline void ha_thread_relax(void)
{
#if _POSIX_PRIORITY_SCHEDULING
        sched_yield();
#endif
}

/* send signal <sig> to thread <thr> */
static inline void ha_tkill(unsigned int thr, int sig)
{
        raise(sig);
}

/* send signal <sig> to all threads */
static inline void ha_tkillall(int sig)
{
        raise(sig);
}

static inline void thread_harmless_now()
{
}

static inline void thread_harmless_end()
{
}

static inline void thread_isolate()
{
}

static inline void thread_release()
{
}

static inline void thread_sync_release()
{
}

static inline unsigned long thread_isolated()
{
        return 1;
}

#else /* !USE_THREAD */

/********************** THREADS ENABLED ************************/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <import/plock.h>

void thread_harmless_till_end();
void thread_isolate();
void thread_release();
void thread_sync_release();
void ha_tkill(unsigned int thr, int sig);
void ha_tkillall(int sig);
void ha_spin_init(HA_SPINLOCK_T *l);
void ha_rwlock_init(HA_RWLOCK_T *l);

extern volatile unsigned long all_threads_mask;
extern volatile unsigned long threads_harmless_mask;
extern volatile unsigned long threads_sync_mask;
extern volatile unsigned long threads_want_rdv_mask;
extern THREAD_LOCAL unsigned long tid_bit; /* The bit corresponding to the thread id */
extern THREAD_LOCAL unsigned int tid;      /* The thread id */

/* explanation for threads_want_rdv_mask, threads_harmless_mask, and
 * threads_sync_mask:
 * - threads_want_rdv_mask is a bit field indicating all threads that have
 *   requested a rendez-vous of other threads using thread_isolate().
 * - threads_harmless_mask is a bit field indicating all threads that are
 *   currently harmless in that they promise not to access a shared resource.
 * - threads_sync_mask is a bit field indicating that a thread waiting for
 *   others to finish wants to leave synchronized with others and as such
 *   promises to do so as well using thread_sync_release().
 *
 * For a given thread, its bits in want_rdv and harmless can be translated like
 * this:
 *
 *  ----------+----------+----------------------------------------------------
 *   want_rdv | harmless | description
 *  ----------+----------+----------------------------------------------------
 *       0    |     0    | thread not interested in RDV, possibly harmful
 *       0    |     1    | thread not interested in RDV but harmless
 *       1    |     1    | thread interested in RDV and waiting for its turn
 *       1    |     0    | thread currently working isolated from others
 *  ----------+----------+----------------------------------------------------
 *
 * threads_sync_mask only delays the leaving of thread_sync_release() to make
 * sure that each thread's harmless bit is cleared before leaving the function.
 */

#define ha_sigmask(how, set, oldset) pthread_sigmask(how, set, oldset)

/* sets the thread ID and the TID bit for the current thread */
static inline void ha_set_tid(unsigned int data)
{
        tid     = data;
        tid_bit = (1UL << tid);
        ti      = &ha_thread_info[tid];
}

/* Retrieves the opaque pthread_t of thread <thr> cast to an unsigned long long
 * since POSIX took great care of not specifying its representation, making it
 * hard to export for post-mortem analysis. For this reason we copy it into a
 * union and will use the smallest scalar type at least as large as its size,
 * which will keep endianness and alignment for all regular sizes. As a last
 * resort we end up with a long long aligned to the first bytes in memory, which
 * will be endian-dependent if pthread_t is larger than a long long (not seen
 * yet).
 */
static inline unsigned long long ha_get_pthread_id(unsigned int thr)
{
        union {
                pthread_t t;
                unsigned long long ll;
                unsigned int i;
                unsigned short s;
                unsigned char c;
        } u;

        memset(&u, 0, sizeof(u));
        u.t = ha_thread_info[thr].pthread;

        if (sizeof(u.t) <= sizeof(u.c))
                return u.c;
        else if (sizeof(u.t) <= sizeof(u.s))
                return u.s;
        else if (sizeof(u.t) <= sizeof(u.i))
                return u.i;
        return u.ll;
}
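
/* Usage sketch (illustration only, not from the original sources): the value
 * is meant to be dumped for post-mortem analysis, e.g. from a debug handler:
 *
 *     fprintf(stderr, "thread %u: pthread id=%#llx\n",
 *             thr, ha_get_pthread_id(thr));
 */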

static inline void ha_thread_relax(void)
{
#if _POSIX_PRIORITY_SCHEDULING
        sched_yield();
#else
        pl_cpu_relax();
#endif
}

/* Marks the thread as harmless. Note: this must be true, i.e. the thread must
 * not be touching any unprotected shared resource during this period. Usually
 * this is called before poll(), but it may also be placed around very slow
 * calls (eg: some crypto operations). Needs to be terminated using
 * thread_harmless_end().
 */
static inline void thread_harmless_now()
{
        HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
}

/* Ends the harmless period started by thread_harmless_now(). Usually this is
 * placed after the poll() call. If it is discovered that a job was running and
 * is relying on the thread still being harmless, the thread waits for the
 * other one to finish.
 */
static inline void thread_harmless_end()
{
        while (1) {
                HA_ATOMIC_AND(&threads_harmless_mask, ~tid_bit);
                if (likely((threads_want_rdv_mask & all_threads_mask & ~tid_bit) == 0))
                        break;
                thread_harmless_till_end();
        }
}
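
/* Illustrative sketch of the typical harmless window around a blocking call
 * (added documentation; per the comments above, poll() is the usual case).
 * Nothing inside the window may touch unprotected shared data:
 *
 *     thread_harmless_now();              // advertise tid_bit as harmless
 *     status = poll(fds, nfds, timeout);  // slow/blocking call
 *     thread_harmless_end();              // may wait here if another thread
 *                                         // is currently isolating itself
 */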

/* an isolated thread has harmless cleared and want_rdv set */
static inline unsigned long thread_isolated()
{
        return threads_want_rdv_mask & ~threads_harmless_mask & tid_bit;
}


#if !defined(DEBUG_THREAD) && !defined(DEBUG_FULL)

/* Thread debugging is DISABLED, these are the regular locking functions */

#define HA_SPIN_INIT(l) ({ (*l) = 0; })
#define HA_SPIN_DESTROY(l) ({ (*l) = 0; })
#define HA_SPIN_LOCK(lbl, l) pl_take_s(l)
#define HA_SPIN_TRYLOCK(lbl, l) (!pl_try_s(l))
#define HA_SPIN_UNLOCK(lbl, l) pl_drop_s(l)

#define HA_RWLOCK_INIT(l) ({ (*l) = 0; })
#define HA_RWLOCK_DESTROY(l) ({ (*l) = 0; })
#define HA_RWLOCK_WRLOCK(lbl,l) pl_take_w(l)
#define HA_RWLOCK_TRYWRLOCK(lbl,l) (!pl_try_w(l))
#define HA_RWLOCK_WRUNLOCK(lbl,l) pl_drop_w(l)
#define HA_RWLOCK_RDLOCK(lbl,l) pl_take_r(l)
#define HA_RWLOCK_TRYRDLOCK(lbl,l) (!pl_try_r(l))
#define HA_RWLOCK_RDUNLOCK(lbl,l) pl_drop_r(l)

/* rwlock upgrades via seek locks */
#define HA_RWLOCK_SKLOCK(lbl,l) pl_take_s(l) /* N --> S */
#define HA_RWLOCK_SKTOWR(lbl,l) pl_stow(l) /* S --> W */
#define HA_RWLOCK_WRTOSK(lbl,l) pl_wtos(l) /* W --> S */
#define HA_RWLOCK_SKTORD(lbl,l) pl_stor(l) /* S --> R */
#define HA_RWLOCK_WRTORD(lbl,l) pl_wtor(l) /* W --> R */
#define HA_RWLOCK_SKUNLOCK(lbl,l) pl_drop_s(l) /* S --> N */
#define HA_RWLOCK_TRYSKLOCK(lbl,l) (!pl_try_s(l)) /* N -?> S */
#define HA_RWLOCK_TRYRDTOSK(lbl,l) (!pl_try_rtos(l)) /* R -?> S */
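
/* Illustrative upgrade/downgrade sequence using the seek lock (sketch only,
 * added as documentation; the lock variable and the label choice are
 * hypothetical):
 *
 *     HA_RWLOCK_SKLOCK(OTHER_LOCK, &lock);    // N --> S: claim the seek lock
 *     ...inspect the structure, decide whether to modify it...
 *     HA_RWLOCK_SKTOWR(OTHER_LOCK, &lock);    // S --> W: upgrade to writer
 *     ...perform the modification...
 *     HA_RWLOCK_WRTORD(OTHER_LOCK, &lock);    // W --> R: keep read access only
 *     ...read the final state...
 *     HA_RWLOCK_RDUNLOCK(OTHER_LOCK, &lock);  // release the read lock
 */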

#else /* !defined(DEBUG_THREAD) && !defined(DEBUG_FULL) */

/* Thread debugging is ENABLED, these are the instrumented functions */

#define __SPIN_INIT(l) ({ (*l) = 0; })
#define __SPIN_DESTROY(l) ({ (*l) = 0; })
#define __SPIN_LOCK(l) pl_take_s(l)
#define __SPIN_TRYLOCK(l) (!pl_try_s(l))
#define __SPIN_UNLOCK(l) pl_drop_s(l)

#define __RWLOCK_INIT(l) ({ (*l) = 0; })
#define __RWLOCK_DESTROY(l) ({ (*l) = 0; })
#define __RWLOCK_WRLOCK(l) pl_take_w(l)
#define __RWLOCK_TRYWRLOCK(l) (!pl_try_w(l))
#define __RWLOCK_WRUNLOCK(l) pl_drop_w(l)
#define __RWLOCK_RDLOCK(l) pl_take_r(l)
#define __RWLOCK_TRYRDLOCK(l) (!pl_try_r(l))
#define __RWLOCK_RDUNLOCK(l) pl_drop_r(l)

/* rwlock upgrades via seek locks */
#define __RWLOCK_SKLOCK(l) pl_take_s(l) /* N --> S */
#define __RWLOCK_SKTOWR(l) pl_stow(l) /* S --> W */
#define __RWLOCK_WRTOSK(l) pl_wtos(l) /* W --> S */
#define __RWLOCK_SKTORD(l) pl_stor(l) /* S --> R */
#define __RWLOCK_WRTORD(l) pl_wtor(l) /* W --> R */
#define __RWLOCK_SKUNLOCK(l) pl_drop_s(l) /* S --> N */
#define __RWLOCK_TRYSKLOCK(l) (!pl_try_s(l)) /* N -?> S */
#define __RWLOCK_TRYRDTOSK(l) (!pl_try_rtos(l)) /* R -?> S */

#define HA_SPIN_INIT(l) __spin_init(l)
#define HA_SPIN_DESTROY(l) __spin_destroy(l)

#define HA_SPIN_LOCK(lbl, l) __spin_lock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_SPIN_TRYLOCK(lbl, l) __spin_trylock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_SPIN_UNLOCK(lbl, l) __spin_unlock(lbl, l, __func__, __FILE__, __LINE__)

#define HA_RWLOCK_INIT(l) __ha_rwlock_init((l))
#define HA_RWLOCK_DESTROY(l) __ha_rwlock_destroy((l))
#define HA_RWLOCK_WRLOCK(lbl,l) __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_TRYWRLOCK(lbl,l) __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_WRUNLOCK(lbl,l) __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_RDLOCK(lbl,l) __ha_rwlock_rdlock(lbl, l)
#define HA_RWLOCK_TRYRDLOCK(lbl,l) __ha_rwlock_tryrdlock(lbl, l)
#define HA_RWLOCK_RDUNLOCK(lbl,l) __ha_rwlock_rdunlock(lbl, l)

#define HA_RWLOCK_SKLOCK(lbl,l) __ha_rwlock_sklock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_SKTOWR(lbl,l) __ha_rwlock_sktowr(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_WRTOSK(lbl,l) __ha_rwlock_wrtosk(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_SKTORD(lbl,l) __ha_rwlock_sktord(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_WRTORD(lbl,l) __ha_rwlock_wrtord(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_SKUNLOCK(lbl,l) __ha_rwlock_skunlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_TRYSKLOCK(lbl,l) __ha_rwlock_trysklock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_TRYRDTOSK(lbl,l) __ha_rwlock_tryrdtosk(lbl, l, __func__, __FILE__, __LINE__)

/* WARNING!!! if you update this enum, please also keep lock_label() up to date
 * below.
 */
enum lock_label {
        TASK_RQ_LOCK,
        TASK_WQ_LOCK,
        POOL_LOCK,
        LISTENER_LOCK,
        PROXY_LOCK,
        SERVER_LOCK,
        LBPRM_LOCK,
        SIGNALS_LOCK,
        STK_TABLE_LOCK,
        STK_SESS_LOCK,
        APPLETS_LOCK,
        PEER_LOCK,
        SSL_LOCK,
        SSL_GEN_CERTS_LOCK,
        PATREF_LOCK,
        PATEXP_LOCK,
        VARS_LOCK,
        COMP_POOL_LOCK,
        LUA_LOCK,
        NOTIF_LOCK,
        SPOE_APPLET_LOCK,
        DNS_LOCK,
        PID_LIST_LOCK,
        EMAIL_ALERTS_LOCK,
        PIPES_LOCK,
        TLSKEYS_REF_LOCK,
        AUTH_LOCK,
        LOGSRV_LOCK,
        DICT_LOCK,
        PROTO_LOCK,
        CKCH_LOCK,
        SNI_LOCK,
        SSL_SERVER_LOCK,
        SFT_LOCK, /* sink forward target */
        IDLE_CONNS_LOCK,
        OTHER_LOCK,
        /* WT: make sure never to use these ones outside of development,
         * we need them for lock profiling!
         */
        DEBUG1_LOCK,
        DEBUG2_LOCK,
        DEBUG3_LOCK,
        DEBUG4_LOCK,
        DEBUG5_LOCK,
        LOCK_LABELS
};

extern struct lock_stat lock_stats[LOCK_LABELS];

static inline const char *lock_label(enum lock_label label)
{
        switch (label) {
        case TASK_RQ_LOCK: return "TASK_RQ";
        case TASK_WQ_LOCK: return "TASK_WQ";
        case POOL_LOCK: return "POOL";
        case LISTENER_LOCK: return "LISTENER";
        case PROXY_LOCK: return "PROXY";
        case SERVER_LOCK: return "SERVER";
        case LBPRM_LOCK: return "LBPRM";
        case SIGNALS_LOCK: return "SIGNALS";
        case STK_TABLE_LOCK: return "STK_TABLE";
        case STK_SESS_LOCK: return "STK_SESS";
        case APPLETS_LOCK: return "APPLETS";
        case PEER_LOCK: return "PEER";
        case SSL_LOCK: return "SSL";
        case SSL_GEN_CERTS_LOCK: return "SSL_GEN_CERTS";
        case PATREF_LOCK: return "PATREF";
        case PATEXP_LOCK: return "PATEXP";
        case VARS_LOCK: return "VARS";
        case COMP_POOL_LOCK: return "COMP_POOL";
        case LUA_LOCK: return "LUA";
        case NOTIF_LOCK: return "NOTIF";
        case SPOE_APPLET_LOCK: return "SPOE_APPLET";
        case DNS_LOCK: return "DNS";
        case PID_LIST_LOCK: return "PID_LIST";
        case EMAIL_ALERTS_LOCK: return "EMAIL_ALERTS";
        case PIPES_LOCK: return "PIPES";
        case TLSKEYS_REF_LOCK: return "TLSKEYS_REF";
        case AUTH_LOCK: return "AUTH";
        case LOGSRV_LOCK: return "LOGSRV";
        case DICT_LOCK: return "DICT";
        case PROTO_LOCK: return "PROTO";
        case CKCH_LOCK: return "CKCH";
        case SNI_LOCK: return "SNI";
        case SSL_SERVER_LOCK: return "SSL_SERVER";
        case SFT_LOCK: return "SFT";
        case IDLE_CONNS_LOCK: return "IDLE_CONNS";
        case OTHER_LOCK: return "OTHER";
        case DEBUG1_LOCK: return "DEBUG1";
        case DEBUG2_LOCK: return "DEBUG2";
        case DEBUG3_LOCK: return "DEBUG3";
        case DEBUG4_LOCK: return "DEBUG4";
        case DEBUG5_LOCK: return "DEBUG5";
        case LOCK_LABELS: break; /* keep compiler happy */
        };
        /* only way to come here is consecutive to an internal bug */
        abort();
}

static inline void show_lock_stats()
{
        int lbl;

        for (lbl = 0; lbl < LOCK_LABELS; lbl++) {
                if (!lock_stats[lbl].num_write_locked &&
                    !lock_stats[lbl].num_seek_locked &&
                    !lock_stats[lbl].num_read_locked) {
                        fprintf(stderr,
                                "Stats about Lock %s: not used\n",
                                lock_label(lbl));
                        continue;
                }

                fprintf(stderr,
                        "Stats about Lock %s: \n",
                        lock_label(lbl));

                if (lock_stats[lbl].num_write_locked)
                        fprintf(stderr,
                                "\t # write lock : %lu\n"
                                "\t # write unlock: %lu (%ld)\n"
                                "\t # wait time for write : %.3f msec\n"
                                "\t # wait time for write/lock: %.3f nsec\n",
                                lock_stats[lbl].num_write_locked,
                                lock_stats[lbl].num_write_unlocked,
                                lock_stats[lbl].num_write_unlocked - lock_stats[lbl].num_write_locked,
                                (double)lock_stats[lbl].nsec_wait_for_write / 1000000.0,
                                lock_stats[lbl].num_write_locked ? ((double)lock_stats[lbl].nsec_wait_for_write / (double)lock_stats[lbl].num_write_locked) : 0);

                if (lock_stats[lbl].num_seek_locked)
                        fprintf(stderr,
                                "\t # seek lock : %lu\n"
                                "\t # seek unlock : %lu (%ld)\n"
                                "\t # wait time for seek : %.3f msec\n"
                                "\t # wait time for seek/lock : %.3f nsec\n",
                                lock_stats[lbl].num_seek_locked,
                                lock_stats[lbl].num_seek_unlocked,
                                lock_stats[lbl].num_seek_unlocked - lock_stats[lbl].num_seek_locked,
                                (double)lock_stats[lbl].nsec_wait_for_seek / 1000000.0,
                                lock_stats[lbl].num_seek_locked ? ((double)lock_stats[lbl].nsec_wait_for_seek / (double)lock_stats[lbl].num_seek_locked) : 0);

                if (lock_stats[lbl].num_read_locked)
                        fprintf(stderr,
                                "\t # read lock : %lu\n"
                                "\t # read unlock : %lu (%ld)\n"
                                "\t # wait time for read : %.3f msec\n"
                                "\t # wait time for read/lock : %.3f nsec\n",
                                lock_stats[lbl].num_read_locked,
                                lock_stats[lbl].num_read_unlocked,
                                lock_stats[lbl].num_read_unlocked - lock_stats[lbl].num_read_locked,
                                (double)lock_stats[lbl].nsec_wait_for_read / 1000000.0,
                                lock_stats[lbl].num_read_locked ? ((double)lock_stats[lbl].nsec_wait_for_read / (double)lock_stats[lbl].num_read_locked) : 0);
        }
}

/* Following functions are used to collect some stats about locks. We wrap
 * pthread functions to know how much time we wait in a lock. */
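/* For example (illustration added here; the lock variable is hypothetical),
 * with thread debugging enabled a call such as:
 *
 *     HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
 *
 * expands to __ha_rwlock_wrlock(TASK_WQ_LOCK, &wq_lock, __func__, __FILE__,
 * __LINE__), which measures the time spent waiting, accumulates it into
 * lock_stats[TASK_WQ_LOCK] and records the call site; show_lock_stats()
 * reports the totals.
 */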

static uint64_t nsec_now(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ((uint64_t) ts.tv_sec * 1000000000ULL +
                (uint64_t) ts.tv_nsec);
}

static inline void __ha_rwlock_init(struct ha_rwlock *l)
{
        memset(l, 0, sizeof(struct ha_rwlock));
        __RWLOCK_INIT(&l->lock);
}

static inline void __ha_rwlock_destroy(struct ha_rwlock *l)
{
        __RWLOCK_DESTROY(&l->lock);
        memset(l, 0, sizeof(struct ha_rwlock));
}


static inline void __ha_rwlock_wrlock(enum lock_label lbl, struct ha_rwlock *l,
                                      const char *func, const char *file, int line)
{
        uint64_t start_time;

        if ((l->info.cur_readers | l->info.cur_seeker | l->info.cur_writer) & tid_bit)
                abort();

        HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);

        start_time = nsec_now();
        __RWLOCK_WRLOCK(&l->lock);
        HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));

        HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);

        l->info.cur_writer = tid_bit;
        l->info.last_location.function = func;
        l->info.last_location.file = file;
        l->info.last_location.line = line;

        HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
}

static inline int __ha_rwlock_trywrlock(enum lock_label lbl, struct ha_rwlock *l,
                                        const char *func, const char *file, int line)
{
        uint64_t start_time;
        int r;

        if ((l->info.cur_readers | l->info.cur_seeker | l->info.cur_writer) & tid_bit)
                abort();

        /* We set waiting writer because trywrlock could wait for readers to quit */
        HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);

        start_time = nsec_now();
        r = __RWLOCK_TRYWRLOCK(&l->lock);
        HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
        if (unlikely(r)) {
                HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
                return r;
        }
        HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);

        l->info.cur_writer = tid_bit;
        l->info.last_location.function = func;
        l->info.last_location.file = file;
        l->info.last_location.line = line;

        HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);

        return 0;
}

static inline void __ha_rwlock_wrunlock(enum lock_label lbl,struct ha_rwlock *l,
                                        const char *func, const char *file, int line)
{
        if (unlikely(!(l->info.cur_writer & tid_bit))) {
                /* the thread is not owning the lock for write */
                abort();
        }

        l->info.cur_writer = 0;
        l->info.last_location.function = func;
        l->info.last_location.file = file;
        l->info.last_location.line = line;

        __RWLOCK_WRUNLOCK(&l->lock);

        HA_ATOMIC_INC(&lock_stats[lbl].num_write_unlocked);
}

static inline void __ha_rwlock_rdlock(enum lock_label lbl,struct ha_rwlock *l)
{
        uint64_t start_time;

        if ((l->info.cur_readers | l->info.cur_seeker | l->info.cur_writer) & tid_bit)
                abort();

        HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);

        start_time = nsec_now();
        __RWLOCK_RDLOCK(&l->lock);
        HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));
        HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);

        HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);

        HA_ATOMIC_AND(&l->info.wait_readers, ~tid_bit);
}

static inline int __ha_rwlock_tryrdlock(enum lock_label lbl,struct ha_rwlock *l)
{
        int r;

        if ((l->info.cur_readers | l->info.cur_seeker | l->info.cur_writer) & tid_bit)
                abort();

        /* try read should never wait */
        r = __RWLOCK_TRYRDLOCK(&l->lock);
        if (unlikely(r))
                return r;
        HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);

        HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);

        return 0;
}

static inline void __ha_rwlock_rdunlock(enum lock_label lbl,struct ha_rwlock *l)
{
        if (unlikely(!(l->info.cur_readers & tid_bit))) {
                /* the thread is not owning the lock for read */
                abort();
        }

        HA_ATOMIC_AND(&l->info.cur_readers, ~tid_bit);

        __RWLOCK_RDUNLOCK(&l->lock);

        HA_ATOMIC_INC(&lock_stats[lbl].num_read_unlocked);
}

static inline void __ha_rwlock_wrtord(enum lock_label lbl, struct ha_rwlock *l,
                                      const char *func, const char *file, int line)
{
        uint64_t start_time;

        if ((l->info.cur_readers | l->info.cur_seeker) & tid_bit)
                abort();

        if (!(l->info.cur_writer & tid_bit))
                abort();

        HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);

        start_time = nsec_now();
        __RWLOCK_WRTORD(&l->lock);
        HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));

        HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);

        HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);
        HA_ATOMIC_AND(&l->info.cur_writer, ~tid_bit);
        l->info.last_location.function = func;
        l->info.last_location.file = file;
        l->info.last_location.line = line;

        HA_ATOMIC_AND(&l->info.wait_readers, ~tid_bit);
}

static inline void __ha_rwlock_wrtosk(enum lock_label lbl, struct ha_rwlock *l,
                                      const char *func, const char *file, int line)
{
        uint64_t start_time;

        if ((l->info.cur_readers | l->info.cur_seeker) & tid_bit)
                abort();

        if (!(l->info.cur_writer & tid_bit))
                abort();

        HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);

        start_time = nsec_now();
        __RWLOCK_WRTOSK(&l->lock);
        HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));

        HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);

        HA_ATOMIC_OR(&l->info.cur_seeker, tid_bit);
        HA_ATOMIC_AND(&l->info.cur_writer, ~tid_bit);
        l->info.last_location.function = func;
        l->info.last_location.file = file;
        l->info.last_location.line = line;

        HA_ATOMIC_AND(&l->info.wait_seekers, ~tid_bit);
}

static inline void __ha_rwlock_sklock(enum lock_label lbl, struct ha_rwlock *l,
                                      const char *func, const char *file, int line)
{
        uint64_t start_time;

        if ((l->info.cur_readers | l->info.cur_seeker | l->info.cur_writer) & tid_bit)
                abort();

        HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);

        start_time = nsec_now();
        __RWLOCK_SKLOCK(&l->lock);
        HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));

        HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);

        HA_ATOMIC_OR(&l->info.cur_seeker, tid_bit);
        l->info.last_location.function = func;
        l->info.last_location.file = file;
        l->info.last_location.line = line;

        HA_ATOMIC_AND(&l->info.wait_seekers, ~tid_bit);
}

static inline void __ha_rwlock_sktowr(enum lock_label lbl, struct ha_rwlock *l,
                                      const char *func, const char *file, int line)
{
        uint64_t start_time;

        if ((l->info.cur_readers | l->info.cur_writer) & tid_bit)
                abort();

        if (!(l->info.cur_seeker & tid_bit))
                abort();

        HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);

        start_time = nsec_now();
        __RWLOCK_SKTOWR(&l->lock);
        HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));

        HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);

        HA_ATOMIC_OR(&l->info.cur_writer, tid_bit);
        HA_ATOMIC_AND(&l->info.cur_seeker, ~tid_bit);
        l->info.last_location.function = func;
        l->info.last_location.file = file;
        l->info.last_location.line = line;

        HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
}

static inline void __ha_rwlock_sktord(enum lock_label lbl, struct ha_rwlock *l,
                                      const char *func, const char *file, int line)
{
        uint64_t start_time;

        if ((l->info.cur_readers | l->info.cur_writer) & tid_bit)
                abort();

        if (!(l->info.cur_seeker & tid_bit))
                abort();

        HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);

        start_time = nsec_now();
        __RWLOCK_SKTORD(&l->lock);
        HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));

        HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);

        HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);
        HA_ATOMIC_AND(&l->info.cur_seeker, ~tid_bit);
        l->info.last_location.function = func;
        l->info.last_location.file = file;
        l->info.last_location.line = line;

        HA_ATOMIC_AND(&l->info.wait_readers, ~tid_bit);
}

static inline void __ha_rwlock_skunlock(enum lock_label lbl,struct ha_rwlock *l,
                                        const char *func, const char *file, int line)
{
        if (!(l->info.cur_seeker & tid_bit))
                abort();

        HA_ATOMIC_AND(&l->info.cur_seeker, ~tid_bit);
        l->info.last_location.function = func;
        l->info.last_location.file = file;
        l->info.last_location.line = line;

        __RWLOCK_SKUNLOCK(&l->lock);

        HA_ATOMIC_INC(&lock_stats[lbl].num_seek_unlocked);
}

static inline int __ha_rwlock_trysklock(enum lock_label lbl, struct ha_rwlock *l,
                                        const char *func, const char *file, int line)
{
        uint64_t start_time;
        int r;

        if ((l->info.cur_readers | l->info.cur_seeker | l->info.cur_writer) & tid_bit)
                abort();

        HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);

        start_time = nsec_now();
        r = __RWLOCK_TRYSKLOCK(&l->lock);
        HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));

        if (likely(!r)) {
                /* got the lock ! */
                HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
                HA_ATOMIC_OR(&l->info.cur_seeker, tid_bit);
                l->info.last_location.function = func;
                l->info.last_location.file = file;
                l->info.last_location.line = line;
        }

        HA_ATOMIC_AND(&l->info.wait_seekers, ~tid_bit);
        return r;
}

static inline int __ha_rwlock_tryrdtosk(enum lock_label lbl, struct ha_rwlock *l,
                                        const char *func, const char *file, int line)
{
        uint64_t start_time;
        int r;

        if ((l->info.cur_writer | l->info.cur_seeker) & tid_bit)
                abort();

        if (!(l->info.cur_readers & tid_bit))
                abort();

        HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);

        start_time = nsec_now();
        r = __RWLOCK_TRYRDTOSK(&l->lock);
        HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));

        if (likely(!r)) {
                /* got the lock ! */
                HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
                HA_ATOMIC_OR(&l->info.cur_seeker, tid_bit);
                HA_ATOMIC_AND(&l->info.cur_readers, ~tid_bit);
                l->info.last_location.function = func;
                l->info.last_location.file = file;
                l->info.last_location.line = line;
        }

        HA_ATOMIC_AND(&l->info.wait_seekers, ~tid_bit);
        return r;
}

static inline void __spin_init(struct ha_spinlock *l)
{
        memset(l, 0, sizeof(struct ha_spinlock));
        __SPIN_INIT(&l->lock);
}

static inline void __spin_destroy(struct ha_spinlock *l)
{
        __SPIN_DESTROY(&l->lock);
        memset(l, 0, sizeof(struct ha_spinlock));
}

static inline void __spin_lock(enum lock_label lbl, struct ha_spinlock *l,
                               const char *func, const char *file, int line)
{
        uint64_t start_time;

        if (unlikely(l->info.owner & tid_bit)) {
                /* the thread is already owning the lock */
                abort();
        }

        HA_ATOMIC_OR(&l->info.waiters, tid_bit);

        start_time = nsec_now();
        __SPIN_LOCK(&l->lock);
        HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));

        HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);

        l->info.owner = tid_bit;
        l->info.last_location.function = func;
        l->info.last_location.file = file;
        l->info.last_location.line = line;

        HA_ATOMIC_AND(&l->info.waiters, ~tid_bit);
}

static inline int __spin_trylock(enum lock_label lbl, struct ha_spinlock *l,
                                 const char *func, const char *file, int line)
{
        int r;

        if (unlikely(l->info.owner & tid_bit)) {
                /* the thread is already owning the lock */
                abort();
        }

        /* a trylock should never wait */
        r = __SPIN_TRYLOCK(&l->lock);
        if (unlikely(r))
                return r;
        HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);

        l->info.owner = tid_bit;
        l->info.last_location.function = func;
        l->info.last_location.file = file;
        l->info.last_location.line = line;

        return 0;
}

static inline void __spin_unlock(enum lock_label lbl, struct ha_spinlock *l,
                                 const char *func, const char *file, int line)
{
        if (unlikely(!(l->info.owner & tid_bit))) {
                /* the thread is not owning the lock */
                abort();
        }

        l->info.owner = 0;
        l->info.last_location.function = func;
        l->info.last_location.file = file;
        l->info.last_location.line = line;

        __SPIN_UNLOCK(&l->lock);
        HA_ATOMIC_INC(&lock_stats[lbl].num_write_unlocked);
}

#endif /* DEBUG_THREAD */

#endif /* USE_THREAD */

#endif /* _HAPROXY_THREAD_H */