/*
 * Task management functions.
 *
 * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <string.h>

#include <import/eb32sctree.h>
#include <import/eb32tree.h>

#include <haproxy/api.h>
#include <haproxy/cfgparse.h>
#include <haproxy/fd.h>
#include <haproxy/list.h>
#include <haproxy/pool.h>
#include <haproxy/task.h>
#include <haproxy/tools.h>

extern struct task *process_stream(struct task *t, void *context, unsigned int state);

DECLARE_POOL(pool_head_task, "task", sizeof(struct task));
DECLARE_POOL(pool_head_tasklet, "tasklet", sizeof(struct tasklet));

/* This is the memory pool containing all the signal structs. These
 * structs are used to store each required signal between two tasks.
 */
DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification));

volatile unsigned long global_tasks_mask = 0; /* Mask of threads with tasks in the global runqueue */
unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */

THREAD_LOCAL struct task_per_thread *sched = &task_per_thread[0]; /* scheduler context for the current thread */

__decl_aligned_spinlock(rq_lock); /* spin lock related to run queue */
__decl_aligned_rwlock(wq_lock);   /* RW lock related to the wait queue */

#ifdef USE_THREAD
struct eb_root timers;  /* sorted timers tree, global, accessed under wq_lock */
struct eb_root rqueue;  /* tree constituting the global run queue, accessed under rq_lock */
unsigned int grq_total; /* total number of entries in the global run queue, atomic */
static unsigned int global_rqueue_ticks; /* insertion count in the grq, use rq_lock */
#endif


struct task_per_thread task_per_thread[MAX_THREADS];


/* Flags the task <t> for immediate destruction and puts it into its first
 * thread's shared tasklet list if not yet queued/running. This will bypass
 * the priority scheduling and make the task show up as fast as possible in
 * the other thread's queue. Note that this operation isn't idempotent and is
 * not supposed to be run on the same task from multiple threads at once. It's
 * the caller's responsibility to make sure it is the only one able to kill the
 * task.
 */
void task_kill(struct task *t)
{
	unsigned int state = t->state;
	unsigned int thr;

	BUG_ON(state & TASK_KILLED);

	while (1) {
		while (state & (TASK_RUNNING | TASK_QUEUED)) {
			/* task already in the queue and about to be executed,
			 * or even currently running. Just add the flag and be
			 * done with it, the process loop will detect it and kill
			 * it. The CAS will fail if we arrive too late.
			 */
			if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_KILLED))
				return;
		}

		/* We'll have to wake it up, but we must also secure it so that
		 * it doesn't vanish under us. TASK_QUEUED guarantees nobody will
		 * add past us.
		 */
		if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_QUEUED | TASK_KILLED)) {
			/* Bypass the tree and go directly into the shared tasklet list.
			 * Note: that's a task so it must be accounted for as such. Pick
			 * the task's first thread for the job.
			 */
			thr = my_ffsl(t->thread_mask) - 1;

			/* Beware: tasks that have never run don't have their ->list empty yet! */
			MT_LIST_APPEND(&task_per_thread[thr].shared_tasklet_list,
			               (struct mt_list *)&((struct tasklet *)t)->list);
			_HA_ATOMIC_INC(&task_per_thread[thr].rq_total);
			_HA_ATOMIC_INC(&task_per_thread[thr].tasks_in_list);
			if (sleeping_thread_mask & (1UL << thr)) {
				_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
				wake_thread(thr);
			}
			return;
		}
	}
}

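/* Usage sketch (hypothetical caller, not part of this file): the sole owner
 * of a reference to a task running on another thread may terminate it as
 * follows, assuming nothing else can kill it concurrently:
 *
 *	if (ctx->task) {
 *		task_kill(ctx->task);   // flags TASK_KILLED and wakes the target thread
 *		ctx->task = NULL;       // never kill the same task twice
 *	}
 */
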
/* Do not call this one, please use tasklet_wakeup_on() instead, as this one is
 * the slow path of tasklet_wakeup_on() which performs some preliminary checks
 * and sets TASK_IN_LIST before calling this one. A negative <thr> designates
 * the current thread.
 */
void __tasklet_wakeup_on(struct tasklet *tl, int thr)
{
	if (likely(thr < 0)) {
		/* this tasklet runs on the caller thread */
		if (tl->state & TASK_HEAVY) {
			LIST_APPEND(&sched->tasklets[TL_HEAVY], &tl->list);
			sched->tl_class_mask |= 1 << TL_HEAVY;
		}
		else if (tl->state & TASK_SELF_WAKING) {
			LIST_APPEND(&sched->tasklets[TL_BULK], &tl->list);
			sched->tl_class_mask |= 1 << TL_BULK;
		}
		else if ((struct task *)tl == sched->current) {
			_HA_ATOMIC_OR(&tl->state, TASK_SELF_WAKING);
			LIST_APPEND(&sched->tasklets[TL_BULK], &tl->list);
			sched->tl_class_mask |= 1 << TL_BULK;
		}
		else if (sched->current_queue < 0) {
			LIST_APPEND(&sched->tasklets[TL_URGENT], &tl->list);
			sched->tl_class_mask |= 1 << TL_URGENT;
		}
		else {
			LIST_APPEND(&sched->tasklets[sched->current_queue], &tl->list);
			sched->tl_class_mask |= 1 << sched->current_queue;
		}
		_HA_ATOMIC_INC(&sched->rq_total);
	} else {
		/* this tasklet runs on a specific thread. */
		MT_LIST_APPEND(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&tl->list);
		_HA_ATOMIC_INC(&task_per_thread[thr].rq_total);
		if (sleeping_thread_mask & (1UL << thr)) {
			_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
			wake_thread(thr);
		}
	}
}

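/* Usage sketch (hypothetical callback name): callers normally go through the
 * fast-path wrappers from <haproxy/task.h> rather than this function:
 *
 *	struct tasklet *tl = tasklet_new();
 *
 *	tl->process = my_io_cb;        // hypothetical callback
 *	tasklet_wakeup(tl);            // queue it on the calling thread
 *	tasklet_wakeup_on(tl, 2);      // or: pin the wakeup to thread 2
 *
 * The class (URGENT/NORMAL/BULK/HEAVY) then follows from the state flags and
 * the scheduler context, as implemented above.
 */
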
/* Puts the task <t> in run queue at a position depending on t->nice.
 * The nice value assigns boosts in 32ths of the run queue size. A nice value
 * of -1024 sets the task to -tasks_run_queue*32, while a nice value of 1024
 * sets the task to tasks_run_queue*32. The state flags are cleared, so the
 * caller will have to set its flags after this call.
 * The task must not already be in the run queue. If unsure, use the safer
 * task_wakeup() function.
 */
void __task_wakeup(struct task *t)
{
	struct eb_root *root = &sched->rqueue;

#ifdef USE_THREAD
	if (t->thread_mask != tid_bit && global.nbthread != 1) {
		root = &rqueue;

		_HA_ATOMIC_INC(&grq_total);
		HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);

		global_tasks_mask |= t->thread_mask;
		t->rq.key = ++global_rqueue_ticks;
		__ha_barrier_store();
	} else
#endif
	{
		_HA_ATOMIC_INC(&sched->rq_total);
		t->rq.key = ++sched->rqueue_ticks;
	}

	if (likely(t->nice)) {
		int offset;

		_HA_ATOMIC_INC(&niced_tasks);
		offset = t->nice * (int)global.tune.runqueue_depth;
		t->rq.key += offset;
	}

	if (task_profiling_mask & tid_bit)
		t->call_date = now_mono_time();

	eb32sc_insert(root, &t->rq, t->thread_mask);

#ifdef USE_THREAD
	if (root == &rqueue) {
		_HA_ATOMIC_OR(&t->state, TASK_GLOBAL);
		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);

		/* If all threads that are supposed to handle this task are sleeping,
		 * wake one.
		 */
		if ((((t->thread_mask & all_threads_mask) & sleeping_thread_mask) ==
		     (t->thread_mask & all_threads_mask))) {
			unsigned long m = (t->thread_mask & all_threads_mask) &~ tid_bit;

			m = (m & (m - 1)) ^ m; // keep lowest bit set
			_HA_ATOMIC_AND(&sleeping_thread_mask, ~m);
			wake_thread(my_ffsl(m) - 1);
		}
	}
#endif
	return;
}

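/* Worked example of the nice offset above, assuming the default
 * tune.runqueue-depth of 200: a task with nice = -1024 gets its queue key
 * shifted by -1024 * 200 = -204800 insertion ticks, so it is picked well
 * before a nice = 0 task queued at the same instant, while nice = 1024
 * delays it by the same amount. Keys only order tasks relative to each
 * other, so wrapping of the tick counter is harmless.
 */
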
/*
 * __task_queue()
 *
 * Inserts a task into wait queue <wq> at the position given by its expiration
 * date. It does not matter if the task was already in the wait queue or not,
 * as it will be unlinked. The task MUST NOT have an infinite expiration timer.
 * Last, tasks must not be queued further than the end of the tree, which is
 * between <now_ms> and <now_ms> + 2^31 ms (now+24days in 32bit).
 *
 * This function should not be used directly, it is meant to be called by the
 * inline version of task_queue() which performs a few cheap preliminary tests
 * before deciding to call __task_queue(). Moreover this function doesn't care
 * at all about locking so the caller must be careful when deciding whether to
 * lock or not around this call.
 */
void __task_queue(struct task *task, struct eb_root *wq)
{
#ifdef USE_THREAD
	BUG_ON((wq == &timers && !(task->state & TASK_SHARED_WQ)) ||
	       (wq == &sched->timers && (task->state & TASK_SHARED_WQ)) ||
	       (wq != &timers && wq != &sched->timers));
#endif
	/* if this happens the process is doomed anyway, so better catch it now
	 * so that we have the caller in the stack.
	 */
	BUG_ON(task->expire == TICK_ETERNITY);

	if (likely(task_in_wq(task)))
		__task_unlink_wq(task);

	/* the task is not in the queue now */
	task->wq.key = task->expire;
#ifdef DEBUG_CHECK_INVALID_EXPIRATION_DATES
	if (tick_is_lt(task->wq.key, now_ms))
		/* we're queuing too far away or in the past (most likely) */
		return;
#endif

	eb32_insert(wq, &task->wq);
}

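/* Typical use (hypothetical caller): arm a 5 second timeout before queuing;
 * task_queue() is the inline wrapper that performs the cheap checks and the
 * locking before landing here:
 *
 *	t->expire = tick_add(now_ms, MS_TO_TICKS(5000));
 *	task_queue(t);
 */
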
/*
 * Extracts all expired timers from the timer queue, and wakes up all
 * associated tasks.
 */
void wake_expired_tasks()
{
	struct task_per_thread * const tt = sched; // thread's tasks
	int max_processed = global.tune.runqueue_depth;
	struct task *task;
	struct eb32_node *eb;
	__decl_thread(int key);

	while (max_processed-- > 0) {
 lookup_next_local:
		eb = eb32_lookup_ge(&tt->timers, now_ms - TIMER_LOOK_BACK);
		if (!eb) {
			/* we might have reached the end of the tree, typically because
			 * <now_ms> is in the first half and we're first scanning the last
			 * half. Let's loop back to the beginning of the tree now.
			 */
			eb = eb32_first(&tt->timers);
			if (likely(!eb))
				break;
		}

		/* It is possible that this task was left at an earlier place in the
		 * tree because a recent call to task_queue() has not moved it. This
		 * happens when the new expiration date is later than the old one.
		 * Since it is very unlikely that we reach a timeout anyway, it's a
		 * lot cheaper to proceed like this because we almost never update
		 * the tree. We may also find disabled expiration dates there. Since
		 * we have detached the task from the tree, we simply call task_queue
		 * to take care of this. Note that we might occasionally requeue it at
		 * the same place, before <eb>, so we have to check if this happens,
		 * and adjust <eb>, otherwise we may skip it which is not what we want.
		 * We may also not requeue the task (and not point eb at it) if its
		 * expiration time is not set. We also make sure we leave the real
		 * expiration date for the next task in the queue so that when calling
		 * next_timer_expiry() we're guaranteed to see the next real date and
		 * not the next apparent date. This is in order to avoid useless
		 * wakeups.
		 */

		task = eb32_entry(eb, struct task, wq);
		if (tick_is_expired(task->expire, now_ms)) {
			/* expired task, wake it up */
			__task_unlink_wq(task);
			task_wakeup(task, TASK_WOKEN_TIMER);
		}
		else if (task->expire != eb->key) {
			/* task is not expired but its key doesn't match so let's
			 * update it and skip to next apparently expired task.
			 */
			__task_unlink_wq(task);
			if (tick_isset(task->expire))
				__task_queue(task, &tt->timers);
		}
		else {
			/* task not expired and correctly placed. It may not be eternal. */
			BUG_ON(task->expire == TICK_ETERNITY);
			break;
		}
	}

#ifdef USE_THREAD
	if (eb_is_empty(&timers))
		goto leave;

	HA_RWLOCK_RDLOCK(TASK_WQ_LOCK, &wq_lock);
	eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
	if (!eb) {
		eb = eb32_first(&timers);
		if (likely(!eb)) {
			HA_RWLOCK_RDUNLOCK(TASK_WQ_LOCK, &wq_lock);
			goto leave;
		}
	}
	key = eb->key;

	if (tick_is_lt(now_ms, key)) {
		HA_RWLOCK_RDUNLOCK(TASK_WQ_LOCK, &wq_lock);
		goto leave;
	}

	/* There's really something of interest here, let's visit the queue */

	if (HA_RWLOCK_TRYRDTOSK(TASK_WQ_LOCK, &wq_lock)) {
		/* if we failed to grab the lock it means another thread is
		 * already doing the same here, so let it do the job.
		 */
		HA_RWLOCK_RDUNLOCK(TASK_WQ_LOCK, &wq_lock);
		goto leave;
	}

	while (1) {
 lookup_next:
		if (max_processed-- <= 0)
			break;
		eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
		if (!eb) {
			/* we might have reached the end of the tree, typically because
			 * <now_ms> is in the first half and we're first scanning the last
			 * half. Let's loop back to the beginning of the tree now.
			 */
			eb = eb32_first(&timers);
			if (likely(!eb))
				break;
		}

		task = eb32_entry(eb, struct task, wq);
		if (tick_is_expired(task->expire, now_ms)) {
			/* expired task, wake it up */
			HA_RWLOCK_SKTOWR(TASK_WQ_LOCK, &wq_lock);
			__task_unlink_wq(task);
			HA_RWLOCK_WRTOSK(TASK_WQ_LOCK, &wq_lock);
			task_wakeup(task, TASK_WOKEN_TIMER);
		}
		else if (task->expire != eb->key) {
			/* task is not expired but its key doesn't match so let's
			 * update it and skip to next apparently expired task.
			 */
			HA_RWLOCK_SKTOWR(TASK_WQ_LOCK, &wq_lock);
			__task_unlink_wq(task);
			if (tick_isset(task->expire))
				__task_queue(task, &timers);
			HA_RWLOCK_WRTOSK(TASK_WQ_LOCK, &wq_lock);
			goto lookup_next;
		}
		else {
			/* task not expired and correctly placed. It may not be eternal. */
			BUG_ON(task->expire == TICK_ETERNITY);
			break;
		}
	}

	HA_RWLOCK_SKUNLOCK(TASK_WQ_LOCK, &wq_lock);
#endif
leave:
	return;
}

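/* Note on the look-back window used in the lookups above: assuming
 * TIMER_LOOK_BACK is 1U << 31, with now_ms = 10 the scan starts at
 * 10 - (1U << 31), i.e. half a wrap (~24 days) in the past, so keys that
 * recently wrapped around the 32-bit space are still visited before keys
 * lying in the future.
 */
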
/* Checks the next timer for the current thread by looking into its own timer
 * list and the global one. It may return TICK_ETERNITY if no timer is present.
 * Note that the next timer might very well be slightly in the past.
 */
int next_timer_expiry()
{
	struct task_per_thread * const tt = sched; // thread's tasks
	struct eb32_node *eb;
	int ret = TICK_ETERNITY;
	__decl_thread(int key = TICK_ETERNITY);

	/* first check in the thread-local timers */
	eb = eb32_lookup_ge(&tt->timers, now_ms - TIMER_LOOK_BACK);
	if (!eb) {
		/* we might have reached the end of the tree, typically because
		 * <now_ms> is in the first half and we're first scanning the last
		 * half. Let's loop back to the beginning of the tree now.
		 */
		eb = eb32_first(&tt->timers);
	}

	if (eb)
		ret = eb->key;

#ifdef USE_THREAD
	if (!eb_is_empty(&timers)) {
		HA_RWLOCK_RDLOCK(TASK_WQ_LOCK, &wq_lock);
		eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
		if (!eb)
			eb = eb32_first(&timers);
		if (eb)
			key = eb->key;
		HA_RWLOCK_RDUNLOCK(TASK_WQ_LOCK, &wq_lock);
		if (eb)
			ret = tick_first(ret, key);
	}
#endif
	return ret;
}

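/* Sketch of how a poller may consume this (not the exact poller code): the
 * returned tick is turned into a bounded wait time before polling:
 *
 *	int next = next_timer_expiry();
 *	int wait = tick_isset(next) ? TICKS_TO_MS(tick_remain(now_ms, next))
 *	                            : MAX_DELAY_MS;
 */
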
/* Walks over tasklet lists sched->tasklets[0..TL_CLASSES-1] and runs at most
 * budget[TL_*] of them. Returns the number of entries effectively processed
 * (tasks and tasklets merged). The count of tasks in the list for the current
 * thread is adjusted.
 */
unsigned int run_tasks_from_lists(unsigned int budgets[])
{
	struct task *(*process)(struct task *t, void *ctx, unsigned int state);
	struct list *tl_queues = sched->tasklets;
	struct task *t;
	uint8_t budget_mask = (1 << TL_CLASSES) - 1;
	struct sched_activity *profile_entry = NULL;
	unsigned int done = 0;
	unsigned int queue;
	unsigned int state;
	void *ctx;

	for (queue = 0; queue < TL_CLASSES;) {
		sched->current_queue = queue;

		/* global.tune.sched.low-latency is set */
		if (global.tune.options & GTUNE_SCHED_LOW_LATENCY) {
			if (unlikely(sched->tl_class_mask & budget_mask & ((1 << queue) - 1))) {
				/* a lower queue index has tasks again and still has a
				 * budget to run them. Let's switch to it now.
				 */
				queue = (sched->tl_class_mask & 1) ? 0 :
					(sched->tl_class_mask & 2) ? 1 : 2;
				continue;
			}

			if (unlikely(queue > TL_URGENT &&
				     budget_mask & (1 << TL_URGENT) &&
				     !MT_LIST_ISEMPTY(&sched->shared_tasklet_list))) {
				/* an urgent tasklet arrived from another thread */
				break;
			}

			if (unlikely(queue > TL_NORMAL &&
				     budget_mask & (1 << TL_NORMAL) &&
				     (!eb_is_empty(&sched->rqueue) ||
				      (global_tasks_mask & tid_bit)))) {
				/* a task was woken up by a bulk tasklet or another thread */
				break;
			}
		}

		if (LIST_ISEMPTY(&tl_queues[queue])) {
			sched->tl_class_mask &= ~(1 << queue);
			queue++;
			continue;
		}

		if (!budgets[queue]) {
			budget_mask &= ~(1 << queue);
			queue++;
			continue;
		}

		budgets[queue]--;
		t = (struct task *)LIST_ELEM(tl_queues[queue].n, struct tasklet *, list);
		state = t->state & (TASK_SHARED_WQ|TASK_SELF_WAKING|TASK_HEAVY|TASK_F_TASKLET|TASK_KILLED|TASK_F_USR1);

		ti->flags &= ~TI_FL_STUCK; // this thread is still running
		activity[tid].ctxsw++;
		ctx = t->context;
		process = t->process;
		t->calls++;
		sched->current = t;

		_HA_ATOMIC_DEC(&sched->rq_total);

		if (state & TASK_F_TASKLET) {
			uint64_t before = 0;

			LIST_DEL_INIT(&((struct tasklet *)t)->list);
			__ha_barrier_store();

			if (unlikely(task_profiling_mask & tid_bit)) {
				profile_entry = sched_activity_entry(sched_activity, t->process);
				before = now_mono_time();
#ifdef DEBUG_TASK
				if (((struct tasklet *)t)->call_date) {
					HA_ATOMIC_ADD(&profile_entry->lat_time, before - ((struct tasklet *)t)->call_date);
					((struct tasklet *)t)->call_date = 0;
				}
#endif
			}

			state = _HA_ATOMIC_XCHG(&t->state, state);
			__ha_barrier_atomic_store();

			process(t, ctx, state);

			if (unlikely(task_profiling_mask & tid_bit)) {
				HA_ATOMIC_INC(&profile_entry->calls);
				HA_ATOMIC_ADD(&profile_entry->cpu_time, now_mono_time() - before);
			}

			done++;
			sched->current = NULL;
			__ha_barrier_store();
			continue;
		}

		LIST_DEL_INIT(&((struct tasklet *)t)->list);
		__ha_barrier_store();
		state = _HA_ATOMIC_XCHG(&t->state, state|TASK_RUNNING|TASK_F_USR1);
		__ha_barrier_atomic_store();

		/* OK then this is a regular task */

		_HA_ATOMIC_DEC(&task_per_thread[tid].tasks_in_list);
		if (unlikely(t->call_date)) {
			uint64_t now_ns = now_mono_time();
			uint64_t lat = now_ns - t->call_date;

			t->lat_time += lat;
			t->call_date = now_ns;
			profile_entry = sched_activity_entry(sched_activity, t->process);
			HA_ATOMIC_ADD(&profile_entry->lat_time, lat);
			HA_ATOMIC_INC(&profile_entry->calls);
		}

		__ha_barrier_store();

		/* Note for below: if TASK_KILLED arrived before we've read the state, we
		 * directly free the task. Otherwise it will be seen after processing and
		 * it's freed on the exit path.
		 */
		if (likely(!(state & TASK_KILLED) && process == process_stream))
			t = process_stream(t, ctx, state);
		else if (!(state & TASK_KILLED) && process != NULL)
			t = process(t, ctx, state);
		else {
			task_unlink_wq(t);
			__task_free(t);
			sched->current = NULL;
			__ha_barrier_store();
			/* We don't want max_processed to be decremented if
			 * we're just freeing a destroyed task, we should only
			 * do so if we really ran a task.
			 */
			continue;
		}
		sched->current = NULL;
		__ha_barrier_store();
		/* If there is a pending state we have to wake up the task
		 * immediately, else we defer it into wait queue
		 */
		if (t != NULL) {
			if (unlikely(t->call_date)) {
				uint64_t cpu = now_mono_time() - t->call_date;

				t->cpu_time += cpu;
				t->call_date = 0;
				HA_ATOMIC_ADD(&profile_entry->cpu_time, cpu);
			}

			state = _HA_ATOMIC_AND_FETCH(&t->state, ~TASK_RUNNING);
			if (unlikely(state & TASK_KILLED)) {
				task_unlink_wq(t);
				__task_free(t);
			}
			else if (state & TASK_WOKEN_ANY)
				task_wakeup(t, 0);
			else
				task_queue(t);
		}
		done++;
	}
	sched->current_queue = -1;

	return done;
}

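/* Call sketch (hypothetical budget split; the real caller is
 * process_runnable_tasks() below): run up to 16 entries spread over the
 * four classes:
 *
 *	unsigned int budgets[TL_CLASSES] = {
 *		[TL_URGENT] = 8, [TL_NORMAL] = 6, [TL_BULK] = 2, [TL_HEAVY] = 0,
 *	};
 *	unsigned int done = run_tasks_from_lists(budgets);
 */
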
/* The run queue is chronologically sorted in a tree. An insertion counter is
 * used to assign a position to each task. This counter may be combined with
 * other variables (eg: nice value) to set the final position in the tree. The
 * counter may wrap without a problem, of course. We then limit the number of
 * tasks processed to the configured runqueue-depth (200 by default) in any
 * case, so that general latency remains low and so that task positions have
 * a chance to be considered. The function scans both the global and local
 * run queues and picks the most urgent task between the two. We need to grab
 * the global runqueue lock to touch it so it's taken on the very first access
 * to the global run queue and is released as soon as it reaches the end.
 */
void process_runnable_tasks()
{
	struct task_per_thread * const tt = sched;
	struct eb32sc_node *lrq; // next local run queue entry
	struct eb32sc_node *grq; // next global run queue entry
	struct task *t;
	const unsigned int default_weights[TL_CLASSES] = {
		[TL_URGENT] = 64, // ~50% of CPU bandwidth for I/O
		[TL_NORMAL] = 48, // ~37% of CPU bandwidth for tasks
		[TL_BULK]   = 16, // ~13% of CPU bandwidth for self-wakers
		[TL_HEAVY]  = 1,  // never more than 1 heavy task at once
	};
	unsigned int max[TL_CLASSES]; // max to be run per class
	unsigned int max_total;       // sum of max above
	struct mt_list *tmp_list;
	unsigned int queue;
	int max_processed;
	int lpicked, gpicked;
	int heavy_queued = 0;
	int budget;

	ti->flags &= ~TI_FL_STUCK; // this thread is still running

	if (!thread_has_tasks()) {
		activity[tid].empty_rq++;
		return;
	}

	max_processed = global.tune.runqueue_depth;

	if (likely(niced_tasks))
		max_processed = (max_processed + 3) / 4;

	if (max_processed < sched->rq_total && sched->rq_total <= 2*max_processed) {
		/* If the run queue exceeds the budget by up to 50%, let's cut it
		 * into two identical halves to improve latency.
		 */
		max_processed = sched->rq_total / 2;
	}

 not_done_yet:
	max[TL_URGENT] = max[TL_NORMAL] = max[TL_BULK] = 0;

	/* urgent tasklets list gets a default weight of ~50% */
	if ((tt->tl_class_mask & (1 << TL_URGENT)) ||
	    !MT_LIST_ISEMPTY(&tt->shared_tasklet_list))
		max[TL_URGENT] = default_weights[TL_URGENT];

	/* normal tasklets list gets a default weight of ~37% */
	if ((tt->tl_class_mask & (1 << TL_NORMAL)) ||
	    !eb_is_empty(&sched->rqueue) || (global_tasks_mask & tid_bit))
		max[TL_NORMAL] = default_weights[TL_NORMAL];

	/* bulk tasklets list gets a default weight of ~13% */
	if ((tt->tl_class_mask & (1 << TL_BULK)))
		max[TL_BULK] = default_weights[TL_BULK];

	/* heavy tasks are processed only once and never refilled in a
	 * call round. That budget is not lost either as we don't reset
	 * it unless consumed.
	 */
	if (!heavy_queued) {
		if ((tt->tl_class_mask & (1 << TL_HEAVY)))
			max[TL_HEAVY] = default_weights[TL_HEAVY];
		else
			max[TL_HEAVY] = 0;
		heavy_queued = 1;
	}

	/* Now compute a fair share of the weights. Total may slightly exceed
	 * 100% due to rounding, this is not a problem. Note that while in
	 * theory the sum cannot be NULL as we cannot get there without tasklets
	 * to process, in practice it seldom happens when multiple writers
	 * conflict and rollback on MT_LIST_TRY_APPEND(shared_tasklet_list), causing
	 * a first MT_LIST_ISEMPTY() to succeed for thread_has_task() and the
	 * one above to finally fail. This is extremely rare and not a problem.
	 */
	max_total = max[TL_URGENT] + max[TL_NORMAL] + max[TL_BULK] + max[TL_HEAVY];
	if (!max_total)
		return;

	for (queue = 0; queue < TL_CLASSES; queue++)
		max[queue] = ((unsigned)max_processed * max[queue] + max_total - 1) / max_total;

	/* The heavy queue must never process more than one task at once
	 * anyway.
	 */
	if (max[TL_HEAVY] > 1)
		max[TL_HEAVY] = 1;

	lrq = grq = NULL;

	/* pick up to max[TL_NORMAL] regular tasks from prio-ordered run queues */
	/* Note: the grq lock is always held when grq is not null */
	lpicked = gpicked = 0;
	budget = max[TL_NORMAL] - tt->tasks_in_list;
	while (lpicked + gpicked < budget) {
		if ((global_tasks_mask & tid_bit) && !grq) {
#ifdef USE_THREAD
			HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
			grq = eb32sc_lookup_ge(&rqueue, global_rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
			if (unlikely(!grq)) {
				grq = eb32sc_first(&rqueue, tid_bit);
				if (!grq) {
					global_tasks_mask &= ~tid_bit;
					HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
				}
			}
#endif
		}

		/* If a global task is available for this thread, it's in grq
		 * now and the global RQ is locked.
		 */

		if (!lrq) {
			lrq = eb32sc_lookup_ge(&tt->rqueue, tt->rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
			if (unlikely(!lrq))
				lrq = eb32sc_first(&tt->rqueue, tid_bit);
		}

		if (!lrq && !grq)
			break;

		if (likely(!grq || (lrq && (int)(lrq->key - grq->key) <= 0))) {
			t = eb32sc_entry(lrq, struct task, rq);
			lrq = eb32sc_next(lrq, tid_bit);
			eb32sc_delete(&t->rq);
			lpicked++;
		}
#ifdef USE_THREAD
		else {
			t = eb32sc_entry(grq, struct task, rq);
			grq = eb32sc_next(grq, tid_bit);
			_HA_ATOMIC_AND(&t->state, ~TASK_GLOBAL);
			eb32sc_delete(&t->rq);

			if (unlikely(!grq)) {
				grq = eb32sc_first(&rqueue, tid_bit);
				if (!grq) {
					global_tasks_mask &= ~tid_bit;
					HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
				}
			}
			gpicked++;
		}
#endif
		if (t->nice)
			_HA_ATOMIC_DEC(&niced_tasks);

		/* Add it to the local task list */
		LIST_APPEND(&tt->tasklets[TL_NORMAL], &((struct tasklet *)t)->list);
	}

	/* release the rqueue lock */
	if (grq) {
		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
		grq = NULL;
	}

	if (lpicked + gpicked) {
		tt->tl_class_mask |= 1 << TL_NORMAL;
		_HA_ATOMIC_ADD(&tt->tasks_in_list, lpicked + gpicked);
#ifdef USE_THREAD
		if (gpicked) {
			_HA_ATOMIC_SUB(&grq_total, gpicked);
			_HA_ATOMIC_ADD(&tt->rq_total, gpicked);
		}
#endif
		activity[tid].tasksw += lpicked + gpicked;
	}

	/* Merge the list of tasklets woken up by other threads into the
	 * main list.
	 */
	tmp_list = MT_LIST_BEHEAD(&tt->shared_tasklet_list);
	if (tmp_list) {
		LIST_SPLICE_END_DETACHED(&tt->tasklets[TL_URGENT], (struct list *)tmp_list);
		if (!LIST_ISEMPTY(&tt->tasklets[TL_URGENT]))
			tt->tl_class_mask |= 1 << TL_URGENT;
	}

	/* execute tasklets in each queue */
	max_processed -= run_tasks_from_lists(max);

	/* some tasks may have woken other ones up */
	if (max_processed > 0 && thread_has_tasks())
		goto not_done_yet;

	if (tt->tl_class_mask)
		activity[tid].long_rq++;
}

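/* Worked example of the fair-share computation above: with
 * max_processed = 200 and all four classes active, max_total equals
 * 64 + 48 + 16 + 1 = 129, so the rounded-up shares are 100 for TL_URGENT,
 * 75 for TL_NORMAL, 25 for TL_BULK, and TL_HEAVY is clamped to 1.
 */
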
/* create a work list array for <nbthread> threads, using tasks made of
 * function <fct>. The context passed to the function will be the pointer to
 * the thread's work list, which will contain a copy of argument <arg>. The
 * wake up reason will be TASK_WOKEN_OTHER. The pointer to the work_list array
 * is returned on success, otherwise NULL on failure.
 */
struct work_list *work_list_create(int nbthread,
                                   struct task *(*fct)(struct task *, void *, unsigned int),
                                   void *arg)
{
	struct work_list *wl;
	int i;

	wl = calloc(nbthread, sizeof(*wl));
	if (!wl)
		goto fail;

	for (i = 0; i < nbthread; i++) {
		MT_LIST_INIT(&wl[i].head);
		wl[i].task = task_new(1UL << i);
		if (!wl[i].task)
			goto fail;
		wl[i].task->process = fct;
		wl[i].task->context = &wl[i];
		wl[i].arg = arg;
	}
	return wl;

 fail:
	work_list_destroy(wl, nbthread);
	return NULL;
}

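/* Usage sketch (hypothetical handler and names): one work list per thread,
 * each served by a task bound to that single thread:
 *
 *	static struct task *handle_job(struct task *t, void *ctx, unsigned int state)
 *	{
 *		struct work_list *wl = ctx;    // state is TASK_WOKEN_OTHER
 *		// ... consume entries queued on wl->head ...
 *		return t;
 *	}
 *
 *	struct work_list *wl = work_list_create(global.nbthread, handle_job, NULL);
 */
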
/* destroy work list <work> */
void work_list_destroy(struct work_list *work, int nbthread)
{
	int t;

	if (!work)
		return;
	for (t = 0; t < nbthread; t++)
		task_destroy(work[t].task);
	free(work);
}

/*
 * Delete every task before running the master polling loop
 */
void mworker_cleantasks()
{
	struct task *t;
	int i;
	struct eb32_node *tmp_wq = NULL;
	struct eb32sc_node *tmp_rq = NULL;

#ifdef USE_THREAD
	/* cleanup the global run queue */
	tmp_rq = eb32sc_first(&rqueue, MAX_THREADS_MASK);
	while (tmp_rq) {
		t = eb32sc_entry(tmp_rq, struct task, rq);
		tmp_rq = eb32sc_next(tmp_rq, MAX_THREADS_MASK);
		task_destroy(t);
	}
	/* cleanup the timers queue */
	tmp_wq = eb32_first(&timers);
	while (tmp_wq) {
		t = eb32_entry(tmp_wq, struct task, wq);
		tmp_wq = eb32_next(tmp_wq);
		task_destroy(t);
	}
#endif
	/* clean the per thread run queue */
	for (i = 0; i < global.nbthread; i++) {
		tmp_rq = eb32sc_first(&task_per_thread[i].rqueue, MAX_THREADS_MASK);
		while (tmp_rq) {
			t = eb32sc_entry(tmp_rq, struct task, rq);
			tmp_rq = eb32sc_next(tmp_rq, MAX_THREADS_MASK);
			task_destroy(t);
		}
		/* cleanup the per thread timers queue */
		tmp_wq = eb32_first(&task_per_thread[i].timers);
		while (tmp_wq) {
			t = eb32_entry(tmp_wq, struct task, wq);
			tmp_wq = eb32_next(tmp_wq);
			task_destroy(t);
		}
	}
}

/* perform minimal initializations */
static void init_task()
{
	int i, q;

#ifdef USE_THREAD
	memset(&timers, 0, sizeof(timers));
	memset(&rqueue, 0, sizeof(rqueue));
#endif
	memset(&task_per_thread, 0, sizeof(task_per_thread));
	for (i = 0; i < MAX_THREADS; i++) {
		for (q = 0; q < TL_CLASSES; q++)
			LIST_INIT(&task_per_thread[i].tasklets[q]);
		MT_LIST_INIT(&task_per_thread[i].shared_tasklet_list);
	}
}

/* config parser for global "tune.sched.low-latency", accepts "on" or "off" */
static int cfg_parse_tune_sched_low_latency(char **args, int section_type, struct proxy *curpx,
                                            const struct proxy *defpx, const char *file, int line,
                                            char **err)
{
	if (too_many_args(1, args, err, NULL))
		return -1;

	if (strcmp(args[1], "on") == 0)
		global.tune.options |= GTUNE_SCHED_LOW_LATENCY;
	else if (strcmp(args[1], "off") == 0)
		global.tune.options &= ~GTUNE_SCHED_LOW_LATENCY;
	else {
		memprintf(err, "'%s' expects either 'on' or 'off' but got '%s'.", args[0], args[1]);
		return -1;
	}
	return 0;
}

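/* Example configuration (global section of haproxy.cfg) toggling the mode
 * parsed above:
 *
 *	global
 *		tune.sched.low-latency on
 */
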
/* config keyword parsers */
static struct cfg_kw_list cfg_kws = {ILH, {
	{ CFG_GLOBAL, "tune.sched.low-latency", cfg_parse_tune_sched_low_latency },
	{ 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
INITCALL0(STG_PREPARE, init_task);

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */