/*
 * Task management functions.
 *
 * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <string.h>

#include <import/eb32sctree.h>
#include <import/eb32tree.h>

#include <haproxy/api.h>
#include <haproxy/activity.h>
#include <haproxy/cfgparse.h>
#include <haproxy/fd.h>
#include <haproxy/list.h>
#include <haproxy/pool.h>
#include <haproxy/task.h>
#include <haproxy/tools.h>

extern struct task *process_stream(struct task *t, void *context, unsigned int state);

DECLARE_POOL(pool_head_task,    "task",    sizeof(struct task));
DECLARE_POOL(pool_head_tasklet, "tasklet", sizeof(struct tasklet));

/* This is the memory pool containing all the signal structs. These
 * structs are used to store each required signal between two tasks.
 */
DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification));

volatile unsigned long global_tasks_mask = 0; /* Mask of threads with tasks in the global runqueue */
unsigned int niced_tasks = 0;  /* number of niced tasks in the run queue */

/* used for idle time calculation */
THREAD_LOCAL unsigned int samp_time = 0; /* total elapsed time over current sample */
THREAD_LOCAL unsigned int idle_time = 0; /* total idle time over current sample */

THREAD_LOCAL struct task_per_thread *sched = &task_per_thread[0]; /* scheduler context for the current thread */

__decl_aligned_spinlock(rq_lock); /* spin lock related to run queue */
__decl_aligned_rwlock(wq_lock);   /* RW lock related to the wait queue */

#ifdef USE_THREAD
struct eb_root timers;      /* sorted timers tree, global, accessed under wq_lock */
struct eb_root rqueue;      /* tree constituting the global run queue, accessed under rq_lock */
unsigned int grq_total;     /* total number of entries in the global run queue, atomic */
static unsigned int global_rqueue_ticks;  /* insertion count in the grq, use rq_lock */
#endif


struct task_per_thread task_per_thread[MAX_THREADS];


/* Flags the task <t> for immediate destruction and puts it into its first
 * thread's shared tasklet list if not yet queued/running. This will bypass
 * the priority scheduling and make the task show up as fast as possible in
 * the other thread's queue. Note that this operation isn't idempotent and is
 * not supposed to be run on the same task from multiple threads at once. It's
 * the caller's responsibility to make sure it is the only one able to kill the
 * task.
 */
void task_kill(struct task *t)
{
        unsigned int state = t->state;
        unsigned int thr;

        BUG_ON(state & TASK_KILLED);

        while (1) {
                while (state & (TASK_RUNNING | TASK_QUEUED)) {
                        /* task already in the queue and about to be executed,
                         * or even currently running. Just add the flag and be
                         * done with it, the process loop will detect it and kill
                         * it. The CAS will fail if we arrive too late.
                         */
                        if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_KILLED))
                                return;
                }

                /* We'll have to wake it up, but we must also secure it so that
                 * it doesn't vanish under us. TASK_QUEUED guarantees nobody will
                 * add past us.
                 */
                if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_QUEUED | TASK_KILLED)) {
                        /* Bypass the tree and go directly into the shared tasklet list.
                         * Note: that's a task so it must be accounted for as such. Pick
                         * the task's first thread for the job.
                         */
                        thr = my_ffsl(t->thread_mask) - 1;

                        /* Beware: tasks that have never run don't have their ->list empty yet! */
                        MT_LIST_APPEND(&task_per_thread[thr].shared_tasklet_list,
                                       (struct mt_list *)&((struct tasklet *)t)->list);
                        _HA_ATOMIC_INC(&task_per_thread[thr].rq_total);
                        _HA_ATOMIC_INC(&task_per_thread[thr].tasks_in_list);
                        if (sleeping_thread_mask & (1UL << thr)) {
                                _HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
                                wake_thread(thr);
                        }
                        return;
                }
        }
}
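
/* Usage sketch (illustrative only; my_handler and my_ctx are hypothetical
 * caller-side names, not part of this file): a task is created with
 * task_new(), given a handler, woken up, and may later be killed from any
 * thread; the scheduler then frees it on its next pass:
 *
 *     struct task *t = task_new(tid_bit);    // bound to the current thread
 *     t->process = my_handler;
 *     t->context = my_ctx;
 *     task_wakeup(t, TASK_WOKEN_MSG);
 *     ...
 *     task_kill(t);    // flags TASK_KILLED, the scheduler frees it
 */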
110
Amaury Denoyelle7b368332021-07-28 16:12:57 +0200111/* Equivalent of task_kill for tasklets. Mark the tasklet <t> for destruction.
112 * It will be deleted on the next scheduler invocation. This function is
113 * thread-safe : a thread can kill a tasklet of another thread.
114 */
115void tasklet_kill(struct tasklet *t)
116{
117 unsigned int state = t->state;
118 unsigned int thr;
119
120 BUG_ON(state & TASK_KILLED);
121
122 while (1) {
123 while (state & (TASK_IN_LIST)) {
124 /* Tasklet already in the list ready to be executed. Add
125 * the killed flag and wait for the process loop to
126 * detect it.
127 */
128 if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_KILLED))
129 return;
130 }
131
132 /* Mark the tasklet as killed and wake the thread to process it
133 * as soon as possible.
134 */
135 if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_IN_LIST | TASK_KILLED)) {
136 thr = t->tid > 0 ? t->tid: tid;
137 MT_LIST_APPEND(&task_per_thread[thr].shared_tasklet_list,
138 (struct mt_list *)&t->list);
139 _HA_ATOMIC_INC(&task_per_thread[thr].rq_total);
140 if (sleeping_thread_mask & (1UL << thr)) {
141 _HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
142 wake_thread(thr);
143 }
144 return;
145 }
146 }
147}
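
/* Companion sketch (my_io_cb and my_ctx are hypothetical): tasklets follow
 * the same lifecycle with tasklet_new()/tasklet_wakeup(), and tasklet_kill()
 * may be called from any thread to have the owner thread free the tasklet on
 * its next scheduler invocation:
 *
 *     struct tasklet *tl = tasklet_new();
 *     tl->process = my_io_cb;
 *     tl->context = my_ctx;
 *     tasklet_wakeup(tl);
 *     ...
 *     tasklet_kill(tl);
 */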

/* Do not call this one, please use tasklet_wakeup_on() instead, as this one is
 * the slow path of tasklet_wakeup_on() which performs some preliminary checks
 * and sets TASK_IN_LIST before calling this one. A negative <thr> designates
 * the current thread.
 */
void __tasklet_wakeup_on(struct tasklet *tl, int thr)
{
        if (likely(thr < 0)) {
                /* this tasklet runs on the caller thread */
                if (tl->state & TASK_HEAVY) {
                        LIST_APPEND(&sched->tasklets[TL_HEAVY], &tl->list);
                        sched->tl_class_mask |= 1 << TL_HEAVY;
                }
                else if (tl->state & TASK_SELF_WAKING) {
                        LIST_APPEND(&sched->tasklets[TL_BULK], &tl->list);
                        sched->tl_class_mask |= 1 << TL_BULK;
                }
                else if ((struct task *)tl == sched->current) {
                        _HA_ATOMIC_OR(&tl->state, TASK_SELF_WAKING);
                        LIST_APPEND(&sched->tasklets[TL_BULK], &tl->list);
                        sched->tl_class_mask |= 1 << TL_BULK;
                }
                else if (sched->current_queue < 0) {
                        LIST_APPEND(&sched->tasklets[TL_URGENT], &tl->list);
                        sched->tl_class_mask |= 1 << TL_URGENT;
                }
                else {
                        LIST_APPEND(&sched->tasklets[sched->current_queue], &tl->list);
                        sched->tl_class_mask |= 1 << sched->current_queue;
                }
                _HA_ATOMIC_INC(&sched->rq_total);
        } else {
                /* this tasklet runs on a specific thread. */
                MT_LIST_APPEND(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&tl->list);
                _HA_ATOMIC_INC(&task_per_thread[thr].rq_total);
                if (sleeping_thread_mask & (1UL << thr)) {
                        _HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
                        wake_thread(thr);
                }
        }
}
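
/* Minimal usage sketch: callers normally go through tasklet_wakeup() (current
 * thread) or tasklet_wakeup_on() (explicit thread), which set TASK_IN_LIST and
 * only fall into the slow path above on the first insertion:
 *
 *     tasklet_wakeup(tl);          // queued on one of the calling thread's class lists
 *     tasklet_wakeup_on(tl, 3);    // queued on thread 3's shared tasklet list
 *
 * The class picked above matters: TASK_HEAVY lands in TL_HEAVY, self-waking
 * tasklets in TL_BULK, and a wakeup performed outside any scheduler pass in
 * TL_URGENT, so it runs early on the next pass.
 */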

/* Puts the task <t> in the run queue at a position depending on t->nice.
 * The nice value assigns a boost proportional to the configured run queue
 * depth: the queue position key is shifted by nice * tune.runqueue-depth
 * ticks, so a nice of -1024 strongly favors the task while 1024 strongly
 * delays it. The state flags are cleared, so the caller will have to set
 * its flags after this call.
 * The task must not already be in the run queue. If unsure, use the safer
 * task_wakeup() function.
 */
void __task_wakeup(struct task *t)
{
        struct eb_root *root = &sched->rqueue;

#ifdef USE_THREAD
        if (t->thread_mask != tid_bit && global.nbthread != 1) {
                root = &rqueue;

                _HA_ATOMIC_INC(&grq_total);
                HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);

                global_tasks_mask |= t->thread_mask;
                t->rq.key = ++global_rqueue_ticks;
                __ha_barrier_store();
        } else
#endif
        {
                _HA_ATOMIC_INC(&sched->rq_total);
                t->rq.key = ++sched->rqueue_ticks;
        }

        if (likely(t->nice)) {
                int offset;

                _HA_ATOMIC_INC(&niced_tasks);
                offset = t->nice * (int)global.tune.runqueue_depth;
                t->rq.key += offset;
        }

        if (task_profiling_mask & tid_bit)
                t->call_date = now_mono_time();

        eb32sc_insert(root, &t->rq, t->thread_mask);

#ifdef USE_THREAD
        if (root == &rqueue) {
                _HA_ATOMIC_OR(&t->state, TASK_GLOBAL);
                HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);

                /* If all threads that are supposed to handle this task are sleeping,
                 * wake one.
                 */
                if ((((t->thread_mask & all_threads_mask) & sleeping_thread_mask) ==
                     (t->thread_mask & all_threads_mask))) {
                        unsigned long m = (t->thread_mask & all_threads_mask) &~ tid_bit;

                        m = (m & (m - 1)) ^ m; // keep lowest bit set
                        _HA_ATOMIC_AND(&sleeping_thread_mask, ~m);
                        wake_thread(my_ffsl(m) - 1);
                }
        }
#endif
        return;
}
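
/* Worked example of the nice offset above (a sketch, assuming the default
 * tune.runqueue-depth of 200): a task with nice=8 gets its queue key shifted
 * by 8*200 = 1600 ticks past its insertion rank, so roughly 1600 nice=0
 * insertions may overtake it before it runs; a negative nice shifts the key
 * backwards and makes the task jump ahead instead.
 */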

/*
 * __task_queue()
 *
 * Inserts a task into wait queue <wq> at the position given by its expiration
 * date. It does not matter if the task was already in the wait queue or not,
 * as it will be unlinked. The task MUST NOT have an infinite expiration timer.
 * Last, tasks must not be queued further than the end of the tree, which is
 * between <now_ms> and <now_ms> + 2^31 ms (now+24 days with 32-bit ticks).
 *
 * This function should not be used directly, it is meant to be called by the
 * inline version of task_queue() which performs a few cheap preliminary tests
 * before deciding to call __task_queue(). Moreover this function doesn't care
 * at all about locking so the caller must be careful when deciding whether to
 * lock or not around this call.
 */
void __task_queue(struct task *task, struct eb_root *wq)
{
#ifdef USE_THREAD
        BUG_ON((wq == &timers && !(task->state & TASK_SHARED_WQ)) ||
               (wq == &sched->timers && (task->state & TASK_SHARED_WQ)) ||
               (wq != &timers && wq != &sched->timers));
#endif
        /* if this happens the process is doomed anyway, so better catch it now
         * so that we have the caller in the stack.
         */
        BUG_ON(task->expire == TICK_ETERNITY);

        if (likely(task_in_wq(task)))
                __task_unlink_wq(task);

        /* the task is not in the queue now */
        task->wq.key = task->expire;
#ifdef DEBUG_CHECK_INVALID_EXPIRATION_DATES
        if (tick_is_lt(task->wq.key, now_ms))
                /* we're queuing too far away or in the past (most likely) */
                return;
#endif

        eb32_insert(wq, &task->wq);
}
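
/* Typical caller-side pattern (a sketch; my_task is hypothetical): arm the
 * expiration date first, then let the inline task_queue() run its cheap
 * checks before falling into __task_queue():
 *
 *     my_task->expire = tick_add(now_ms, MS_TO_TICKS(5000));   // 5s timeout
 *     task_queue(my_task);
 */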

/*
 * Extracts all expired timers from the timer queue and wakes up all
 * associated tasks.
 */
void wake_expired_tasks()
{
        struct task_per_thread * const tt = sched; // thread's tasks
        int max_processed = global.tune.runqueue_depth;
        struct task *task;
        struct eb32_node *eb;
        __decl_thread(int key);

        while (max_processed-- > 0) {
 lookup_next_local:
                eb = eb32_lookup_ge(&tt->timers, now_ms - TIMER_LOOK_BACK);
                if (!eb) {
                        /* we might have reached the end of the tree, typically because
                         * <now_ms> is in the first half and we're first scanning the last
                         * half. Let's loop back to the beginning of the tree now.
                         */
                        eb = eb32_first(&tt->timers);
                        if (likely(!eb))
                                break;
                }

                /* It is possible that this task was left at an earlier place in the
                 * tree because a recent call to task_queue() has not moved it. This
                 * happens when the new expiration date is later than the old one.
                 * Since it is very unlikely that we reach a timeout anyway, it's a
                 * lot cheaper to proceed like this because we almost never update
                 * the tree. We may also find disabled expiration dates there. Since
                 * we have detached the task from the tree, we simply call task_queue
                 * to take care of this. Note that we might occasionally requeue it at
                 * the same place, before <eb>, so we have to check if this happens,
                 * and adjust <eb>, otherwise we may skip it which is not what we want.
                 * We may also not requeue the task (and not point eb at it) if its
                 * expiration time is not set. We also make sure we leave the real
                 * expiration date for the next task in the queue so that when calling
                 * next_timer_expiry() we're guaranteed to see the next real date and
                 * not the next apparent date. This is in order to avoid useless
                 * wakeups.
                 */

                task = eb32_entry(eb, struct task, wq);
                if (tick_is_expired(task->expire, now_ms)) {
                        /* expired task, wake it up */
                        __task_unlink_wq(task);
                        task_wakeup(task, TASK_WOKEN_TIMER);
                }
                else if (task->expire != eb->key) {
                        /* task is not expired but its key doesn't match so let's
                         * update it and skip to next apparently expired task.
                         */
                        __task_unlink_wq(task);
                        if (tick_isset(task->expire))
                                __task_queue(task, &tt->timers);
                }
                else {
                        /* task not expired and correctly placed. It may not be eternal. */
                        BUG_ON(task->expire == TICK_ETERNITY);
                        break;
                }
        }

#ifdef USE_THREAD
        if (eb_is_empty(&timers))
                goto leave;

        HA_RWLOCK_RDLOCK(TASK_WQ_LOCK, &wq_lock);
        eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
        if (!eb) {
                eb = eb32_first(&timers);
                if (likely(!eb)) {
                        HA_RWLOCK_RDUNLOCK(TASK_WQ_LOCK, &wq_lock);
                        goto leave;
                }
        }
        key = eb->key;

        if (tick_is_lt(now_ms, key)) {
                HA_RWLOCK_RDUNLOCK(TASK_WQ_LOCK, &wq_lock);
                goto leave;
        }

        /* There's really something of interest here, let's visit the queue */

        if (HA_RWLOCK_TRYRDTOSK(TASK_WQ_LOCK, &wq_lock)) {
                /* if we failed to grab the lock it means another thread is
                 * already doing the same here, so let it do the job.
                 */
                HA_RWLOCK_RDUNLOCK(TASK_WQ_LOCK, &wq_lock);
                goto leave;
        }

        while (1) {
 lookup_next:
                if (max_processed-- <= 0)
                        break;
                eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
                if (!eb) {
                        /* we might have reached the end of the tree, typically because
                         * <now_ms> is in the first half and we're first scanning the last
                         * half. Let's loop back to the beginning of the tree now.
                         */
                        eb = eb32_first(&timers);
                        if (likely(!eb))
                                break;
                }

                task = eb32_entry(eb, struct task, wq);
                if (tick_is_expired(task->expire, now_ms)) {
                        /* expired task, wake it up */
                        HA_RWLOCK_SKTOWR(TASK_WQ_LOCK, &wq_lock);
                        __task_unlink_wq(task);
                        HA_RWLOCK_WRTOSK(TASK_WQ_LOCK, &wq_lock);
                        task_wakeup(task, TASK_WOKEN_TIMER);
                }
                else if (task->expire != eb->key) {
                        /* task is not expired but its key doesn't match so let's
                         * update it and skip to next apparently expired task.
                         */
                        HA_RWLOCK_SKTOWR(TASK_WQ_LOCK, &wq_lock);
                        __task_unlink_wq(task);
                        if (tick_isset(task->expire))
                                __task_queue(task, &timers);
                        HA_RWLOCK_WRTOSK(TASK_WQ_LOCK, &wq_lock);
                        goto lookup_next;
                }
                else {
                        /* task not expired and correctly placed. It may not be eternal. */
                        BUG_ON(task->expire == TICK_ETERNITY);
                        break;
                }
        }

        HA_RWLOCK_SKUNLOCK(TASK_WQ_LOCK, &wq_lock);
#endif
leave:
        return;
}
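
/* How this fits together (a simplified sketch of the per-iteration sequence
 * in the polling loop, not a literal copy of run_poll_loop()): expired timers
 * are fired first, runnable tasks are executed next, and the earliest timer
 * bounds how long the poller may sleep:
 *
 *     wake_expired_tasks();
 *     process_runnable_tasks();
 *     next = next_timer_expiry();   // feeds the poller's timeout computation
 */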

/* Checks the next timer for the current thread by looking into its own timer
 * list and the global one. It may return TICK_ETERNITY if no timer is present.
 * Note that the next timer might very well be slightly in the past.
 */
int next_timer_expiry()
{
        struct task_per_thread * const tt = sched; // thread's tasks
        struct eb32_node *eb;
        int ret = TICK_ETERNITY;
        __decl_thread(int key = TICK_ETERNITY);

        /* first check in the thread-local timers */
        eb = eb32_lookup_ge(&tt->timers, now_ms - TIMER_LOOK_BACK);
        if (!eb) {
                /* we might have reached the end of the tree, typically because
                 * <now_ms> is in the first half and we're first scanning the last
                 * half. Let's loop back to the beginning of the tree now.
                 */
                eb = eb32_first(&tt->timers);
        }

        if (eb)
                ret = eb->key;

#ifdef USE_THREAD
        if (!eb_is_empty(&timers)) {
                HA_RWLOCK_RDLOCK(TASK_WQ_LOCK, &wq_lock);
                eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
                if (!eb)
                        eb = eb32_first(&timers);
                if (eb)
                        key = eb->key;
                HA_RWLOCK_RDUNLOCK(TASK_WQ_LOCK, &wq_lock);
                if (eb)
                        ret = tick_first(ret, key);
        }
#endif
        return ret;
}
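
/* Consumption sketch (an assumption about caller-side plumbing; the real code
 * lives with the pollers, not here): the returned tick is turned into a
 * bounded sleep delay, remembering that it may already be in the past:
 *
 *     int next = next_timer_expiry();
 *     int wait = tick_isset(next) ? TICKS_TO_MS(tick_remain(now_ms, next)) : MAX_DELAY_MS;
 */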

/* Walks over tasklet lists sched->tasklets[0..TL_CLASSES-1] and runs at most
 * budget[TL_*] of them. Returns the number of entries effectively processed
 * (tasks and tasklets merged). The count of tasks in the list for the current
 * thread is adjusted.
 */
unsigned int run_tasks_from_lists(unsigned int budgets[])
{
        struct task *(*process)(struct task *t, void *ctx, unsigned int state);
        struct list *tl_queues = sched->tasklets;
        struct task *t;
        uint8_t budget_mask = (1 << TL_CLASSES) - 1;
        struct sched_activity *profile_entry = NULL;
        unsigned int done = 0;
        unsigned int queue;
        unsigned int state;
        void *ctx;

        for (queue = 0; queue < TL_CLASSES;) {
                sched->current_queue = queue;

                /* global.tune.sched.low-latency is set */
                if (global.tune.options & GTUNE_SCHED_LOW_LATENCY) {
                        if (unlikely(sched->tl_class_mask & budget_mask & ((1 << queue) - 1))) {
                                /* a lower queue index has tasks again and still has a
                                 * budget to run them. Let's switch to it now.
                                 */
                                queue = (sched->tl_class_mask & 1) ? 0 :
                                        (sched->tl_class_mask & 2) ? 1 : 2;
                                continue;
                        }

                        if (unlikely(queue > TL_URGENT &&
                                     budget_mask & (1 << TL_URGENT) &&
                                     !MT_LIST_ISEMPTY(&sched->shared_tasklet_list))) {
                                /* an urgent tasklet arrived from another thread */
                                break;
                        }

                        if (unlikely(queue > TL_NORMAL &&
                                     budget_mask & (1 << TL_NORMAL) &&
                                     (!eb_is_empty(&sched->rqueue) ||
                                      (global_tasks_mask & tid_bit)))) {
                                /* a task was woken up by a bulk tasklet or another thread */
                                break;
                        }
                }

                if (LIST_ISEMPTY(&tl_queues[queue])) {
                        sched->tl_class_mask &= ~(1 << queue);
                        queue++;
                        continue;
                }

                if (!budgets[queue]) {
                        budget_mask &= ~(1 << queue);
                        queue++;
                        continue;
                }

                budgets[queue]--;
                t = (struct task *)LIST_ELEM(tl_queues[queue].n, struct tasklet *, list);
                state = t->state & (TASK_SHARED_WQ|TASK_SELF_WAKING|TASK_HEAVY|TASK_F_TASKLET|TASK_KILLED|TASK_F_USR1);

                ti->flags &= ~TI_FL_STUCK; // this thread is still running
                activity[tid].ctxsw++;
                ctx = t->context;
                process = t->process;
                t->calls++;
                sched->current = t;

                _HA_ATOMIC_DEC(&sched->rq_total);

                if (state & TASK_F_TASKLET) {
                        uint64_t before = 0;

                        LIST_DEL_INIT(&((struct tasklet *)t)->list);
                        __ha_barrier_store();

                        if (unlikely(task_profiling_mask & tid_bit)) {
                                profile_entry = sched_activity_entry(sched_activity, t->process);
                                before = now_mono_time();
#ifdef DEBUG_TASK
                                if (((struct tasklet *)t)->call_date) {
                                        HA_ATOMIC_ADD(&profile_entry->lat_time, before - ((struct tasklet *)t)->call_date);
                                        ((struct tasklet *)t)->call_date = 0;
                                }
#endif
                        }

                        state = _HA_ATOMIC_XCHG(&t->state, state);
                        __ha_barrier_atomic_store();

                        if (likely(!(state & TASK_KILLED))) {
                                process(t, ctx, state);
                        }
                        else {
                                done++;
                                sched->current = NULL;
                                pool_free(pool_head_tasklet, t);
                                __ha_barrier_store();
                                continue;
                        }

                        if (unlikely(task_profiling_mask & tid_bit)) {
                                HA_ATOMIC_INC(&profile_entry->calls);
                                HA_ATOMIC_ADD(&profile_entry->cpu_time, now_mono_time() - before);
                        }

                        done++;
                        sched->current = NULL;
                        __ha_barrier_store();
                        continue;
                }

                LIST_DEL_INIT(&((struct tasklet *)t)->list);
                __ha_barrier_store();
                state = _HA_ATOMIC_XCHG(&t->state, state|TASK_RUNNING|TASK_F_USR1);
                __ha_barrier_atomic_store();

                /* OK then this is a regular task */

                _HA_ATOMIC_DEC(&task_per_thread[tid].tasks_in_list);
                if (unlikely(t->call_date)) {
                        uint64_t now_ns = now_mono_time();
                        uint64_t lat = now_ns - t->call_date;

                        t->lat_time += lat;
                        t->call_date = now_ns;
                        profile_entry = sched_activity_entry(sched_activity, t->process);
                        HA_ATOMIC_ADD(&profile_entry->lat_time, lat);
                        HA_ATOMIC_INC(&profile_entry->calls);
                }

                __ha_barrier_store();

                /* Note for below: if TASK_KILLED arrived before we've read the state, we
                 * directly free the task. Otherwise it will be seen after processing and
                 * it's freed on the exit path.
                 */
                if (likely(!(state & TASK_KILLED) && process == process_stream))
                        t = process_stream(t, ctx, state);
                else if (!(state & TASK_KILLED) && process != NULL)
                        t = process(t, ctx, state);
                else {
                        task_unlink_wq(t);
                        __task_free(t);
                        sched->current = NULL;
                        __ha_barrier_store();
                        /* We don't want max_processed to be decremented if
                         * we're just freeing a destroyed task, we should only
                         * do so if we really ran a task.
                         */
                        continue;
                }
                sched->current = NULL;
                __ha_barrier_store();
                /* If there is a pending state we have to wake up the task
                 * immediately, else we defer it into wait queue
                 */
                if (t != NULL) {
                        if (unlikely(t->call_date)) {
                                uint64_t cpu = now_mono_time() - t->call_date;

                                t->cpu_time += cpu;
                                t->call_date = 0;
                                HA_ATOMIC_ADD(&profile_entry->cpu_time, cpu);
                        }

                        state = _HA_ATOMIC_AND_FETCH(&t->state, ~TASK_RUNNING);
                        if (unlikely(state & TASK_KILLED)) {
                                task_unlink_wq(t);
                                __task_free(t);
                        }
                        else if (state & TASK_WOKEN_ANY)
                                task_wakeup(t, 0);
                        else
                                task_queue(t);
                }
                done++;
        }
        sched->current_queue = -1;

        return done;
}
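
/* Caller-side sketch: this function is driven by process_runnable_tasks()
 * below, which computes one budget per class and consumes the return value:
 *
 *     unsigned int budgets[TL_CLASSES];
 *     // ... fill budgets[TL_URGENT..TL_HEAVY] from the class weights ...
 *     max_processed -= run_tasks_from_lists(budgets);
 *
 * Each budgets[q] is decremented as entries of class <q> run, and a class
 * whose budget reaches zero is masked out of budget_mask for the rest of
 * the call.
 */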

/* The run queue is chronologically sorted in a tree. An insertion counter is
 * used to assign a position to each task. This counter may be combined with
 * other variables (eg: nice value) to set the final position in the tree. The
 * counter may wrap without a problem, of course. We then limit the number of
 * tasks processed per call to the configured runqueue depth (200 by default),
 * so that general latency remains low and so that task positions have a
 * chance to be considered. The function scans both the global and local run
 * queues and picks the most urgent task between the two. We need to grab the
 * global runqueue lock to touch it so it's taken on the very first access to
 * the global run queue and is released as soon as it reaches the end.
 */
void process_runnable_tasks()
{
        struct task_per_thread * const tt = sched;
        struct eb32sc_node *lrq; // next local run queue entry
        struct eb32sc_node *grq; // next global run queue entry
        struct task *t;
        const unsigned int default_weights[TL_CLASSES] = {
                [TL_URGENT] = 64, // ~50% of CPU bandwidth for I/O
                [TL_NORMAL] = 48, // ~37% of CPU bandwidth for tasks
                [TL_BULK]   = 16, // ~13% of CPU bandwidth for self-wakers
                [TL_HEAVY]  = 1,  // never more than 1 heavy task at once
        };
        unsigned int max[TL_CLASSES]; // max to be run per class
        unsigned int max_total;       // sum of max above
        struct mt_list *tmp_list;
        unsigned int queue;
        int max_processed;
        int lpicked, gpicked;
        int heavy_queued = 0;
        int budget;

        ti->flags &= ~TI_FL_STUCK; // this thread is still running

        if (!thread_has_tasks()) {
                activity[tid].empty_rq++;
                return;
        }

        max_processed = global.tune.runqueue_depth;

        if (likely(niced_tasks))
                max_processed = (max_processed + 3) / 4;

        if (max_processed < sched->rq_total && sched->rq_total <= 2*max_processed) {
                /* If the run queue exceeds the budget by up to 50%, let's cut it
                 * into two identical halves to improve latency.
                 */
                max_processed = sched->rq_total / 2;
        }

 not_done_yet:
        max[TL_URGENT] = max[TL_NORMAL] = max[TL_BULK] = 0;

        /* urgent tasklets list gets a default weight of ~50% */
        if ((tt->tl_class_mask & (1 << TL_URGENT)) ||
            !MT_LIST_ISEMPTY(&tt->shared_tasklet_list))
                max[TL_URGENT] = default_weights[TL_URGENT];

        /* normal tasklets list gets a default weight of ~37% */
        if ((tt->tl_class_mask & (1 << TL_NORMAL)) ||
            !eb_is_empty(&sched->rqueue) || (global_tasks_mask & tid_bit))
                max[TL_NORMAL] = default_weights[TL_NORMAL];

        /* bulk tasklets list gets a default weight of ~13% */
        if ((tt->tl_class_mask & (1 << TL_BULK)))
                max[TL_BULK] = default_weights[TL_BULK];

        /* heavy tasks are processed only once and never refilled in a
         * call round. That budget is not lost either as we don't reset
         * it unless consumed.
         */
        if (!heavy_queued) {
                if ((tt->tl_class_mask & (1 << TL_HEAVY)))
                        max[TL_HEAVY] = default_weights[TL_HEAVY];
                else
                        max[TL_HEAVY] = 0;
                heavy_queued = 1;
        }

        /* Now compute a fair share of the weights. Total may slightly exceed
         * 100% due to rounding, this is not a problem. Note that while in
         * theory the sum cannot be zero as we cannot get there without tasklets
         * to process, in practice it can occasionally happen when multiple
         * writers conflict and roll back on MT_LIST_TRY_APPEND(shared_tasklet_list),
         * causing a first MT_LIST_ISEMPTY() to succeed for thread_has_task() and
         * the one above to finally fail. This is extremely rare and not a problem.
         */
        max_total = max[TL_URGENT] + max[TL_NORMAL] + max[TL_BULK] + max[TL_HEAVY];
        if (!max_total)
                return;

        for (queue = 0; queue < TL_CLASSES; queue++)
                max[queue] = ((unsigned)max_processed * max[queue] + max_total - 1) / max_total;

        /* The heavy queue must never process more than one task at once
         * anyway.
         */
        if (max[TL_HEAVY] > 1)
                max[TL_HEAVY] = 1;

        lrq = grq = NULL;

        /* pick up to max[TL_NORMAL] regular tasks from prio-ordered run queues */
        /* Note: the grq lock is always held when grq is not null */
        lpicked = gpicked = 0;
        budget = max[TL_NORMAL] - tt->tasks_in_list;
        while (lpicked + gpicked < budget) {
                if ((global_tasks_mask & tid_bit) && !grq) {
#ifdef USE_THREAD
                        HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
                        grq = eb32sc_lookup_ge(&rqueue, global_rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
                        if (unlikely(!grq)) {
                                grq = eb32sc_first(&rqueue, tid_bit);
                                if (!grq) {
                                        global_tasks_mask &= ~tid_bit;
                                        HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
                                }
                        }
#endif
                }

                /* If a global task is available for this thread, it's in grq
                 * now and the global RQ is locked.
                 */

                if (!lrq) {
                        lrq = eb32sc_lookup_ge(&tt->rqueue, tt->rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
                        if (unlikely(!lrq))
                                lrq = eb32sc_first(&tt->rqueue, tid_bit);
                }

                if (!lrq && !grq)
                        break;

                if (likely(!grq || (lrq && (int)(lrq->key - grq->key) <= 0))) {
                        t = eb32sc_entry(lrq, struct task, rq);
                        lrq = eb32sc_next(lrq, tid_bit);
                        eb32sc_delete(&t->rq);
                        lpicked++;
                }
#ifdef USE_THREAD
                else {
                        t = eb32sc_entry(grq, struct task, rq);
                        grq = eb32sc_next(grq, tid_bit);
                        _HA_ATOMIC_AND(&t->state, ~TASK_GLOBAL);
                        eb32sc_delete(&t->rq);

                        if (unlikely(!grq)) {
                                grq = eb32sc_first(&rqueue, tid_bit);
                                if (!grq) {
                                        global_tasks_mask &= ~tid_bit;
                                        HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
                                }
                        }
                        gpicked++;
                }
#endif
                if (t->nice)
                        _HA_ATOMIC_DEC(&niced_tasks);

                /* Add it to the local task list */
                LIST_APPEND(&tt->tasklets[TL_NORMAL], &((struct tasklet *)t)->list);
        }

        /* release the rqueue lock */
        if (grq) {
                HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
                grq = NULL;
        }

        if (lpicked + gpicked) {
                tt->tl_class_mask |= 1 << TL_NORMAL;
                _HA_ATOMIC_ADD(&tt->tasks_in_list, lpicked + gpicked);
#ifdef USE_THREAD
                if (gpicked) {
                        _HA_ATOMIC_SUB(&grq_total, gpicked);
                        _HA_ATOMIC_ADD(&tt->rq_total, gpicked);
                }
#endif
                activity[tid].tasksw += lpicked + gpicked;
        }

        /* Merge the list of tasklets woken up by other threads to the
         * main list.
         */
        tmp_list = MT_LIST_BEHEAD(&tt->shared_tasklet_list);
        if (tmp_list) {
                LIST_SPLICE_END_DETACHED(&tt->tasklets[TL_URGENT], (struct list *)tmp_list);
                if (!LIST_ISEMPTY(&tt->tasklets[TL_URGENT]))
                        tt->tl_class_mask |= 1 << TL_URGENT;
        }

        /* execute tasklets in each queue */
        max_processed -= run_tasks_from_lists(max);

        /* some tasks may have woken other ones up */
        if (max_processed > 0 && thread_has_tasks())
                goto not_done_yet;

        if (tt->tl_class_mask)
                activity[tid].long_rq++;
}
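
/* Worked example of the fair-share arithmetic above, assuming all four
 * classes are non-empty and max_processed = 200: max_total = 64+48+16+1 = 129
 * and each class receives ceil(200 * weight / 129), i.e.
 *
 *     max[TL_URGENT] = (200*64 + 128) / 129 = 100
 *     max[TL_NORMAL] = (200*48 + 128) / 129 = 75
 *     max[TL_BULK]   = (200*16 + 128) / 129 = 25
 *     max[TL_HEAVY]  = (200*1  + 128) / 129 = 2, then clamped to 1
 */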

/* Reports the average CPU idle percentage over all running threads, between 0 and 100 */
uint sched_report_idle()
{
        uint total = 0;
        uint rthr = 0;
        uint thr;

        for (thr = 0; thr < MAX_THREADS; thr++) {
                if (!(all_threads_mask & (1UL << thr)))
                        continue;
                total += HA_ATOMIC_LOAD(&ha_thread_info[thr].idle_pct);
                rthr++;
        }
        return rthr ? total / rthr : 0;
}

/*
 * Deletes every task before running the master polling loop
 */
void mworker_cleantasks()
{
        struct task *t;
        int i;
        struct eb32_node *tmp_wq = NULL;
        struct eb32sc_node *tmp_rq = NULL;

#ifdef USE_THREAD
        /* cleanup the global run queue */
        tmp_rq = eb32sc_first(&rqueue, MAX_THREADS_MASK);
        while (tmp_rq) {
                t = eb32sc_entry(tmp_rq, struct task, rq);
                tmp_rq = eb32sc_next(tmp_rq, MAX_THREADS_MASK);
                task_destroy(t);
        }
        /* cleanup the timers queue */
        tmp_wq = eb32_first(&timers);
        while (tmp_wq) {
                t = eb32_entry(tmp_wq, struct task, wq);
                tmp_wq = eb32_next(tmp_wq);
                task_destroy(t);
        }
#endif
        /* clean the per thread run queue */
        for (i = 0; i < global.nbthread; i++) {
                tmp_rq = eb32sc_first(&task_per_thread[i].rqueue, MAX_THREADS_MASK);
                while (tmp_rq) {
                        t = eb32sc_entry(tmp_rq, struct task, rq);
                        tmp_rq = eb32sc_next(tmp_rq, MAX_THREADS_MASK);
                        task_destroy(t);
                }
                /* cleanup the per thread timers queue */
                tmp_wq = eb32_first(&task_per_thread[i].timers);
                while (tmp_wq) {
                        t = eb32_entry(tmp_wq, struct task, wq);
                        tmp_wq = eb32_next(tmp_wq);
                        task_destroy(t);
                }
        }
}

/* perform minimal initializations */
static void init_task()
{
        int i, q;

#ifdef USE_THREAD
        memset(&timers, 0, sizeof(timers));
        memset(&rqueue, 0, sizeof(rqueue));
#endif
        memset(&task_per_thread, 0, sizeof(task_per_thread));
        for (i = 0; i < MAX_THREADS; i++) {
                for (q = 0; q < TL_CLASSES; q++)
                        LIST_INIT(&task_per_thread[i].tasklets[q]);
                MT_LIST_INIT(&task_per_thread[i].shared_tasklet_list);
        }
}

/* config parser for global "tune.sched.low-latency", accepts "on" or "off" */
static int cfg_parse_tune_sched_low_latency(char **args, int section_type, struct proxy *curpx,
                                            const struct proxy *defpx, const char *file, int line,
                                            char **err)
{
        if (too_many_args(1, args, err, NULL))
                return -1;

        if (strcmp(args[1], "on") == 0)
                global.tune.options |= GTUNE_SCHED_LOW_LATENCY;
        else if (strcmp(args[1], "off") == 0)
                global.tune.options &= ~GTUNE_SCHED_LOW_LATENCY;
        else {
                memprintf(err, "'%s' expects either 'on' or 'off' but got '%s'.", args[0], args[1]);
                return -1;
        }
        return 0;
}
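
/* Example configuration (haproxy.cfg) enabling the option parsed above:
 *
 *     global
 *         tune.sched.low-latency on
 *
 * When set, run_tasks_from_lists() above re-checks lower-numbered (more
 * urgent) queues between tasklets instead of draining each class in turn,
 * trading a bit of throughput for lower scheduling latency.
 */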

/* config keyword parsers */
static struct cfg_kw_list cfg_kws = {ILH, {
        { CFG_GLOBAL, "tune.sched.low-latency", cfg_parse_tune_sched_low_latency },
        { 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
INITCALL0(STG_PREPARE, init_task);

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */