/*
 * include/proto/task.h
 * Functions for task management.
 *
 * Copyright (C) 2000-2010 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _PROTO_TASK_H
#define _PROTO_TASK_H


#include <sys/time.h>

#include <common/config.h>
#include <common/memory.h>
#include <common/mini-clist.h>
#include <common/standard.h>
#include <common/ticks.h>
#include <common/hathreads.h>

#include <eb32sctree.h>
#include <eb32tree.h>

#include <types/global.h>
#include <types/task.h>

#include <proto/fd.h>

/* Principle of the wait queue.
 *
 * We want to be able to tell whether an expiration date is before or after the
 * current time <now>. We KNOW that expiration dates are never too far apart,
 * because they are measured in ticks (milliseconds). We also know that almost
 * all dates will be in the future, and that a very small part of them will be
 * in the past; they are the ones which have expired since the last time we
 * checked them. Using ticks, we know if a date is in the future or in the
 * past, but we cannot use that to store sorted information because that
 * reference changes all the time.
 *
 * We'll use the fact that the time wraps to sort timers. Timers above <now>
 * are in the future, timers below <now> are in the past. Here, "above" and
 * "below" are to be considered modulo 2^31.
 *
 * Timers are stored sorted in an ebtree. We use the new ability for ebtrees to
 * lookup values starting from X to only expire tasks between <now> - 2^31 and
 * <now>. If the end of the tree is reached while walking over it, we simply
 * loop back to the beginning. That way, we have no problem keeping sorted
 * wrapping timers in a tree, between (now - 24 days) and (now + 24 days). The
 * keys in the tree always reflect their real position, none can be infinite.
 * This reduces the number of checks to be performed.
 *
 * Another nice optimisation is to allow a timer to stay at an old place in the
 * queue as long as it's not further than the real expiration date. That way,
 * we use the tree as a place holder for a minorant of the real expiration
 * date. Since we have a very low chance of hitting a timeout anyway, we can
 * bounce the nodes to their right place when we scan the tree if we encounter
 * a misplaced node once in a while. This even allows us not to remove the
 * infinite timers from the wait queue.
 *
 * So, to summarize, we have :
 *  - node->key always defines current position in the wait queue
 *  - timer is the real expiration date (possibly infinite)
 *  - node->key is always before or equal to timer
 *
 * The run queue works similarly to the wait queue except that the current date
 * is replaced by an insertion counter which can also wrap without any problem.
 */

/* The farthest we can look back in a timer tree */
#define TIMER_LOOK_BACK       (1U << 31)
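
/* Illustrative sketch (not part of the API): with wrapping ticks, ordering is
 * decided modulo 2^31, so a key may be numerically smaller than <now> and
 * still be in the future. Assuming <now_ms> holds the current tick:
 *
 *     int exp = tick_add(now_ms, 5000);      // expires in 5 seconds
 *     if (tick_is_expired(exp, now_ms))
 *             ;                              // in the past: handle the timeout
 *
 * This stays correct when tick_add() wraps past 2^32, because the comparison
 * only looks at the signed difference between the two dates, which is what
 * TIMER_LOOK_BACK bounds in the tree walk described above.
 */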

/* a few exported variables */
extern unsigned int nb_tasks;     /* total number of tasks */
extern volatile unsigned long global_tasks_mask; /* Mask of threads with tasks in the global runqueue */
extern unsigned int tasks_run_queue;    /* run queue size */
extern unsigned int tasks_run_queue_cur;
extern unsigned int nb_tasks_cur;
extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
extern struct pool_head *pool_head_task;
extern struct pool_head *pool_head_tasklet;
extern struct pool_head *pool_head_notification;
extern THREAD_LOCAL struct task_per_thread *sched; /* current thread's scheduler context */
#ifdef USE_THREAD
extern struct eb_root timers;      /* sorted timers tree, global */
extern struct eb_root rqueue;      /* tree constituting the run queue */
extern int global_rqueue_size; /* Number of elements in the global runqueue */
#endif

extern struct task_per_thread task_per_thread[MAX_THREADS];

__decl_hathreads(extern HA_SPINLOCK_T rq_lock);  /* spin lock related to run queue */
__decl_hathreads(extern HA_RWLOCK_T wq_lock);    /* RW lock related to the wait queue */


/* return 0 if task is in run queue, otherwise non-zero */
static inline int task_in_rq(struct task *t)
{
	/* Check if leaf_p is NULL, in case the task is not in the run queue,
	 * and if it's not 0x1, which would mean it's in the tasklet list.
	 */
	return t->rq.node.leaf_p != NULL;
}

/* return 0 if task is in wait queue, otherwise non-zero */
static inline int task_in_wq(struct task *t)
{
	return t->wq.node.leaf_p != NULL;
}

/* Puts the task <t> in the run queue with reason flags <f>, and returns <t>.
 * The task is put in the local run queue if it is only runnable by the
 * current thread, and in the global run queue otherwise.
 */
void __task_wakeup(struct task *t, struct eb_root *);
static inline void task_wakeup(struct task *t, unsigned int f)
{
	unsigned short state;

#ifdef USE_THREAD
	struct eb_root *root;

	if (t->thread_mask == tid_bit || global.nbthread == 1)
		root = &sched->rqueue;
	else
		root = &rqueue;
#else
	struct eb_root *root = &sched->rqueue;
#endif

	state = _HA_ATOMIC_OR(&t->state, f);
	while (!(state & (TASK_RUNNING | TASK_QUEUED))) {
		if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_QUEUED)) {
			__task_wakeup(t, root);
			break;
		}
	}
}
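
/* Illustrative usage sketch: waking a task from another subsystem; <my_task>
 * is hypothetical, TASK_WOKEN_MSG is one of the wakeup reason flags:
 *
 *     task_wakeup(my_task, TASK_WOKEN_MSG);
 *
 * The CAS loop above ensures that only the first caller to set TASK_QUEUED
 * inserts the task into the run queue, so concurrent wakeups from several
 * threads remain safe and idempotent.
 */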

/* change the thread affinity of a task to <thread_mask> */
static inline void task_set_affinity(struct task *t, unsigned long thread_mask)
{
	t->thread_mask = thread_mask;
}

/*
 * Unlink the task from the wait queue, and possibly update the last_timer
 * pointer. A pointer to the task itself is returned. The task *must* already
 * be in the wait queue before calling this function. If unsure, use the safer
 * task_unlink_wq() function.
 */
static inline struct task *__task_unlink_wq(struct task *t)
{
	eb32_delete(&t->wq);
	return t;
}

/* remove a task from its wait queue. It may either be the local wait queue if
 * the task is bound to a single thread (in which case there's no locking
 * involved) or the global queue, with locking.
 */
static inline struct task *task_unlink_wq(struct task *t)
{
	unsigned long locked;

	if (likely(task_in_wq(t))) {
		locked = atleast2(t->thread_mask);
		if (locked)
			HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
		__task_unlink_wq(t);
		if (locked)
			HA_RWLOCK_WRUNLOCK(TASK_WQ_LOCK, &wq_lock);
	}
	return t;
}

/*
 * Unlink the task from the run queue. The tasks_run_queue size and number of
 * niced tasks are updated too. A pointer to the task itself is returned. The
 * task *must* already be in the run queue before calling this function. If
 * unsure, use the safer task_unlink_rq() function. Note that the pointer to the
 * next run queue entry is neither checked nor updated.
 */
static inline struct task *__task_unlink_rq(struct task *t)
{
	_HA_ATOMIC_SUB(&tasks_run_queue, 1);
#ifdef USE_THREAD
	if (t->state & TASK_GLOBAL) {
		_HA_ATOMIC_AND(&t->state, ~TASK_GLOBAL);
		global_rqueue_size--;
	} else
#endif
		sched->rqueue_size--;
	eb32sc_delete(&t->rq);
	if (likely(t->nice))
		_HA_ATOMIC_SUB(&niced_tasks, 1);
	return t;
}

/* This function unlinks task <t> from the run queue if it is in it. It also
 * takes care of updating the next run queue task if it was this task.
 */
static inline struct task *task_unlink_rq(struct task *t)
{
	int is_global = t->state & TASK_GLOBAL;

	if (is_global)
		HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
	if (likely(task_in_rq(t)))
		__task_unlink_rq(t);
	if (is_global)
		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
	return t;
}

static inline void tasklet_wakeup(struct tasklet *tl)
{
	if (MT_LIST_ADDQ(&task_per_thread[tl->tid].task_list, &tl->list) == 1) {
		_HA_ATOMIC_ADD(&tasks_run_queue, 1);
		if (sleeping_thread_mask & (1UL << tl->tid)) {
			_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << tl->tid));
			wake_thread(tl->tid);
		}
	}
}

/* Insert a tasklet into the tasklet list. If used with a plain task instead,
 * the caller must update the task_list_size.
 */
static inline void tasklet_insert_into_tasklet_list(struct tasklet *tl)
{
	if (MT_LIST_ADDQ(&sched->task_list, &tl->list) == 1)
		_HA_ATOMIC_ADD(&tasks_run_queue, 1);
}

/* Remove the tasklet from the tasklet list. The tasklet MUST already be there.
 * If unsure, use tasklet_remove_from_tasklet_list() instead. If used with a
 * plain task, the caller must update the task_list_size.
 */
static inline void __tasklet_remove_from_tasklet_list(struct tasklet *t)
{
	if (MT_LIST_DEL(&t->list) == 1)
		_HA_ATOMIC_SUB(&tasks_run_queue, 1);
}

static inline void tasklet_remove_from_tasklet_list(struct tasklet *t)
{
	if (likely(!MT_LIST_ISEMPTY(&t->list)))
		__tasklet_remove_from_tasklet_list(t);
}

/*
 * Initialize a new task. The bare minimum is performed (queue pointers and
 * state). The task is returned. This function should not be used outside of
 * task_new().
 */
static inline struct task *task_init(struct task *t, unsigned long thread_mask)
{
	t->wq.node.leaf_p = NULL;
	t->rq.node.leaf_p = NULL;
	t->state = TASK_SLEEPING;
	t->thread_mask = thread_mask;
	t->nice = 0;
	t->calls = 0;
	t->call_date = 0;
	t->cpu_time = 0;
	t->lat_time = 0;
	t->expire = TICK_ETERNITY;
	return t;
}

static inline void tasklet_init(struct tasklet *t)
{
	t->nice = -32768;
	t->calls = 0;
	t->state = 0;
	t->process = NULL;
	t->tid = tid;
	MT_LIST_INIT(&t->list);
}

static inline struct tasklet *tasklet_new(void)
{
	struct tasklet *t = pool_alloc(pool_head_tasklet);

	if (t) {
		tasklet_init(t);
	}
	return t;
}
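
/* Illustrative sketch of the tasklet lifecycle; <my_io_handler> and <my_ctx>
 * are hypothetical, the rest is this header's API:
 *
 *     struct tasklet *tl = tasklet_new();
 *     if (tl) {
 *             tl->process = my_io_handler;  // run on the owning thread
 *             tl->context = my_ctx;
 *             tasklet_wakeup(tl);           // queue for the next polling loop
 *     }
 *
 * A tasklet always runs on the thread designated by tl->tid, which defaults
 * to the allocating thread (see tasklet_set_tid() below to change it).
 */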

/*
 * Allocate and initialise a new task. The new task is returned, or NULL in
 * case of lack of memory. The task count is incremented. Tasks should only
 * be allocated this way, and must be freed using task_destroy().
 */
static inline struct task *task_new(unsigned long thread_mask)
{
	struct task *t = pool_alloc(pool_head_task);
	if (t) {
		_HA_ATOMIC_ADD(&nb_tasks, 1);
		task_init(t, thread_mask);
	}
	return t;
}

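/* Illustrative sketch of task creation; <my_handler> and <my_ctx> are
 * hypothetical, the rest is this header's API. A task bound to the current
 * thread uses tid_bit; one runnable anywhere uses MAX_THREADS_MASK:
 *
 *     struct task *t = task_new(tid_bit);
 *     if (t) {
 *             t->process = my_handler;
 *             t->context = my_ctx;
 *             t->expire  = tick_add(now_ms, 1000);  // first timeout in 1s
 *             task_queue(t);                        // see task_queue() below
 *     }
 */
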
/*
 * Free a task. Its context must have been freed since it will be lost. The
 * task count is decremented. If it is the current task, this one is reset.
 */
static inline void __task_free(struct task *t)
{
	if (t == sched->current) {
		sched->current = NULL;
		__ha_barrier_store();
	}
	pool_free(pool_head_task, t);
	if (unlikely(stopping))
		pool_flush(pool_head_task);
	_HA_ATOMIC_SUB(&nb_tasks, 1);
}

/* Destroys a task: it is unlinked from the wait queue and is freed if it is
 * the current task or is not queued; otherwise it is marked to be freed by
 * the scheduler. It does nothing if <t> is NULL.
 */
static inline void task_destroy(struct task *t)
{
	if (!t)
		return;

	task_unlink_wq(t);
	/* We don't have to explicitly remove it from the run queue. If it is
	 * in the run queue, the test below will set t->process to NULL, and
	 * the task will be freed when its turn comes.
	 */

	/* There's no need to protect t->state with a lock, as the task
	 * has to run on the current thread.
	 */
	if (t == sched->current || !(t->state & (TASK_QUEUED | TASK_RUNNING)))
		__task_free(t);
	else
		t->process = NULL;
}
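
/* Illustrative sketch: a handler terminating its own task. Returning NULL
 * tells the scheduler not to requeue the task; <my_handler> and done_with()
 * are hypothetical:
 *
 *     static struct task *my_handler(struct task *t, void *ctx, unsigned short state)
 *     {
 *             if (done_with(ctx)) {
 *                     task_destroy(t);
 *                     return NULL;        // the task must not be touched anymore
 *             }
 *             t->expire = tick_add(now_ms, 1000);
 *             return t;                   // requeue with the updated timer
 *     }
 */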

static inline void tasklet_free(struct tasklet *tl)
{
	if (!MT_LIST_ISEMPTY(&tl->list)) {
		if (MT_LIST_DEL(&tl->list) == 1)
			_HA_ATOMIC_SUB(&tasks_run_queue, 1);
	}

	pool_free(pool_head_tasklet, tl);
	if (unlikely(stopping))
		pool_flush(pool_head_tasklet);
}

static inline void tasklet_set_tid(struct tasklet *tl, int tid)
{
	tl->tid = tid;
}

void __task_queue(struct task *task, struct eb_root *wq);

/* Place <task> into the wait queue, where it may already be. If the expiration
 * timer is infinite, do nothing and rely on wake_expired_tasks() to clean up.
 * If the task is bound to a single thread, it's assumed to be bound to the
 * current thread's queue and is queued without locking. Otherwise it's queued
 * into the global wait queue, protected by locks.
 */
static inline void task_queue(struct task *task)
{
	/* If we already have a place in the wait queue no later than the
	 * timeout we're trying to set, we'll stay there, because it is very
	 * unlikely that we will reach the timeout anyway. If the timeout
	 * has been disabled, it's useless to leave the queue as well. We'll
	 * rely on wake_expired_tasks() to catch the node and move it to the
	 * proper place should it ever happen. Finally we only add the task
	 * to the queue if it was not there or if it was further than what
	 * we want.
	 */
	if (!tick_isset(task->expire))
		return;

#ifdef USE_THREAD
	if (atleast2(task->thread_mask)) {
		HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
		if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
			__task_queue(task, &timers);
		HA_RWLOCK_WRUNLOCK(TASK_WQ_LOCK, &wq_lock);
	} else
#endif
	{
		if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
			__task_queue(task, &sched->timers);
	}
}
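
/* Illustrative usage sketch: (re)arming a task's timer; <my_timeout_ms> is
 * hypothetical, the pattern of updating ->expire then calling task_queue()
 * is the usual one:
 *
 *     t->expire = tick_first(t->expire, tick_add(now_ms, my_timeout_ms));
 *     task_queue(t);
 *
 * Thanks to the lazy requeuing described in the wait queue principle above,
 * this is cheap when the task already sits early enough in the tree.
 */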

/* Ensure <task> will be woken up no later than <when>. If the task is already
 * in the run queue (but not running), nothing is done. It may be used that way
 * with a delay :  task_schedule(task, tick_add(now_ms, delay));
 */
static inline void task_schedule(struct task *task, int when)
{
	/* TODO: mthread, check if there is no risk with this test */
	if (task_in_rq(task))
		return;

#ifdef USE_THREAD
	if (atleast2(task->thread_mask)) {
		/* FIXME: is it really needed to lock the WQ during the check ? */
		HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
		if (task_in_wq(task))
			when = tick_first(when, task->expire);

		task->expire = when;
		if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
			__task_queue(task, &timers);
		HA_RWLOCK_WRUNLOCK(TASK_WQ_LOCK, &wq_lock);
	} else
#endif
	{
		if (task_in_wq(task))
			when = tick_first(when, task->expire);

		task->expire = when;
		if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
			__task_queue(task, &sched->timers);
	}
}

/* This function registers a new signal. <purge> is the purge list of the
 * current Lua execution context, <event> is a list head attached to another
 * task which must wake the Lua task <wakeup> when an event occurs. This is
 * useful with external events such as TCP I/O or sleep functions. This
 * function allocates memory for the signal.
 */
static inline struct notification *notification_new(struct list *purge, struct list *event, struct task *wakeup)
{
	struct notification *com = pool_alloc(pool_head_notification);
	if (!com)
		return NULL;
	LIST_ADDQ(purge, &com->purge_me);
	LIST_ADDQ(event, &com->wake_me);
	HA_SPIN_INIT(&com->lock);
	com->task = wakeup;
	return com;
}
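
/* Illustrative sketch of the notification flow, assuming hypothetical lists
 * <my_purge> and <my_events> and a Lua task <lua_task>:
 *
 *     struct notification *com = notification_new(&my_purge, &my_events, lua_task);
 *
 *     // later, from the event side, wake every registered waiter:
 *     notification_wake(&my_events);
 *
 *     // and when the Lua context terminates, drop the pending signals:
 *     notification_purge(&my_purge);
 *
 * notification_wake() and notification_purge() are defined below; com->lock
 * arbitrates between the waking side and the purging side.
 */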

/* This function purges all the pending signals when the Lua execution
 * is finished. This prevents a coprocess from trying to wake a deleted
 * task. It releases the memory associated with each signal. The purge
 * list is not locked because it is owned by only one process; before
 * browsing this list, the caller must ensure it is the only one
 * browsing it.
 */
static inline void notification_purge(struct list *purge)
{
	struct notification *com, *back;

	/* Delete all pending communication signals. */
	list_for_each_entry_safe(com, back, purge, purge_me) {
		HA_SPIN_LOCK(NOTIF_LOCK, &com->lock);
		LIST_DEL(&com->purge_me);
		if (!com->task) {
			HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
			pool_free(pool_head_notification, com);
			continue;
		}
		com->task = NULL;
		HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
	}
}

/* In some cases, the disconnected notifications must be cleared.
 * This function just releases the memory blocks. The purge list is
 * not locked because it is owned by only one process; before browsing
 * this list, the caller must ensure it is the only one browsing it.
 * The "com" is not locked because when com->task is NULL, the
 * notification is no longer used.
 */
static inline void notification_gc(struct list *purge)
{
	struct notification *com, *back;

	/* Delete all pending communication signals. */
	list_for_each_entry_safe (com, back, purge, purge_me) {
		if (com->task)
			continue;
		LIST_DEL(&com->purge_me);
		pool_free(pool_head_notification, com);
	}
}

/* This function sends signals. It wakes up all the tasks attached
 * to a list head, removes the signal, and frees the used memory.
 * The wake list is not locked because it is owned by only one
 * process; before browsing this list, the caller must ensure it is
 * the only one browsing it.
 */
static inline void notification_wake(struct list *wake)
{
	struct notification *com, *back;

	/* Wake task and delete all pending communication signals. */
	list_for_each_entry_safe(com, back, wake, wake_me) {
		HA_SPIN_LOCK(NOTIF_LOCK, &com->lock);
		LIST_DEL(&com->wake_me);
		if (!com->task) {
			HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
			pool_free(pool_head_notification, com);
			continue;
		}
		task_wakeup(com->task, TASK_WOKEN_MSG);
		com->task = NULL;
		HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
	}
}

/* This function returns true if some notifications are pending. */
static inline int notification_registered(struct list *wake)
{
	return !LIST_ISEMPTY(wake);
}

static inline int thread_has_tasks(void)
{
	return (!!(global_tasks_mask & tid_bit) |
	        (sched->rqueue_size > 0) |
	        !MT_LIST_ISEMPTY(&sched->task_list));
}

/* adds list item <item> to work list <work> and wakes up the associated task */
static inline void work_list_add(struct work_list *work, struct mt_list *item)
{
	MT_LIST_ADDQ(&work->head, item);
	task_wakeup(work->task, TASK_WOKEN_OTHER);
}

struct work_list *work_list_create(int nbthread,
                                   struct task *(*fct)(struct task *, void *, unsigned short),
                                   void *arg);

void work_list_destroy(struct work_list *work, int nbthread);
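
/* Illustrative usage sketch; the handler name, <my_item> and the assumption
 * that <arg> receives the per-thread work list are hypothetical:
 *
 *     static struct task *my_work_handler(struct task *t, void *arg, unsigned short state)
 *     {
 *             struct work_list *wl = arg;
 *
 *             // dequeue and process the items queued on wl->head here
 *             return t;
 *     }
 *
 *     // at init time, one queue and one task per thread:
 *     struct work_list *wl = work_list_create(global.nbthread, my_work_handler, NULL);
 *
 *     // from any thread, queue an item for thread <thr> and wake its task:
 *     work_list_add(&wl[thr], &my_item);
 */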
563
Willy Tarreaubaaee002006-06-26 02:48:02 +0200564/*
Willy Tarreau918ff602011-07-25 16:33:49 +0200565 * This does 3 things :
Willy Tarreaubaaee002006-06-26 02:48:02 +0200566 * - wake up all expired tasks
567 * - call all runnable tasks
Willy Tarreaud825eef2007-05-12 22:35:00 +0200568 * - return the date of next event in <next> or eternity.
Willy Tarreaubaaee002006-06-26 02:48:02 +0200569 */
570
Thierry FOURNIER9cf7c4b2014-12-15 13:26:01 +0100571void process_runnable_tasks();
Willy Tarreaubaaee002006-06-26 02:48:02 +0200572
/*
 * Extracts all expired timers from the timer queue and wakes up all
 * associated tasks. Returns the date of the next event (or eternity).
 */
int wake_expired_tasks();

/*
 * Delete all tasks before running the master polling loop
 */
void mworker_cleantasks();

#endif /* _PROTO_TASK_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */