/*
 * include/proto/task.h
 * Functions for task management.
 *
 * Copyright (C) 2000-2010 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _PROTO_TASK_H
#define _PROTO_TASK_H


#include <sys/time.h>

#include <common/config.h>
#include <common/memory.h>
#include <common/mini-clist.h>
#include <common/standard.h>
#include <common/ticks.h>
#include <common/hathreads.h>

#include <eb32sctree.h>
#include <eb32tree.h>

#include <types/global.h>
#include <types/task.h>

/* Principle of the wait queue.
 *
 * We want to be able to tell whether an expiration date is before or after the
 * current time <now>. We KNOW that expiration dates are never too far apart,
 * because they are measured in ticks (milliseconds). We also know that almost
 * all dates will be in the future, and that a very small part of them will be
 * in the past; those are the ones which have expired since the last time we
 * checked them. Using ticks, we know if a date is in the future or in the
 * past, but we cannot use that to store sorted information because that
 * reference changes all the time.
 *
 * We'll use the fact that the time wraps to sort timers. Timers above <now>
 * are in the future, timers below <now> are in the past. Here, "above" and
 * "below" are to be considered modulo 2^31.
 *
 * Timers are stored sorted in an ebtree. We use the new ability for ebtrees to
 * lookup values starting from X to only expire tasks between <now> - 2^31 and
 * <now>. If the end of the tree is reached while walking over it, we simply
 * loop back to the beginning. That way, we have no problem keeping sorted
 * wrapping timers in a tree, between (now - 24 days) and (now + 24 days). The
 * keys in the tree always reflect their real position, none can be infinite.
 * This reduces the number of checks to be performed.
 *
 * Another nice optimisation is to allow a timer to stay at an old place in the
 * queue as long as it's not further than the real expiration date. That way,
 * we use the tree as a place holder for a minorant of the real expiration
 * date. Since we have a very low chance of hitting a timeout anyway, we can
 * bounce the nodes to their right place when we scan the tree if we encounter
 * a misplaced node once in a while. This even allows us not to remove the
 * infinite timers from the wait queue.
 *
 * So, to summarize, we have :
 *  - node->key always defines the current position in the wait queue
 *  - timer is the real expiration date (possibly infinite)
 *  - node->key is always before or equal to timer
 *
 * The run queue works similarly to the wait queue except that the current date
 * is replaced by an insertion counter which can also wrap without any problem.
 */

/* The farthest we can look back in a timer tree */
#define TIMER_LOOK_BACK        (1U << 31)
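
/* Illustrative sketch (not part of this API): the wrapping "before"
 * comparison described above boils down to looking at the sign of the 32-bit
 * difference between two ticks, which is essentially what tick_is_lt() in
 * <common/ticks.h> does. Assuming two tick values less than 2^31 apart:
 *
 *      static inline int example_tick_is_before(unsigned int t1, unsigned int t2)
 *      {
 *              return (int)(t1 - t2) < 0;  // negative difference => t1 is before t2
 *      }
 *
 * This stays correct across the wrap: with t1 = 0xfffffffe and
 * t2 = 0x00000002, t1 - t2 is 0xfffffffc, i.e. -4, so t1 is before t2.
 */
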
/* a few exported variables */
extern unsigned int nb_tasks;     /* total number of tasks */
extern unsigned long active_tasks_mask; /* Mask of threads with active tasks */
extern unsigned int tasks_run_queue;    /* run queue size */
extern unsigned int tasks_run_queue_cur;
extern unsigned int nb_tasks_cur;
extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
extern struct pool_head *pool_head_task;
extern struct pool_head *pool_head_tasklet;
extern struct pool_head *pool_head_notification;
extern THREAD_LOCAL struct task *curr_task; /* task currently running or NULL */
extern THREAD_LOCAL struct eb32sc_node *rq_next; /* Next task to be potentially run */
#ifdef USE_THREAD
extern struct eb_root rqueue;      /* tree constituting the global run queue */
#endif
extern struct eb_root rqueue_local[MAX_THREADS]; /* tree constituting the per-thread run queue */
extern struct list task_list[MAX_THREADS]; /* List of tasks to be run, mixing tasks and tasklets */
extern int task_list_size[MAX_THREADS]; /* Number of tasks in the task_list */

__decl_hathreads(extern HA_SPINLOCK_T rq_lock);  /* spin lock related to run queue */
__decl_hathreads(extern HA_SPINLOCK_T wq_lock);  /* spin lock related to wait queue */

/* return non-zero if the task is in the run queue, zero otherwise. A task
 * queued in the tasklet list also reports zero here.
 */
static inline int task_in_rq(struct task *t)
{
        /* Check if leaf_p is NULL, in case it's not in the runqueue, and if
         * it's not 0x1, which would mean it's in the tasklet list.
         */
        return t->rq.node.leaf_p != NULL && t->rq.node.leaf_p != (void *)0x1;
}

/* return non-zero if the task is in the wait queue, zero otherwise */
static inline int task_in_wq(struct task *t)
{
        return t->wq.node.leaf_p != NULL;
}
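
/* For reference, the three possible states of rq.node.leaf_p tested above:
 *  - NULL:          the task is neither in a run queue tree nor in the
 *                   tasklet list
 *  - (void *)0x1:   the task was inserted into the tasklet list (see
 *                   task_insert_into_tasklet_list() below)
 *  - anything else: the node is attached to one of the run queue trees
 */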

/* Puts the task <t> in the run queue with reason flags <f>. The task will go
 * to the local run queue if it is only runnable by the current thread, and to
 * the global run queue otherwise. The flags are always merged into the task's
 * state; the task is only (re)queued if it was not already running.
 */
void __task_wakeup(struct task *t, struct eb_root *);
static inline void task_wakeup(struct task *t, unsigned int f)
{
        unsigned short state;

#ifdef USE_THREAD
        struct eb_root *root;

        if (t->thread_mask == tid_bit || global.nbthread == 1)
                root = &rqueue_local[tid];
        else
                root = &rqueue;
#else
        struct eb_root *root = &rqueue_local[tid];
#endif

        state = HA_ATOMIC_OR(&t->state, f);
        if (!(state & TASK_RUNNING))
                __task_wakeup(t, root);
}
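
/* Usage sketch (hypothetical names, for illustration only): a caller
 * typically allocates a task once, then wakes it whenever an event occurs.
 * The reason flags accumulate in t->state until the handler runs:
 *
 *      struct task *t = task_new(tid_bit);     // bound to this thread only
 *      if (t) {
 *              t->process = my_io_handler;     // hypothetical handler
 *              t->context = my_ctx;            // hypothetical context
 *              task_wakeup(t, TASK_WOKEN_IO);  // schedule it to run
 *      }
 */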

/* change the thread affinity of a task to <thread_mask> */
static inline void task_set_affinity(struct task *t, unsigned long thread_mask)
{
        t->thread_mask = thread_mask;
}

/*
 * Unlink the task from the wait queue, and possibly update the last_timer
 * pointer. A pointer to the task itself is returned. The task *must* already
 * be in the wait queue before calling this function. If unsure, use the safer
 * task_unlink_wq() function.
 */
static inline struct task *__task_unlink_wq(struct task *t)
{
        eb32_delete(&t->wq);
        return t;
}

static inline struct task *task_unlink_wq(struct task *t)
{
        HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
        if (likely(task_in_wq(t)))
                __task_unlink_wq(t);
        HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
        return t;
}

/*
 * Unlink the task from the run queue. The tasks_run_queue size and number of
 * niced tasks are updated too. A pointer to the task itself is returned. The
 * task *must* already be in the run queue before calling this function. If
 * unsure, use the safer task_unlink_rq() function. Note that the pointer to the
 * next run queue entry is neither checked nor updated.
 */
static inline struct task *__task_unlink_rq(struct task *t)
{
        HA_ATOMIC_SUB(&tasks_run_queue, 1);
        eb32sc_delete(&t->rq);
        if (likely(t->nice))
                HA_ATOMIC_SUB(&niced_tasks, 1);
        return t;
}

/* This function unlinks task <t> from the run queue if it is in it. It also
 * takes care of updating the next run queue task if it was this task.
 */
static inline struct task *task_unlink_rq(struct task *t)
{
        if (t->thread_mask != tid_bit)
                HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
        if (likely(task_in_rq(t))) {
                if (&t->rq == rq_next)
                        rq_next = eb32sc_next(rq_next, tid_bit);
                __task_unlink_rq(t);
        }
        if (t->thread_mask != tid_bit)
                HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
        return t;
}

/* Puts the tasklet <tl> at the end of the current thread's tasklet list and
 * updates the run queue size accordingly.
 */
static inline void tasklet_wakeup(struct tasklet *tl)
{
        LIST_ADDQ(&task_list[tid], &tl->list);
        task_list_size[tid]++;
        HA_ATOMIC_ADD(&tasks_run_queue, 1);
}

/* Inserts task <t> into the current thread's tasklet list, unless it is
 * already queued somewhere.
 */
static inline void task_insert_into_tasklet_list(struct task *t)
{
        struct tasklet *tl;
        void *expected = NULL;

        /* Protect ourselves against anybody trying to insert the task into
         * another runqueue. We set leaf_p to 0x1 to indicate that the node is
         * not in a tree but that it's in the tasklet list. See task_in_rq().
         */
        if (unlikely(!HA_ATOMIC_CAS(&t->rq.node.leaf_p, &expected, (void *)0x1)))
                return;
        HA_ATOMIC_ADD(&tasks_run_queue, 1);
        task_list_size[tid]++;
        tl = (struct tasklet *)t;
        LIST_ADDQ(&task_list[tid], &tl->list);
}

/* Removes task or tasklet <t> from the current thread's tasklet list and
 * updates the counters accordingly.
 */
static inline void task_remove_from_task_list(struct task *t)
{
        LIST_DEL(&((struct tasklet *)t)->list);
        LIST_INIT(&((struct tasklet *)t)->list);
        task_list_size[tid]--;
        HA_ATOMIC_SUB(&tasks_run_queue, 1);
        if (!TASK_IS_TASKLET(t)) {
                t->rq.node.leaf_p = NULL; /* was 0x1 */
                __ha_barrier_store();
        }
}

/*
 * Unlinks the task from both the wait queue and the run queue, and adjusts
 * the queue stats. A pointer to the task itself is returned.
 */
static inline struct task *task_delete(struct task *t)
{
        task_unlink_wq(t);
        task_unlink_rq(t);
        return t;
}

/*
 * Initialize a new task. The bare minimum is performed (queue pointers and
 * state). The task is returned. This function should not be used outside of
 * task_new().
 */
static inline struct task *task_init(struct task *t, unsigned long thread_mask)
{
        t->wq.node.leaf_p = NULL;
        t->rq.node.leaf_p = NULL;
        t->state = TASK_SLEEPING;
        t->thread_mask = thread_mask;
        t->nice = 0;
        t->calls = 0;
        t->expire = TICK_ETERNITY;
        return t;
}

/* Initialize a tasklet. The nice value of -32768 is what distinguishes a
 * tasklet from a task (see TASK_IS_TASKLET()).
 */
static inline void tasklet_init(struct tasklet *t)
{
        t->nice = -32768;
        t->calls = 0;
        t->state = 0;
        t->process = NULL;
        LIST_INIT(&t->list);
}

/* Allocate and initialize a new tasklet, or return NULL in case of lack of
 * memory.
 */
static inline struct tasklet *tasklet_new(void)
{
        struct tasklet *t = pool_alloc(pool_head_tasklet);

        if (t) {
                tasklet_init(t);
        }
        return t;
}
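
/* Usage sketch (hypothetical names): tasklets are lighter than tasks, have
 * no expiration date, and always run on the thread that woke them up:
 *
 *      struct tasklet *tl = tasklet_new();
 *      if (tl) {
 *              tl->process = my_tasklet_fn;    // hypothetical callback
 *              tl->context = my_ctx;           // hypothetical context
 *              tasklet_wakeup(tl);             // queued on this thread's list
 *      }
 */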

/*
 * Allocate and initialise a new task. The new task is returned, or NULL in
 * case of lack of memory. The task count is incremented. Tasks should only
 * be allocated this way, and must be freed using task_free().
 */
static inline struct task *task_new(unsigned long thread_mask)
{
        struct task *t = pool_alloc(pool_head_task);
        if (t) {
                HA_ATOMIC_ADD(&nb_tasks, 1);
                task_init(t, thread_mask);
        }
        return t;
}
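
/* Shape of a process function (illustrative, hypothetical name): the handler
 * receives the woken task and normally returns it after rearming t->expire;
 * returning NULL tells the scheduler that the task was freed by the handler:
 *
 *      static struct task *my_expire_handler(struct task *t)
 *      {
 *              // ... do the work, inspect t->state for the wakeup reasons ...
 *              t->expire = tick_add(now_ms, MS_TO_TICKS(1000));
 *              return t;       // still alive, will be requeued using t->expire
 *      }
 */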

/*
 * Free a task. Its context must have been freed since it will be lost.
 * The task count is decremented.
 */
static inline void __task_free(struct task *t)
{
        pool_free(pool_head_task, t);
        if (unlikely(stopping))
                pool_flush(pool_head_task);
        HA_ATOMIC_SUB(&nb_tasks, 1);
}

/* Free task <t>; if the task is marked running and is not the current task,
 * the release is deferred by clearing its process function instead.
 */
static inline void task_free(struct task *t)
{
        /* There's no need to protect t->state with a lock, as the task
         * has to run on the current thread.
         */
        if (t == curr_task || !(t->state & TASK_RUNNING))
                __task_free(t);
        else
                t->process = NULL;
}

/* Free tasklet <tl> after unlinking it from the tasklet list. */
static inline void tasklet_free(struct tasklet *tl)
{
        LIST_DEL(&tl->list);

        pool_free(pool_head_tasklet, tl);
        if (unlikely(stopping))
                pool_flush(pool_head_tasklet);
}

/* Place <task> into the wait queue, where it may already be. If the expiration
 * timer is infinite, do nothing and rely on wake_expired_tasks() to clean up.
 */
void __task_queue(struct task *task);
static inline void task_queue(struct task *task)
{
        /* If we already have a place in the wait queue no later than the
         * timeout we're trying to set, we'll stay there, because it is very
         * unlikely that we will reach the timeout anyway. If the timeout
         * has been disabled, it's useless to leave the queue as well. We'll
         * rely on wake_expired_tasks() to catch the node and move it to the
         * proper place should it ever happen. Finally we only add the task
         * to the queue if it was not there or if it was further than what
         * we want.
         */
        if (!tick_isset(task->expire))
                return;

        HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
        if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
                __task_queue(task);
        HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
}
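
/* Usage sketch: arming a timeout on an existing task (illustrative;
 * <my_timeout> is a hypothetical tick value). tick_add_ifset() keeps the
 * expiration eternal when the configured timeout is TICK_ETERNITY:
 *
 *      t->expire = tick_add_ifset(now_ms, my_timeout);
 *      task_queue(t);          // no-op if t->expire is not set
 */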

/* Ensure <task> will be woken up at most at <when>. If the task is already in
 * the run queue (but not running), nothing is done. It may be used this way
 * with a delay: task_schedule(task, tick_add(now_ms, delay));
 */
static inline void task_schedule(struct task *task, int when)
{
        /* TODO: mthread, check if there is no risk with this test */
        if (task_in_rq(task))
                return;

        HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
        if (task_in_wq(task))
                when = tick_first(when, task->expire);

        task->expire = when;
        if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
                __task_queue(task);
        HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
}

/* This function registers a new signal. "lua" is the current Lua execution
 * context; it contains a pointer to the associated task. "link" is a list
 * head attached to another task which must wake up the Lua task when an
 * event occurs. This is useful for external events such as TCP I/O or sleep
 * functions. This function allocates memory for the signal.
 */
static inline struct notification *notification_new(struct list *purge, struct list *event, struct task *wakeup)
{
        struct notification *com = pool_alloc(pool_head_notification);
        if (!com)
                return NULL;
        LIST_ADDQ(purge, &com->purge_me);
        LIST_ADDQ(event, &com->wake_me);
        HA_SPIN_INIT(&com->lock);
        com->task = wakeup;
        return com;
}

/* This function purges all the pending signals when the Lua execution is
 * finished. It prevents a coprocess from trying to wake up a deleted task,
 * and releases the memory associated with the signals. The purge list is not
 * locked because it is owned by only one process; before browsing this list,
 * the caller must ensure it is the only one browsing it.
 */
static inline void notification_purge(struct list *purge)
{
        struct notification *com, *back;

        /* Delete all pending communication signals. */
        list_for_each_entry_safe(com, back, purge, purge_me) {
                HA_SPIN_LOCK(NOTIF_LOCK, &com->lock);
                LIST_DEL(&com->purge_me);
                if (!com->task) {
                        HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
                        pool_free(pool_head_notification, com);
                        continue;
                }
                com->task = NULL;
                HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
        }
}

/* In some cases, the disconnected notifications must be cleared. This
 * function just releases the memory blocks. The purge list is not locked
 * because it is owned by only one process; before browsing this list, the
 * caller must ensure it is the only one browsing it. The "com" entries are
 * not locked because once com->task is NULL, the notification is no longer
 * used.
 */
static inline void notification_gc(struct list *purge)
{
        struct notification *com, *back;

        /* Delete all pending communication signals. */
        list_for_each_entry_safe (com, back, purge, purge_me) {
                if (com->task)
                        continue;
                LIST_DEL(&com->purge_me);
                pool_free(pool_head_notification, com);
        }
}

/* This function sends signals. It wakes up all the tasks attached to a list
 * head, removes the signals, and frees the used memory. The wake list is not
 * locked because it is owned by only one process; before browsing this list,
 * the caller must ensure it is the only one browsing it.
 */
static inline void notification_wake(struct list *wake)
{
        struct notification *com, *back;

        /* Wake task and delete all pending communication signals. */
        list_for_each_entry_safe(com, back, wake, wake_me) {
                HA_SPIN_LOCK(NOTIF_LOCK, &com->lock);
                LIST_DEL(&com->wake_me);
                if (!com->task) {
                        HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
                        pool_free(pool_head_notification, com);
                        continue;
                }
                task_wakeup(com->task, TASK_WOKEN_MSG);
                com->task = NULL;
                HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
        }
}

/* This function returns true if some notifications are pending. */
static inline int notification_registered(struct list *wake)
{
        return !LIST_ISEMPTY(wake);
}
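
/* Lifecycle sketch (hypothetical list and task names): the Lua side keeps a
 * purge list of every signal it registered, while the I/O side keeps a wake
 * list of subscribers to notify:
 *
 *      struct notification *com;
 *
 *      com = notification_new(&lua_purge_list, &io_wake_list, lua_task);
 *      // ... later, on the I/O side, when the event occurs:
 *      notification_wake(&io_wake_list);       // wakes lua_task with TASK_WOKEN_MSG
 *      // ... and on the Lua side, once execution is finished:
 *      notification_purge(&lua_purge_list);    // or notification_gc() to only
 *                                              // reclaim consumed entries
 */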

/*
 * This does 3 things:
 *  - wake up all expired tasks
 *  - call all runnable tasks
 *  - return the date of the next event, or eternity.
 */
void process_runnable_tasks();

/*
 * Extracts all expired timers from the timer queue, and wakes up all
 * associated tasks. Returns the date of the next event (or eternity).
 */
int wake_expired_tasks();

/* Perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_task();

#endif /* _PROTO_TASK_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */