Willy Tarreau | baaee00 | 2006-06-26 02:48:02 +0200 | [diff] [blame] | 1 | /* |
Willy Tarreau | 24f4efa | 2010-08-27 17:56:48 +0200 | [diff] [blame] | 2 | * include/proto/task.h |
| 3 | * Functions for task management. |
| 4 | * |
| 5 | * Copyright (C) 2000-2010 Willy Tarreau - w@1wt.eu |
| 6 | * |
| 7 | * This library is free software; you can redistribute it and/or |
| 8 | * modify it under the terms of the GNU Lesser General Public |
| 9 | * License as published by the Free Software Foundation, version 2.1 |
| 10 | * exclusively. |
| 11 | * |
| 12 | * This library is distributed in the hope that it will be useful, |
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 15 | * Lesser General Public License for more details. |
| 16 | * |
| 17 | * You should have received a copy of the GNU Lesser General Public |
| 18 | * License along with this library; if not, write to the Free Software |
| 19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
| 20 | */ |
Willy Tarreau | baaee00 | 2006-06-26 02:48:02 +0200 | [diff] [blame] | 21 | |
| 22 | #ifndef _PROTO_TASK_H |
| 23 | #define _PROTO_TASK_H |
| 24 | |
| 25 | |
| 26 | #include <sys/time.h> |
Willy Tarreau | e3ba5f0 | 2006-06-29 18:54:54 +0200 | [diff] [blame] | 27 | |
| 28 | #include <common/config.h> |
Willy Tarreau | 2dd0d47 | 2006-06-29 17:53:05 +0200 | [diff] [blame] | 29 | #include <common/memory.h> |
Willy Tarreau | 96bcfd7 | 2007-04-29 10:41:56 +0200 | [diff] [blame] | 30 | #include <common/mini-clist.h> |
| 31 | #include <common/standard.h> |
Willy Tarreau | d0a201b | 2009-03-08 15:53:06 +0100 | [diff] [blame] | 32 | #include <common/ticks.h> |
Emeric Brun | c60def8 | 2017-09-27 14:59:38 +0200 | [diff] [blame] | 33 | #include <common/hathreads.h> |
| 34 | |
Willy Tarreau | 8d38805 | 2017-11-05 13:34:20 +0100 | [diff] [blame] | 35 | #include <eb32sctree.h> |
Willy Tarreau | 45cb4fb | 2009-10-26 21:10:04 +0100 | [diff] [blame] | 36 | #include <eb32tree.h> |
Willy Tarreau | 96bcfd7 | 2007-04-29 10:41:56 +0200 | [diff] [blame] | 37 | |
Willy Tarreau | eb11889 | 2014-11-13 16:57:19 +0100 | [diff] [blame] | 38 | #include <types/global.h> |
Willy Tarreau | e3ba5f0 | 2006-06-29 18:54:54 +0200 | [diff] [blame] | 39 | #include <types/task.h> |
Willy Tarreau | baaee00 | 2006-06-26 02:48:02 +0200 | [diff] [blame] | 40 | |
Willy Tarreau | d0a201b | 2009-03-08 15:53:06 +0100 | [diff] [blame] | 41 | /* Principle of the wait queue. |
| 42 | * |
 * We want to be able to tell whether an expiration date is before or after the
| 44 | * current time <now>. We KNOW that expiration dates are never too far apart, |
| 45 | * because they are measured in ticks (milliseconds). We also know that almost |
| 46 | * all dates will be in the future, and that a very small part of them will be |
| 47 | * in the past, they are the ones which have expired since last time we checked |
| 48 | * them. Using ticks, we know if a date is in the future or in the past, but we |
| 49 | * cannot use that to store sorted information because that reference changes |
| 50 | * all the time. |
| 51 | * |
Willy Tarreau | e35c94a | 2009-03-21 10:01:42 +0100 | [diff] [blame] | 52 | * We'll use the fact that the time wraps to sort timers. Timers above <now> |
| 53 | * are in the future, timers below <now> are in the past. Here, "above" and |
| 54 | * "below" are to be considered modulo 2^31. |
Willy Tarreau | d0a201b | 2009-03-08 15:53:06 +0100 | [diff] [blame] | 55 | * |
Willy Tarreau | e35c94a | 2009-03-21 10:01:42 +0100 | [diff] [blame] | 56 | * Timers are stored sorted in an ebtree. We use the new ability for ebtrees to |
| 57 | * lookup values starting from X to only expire tasks between <now> - 2^31 and |
| 58 | * <now>. If the end of the tree is reached while walking over it, we simply |
| 59 | * loop back to the beginning. That way, we have no problem keeping sorted |
| 60 | * wrapping timers in a tree, between (now - 24 days) and (now + 24 days). The |
| 61 | * keys in the tree always reflect their real position, none can be infinite. |
| 62 | * This reduces the number of checks to be performed. |
Willy Tarreau | d0a201b | 2009-03-08 15:53:06 +0100 | [diff] [blame] | 63 | * |
| 64 | * Another nice optimisation is to allow a timer to stay at an old place in the |
| 65 | * queue as long as it's not further than the real expiration date. That way, |
| 66 | * we use the tree as a place holder for a minorant of the real expiration |
| 67 | * date. Since we have a very low chance of hitting a timeout anyway, we can |
| 68 | * bounce the nodes to their right place when we scan the tree if we encounter |
| 69 | * a misplaced node once in a while. This even allows us not to remove the |
| 70 | * infinite timers from the wait queue. |
| 71 | * |
| 72 | * So, to summarize, we have : |
| 73 | * - node->key always defines current position in the wait queue |
| 74 | * - timer is the real expiration date (possibly infinite) |
Willy Tarreau | e35c94a | 2009-03-21 10:01:42 +0100 | [diff] [blame] | 75 | * - node->key is always before or equal to timer |
Willy Tarreau | d0a201b | 2009-03-08 15:53:06 +0100 | [diff] [blame] | 76 | * |
| 77 | * The run queue works similarly to the wait queue except that the current date |
| 78 | * is replaced by an insertion counter which can also wrap without any problem. |
| 79 | */ |
| 80 | |
Willy Tarreau | e35c94a | 2009-03-21 10:01:42 +0100 | [diff] [blame] | 81 | /* The farthest we can look back in a timer tree */ |
| 82 | #define TIMER_LOOK_BACK (1U << 31) |
Willy Tarreau | d0a201b | 2009-03-08 15:53:06 +0100 | [diff] [blame] | 83 | |
| 84 | /* a few exported variables */ |
Willy Tarreau | a461318 | 2009-03-21 18:13:21 +0100 | [diff] [blame] | 85 | extern unsigned int nb_tasks; /* total number of tasks */ |
Christopher Faulet | 3911ee8 | 2017-11-14 10:26:53 +0100 | [diff] [blame] | 86 | extern unsigned long active_tasks_mask; /* Mask of threads with active tasks */ |
Christopher Faulet | 34c5cc9 | 2016-12-06 09:15:30 +0100 | [diff] [blame] | 87 | extern unsigned int tasks_run_queue; /* run queue size */ |
| 88 | extern unsigned int tasks_run_queue_cur; |
Willy Tarreau | c7bdf09 | 2009-03-21 18:33:52 +0100 | [diff] [blame] | 89 | extern unsigned int nb_tasks_cur; |
Willy Tarreau | 91e9993 | 2008-06-30 07:51:00 +0200 | [diff] [blame] | 90 | extern unsigned int niced_tasks; /* number of niced tasks in the run queue */ |
Willy Tarreau | bafbe01 | 2017-11-24 17:34:44 +0100 | [diff] [blame] | 91 | extern struct pool_head *pool_head_task; |
| 92 | extern struct pool_head *pool_head_notification; |
Olivier Houchard | 9b36cb4 | 2018-05-04 15:46:16 +0200 | [diff] [blame] | 93 | extern THREAD_LOCAL struct task *curr_task; /* task currently running or NULL */ |
| 94 | extern THREAD_LOCAL struct eb32sc_node *rq_next; /* Next task to be potentially run */ |
Olivier Houchard | f6e6dc1 | 2018-05-18 18:38:23 +0200 | [diff] [blame^] | 95 | extern struct eb_root rqueue; /* tree constituting the run queue */ |
| 96 | extern struct eb_root rqueue_local[MAX_THREADS]; /* tree constituting the per-thread run queue */ |
Christopher Faulet | 9dcf9b6 | 2017-11-13 10:34:01 +0100 | [diff] [blame] | 97 | |
| 98 | __decl_hathreads(extern HA_SPINLOCK_T rq_lock); /* spin lock related to run queue */ |
| 99 | __decl_hathreads(extern HA_SPINLOCK_T wq_lock); /* spin lock related to wait queue */ |
Willy Tarreau | c6ca1a0 | 2007-05-13 19:43:47 +0200 | [diff] [blame] | 100 | |
/* return non-zero if task <t> is currently in the run queue, otherwise zero */
static inline int task_in_rq(struct task *t)
{
	/* an eb node with a NULL leaf_p is not attached to any tree */
	return t->rq.node.leaf_p != NULL;
}
| 106 | |
/* return non-zero if task <t> is currently in the wait queue, otherwise zero */
static inline int task_in_wq(struct task *t)
{
	/* an eb node with a NULL leaf_p is not attached to any tree */
	return t->wq.node.leaf_p != NULL;
}
| 112 | |
/* puts the task <t> in run queue with reason flags <f>, and returns <t> */
/* This will put the task in the local run queue if the task is only runnable
 * by the current thread, in the global run queue otherwise.
 */
void __task_wakeup(struct task *t, struct eb_root *);
static inline void task_wakeup(struct task *t, unsigned int f)
{
	unsigned short state;

#ifdef USE_THREAD
	struct eb_root *root;

	/* use the per-thread run queue only when the task is bound to the
	 * current thread alone and several threads are running; otherwise
	 * fall back to the shared run queue.
	 */
	if (t->thread_mask == tid_bit && global.nbthread > 1)
		root = &rqueue_local[tid];
	else
		root = &rqueue;
#else
	struct eb_root *root = &rqueue;
#endif

	/* atomically merge the wakeup reason into the task state; only
	 * enqueue if the task was not already flagged as running.
	 */
	state = HA_ATOMIC_OR(&t->state, f);
	if (!(state & TASK_RUNNING))
		__task_wakeup(t, root);
}
Willy Tarreau | baaee00 | 2006-06-26 02:48:02 +0200 | [diff] [blame] | 137 | |
/* change the thread affinity of a task to <thread_mask>.
 * NOTE(review): plain unlocked store; presumably only safe while the task is
 * not concurrently manipulated by another thread — confirm with callers.
 */
static inline void task_set_affinity(struct task *t, unsigned long thread_mask)
{
	t->thread_mask = thread_mask;
}
Willy Tarreau | f65610a | 2017-10-31 16:06:06 +0100 | [diff] [blame] | 143 | |
/*
 * Unlink the task from the wait queue. A pointer to the task itself is
 * returned. The task *must* already be in the wait queue before calling this
 * function (no lock is taken here either). If unsure, use the safer
 * task_unlink_wq() function.
 */
static inline struct task *__task_unlink_wq(struct task *t)
{
	eb32_delete(&t->wq);
	return t;
}
| 155 | |
/* Remove task <t> from the wait queue if it is there, under the wait queue
 * lock. Safe to call even when the task is not queued. Returns <t>.
 */
static inline struct task *task_unlink_wq(struct task *t)
{
	HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
	if (likely(task_in_wq(t)))
		__task_unlink_wq(t);
	HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
	return t;
}
| 164 | |
/*
 * Unlink the task from the run queue. The tasks_run_queue size and number of
 * niced tasks are updated too. A pointer to the task itself is returned. The
 * task *must* already be in the run queue before calling this function. If
 * unsure, use the safer task_unlink_rq() function. Note that the pointer to the
 * next run queue entry is neither checked nor updated.
 */
static inline struct task *__task_unlink_rq(struct task *t)
{
	eb32sc_delete(&t->rq);
	/* these counters are shared between threads, hence the atomic updates */
	HA_ATOMIC_SUB(&tasks_run_queue, 1);
	if (likely(t->nice))
		HA_ATOMIC_SUB(&niced_tasks, 1);
	return t;
}
Willy Tarreau | 9789f7b | 2008-06-24 08:17:16 +0200 | [diff] [blame] | 180 | |
Willy Tarreau | 501260b | 2015-02-23 16:07:01 +0100 | [diff] [blame] | 181 | /* This function unlinks task <t> from the run queue if it is in it. It also |
| 182 | * takes care of updating the next run queue task if it was this task. |
| 183 | */ |
Willy Tarreau | 4726f53 | 2009-03-07 17:25:21 +0100 | [diff] [blame] | 184 | static inline struct task *task_unlink_rq(struct task *t) |
| 185 | { |
Olivier Houchard | f6e6dc1 | 2018-05-18 18:38:23 +0200 | [diff] [blame^] | 186 | if (t->thread_mask != tid_bit) |
| 187 | HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock); |
Olivier Houchard | 9b36cb4 | 2018-05-04 15:46:16 +0200 | [diff] [blame] | 188 | if (likely(task_in_rq(t))) { |
| 189 | if (&t->rq == rq_next) |
| 190 | rq_next = eb32sc_next(rq_next, tid_bit); |
Willy Tarreau | 4726f53 | 2009-03-07 17:25:21 +0100 | [diff] [blame] | 191 | __task_unlink_rq(t); |
Olivier Houchard | 9b36cb4 | 2018-05-04 15:46:16 +0200 | [diff] [blame] | 192 | } |
Olivier Houchard | f6e6dc1 | 2018-05-18 18:38:23 +0200 | [diff] [blame^] | 193 | if (t->thread_mask != tid_bit) |
| 194 | HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock); |
Willy Tarreau | 4726f53 | 2009-03-07 17:25:21 +0100 | [diff] [blame] | 195 | return t; |
| 196 | } |
| 197 | |
/*
 * Unlinks the task from both the wait queue and the run queue, adjusting the
 * run queue stats. A pointer to the task itself is returned.
 */
static inline struct task *task_delete(struct task *t)
{
	/* both unlink helpers return their argument, so they chain cleanly */
	return task_unlink_rq(task_unlink_wq(t));
}
| 208 | |
| 209 | /* |
Willy Tarreau | a461318 | 2009-03-21 18:13:21 +0100 | [diff] [blame] | 210 | * Initialize a new task. The bare minimum is performed (queue pointers and |
| 211 | * state). The task is returned. This function should not be used outside of |
| 212 | * task_new(). |
Willy Tarreau | 9789f7b | 2008-06-24 08:17:16 +0200 | [diff] [blame] | 213 | */ |
Emeric Brun | c60def8 | 2017-09-27 14:59:38 +0200 | [diff] [blame] | 214 | static inline struct task *task_init(struct task *t, unsigned long thread_mask) |
Willy Tarreau | 9789f7b | 2008-06-24 08:17:16 +0200 | [diff] [blame] | 215 | { |
Willy Tarreau | 4726f53 | 2009-03-07 17:25:21 +0100 | [diff] [blame] | 216 | t->wq.node.leaf_p = NULL; |
| 217 | t->rq.node.leaf_p = NULL; |
Olivier Houchard | f6e6dc1 | 2018-05-18 18:38:23 +0200 | [diff] [blame^] | 218 | t->state = TASK_SLEEPING; |
Willy Tarreau | f65610a | 2017-10-31 16:06:06 +0100 | [diff] [blame] | 219 | t->thread_mask = thread_mask; |
Willy Tarreau | 91e9993 | 2008-06-30 07:51:00 +0200 | [diff] [blame] | 220 | t->nice = 0; |
Willy Tarreau | 3884cba | 2009-03-28 17:54:35 +0100 | [diff] [blame] | 221 | t->calls = 0; |
Willy Tarreau | f421999 | 2017-07-24 17:52:58 +0200 | [diff] [blame] | 222 | t->expire = TICK_ETERNITY; |
Willy Tarreau | baaee00 | 2006-06-26 02:48:02 +0200 | [diff] [blame] | 223 | return t; |
| 224 | } |
| 225 | |
| 226 | /* |
Willy Tarreau | a461318 | 2009-03-21 18:13:21 +0100 | [diff] [blame] | 227 | * Allocate and initialise a new task. The new task is returned, or NULL in |
| 228 | * case of lack of memory. The task count is incremented. Tasks should only |
| 229 | * be allocated this way, and must be freed using task_free(). |
| 230 | */ |
Emeric Brun | c60def8 | 2017-09-27 14:59:38 +0200 | [diff] [blame] | 231 | static inline struct task *task_new(unsigned long thread_mask) |
Willy Tarreau | a461318 | 2009-03-21 18:13:21 +0100 | [diff] [blame] | 232 | { |
Willy Tarreau | bafbe01 | 2017-11-24 17:34:44 +0100 | [diff] [blame] | 233 | struct task *t = pool_alloc(pool_head_task); |
Willy Tarreau | a461318 | 2009-03-21 18:13:21 +0100 | [diff] [blame] | 234 | if (t) { |
Emeric Brun | c60def8 | 2017-09-27 14:59:38 +0200 | [diff] [blame] | 235 | HA_ATOMIC_ADD(&nb_tasks, 1); |
| 236 | task_init(t, thread_mask); |
Willy Tarreau | a461318 | 2009-03-21 18:13:21 +0100 | [diff] [blame] | 237 | } |
| 238 | return t; |
| 239 | } |
| 240 | |
| 241 | /* |
| 242 | * Free a task. Its context must have been freed since it will be lost. |
| 243 | * The task count is decremented. |
Willy Tarreau | baaee00 | 2006-06-26 02:48:02 +0200 | [diff] [blame] | 244 | */ |
Olivier Houchard | 9b36cb4 | 2018-05-04 15:46:16 +0200 | [diff] [blame] | 245 | static inline void __task_free(struct task *t) |
Willy Tarreau | baaee00 | 2006-06-26 02:48:02 +0200 | [diff] [blame] | 246 | { |
Willy Tarreau | bafbe01 | 2017-11-24 17:34:44 +0100 | [diff] [blame] | 247 | pool_free(pool_head_task, t); |
Willy Tarreau | eb11889 | 2014-11-13 16:57:19 +0100 | [diff] [blame] | 248 | if (unlikely(stopping)) |
Willy Tarreau | bafbe01 | 2017-11-24 17:34:44 +0100 | [diff] [blame] | 249 | pool_flush(pool_head_task); |
Emeric Brun | c60def8 | 2017-09-27 14:59:38 +0200 | [diff] [blame] | 250 | HA_ATOMIC_SUB(&nb_tasks, 1); |
Willy Tarreau | baaee00 | 2006-06-26 02:48:02 +0200 | [diff] [blame] | 251 | } |
| 252 | |
Olivier Houchard | 9b36cb4 | 2018-05-04 15:46:16 +0200 | [diff] [blame] | 253 | static inline void task_free(struct task *t) |
| 254 | { |
| 255 | /* There's no need to protect t->state with a lock, as the task |
| 256 | * has to run on the current thread. |
| 257 | */ |
| 258 | if (t == curr_task || !(t->state & TASK_RUNNING)) |
| 259 | __task_free(t); |
| 260 | else |
| 261 | t->process = NULL; |
| 262 | } |
| 263 | |
| 264 | |
/* Place <task> into the wait queue, where it may already be. If the expiration
 * timer is infinite, do nothing and rely on wake_expired_task to clean up.
 */
void __task_queue(struct task *task);
static inline void task_queue(struct task *task)
{
	/* If we already have a place in the wait queue no later than the
	 * timeout we're trying to set, we'll stay there, because it is very
	 * unlikely that we will reach the timeout anyway. If the timeout
	 * has been disabled, it's useless to leave the queue as well. We'll
	 * rely on wake_expired_tasks() to catch the node and move it to the
	 * proper place should it ever happen. Finally we only add the task
	 * to the queue if it was not there or if it was further than what
	 * we want.
	 */
	if (!tick_isset(task->expire))
		return;

	/* the wait queue is shared between threads, requeue under its lock */
	HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
	if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
		__task_queue(task);
	HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
}
Willy Tarreau | baaee00 | 2006-06-26 02:48:02 +0200 | [diff] [blame] | 288 | |
/* Ensure <task> will be woken up at most at <when>. If the task is already in
 * the run queue (but not running), nothing is done. It may be used that way
 * with a delay : task_schedule(task, tick_add(now_ms, delay));
 */
static inline void task_schedule(struct task *task, int when)
{
	/* TODO: mthread, check if there is no risk with this unlocked test */
	if (task_in_rq(task))
		return;

	HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
	if (task_in_wq(task))
		/* never postpone an already-armed, earlier timer */
		when = tick_first(when, task->expire);

	task->expire = when;
	/* requeue only if absent or queued later than the new deadline */
	if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
		__task_queue(task);
	HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
}
| 308 | |
Thierry FOURNIER | d697596 | 2017-07-12 14:31:10 +0200 | [diff] [blame] | 309 | /* This function register a new signal. "lua" is the current lua |
| 310 | * execution context. It contains a pointer to the associated task. |
| 311 | * "link" is a list head attached to an other task that must be wake |
| 312 | * the lua task if an event occurs. This is useful with external |
| 313 | * events like TCP I/O or sleep functions. This funcion allocate |
| 314 | * memory for the signal. |
| 315 | */ |
| 316 | static inline struct notification *notification_new(struct list *purge, struct list *event, struct task *wakeup) |
| 317 | { |
Willy Tarreau | bafbe01 | 2017-11-24 17:34:44 +0100 | [diff] [blame] | 318 | struct notification *com = pool_alloc(pool_head_notification); |
Thierry FOURNIER | d697596 | 2017-07-12 14:31:10 +0200 | [diff] [blame] | 319 | if (!com) |
| 320 | return NULL; |
| 321 | LIST_ADDQ(purge, &com->purge_me); |
| 322 | LIST_ADDQ(event, &com->wake_me); |
Christopher Faulet | 2a944ee | 2017-11-07 10:42:54 +0100 | [diff] [blame] | 323 | HA_SPIN_INIT(&com->lock); |
Thierry FOURNIER | d697596 | 2017-07-12 14:31:10 +0200 | [diff] [blame] | 324 | com->task = wakeup; |
| 325 | return com; |
| 326 | } |
| 327 | |
/* Purge all the pending signals when the LUA execution is finished. This
 * prevents a coprocess from trying to wake a deleted task. The memory
 * associated with a signal is released here when the wake side has already
 * dropped its reference, otherwise the signal is orphaned for the wake side
 * to free. The purge list is not locked because it is owned by only one
 * process. Before browsing this list, the caller must ensure to be the only
 * one browser.
 */
static inline void notification_purge(struct list *purge)
{
	struct notification *com, *back;

	/* Delete all pending communication signals. */
	list_for_each_entry_safe(com, back, purge, purge_me) {
		HA_SPIN_LOCK(NOTIF_LOCK, &com->lock);
		LIST_DEL(&com->purge_me);
		if (!com->task) {
			/* wake side already released its reference: free now */
			HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
			pool_free(pool_head_notification, com);
			continue;
		}
		/* still referenced by the wake list: mark it orphaned so the
		 * wake side frees it instead of waking the task.
		 */
		com->task = NULL;
		HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
	}
}
| 352 | |
Thierry FOURNIER | cb14688 | 2017-12-10 17:10:57 +0100 | [diff] [blame] | 353 | /* In some cases, the disconnected notifications must be cleared. |
| 354 | * This function just release memory blocs. The purge list is not |
| 355 | * locked because it is owned by only one process. Before browsing |
| 356 | * this list, the caller must ensure to be the only one browser. |
| 357 | * The "com" is not locked because when com->task is NULL, the |
| 358 | * notification is no longer used. |
| 359 | */ |
| 360 | static inline void notification_gc(struct list *purge) |
| 361 | { |
| 362 | struct notification *com, *back; |
| 363 | |
| 364 | /* Delete all pending communication signals. */ |
| 365 | list_for_each_entry_safe (com, back, purge, purge_me) { |
| 366 | if (com->task) |
| 367 | continue; |
| 368 | LIST_DEL(&com->purge_me); |
| 369 | pool_free(pool_head_notification, com); |
| 370 | } |
| 371 | } |
| 372 | |
/* This function sends signals. It wakes all the tasks attached to a list
 * head, removes each signal from the list, and frees the memory of those
 * already abandoned by the purge side. The wake list is not locked because
 * it is owned by only one process. Before browsing this list, the caller
 * must ensure to be the only one browser.
 */
static inline void notification_wake(struct list *wake)
{
	struct notification *com, *back;

	/* Wake task and delete all pending communication signals. */
	list_for_each_entry_safe(com, back, wake, wake_me) {
		HA_SPIN_LOCK(NOTIF_LOCK, &com->lock);
		LIST_DEL(&com->wake_me);
		if (!com->task) {
			/* purge side already dropped it: free the signal */
			HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
			pool_free(pool_head_notification, com);
			continue;
		}
		task_wakeup(com->task, TASK_WOKEN_MSG);
		/* hand the signal back to the purge side for release */
		com->task = NULL;
		HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
	}
}
| 397 | |
Willy Tarreau | baaee00 | 2006-06-26 02:48:02 +0200 | [diff] [blame] | 398 | /* |
Willy Tarreau | 918ff60 | 2011-07-25 16:33:49 +0200 | [diff] [blame] | 399 | * This does 3 things : |
Willy Tarreau | baaee00 | 2006-06-26 02:48:02 +0200 | [diff] [blame] | 400 | * - wake up all expired tasks |
| 401 | * - call all runnable tasks |
Willy Tarreau | d825eef | 2007-05-12 22:35:00 +0200 | [diff] [blame] | 402 | * - return the date of next event in <next> or eternity. |
Willy Tarreau | baaee00 | 2006-06-26 02:48:02 +0200 | [diff] [blame] | 403 | */ |
| 404 | |
Thierry FOURNIER | 9cf7c4b | 2014-12-15 13:26:01 +0100 | [diff] [blame] | 405 | void process_runnable_tasks(); |
Willy Tarreau | baaee00 | 2006-06-26 02:48:02 +0200 | [diff] [blame] | 406 | |
Willy Tarreau | 58b458d | 2008-06-29 22:40:23 +0200 | [diff] [blame] | 407 | /* |
| 408 | * Extract all expired timers from the timer queue, and wakes up all |
| 409 | * associated tasks. Returns the date of next event (or eternity). |
| 410 | */ |
Thierry FOURNIER | 9cf7c4b | 2014-12-15 13:26:01 +0100 | [diff] [blame] | 411 | int wake_expired_tasks(); |
Willy Tarreau | 58b458d | 2008-06-29 22:40:23 +0200 | [diff] [blame] | 412 | |
Willy Tarreau | d0a201b | 2009-03-08 15:53:06 +0100 | [diff] [blame] | 413 | /* Perform minimal initializations, report 0 in case of error, 1 if OK. */ |
| 414 | int init_task(); |
Willy Tarreau | baaee00 | 2006-06-26 02:48:02 +0200 | [diff] [blame] | 415 | |
| 416 | #endif /* _PROTO_TASK_H */ |
| 417 | |
| 418 | /* |
| 419 | * Local variables: |
| 420 | * c-indent-level: 8 |
| 421 | * c-basic-offset: 8 |
| 422 | * End: |
| 423 | */ |