/*
  include/proto/task.h
  Functions for task management.

  Copyright (C) 2000-2009 Willy Tarreau - w@1wt.eu

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation, version 2.1
  exclusively.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

#ifndef _PROTO_TASK_H
#define _PROTO_TASK_H


#include <sys/time.h>

#include <common/config.h>
#include <common/eb32tree.h>
#include <common/memory.h>
#include <common/mini-clist.h>
#include <common/standard.h>
#include <common/ticks.h>

#include <types/task.h>

/* Principle of the wait queue.
 *
 * We want to be able to tell whether an expiration date is before or after
 * the current time <now>. We KNOW that expiration dates are never too far
 * apart, because they are measured in ticks (milliseconds). We also know that
 * almost all dates will be in the future, and that a very small part of them
 * will be in the past: they are the ones which have expired since the last
 * time we checked them. Using ticks, we know if a date is in the future or in
 * the past, but we cannot use that to store sorted information because that
 * reference changes all the time.
 *
 * We'll use the fact that the time wraps to sort timers. Timers above <now>
 * are in the future, timers below <now> are in the past. Here, "above" and
 * "below" are to be considered modulo 2^31.
 *
 * Timers are stored sorted in an ebtree. We use the new ability for ebtrees to
 * lookup values starting from X to only expire tasks between <now> - 2^31 and
 * <now>. If the end of the tree is reached while walking over it, we simply
 * loop back to the beginning. That way, we have no problem keeping sorted
 * wrapping timers in a tree, between (now - 24 days) and (now + 24 days). The
 * keys in the tree always reflect their real position, none can be infinite.
 * This reduces the number of checks to be performed.
 *
 * Another nice optimisation is to allow a timer to stay at an old place in the
 * queue as long as it's not further than the real expiration date. That way,
 * we use the tree as a place holder for a lower bound of the real expiration
 * date. Since we have a very low chance of hitting a timeout anyway, we can
 * bounce the nodes to their right place when we scan the tree if we encounter
 * a misplaced node once in a while. This even allows us not to remove the
 * infinite timers from the wait queue.
 *
 * So, to summarize, we have :
 * - node->key always defines the current position in the wait queue
 * - timer is the real expiration date (possibly infinite)
 * - node->key is always before or equal to timer
 *
 * The run queue works similarly to the wait queue except that the current date
 * is replaced by an insertion counter which can also wrap without any problem.
 */
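
/* Example (illustrative sketch, not part of this file's API): the wrapping
 * "above"/"below" comparison described above can be done with 32-bit
 * arithmetic, where the subtraction wraps and the result is read as signed
 * (two's complement assumed). The real helpers live in common/ticks.h; the
 * function name below is made up for the example :
 *
 *     static inline int example_tick_before(unsigned int a, unsigned int b)
 *     {
 *             // true if <a> is less than 2^31 ticks before <b>
 *             return (int)(a - b) < 0;
 *     }
 *
 * This is why timers can be kept sorted in the tree despite wrapping, as long
 * as they never span more than 2^31 ms (about 24 days) around <now>.
 */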

/* The farthest we can look back in a timer tree */
#define TIMER_LOOK_BACK       (1U << 31)

/* a few exported variables */
extern unsigned int nb_tasks;     /* total number of tasks */
extern unsigned int run_queue;    /* run queue size */
extern unsigned int run_queue_cur;     /* copy of the run queue size, for reporting */
extern unsigned int nb_tasks_cur;      /* copy of the tasks count, for reporting */
extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
extern struct pool_head *pool2_task;
extern struct eb32_node *last_timer;   /* optimization: last queued timer */

/* return non-zero if task <t> is in the run queue, otherwise zero */
static inline int task_in_rq(struct task *t)
{
	return t->rq.node.leaf_p != NULL;
}

/* return non-zero if task <t> is in the wait queue, otherwise zero */
static inline int task_in_wq(struct task *t)
{
	return t->wq.node.leaf_p != NULL;
}

/* puts the task <t> in run queue with reason flags <f>, and returns <t> */
struct task *__task_wakeup(struct task *t);
static inline struct task *task_wakeup(struct task *t, unsigned int f)
{
	if (likely(!task_in_rq(t)))
		__task_wakeup(t);
	t->state |= f;
	return t;
}
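
/* Example (illustrative sketch): a typical caller wakes a task up with one of
 * the TASK_WOKEN_* reason flags from types/task.h (TASK_WOKEN_MSG is assumed
 * here). The flag accumulates into t->state so that the task's handler can
 * later tell why it was woken up :
 *
 *     void notify_task(struct task *t)
 *     {
 *             // queues <t> only if not already queued; the flag is OR'ed in
 *             // either way and will be visible in t->state when it runs
 *             task_wakeup(t, TASK_WOKEN_MSG);
 *     }
 */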

/*
 * Unlink the task from the wait queue, and possibly update the last_timer
 * pointer. A pointer to the task itself is returned. The task *must* already
 * be in the wait queue before calling this function. If unsure, use the safer
 * task_unlink_wq() function.
 */
static inline struct task *__task_unlink_wq(struct task *t)
{
	eb32_delete(&t->wq);
	if (last_timer == &t->wq)
		last_timer = NULL;
	return t;
}

static inline struct task *task_unlink_wq(struct task *t)
{
	if (likely(task_in_wq(t)))
		__task_unlink_wq(t);
	return t;
}
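
/* Example (illustrative sketch): as noted in the wait queue description above,
 * an obsolete timer does not strictly need to be unlinked; marking it infinite
 * is enough and wake_expired_tasks() will drop the stale node when it reaches
 * it. TICK_ETERNITY is assumed from common/ticks.h. Either form below disables
 * the timer :
 *
 *     task_unlink_wq(t);           // explicit removal from the wait queue
 *
 *     t->expire = TICK_ETERNITY;   // or: leave it queued as an infinite timer
 */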

/*
 * Unlink the task from the run queue. The run_queue size and number of niced
 * tasks are updated too. A pointer to the task itself is returned. The task
 * *must* already be in the run queue before calling this function. If unsure,
 * use the safer task_unlink_rq() function.
 */
static inline struct task *__task_unlink_rq(struct task *t)
{
	eb32_delete(&t->rq);
	run_queue--;
	if (likely(t->nice))
		niced_tasks--;
	return t;
}

static inline struct task *task_unlink_rq(struct task *t)
{
	if (likely(task_in_rq(t)))
		__task_unlink_rq(t);
	return t;
}

/*
 * Unlinks the task from both the wait queue and the run queue, adjusting the
 * run queue stats as needed. A pointer to the task itself is returned.
 */
static inline struct task *task_delete(struct task *t)
{
	task_unlink_wq(t);
	task_unlink_rq(t);
	return t;
}

/*
 * Initialize a new task. The bare minimum is performed (queue pointers, state,
 * nice value and call count). The task is returned. This function should not
 * be used outside of task_new().
 */
static inline struct task *task_init(struct task *t)
{
	t->wq.node.leaf_p = NULL;
	t->rq.node.leaf_p = NULL;
	t->state = TASK_SLEEPING;
	t->nice = 0;
	t->calls = 0;
	return t;
}

/*
 * Allocate and initialise a new task. The new task is returned, or NULL in
 * case of lack of memory. The task count is incremented. Tasks should only
 * be allocated this way, and must be freed using task_free().
 */
static inline struct task *task_new(void)
{
	struct task *t = pool_alloc2(pool2_task);
	if (t) {
		nb_tasks++;
		task_init(t);
	}
	return t;
}

/*
 * Free a task. Its context must have been released first, since the pointer
 * to it will be lost. The task count is decremented.
 */
static inline void task_free(struct task *t)
{
	pool_free2(pool2_task, t);
	nb_tasks--;
}
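
/* Example (illustrative sketch): typical lifecycle of a task. The ->process
 * and ->context fields are assumed here from types/task.h, and the handler
 * follows the scheduler's convention of returning the task to keep it, or
 * NULL once it has freed it. TASK_WOKEN_INIT is assumed as the wakeup reason :
 *
 *     static struct task *my_handler(struct task *t)
 *     {
 *             // work on t->context, then return t to keep the task alive
 *             return t;
 *     }
 *
 *     struct task *t = task_new();
 *     if (t) {
 *             t->process = my_handler;
 *             t->context = my_ctx;
 *             task_wakeup(t, TASK_WOKEN_INIT);
 *     }
 *
 * Once the task is no longer needed, it must be unlinked before release :
 *
 *     task_free(task_delete(t));
 */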

/* Place <task> into the wait queue, where it may already be. If the expiration
 * timer is infinite, do nothing and rely on wake_expired_tasks() to clean up.
 */
void __task_queue(struct task *task);
static inline void task_queue(struct task *task)
{
	/* If we already have a place in the wait queue no later than the
	 * timeout we're trying to set, we'll stay there, because it is very
	 * unlikely that we will reach the timeout anyway. If the timeout
	 * has been disabled, it's useless to leave the queue as well. We'll
	 * rely on wake_expired_tasks() to catch the node and move it to the
	 * proper place should it ever happen. Finally we only add the task
	 * to the queue if it was not there or if it was further than what
	 * we want.
	 */
	if (!tick_isset(task->expire))
		return;

	if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
		__task_queue(task);
}
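
/* Example (illustrative sketch): arming a timeout on a task. The ->expire
 * field, tick_add_ifset() and now_ms are assumed from types/task.h,
 * common/ticks.h and common/time.h respectively; <my_timeout> stands for any
 * tick-based delay :
 *
 *     t->expire = tick_add_ifset(now_ms, my_timeout);  // eternity if unset
 *     task_queue(t);   // no-op if already queued no later than <expire>
 */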
Willy Tarreau | baaee00 | 2006-06-26 02:48:02 +0200 | [diff] [blame] | 226 | |
| 227 | /* |
| 228 | * This does 4 things : |
| 229 | * - wake up all expired tasks |
| 230 | * - call all runnable tasks |
| 231 | * - call maintain_proxies() to enable/disable the listeners |
Willy Tarreau | d825eef | 2007-05-12 22:35:00 +0200 | [diff] [blame] | 232 | * - return the date of next event in <next> or eternity. |
Willy Tarreau | baaee00 | 2006-06-26 02:48:02 +0200 | [diff] [blame] | 233 | */ |
| 234 | |
Willy Tarreau | 0c303ee | 2008-07-07 00:09:58 +0200 | [diff] [blame] | 235 | void process_runnable_tasks(int *next); |
Willy Tarreau | baaee00 | 2006-06-26 02:48:02 +0200 | [diff] [blame] | 236 | |
/*
 * Extracts all expired timers from the timer queue, and wakes up all
 * associated tasks. Reports the date of the next event in <next> (or
 * eternity).
 */
void wake_expired_tasks(int *next);
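
/* Example (illustrative sketch): both functions are meant to be called from
 * the main polling loop; <next> holds the date of the nearest known event and
 * is typically handed to the poller as its wakeup timeout. The poller call
 * below is only an assumed illustration of that interface :
 *
 *     int next;
 *
 *     while (running) {
 *             wake_expired_tasks(&next);       // wake tasks whose timer expired
 *             process_runnable_tasks(&next);   // run them and refine <next>
 *             cur_poller.poll(&cur_poller, next);
 *     }
 */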

/* Perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_task();

#endif /* _PROTO_TASK_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */