/*
 * Task management functions.
 *
 * Copyright 2000-2008 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/config.h>
#include <common/eb32tree.h>
#include <common/memory.h>
#include <common/mini-clist.h>
#include <common/standard.h>
#include <common/time.h>

#include <proto/proxy.h>
#include <proto/task.h>

struct pool_head *pool2_task;

unsigned int run_queue = 0;
unsigned int niced_tasks = 0;    /* number of niced tasks in the run queue */
struct task *last_timer = NULL;  /* optimization: last queued timer */

/* Principle of the wait queue.
 *
 * We want to be able to tell whether an expiration date is before or after the
 * current time <now>. We KNOW that expiration dates are never too far apart,
 * because they are already computed by adding integer numbers of milliseconds
 * to the current date.
 * We also know that almost all dates will be in the future, and that a very
 * small part of them will be in the past; they are the ones which have expired
 * since the last time we checked them.
 *
 * The current implementation uses a wrapping time cut into 3 ranges :
 *   - previous : those ones are expired by definition
 *   - current  : some are expired, some are not
 *   - next     : none are expired
 *
 * We use the higher two bits of the timers expressed in ticks (milliseconds)
 * to determine which range a timer is in, compared to <now> :
 *
 *     now     previous   current    next0      next1
 *   [31:30]   [31:30]    [31:30]    [31:30]    [31:30]
 *     00         11         00         01         10
 *     01         00         01         10         11
 *     10         01         10         11         00
 *     11         10         11         00         01
 *
 * By definition, <current> is the range containing <now> as well as all timers
 * which have the same 2 high bits as <now>, <previous> is the range just
 * before, which contains all timers whose high bits equal those of <now> minus
 * 1. Last, <next> is composed of the two remaining ranges.
 *
 * For ease of implementation, the timers will then be stored into 4 queues 0-3
 * determined by the 2 higher bits of the timer. The expiration algorithm is
 * very simple :
 *   - expire everything in <previous>=queue[((now>>30)-1)&3]
 *   - expire from <current>=queue[(now>>30)&3] everything where timer <= now
 *
 * With this algorithm, it's possible to queue tasks meant to expire 24.8 days
 * in the future, and still be able to detect events remaining unprocessed for
 * the last 12.4 days! Note that the principle might be extended to any number
 * of higher bits as long as there is only one range for expired tasks. For
 * instance, using the 8 higher bits to index the range, we would have one past
 * range of 4.6 hours (24 bits in ms), and 254 ranges in the future totaling
 * 49.3 days. This would eat more memory for very little added benefit.
 *
73 * Also, in order to maintain the ability to perform time comparisons, it is
74 * recommended to avoid using the <next1> range above, as values in this range
75 * may not easily be compared to <now> outside of these functions as it is the
76 * opposite of the <current> range, and <timer>-<now> may randomly be positive
77 * or negative. That means we're left with +/- 12 days timers.
78 *
 * To keep timers ordered, we use 4 ebtrees [0..3]. To keep computation low, we
 * may use (seconds*1024)+milliseconds, which preserves ordering even though we
 * can't do real computations on it. Future evolutions could make use of 1024ths
 * of a second instead of milliseconds, with the special value 0 avoided (and
 * replaced with 1), so that zero indicates the timer is not set.
 */

#define TIMER_TICK_BITS       32
#define TIMER_TREE_BITS       2
#define TIMER_TREES           (1 << TIMER_TREE_BITS)
#define TIMER_TREE_SHIFT      (TIMER_TICK_BITS - TIMER_TREE_BITS)
#define TIMER_TREE_MASK       (TIMER_TREES - 1)
#define TIMER_TICK_MASK       ((1U << (TIMER_TICK_BITS-1)) * 2 - 1)
#define TIMER_SIGN_BIT        (1 << (TIMER_TICK_BITS - 1))
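
/* Worked example (illustrative only, not part of the original code): with
 * TIMER_TREE_SHIFT = 30, the tree index of a tick value is simply its two
 * high bits. If now_ms = 0x80000400 then :
 *   - <current>  = tree (0x80000400 >> 30) & 3 = 2
 *   - <previous> = tree 1 : everything stored there is expired
 *   - <next>     = trees 3 and 0 : nothing stored there is expired
 * A timer <key> is considered expired when (now_ms - key) has TIMER_SIGN_BIT
 * cleared, i.e. when <key> is at or before <now_ms>, within the 2^31 ms past
 * window.
 */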

static struct eb_root timers[TIMER_TREES];  /* trees with MSB 00, 01, 10 and 11 */
static struct eb_root rqueue[TIMER_TREES];  /* trees constituting the run queue */
static unsigned int rqueue_ticks;           /* insertion count */

/* returns an ordered key based on an expiration date. */
static inline unsigned int timeval_to_ticks(const struct timeval *t)
{
	unsigned int key;

	key  = ((unsigned int)t->tv_sec * 1000) + ((unsigned int)t->tv_usec / 1000);
	key &= TIMER_TICK_MASK;
	return key;
}

/* returns a tree number based on a ticks value */
static inline unsigned int ticks_to_tree(unsigned int ticks)
{
	return (ticks >> TIMER_TREE_SHIFT) & TIMER_TREE_MASK;
}

/* returns a tree number based on an expiration date. */
static inline unsigned int timeval_to_tree(const struct timeval *t)
{
	return ticks_to_tree(timeval_to_ticks(t));
}
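
/* Example (illustrative only): a timeval of { tv_sec = 5, tv_usec = 250000 }
 * converts to 5 * 1000 + 250000 / 1000 = 5250 ticks, whose two high bits are
 * 00, so such a timer would be stored in tree 0.
 */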

/* perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_task()
{
	memset(&timers, 0, sizeof(timers));
	memset(&rqueue, 0, sizeof(rqueue));
	pool2_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
	return pool2_task != NULL;
}

/* Puts the task <t> in the run queue at a position depending on t->nice.
 * <t> is returned. The nice value assigns boosts in 32ths of the run queue
 * size. A nice value of -1024 sets the task to -run_queue*32, while a nice
 * value of 1024 sets the task to run_queue*32.
 */
struct task *__task_wakeup(struct task *t)
{
	task_dequeue(t);

	run_queue++;
	t->eb.key = ++rqueue_ticks;

	if (likely(t->nice)) {
		int offset;

		niced_tasks++;
		if (likely(t->nice > 0))
			offset = (unsigned)((run_queue * (unsigned int)t->nice) / 32U);
		else
			offset = -(unsigned)((run_queue * (unsigned int)-t->nice) / 32U);
		t->eb.key += offset;
	}

	/* clear state flags at the same time */
	t->state = TASK_IN_RUNQUEUE;

	eb32_insert(&rqueue[ticks_to_tree(t->eb.key)], &t->eb);
	return t;
}
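
/* Worked example for the nice offset above (illustrative only): with
 * run_queue = 100 after the increment, a task with nice = -1024 gets
 * offset = -(100 * 1024) / 32 = -3200, i.e. -run_queue*32, so it is scheduled
 * well ahead of its insertion order. With nice = +32, offset = (100 * 32) / 32
 * = 100, pushing it behind roughly one run queue's worth of later insertions.
 */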

/*
 * task_queue()
 *
 * Inserts a task into the wait queue at the position given by its expiration
 * date. Note that the task must *not* already be in the wait queue nor in the
 * run queue, otherwise unpredictable results may happen. Tasks queued with an
 * eternity expiration date are simply returned. Last, tasks must not be queued
 * further than the end of the next tree, which is between <now_ms> and
 * <now_ms> + TIMER_SIGN_BIT ms (now+12days..24days in 32bit).
 */
struct task *task_queue(struct task *task)
{
	if (unlikely(!task->expire))
		return task;

	task->eb.key = task->expire;
#ifdef DEBUG_CHECK_INVALID_EXPIRATION_DATES
	if ((task->eb.key - now_ms) & TIMER_SIGN_BIT)
		/* we're queuing too far away or in the past (most likely) */
		return task;
#endif

	if (likely(last_timer &&
		   last_timer->eb.key == task->eb.key &&
		   last_timer->eb.node.node_p)) {
		/* Most often, last queued timer has the same expiration date, so
		 * if it's not queued at the root, let's queue a dup directly there.
		 */
		eb_insert_dup(&last_timer->eb.node, &task->eb.node);
		return task;
	}
	eb32_insert(&timers[ticks_to_tree(task->eb.key)], &task->eb);
	last_timer = task;
	return task;
}
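
/* Usage sketch (illustrative only, not referenced by this file): a caller is
 * typically expected to set an absolute expiration date in ticks before
 * queuing. example_queue_in_5s() is a hypothetical helper; tick_add() is
 * assumed to come from common/ticks.h, and the 5000 ms delay is an arbitrary
 * value chosen for the example.
 */
static inline struct task *example_queue_in_5s(struct task *t)
{
	t->expire = tick_add(now_ms, 5000);  /* absolute date 5 seconds from now */
	return task_queue(t);                /* insert into the wait queue */
}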


/*
 * Extracts all expired timers from the timer queue, and wakes up all
 * associated tasks. Returns the date of next event (or eternity) in <next>.
 */
void wake_expired_tasks(int *next)
{
	struct task *task;
	struct eb32_node *eb;
	unsigned int now_tree;
	unsigned int tree;

	/* In theory, we should :
	 *   - wake all tasks from the <previous> tree
	 *   - wake all expired tasks from the <current> tree
	 *   - scan <next> trees for next expiration date if not found earlier.
	 * But we can do all this more easily : we scan all 3 trees before we
	 * wrap, and wake everything expired from there, then stop on the first
	 * non-expired entry.
	 */
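	/* Example trace (illustrative only): if now_ms = 0x40001234, its high
	 * bits are 01, so now_tree = 1. We start at tree 0 (<previous>), then
	 * scan trees 1 and 2, and stop before tree 3 (<next1>), i.e. we visit
	 * 3 of the 4 trees as described above.
	 */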

	now_tree = ticks_to_tree(now_ms);
	tree = (now_tree - 1) & TIMER_TREE_MASK;
	do {
		eb = eb32_first(&timers[tree]);
		while (eb) {
			task = eb32_entry(eb, struct task, eb);
			if ((now_ms - eb->key) & TIMER_SIGN_BIT) {
				/* note that we don't need this check for the <previous>
				 * tree, but it's cheaper than duplicating the code.
				 */
				*next = task->expire;
				return;
			}

			/* detach the task from the queue and add the task to the run queue */
			eb = eb32_next(eb);
			__task_wakeup(task);
			task->state |= TASK_WOKEN_TIMER;
		}
		tree = (tree + 1) & TIMER_TREE_MASK;
	} while (((tree - now_tree) & TIMER_TREE_MASK) < TIMER_TREES/2);

	/* We have found no task to expire in any tree */
	*next = TICK_ETERNITY;
	return;
}

/* The run queue is chronologically sorted in a tree. An insertion counter is
 * used to assign a position to each task. This counter may be combined with
 * other variables (eg: nice value) to set the final position in the tree. The
 * counter may wrap without a problem, of course. We then limit the number of
 * tasks processed at once to 1/4 of the number of tasks in the queue, and to
 * 200 max in any case, so that general latency remains low and so that task
 * positions have a chance to be considered. It also reduces the number of
 * trees to be evaluated when no task remains.
 *
 * Just like with timers, we start with tree[(current - 1)], which holds past
 * values, and stop when we reach the middle of the list. In practice, we visit
 * 3 out of 4 trees.
 *
 * The function adjusts <next> if a new event is closer.
 */
void process_runnable_tasks(int *next)
{
	int temp;
	struct task *t;
	struct eb32_node *eb;
	unsigned int tree, stop;
	unsigned int max_processed;

	if (!run_queue)
		return;

	max_processed = run_queue;
	if (max_processed > 200)
		max_processed = 200;

	if (likely(niced_tasks))
		max_processed /= 4;
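	/* Example (illustrative only): with run_queue = 1000 and at least one
	 * niced task, max_processed = min(1000, 200) / 4 = 50 tasks per call.
	 */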

	tree = ticks_to_tree(rqueue_ticks);
	stop = (tree + TIMER_TREES / 2) & TIMER_TREE_MASK;
	tree = (tree - 1) & TIMER_TREE_MASK;

	do {
		eb = eb32_first(&rqueue[tree]);
		while (eb) {
			t = eb32_entry(eb, struct task, eb);

			/* detach the task from the run queue */
			eb = eb32_next(eb);

			run_queue--;
			if (likely(t->nice))
				niced_tasks--;
			t->state &= ~TASK_IN_RUNQUEUE;
			task_dequeue(t);

			t->process(t, &temp);
			*next = tick_first(*next, temp);

			if (!--max_processed)
				return;
		}
		tree = (tree + 1) & TIMER_TREE_MASK;
	} while (tree != stop);
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */