/*
 * Task management functions.
 *
 * Copyright 2000-2008 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/config.h>
#include <common/eb32tree.h>
#include <common/memory.h>
#include <common/mini-clist.h>
#include <common/standard.h>
#include <common/time.h>

#include <proto/proxy.h>
#include <proto/task.h>

struct pool_head *pool2_task;

unsigned int run_queue = 0;
unsigned int niced_tasks = 0;   /* number of niced tasks in the run queue */
struct task *last_timer = NULL; /* optimization: last queued timer */

/* Principle of the wait queue.
 *
 * We want to be able to tell whether an expiration date is before or after the
 * current time <now>. We KNOW that expiration dates are never too far apart,
 * because they are already computed by adding integer numbers of milliseconds
 * to the current date.
 * We also know that almost all dates will be in the future, and that a very
 * small part of them will be in the past ; those are the ones which have
 * expired since the last time we checked them.
 *
 * The current implementation uses a wrapping time cut into 3 ranges :
 *   - previous : those ones are expired by definition
 *   - current  : some are expired, some are not
 *   - next     : none are expired
 *
 * We use the two higher bits of the timers expressed in ticks (milliseconds)
 * to determine which range a timer is in, compared to <now> :
 *
 *     now      previous    current     next0     next1
 *   [31:30]    [31:30]     [31:30]    [31:30]   [31:30]
 *      00         11          00         01        10
 *      01         00          01         10        11
 *      10         01          10         11        00
 *      11         10          11         00        01
 *
 * By definition, <current> is the range containing <now> as well as all timers
 * which have the same 2 high bits as <now>, <previous> is the range just
 * before, which contains all timers whose high bits equal those of <now> minus
 * 1. Last, <next> is composed of the two remaining ranges.
 *
 * For ease of implementation, the timers will then be stored into 4 queues 0-3
 * determined by the 2 higher bits of the timer. The expiration algorithm is
 * very simple :
 *   - expire everything in <previous> = queue[((now>>30)-1)&3]
 *   - expire from <current> = queue[(now>>30)&3] everything where timer <= now
 *
 * With this algorithm, it's possible to queue tasks meant to expire 24.8 days
 * in the future, and still be able to detect events remaining unprocessed for
 * the last 12.4 days! Note that the principle might be extended to any number
 * of higher bits as long as there is only one range for expired tasks. For
 * instance, using the 8 higher bits to index the range, we would have one past
 * range of 4.6 hours (24 bits in ms), and 254 ranges in the future totalling
 * 49.3 days. This would eat more memory for very little added benefit.
 *
 * Also, in order to maintain the ability to perform time comparisons, it is
 * recommended to avoid using the <next1> range above, as values in this range
 * may not easily be compared to <now> outside of these functions, since it is
 * the opposite of the <current> range, and <timer>-<now> may randomly be
 * positive or negative. That means we're left with +/- 12 day timers.
 *
 * To keep timers ordered, we use 4 ebtrees [0..3]. To keep computation low, we
 * may use (seconds*1024)+milliseconds, which preserves ordering even though we
 * can't do real computations on it. Future evolutions could make use of 1024ths
 * of a second instead of milliseconds, with the special value 0 avoided (and
 * replaced with 1), so that zero indicates the timer is not set.
 */

#define TIMER_TICK_BITS       32
#define TIMER_TREE_BITS       2
#define TIMER_TREES           (1 << TIMER_TREE_BITS)
#define TIMER_TREE_SHIFT      (TIMER_TICK_BITS - TIMER_TREE_BITS)
#define TIMER_TREE_MASK       (TIMER_TREES - 1)
#define TIMER_TICK_MASK       ((1U << (TIMER_TICK_BITS-1)) * 2 - 1)
#define TIMER_SIGN_BIT        (1 << (TIMER_TICK_BITS - 1))

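/* A rough worked example of the mapping above, for illustration only :
 * with TIMER_TREE_SHIFT = 30, a tick whose two high bits are 01 (e.g.
 * 0x40001234) lands in tree 1. If <now_ms> has high bits 00, then tree 3
 * is <previous>, tree 0 is <current>, tree 1 is <next0> and tree 2 is
 * <next1>, matching the table in the comment above. The expiration test
 * itself relies on the sign bit of the wrapping difference : with
 * now_ms = 1000, a timer key of 900 gives (1000 - 900) = 100, sign bit
 * clear, hence expired ; a key of 1100 gives (1000 - 1100) = 0xFFFFFF9C,
 * sign bit set, hence still in the future.
 */
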
static struct eb_root timers[TIMER_TREES];  /* trees with MSB 00, 01, 10 and 11 */
static struct eb_root rqueue[TIMER_TREES];  /* trees constituting the run queue */
static unsigned int rqueue_ticks;           /* insertion count */

/* returns an ordered key based on an expiration date. */
static inline unsigned int timeval_to_ticks(const struct timeval *t)
{
        unsigned int key;

        key  = ((unsigned int)t->tv_sec * 1000) + ((unsigned int)t->tv_usec / 1000);
        key &= TIMER_TICK_MASK;
        return key;
}

/* returns a tree number based on a ticks value */
static inline unsigned int ticks_to_tree(unsigned int ticks)
{
        return (ticks >> TIMER_TREE_SHIFT) & TIMER_TREE_MASK;
}

/* returns a tree number based on an expiration date. */
static inline unsigned int timeval_to_tree(const struct timeval *t)
{
        return ticks_to_tree(timeval_to_ticks(t));
}

/* perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_task()
{
        memset(&timers, 0, sizeof(timers));
        memset(&rqueue, 0, sizeof(rqueue));
        pool2_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
        return pool2_task != NULL;
}

/* Puts the task <t> in the run queue at a position depending on t->nice.
 * <t> is returned. The nice value assigns boosts in increments of 1/32 of
 * the run queue size. A nice value of -1024 sets the task to -run_queue*32,
 * while a nice value of 1024 sets the task to run_queue*32.
 */
struct task *task_wakeup(struct task *t)
{
        if (t->state == TASK_RUNNING)
                return t;

        task_dequeue(t);

        run_queue++;
        t->eb.key = ++rqueue_ticks;

        if (likely(t->nice)) {
                int offset;

                niced_tasks++;
                if (likely(t->nice > 0))
                        offset = (unsigned)((run_queue * (unsigned int)t->nice) / 32U);
                else
                        offset = -(unsigned)((run_queue * (unsigned int)-t->nice) / 32U);
                t->eb.key += offset;
        }

        t->state = TASK_RUNNING;

        eb32_insert(&rqueue[ticks_to_tree(t->eb.key)], &t->eb);
        return t;
}

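/* Rough example of the nice offset computed above : with run_queue = 100,
 * a task with nice = 32 gets its key pushed back by 100 * 32 / 32 = 100
 * positions, i.e. it yields to roughly one full queue of normal tasks,
 * while nice = -32 pulls it forward by the same amount. The extreme values
 * nice = -1024 and nice = 1024 give the -run_queue*32 and run_queue*32
 * offsets mentioned in the comment above.
 */
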
/*
 * task_queue()
 *
 * Inserts a task into the wait queue at the position given by its expiration
 * date. Note that the task must *not* already be in the wait queue nor in the
 * run queue, otherwise unpredictable results may happen. Tasks queued with an
 * eternity expiration date are simply returned. Last, tasks must not be queued
 * further than the end of the next tree, which is between <now_ms> and
 * <now_ms> + TIMER_SIGN_BIT ms (now+12days..24days in 32bit).
 */
struct task *task_queue(struct task *task)
{
        if (unlikely(!task->expire))
                return task;

        task->eb.key = task->expire;
#ifdef DEBUG_CHECK_INVALID_EXPIRATION_DATES
        if ((task->eb.key - now_ms) & TIMER_SIGN_BIT)
                /* we're queuing too far away or in the past (most likely) */
                return task;
#endif

        if (likely(last_timer &&
                   last_timer->eb.key == task->eb.key &&
                   last_timer->eb.node.node_p)) {
                /* Most often, the last queued timer has the same expiration date,
                 * so if it's not queued at the root, let's queue a dup directly
                 * there.
                 */
                eb_insert_dup(&last_timer->eb.node, &task->eb.node);
                return task;
        }
        eb32_insert(&timers[ticks_to_tree(task->eb.key)], &task->eb);
        last_timer = task;
        return task;
}

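/* Illustration of the window checked under DEBUG_CHECK_INVALID_EXPIRATION_DATES
 * above : with now_ms = 0, an expire date 10 days ahead (864000000 ms) keeps
 * the sign bit of (key - now_ms) clear and is accepted, whereas one 30 days
 * ahead (2592000000 ms, above 2^31) or even 1 ms in the past (0xFFFFFFFF) sets
 * the sign bit and makes the task be returned without being queued.
 */
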

/*
 * Extracts all expired timers from the timer queue, and wakes up all
 * associated tasks. The date of the next event (or TICK_ETERNITY) is
 * returned in <next>.
 */
void wake_expired_tasks(int *next)
{
        struct task *task;
        struct eb32_node *eb;
        unsigned int now_tree;
        unsigned int tree;

        /* In theory, we should :
         *   - wake all tasks from the <previous> tree
         *   - wake all expired tasks from the <current> tree
         *   - scan the <next> trees for the next expiration date if not found earlier.
         * But we can do all this more easily : we scan all 3 trees before we
         * wrap, wake everything expired from there, then stop on the first
         * non-expired entry.
         */

        now_tree = ticks_to_tree(now_ms);
        tree = (now_tree - 1) & TIMER_TREE_MASK;
        do {
                eb = eb32_first(&timers[tree]);
                while (eb) {
                        task = eb32_entry(eb, struct task, eb);
                        if ((now_ms - eb->key) & TIMER_SIGN_BIT) {
                                /* note that we don't need this check for the <previous>
                                 * tree, but it's cheaper than duplicating the code.
                                 */
                                *next = task->expire;
                                return;
                        }

                        /* detach the task from the wait queue and add it to the run queue */
                        eb = eb32_next(eb);
                        task_wakeup(task);
                }
                tree = (tree + 1) & TIMER_TREE_MASK;
        } while (((tree - now_tree) & TIMER_TREE_MASK) < TIMER_TREES/2);

        /* We have found no task to expire in any tree */
        *next = TICK_ETERNITY;
        return;
}

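/* Example of the scan order used above : if now_ms has high bits 10, then
 * now_tree = 2 and the loop visits tree 1 (<previous>), tree 2 (<current>)
 * and tree 3 (<next0>) in that order, leaving tree 0 (<next1>) untouched,
 * i.e. exactly the 3 trees mentioned in the comment above.
 */
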
/* The run queue is chronologically sorted in a tree. An insertion counter is
 * used to assign a position to each task. This counter may be combined with
 * other variables (e.g. the nice value) to set the final position in the tree.
 * The counter may wrap without a problem, of course. We then limit the number
 * of tasks processed at once to the run queue size, and to 200 max in any case
 * (further divided by 4 when niced tasks are present), so that general latency
 * remains low and so that task positions have a chance to be considered. It
 * also reduces the number of trees to be evaluated when no task remains.
 *
 * Just like with timers, we start with tree[(current - 1)], which holds past
 * values, and stop when we reach the middle of the list. In practice, we visit
 * 3 out of 4 trees.
 *
 * The function adjusts <next> if a new event is closer.
 */
void process_runnable_tasks(int *next)
{
        int temp;
        struct task *t;
        struct eb32_node *eb;
        unsigned int tree, stop;
        unsigned int max_processed;

        if (!run_queue)
                return;

        max_processed = run_queue;
        if (max_processed > 200)
                max_processed = 200;

        if (likely(niced_tasks))
                max_processed /= 4;

        tree = ticks_to_tree(rqueue_ticks);
        stop = (tree + TIMER_TREES / 2) & TIMER_TREE_MASK;
        tree = (tree - 1) & TIMER_TREE_MASK;

        do {
                eb = eb32_first(&rqueue[tree]);
                while (eb) {
                        t = eb32_entry(eb, struct task, eb);

                        /* detach the task from the run queue and run it */
                        eb = eb32_next(eb);

                        run_queue--;
                        if (likely(t->nice))
                                niced_tasks--;
                        t->state = TASK_IDLE;
                        task_dequeue(t);

                        t->process(t, &temp);
                        *next = tick_first(*next, temp);

                        if (!--max_processed)
                                return;
                }
                tree = (tree + 1) & TIMER_TREE_MASK;
        } while (tree != stop);
}
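/* Rough example of the processing budget computed above : with
 * run_queue = 1000 and no niced task, at most 200 tasks are run in one call ;
 * as soon as at least one niced task is present, that budget drops to
 * 200 / 4 = 50, so that task positions (and thus nice values) are
 * re-evaluated more often between batches.
 */
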

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */