/*
 * Task management functions.
 *
 * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <string.h>

#include <common/config.h>
#include <common/memory.h>
#include <common/mini-clist.h>
#include <common/standard.h>
#include <common/time.h>
#include <eb32tree.h>

#include <proto/proxy.h>
#include <proto/stream.h>
#include <proto/task.h>

struct pool_head *pool2_task;

unsigned int nb_tasks = 0;
unsigned int tasks_run_queue = 0;
unsigned int tasks_run_queue_cur = 0; /* copy of the run queue size */
unsigned int nb_tasks_cur = 0;        /* copy of the tasks count */
unsigned int niced_tasks = 0;         /* number of niced tasks in the run queue */
struct eb32_node *last_timer = NULL;  /* optimization: last queued timer */
struct eb32_node *rq_next = NULL;     /* optimization: next task except if delete/insert */

static struct eb_root timers;      /* sorted timers tree */
static struct eb_root rqueue;      /* tree constituting the run queue */
static unsigned int rqueue_ticks;  /* insertion count */

/* Puts the task <t> in run queue at a position depending on t->nice. <t> is
 * returned. The nice value assigns boosts in 32ths of the run queue size. A
 * nice value of -1024 sets the task to -tasks_run_queue*32, while a nice value
 * of 1024 sets the task to tasks_run_queue*32. The state flags are cleared, so
 * the caller will have to set its flags after this call.
 * The task must not already be in the run queue. If unsure, use the safer
 * task_wakeup() function.
 */
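/* Illustrative example (not from the original sources): with a run queue of
 * 1000 tasks, a task woken up with nice = 512 gets an offset of
 * 1000 * 512 / 32 = +16000 added to its queue key, so it runs later, while
 * nice = -512 yields -16000 and makes it run earlier.
 */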
struct task *__task_wakeup(struct task *t)
{
        tasks_run_queue++;
        t->rq.key = ++rqueue_ticks;

        if (likely(t->nice)) {
                int offset;

                niced_tasks++;
                if (likely(t->nice > 0))
                        offset = (unsigned)((tasks_run_queue * (unsigned int)t->nice) / 32U);
                else
                        offset = -(unsigned)((tasks_run_queue * (unsigned int)-t->nice) / 32U);
                t->rq.key += offset;
        }

        /* clear state flags at the same time */
        t->state &= ~TASK_WOKEN_ANY;

        eb32_insert(&rqueue, &t->rq);
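        /* the cached run queue cursor may no longer point to the lowest key
         * after this insertion, so drop it and let the scheduler rescan.
         */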
        rq_next = NULL;
        return t;
}
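
/* Usage sketch (illustrative, not part of the original file): callers are
 * expected to go through the inline task_wakeup() wrapper, e.g.
 * task_wakeup(t, TASK_WOKEN_MSG), which sets the wake-up reason flag and only
 * calls __task_wakeup() when the task is not already in the run queue.
 */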

/*
 * __task_queue()
 *
 * Inserts a task into the wait queue at the position given by its expiration
 * date. It does not matter if the task was already in the wait queue or not,
 * as it will be unlinked. The task must not have an infinite expiration timer.
 * Last, tasks must not be queued further than the end of the tree, which is
 * between <now_ms> and <now_ms> + 2^31 ms (now+24days in 32bit).
 *
 * This function should not be used directly, it is meant to be called by the
 * inline version of task_queue() which performs a few cheap preliminary tests
 * before deciding to call __task_queue().
 */
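/* Usage sketch (illustrative, not part of the original file): a caller that
 * just refreshed a timeout typically does something like
 *     t->expire = tick_add(now_ms, MS_TO_TICKS(5000));
 *     task_queue(t);
 * and lets the inline task_queue() decide whether __task_queue() must run.
 */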
void __task_queue(struct task *task)
{
        if (likely(task_in_wq(task)))
                __task_unlink_wq(task);

        /* the task is not in the queue now */
        task->wq.key = task->expire;
#ifdef DEBUG_CHECK_INVALID_EXPIRATION_DATES
        if (tick_is_lt(task->wq.key, now_ms))
                /* we're queuing too far away or in the past (most likely) */
                return;
#endif

        if (likely(last_timer &&
                   last_timer->node.bit < 0 &&
                   last_timer->key == task->wq.key &&
                   last_timer->node.node_p)) {
                /* Most often, last queued timer has the same expiration date, so
                 * if it's not queued at the root, let's queue a dup directly there.
                 * Note that we can only use dups at the dup tree's root (most
                 * negative bit).
                 */
                eb_insert_dup(&last_timer->node, &task->wq.node);
                if (task->wq.node.bit < last_timer->node.bit)
                        last_timer = &task->wq;
                return;
        }
        eb32_insert(&timers, &task->wq);

        /* Make sure we don't assign the last_timer to a node-less entry */
        if (task->wq.node.node_p && (!last_timer || (task->wq.node.bit < last_timer->node.bit)))
                last_timer = &task->wq;
        return;
}

/*
 * Extracts all expired timers from the timer queue and wakes up all
 * associated tasks. Returns the date of next event (or eternity).
 */
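/* Usage sketch (illustrative, not from this file): the main run loop is
 * assumed to combine this with process_runnable_tasks() roughly as
 *     process_runnable_tasks();
 *     next = wake_expired_tasks();
 *     cur_poller.poll(&cur_poller, next);
 * so that the poller sleeps no longer than the earliest pending timer.
 */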
int wake_expired_tasks()
{
        struct task *task;
        struct eb32_node *eb;

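        /* Note (added for clarity; TIMER_LOOK_BACK is assumed to be half the
         * 32-bit tick range, i.e. 2^31 ms): starting the scan that far in the
         * past guarantees that every valid timer, queued at most 2^31 ms ahead
         * of <now_ms>, is found even when the 32-bit key space wraps.
         */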
        eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
        while (1) {
                if (unlikely(!eb)) {
                        /* we might have reached the end of the tree, typically because
                         * <now_ms> is in the first half and we're first scanning the last
                         * half. Let's loop back to the beginning of the tree now.
                         */
                        eb = eb32_first(&timers);
                        if (likely(!eb))
                                break;
                }

                if (likely(tick_is_lt(now_ms, eb->key))) {
                        /* timer not expired yet, revisit it later */
                        return eb->key;
                }

                /* timer looks expired, detach it from the queue */
                task = eb32_entry(eb, struct task, wq);
                eb = eb32_next(eb);
                __task_unlink_wq(task);

                /* It is possible that this task was left at an earlier place in the
                 * tree because a recent call to task_queue() has not moved it. This
                 * happens when the new expiration date is later than the old one.
                 * Since it is very unlikely that we reach a timeout anyway, it's a
                 * lot cheaper to proceed like this because we almost never update
                 * the tree. We may also find disabled expiration dates there. Since
                 * we have detached the task from the tree, we simply call task_queue
                 * to take care of this. Note that we might occasionally requeue it at
                 * the same place, before <eb>, so we have to check if this happens,
                 * and adjust <eb>, otherwise we may skip it which is not what we want.
                 * We may also not requeue the task (and not point eb at it) if its
                 * expiration time is not set.
                 */
                if (!tick_is_expired(task->expire, now_ms)) {
                        if (!tick_isset(task->expire))
                                continue;
                        __task_queue(task);
                        if (!eb || eb->key > task->wq.key)
                                eb = &task->wq;
                        continue;
                }
                task_wakeup(task, TASK_WOKEN_TIMER);
        }

        /* No task is expired */
        return TICK_ETERNITY;
}

/* The run queue is chronologically sorted in a tree. An insertion counter is
 * used to assign a position to each task. This counter may be combined with
 * other variables (eg: nice value) to set the final position in the tree. The
 * counter may wrap without a problem, of course. We then limit the number of
 * tasks processed at once to 200 max in any case, and to 1/4 of that when
 * niced tasks are present, so that general latency remains low and so that
 * task positions have a chance to be considered.
 *
 * The function returns nothing; the date of the next expiration is obtained
 * separately from wake_expired_tasks().
 */
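/* Illustrative example (not from the original sources): with 120 runnable
 * tasks and no niced task, up to 120 tasks are processed in one call; with
 * 1000 runnable tasks the batch is capped to 200; if at least one task is
 * niced, that cap is further reduced to (200 + 3) / 4 = 50 tasks.
 */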
void process_runnable_tasks()
{
        struct task *t;
        unsigned int max_processed;

        tasks_run_queue_cur = tasks_run_queue; /* keep a copy for reporting */
        nb_tasks_cur = nb_tasks;
        max_processed = tasks_run_queue;

        if (!tasks_run_queue)
                return;

        if (max_processed > 200)
                max_processed = 200;

        if (likely(niced_tasks))
                max_processed = (max_processed + 3) / 4;

        while (max_processed--) {
                /* Note: this loop is one of the fastest code paths in
                 * the whole program. It should not be re-arranged
                 * without a good reason.
                 */
                if (unlikely(!rq_next)) {
                        rq_next = eb32_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK);
                        if (!rq_next) {
                                /* we might have reached the end of the tree, typically because
                                 * <rqueue_ticks> is in the first half and we're first scanning
                                 * the last half. Let's loop back to the beginning of the tree now.
                                 */
                                rq_next = eb32_first(&rqueue);
                                if (!rq_next)
                                        break;
                        }
                }

                /* detach the task from the queue after updating the pointer to
                 * the next entry.
                 */
                t = eb32_entry(rq_next, struct task, rq);
                rq_next = eb32_next(rq_next);
                __task_unlink_rq(t);

                t->state |= TASK_RUNNING;
                /* This is an optimisation to help the processor's branch
                 * predictor take this most common call.
                 */
                t->calls++;
                if (likely(t->process == process_stream))
                        t = process_stream(t);
                else
                        t = t->process(t);

                if (likely(t != NULL)) {
                        t->state &= ~TASK_RUNNING;
                        if (t->expire)
                                task_queue(t);
                }
        }
}

/* perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_task()
{
        memset(&timers, 0, sizeof(timers));
        memset(&rqueue, 0, sizeof(rqueue));
        pool2_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
        return pool2_task != NULL;
}
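
/* Usage note (illustrative assumption, not stated in this file): init_task()
 * is expected to run once at startup, before any task is created, since the
 * pool2_task pool created here is the one task allocation draws from.
 */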

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */