/*
 * include/proto/task.h
 * Functions for task management.
 *
 * Copyright (C) 2000-2010 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _PROTO_TASK_H
#define _PROTO_TASK_H

#include <sys/time.h>

#include <haproxy/api.h>
#include <haproxy/pool.h>
#include <haproxy/intops.h>
#include <haproxy/list.h>
#include <haproxy/ticks.h>
#include <haproxy/thread.h>

#include <import/eb32sctree.h>
#include <import/eb32tree.h>

#include <types/global.h>
#include <types/task.h>

#include <haproxy/fd.h>

/* Principle of the wait queue.
 *
 * We want to be able to tell whether an expiration date is before or after the
 * current time <now>. We KNOW that expiration dates are never too far apart,
 * because they are measured in ticks (milliseconds). We also know that almost
 * all dates will be in the future, and that a very small part of them will be
 * in the past; these are the ones which have expired since the last time we
 * checked them. Using ticks, we know if a date is in the future or in the
 * past, but we cannot use that to store sorted information because that
 * reference changes all the time.
 *
 * We'll use the fact that the time wraps to sort timers. Timers above <now>
 * are in the future, timers below <now> are in the past. Here, "above" and
 * "below" are to be considered modulo 2^31.
 *
 * Timers are stored sorted in an ebtree. We use the new ability for ebtrees to
 * lookup values starting from X to only expire tasks between <now> - 2^31 and
 * <now>. If the end of the tree is reached while walking over it, we simply
 * loop back to the beginning. That way, we have no problem keeping sorted
 * wrapping timers in a tree, between (now - 24 days) and (now + 24 days). The
 * keys in the tree always reflect their real position, none can be infinite.
 * This reduces the number of checks to be performed.
 *
 * Another nice optimisation is to allow a timer to stay at an old place in the
 * queue as long as it's not further than the real expiration date. That way,
 * we use the tree as a place holder for a minorant of the real expiration
 * date. Since we have a very low chance of hitting a timeout anyway, we can
 * bounce the nodes to their right place when we scan the tree if we encounter
 * a misplaced node once in a while. This even allows us not to remove the
 * infinite timers from the wait queue.
 *
 * So, to summarize, we have:
 *   - node->key always defines the current position in the wait queue
 *   - timer is the real expiration date (possibly infinite)
 *   - node->key is always before or equal to timer
 *
 * The run queue works similarly to the wait queue except that the current date
 * is replaced by an insertion counter which can also wrap without any problem.
 */

/* The farthest we can look back in a timer tree */
#define TIMER_LOOK_BACK       (1U << 31)
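
/* Illustrative note (not part of the original header): ordering of wrapping
 * ticks is decided by their 32-bit difference. For example, with
 * now = 0xfffffff0 and exp = 0x00000010, exp - now = 0x20 < 2^31, so <exp>
 * is considered to be in the future although it is numerically smaller.
 * This is essentially the property tick_is_lt() relies on, and it is what
 * bounds the usable window to roughly +/- 24 days (2^31 ms) around <now>.
 */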

/* a few exported variables */
extern unsigned int nb_tasks;     /* total number of tasks */
extern volatile unsigned long global_tasks_mask; /* Mask of threads with tasks in the global runqueue */
extern unsigned int tasks_run_queue;    /* run queue size */
extern unsigned int tasks_run_queue_cur;
extern unsigned int nb_tasks_cur;
extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
extern struct pool_head *pool_head_task;
extern struct pool_head *pool_head_tasklet;
extern struct pool_head *pool_head_notification;
extern THREAD_LOCAL struct task_per_thread *sched; /* current thread's scheduler context */
#ifdef USE_THREAD
extern struct eb_root timers;      /* sorted timers tree, global */
extern struct eb_root rqueue;      /* tree constituting the run queue */
extern int global_rqueue_size;     /* number of elements in the global runqueue */
#endif

extern struct task_per_thread task_per_thread[MAX_THREADS];

__decl_thread(extern HA_SPINLOCK_T rq_lock);  /* spin lock related to run queue */
__decl_thread(extern HA_RWLOCK_T wq_lock);    /* RW lock related to the wait queue */

static inline struct task *task_unlink_wq(struct task *t);
static inline void task_queue(struct task *task);

/* returns non-zero if the task is in the run queue, otherwise zero */
static inline int task_in_rq(struct task *t)
{
        /* Check if leaf_p is NULL, in case it's not in the runqueue, and if
         * it's not 0x1, which would mean it's in the tasklet list.
         */
        return t->rq.node.leaf_p != NULL;
}

/* returns non-zero if the task is in the wait queue, otherwise zero */
static inline int task_in_wq(struct task *t)
{
        return t->wq.node.leaf_p != NULL;
}

/* Puts the task <t> in the run queue with reason flags <f>, and returns <t>.
 * The task goes to the local runqueue if it is only runnable by the current
 * thread, and to the global runqueue otherwise.
 */
void __task_wakeup(struct task *t, struct eb_root *);
static inline void task_wakeup(struct task *t, unsigned int f)
{
        unsigned short state;

#ifdef USE_THREAD
        struct eb_root *root;

        if (t->thread_mask == tid_bit || global.nbthread == 1)
                root = &sched->rqueue;
        else
                root = &rqueue;
#else
        struct eb_root *root = &sched->rqueue;
#endif

        state = _HA_ATOMIC_OR(&t->state, f);
        while (!(state & (TASK_RUNNING | TASK_QUEUED))) {
                if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_QUEUED)) {
                        __task_wakeup(t, root);
                        break;
                }
        }
}
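
/* Usage sketch (illustrative, not from the original file): a caller simply
 * passes the wakeup reason flags, e.g.:
 *
 *     task_wakeup(t, TASK_WOKEN_MSG);
 *
 * The CAS loop above ensures that a task which is neither running nor already
 * queued gets queued exactly once, even if several threads race on the wakeup.
 */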

/* change the thread affinity of a task to <thread_mask>.
 * This may only be done from within the running task itself or during its
 * initialization. It will unqueue and requeue the task from the wait queue
 * if it was in it. This is safe against a concurrent task_queue() call because
 * task_queue() itself will unlink again if needed after taking into account
 * the new thread_mask.
 */
static inline void task_set_affinity(struct task *t, unsigned long thread_mask)
{
        if (unlikely(task_in_wq(t))) {
                task_unlink_wq(t);
                t->thread_mask = thread_mask;
                task_queue(t);
        }
        else
                t->thread_mask = thread_mask;
}

/*
 * Unlink the task from the wait queue, and possibly update the last_timer
 * pointer. A pointer to the task itself is returned. The task *must* already
 * be in the wait queue before calling this function. If unsure, use the safer
 * task_unlink_wq() function.
 */
static inline struct task *__task_unlink_wq(struct task *t)
{
        eb32_delete(&t->wq);
        return t;
}

/* Remove a task from its wait queue. It may be either the local wait queue if
 * the task is bound to a single thread, or the global queue. If the task uses
 * a shared wait queue, the global wait queue lock is used.
 */
static inline struct task *task_unlink_wq(struct task *t)
{
        unsigned long locked;

        if (likely(task_in_wq(t))) {
                locked = t->state & TASK_SHARED_WQ;
                if (locked)
                        HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
                __task_unlink_wq(t);
                if (locked)
                        HA_RWLOCK_WRUNLOCK(TASK_WQ_LOCK, &wq_lock);
        }
        return t;
}

/*
 * Unlink the task from the run queue. The tasks_run_queue size and number of
 * niced tasks are updated too. A pointer to the task itself is returned. The
 * task *must* already be in the run queue before calling this function. If
 * unsure, use the safer task_unlink_rq() function. Note that the pointer to the
 * next run queue entry is neither checked nor updated.
 */
static inline struct task *__task_unlink_rq(struct task *t)
{
        _HA_ATOMIC_SUB(&tasks_run_queue, 1);
#ifdef USE_THREAD
        if (t->state & TASK_GLOBAL) {
                _HA_ATOMIC_AND(&t->state, ~TASK_GLOBAL);
                global_rqueue_size--;
        } else
#endif
                sched->rqueue_size--;
        eb32sc_delete(&t->rq);
        if (likely(t->nice))
                _HA_ATOMIC_SUB(&niced_tasks, 1);
        return t;
}

/* This function unlinks task <t> from the run queue if it is in it. It also
 * takes care of updating the next run queue task if it was this task.
 */
static inline struct task *task_unlink_rq(struct task *t)
{
        int is_global = t->state & TASK_GLOBAL;

        if (is_global)
                HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
        if (likely(task_in_rq(t)))
                __task_unlink_rq(t);
        if (is_global)
                HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
        return t;
}

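/* Schedule tasklet <tl> to run. If tl->tid is negative, it is queued on the
 * calling thread: self-waking tasklets go to the bulk class, others to the
 * urgent class. Otherwise it is appended to the shared tasklet list of thread
 * <tl->tid>, and that thread is woken up if it is sleeping.
 */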
static inline void tasklet_wakeup(struct tasklet *tl)
{
        if (likely(tl->tid < 0)) {
                /* this tasklet runs on the caller thread */
                if (LIST_ISEMPTY(&tl->list)) {
                        if (tl->state & TASK_SELF_WAKING) {
                                LIST_ADDQ(&task_per_thread[tid].tasklets[TL_BULK], &tl->list);
                        }
                        else if ((struct task *)tl == sched->current) {
                                _HA_ATOMIC_OR(&tl->state, TASK_SELF_WAKING);
                                LIST_ADDQ(&task_per_thread[tid].tasklets[TL_BULK], &tl->list);
                        }
                        else {
                                LIST_ADDQ(&task_per_thread[tid].tasklets[TL_URGENT], &tl->list);
                        }
                        _HA_ATOMIC_ADD(&tasks_run_queue, 1);
                }
        } else {
                /* this tasklet runs on a specific thread */
                if (MT_LIST_ADDQ(&task_per_thread[tl->tid].shared_tasklet_list, (struct mt_list *)&tl->list) == 1) {
                        _HA_ATOMIC_ADD(&tasks_run_queue, 1);
                        if (sleeping_thread_mask & (1UL << tl->tid)) {
                                _HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << tl->tid));
                                wake_thread(tl->tid);
                        }
                }
        }
}

/* Insert a tasklet into the tasklet list. If used with a plain task instead,
 * the caller must update the task_list_size.
 */
static inline void tasklet_insert_into_tasklet_list(struct list *list, struct tasklet *tl)
{
        _HA_ATOMIC_ADD(&tasks_run_queue, 1);
        LIST_ADDQ(list, &tl->list);
}

/* Remove the tasklet from the tasklet list. The tasklet MUST already be there.
 * If unsure, use tasklet_remove_from_tasklet_list() instead. If used with a
 * plain task, the caller must update the task_list_size.
 * This should only be used by the thread that owns the tasklet; any other
 * thread should use tasklet_cancel().
 */
static inline void __tasklet_remove_from_tasklet_list(struct tasklet *t)
{
        LIST_DEL_INIT(&t->list);
        _HA_ATOMIC_SUB(&tasks_run_queue, 1);
}

static inline void tasklet_remove_from_tasklet_list(struct tasklet *t)
{
        if (MT_LIST_DEL((struct mt_list *)&t->list))
                _HA_ATOMIC_SUB(&tasks_run_queue, 1);
}

/*
 * Initialize a new task. The bare minimum is performed (queue pointers and
 * state). The task is returned. This function should not be used outside of
 * task_new(). If the thread mask contains more than one thread, TASK_SHARED_WQ
 * is set.
 */
static inline struct task *task_init(struct task *t, unsigned long thread_mask)
{
        t->wq.node.leaf_p = NULL;
        t->rq.node.leaf_p = NULL;
        t->state = TASK_SLEEPING;
        t->thread_mask = thread_mask;
        if (atleast2(thread_mask))
                t->state |= TASK_SHARED_WQ;
        t->nice = 0;
        t->calls = 0;
        t->call_date = 0;
        t->cpu_time = 0;
        t->lat_time = 0;
        t->expire = TICK_ETERNITY;
        return t;
}

/* Initialize a new tasklet. It's identified as a tasklet by ->nice=-32768. It
 * is expected to run on the calling thread by default; it's up to the caller
 * to change ->tid if it wants to own it.
 */
static inline void tasklet_init(struct tasklet *t)
{
        t->nice = -32768;
        t->calls = 0;
        t->state = 0;
        t->process = NULL;
        t->tid = -1;
        LIST_INIT(&t->list);
}

/* Allocate and initialize a new tasklet, local to the thread by default. The
 * caller may assign its tid if it wants to own the tasklet.
 */
static inline struct tasklet *tasklet_new(void)
{
        struct tasklet *t = pool_alloc(pool_head_tasklet);

        if (t) {
                tasklet_init(t);
        }
        return t;
}
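
/* Usage sketch (illustrative; my_handler and my_ctx are made-up names):
 *
 *     struct tasklet *tl = tasklet_new();
 *     if (tl) {
 *             tl->process = my_handler;  // struct task *my_handler(struct task *, void *, unsigned short)
 *             tl->context = my_ctx;
 *             tasklet_wakeup(tl);        // runs on the calling thread since tid == -1
 *     }
 *
 * To have another thread run it, call tasklet_set_tid() before the first wakeup.
 */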

/*
 * Allocate and initialise a new task. The new task is returned, or NULL in
 * case of lack of memory. The task count is incremented. Tasks should only
 * be allocated this way, and must be freed using task_destroy().
 */
static inline struct task *task_new(unsigned long thread_mask)
{
        struct task *t = pool_alloc(pool_head_task);
        if (t) {
                _HA_ATOMIC_ADD(&nb_tasks, 1);
                task_init(t, thread_mask);
        }
        return t;
}
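
/* Usage sketch (illustrative; my_timer_handler, my_ctx and the 5s delay are
 * assumptions):
 *
 *     struct task *t = task_new(tid_bit);          // bound to the current thread
 *     if (t) {
 *             t->process = my_timer_handler;
 *             t->context = my_ctx;
 *             t->expire  = tick_add(now_ms, 5000); // fire in about 5 seconds
 *             task_queue(t);                       // insert into the wait queue
 *     }
 *
 * Passing a mask covering several threads would make task_init() set
 * TASK_SHARED_WQ and route the task through the global, locked queues.
 */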

/*
 * Free a task. Its context must have been freed since it will be lost. The
 * task count is decremented. If it is the current task, this one is reset.
 */
static inline void __task_free(struct task *t)
{
        if (t == sched->current) {
                sched->current = NULL;
                __ha_barrier_store();
        }
        pool_free(pool_head_task, t);
        if (unlikely(stopping))
                pool_flush(pool_head_task);
        _HA_ATOMIC_SUB(&nb_tasks, 1);
}

/* Destroys a task: it is unlinked from the wait queue and freed if it is the
 * current task or is not queued; otherwise it is marked to be freed by the
 * scheduler. It does nothing if <t> is NULL.
 */
static inline void task_destroy(struct task *t)
{
        if (!t)
                return;

        task_unlink_wq(t);
        /* We don't have to explicitly remove from the run queue.
         * If we are in the runqueue, the test below will set t->process
         * to NULL, and the task will be freed when its turn to run comes.
         */

        /* There's no need to protect t->state with a lock, as the task
         * has to run on the current thread.
         */
        if (t == sched->current || !(t->state & (TASK_QUEUED | TASK_RUNNING)))
                __task_free(t);
        else
                t->process = NULL;
}

/* Should only be called by the thread responsible for the tasklet */
static inline void tasklet_free(struct tasklet *tl)
{
        if (MT_LIST_DEL((struct mt_list *)&tl->list))
                _HA_ATOMIC_SUB(&tasks_run_queue, 1);

        pool_free(pool_head_tasklet, tl);
        if (unlikely(stopping))
                pool_flush(pool_head_tasklet);
}

static inline void tasklet_set_tid(struct tasklet *tl, int tid)
{
        tl->tid = tid;
}

void __task_queue(struct task *task, struct eb_root *wq);

/* Place <task> into the wait queue, where it may already be. If the expiration
 * timer is infinite, do nothing and rely on wake_expired_tasks() to clean up.
 * If the task uses a shared wait queue, it's queued into the global wait queue,
 * protected by the global wq_lock; otherwise it necessarily belongs to the
 * current thread's wait queue and is queued without locking.
 */
static inline void task_queue(struct task *task)
{
        /* If we already have a place in the wait queue no later than the
         * timeout we're trying to set, we'll stay there, because it is very
         * unlikely that we will reach the timeout anyway. If the timeout
         * has been disabled, it's useless to leave the queue as well. We'll
         * rely on wake_expired_tasks() to catch the node and move it to the
         * proper place should it ever happen. Finally we only add the task
         * to the queue if it was not there or if it was further than what
         * we want.
         */
        if (!tick_isset(task->expire))
                return;

#ifdef USE_THREAD
        if (task->state & TASK_SHARED_WQ) {
                HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
                if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
                        __task_queue(task, &timers);
                HA_RWLOCK_WRUNLOCK(TASK_WQ_LOCK, &wq_lock);
        } else
#endif
        {
                BUG_ON((task->thread_mask & tid_bit) == 0); // should have TASK_SHARED_WQ
                if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
                        __task_queue(task, &sched->timers);
        }
}

/* Ensure <task> will be woken up at most at <when>. If the task is already in
 * the run queue (but not running), nothing is done. It may be used that way
 * with a delay: task_schedule(task, tick_add(now_ms, delay));
 */
static inline void task_schedule(struct task *task, int when)
{
        /* TODO: mthread, check that there is no risk with this test */
        if (task_in_rq(task))
                return;

#ifdef USE_THREAD
        if (task->state & TASK_SHARED_WQ) {
                /* FIXME: is it really needed to lock the WQ during the check ? */
                HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
                if (task_in_wq(task))
                        when = tick_first(when, task->expire);

                task->expire = when;
                if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
                        __task_queue(task, &timers);
                HA_RWLOCK_WRUNLOCK(TASK_WQ_LOCK, &wq_lock);
        } else
#endif
        {
                BUG_ON((task->thread_mask & tid_bit) == 0); // should have TASK_SHARED_WQ
                if (task_in_wq(task))
                        when = tick_first(when, task->expire);

                task->expire = when;
                if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
                        __task_queue(task, &sched->timers);
        }
}

/* This function registers a new signal. "lua" is the current Lua execution
 * context. It contains a pointer to the associated task. "link" is a list
 * head attached to another task that must wake the Lua task if an event
 * occurs. This is useful with external events like TCP I/O or sleep
 * functions. This function allocates memory for the signal.
 */
static inline struct notification *notification_new(struct list *purge, struct list *event, struct task *wakeup)
{
        struct notification *com = pool_alloc(pool_head_notification);
        if (!com)
                return NULL;
        LIST_ADDQ(purge, &com->purge_me);
        LIST_ADDQ(event, &com->wake_me);
        HA_SPIN_INIT(&com->lock);
        com->task = wakeup;
        return com;
}
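
/* Usage sketch (illustrative; the list heads and lua_task are assumptions):
 *
 *     struct list purge = LIST_HEAD_INIT(purge);   // owned by the Lua context
 *     struct list event = LIST_HEAD_INIT(event);   // attached to the watched object
 *
 *     notification_new(&purge, &event, lua_task);  // register the signal
 *     ...
 *     notification_wake(&event);                   // producer side: wake lua_task
 *     notification_purge(&purge);                  // consumer side: drop what remains
 */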

/* This function purges all the pending signals when the Lua execution
 * is finished. This prevents a coprocess from trying to wake a deleted
 * task. This function removes the memory associated with the signals.
 * The purge list is not locked because it is owned by only one
 * process. Before browsing this list, the caller must ensure it is
 * the only browser.
 */
static inline void notification_purge(struct list *purge)
{
        struct notification *com, *back;

        /* Delete all pending communication signals. */
        list_for_each_entry_safe(com, back, purge, purge_me) {
                HA_SPIN_LOCK(NOTIF_LOCK, &com->lock);
                LIST_DEL(&com->purge_me);
                if (!com->task) {
                        HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
                        pool_free(pool_head_notification, com);
                        continue;
                }
                com->task = NULL;
                HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
        }
}

/* In some cases, the disconnected notifications must be cleared.
 * This function just releases memory blocks. The purge list is not
 * locked because it is owned by only one process. Before browsing
 * this list, the caller must ensure it is the only browser.
 * The "com" is not locked because when com->task is NULL, the
 * notification is no longer used.
 */
static inline void notification_gc(struct list *purge)
{
        struct notification *com, *back;

        /* Delete all pending communication signals. */
        list_for_each_entry_safe (com, back, purge, purge_me) {
                if (com->task)
                        continue;
                LIST_DEL(&com->purge_me);
                pool_free(pool_head_notification, com);
        }
}

/* This function sends signals. It wakes all the tasks attached
 * to a list head, removes the signals, and frees the used
 * memory. The wake list is not locked because it is owned by
 * only one process. Before browsing this list, the caller must
 * ensure it is the only browser.
 */
static inline void notification_wake(struct list *wake)
{
        struct notification *com, *back;

        /* Wake task and delete all pending communication signals. */
        list_for_each_entry_safe(com, back, wake, wake_me) {
                HA_SPIN_LOCK(NOTIF_LOCK, &com->lock);
                LIST_DEL(&com->wake_me);
                if (!com->task) {
                        HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
                        pool_free(pool_head_notification, com);
                        continue;
                }
                task_wakeup(com->task, TASK_WOKEN_MSG);
                com->task = NULL;
                HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
        }
}

/* This function returns true if some notifications are pending. */
static inline int notification_registered(struct list *wake)
{
        return !LIST_ISEMPTY(wake);
}

static inline int thread_has_tasks(void)
{
        return (!!(global_tasks_mask & tid_bit) |
                (sched->rqueue_size > 0) |
                !LIST_ISEMPTY(&sched->tasklets[TL_URGENT]) |
                !LIST_ISEMPTY(&sched->tasklets[TL_NORMAL]) |
                !LIST_ISEMPTY(&sched->tasklets[TL_BULK]) |
                !MT_LIST_ISEMPTY(&sched->shared_tasklet_list));
}

/* adds list item <item> to work list <work> and wakes up the associated task */
static inline void work_list_add(struct work_list *work, struct mt_list *item)
{
        MT_LIST_ADDQ(&work->head, item);
        task_wakeup(work->task, TASK_WOKEN_OTHER);
}

struct work_list *work_list_create(int nbthread,
                                   struct task *(*fct)(struct task *, void *, unsigned short),
                                   void *arg);

void work_list_destroy(struct work_list *work, int nbthread);
int run_tasks_from_list(struct list *list, int max);

/*
 * This does 3 things:
 *   - wake up all expired tasks
 *   - call all runnable tasks
 *   - return the date of next event in <next> or eternity.
 */
void process_runnable_tasks();

/*
 * Extracts all expired timers from the timer queue and wakes up all
 * associated tasks.
 */
void wake_expired_tasks();

/* Checks the next timer for the current thread by looking into its own timer
 * list and the global one. It may return TICK_ETERNITY if no timer is present.
 * Note that the next timer might very well be slightly in the past.
 */
int next_timer_expiry();

/*
 * Delete all tasks before running the master polling loop.
 */
void mworker_cleantasks();

#endif /* _PROTO_TASK_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */