BUG/MEDIUM: task: Don't free a task that is about to be run.

While running a task, we may try to delete and free another task that is
itself about to be run, either because it's part of the local tasks list
or because rq_next points to it; the scheduler would then access freed
memory.
So flag any such task present in the local tasks list for deletion
instead of running it, by setting t->process to NULL, and make rq_next a
global (thread-local) variable again, advanced past the task whenever we
attempt to delete it.

Many thanks to PiBa-NL for reporting this and analysing the problem.

This should be backported to 1.8.
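
For reviewers, a minimal self-contained sketch of the deferred-free
pattern introduced here. All mini_* names are hypothetical and for
illustration only; the real task_free()/scheduler pair additionally has
to deal with the run queue, the wait queue and locking:

#include <stdio.h>
#include <stdlib.h>

#define TASK_RUNNING 0x01

struct mini_task {
	unsigned int state;
	struct mini_task *(*process)(struct mini_task *t);
};

static struct mini_task *mini_curr_task; /* task being run, or NULL */
static struct mini_task *victim;         /* sibling the handler kills */

/* The rule this patch introduces: the current task, or one not picked
 * for this run, may be freed at once; a task already picked is only
 * flagged (->process = NULL) and the runner reaps it later.
 */
static void mini_task_free(struct mini_task *t)
{
	if (t == mini_curr_task || !(t->state & TASK_RUNNING))
		free(t);
	else
		t->process = NULL; /* defer: reap instead of run */
}

static struct mini_task *handler(struct mini_task *t)
{
	if (t != victim)
		mini_task_free(victim); /* kills a task picked for this run */
	return t;
}

int main(void)
{
	struct mini_task *picked[2];
	int i;

	for (i = 0; i < 2; i++) {
		picked[i] = calloc(1, sizeof(**picked));
		picked[i]->state = TASK_RUNNING; /* both picked to run */
		picked[i]->process = handler;
	}
	victim = picked[1];

	for (i = 0; i < 2; i++) {
		struct mini_task *t = picked[i];

		mini_curr_task = t;
		if (t->process)
			t->process(t); /* normal case: run the handler */
		else
			free(t);       /* flagged while waiting: reap, don't run */
		mini_curr_task = NULL;
	}
	free(picked[0]); /* the first task survived its run */
	printf("no use-after-free\n");
	return 0;
}

Reusing t->process as the kill flag avoids adding a state bit: the
scheduler already loads that pointer before calling it, so the NULL
check costs nothing on the fast path.
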
diff --git a/include/proto/task.h b/include/proto/task.h
index cbc1a90..c1c4c07 100644
--- a/include/proto/task.h
+++ b/include/proto/task.h
@@ -90,6 +90,8 @@
 extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
 extern struct pool_head *pool_head_task;
 extern struct pool_head *pool_head_notification;
+extern THREAD_LOCAL struct task *curr_task; /* task currently running or NULL */
+extern THREAD_LOCAL struct eb32sc_node *rq_next; /* Next task to be potentially run */
 
 __decl_hathreads(extern HA_SPINLOCK_T rq_lock);  /* spin lock related to run queue */
 __decl_hathreads(extern HA_SPINLOCK_T wq_lock);  /* spin lock related to wait queue */
@@ -177,8 +179,11 @@
 static inline struct task *task_unlink_rq(struct task *t)
 {
 	HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
-	if (likely(task_in_rq(t)))
+	if (likely(task_in_rq(t))) {
+		if (&t->rq == rq_next)
+			rq_next = eb32sc_next(rq_next, tid_bit);
 		__task_unlink_rq(t);
+	}
 	HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
 	return t;
 }
@@ -230,7 +235,7 @@
  * Free a task. Its context must have been freed since it will be lost.
  * The task count is decremented.
  */
-static inline void task_free(struct task *t)
+static inline void __task_free(struct task *t)
 {
 	pool_free(pool_head_task, t);
 	if (unlikely(stopping))
@@ -238,6 +243,18 @@
 	HA_ATOMIC_SUB(&nb_tasks, 1);
 }
 
+static inline void task_free(struct task *t)
+{
+	/* There's no need to protect t->state with a lock, as the task
+	 * has to run on the current thread.
+	 */
+	if (t == curr_task || !(t->state & TASK_RUNNING))
+		__task_free(t);
+	else
+		t->process = NULL;
+}
+
+
 /* Place <task> into the wait queue, where it may already be. If the expiration
  * timer is infinite, do nothing and rely on wake_expired_task to clean up.
  */
diff --git a/src/task.c b/src/task.c
index fd9acf6..3d021bb 100644
--- a/src/task.c
+++ b/src/task.c
@@ -39,6 +39,7 @@
 unsigned int niced_tasks = 0;      /* number of niced tasks in the run queue */
 
 THREAD_LOCAL struct task *curr_task = NULL; /* task currently running or NULL */
+THREAD_LOCAL struct eb32sc_node *rq_next = NULL; /* Next task to be potentially run */
 
 __decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) rq_lock); /* spin lock related to run queue */
 __decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) wq_lock); /* spin lock related to wait queue */
@@ -186,7 +187,6 @@
 	struct task *t;
 	int i;
 	int max_processed;
-	struct eb32sc_node *rq_next;
 	struct task *local_tasks[16];
 	int local_tasks_count;
 	int final_tasks_count;
@@ -227,8 +227,14 @@
 			 */
 			if (likely(t->process == process_stream))
 				t = process_stream(t);
-			else
-				t = t->process(t);
+			else {
+				if (t->process != NULL)
+					t = t->process(t);
+				else {
+					__task_free(t);
+					t = NULL;
+				}
+			}
 			curr_task = NULL;
 
 			if (likely(t != NULL)) {
@@ -309,8 +315,14 @@
 			curr_task = t;
 			if (likely(t->process == process_stream))
 				t = process_stream(t);
-			else
-				t = t->process(t);
+			else {
+				if (t->process != NULL)
+					t = t->process(t);
+				else {
+					__task_free(t);
+					t = NULL;
+				}
+			}
 			curr_task = NULL;
 			if (t)
 				local_tasks[final_tasks_count++] = t;
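
Note for reviewers: the rq_next half of the fix is the usual "advance
the cursor before unlinking" idiom. A minimal singly-linked-list
analogue follows (hypothetical mini_* names, illustration only; the
real code walks an eb32sc tree under rq_lock using eb32sc_next()):

struct mini_node {
	struct mini_node *next;
};

/* the walker's saved cursor, like the thread-local rq_next */
static struct mini_node *walk_cursor;

/* Advance the shared cursor past <n> before unlinking it, so the
 * walker never loads ->next from freed memory.
 */
static void mini_unlink(struct mini_node **head, struct mini_node *n)
{
	struct mini_node **pprev;

	if (walk_cursor == n)
		walk_cursor = n->next;

	for (pprev = head; *pprev; pprev = &(*pprev)->next) {
		if (*pprev == n) {
			*pprev = n->next;
			break;
		}
	}
}

/* The walk reloads the cursor after every callback, because the
 * callback may have unlinked any node, including the saved one.
 */
static void mini_walk(struct mini_node *head, void (*cb)(struct mini_node *))
{
	struct mini_node *n = head;

	while (n) {
		walk_cursor = n->next; /* save before the callback runs */
		cb(n);                 /* may unlink/free any node */
		n = walk_cursor;       /* reload: may have been advanced */
	}
}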