BUG/MEDIUM: task: close a possible data race condition on a tasklet's list link

In issue #958 Ashley Penney reported intermittent crashes on AWS's ARM
nodes which would not happen on x86 nodes. After investigation it turned
out that the Neoverse N1 CPU cores used in the Graviton2 CPU are much
more aggressive than the usual Cortex A53/A72/A55 or any x86 regarding
memory ordering.

The issue that was triggered there is that if a tasklet_wakeup() call
is made on a tasklet scheduled to run on a foreign thread while that
tasklet is just being dequeued to be processed, there can be a race
in two places (the second one is sketched right after this list):
  - if MT_LIST_TRY_ADDQ() happens between MT_LIST_BEHEAD() and
    LIST_SPLICE_END_DETACHED() if the tasklet is alone in the list,
    because the emptiness test matches;

  - if MT_LIST_TRY_ADDQ() happens during LIST_DEL_INIT() in
    run_tasks_from_lists(), then depending on how LIST_DEL_INIT() ends
    up being implemented, it may even corrupt the adjacent nodes while
    they're being reused for the in-tree storage.
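
To make the second window more concrete, here is a reduced standalone
model of it. It uses plain pointers and pthreads instead of the real
LIST/MT_LIST macros, and toy_node, dequeue() and requeue() are made-up
names, so this is only a sketch of the interleaving, not the actual
code:

  /* Toy model of the race: requeue() applies the old logic "the node
   * looks detached, so insert it" while dequeue() is half-way through
   * the successive stores of the unlink. Build with cc -pthread.
   */
  #include <pthread.h>

  struct toy_node { struct toy_node *next, *prev; };

  static struct toy_node head = { &head, &head };
  static struct toy_node item = { &item, &item };

  static void *dequeue(void *arg)   /* owner thread */
  {
          struct toy_node *n = head.next;

          if (n != &head) {
                  n->prev->next = n->next;  /* window opens here       */
                  n->next->prev = n->prev;  /* neighbours may already  */
                  n->next = n->prev = n;    /* have been re-linked     */
          }
          return arg;
  }

  static void *requeue(void *arg)   /* remote wakeup, old logic */
  {
          if (item.next == &item) {         /* racy emptiness test     */
                  item.prev = head.prev;    /* concurrent insertion    */
                  item.next = &head;        /* touches the very nodes  */
                  head.prev->next = &item;  /* the owner is unlinking  */
                  head.prev = &item;
          }
          return arg;
  }

  int main(void)
  {
          pthread_t a, b;

          head.next = head.prev = &item;    /* item is queued once     */
          item.next = item.prev = &head;
          pthread_create(&a, NULL, dequeue, NULL);
          pthread_create(&b, NULL, requeue, NULL);
          pthread_join(a, NULL);
          pthread_join(b, NULL);
          return 0;
  }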

This issue was introduced in 2.2 when support for waking up remote
tasklets was added. Initially the attachment of a tasklet to a list
was enough to know its status and this used to be stable information.
Now it is no longer sufficient to rely on this, so we need to use a
different piece of information.

This patch solves this by adding a new task flag, TASK_IN_LIST, which
is atomically set before attaching a tasklet to a list, and is only
removed after the tasklet is detached from a list. It is checked
by tasklet_wakeup_on() so that the wakeup may only be performed while
the tasklet is out of any list, and is cleared during the state switch
when calling the tasklet. Note that the flag is not set for pure tasks
as it's not needed.
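
The core of the fix is thus a claim-before-insert pattern: only the
thread that flips the bit from off to on may enqueue the tasklet. A
minimal standalone sketch of that idiom with C11 atomics follows;
try_claim() and release() are illustrative names, not HAProxy API:

  #include <stdatomic.h>
  #include <stdbool.h>

  #define IN_LIST 0x0040   /* stands in for TASK_IN_LIST */

  /* Only the single caller that turns the bit on gets true, i.e. the
   * right to perform the one and only list insertion.
   */
  static bool try_claim(_Atomic unsigned short *state)
  {
          unsigned short old = atomic_load(state);

          do {
                  if (old & IN_LIST)
                          return false;   /* already queued by someone */
          } while (!atomic_compare_exchange_weak(state, &old,
                                                 old | IN_LIST));
          return true;
  }

  /* Cleared by the dequeuing thread once the element really left the
   * list, re-opening the door for the next wakeup.
   */
  static void release(_Atomic unsigned short *state)
  {
          atomic_fetch_and(state, (unsigned short)~IN_LIST);
  }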

However this introduces a new special case: the function
tasklet_remove_from_tasklet_list() needs to keep both states in sync
and cannot check both the state and the attachment to a list at the
same time. This function is already limited to being used by the thread
owning the tasklet, so in this case the test remains reliable. However,
just like its predecessors, this function is wrong by design and it
should probably be replaced with a stricter one, a lazy one, or be
totally removed (it's only used in checks to avoid calling a possibly
scheduled event, and when freeing a tasklet). Regardless, for now the
function exists, so the flag is removed only if the deletion could be
performed, which covers all the cases we're interested in regarding the
insertion.
This removal is safe against a concurrent tasklet_wakeup_on() since
MT_LIST_DEL() guarantees an atomic test, and the flag is ultimately
cleared only if the tasklet could be deleted, so it always reflects
the last state.
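
In other words, on the owner thread the ordering is "detach first,
clear the flag only on success". A rough standalone equivalent of that
pairing is shown below; try_detach() is only a placeholder for
MT_LIST_DEL(), not a real function:

  #include <stdatomic.h>
  #include <stdbool.h>

  #define IN_LIST 0x0040   /* stands in for TASK_IN_LIST */

  /* Clearing IN_LIST without a successful detach would let a
   * concurrent wakeup re-insert an element that is still linked.
   */
  static void remove_if_queued(_Atomic unsigned short *state,
                               bool (*try_detach)(void *), void *elem)
  {
          if (try_detach(elem))
                  atomic_fetch_and(state, (unsigned short)~IN_LIST);
  }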

This should be carefully backported as far as 2.2 after some
observation period. This patch depends on previous patch
"MINOR: task: remove __tasklet_remove_from_tasklet_list()".
diff --git a/include/haproxy/task-t.h b/include/haproxy/task-t.h
index cf86c0a..38dd3ec 100644
--- a/include/haproxy/task-t.h
+++ b/include/haproxy/task-t.h
@@ -39,6 +39,7 @@
                                    * threads, must be set before first queue/wakeup */
 #define TASK_SELF_WAKING  0x0010  /* task/tasklet found waking itself */
 #define TASK_KILLED       0x0020  /* task/tasklet killed, may now be freed */
+#define TASK_IN_LIST      0x0040  /* tasklet is in a tasklet list */
 
 #define TASK_WOKEN_INIT   0x0100  /* woken up for initialisation purposes */
 #define TASK_WOKEN_TIMER  0x0200  /* woken up because of expired timer */
diff --git a/include/haproxy/task.h b/include/haproxy/task.h
index d5cf3b1..a02fd2f 100644
--- a/include/haproxy/task.h
+++ b/include/haproxy/task.h
@@ -322,43 +322,50 @@
 }
 
 /* schedules tasklet <tl> to run onto thread <thr> or the current thread if
- * <thr> is negative.
+ * <thr> is negative. Note that it is illegal to wakeup a foreign tasklet if
+ * its tid is negative and it is illegal to self-assign a tasklet that was
+ * at least once scheduled on a specific thread.
  */
 static inline void tasklet_wakeup_on(struct tasklet *tl, int thr)
 {
+	unsigned short state = tl->state;
+
+	do {
+		/* do nothing if someone else already added it */
+		if (state & TASK_IN_LIST)
+			return;
+	} while (!_HA_ATOMIC_CAS(&tl->state, &state, state | TASK_IN_LIST));
+
+	/* at this point we're the first ones to add this task to the list */
+
 	if (likely(thr < 0)) {
 		/* this tasklet runs on the caller thread */
-		if (LIST_ISEMPTY(&tl->list)) {
-			if (tl->state & TASK_SELF_WAKING) {
-				LIST_ADDQ(&sched->tasklets[TL_BULK], &tl->list);
-				sched->tl_class_mask |= 1 << TL_BULK;
-			}
-			else if ((struct task *)tl == sched->current) {
-				_HA_ATOMIC_OR(&tl->state, TASK_SELF_WAKING);
-				LIST_ADDQ(&sched->tasklets[TL_BULK], &tl->list);
-				sched->tl_class_mask |= 1 << TL_BULK;
-			}
-			else if (sched->current_queue < 0) {
-				LIST_ADDQ(&sched->tasklets[TL_URGENT], &tl->list);
-				sched->tl_class_mask |= 1 << TL_URGENT;
-			}
-			else {
-				LIST_ADDQ(&sched->tasklets[sched->current_queue], &tl->list);
-				sched->tl_class_mask |= 1 << sched->current_queue;
-			}
-
-			_HA_ATOMIC_ADD(&tasks_run_queue, 1);
+		if (tl->state & TASK_SELF_WAKING) {
+			LIST_ADDQ(&sched->tasklets[TL_BULK], &tl->list);
+			sched->tl_class_mask |= 1 << TL_BULK;
+		}
+		else if ((struct task *)tl == sched->current) {
+			_HA_ATOMIC_OR(&tl->state, TASK_SELF_WAKING);
+			LIST_ADDQ(&sched->tasklets[TL_BULK], &tl->list);
+			sched->tl_class_mask |= 1 << TL_BULK;
+		}
+		else if (sched->current_queue < 0) {
+			LIST_ADDQ(&sched->tasklets[TL_URGENT], &tl->list);
+			sched->tl_class_mask |= 1 << TL_URGENT;
+		}
+		else {
+			LIST_ADDQ(&sched->tasklets[sched->current_queue], &tl->list);
+			sched->tl_class_mask |= 1 << sched->current_queue;
 		}
 	} else {
-		/* this tasklet runs on a specific thread */
-		if (MT_LIST_TRY_ADDQ(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&tl->list) == 1) {
-			_HA_ATOMIC_ADD(&tasks_run_queue, 1);
-			if (sleeping_thread_mask & (1UL << thr)) {
-				_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
-				wake_thread(thr);
-			}
+		/* this tasklet runs on a specific thread. */
+		MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&tl->list);
+		if (sleeping_thread_mask & (1UL << thr)) {
+			_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
+			wake_thread(thr);
 		}
 	}
+	_HA_ATOMIC_ADD(&tasks_run_queue, 1);
 }
 
 /* schedules tasklet <tl> to run onto the thread designated by tl->tid, which
@@ -371,11 +378,16 @@
 
 /* Try to remove a tasklet from the list. This call is inherently racy and may
  * only be performed on the thread that was supposed to dequeue this tasklet.
+ * This way it is safe to call MT_LIST_DEL without first removing the
+ * TASK_IN_LIST bit, which must absolutely be removed afterwards in case
+ * another thread would want to wake this tasklet up in parallel.
  */
 static inline void tasklet_remove_from_tasklet_list(struct tasklet *t)
 {
-	if (MT_LIST_DEL((struct mt_list *)&t->list))
+	if (MT_LIST_DEL((struct mt_list *)&t->list)) {
+		_HA_ATOMIC_AND(&t->state, ~TASK_IN_LIST);
 		_HA_ATOMIC_SUB(&tasks_run_queue, 1);
+	}
 }
 
 /*
diff --git a/src/task.c b/src/task.c
index d1e06ce..92c771f 100644
--- a/src/task.c
+++ b/src/task.c
@@ -461,9 +461,10 @@
 		_HA_ATOMIC_SUB(&tasks_run_queue, 1);
 
 		if (TASK_IS_TASKLET(t)) {
+			LIST_DEL_INIT(&((struct tasklet *)t)->list);
+			__ha_barrier_store();
 			state = _HA_ATOMIC_XCHG(&t->state, state);
 			__ha_barrier_atomic_store();
-			LIST_DEL_INIT(&((struct tasklet *)t)->list);
 			process(t, ctx, state);
 			done++;
 			sched->current = NULL;
@@ -471,9 +472,10 @@
 			continue;
 		}
 
+		LIST_DEL_INIT(&((struct tasklet *)t)->list);
+		__ha_barrier_store();
 		state = _HA_ATOMIC_XCHG(&t->state, state | TASK_RUNNING);
 		__ha_barrier_atomic_store();
-		LIST_DEL_INIT(&((struct tasklet *)t)->list);
 
 		/* OK then this is a regular task */