MEDIUM: tasks: Get rid of active_tasks_mask.

Remove the active_tasks_mask variable: whether a thread has work to do can
be deduced by other means, and the mask is costly to maintain. Instead,
introduce a new function, thread_has_tasks(), which returns non-zero if
there are tasks scheduled for the current thread, and zero otherwise.
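
For illustration only (this block is not part of the patch), here is a
minimal sketch of how a poller's sleep decision can be built on top of
thread_has_tasks(), mirroring the src/haproxy.c hunk below. The helper name
may_sleep() is invented for the sketch; it assumes the existing HAProxy
globals and atomics (tid_bit, global_tasks_mask, sleeping_thread_mask,
_HA_ATOMIC_OR/AND, __ha_barrier_atomic_store). The point it illustrates is
that once the thread has advertised itself as sleeping, only the global run
queue needs to be rechecked: the local run queue and task list are only fed
by the thread itself, while other threads hand it work through the global
run queue.

	/* Sketch only: returns non-zero if this thread may go to sleep. */
	static inline int may_sleep(void)
	{
		if (thread_has_tasks())
			return 0; /* local or global work is already pending */

		/* advertise that we are about to sleep ... */
		_HA_ATOMIC_OR(&sleeping_thread_mask, tid_bit);
		__ha_barrier_atomic_store();

		/* ... then recheck the global run queue only: another thread
		 * may have queued a task for us before seeing our bit in
		 * sleeping_thread_mask.
		 */
		if (global_tasks_mask & tid_bit) {
			_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
			return 0;
		}
		return 1;
	}
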
diff --git a/include/proto/task.h b/include/proto/task.h
index 585d890..d39d0f4 100644
--- a/include/proto/task.h
+++ b/include/proto/task.h
@@ -83,7 +83,6 @@
 
 /* a few exported variables */
 extern unsigned int nb_tasks;     /* total number of tasks */
-extern volatile unsigned long active_tasks_mask; /* Mask of threads with active tasks */
 extern volatile unsigned long global_tasks_mask; /* Mask of threads with tasks in the global runqueue */
 extern unsigned int tasks_run_queue;    /* run queue size */
 extern unsigned int tasks_run_queue_cur;
@@ -233,7 +232,6 @@
 		return;
 	LIST_ADDQ(&task_per_thread[tid].task_list, &tl->list);
 	task_per_thread[tid].task_list_size++;
-	_HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
 	_HA_ATOMIC_ADD(&tasks_run_queue, 1);
 
 }
@@ -541,6 +539,17 @@
 	return !LIST_ISEMPTY(wake);
 }
 
+/* Returns non-zero if the current thread has some work to do, i.e. if the
+ * global run queue has tasks for this thread, or if its local run queue or
+ * its task list is not empty. Returns zero otherwise.
+ */
+static inline int thread_has_tasks(void)
+{
+	return (!!(global_tasks_mask & tid_bit) |
+	        (task_per_thread[tid].rqueue_size > 0) |
+	        !LIST_ISEMPTY(&task_per_thread[tid].task_list));
+}
+
 /*
  * This does 3 things :
  *   - wake up all expired tasks
diff --git a/src/debug.c b/src/debug.c
index b34df72..fd760fa 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -47,7 +47,7 @@
 	              "%c%cThread %-2u: act=%d glob=%d wq=%d rq=%d tl=%d tlsz=%d rqsz=%d\n"
 	              "             stuck=%d fdcache=%d prof=%d",
 	              (thr == calling_tid) ? '*' : ' ', stuck ? '>' : ' ', thr + 1,
-	              !!(active_tasks_mask & thr_bit),
+	              thread_has_tasks(),
 	              !!(global_tasks_mask & thr_bit),
 	              !eb_is_empty(&task_per_thread[thr].timers),
 	              !eb_is_empty(&task_per_thread[thr].rqueue),
diff --git a/src/haproxy.c b/src/haproxy.c
index 35dd514..df9a686 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -2528,14 +2528,14 @@
 		wake = 1;
 		if (fd_cache_mask & tid_bit)
 			activity[tid].wake_cache++;
-		else if (active_tasks_mask & tid_bit)
+		else if (thread_has_tasks())
 			activity[tid].wake_tasks++;
 		else if (signal_queue_len && tid == 0)
 			activity[tid].wake_signal++;
 		else {
 			_HA_ATOMIC_OR(&sleeping_thread_mask, tid_bit);
 			__ha_barrier_atomic_store();
-			if (active_tasks_mask & tid_bit) {
+			if (global_tasks_mask & tid_bit) {
 				activity[tid].wake_tasks++;
 				_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
 			} else
diff --git a/src/task.c b/src/task.c
index 0799d00..0ccc7af 100644
--- a/src/task.c
+++ b/src/task.c
@@ -35,7 +35,6 @@
 DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification));
 
 unsigned int nb_tasks = 0;
-volatile unsigned long active_tasks_mask = 0; /* Mask of threads with active tasks */
 volatile unsigned long global_tasks_mask = 0; /* Mask of threads with tasks in the global runqueue */
 unsigned int tasks_run_queue = 0;
 unsigned int tasks_run_queue_cur = 0;    /* copy of the run queue size */
@@ -82,7 +81,6 @@
 		__ha_barrier_store();
 	}
 #endif
-	_HA_ATOMIC_OR(&active_tasks_mask, t->thread_mask);
 	t->rq.key = _HA_ATOMIC_ADD(&rqueue_ticks, 1);
 
 	if (likely(t->nice)) {
@@ -308,7 +306,7 @@
 
 	ti->flags &= ~TI_FL_STUCK; // this thread is still running
 
-	if (!(active_tasks_mask & tid_bit)) {
+	if (!thread_has_tasks()) {
 		activity[tid].empty_rq++;
 		return;
 	}
@@ -381,13 +379,6 @@
 		grq = NULL;
 	}
 
-	if (!(global_tasks_mask & tid_bit) && task_per_thread[tid].rqueue_size == 0) {
-		_HA_ATOMIC_AND(&active_tasks_mask, ~tid_bit);
-		__ha_barrier_atomic_load();
-		if (global_tasks_mask & tid_bit)
-			_HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
-	}
-
 	while (max_processed > 0 && !LIST_ISEMPTY(&task_per_thread[tid].task_list)) {
 		struct task *t;
 		unsigned short state;
@@ -449,10 +440,8 @@
 		max_processed--;
 	}
 
-	if (!LIST_ISEMPTY(&task_per_thread[tid].task_list)) {
-		_HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
+	if (!LIST_ISEMPTY(&task_per_thread[tid].task_list))
 		activity[tid].long_rq++;
-	}
 }
 
 /*