MINOR: tasks: do not maintain the rqueue_size counter anymore

This counter is exclusively used as a boolean nowadays: it is non-zero
only when the thread-local run queue is not empty. Better check the root
tree's pointer directly and avoid updating this counter all the time.
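
The change amounts to replacing a manually maintained counter with a
direct emptiness test on the ebtree root. A minimal sketch of the idea
(tp stands for a hypothetical task_per_thread pointer used only for
illustration; eb_is_empty() is the real ebtree helper):

	/* before: every enqueue/dequeue had to keep the counter in sync */
	tp->rqueue_size++;                 /* on insertion */
	tp->rqueue_size--;                 /* on removal */
	if (tp->rqueue_size > 0)
		/* run queue not empty */;

	/* after: the tree's root pointer already encodes emptiness */
	if (!eb_is_empty(&tp->rqueue))
		/* run queue not empty */;

This drops two memory writes per queue operation and leaves a single
pointer load at the few places that only need an empty/not-empty answer.
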
diff --git a/include/haproxy/task-t.h b/include/haproxy/task-t.h
index 34727fa..1a103a2 100644
--- a/include/haproxy/task-t.h
+++ b/include/haproxy/task-t.h
@@ -78,10 +78,9 @@
 	struct list tasklets[TL_CLASSES]; /* tasklets (and/or tasks) to run, by class */
 	unsigned int rqueue_ticks; /* Insertion counter for the run queue */
 	int task_list_size;     /* Number of tasks among the tasklets */
-	int rqueue_size;        /* Number of elements in the per-thread run queue */
 	int current_queue;      /* points to current tasklet list being run, -1 if none */
-	struct task *current;   /* current task (not tasklet) */
 	unsigned int rq_total;  /* total size of the run queue, prio_tree + tasklets */
+	struct task *current;   /* current task (not tasklet) */
 	uint8_t tl_class_mask;  /* bit mask of non-empty tasklets classes */
 	__attribute__((aligned(64))) char end[0];
 };
diff --git a/include/haproxy/task.h b/include/haproxy/task.h
index 5841818..61ac55e 100644
--- a/include/haproxy/task.h
+++ b/include/haproxy/task.h
@@ -179,7 +179,7 @@
 static inline int thread_has_tasks(void)
 {
 	return (!!(global_tasks_mask & tid_bit) |
-	        (sched->rqueue_size > 0) |
+		!eb_is_empty(&sched->rqueue) |
 	        !!sched->tl_class_mask |
 		!MT_LIST_ISEMPTY(&sched->shared_tasklet_list));
 }
@@ -325,7 +325,6 @@
 	else
 #endif
 	{
-		sched->rqueue_size--;
 		_HA_ATOMIC_SUB(&sched->rq_total, 1);
 	}
 	eb32sc_delete(&t->rq);
diff --git a/src/task.c b/src/task.c
index 153f7d6..6dffbde 100644
--- a/src/task.c
+++ b/src/task.c
@@ -150,13 +150,8 @@
 	if (root == &rqueue) {
 		_HA_ATOMIC_OR(&t->state, TASK_GLOBAL);
 		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
-	} else
-#endif
-	{
-		int nb = ((void *)root - (void *)&task_per_thread[0].rqueue) / sizeof(task_per_thread[0]);
-		task_per_thread[nb].rqueue_size++;
 	}
-#ifdef USE_THREAD
+
 	/* If all threads that are supposed to handle this task are sleeping,
 	 * wake one.
 	 */
@@ -428,7 +423,7 @@
 
 			if (unlikely(queue > TL_NORMAL &&
 				     budget_mask & (1 << TL_NORMAL) &&
-				     ((sched->rqueue_size > 0) ||
+				     (!eb_is_empty(&sched->rqueue) ||
 				      (global_tasks_mask & tid_bit)))) {
 				/* a task was woken up by a bulk tasklet or another thread */
 				break;
@@ -609,7 +604,7 @@
 
 	/* normal tasklets list gets a default weight of ~37% */
 	if ((tt->tl_class_mask & (1 << TL_NORMAL)) ||
-	    (sched->rqueue_size > 0) || (global_tasks_mask & tid_bit))
+	    !eb_is_empty(&sched->rqueue) || (global_tasks_mask & tid_bit))
 		max[TL_NORMAL] = default_weights[TL_NORMAL];
 
 	/* bulk tasklets list gets a default weight of ~13% */
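
For reference, eb_is_empty() is a trivial inline from ebtree.h (shown
here as commonly found; the exact form may differ between versions):

	/* Return non-zero if the tree is empty, otherwise zero */
	static inline int eb_is_empty(struct eb_root *root)
	{
		return !root->b[EB_LEFT];
	}

so the new test is a single NULL check on the root's left branch
pointer, no more expensive than reading the old counter was.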