CLEANUP: tasks: use a less confusing name for task_list_size

This one is systematically misunderstood due to its unclear name: it is in
fact the number of plain tasks present in the local tasklet list (which holds
both tasklets and tasks), not the size of that list. Let's call it
"tasks_in_list" to remove some of the confusion.
diff --git a/include/haproxy/task-t.h b/include/haproxy/task-t.h
index 1a103a2..b2a69ee 100644
--- a/include/haproxy/task-t.h
+++ b/include/haproxy/task-t.h
@@ -77,7 +77,7 @@
 	struct mt_list shared_tasklet_list; /* Tasklet to be run, woken up by other threads */
 	struct list tasklets[TL_CLASSES]; /* tasklets (and/or tasks) to run, by class */
 	unsigned int rqueue_ticks; /* Insertion counter for the run queue */
-	int task_list_size;     /* Number of tasks among the tasklets */
+	int tasks_in_list;      /* Number of tasks in the per-thread tasklets list */
 	int current_queue;      /* points to current tasklet list being run, -1 if none */
 	unsigned int rq_total;  /* total size of the run queue, prio_tree + tasklets */
 	struct task *current;   /* current task (not tasklet) */
diff --git a/src/debug.c b/src/debug.c
index 3162d32..209199a 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -174,7 +174,7 @@
 			LIST_ISEMPTY(&task_per_thread[thr].tasklets[TL_NORMAL]) &&
 			LIST_ISEMPTY(&task_per_thread[thr].tasklets[TL_BULK]) &&
 			MT_LIST_ISEMPTY(&task_per_thread[thr].shared_tasklet_list)),
-	              task_per_thread[thr].task_list_size,
+	              task_per_thread[thr].tasks_in_list,
 	              task_per_thread[thr].rq_total,
 	              stuck,
 	              !!(task_profiling_mask & thr_bit));
diff --git a/src/task.c b/src/task.c
index 6dffbde..0287817 100644
--- a/src/task.c
+++ b/src/task.c
@@ -97,7 +97,7 @@
 			MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list,
 			             (struct mt_list *)&((struct tasklet *)t)->list);
 			_HA_ATOMIC_ADD(&task_per_thread[thr].rq_total, 1);
-			_HA_ATOMIC_ADD(&task_per_thread[thr].task_list_size, 1);
+			_HA_ATOMIC_ADD(&task_per_thread[thr].tasks_in_list, 1);
 			if (sleeping_thread_mask & (1UL << thr)) {
 				_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
 				wake_thread(thr);
@@ -485,7 +485,7 @@
 
 		/* OK then this is a regular task */
 
-		_HA_ATOMIC_SUB(&task_per_thread[tid].task_list_size, 1);
+		_HA_ATOMIC_SUB(&task_per_thread[tid].tasks_in_list, 1);
 		if (unlikely(t->call_date)) {
 			uint64_t now_ns = now_mono_time();
 			uint64_t lat = now_ns - t->call_date;
@@ -631,7 +631,7 @@
 	/* pick up to max[TL_NORMAL] regular tasks from prio-ordered run queues */
 	/* Note: the grq lock is always held when grq is not null */
 	picked = 0;
-	budget = max[TL_NORMAL] - tt->task_list_size;
+	budget = max[TL_NORMAL] - tt->tasks_in_list;
 	while (picked < budget) {
 		if ((global_tasks_mask & tid_bit) && !grq) {
 #ifdef USE_THREAD
@@ -693,7 +693,7 @@
 
 	if (picked) {
 		tt->tl_class_mask |= 1 << TL_NORMAL;
-		_HA_ATOMIC_ADD(&tt->task_list_size, picked);
+		_HA_ATOMIC_ADD(&tt->tasks_in_list, picked);
 		_HA_ATOMIC_ADD(&tt->rq_total, picked);
 		activity[tid].tasksw += picked;
 	}