MINOR: task: Rename run_queue and run_queue_cur counters

<run_queue> is used to track the number of tasks in the run queue and
<run_queue_cur> is a copy used for reporting purposes. These counters have
been renamed, respectively, <tasks_run_queue> and <tasks_run_queue_cur>, so the
naming is consistent between tasks and applets.

[wt: needed for next fixes, backport to 1.7 and 1.6]
diff --git a/src/haproxy.c b/src/haproxy.c
index b403ba1..c31ccb0 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -1730,7 +1730,7 @@
 			break;
 
 		/* expire immediately if events are pending */
-		if (fd_cache_num || run_queue || signal_queue_len || applets_active_queue)
+		if (fd_cache_num || tasks_run_queue || signal_queue_len || applets_active_queue)
 			next = now_ms;
 
 		/* The poller will ensure it returns around <next> */
diff --git a/src/stats.c b/src/stats.c
index 0ba6d27..1a842e8 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -2083,7 +2083,7 @@
 	              global.rlimit_nofile,
 	              global.maxsock, global.maxconn, global.maxpipes,
 	              actconn, pipes_used, pipes_used+pipes_free, read_freq_ctr(&global.conn_per_sec),
-	              run_queue_cur, nb_tasks_cur, idle_pct
+	              tasks_run_queue_cur, nb_tasks_cur, idle_pct
 	              );
 
 	/* scope_txt = search query, appctx->ctx.stats.scope_len is always <= STAT_SCOPE_TXT_MAXLEN */
@@ -2996,7 +2996,7 @@
 	info[INF_MAX_ZLIB_MEM_USAGE]             = mkf_u32(FO_CONFIG|FN_LIMIT, global.maxzlibmem);
 #endif
 	info[INF_TASKS]                          = mkf_u32(0, nb_tasks_cur);
-	info[INF_RUN_QUEUE]                      = mkf_u32(0, run_queue_cur);
+	info[INF_RUN_QUEUE]                      = mkf_u32(0, tasks_run_queue_cur);
 	info[INF_IDLE_PCT]                       = mkf_u32(FN_AVG, idle_pct);
 	info[INF_NODE]                           = mkf_str(FO_CONFIG|FN_OUTPUT|FS_SERVICE, global.node);
 	if (global.desc)
diff --git a/src/stream.c b/src/stream.c
index 08f3aa9..055cc23 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -449,7 +449,7 @@
 	struct stream *sess, *bak;
 
 	list_for_each_entry_safe(sess, bak, &buffer_wq, buffer_wait) {
-		if (rqlimit <= run_queue)
+		if (rqlimit <= tasks_run_queue)
 			break;
 
 		if (sess->task->state & TASK_RUNNING)
diff --git a/src/task.c b/src/task.c
index 4a8b907..c99cea8 100644
--- a/src/task.c
+++ b/src/task.c
@@ -26,8 +26,8 @@
 struct pool_head *pool2_task;
 
 unsigned int nb_tasks = 0;
-unsigned int run_queue = 0;
-unsigned int run_queue_cur = 0;    /* copy of the run queue size */
+unsigned int tasks_run_queue = 0;
+unsigned int tasks_run_queue_cur = 0;    /* copy of the run queue size */
 unsigned int nb_tasks_cur = 0;     /* copy of the tasks count */
 unsigned int niced_tasks = 0;      /* number of niced tasks in the run queue */
 struct eb32_node *last_timer = NULL;  /* optimization: last queued timer */
@@ -39,15 +39,15 @@
 
 /* Puts the task <t> in run queue at a position depending on t->nice. <t> is
  * returned. The nice value assigns boosts in 32th of the run queue size. A
- * nice value of -1024 sets the task to -run_queue*32, while a nice value of
- * 1024 sets the task to run_queue*32. The state flags are cleared, so the
- * caller will have to set its flags after this call.
+ * nice value of -1024 sets the task to -tasks_run_queue*32, while a nice value
+ * of 1024 sets the task to tasks_run_queue*32. The state flags are cleared, so
+ * the caller will have to set its flags after this call.
  * The task must not already be in the run queue. If unsure, use the safer
  * task_wakeup() function.
  */
 struct task *__task_wakeup(struct task *t)
 {
-	run_queue++;
+	tasks_run_queue++;
 	t->rq.key = ++rqueue_ticks;
 
 	if (likely(t->nice)) {
@@ -55,9 +55,9 @@
 
 		niced_tasks++;
 		if (likely(t->nice > 0))
-			offset = (unsigned)((run_queue * (unsigned int)t->nice) / 32U);
+			offset = (unsigned)((tasks_run_queue * (unsigned int)t->nice) / 32U);
 		else
-			offset = -(unsigned)((run_queue * (unsigned int)-t->nice) / 32U);
+			offset = -(unsigned)((tasks_run_queue * (unsigned int)-t->nice) / 32U);
 		t->rq.key += offset;
 	}
 
@@ -191,11 +191,11 @@
 	struct task *t;
 	unsigned int max_processed;
 
-	run_queue_cur = run_queue; /* keep a copy for reporting */
+	tasks_run_queue_cur = tasks_run_queue; /* keep a copy for reporting */
 	nb_tasks_cur = nb_tasks;
-	max_processed = run_queue;
+	max_processed = tasks_run_queue;
 
-	if (!run_queue)
+	if (!tasks_run_queue)
 		return;
 
 	if (max_processed > 200)