CLEANUP: lists/tree-wide: rename some list operations to avoid some confusion

The current "ADD" vs "ADDQ" naming is confusing: when thinking in
terms of appending at the end of a list, "ADD" naturally comes to
mind, yet it does the opposite and inserts at the head. It has
already been misused several times where ADDQ was expected, the
latest occurrence being a fortunate accident explained in 6fa922562
("CLEANUP: stream: explain why we queue the stream at the head of
the server list").

Let's use more explicit (but slightly longer) names now:

   LIST_ADD        ->       LIST_INSERT
   LIST_ADDQ       ->       LIST_APPEND
   LIST_ADDED      ->       LIST_INLIST
   LIST_DEL        ->       LIST_DELETE

The same is true for MT_LISTs, including their "TRY" variant.
LIST_DEL_INIT keeps its short name to encourage its use instead of the
lazier LIST_DELETE, which is often less safe.
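
As a quick illustration of the new semantics (a minimal sketch only:
the "struct item" type, the example() function and the variable names
are invented for this message, only the LIST_* macros are real):

    #include <haproxy/list.h>

    struct item {
            struct list list;       /* chains the item into a list (example-only type) */
            int value;
    };

    static void example(void)
    {
            struct list head = LIST_HEAD_INIT(head);
            struct item a = { .value = 1 }, b = { .value = 2 };

            LIST_APPEND(&head, &a.list);    /* was LIST_ADDQ: a becomes the last element  */
            LIST_INSERT(&head, &b.list);    /* was LIST_ADD:  b becomes the first element */

            if (LIST_INLIST(&a.list))       /* was LIST_ADDED: a is attached to a list    */
                    LIST_DEL_INIT(&a.list); /* detach and reinit; safer than LIST_DELETE  */
    }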

The change is large (~674 non-comment entries) but is mechanical enough
to remain safe. No permutation was performed (no old name was reused
for a different operation), so any out-of-tree code can easily map
older names to new ones.

The list doc was updated.
diff --git a/src/task.c b/src/task.c
index ca678d3..6763327 100644
--- a/src/task.c
+++ b/src/task.c
@@ -92,7 +92,7 @@
 			thr = my_ffsl(t->thread_mask) - 1;
 
 			/* Beware: tasks that have never run don't have their ->list empty yet! */
-			MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list,
+			MT_LIST_APPEND(&task_per_thread[thr].shared_tasklet_list,
 			             (struct mt_list *)&((struct tasklet *)t)->list);
 			_HA_ATOMIC_INC(&task_per_thread[thr].rq_total);
 			_HA_ATOMIC_INC(&task_per_thread[thr].tasks_in_list);
@@ -115,30 +115,30 @@
 	if (likely(thr < 0)) {
 		/* this tasklet runs on the caller thread */
 		if (tl->state & TASK_HEAVY) {
-			LIST_ADDQ(&sched->tasklets[TL_HEAVY], &tl->list);
+			LIST_APPEND(&sched->tasklets[TL_HEAVY], &tl->list);
 			sched->tl_class_mask |= 1 << TL_HEAVY;
 		}
 		else if (tl->state & TASK_SELF_WAKING) {
-			LIST_ADDQ(&sched->tasklets[TL_BULK], &tl->list);
+			LIST_APPEND(&sched->tasklets[TL_BULK], &tl->list);
 			sched->tl_class_mask |= 1 << TL_BULK;
 		}
 		else if ((struct task *)tl == sched->current) {
 			_HA_ATOMIC_OR(&tl->state, TASK_SELF_WAKING);
-			LIST_ADDQ(&sched->tasklets[TL_BULK], &tl->list);
+			LIST_APPEND(&sched->tasklets[TL_BULK], &tl->list);
 			sched->tl_class_mask |= 1 << TL_BULK;
 		}
 		else if (sched->current_queue < 0) {
-			LIST_ADDQ(&sched->tasklets[TL_URGENT], &tl->list);
+			LIST_APPEND(&sched->tasklets[TL_URGENT], &tl->list);
 			sched->tl_class_mask |= 1 << TL_URGENT;
 		}
 		else {
-			LIST_ADDQ(&sched->tasklets[sched->current_queue], &tl->list);
+			LIST_APPEND(&sched->tasklets[sched->current_queue], &tl->list);
 			sched->tl_class_mask |= 1 << sched->current_queue;
 		}
 		_HA_ATOMIC_INC(&sched->rq_total);
 	} else {
 		/* this tasklet runs on a specific thread. */
-		MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&tl->list);
+		MT_LIST_APPEND(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&tl->list);
 		_HA_ATOMIC_INC(&task_per_thread[thr].rq_total);
 		if (sleeping_thread_mask & (1UL << thr)) {
 			_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
@@ -688,7 +688,7 @@
 	 * 100% due to rounding, this is not a problem. Note that while in
 	 * theory the sum cannot be NULL as we cannot get there without tasklets
 	 * to process, in practice it seldom happens when multiple writers
-	 * conflict and rollback on MT_LIST_TRY_ADDQ(shared_tasklet_list), causing
+	 * conflict and rollback on MT_LIST_TRY_APPEND(shared_tasklet_list), causing
 	 * a first MT_LIST_ISEMPTY() to succeed for thread_has_task() and the
 	 * one above to finally fail. This is extremely rare and not a problem.
 	 */
@@ -766,7 +766,7 @@
 			_HA_ATOMIC_DEC(&niced_tasks);
 
 		/* Add it to the local task list */
-		LIST_ADDQ(&tt->tasklets[TL_NORMAL], &((struct tasklet *)t)->list);
+		LIST_APPEND(&tt->tasklets[TL_NORMAL], &((struct tasklet *)t)->list);
 	}
 
 	/* release the rqueue lock */