MINOR: lists: rename some MT_LIST operations to clarify them

Initially when mt_lists were added, their purpose was to be used with
the scheduler, where anyone may concurrently add the same tasklet, so
it sounded natural to implement a check in MT_LIST_ADD{,Q}. Later their
usage was extended and MT_LIST_ADD{,Q} started to be used in situations
where the element to be added was exclusively owned by the one
performing the operation, so a conflict was impossible. This became
more obvious with the idle connections, and the new macro was called
MT_LIST_ADDQ_NOCHECK.

But this remains confusing, and in many places it's not expected that
an MT_LIST_ADD could possibly fail; worse, in some places we start by
initializing the element before adding it (so the test is superfluous).
Let's rename them to something more conventional that denotes whether
or not the check is performed:

   MT_LIST_ADD{,Q}    : unconditional operation, the caller owns the
                        element, and doesn't care about the element's
                        current state (exactly like LIST_ADD)
   MT_LIST_TRY_ADD{,Q}: performs the operation only if the element is
                        not already in a list or in the process of
                        being added.
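
To illustrate the intended usage (a hypothetical sketch, not code from
this patch; the list head, element and counter names are made up):

    /* Element exclusively owned by this thread and known not to be in
     * any list yet: the unchecked variant is enough and always links it.
     */
    MT_LIST_ADDQ(&my_queue, &item->list);

    /* Element that may be queued concurrently by other threads (e.g. a
     * shared tasklet): the checked variant links it only if it is not
     * already in a list, and returns 1 only when it performed the add.
     */
    if (MT_LIST_TRY_ADDQ(&my_queue, &item->list))
            _HA_ATOMIC_ADD(&queued_items, 1);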

This means that the names MT_LIST_ADD{,Q}, which previously designated
the checked ("safe") operations, now designate the unchecked ones, so
they are not "safe" anymore. It also means that if a future backport
overlooks this renaming, the slower but safer checked functions will
still be used by default.

Note that the missing unchecked MT_LIST_ADD macro was added.

The rest of the code will have to be reviewed so that the callers of
MT_LIST_TRY_ADDQ which exclusively own the element they add are changed
to MT_LIST_ADDQ, removing the unneeded test.
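As a sketch of that future cleanup (hypothetical call site, not part of
this patch), a sequence such as:

    MT_LIST_INIT(&item->list);
    MT_LIST_TRY_ADDQ(&some_queue, &item->list);

may safely become:

    MT_LIST_INIT(&item->list);
    MT_LIST_ADDQ(&some_queue, &item->list);

since an element that was just initialized cannot be in a list, so the
check performed by the TRY variant can never fail there.
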
diff --git a/include/haproxy/channel.h b/include/haproxy/channel.h
index 3cd3bb3..004fc03 100644
--- a/include/haproxy/channel.h
+++ b/include/haproxy/channel.h
@@ -850,7 +850,7 @@
 		return 1;
 
 	if (!MT_LIST_ADDED(&wait->list))
-		MT_LIST_ADDQ(&buffer_wq, &wait->list);
+		MT_LIST_TRY_ADDQ(&buffer_wq, &wait->list);
 
 	return 0;
 }
diff --git a/include/haproxy/list.h b/include/haproxy/list.h
index 9d37649..dae5ca2 100644
--- a/include/haproxy/list.h
+++ b/include/haproxy/list.h
@@ -225,7 +225,7 @@
  * Returns 1 if we added the item, 0 otherwise (because it was already in a
  * list).
  */
-#define MT_LIST_ADD(_lh, _el)                                              \
+#define MT_LIST_TRY_ADD(_lh, _el)                                              \
      ({                                                                    \
         int _ret = 0;                                                      \
 	struct mt_list *lh = (_lh), *el = (_el);                           \
@@ -265,7 +265,7 @@
  * Returns 1 if we added the item, 0 otherwise (because it was already in a
  * list).
  */
-#define MT_LIST_ADDQ(_lh, _el)                                             \
+#define MT_LIST_TRY_ADDQ(_lh, _el)                                             \
     ({                                                                     \
 	int _ret = 0;                                                      \
 	struct mt_list *lh = (_lh), *el = (_el);                           \
@@ -301,10 +301,43 @@
     })
 
 /*
+ * Add an item at the beginning of a list.
+ * It is assumed the element can't already be in a list, so it isn't checked.
+ */
+#define MT_LIST_ADD(_lh, _el)                                              \
+     ({                                                                    \
+        int _ret = 0;                                                      \
+	struct mt_list *lh = (_lh), *el = (_el);                           \
+	while (1) {                                                        \
+		struct mt_list *n;                                         \
+		struct mt_list *p;                                         \
+		n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY);            \
+		if (n == MT_LIST_BUSY)                                     \
+		        continue;                                          \
+		p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);               \
+		if (p == MT_LIST_BUSY) {                                   \
+			(lh)->next = n;                                    \
+			__ha_barrier_store();                              \
+			continue;                                          \
+		}                                                          \
+		(el)->next = n;                                            \
+		(el)->prev = p;                                            \
+		__ha_barrier_store();                                      \
+		n->prev = (el);                                            \
+		__ha_barrier_store();                                      \
+		p->next = (el);                                            \
+		__ha_barrier_store();                                      \
+		_ret = 1;                                                  \
+		break;                                                     \
+	}                                                                  \
+	(_ret);                                                            \
+     })
+
+/*
  * Add an item at the end of a list.
  * It is assumed the element can't already be in a list, so it isn't checked
  */
-#define MT_LIST_ADDQ_NOCHECK(_lh, _el)                                     \
+#define MT_LIST_ADDQ(_lh, _el)                                     \
     ({                                                                     \
 	int _ret = 0;                                                      \
 	struct mt_list *lh = (_lh), *el = (_el);                           \
@@ -336,7 +369,7 @@
 /*
  * Detach a list from its head. A pointer to the first element is returned
  * and the list is closed. If the list was empty, NULL is returned. This may
- * exclusively be used with lists modified by MT_LIST_ADD/MT_LIST_ADDQ. This
+ * exclusively be used with lists modified by MT_LIST_TRY_ADD/MT_LIST_TRY_ADDQ. This
  * is incompatible with MT_LIST_DEL run concurrently.
  * If there's at least one element, the next of the last element will always
  * be NULL.
diff --git a/include/haproxy/server.h b/include/haproxy/server.h
index 3ad60d0..9a400fd 100644
--- a/include/haproxy/server.h
+++ b/include/haproxy/server.h
@@ -273,11 +273,11 @@
 		conn->idle_time = now_ms;
 		if (is_safe) {
 			conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_SAFE_LIST;
-			MT_LIST_ADDQ(&srv->safe_conns[tid], (struct mt_list *)&conn->list);
+			MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], (struct mt_list *)&conn->list);
 			_HA_ATOMIC_ADD(&srv->curr_safe_nb, 1);
 		} else {
 			conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_IDLE_LIST;
-			MT_LIST_ADDQ(&srv->idle_conns[tid], (struct mt_list *)&conn->list);
+			MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], (struct mt_list *)&conn->list);
 			_HA_ATOMIC_ADD(&srv->curr_idle_nb, 1);
 		}
 		_HA_ATOMIC_ADD(&srv->curr_idle_thr[tid], 1);
diff --git a/include/haproxy/task.h b/include/haproxy/task.h
index db91991..080240a 100644
--- a/include/haproxy/task.h
+++ b/include/haproxy/task.h
@@ -352,7 +352,7 @@
 		}
 	} else {
 		/* this tasklet runs on a specific thread */
-		if (MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&tl->list) == 1) {
+		if (MT_LIST_TRY_ADDQ(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&tl->list) == 1) {
 			_HA_ATOMIC_ADD(&tasks_run_queue, 1);
 			if (sleeping_thread_mask & (1UL << thr)) {
 				_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
@@ -652,7 +652,7 @@
 /* adds list item <item> to work list <work> and wake up the associated task */
 static inline void work_list_add(struct work_list *work, struct mt_list *item)
 {
-	MT_LIST_ADDQ(&work->head, item);
+	MT_LIST_TRY_ADDQ(&work->head, item);
 	task_wakeup(work->task, TASK_WOKEN_OTHER);
 }
 
diff --git a/src/backend.c b/src/backend.c
index 324d756..4a162a4 100644
--- a/src/backend.c
+++ b/src/backend.c
@@ -1345,7 +1345,7 @@
 				if (tokill_conn) {
 					/* We got one, put it into the concerned thread's to kill list, and wake it's kill task */
 
-					MT_LIST_ADDQ(&idle_conns[i].toremove_conns,
+					MT_LIST_TRY_ADDQ(&idle_conns[i].toremove_conns,
 					    (struct mt_list *)&tokill_conn->list);
 					task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
 					HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
diff --git a/src/flt_spoe.c b/src/flt_spoe.c
index 62e535e..b6359c6 100644
--- a/src/flt_spoe.c
+++ b/src/flt_spoe.c
@@ -2807,7 +2807,7 @@
 	if (b_alloc_margin(buf, global.tune.reserved_bufs))
 		return 1;
 
-	MT_LIST_ADDQ(&buffer_wq, &buffer_wait->list);
+	MT_LIST_TRY_ADDQ(&buffer_wq, &buffer_wait->list);
 	return 0;
 }
 
diff --git a/src/listener.c b/src/listener.c
index cc49c70..cad0e0c 100644
--- a/src/listener.c
+++ b/src/listener.c
@@ -424,7 +424,7 @@
 {
 	HA_SPIN_LOCK(LISTENER_LOCK, &l->lock);
 	if (l->state == LI_READY) {
-		MT_LIST_ADDQ(list, &l->wait_queue);
+		MT_LIST_TRY_ADDQ(list, &l->wait_queue);
 		fd_stop_recv(l->fd);
 		l->state = LI_LIMITED;
 	}
diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c
index a0509e6..05723e8 100644
--- a/src/mux_fcgi.c
+++ b/src/mux_fcgi.c
@@ -605,7 +605,7 @@
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		fconn->buf_wait.target = fconn;
 		fconn->buf_wait.wakeup_cb = fcgi_buf_available;
-		MT_LIST_ADDQ(&buffer_wq, &fconn->buf_wait.list);
+		MT_LIST_TRY_ADDQ(&buffer_wq, &fconn->buf_wait.list);
 	}
 	return buf;
 }
@@ -2956,9 +2956,9 @@
 		struct server *srv = objt_server(conn->target);
 
 		if (conn_in_list == CO_FL_SAFE_LIST)
-			MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
+			MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list);
 		else
-			MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+			MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list);
 	}
 	return NULL;
 }
diff --git a/src/mux_h1.c b/src/mux_h1.c
index 80a7b7d..0626a7d 100644
--- a/src/mux_h1.c
+++ b/src/mux_h1.c
@@ -416,7 +416,7 @@
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		h1c->buf_wait.target = h1c;
 		h1c->buf_wait.wakeup_cb = h1_buf_available;
-		MT_LIST_ADDQ(&buffer_wq, &h1c->buf_wait.list);
+		MT_LIST_TRY_ADDQ(&buffer_wq, &h1c->buf_wait.list);
 	}
 	return buf;
 }
@@ -2265,9 +2265,9 @@
 		struct server *srv = objt_server(conn->target);
 
 		if (conn_in_list == CO_FL_SAFE_LIST)
-			MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
+			MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list);
 		else
-			MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+			MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list);
 	}
 	return NULL;
 }
diff --git a/src/mux_h2.c b/src/mux_h2.c
index 6ec8d27..ce26c92 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -683,7 +683,7 @@
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		h2c->buf_wait.target = h2c;
 		h2c->buf_wait.wakeup_cb = h2_buf_available;
-		MT_LIST_ADDQ(&buffer_wq, &h2c->buf_wait.list);
+		MT_LIST_TRY_ADDQ(&buffer_wq, &h2c->buf_wait.list);
 	}
 	return buf;
 }
@@ -3565,9 +3565,9 @@
 		struct server *srv = objt_server(conn->target);
 
 		if (conn_in_list == CO_FL_SAFE_LIST)
-			MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
+			MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list);
 		else
-			MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+			MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list);
 	}
 
 leave:
diff --git a/src/server.c b/src/server.c
index b0c061e..05b19d4 100644
--- a/src/server.c
+++ b/src/server.c
@@ -5207,7 +5207,7 @@
 		if (toremove_nb != -1 && i >= toremove_nb)
 			break;
 		MT_LIST_DEL_SAFE_NOINIT(elt1);
-		MT_LIST_ADDQ_NOCHECK(toremove_list, &conn->list);
+		MT_LIST_ADDQ(toremove_list, &conn->list);
 		i++;
 	}
 	return i;
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index 69f6835..54da3a0 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -5676,9 +5676,9 @@
 		struct server *srv = objt_server(conn->target);
 
 		if (conn_in_list == CO_FL_SAFE_LIST)
-			MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
+			MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list);
 		else
-			MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+			MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list);
 	}
 	return NULL;
 }
diff --git a/src/stream.c b/src/stream.c
index 287f406..f3253c6 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -728,7 +728,7 @@
 	if (b_alloc_margin(&s->res.buf, 0))
 		return 1;
 
-	MT_LIST_ADDQ(&buffer_wq, &s->buffer_wait.list);
+	MT_LIST_TRY_ADDQ(&buffer_wq, &s->buffer_wait.list);
 	return 0;
 }
 
diff --git a/src/task.c b/src/task.c
index 72f09de..102462e 100644
--- a/src/task.c
+++ b/src/task.c
@@ -97,7 +97,7 @@
 
 			/* Beware: tasks that have never run don't have their ->list empty yet! */
 			LIST_INIT(&((struct tasklet *)t)->list);
-			MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list,
+			MT_LIST_TRY_ADDQ(&task_per_thread[thr].shared_tasklet_list,
 			             (struct mt_list *)&((struct tasklet *)t)->list);
 			_HA_ATOMIC_ADD(&tasks_run_queue, 1);
 			_HA_ATOMIC_ADD(&task_per_thread[thr].task_list_size, 1);
@@ -582,7 +582,7 @@
 	 * 100% due to rounding, this is not a problem. Note that while in
 	 * theory the sum cannot be NULL as we cannot get there without tasklets
 	 * to process, in practice it seldom happens when multiple writers
-	 * conflict and rollback on MT_LIST_ADDQ(shared_tasklet_list), causing
+	 * conflict and rollback on MT_LIST_TRY_ADDQ(shared_tasklet_list), causing
 	 * a first MT_LIST_ISEMPTY() to succeed for thread_has_task() and the
 	 * one above to finally fail. This is extremely rare and not a problem.
 	 */
diff --git a/tests/test-list.c b/tests/test-list.c
index 5e54f60..e2e07f9 100644
--- a/tests/test-list.c
+++ b/tests/test-list.c
@@ -33,12 +33,12 @@
 		case 0:
 			lol = malloc(sizeof(*lol));
 			MT_LIST_INIT(&lol->list_elt);
-			MT_LIST_ADD(&pouet_list, &lol->list_elt);
+			MT_LIST_TRY_ADD(&pouet_list, &lol->list_elt);
 			break;
 		case 1:
 			lol = malloc(sizeof(*lol));
 			MT_LIST_INIT(&lol->list_elt);
-			MT_LIST_ADDQ(&pouet_list, &lol->list_elt);
+			MT_LIST_TRY_ADDQ(&pouet_list, &lol->list_elt);
 			break;
 
 		case 2: