MEDIUM: list: Separate "locked" list from regular list.

Instead of using the same type for regular linked lists and "autolocked"
linked lists, use a separate type, "struct mt_list", for the autolocked one,
and introduce a set of macros, similar to the LIST_* macros, with the
MT_ prefix.
When we use the same entry for both a regular list and an autolocked list, as
is done for the "list" field in struct connection, we now have to explicitly
cast it to struct mt_list when using MT_ macros.
diff --git a/include/common/mini-clist.h b/include/common/mini-clist.h
index 0cdddce..e530c43 100644
--- a/include/common/mini-clist.h
+++ b/include/common/mini-clist.h
@@ -34,6 +34,16 @@
     struct list *p;	/* prev */
 };
 
+/* This is similar to struct list, but we want to be sure the compiler will
+ * yell at you if you use macros for one when you're using the other. You have
+ * to explicitly cast if that's really what you want to do.
+ */
+struct mt_list {
+    struct mt_list *next;
+    struct mt_list *prev;
+};
+
+
 /* a back-ref is a pointer to a target list entry. It is used to detect when an
  * element being deleted is currently being tracked by another user. The best
  * example is a user dumping the session table. The table does not fit in the
@@ -189,7 +199,7 @@
 	     item = back, back = LIST_ELEM(back->member.n, typeof(back), member))
 
 #include <common/hathreads.h>
-#define LLIST_BUSY ((struct list *)1)
+#define MT_LIST_BUSY ((struct mt_list *)1)
 
 /*
  * Locked version of list manipulation macros.
@@ -197,95 +207,95 @@
  * list is only used with the locked variants. The only "unlocked" macro you
  * can use with a locked list is LIST_INIT.
  */
-#define LIST_ADD_LOCKED(lh, el)                                            \
+#define MT_LIST_ADD(lh, el)                                                \
 	do {                                                               \
 		while (1) {                                                \
-			struct list *n;                                    \
-			struct list *p;                                    \
-			n = _HA_ATOMIC_XCHG(&(lh)->n, LLIST_BUSY);         \
-			if (n == LLIST_BUSY)                               \
+			struct mt_list *n;                                 \
+			struct mt_list *p;                                 \
+			n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY);      \
+			if (n == MT_LIST_BUSY)                               \
 			        continue;                                  \
-			p = _HA_ATOMIC_XCHG(&n->p, LLIST_BUSY);            \
-			if (p == LLIST_BUSY) {                             \
-				(lh)->n = n;                               \
+			p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);         \
+			if (p == MT_LIST_BUSY) {                             \
+				(lh)->next = n;                            \
 				__ha_barrier_store();                      \
 				continue;                                  \
 			}                                                  \
-			(el)->n = n;                                       \
-			(el)->p = p;                                       \
+			(el)->next = n;                                    \
+			(el)->prev = p;                                    \
 			__ha_barrier_store();                              \
-			n->p = (el);                                       \
+			n->prev = (el);                                    \
 			__ha_barrier_store();                              \
-			p->n = (el);                                       \
+			p->next = (el);                                    \
 			__ha_barrier_store();                              \
 			break;                                             \
 		}                                                          \
 	} while (0)
 
-#define LIST_ADDQ_LOCKED(lh, el)                                           \
+#define MT_LIST_ADDQ(lh, el)                                               \
 	do {                                                               \
 		while (1) {                                                \
-			struct list *n;                                    \
-			struct list *p;                                    \
-			p = _HA_ATOMIC_XCHG(&(lh)->p, LLIST_BUSY);         \
-			if (p == LLIST_BUSY)                               \
+			struct mt_list *n;                                 \
+			struct mt_list *p;                                 \
+			p = _HA_ATOMIC_XCHG(&(lh)->prev, MT_LIST_BUSY);      \
+			if (p == MT_LIST_BUSY)                               \
 			        continue;                                  \
-			n = _HA_ATOMIC_XCHG(&p->n, LLIST_BUSY);            \
-			if (n == LLIST_BUSY) {                             \
-				(lh)->p = p;                               \
+			n = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY);         \
+			if (n == MT_LIST_BUSY) {                             \
+				(lh)->prev = p;                            \
 				__ha_barrier_store();                      \
 				continue;                                  \
 			}                                                  \
-			(el)->n = n;                                       \
-			(el)->p = p;                                       \
+			(el)->next = n;                                    \
+			(el)->prev = p;                                    \
 			__ha_barrier_store();                              \
-			p->n = (el);                                       \
+			p->next = (el);                                    \
 			__ha_barrier_store();                              \
-			n->p = (el);                                       \
+			n->prev = (el);                                    \
 			__ha_barrier_store();                              \
 			break;                                             \
 		}                                                          \
 	} while (0)
 
-#define LIST_DEL_LOCKED(el)                                                \
+#define MT_LIST_DEL(el)                                                    \
 	do {                                                               \
 		while (1) {                                                \
-			struct list *n, *n2;                               \
-			struct list *p, *p2 = NULL;                        \
-			n = _HA_ATOMIC_XCHG(&(el)->n, LLIST_BUSY);         \
-			if (n == LLIST_BUSY)                               \
+			struct mt_list *n, *n2;                            \
+			struct mt_list *p, *p2 = NULL;                     \
+			n = _HA_ATOMIC_XCHG(&(el)->next, MT_LIST_BUSY);      \
+			if (n == MT_LIST_BUSY)                               \
 			        continue;                                  \
-			p = _HA_ATOMIC_XCHG(&(el)->p, LLIST_BUSY);         \
-			if (p == LLIST_BUSY) {                             \
-				(el)->n = n;                               \
+			p = _HA_ATOMIC_XCHG(&(el)->prev, MT_LIST_BUSY);      \
+			if (p == MT_LIST_BUSY) {                             \
+				(el)->next = n;                            \
 				__ha_barrier_store();                      \
 				continue;                                  \
 			}                                                  \
 			if (p != (el)) {                                   \
-			        p2 = _HA_ATOMIC_XCHG(&p->n, LLIST_BUSY);   \
-			        if (p2 == LLIST_BUSY) {                    \
-			                (el)->p = p;                       \
-					(el)->n = n;                       \
+			        p2 = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY);\
+			        if (p2 == MT_LIST_BUSY) {                    \
+			                (el)->prev = p;                    \
+					(el)->next = n;                    \
 					__ha_barrier_store();              \
 					continue;                          \
 				}                                          \
 			}                                                  \
 			if (n != (el)) {                                   \
-			        n2 = _HA_ATOMIC_XCHG(&n->p, LLIST_BUSY);   \
-				if (n2 == LLIST_BUSY) {                    \
+			        n2 = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);\
+				if (n2 == MT_LIST_BUSY) {                    \
 					if (p2 != NULL)                    \
-						p->n = p2;                 \
-					(el)->p = p;                       \
-					(el)->n = n;                       \
+						p->next = p2;              \
+					(el)->prev = p;                    \
+					(el)->next = n;                    \
 					__ha_barrier_store();              \
 					continue;                          \
 				}                                          \
 			}                                                  \
-			n->p = p;                                          \
-			p->n = n;                                          \
+			n->prev = p;                                       \
+			p->next = n;                                       \
 			__ha_barrier_store();                              \
-			(el)->p = (el);                                    \
-			(el)->n = (el);	                                   \
+			(el)->prev = (el);                                 \
+			(el)->next = (el);                                 \
 			__ha_barrier_store();                              \
 			break;                                             \
 		}                                                          \
@@ -293,54 +303,89 @@
 
 
 /* Remove the first element from the list, and return it */
-#define LIST_POP_LOCKED(lh, pt, el)                                        \
+#define MT_LIST_POP(lh, pt, el)                                            \
 	({                                                                 \
 		 void *_ret;                                               \
 		 while (1) {                                               \
-			 struct list *n, *n2;                              \
-			 struct list *p, *p2;                              \
-			 n = _HA_ATOMIC_XCHG(&(lh)->n, LLIST_BUSY);        \
-			 if (n == LLIST_BUSY)                              \
+			 struct mt_list *n, *n2;                           \
+			 struct mt_list *p, *p2;                           \
+			 n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY);     \
+			 if (n == MT_LIST_BUSY)                              \
 			         continue;                                 \
 			 if (n == (lh)) {                                  \
-				 (lh)->n = lh;                             \
+				 (lh)->next = lh;                          \
 				 __ha_barrier_store();                     \
 				 _ret = NULL;                              \
 				 break;                                    \
 			 }                                                 \
-			 p = _HA_ATOMIC_XCHG(&n->p, LLIST_BUSY);           \
-			 if (p == LLIST_BUSY) {                            \
-				 (lh)->n = n;                              \
+			 p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);        \
+			 if (p == MT_LIST_BUSY) {                            \
+				 (lh)->next = n;                           \
 				 __ha_barrier_store();                     \
 				 continue;                                 \
 			 }                                                 \
-			 n2 = _HA_ATOMIC_XCHG(&n->n, LLIST_BUSY);          \
-			 if (n2 == LLIST_BUSY) {                           \
-				 n->p = p;                                 \
+			 n2 = _HA_ATOMIC_XCHG(&n->next, MT_LIST_BUSY);       \
+			 if (n2 == MT_LIST_BUSY) {                           \
+				 n->prev = p;                              \
 				 __ha_barrier_store();                     \
-				 (lh)->n = n;                              \
+				 (lh)->next = n;                           \
 				 __ha_barrier_store();                     \
 				 continue;                                 \
 			 }                                                 \
-			 p2 = _HA_ATOMIC_XCHG(&n2->p, LLIST_BUSY);         \
-			 if (p2 == LLIST_BUSY) {                           \
-				 n->n = n2;                                \
-				 n->p = p;                                 \
+			 p2 = _HA_ATOMIC_XCHG(&n2->prev, MT_LIST_BUSY);      \
+			 if (p2 == MT_LIST_BUSY) {                           \
+				 n->next = n2;                             \
+				 n->prev = p;                              \
 				 __ha_barrier_store();                     \
-				 (lh)->n = n;                              \
+				 (lh)->next = n;                           \
 				 __ha_barrier_store();                     \
 				 continue;                                 \
 			 }                                                 \
-			 (lh)->n = n2;                                     \
-			 (n2)->p = (lh);                                   \
+			 (lh)->next = n2;                                  \
+			 (n2)->prev = (lh);                                \
 			 __ha_barrier_store();                             \
-			 (n)->p = (n);                                     \
-			 (n)->n = (n);	                                   \
+			 (n)->prev = (n);                                  \
+			 (n)->next = (n);	                           \
 			 __ha_barrier_store();                             \
-			 _ret = LIST_ELEM(n, pt, el);                      \
+			 _ret = MT_LIST_ELEM(n, pt, el);                   \
 			 break;                                            \
 		 }                                                         \
 		 (_ret);                                                   \
 	 })
 
+#define MT_LIST_HEAD(a)	((void *)(&(a)))
+
+#define MT_LIST_INIT(l) ((l)->next = (l)->prev = (l))
+
+#define MT_LIST_HEAD_INIT(l) { &l, &l }
+/* returns a pointer of type <pt> to a structure containing a list head called
+ * <el> at address <lh>. Note that <lh> can be the result of a function or macro
+ * since it's used only once.
+ * Example: MT_LIST_ELEM(cur_node->args.next, struct node *, args)
+ */
+#define MT_LIST_ELEM(lh, pt, el) ((pt)(((void *)(lh)) - ((void *)&((pt)NULL)->el)))
+
+/* checks if the list head <lh> is empty or not */
+#define MT_LIST_ISEMPTY(lh) ((lh)->next == (lh))
+
+/* returns a pointer of type <pt> to a structure following the element
+ * which contains list head <lh>, which is known as element <el> in
+ * struct pt.
+ * Example: MT_LIST_NEXT(args, struct node *, list)
+ */
+#define MT_LIST_NEXT(lh, pt, el) (MT_LIST_ELEM((lh)->next, pt, el))
+
+
+/* returns a pointer of type <pt> to a structure preceding the element
+ * which contains list head <lh>, which is known as element <el> in
+ * struct pt.
+ */
+#undef MT_LIST_PREV
+#define MT_LIST_PREV(lh, pt, el) (MT_LIST_ELEM((lh)->prev, pt, el))
+
+/* checks if the list element <el> was added to a list or not. This only
+ * works when detached elements are reinitialized (using MT_LIST_DEL)
+ */
+#define MT_LIST_ADDED(el) ((el)->next != (el))
+
 #endif /* _COMMON_MINI_CLIST_H */
diff --git a/include/proto/connection.h b/include/proto/connection.h
index b1d8638..a7647bd 100644
--- a/include/proto/connection.h
+++ b/include/proto/connection.h
@@ -625,7 +625,7 @@
 
 	conn_force_unsubscribe(conn);
 	HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
-	LIST_DEL_LOCKED(&conn->list);
+	MT_LIST_DEL((struct mt_list *)&conn->list);
 	HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
 	pool_free(pool_head_connection, conn);
 }
diff --git a/include/proto/listener.h b/include/proto/listener.h
index 24a01b2..b245d6e 100644
--- a/include/proto/listener.h
+++ b/include/proto/listener.h
@@ -58,7 +58,7 @@
 int disable_all_listeners(struct protocol *proto);
 
 /* Dequeues all of the listeners waiting for a resource in wait queue <queue>. */
-void dequeue_all_listeners(struct list *list);
+void dequeue_all_listeners(struct mt_list *list);
 
 /* Must be called with the lock held. Depending on <do_close> value, it does
  * what unbind_listener or unbind_listener_no_close should do.
diff --git a/include/proto/server.h b/include/proto/server.h
index f52b38e..5a2fcc9 100644
--- a/include/proto/server.h
+++ b/include/proto/server.h
@@ -41,7 +41,7 @@
 extern struct eb_root idle_conn_srv;
 extern struct task *idle_conn_task;
 extern struct task *idle_conn_cleanup[MAX_THREADS];
-extern struct list toremove_connections[MAX_THREADS];
+extern struct mt_list toremove_connections[MAX_THREADS];
 
 int srv_downtime(const struct server *s);
 int srv_lastsession(const struct server *s);
@@ -262,7 +262,7 @@
 			return 0;
 		}
 		LIST_DEL(&conn->list);
-		LIST_ADDQ_LOCKED(&srv->idle_orphan_conns[tid], &conn->list);
+		MT_LIST_ADDQ(&srv->idle_orphan_conns[tid], (struct mt_list *)&conn->list);
 		srv->curr_idle_thr[tid]++;
 
 		conn->idle_time = now_ms;
diff --git a/include/proto/task.h b/include/proto/task.h
index 4044bd4..f213f47 100644
--- a/include/proto/task.h
+++ b/include/proto/task.h
@@ -542,9 +542,9 @@
 }
 
 /* adds list item <item> to work list <work> and wake up the associated task */
-static inline void work_list_add(struct work_list *work, struct list *item)
+static inline void work_list_add(struct work_list *work, struct mt_list *item)
 {
-	LIST_ADDQ_LOCKED(&work->head, item);
+	MT_LIST_ADDQ(&work->head, item);
 	task_wakeup(work->task, TASK_WOKEN_OTHER);
 }
 
diff --git a/include/types/global.h b/include/types/global.h
index bd08db1..e9cbf3f 100644
--- a/include/types/global.h
+++ b/include/types/global.h
@@ -240,7 +240,7 @@
 extern int killed;	/* >0 means a hard-stop is triggered, >1 means hard-stop immediately */
 extern char hostname[MAX_HOSTNAME_LEN];
 extern char localpeer[MAX_HOSTNAME_LEN];
-extern struct list global_listener_queue; /* list of the temporarily limited listeners */
+extern struct mt_list global_listener_queue; /* list of the temporarily limited listeners */
 extern struct task *global_listener_queue_task;
 extern unsigned int warned;     /* bitfield of a few warnings to emit just once */
 extern volatile unsigned long sleeping_thread_mask;
diff --git a/include/types/listener.h b/include/types/listener.h
index def48b0..e60d91c 100644
--- a/include/types/listener.h
+++ b/include/types/listener.h
@@ -202,7 +202,7 @@
 	int (*accept)(struct listener *l, int fd, struct sockaddr_storage *addr); /* upper layer's accept() */
 	enum obj_type *default_target;  /* default target to use for accepted sessions or NULL */
 	/* cache line boundary */
-	struct list wait_queue;		/* link element to make the listener wait for something (LI_LIMITED)  */
+	struct mt_list wait_queue;	/* link element to make the listener wait for something (LI_LIMITED)  */
 	unsigned int thr_idx;           /* thread indexes for queue distribution : (t2<<16)+t1 */
 	unsigned int analysers;		/* bitmap of required protocol analysers */
 	int maxseg;			/* for TCP, advertised MSS */
diff --git a/include/types/proxy.h b/include/types/proxy.h
index 87b85ad..6ea96b3 100644
--- a/include/types/proxy.h
+++ b/include/types/proxy.h
@@ -415,7 +415,7 @@
 	struct be_counters be_counters;		/* backend statistics counters */
 	struct fe_counters fe_counters;		/* frontend statistics counters */
 
-	struct list listener_queue;		/* list of the temporarily limited listeners because of lack of a proxy resource */
+	struct mt_list listener_queue;		/* list of the temporarily limited listeners because of lack of a proxy resource */
 	struct stktable *table;			/* table for storing sticking streams */
 
 	struct task *task;			/* the associated task, mandatory to manage rate limiting, stopping and resource shortage, NULL if disabled */
diff --git a/include/types/server.h b/include/types/server.h
index a71f806..842e033 100644
--- a/include/types/server.h
+++ b/include/types/server.h
@@ -223,7 +223,7 @@
 	struct list *priv_conns;		/* private idle connections attached to stream interfaces */
 	struct list *idle_conns;		/* sharable idle connections attached or not to a stream interface */
 	struct list *safe_conns;		/* safe idle connections attached to stream interfaces, shared */
-	struct list *idle_orphan_conns;         /* Orphan connections idling */
+	struct mt_list *idle_orphan_conns;         /* Orphan connections idling */
 	unsigned int pool_purge_delay;          /* Delay before starting to purge the idle conns pool */
 	unsigned int max_idle_conns;            /* Max number of connection allowed in the orphan connections list */
 	unsigned int curr_idle_conns;           /* Current number of orphan idling connections */
diff --git a/include/types/task.h b/include/types/task.h
index 3421cd3..481d563 100644
--- a/include/types/task.h
+++ b/include/types/task.h
@@ -118,7 +118,7 @@
  * TASK_WOKEN_OTHER and a context pointing to the work_list entry.
  */
 struct work_list {
-	struct list head;
+	struct mt_list head;
 	struct task *task;
 	void *arg;
 };