REORG: thread/sched: move the last dynamic thread_info fields to thread_ctx

The last 3 fields were per-thread list heads:
  - the pool's LRU head
  - the buffer_wq
  - the streams list head

Moving them into thread_ctx completes the removal of dynamic elements from
struct thread_info: all of a thread's dynamic elements are now packed
together in a single place.
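
Below is a minimal standalone sketch (not part of this patch) of the
resulting layout. The struct/field names mirror the patch (thread_ctx,
ha_thread_ctx, th_ctx, pool_lru_head, buffer_wq, streams); the list type,
LIST_INIT and the thread-local binding are simplified stand-ins for the
real HAProxy definitions:

  #define MAX_THREADS 64

  struct list { struct list *n, *p; };           /* simplified list head */
  #define LIST_INIT(l) do { (l)->n = (l)->p = (l); } while (0)

  struct thread_ctx {
  	struct list pool_lru_head;             /* oldest cached objects  */
  	struct list buffer_wq;                 /* buffer waiters         */
  	struct list streams;                   /* streams on this thread */
  };

  static struct thread_ctx ha_thread_ctx[MAX_THREADS];
  static __thread struct thread_ctx *th_ctx;     /* set once per thread  */

  /* boot-time init: every thread's list heads start out empty */
  static void init_thread_lists(void)
  {
  	int thr;

  	for (thr = 0; thr < MAX_THREADS; thr++) {
  		LIST_INIT(&ha_thread_ctx[thr].pool_lru_head);
  		LIST_INIT(&ha_thread_ctx[thr].buffer_wq);
  		LIST_INIT(&ha_thread_ctx[thr].streams);
  	}
  }

  int main(void)
  {
  	init_thread_lists();
  	th_ctx = &ha_thread_ctx[0];            /* thread 0 binds its own context */
  	return 0;
  }

Callers on the local thread go through th_ctx (e.g. th_ctx->buffer_wq),
while code that walks all threads indexes ha_thread_ctx[thr], exactly as
the hunks below do.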
diff --git a/include/haproxy/channel.h b/include/haproxy/channel.h
index 0f6e9e0..abc1de1 100644
--- a/include/haproxy/channel.h
+++ b/include/haproxy/channel.h
@@ -846,7 +846,7 @@
 		return 1;
 
 	if (!LIST_INLIST(&wait->list))
-		LIST_APPEND(&ti->buffer_wq, &wait->list);
+		LIST_APPEND(&th_ctx->buffer_wq, &wait->list);
 
 	return 0;
 }
diff --git a/include/haproxy/dynbuf.h b/include/haproxy/dynbuf.h
index c38b9c7..5e8ece2 100644
--- a/include/haproxy/dynbuf.h
+++ b/include/haproxy/dynbuf.h
@@ -112,7 +112,7 @@
 
 static inline void offer_buffers(void *from, unsigned int count)
 {
-	if (!LIST_ISEMPTY(&ti->buffer_wq))
+	if (!LIST_ISEMPTY(&th_ctx->buffer_wq))
 		__offer_buffers(from, count);
 }
 
diff --git a/include/haproxy/tinfo-t.h b/include/haproxy/tinfo-t.h
index dc6268e..737431f 100644
--- a/include/haproxy/tinfo-t.h
+++ b/include/haproxy/tinfo-t.h
@@ -42,12 +42,6 @@
  * disabled, it contains the same info for the single running thread.
  */
 struct thread_info {
-#ifdef CONFIG_HAP_POOLS
-	struct list pool_lru_head;                         /* oldest objects   */
-#endif
-	struct list buffer_wq;     /* buffer waiters */
-	struct list streams;       /* list of streams attached to this thread */
-
 	/* pad to cache line (64B) */
 	char __pad[0];            /* unused except to check remaining room */
 	char __end[0] __attribute__((aligned(64)));
@@ -70,6 +64,12 @@
 	uint8_t tl_class_mask;              /* bit mask of non-empty tasklets classes */
 
 	// 7 bytes hole here
+#ifdef CONFIG_HAP_POOLS
+	struct list pool_lru_head;          /* oldest objects   */
+#endif
+	struct list buffer_wq;              /* buffer waiters */
+	struct list streams;                /* list of streams attached to this thread */
+
 	ALWAYS_ALIGN(2*sizeof(void*));
 	struct list tasklets[TL_CLASSES];   /* tasklets (and/or tasks) to run, by class */
 
diff --git a/src/check.c b/src/check.c
index 9ac66a5..ad6df9c 100644
--- a/src/check.c
+++ b/src/check.c
@@ -1296,7 +1296,7 @@
 	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		check->buf_wait.target = check;
 		check->buf_wait.wakeup_cb = check_buf_available;
-		LIST_APPEND(&ti->buffer_wq, &check->buf_wait.list);
+		LIST_APPEND(&th_ctx->buffer_wq, &check->buf_wait.list);
 	}
 	return buf;
 }
diff --git a/src/dynbuf.c b/src/dynbuf.c
index a200237..0b12c75 100644
--- a/src/dynbuf.c
+++ b/src/dynbuf.c
@@ -34,7 +34,7 @@
 		return 0;
 
 	for (thr = 0; thr < MAX_THREADS; thr++)
-		LIST_INIT(&ha_thread_info[thr].buffer_wq);
+		LIST_INIT(&ha_thread_ctx[thr].buffer_wq);
 
 
 	/* The reserved buffer is what we leave behind us. Thus we always need
@@ -109,7 +109,7 @@
 	 * other tasks, but that's a rough estimate. Similarly, for each cached
 	 * event we'll need 1 buffer.
 	 */
-	list_for_each_entry_safe(wait, wait_back, &ti->buffer_wq, list) {
+	list_for_each_entry_safe(wait, wait_back, &th_ctx->buffer_wq, list) {
 		if (!count)
 			break;
 
diff --git a/src/flt_spoe.c b/src/flt_spoe.c
index 3262fd0..3f17bcb 100644
--- a/src/flt_spoe.c
+++ b/src/flt_spoe.c
@@ -2867,7 +2867,7 @@
 	if (b_alloc(buf))
 		return 1;
 
-	LIST_APPEND(&ti->buffer_wq, &buffer_wait->list);
+	LIST_APPEND(&th_ctx->buffer_wq, &buffer_wait->list);
 	return 0;
 }
 
diff --git a/src/h3.c b/src/h3.c
index 4540680..4acd95a 100644
--- a/src/h3.c
+++ b/src/h3.c
@@ -99,7 +99,7 @@
 	    unlikely((buf = b_alloc(&h3_uqs->qcs->tx.buf)) == NULL)) {
 		h3->buf_wait.target = h3_uqs;
 		h3->buf_wait.wakeup_cb = qcs_buf_available;
-		LIST_APPEND(&ti->buffer_wq, &h3->buf_wait.list);
+		LIST_APPEND(&th_ctx->buffer_wq, &h3->buf_wait.list);
 	}
 
 	return buf;
diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c
index 3c03021..5fa7502 100644
--- a/src/mux_fcgi.c
+++ b/src/mux_fcgi.c
@@ -612,7 +612,7 @@
 	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		fconn->buf_wait.target = fconn;
 		fconn->buf_wait.wakeup_cb = fcgi_buf_available;
-		LIST_APPEND(&ti->buffer_wq, &fconn->buf_wait.list);
+		LIST_APPEND(&th_ctx->buffer_wq, &fconn->buf_wait.list);
 	}
 	return buf;
 }
diff --git a/src/mux_h1.c b/src/mux_h1.c
index 3e2e094..814b3fb 100644
--- a/src/mux_h1.c
+++ b/src/mux_h1.c
@@ -449,7 +449,7 @@
 	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		h1c->buf_wait.target = h1c;
 		h1c->buf_wait.wakeup_cb = h1_buf_available;
-		LIST_APPEND(&ti->buffer_wq, &h1c->buf_wait.list);
+		LIST_APPEND(&th_ctx->buffer_wq, &h1c->buf_wait.list);
 	}
 	return buf;
 }
diff --git a/src/mux_h2.c b/src/mux_h2.c
index 150a559..f32766d 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -817,7 +817,7 @@
 	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		h2c->buf_wait.target = h2c;
 		h2c->buf_wait.wakeup_cb = h2_buf_available;
-		LIST_APPEND(&ti->buffer_wq, &h2c->buf_wait.list);
+		LIST_APPEND(&th_ctx->buffer_wq, &h2c->buf_wait.list);
 	}
 	return buf;
 }
diff --git a/src/mux_quic.c b/src/mux_quic.c
index c89490f..cb40e0b 100644
--- a/src/mux_quic.c
+++ b/src/mux_quic.c
@@ -442,7 +442,7 @@
 	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		qcc->buf_wait.target = qcc;
 		qcc->buf_wait.wakeup_cb = qc_buf_available;
-		LIST_APPEND(&ti->buffer_wq, &qcc->buf_wait.list);
+		LIST_APPEND(&th_ctx->buffer_wq, &qcc->buf_wait.list);
 	}
 
 	return buf;
diff --git a/src/pool.c b/src/pool.c
index cb0179b..eb1484b 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -289,7 +289,7 @@
 	struct pool_head *pool;
 
 	do {
-		item = LIST_PREV(&ti->pool_lru_head, struct pool_cache_item *, by_lru);
+		item = LIST_PREV(&th_ctx->pool_lru_head, struct pool_cache_item *, by_lru);
 		/* note: by definition we remove oldest objects so they also are the
 		 * oldest in their own pools, thus their next is the pool's head.
 		 */
@@ -315,7 +315,7 @@
 	struct pool_cache_head *ph = &pool->cache[tid];
 
 	LIST_INSERT(&ph->list, &item->by_pool);
-	LIST_INSERT(&ti->pool_lru_head, &item->by_lru);
+	LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
 	ph->count++;
 	pool_cache_count++;
 	pool_cache_bytes += pool->size;
@@ -640,7 +640,7 @@
 	int thr;
 
 	for (thr = 0; thr < MAX_THREADS; thr++) {
-		LIST_INIT(&ha_thread_info[thr].pool_lru_head);
+		LIST_INIT(&ha_thread_ctx[thr].pool_lru_head);
 	}
 #endif
 	detect_allocator();
diff --git a/src/proxy.c b/src/proxy.c
index db876e6..6868eff 100644
--- a/src/proxy.c
+++ b/src/proxy.c
@@ -2017,7 +2017,7 @@
 	thread_isolate();
 
 	for (thr = 0; thr < global.nbthread; thr++) {
-		list_for_each_entry(s, &ha_thread_info[thr].streams, list) {
+		list_for_each_entry(s, &ha_thread_ctx[thr].streams, list) {
 			stream_shutdown(s, SF_ERR_KILLED);
 		}
 	}
diff --git a/src/stream.c b/src/stream.c
index e4d5ac9..18d4f12 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -548,7 +548,7 @@
 
 	s->tunnel_timeout = TICK_ETERNITY;
 
-	LIST_APPEND(&ti->streams, &s->list);
+	LIST_APPEND(&th_ctx->streams, &s->list);
 
 	if (flt_stream_init(s) < 0 || flt_stream_start(s) < 0)
 		goto out_fail_accept;
@@ -720,7 +720,7 @@
 		 * only touch their node under thread isolation.
 		 */
 		LIST_DEL_INIT(&bref->users);
-		if (s->list.n != &ti->streams)
+		if (s->list.n != &th_ctx->streams)
 			LIST_APPEND(&LIST_ELEM(s->list.n, struct stream *, list)->back_refs, &bref->users);
 		bref->ref = s->list.n;
 		__ha_barrier_store();
@@ -778,7 +778,7 @@
 	if (b_alloc(&s->res.buf))
 		return 1;
 
-	LIST_APPEND(&ti->buffer_wq, &s->buffer_wait.list);
+	LIST_APPEND(&th_ctx->buffer_wq, &s->buffer_wait.list);
 	return 0;
 }
 
@@ -2818,7 +2818,7 @@
 	int thr;
 
 	for (thr = 0; thr < MAX_THREADS; thr++)
-		LIST_INIT(&ha_thread_info[thr].streams);
+		LIST_INIT(&ha_thread_ctx[thr].streams);
 }
 INITCALL0(STG_INIT, init_stream);
 
@@ -3495,7 +3495,7 @@
 		 * pointer points back to the head of the streams list.
 		 */
 		LIST_INIT(&appctx->ctx.sess.bref.users);
-		appctx->ctx.sess.bref.ref = ha_thread_info[appctx->ctx.sess.thr].streams.n;
+		appctx->ctx.sess.bref.ref = ha_thread_ctx[appctx->ctx.sess.thr].streams.n;
 		appctx->st2 = STAT_ST_LIST;
 		/* fall through */
 
@@ -3512,7 +3512,7 @@
 			struct stream *curr_strm;
 			int done= 0;
 
-			if (appctx->ctx.sess.bref.ref == &ha_thread_info[appctx->ctx.sess.thr].streams)
+			if (appctx->ctx.sess.bref.ref == &ha_thread_ctx[appctx->ctx.sess.thr].streams)
 				done = 1;
 			else {
 				/* check if we've found a stream created after issuing the "show sess" */
@@ -3525,7 +3525,7 @@
 				appctx->ctx.sess.thr++;
 				if (appctx->ctx.sess.thr >= global.nbthread)
 					break;
-				appctx->ctx.sess.bref.ref = ha_thread_info[appctx->ctx.sess.thr].streams.n;
+				appctx->ctx.sess.bref.ref = ha_thread_ctx[appctx->ctx.sess.thr].streams.n;
 				continue;
 			}
 
@@ -3732,7 +3732,7 @@
 
 	/* first, look for the requested stream in the stream table */
 	for (thr = 0; !strm && thr < global.nbthread; thr++) {
-		list_for_each_entry(strm, &ha_thread_info[thr].streams, list) {
+		list_for_each_entry(strm, &ha_thread_ctx[thr].streams, list) {
 			if (strm == ptr) {
 				stream_shutdown(strm, SF_ERR_KILLED);
 				break;