MINOR: dynbuf: make the buffer wait queue per thread

The buffer wait queue used to be global for historical reasons, but this
does not make sense anymore given that the most common use case is to
have thread-local pools. Thus there's no point waking up waiters on
other threads after releasing an entry, as they won't benefit from it.

Let's move the queue head to the thread_info structure and use
ti->buffer_wq from now on.
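
Note that the hunks below only cover the call sites in the .c files;
the queue head itself has to be declared in the thread_info structure.
A minimal sketch of that declaration (the exact header file and the
surrounding fields are assumptions, not shown in this excerpt):

    struct thread_info {
            /* ... existing per-thread fields ... */
            struct mt_list buffer_wq; /* list of objects waiting for a buffer */
    };
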
diff --git a/src/check.c b/src/check.c
index a09c67a..ff4effa 100644
--- a/src/check.c
+++ b/src/check.c
@@ -1019,7 +1019,7 @@
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		check->buf_wait.target = check;
 		check->buf_wait.wakeup_cb = check_buf_available;
-		MT_LIST_ADDQ(&buffer_wq, &check->buf_wait.list);
+		MT_LIST_ADDQ(&ti->buffer_wq, &check->buf_wait.list);
 	}
 	return buf;
 }
diff --git a/src/dynbuf.c b/src/dynbuf.c
index ad2cde9..395fa8a 100644
--- a/src/dynbuf.c
+++ b/src/dynbuf.c
@@ -22,18 +22,20 @@
 
 struct pool_head *pool_head_buffer;
 
-/* list of objects waiting for at least one buffer */
-struct mt_list buffer_wq = LIST_HEAD_INIT(buffer_wq);
-
 /* perform minimal intializations, report 0 in case of error, 1 if OK. */
 int init_buffer()
 {
 	void *buffer;
+	int thr;
 
 	pool_head_buffer = create_pool("buffer", global.tune.bufsize, MEM_F_SHARED|MEM_F_EXACT);
 	if (!pool_head_buffer)
 		return 0;
 
+	for (thr = 0; thr < MAX_THREADS; thr++)
+		MT_LIST_INIT(&ha_thread_info[thr].buffer_wq);
+
+
 	/* The reserved buffer is what we leave behind us. Thus we always need
 	 * at least one extra buffer in minavail otherwise we'll end up waking
 	 * up tasks with no memory available, causing a lot of useless wakeups.
@@ -112,7 +114,7 @@
 	 */
 	avail = pool_head_buffer->allocated - pool_head_buffer->used - global.tune.reserved_bufs / 2;
 
-	mt_list_for_each_entry_safe(wait, &buffer_wq, list, elt1, elt2) {
+	mt_list_for_each_entry_safe(wait, &ti->buffer_wq, list, elt1, elt2) {
 		if (avail <= threshold)
 			break;
 
diff --git a/src/flt_spoe.c b/src/flt_spoe.c
index b9471d9..27a15b6 100644
--- a/src/flt_spoe.c
+++ b/src/flt_spoe.c
@@ -2828,7 +2828,7 @@
 	if (b_alloc_margin(buf, global.tune.reserved_bufs))
 		return 1;
 
-	MT_LIST_ADDQ(&buffer_wq, &buffer_wait->list);
+	MT_LIST_ADDQ(&ti->buffer_wq, &buffer_wait->list);
 	return 0;
 }
 
diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c
index a2c1c86..2ff7aa9 100644
--- a/src/mux_fcgi.c
+++ b/src/mux_fcgi.c
@@ -608,7 +608,7 @@
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		fconn->buf_wait.target = fconn;
 		fconn->buf_wait.wakeup_cb = fcgi_buf_available;
-		MT_LIST_ADDQ(&buffer_wq, &fconn->buf_wait.list);
+		MT_LIST_ADDQ(&ti->buffer_wq, &fconn->buf_wait.list);
 	}
 	return buf;
 }
diff --git a/src/mux_h1.c b/src/mux_h1.c
index 6333935..077f4ea 100644
--- a/src/mux_h1.c
+++ b/src/mux_h1.c
@@ -452,7 +452,7 @@
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		h1c->buf_wait.target = h1c;
 		h1c->buf_wait.wakeup_cb = h1_buf_available;
-		MT_LIST_ADDQ(&buffer_wq, &h1c->buf_wait.list);
+		MT_LIST_ADDQ(&ti->buffer_wq, &h1c->buf_wait.list);
 	}
 	return buf;
 }
diff --git a/src/mux_h2.c b/src/mux_h2.c
index 21c14b8..cfa5f8c 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -810,7 +810,7 @@
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		h2c->buf_wait.target = h2c;
 		h2c->buf_wait.wakeup_cb = h2_buf_available;
-		MT_LIST_ADDQ(&buffer_wq, &h2c->buf_wait.list);
+		MT_LIST_ADDQ(&ti->buffer_wq, &h2c->buf_wait.list);
 	}
 	return buf;
 }
diff --git a/src/stream.c b/src/stream.c
index 9394ac0..d747211 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -773,7 +773,7 @@
 	if (b_alloc_margin(&s->res.buf, 0))
 		return 1;
 
-	MT_LIST_ADDQ(&buffer_wq, &s->buffer_wait.list);
+	MT_LIST_ADDQ(&ti->buffer_wq, &s->buffer_wait.list);
 	return 0;
 }
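
As a side note for readers unfamiliar with the pattern, here is a
self-contained sketch of the per-thread wait queue idea. It is not
HAProxy code: MT_LIST is replaced by a plain singly-linked list, all
locking is omitted, and names like enqueue_waiter() and
offer_buffers_local() are made up for the illustration.

    /* Standalone illustration (not HAProxy code) of per-thread buffer
     * wait queues. The enqueue order is also simplified: the real
     * MT_LIST_ADDQ() appends at the tail.
     */
    #include <stdio.h>
    #include <stddef.h>

    #define MAX_THREADS 4

    struct buffer_wait {
            void *target;                /* object waiting for a buffer */
            int (*wakeup_cb)(void *tgt); /* called once a buffer is available */
            struct buffer_wait *next;
    };

    struct thread_info {
            struct buffer_wait *buffer_wq; /* per-thread wait queue head */
    };

    static struct thread_info ha_thread_info[MAX_THREADS];

    /* mirrors init_buffer(): every thread's queue starts empty */
    static void init_wait_queues(void)
    {
            int thr;

            for (thr = 0; thr < MAX_THREADS; thr++)
                    ha_thread_info[thr].buffer_wq = NULL;
    }

    /* mirrors the MT_LIST_ADDQ() call sites: a waiter registers on the
     * queue of the thread it runs on, not on a process-wide one.
     */
    static void enqueue_waiter(struct thread_info *ti, struct buffer_wait *w)
    {
            w->next = ti->buffer_wq;
            ti->buffer_wq = w;
    }

    /* mirrors the offer_buffers() side: only local waiters are woken,
     * since waiters on other threads would not benefit from a buffer
     * released into a thread-local pool.
     */
    static void offer_buffers_local(struct thread_info *ti)
    {
            struct buffer_wait *w;

            while ((w = ti->buffer_wq) != NULL) {
                    ti->buffer_wq = w->next;
                    w->wakeup_cb(w->target);
            }
    }

    static int dummy_wakeup(void *tgt)
    {
            printf("woken up: %s\n", (const char *)tgt);
            return 1;
    }

    int main(void)
    {
            struct buffer_wait w = { "stream on thread 0", dummy_wakeup, NULL };

            init_wait_queues();
            enqueue_waiter(&ha_thread_info[0], &w);
            offer_buffers_local(&ha_thread_info[0]); /* thread 0's waiters only */
            return 0;
    }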