MINOR: dynbuf: pass offer_buffers() the number of buffers instead of a threshold

Historically this function would try to wake the most accurate number of
process_stream() waiters. But since the introduction of filters which could
also require buffers (e.g. for compression), things started not to be as
accurate anymore. Nowadays muxes and transport layers also use buffers, so
the runqueue size has nothing to do anymore with the number of supposed
users to come.

In addition to this, the threshold was compared to the number of free buffers,
calculated as allocated minus used, but this didn't work anymore with local
pools since these counts are not updated upon alloc/free!

Let's clean this up and pass the number of released buffers instead, and
consider that each waiter that is successfully woken up counts as one
buffer. This is not rocket science and will not suddenly fix everything,
but at least it cannot be as wrong as it is today.

This could have been marked as a bug given that the current situation is
totally broken regarding this, but this probably doesn't completely fix
it, it only goes in a better direction. It is possible however that it
makes sense in the future to backport this as part of a larger series if
the situation significantly improves.
diff --git a/src/check.c b/src/check.c
index 6451dcf..5b65354 100644
--- a/src/check.c
+++ b/src/check.c
@@ -1032,7 +1032,7 @@
 {
 	if (bptr->size) {
 		b_free(bptr);
-		offer_buffers(check->buf_wait.target, tasks_run_queue);
+		offer_buffers(check->buf_wait.target, 1);
 	}
 }
 
diff --git a/src/dynbuf.c b/src/dynbuf.c
index a6d1d40..8b492e2 100644
--- a/src/dynbuf.c
+++ b/src/dynbuf.c
@@ -96,32 +96,26 @@
 	fflush(o);
 }
 
-/* see offer_buffer() for details */
-void __offer_buffer(void *from, unsigned int threshold)
+/* see offer_buffers() for details */
+void __offer_buffers(void *from, unsigned int count)
 {
 	struct buffer_wait *wait, *wait_back;
-	int avail;
 
 	/* For now, we consider that all objects need 1 buffer, so we can stop
 	 * waking up them once we have enough of them to eat all the available
 	 * buffers. Note that we don't really know if they are streams or just
 	 * other tasks, but that's a rough estimate. Similarly, for each cached
-	 * event we'll need 1 buffer. If no buffer is currently used, always
-	 * wake up the number of tasks we can offer a buffer based on what is
-	 * allocated, and in any case at least one task per two reserved
-	 * buffers.
+	 * event we'll need 1 buffer.
 	 */
-	avail = pool_head_buffer->allocated - pool_head_buffer->used - global.tune.reserved_bufs / 2;
-
 	list_for_each_entry_safe(wait, wait_back, &ti->buffer_wq, list) {
-		if (avail <= threshold)
+		if (!count)
 			break;
 
 		if (wait->target == from || !wait->wakeup_cb(wait->target))
 			continue;
 
 		LIST_DEL_INIT(&wait->list);
-		avail--;
+		count--;
 	}
 }
 
diff --git a/src/flt_spoe.c b/src/flt_spoe.c
index eee77cb..92a5af4 100644
--- a/src/flt_spoe.c
+++ b/src/flt_spoe.c
@@ -2841,7 +2841,7 @@
 	/* Release the buffer if needed */
 	if (buf->size) {
 		b_free(buf);
-		offer_buffers(buffer_wait->target, tasks_run_queue);
+		offer_buffers(buffer_wait->target, 1);
 	}
 }
 
diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c
index 464a075..494517b 100644
--- a/src/mux_fcgi.c
+++ b/src/mux_fcgi.c
@@ -617,7 +617,7 @@
 {
 	if (bptr->size) {
 		b_free(bptr);
-		offer_buffers(NULL, tasks_run_queue);
+		offer_buffers(NULL, 1);
 	}
 }
 
@@ -631,7 +631,7 @@
 		count++;
 	}
 	if (count)
-		offer_buffers(NULL, tasks_run_queue);
+		offer_buffers(NULL, count);
 }
 
 /* Returns the number of allocatable outgoing streams for the connection taking
@@ -1027,7 +1027,7 @@
 	eb32_delete(&fstrm->by_id);
 	if (b_size(&fstrm->rxbuf)) {
 		b_free(&fstrm->rxbuf);
-		offer_buffers(NULL, tasks_run_queue);
+		offer_buffers(NULL, 1);
 	}
 	if (fstrm->subs)
 		fstrm->subs->events = 0;
@@ -2910,7 +2910,7 @@
 		}
 
 		if (released)
-			offer_buffers(NULL, tasks_run_queue);
+			offer_buffers(NULL, released);
 
 		/* wrote at least one byte, the buffer is not full anymore */
 		if (fconn->flags & (FCGI_CF_MUX_MFULL | FCGI_CF_DEM_MROOM))
@@ -3224,7 +3224,7 @@
 		}
 
 		if (released)
-			offer_buffers(NULL, tasks_run_queue);
+			offer_buffers(NULL, released);
 	}
 
   end:
diff --git a/src/mux_h1.c b/src/mux_h1.c
index 8e17d72..1bfdab6 100644
--- a/src/mux_h1.c
+++ b/src/mux_h1.c
@@ -465,7 +465,7 @@
 {
 	if (bptr->size) {
 		b_free(bptr);
-		offer_buffers(h1c->buf_wait.target, tasks_run_queue);
+		offer_buffers(h1c->buf_wait.target, 1);
 	}
 }
 
diff --git a/src/mux_h2.c b/src/mux_h2.c
index 30bedc3..09b4268 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -819,7 +819,7 @@
 {
 	if (bptr->size) {
 		b_free(bptr);
-		offer_buffers(NULL, tasks_run_queue);
+		offer_buffers(NULL, 1);
 	}
 }
 
@@ -833,7 +833,7 @@
 		count++;
 	}
 	if (count)
-		offer_buffers(NULL, tasks_run_queue);
+		offer_buffers(NULL, count);
 }
 
 /* returns the number of allocatable outgoing streams for the connection taking
@@ -1393,7 +1393,7 @@
 	eb32_delete(&h2s->by_id);
 	if (b_size(&h2s->rxbuf)) {
 		b_free(&h2s->rxbuf);
-		offer_buffers(NULL, tasks_run_queue);
+		offer_buffers(NULL, 1);
 	}
 
 	if (h2s->subs)
@@ -3740,7 +3740,7 @@
 		}
 
 		if (released)
-			offer_buffers(NULL, tasks_run_queue);
+			offer_buffers(NULL, released);
 
 		/* wrote at least one byte, the buffer is not full anymore */
 		if (sent)
@@ -4049,7 +4049,7 @@
 		}
 
 		if (released)
-			offer_buffers(NULL, tasks_run_queue);
+			offer_buffers(NULL, released);
 	}
 
 	/* in any case this connection must not be considered idle anymore */
@@ -6242,7 +6242,7 @@
 			cs->flags |= CS_FL_ERROR;
 		if (b_size(&h2s->rxbuf)) {
 			b_free(&h2s->rxbuf);
-			offer_buffers(NULL, tasks_run_queue);
+			offer_buffers(NULL, 1);
 		}
 	}
 
diff --git a/src/stream.c b/src/stream.c
index 29d54c7..6d534cd 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -638,9 +638,11 @@
 		LIST_DEL_INIT(&s->buffer_wait.list);
 
 	if (s->req.buf.size || s->res.buf.size) {
+		int count = !!s->req.buf.size + !!s->res.buf.size;
+
 		b_free(&s->req.buf);
 		b_free(&s->res.buf);
-		offer_buffers(NULL, tasks_run_queue);
+		offer_buffers(NULL, count);
 	}
 
 	pool_free(pool_head_uniqueid, s->unique_id.ptr);
@@ -788,11 +790,11 @@
 	int offer = 0;
 
 	if (c_size(&s->req) && c_empty(&s->req)) {
-		offer = 1;
+		offer++;
 		b_free(&s->req.buf);
 	}
 	if (c_size(&s->res) && c_empty(&s->res)) {
-		offer = 1;
+		offer++;
 		b_free(&s->res.buf);
 	}
 
@@ -800,7 +802,7 @@
 	 * someone waiting, we can wake up a waiter and offer them.
 	 */
 	if (offer)
-		offer_buffers(s, tasks_run_queue);
+		offer_buffers(s, offer);
 }
 
 void stream_process_counters(struct stream *s)