MEDIUM: dynbuf: remove last usages of b_alloc_margin()

The function's purpose used to be to fail a buffer allocation if that
allocation wouldn't result in leaving some buffers available. Thus,
some allocations could succeed and others fail for the sole purpose of
trying to provide 2 buffers at once to process_stream(). But things
have changed a lot since 1.7 broke the promise that process_stream()
would always succeed with only two buffers, and later the thread-local
pool caches started keeping certain buffers available that are not
accounted for in the global pool, so that local allocators cannot guess
anything from the number of currently available pools.

Let's just replace all remaining uses of b_alloc_margin() with b_alloc()
once and for all.
diff --git a/src/applet.c b/src/applet.c
index eafce3d..a6d7f43 100644
--- a/src/applet.c
+++ b/src/applet.c
@@ -48,7 +48,7 @@
 		return 0;
 
 	/* allocation possible now ? */
-	if (!b_alloc_margin(&si_ic(si)->buf, global.tune.reserved_bufs)) {
+	if (!b_alloc(&si_ic(si)->buf)) {
 		si_rx_buff_blk(si);
 		return 0;
 	}
diff --git a/src/check.c b/src/check.c
index c32e940..96276c1 100644
--- a/src/check.c
+++ b/src/check.c
@@ -994,12 +994,12 @@
 {
 	struct check *check = target;
 
-	if ((check->state & CHK_ST_IN_ALLOC) && b_alloc_margin(&check->bi, 0)) {
+	if ((check->state & CHK_ST_IN_ALLOC) && b_alloc(&check->bi)) {
 		check->state &= ~CHK_ST_IN_ALLOC;
 		tasklet_wakeup(check->wait_list.tasklet);
 		return 1;
 	}
-	if ((check->state & CHK_ST_OUT_ALLOC) && b_alloc_margin(&check->bo, 0)) {
+	if ((check->state & CHK_ST_OUT_ALLOC) && b_alloc(&check->bo)) {
 		check->state &= ~CHK_ST_OUT_ALLOC;
 		tasklet_wakeup(check->wait_list.tasklet);
 		return 1;
@@ -1016,7 +1016,7 @@
 	struct buffer *buf = NULL;
 
 	if (likely(!LIST_ADDED(&check->buf_wait.list)) &&
-	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
+	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		check->buf_wait.target = check;
 		check->buf_wait.wakeup_cb = check_buf_available;
 		LIST_ADDQ(&ti->buffer_wq, &check->buf_wait.list);
diff --git a/src/flt_spoe.c b/src/flt_spoe.c
index 0397470..2d7b3b8 100644
--- a/src/flt_spoe.c
+++ b/src/flt_spoe.c
@@ -2837,7 +2837,7 @@
 	if (LIST_ADDED(&buffer_wait->list))
 		LIST_DEL_INIT(&buffer_wait->list);
 
-	if (b_alloc_margin(buf, global.tune.reserved_bufs))
+	if (b_alloc(buf))
 		return 1;
 
 	LIST_ADDQ(&ti->buffer_wq, &buffer_wait->list);
diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c
index 48f11f2..780d721 100644
--- a/src/mux_fcgi.c
+++ b/src/mux_fcgi.c
@@ -570,14 +570,14 @@
 	struct fcgi_conn *fconn = target;
 	struct fcgi_strm *fstrm;
 
-	if ((fconn->flags & FCGI_CF_DEM_DALLOC) && b_alloc_margin(&fconn->dbuf, 0)) {
+	if ((fconn->flags & FCGI_CF_DEM_DALLOC) && b_alloc(&fconn->dbuf)) {
 		TRACE_STATE("unblocking fconn, dbuf allocated", FCGI_EV_FCONN_RECV|FCGI_EV_FCONN_BLK|FCGI_EV_FCONN_WAKE, fconn->conn);
 		fconn->flags &= ~FCGI_CF_DEM_DALLOC;
 		fcgi_conn_restart_reading(fconn, 1);
 		return 1;
 	}
 
-	if ((fconn->flags & FCGI_CF_MUX_MALLOC) && b_alloc_margin(br_tail(fconn->mbuf), 0)) {
+	if ((fconn->flags & FCGI_CF_MUX_MALLOC) && b_alloc(br_tail(fconn->mbuf))) {
 		TRACE_STATE("unblocking fconn, mbuf allocated", FCGI_EV_FCONN_SEND|FCGI_EV_FCONN_BLK|FCGI_EV_FCONN_WAKE, fconn->conn);
 		fconn->flags &= ~FCGI_CF_MUX_MALLOC;
 		if (fconn->flags & FCGI_CF_DEM_MROOM) {
@@ -589,7 +589,7 @@
 
 	if ((fconn->flags & FCGI_CF_DEM_SALLOC) &&
 	    (fstrm = fcgi_conn_st_by_id(fconn, fconn->dsi)) && fstrm->cs &&
-	    b_alloc_margin(&fstrm->rxbuf, 0)) {
+	    b_alloc(&fstrm->rxbuf)) {
 		TRACE_STATE("unblocking fstrm, rxbuf allocated", FCGI_EV_STRM_RECV|FCGI_EV_FSTRM_BLK|FCGI_EV_STRM_WAKE, fconn->conn, fstrm);
 		fconn->flags &= ~FCGI_CF_DEM_SALLOC;
 		fcgi_conn_restart_reading(fconn, 1);
@@ -605,7 +605,7 @@
 	struct buffer *buf = NULL;
 
 	if (likely(!LIST_ADDED(&fconn->buf_wait.list)) &&
-	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
+	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		fconn->buf_wait.target = fconn;
 		fconn->buf_wait.wakeup_cb = fcgi_buf_available;
 		LIST_ADDQ(&ti->buffer_wq, &fconn->buf_wait.list);
diff --git a/src/mux_h1.c b/src/mux_h1.c
index 3c80e85..ca2e8a8 100644
--- a/src/mux_h1.c
+++ b/src/mux_h1.c
@@ -415,7 +415,7 @@
 {
 	struct h1c *h1c = target;
 
-	if ((h1c->flags & H1C_F_IN_ALLOC) && b_alloc_margin(&h1c->ibuf, 0)) {
+	if ((h1c->flags & H1C_F_IN_ALLOC) && b_alloc(&h1c->ibuf)) {
 		TRACE_STATE("unblocking h1c, ibuf allocated", H1_EV_H1C_RECV|H1_EV_H1C_BLK|H1_EV_H1C_WAKE, h1c->conn);
 		h1c->flags &= ~H1C_F_IN_ALLOC;
 		if (h1_recv_allowed(h1c))
@@ -423,7 +423,7 @@
 		return 1;
 	}
 
-	if ((h1c->flags & H1C_F_OUT_ALLOC) && b_alloc_margin(&h1c->obuf, 0)) {
+	if ((h1c->flags & H1C_F_OUT_ALLOC) && b_alloc(&h1c->obuf)) {
 		TRACE_STATE("unblocking h1s, obuf allocated", H1_EV_TX_DATA|H1_EV_H1S_BLK|H1_EV_STRM_WAKE, h1c->conn, h1c->h1s);
 		h1c->flags &= ~H1C_F_OUT_ALLOC;
 		if (h1c->h1s)
@@ -431,7 +431,7 @@
 		return 1;
 	}
 
-	if ((h1c->flags & H1C_F_IN_SALLOC) && h1c->h1s && b_alloc_margin(&h1c->h1s->rxbuf, 0)) {
+	if ((h1c->flags & H1C_F_IN_SALLOC) && h1c->h1s && b_alloc(&h1c->h1s->rxbuf)) {
 		TRACE_STATE("unblocking h1c, stream rxbuf allocated", H1_EV_H1C_RECV|H1_EV_H1C_BLK|H1_EV_H1C_WAKE, h1c->conn);
 		h1c->flags &= ~H1C_F_IN_SALLOC;
 		tasklet_wakeup(h1c->wait_event.tasklet);
@@ -449,7 +449,7 @@
 	struct buffer *buf = NULL;
 
 	if (likely(!LIST_ADDED(&h1c->buf_wait.list)) &&
-	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
+	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		h1c->buf_wait.target = h1c;
 		h1c->buf_wait.wakeup_cb = h1_buf_available;
 		LIST_ADDQ(&ti->buffer_wq, &h1c->buf_wait.list);
diff --git a/src/mux_h2.c b/src/mux_h2.c
index 68b3462..06abbc1 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -775,13 +775,13 @@
 	struct h2c *h2c = target;
 	struct h2s *h2s;
 
-	if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc_margin(&h2c->dbuf, 0)) {
+	if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc(&h2c->dbuf)) {
 		h2c->flags &= ~H2_CF_DEM_DALLOC;
 		h2c_restart_reading(h2c, 1);
 		return 1;
 	}
 
-	if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc_margin(br_tail(h2c->mbuf), 0)) {
+	if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc(br_tail(h2c->mbuf))) {
 		h2c->flags &= ~H2_CF_MUX_MALLOC;
 
 		if (h2c->flags & H2_CF_DEM_MROOM) {
@@ -793,7 +793,7 @@
 
 	if ((h2c->flags & H2_CF_DEM_SALLOC) &&
 	    (h2s = h2c_st_by_id(h2c, h2c->dsi)) && h2s->cs &&
-	    b_alloc_margin(&h2s->rxbuf, 0)) {
+	    b_alloc(&h2s->rxbuf)) {
 		h2c->flags &= ~H2_CF_DEM_SALLOC;
 		h2c_restart_reading(h2c, 1);
 		return 1;
@@ -807,7 +807,7 @@
 	struct buffer *buf = NULL;
 
 	if (likely(!LIST_ADDED(&h2c->buf_wait.list)) &&
-	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
+	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		h2c->buf_wait.target = h2c;
 		h2c->buf_wait.wakeup_cb = h2_buf_available;
 		LIST_ADDQ(&ti->buffer_wq, &h2c->buf_wait.list);
diff --git a/src/stream.c b/src/stream.c
index 54c6a77..b0c2bab 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -329,10 +329,10 @@
 	struct stream *s = arg;
 
 	if (!s->req.buf.size && !s->req.pipe && (s->si[0].flags & SI_FL_RXBLK_BUFF) &&
-	    b_alloc_margin(&s->req.buf, global.tune.reserved_bufs))
+	    b_alloc(&s->req.buf))
 		si_rx_buff_rdy(&s->si[0]);
 	else if (!s->res.buf.size && !s->res.pipe && (s->si[1].flags & SI_FL_RXBLK_BUFF) &&
-		 b_alloc_margin(&s->res.buf, 0))
+		 b_alloc(&s->res.buf))
 		si_rx_buff_rdy(&s->si[1]);
 	else
 		return 0;
@@ -772,7 +772,7 @@
 	if (LIST_ADDED(&s->buffer_wait.list))
 		LIST_DEL_INIT(&s->buffer_wait.list);
 
-	if (b_alloc_margin(&s->res.buf, 0))
+	if (b_alloc(&s->res.buf))
 		return 1;
 
 	LIST_ADDQ(&ti->buffer_wq, &s->buffer_wait.list);