CLEANUP: channel: use "channel" instead of "buffer" in function names

This is a massive rename of most functions so that they use the word
"channel" instead of the word "buffer" in their names.

It concerns the following functions (new names):

unsigned long long channel_forward(struct channel *buf, unsigned long long bytes);
static inline void channel_init(struct channel *buf)
static inline int channel_input_closed(struct channel *buf)
static inline int channel_output_closed(struct channel *buf)
static inline void channel_check_timeouts(struct channel *b)
static inline void channel_erase(struct channel *buf)
static inline void channel_shutr_now(struct channel *buf)
static inline void channel_shutw_now(struct channel *buf)
static inline void channel_abort(struct channel *buf)
static inline void channel_stop_hijacker(struct channel *buf)
static inline void channel_auto_connect(struct channel *buf)
static inline void channel_dont_connect(struct channel *buf)
static inline void channel_auto_close(struct channel *buf)
static inline void channel_dont_close(struct channel *buf)
static inline void channel_auto_read(struct channel *buf)
static inline void channel_dont_read(struct channel *buf)

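As an illustration (sketch only, not part of the patch), a typical call
site such as the one in session.c simply changes as follows; "s->req" is
a struct channel pointer as before:

    /* before */
    buffer_auto_read(s->req);
    buffer_auto_connect(s->req);
    buffer_auto_close(s->req);
    buffer_forward(s->req, CHN_INFINITE_FORWARD);

    /* after */
    channel_auto_read(s->req);
    channel_auto_connect(s->req);
    channel_auto_close(s->req);
    channel_forward(s->req, CHN_INFINITE_FORWARD);
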
Some functions provided by channel.[ch] have kept their "buffer" name
because they really are designed to act on the buffer according to some
information gathered from the channel. They have been moved together to
the same place in the file for better readability, but were not changed
at all.

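For example (a sketch based on the hunks below, not new code), the
distinction looks like this at a call site, where <chn> stands for a
struct channel pointer:

    /* kept their "buffer" name: they act on the buffer itself */
    max = buffer_max_len(chn);
    buffer_flush(&chn->buf);

    /* renamed: they act on the channel state */
    if (channel_input_closed(chn))
        return -2;
    if (channel_full(chn))
        return -1;
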
The "buffer" memory pool was also renamed "channel".
diff --git a/src/channel.c b/src/channel.c
index 1597b0f..65fe665 100644
--- a/src/channel.c
+++ b/src/channel.c
@@ -22,28 +22,25 @@
 #include <types/global.h>
 
 
-/* Note: this code has not yet been completely cleaned up and still refers to
- * the word "buffer" when "channel" is meant instead.
- */
-struct pool_head *pool2_buffer;
+struct pool_head *pool2_channel;
 
 
 /* perform minimal intializations, report 0 in case of error, 1 if OK. */
-int init_buffer()
+int init_channel()
 {
-	pool2_buffer = create_pool("buffer", sizeof(struct channel) + global.tune.bufsize, MEM_F_SHARED);
-	return pool2_buffer != NULL;
+	pool2_channel = create_pool("channel", sizeof(struct channel) + global.tune.bufsize, MEM_F_SHARED);
+	return pool2_channel != NULL;
 }
 
-/* Schedule up to <bytes> more bytes to be forwarded by the buffer without notifying
- * the task. Any pending data in the buffer is scheduled to be sent as well,
- * in the limit of the number of bytes to forward. This must be the only method
- * to use to schedule bytes to be sent. If the requested number is too large, it
- * is automatically adjusted. The number of bytes taken into account is returned.
- * Directly touching ->to_forward will cause lockups when ->o goes down to
- * zero if nobody is ready to push the remaining data.
+/* Schedule up to <bytes> more bytes to be forwarded via the channel without
+ * notifying the owner task. Any data pending in the buffer are scheduled to be
+ * sent as well, in the limit of the number of bytes to forward. This must be
+ * the only method to use to schedule bytes to be forwarded. If the requested
+ * number is too large, it is automatically adjusted. The number of bytes taken
+ * into account is returned. Directly touching ->to_forward will cause lockups
+ * when buf->o goes down to zero if nobody is ready to push the remaining data.
  */
-unsigned long long buffer_forward(struct channel *buf, unsigned long long bytes)
+unsigned long long channel_forward(struct channel *buf, unsigned long long bytes)
 {
 	unsigned int new_forward;
 	unsigned int forwarded;
@@ -94,12 +91,12 @@
 	return bytes;
 }
 
-/* writes <len> bytes from message <msg> to buffer <buf>. Returns -1 in case of
- * success, -2 if the message is larger than the buffer size, or the number of
- * bytes available otherwise. The send limit is automatically adjusted with the
- * amount of data written. FIXME-20060521: handle unaligned data.
- * Note: this function appends data to the buffer's output and possibly overwrites
- * any pending input data which are assumed not to exist.
+/* writes <len> bytes from message <msg> to the channel's buffer. Returns -1 in
+ * case of success, -2 if the message is larger than the buffer size, or the
+ * number of bytes available otherwise. The send limit is automatically
+ * adjusted to the amount of data written. FIXME-20060521: handle unaligned
+ * data. Note: this function appends data to the buffer's output and possibly
+ * overwrites any pending input data which are assumed not to exist.
  */
 int bo_inject(struct channel *buf, const char *msg, int len)
 {
@@ -129,15 +126,15 @@
 	return -1;
 }
 
-/* Tries to copy character <c> into buffer <buf> after length controls. The
- * ->o and to_forward pointers are updated. If the buffer's input is
- * closed, -2 is returned. If there is not enough room left in the buffer, -1
- * is returned. Otherwise the number of bytes copied is returned (1). Buffer
- * flag READ_PARTIAL is updated if some data can be transferred.
+/* Tries to copy character <c> into the channel's buffer after some length
+ * controls. The buf->o and to_forward pointers are updated. If the channel
+ * input is closed, -2 is returned. If there is not enough room left in the
+ * buffer, -1 is returned. Otherwise the number of bytes copied is returned
+ * (1). Channel flag READ_PARTIAL is updated if some data can be transferred.
  */
 int bi_putchr(struct channel *buf, char c)
 {
-	if (unlikely(buffer_input_closed(buf)))
+	if (unlikely(channel_input_closed(buf)))
 		return -2;
 
 	if (channel_full(buf))
@@ -158,18 +155,19 @@
 	return 1;
 }
 
-/* Tries to copy block <blk> at once into buffer <buf> after length controls.
- * The ->o and to_forward pointers are updated. If the buffer's input is
- * closed, -2 is returned. If the block is too large for this buffer, -3 is
- * returned. If there is not enough room left in the buffer, -1 is returned.
- * Otherwise the number of bytes copied is returned (0 being a valid number).
- * Buffer flag READ_PARTIAL is updated if some data can be transferred.
+/* Tries to copy block <blk> at once into the channel's buffer after length
+ * controls. The buf->o and to_forward pointers are updated. If the channel
+ * input is closed, -2 is returned. If the block is too large for this buffer,
+ * -3 is returned. If there is not enough room left in the buffer, -1 is
+ * returned. Otherwise the number of bytes copied is returned (0 being a valid
+ * number). Channel flag READ_PARTIAL is updated if some data can be
+ * transferred.
  */
 int bi_putblk(struct channel *buf, const char *blk, int len)
 {
 	int max;
 
-	if (unlikely(buffer_input_closed(buf)))
+	if (unlikely(channel_input_closed(buf)))
 		return -2;
 
 	max = buffer_max_len(buf);
@@ -210,12 +208,12 @@
 	return len;
 }
 
-/* Gets one text line out of a buffer from a stream interface.
+/* Gets one text line out of a channel's buffer from a stream interface.
  * Return values :
  *   >0 : number of bytes read. Includes the \n if present before len or end.
  *   =0 : no '\n' before end found. <str> is left undefined.
  *   <0 : no more bytes readable because output is shut.
- * The buffer status is not changed. The caller must call bo_skip() to
+ * The channel status is not changed. The caller must call bo_skip() to
  * update it. The '\n' is waited for as long as neither the buffer nor the
  * output are full. If either of them is full, the string may be returned
  * as is, without the '\n'.
@@ -260,12 +258,12 @@
 	return ret;
 }
 
-/* Gets one full block of data at once from a buffer, optionally from a
- * specific offset. Return values :
+/* Gets one full block of data at once from a channel's buffer, optionally from
+ * a specific offset. Return values :
  *   >0 : number of bytes read, equal to requested size.
  *   =0 : not enough data available. <blk> is left undefined.
  *   <0 : no more bytes readable because output is shut.
- * The buffer status is not changed. The caller must call bo_skip() to
+ * The channel status is not changed. The caller must call bo_skip() to
  * update it.
  */
 int bo_getblk(struct channel *buf, char *blk, int len, int offset)
diff --git a/src/frontend.c b/src/frontend.c
index 07c4462..8b16836 100644
--- a/src/frontend.c
+++ b/src/frontend.c
@@ -190,8 +190,8 @@
 
 	/* note: this should not happen anymore since there's always at least the switching rules */
 	if (!s->req->analysers) {
-		buffer_auto_connect(s->req);  /* don't wait to establish connection */
-		buffer_auto_close(s->req);    /* let the producer forward close requests */
+		channel_auto_connect(s->req);  /* don't wait to establish connection */
+		channel_auto_close(s->req);    /* let the producer forward close requests */
 	}
 
 	s->req->rto = s->fe->timeout.client;
@@ -400,12 +400,12 @@
 	if ((req->flags & CF_SHUTR) || buffer_full(&req->buf, global.tune.maxrewrite))
 		goto fail;
 
-	buffer_dont_connect(s->req);
+	channel_dont_connect(s->req);
 	return 0;
 
  fail:
-	buffer_abort(req);
-	buffer_abort(s->rep);
+	channel_abort(req);
+	channel_abort(s->rep);
 	req->analysers = 0;
 
 	s->fe->fe_counters.failed_req++;
diff --git a/src/haproxy.c b/src/haproxy.c
index 3427dcc..5d537df 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -584,8 +584,8 @@
 	global_listener_queue_task->process = manage_global_listener_queue;
 	global_listener_queue_task->expire = TICK_ETERNITY;
 
-	/* now we know the buffer size, we can initialize the buffers */
-	init_buffer();
+	/* now we know the buffer size, we can initialize the channels and buffers */
+	init_channel();
 
 	if (have_appsession)
 		appsession_init();
@@ -1053,7 +1053,7 @@
 	}
 
 	pool_destroy2(pool2_session);
-	pool_destroy2(pool2_buffer);
+	pool_destroy2(pool2_channel);
 	pool_destroy2(pool2_requri);
 	pool_destroy2(pool2_task);
 	pool_destroy2(pool2_capture);
diff --git a/src/peers.c b/src/peers.c
index 386cca9..312c248 100644
--- a/src/peers.c
+++ b/src/peers.c
@@ -1222,11 +1222,11 @@
 	txn->hdr_idx.v = NULL;
 	txn->hdr_idx.size = txn->hdr_idx.used = 0;
 
-	if ((s->req = pool_alloc2(pool2_buffer)) == NULL)
+	if ((s->req = pool_alloc2(pool2_channel)) == NULL)
 		goto out_fail_req; /* no memory */
 
 	s->req->buf.size = global.tune.bufsize;
-	buffer_init(s->req);
+	channel_init(s->req);
 	s->req->prod = &s->si[0];
 	s->req->cons = &s->si[1];
 	s->si[0].ib = s->si[1].ob = s->req;
@@ -1238,18 +1238,18 @@
 
 	/* note: this should not happen anymore since there's always at least the switching rules */
 	if (!s->req->analysers) {
-		buffer_auto_connect(s->req);/* don't wait to establish connection */
-		buffer_auto_close(s->req);/* let the producer forward close requests */
+		channel_auto_connect(s->req);/* don't wait to establish connection */
+		channel_auto_close(s->req);/* let the producer forward close requests */
 	}
 
 	s->req->rto = s->fe->timeout.client;
 	s->req->wto = s->be->timeout.server;
 
-	if ((s->rep = pool_alloc2(pool2_buffer)) == NULL)
+	if ((s->rep = pool_alloc2(pool2_channel)) == NULL)
 		goto out_fail_rep; /* no memory */
 
 	s->rep->buf.size = global.tune.bufsize;
-	buffer_init(s->rep);
+	channel_init(s->rep);
 	s->rep->prod = &s->si[1];
 	s->rep->cons = &s->si[0];
 	s->si[0].ob = s->si[1].ib = s->rep;
@@ -1283,7 +1283,7 @@
 
 	/* Error unrolling */
  out_fail_rep:
-	pool_free2(pool2_buffer, s->req);
+	pool_free2(pool2_channel, s->req);
  out_fail_req:
 	task_free(t);
  out_free_session:
diff --git a/src/proto_http.c b/src/proto_http.c
index ce462e1..e671225 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -649,12 +649,12 @@
 static void http_server_error(struct session *t, struct stream_interface *si,
 			      int err, int finst, int status, const struct chunk *msg)
 {
-	buffer_auto_read(si->ob);
-	buffer_abort(si->ob);
-	buffer_auto_close(si->ob);
-	buffer_erase(si->ob);
-	buffer_auto_close(si->ib);
-	buffer_auto_read(si->ib);
+	channel_auto_read(si->ob);
+	channel_abort(si->ob);
+	channel_auto_close(si->ob);
+	channel_erase(si->ob);
+	channel_auto_close(si->ib);
+	channel_auto_read(si->ib);
 	if (status > 0 && msg) {
 		t->txn.status = status;
 		bo_inject(si->ib, msg->str, msg->len);
@@ -2026,7 +2026,7 @@
 				if (req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))
 					goto failed_keep_alive;
 				/* some data has still not left the buffer, wake us once that's done */
-				buffer_dont_connect(req);
+				channel_dont_connect(req);
 				req->flags |= CF_READ_DONTWAIT; /* try to get back here ASAP */
 				return 0;
 			}
@@ -2050,7 +2050,7 @@
 				if (s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))
 					goto failed_keep_alive;
 				/* don't let a connection request be initiated */
-				buffer_dont_connect(req);
+				channel_dont_connect(req);
 				s->rep->flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
 				s->rep->analysers |= an_bit; /* wake us up once it changes */
 				return 0;
@@ -2211,7 +2211,7 @@
 			return 0;
 		}
 
-		buffer_dont_connect(req);
+		channel_dont_connect(req);
 		req->flags |= CF_READ_DONTWAIT; /* try to get back here ASAP */
 		s->rep->flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
 #ifdef TCP_QUICKACK
@@ -2784,7 +2784,7 @@
 
 	if (unlikely(msg->msg_state < HTTP_MSG_BODY)) {
 		/* we need more data */
-		buffer_dont_connect(req);
+		channel_dont_connect(req);
 		return 0;
 	}
 
@@ -2866,11 +2866,11 @@
 		 * eventually expire. We build the tarpit as an analyser.
 		 */
 		if (txn->flags & TX_CLTARPIT) {
-			buffer_erase(s->req);
+			channel_erase(s->req);
 			/* wipe the request out so that we can drop the connection early
 			 * if the client closes first.
 			 */
-			buffer_dont_connect(req);
+			channel_dont_connect(req);
 			req->analysers = 0; /* remove switching rules etc... */
 			req->analysers |= AN_REQ_HTTP_TARPIT;
 			req->analyse_exp = tick_add_ifset(now_ms,  s->be->timeout.tarpit);
@@ -3017,7 +3017,7 @@
 				if (!http_process_req_stat_post(s->rep->prod, txn, req)) {
 					/* we need more data */
 					req->analysers |= an_bit;
-					buffer_dont_connect(req);
+					channel_dont_connect(req);
 					return 0;
 				}
 			} else {
@@ -3244,7 +3244,7 @@
 
 	if (unlikely(msg->msg_state < HTTP_MSG_BODY)) {
 		/* we need more data */
-		buffer_dont_connect(req);
+		channel_dont_connect(req);
 		return 0;
 	}
 
@@ -3454,7 +3454,7 @@
 	    s->txn.meth == HTTP_METH_POST && s->be->url_param_name != NULL &&
 	    s->be->url_param_post_limit != 0 &&
 	    (msg->flags & (HTTP_MSGF_CNT_LEN|HTTP_MSGF_TE_CHNK))) {
-		buffer_dont_connect(req);
+		channel_dont_connect(req);
 		req->analysers |= AN_REQ_HTTP_BODY;
 	}
 
@@ -3526,7 +3526,7 @@
 	 * timeout. We just have to check that the client is still
 	 * there and that the timeout has not expired.
 	 */
-	buffer_dont_connect(req);
+	channel_dont_connect(req);
 	if ((req->flags & (CF_SHUTR|CF_READ_ERROR)) == 0 &&
 	    !tick_is_expired(req->analyse_exp, now_ms))
 		return 0;
@@ -3660,7 +3660,7 @@
 		 * request timeout once at the beginning of the
 		 * request.
 		 */
-		buffer_dont_connect(req);
+		channel_dont_connect(req);
 		if (!tick_isset(req->analyse_exp))
 			req->analyse_exp = tick_add_ifset(now_ms, s->be->timeout.httpreq);
 		return 0;
@@ -3859,10 +3859,10 @@
 	}
 
 	/* we're removing the analysers, we MUST re-enable events detection */
-	buffer_auto_read(s->req);
-	buffer_auto_close(s->req);
-	buffer_auto_read(s->rep);
-	buffer_auto_close(s->rep);
+	channel_auto_read(s->req);
+	channel_auto_close(s->req);
+	channel_auto_read(s->rep);
+	channel_auto_close(s->rep);
 
 	s->req->analysers = s->listener->analysers;
 	s->req->analysers &= ~AN_REQ_DECODE_PROXY;
@@ -3900,7 +3900,7 @@
 		 * (eg: Linux).
 		 */
 		if (!(s->be->options & PR_O_ABRT_CLOSE) && txn->meth != HTTP_METH_POST)
-			buffer_dont_read(buf);
+			channel_dont_read(buf);
 
 		if (txn->rsp.msg_state == HTTP_MSG_ERROR)
 			goto wait_other_side;
@@ -3914,7 +3914,7 @@
 
 		if (txn->rsp.msg_state == HTTP_MSG_TUNNEL) {
 			/* if any side switches to tunnel mode, the other one does too */
-			buffer_auto_read(buf);
+			channel_auto_read(buf);
 			txn->req.msg_state = HTTP_MSG_TUNNEL;
 			goto wait_other_side;
 		}
@@ -3928,7 +3928,7 @@
 		if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_SCL) {
 			/* Server-close mode : queue a connection close to the server */
 			if (!(buf->flags & (CF_SHUTW|CF_SHUTW_NOW)))
-				buffer_shutw_now(buf);
+				channel_shutw_now(buf);
 		}
 		else if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_CLO) {
 			/* Option forceclose is set, or either side wants to close,
@@ -3937,8 +3937,8 @@
 			 * once both states are CLOSED.
 			 */
 			if (!(buf->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
-				buffer_shutr_now(buf);
-				buffer_shutw_now(buf);
+				channel_shutr_now(buf);
+				channel_shutw_now(buf);
 			}
 		}
 		else {
@@ -3947,7 +3947,7 @@
 			 * in tunnel mode, so we're left with keep-alive only.
 			 * This mode is currently not implemented, we switch to tunnel mode.
 			 */
-			buffer_auto_read(buf);
+			channel_auto_read(buf);
 			txn->req.msg_state = HTTP_MSG_TUNNEL;
 		}
 
@@ -4017,7 +4017,7 @@
 		 * while the request is being uploaded, so we don't disable
 		 * reading.
 		 */
-		/* buffer_dont_read(buf); */
+		/* channel_dont_read(buf); */
 
 		if (txn->req.msg_state == HTTP_MSG_ERROR)
 			goto wait_other_side;
@@ -4033,7 +4033,7 @@
 
 		if (txn->req.msg_state == HTTP_MSG_TUNNEL) {
 			/* if any side switches to tunnel mode, the other one does too */
-			buffer_auto_read(buf);
+			channel_auto_read(buf);
 			txn->rsp.msg_state = HTTP_MSG_TUNNEL;
 			goto wait_other_side;
 		}
@@ -4051,7 +4051,7 @@
 			 * catch that for the final cleanup.
 			 */
 			if (!(buf->flags & (CF_SHUTR|CF_SHUTR_NOW)))
-				buffer_shutr_now(buf);
+				channel_shutr_now(buf);
 		}
 		else if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_CLO) {
 			/* Option forceclose is set, or either side wants to close,
@@ -4060,8 +4060,8 @@
 			 * once both states are CLOSED.
 			 */
 			if (!(buf->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
-				buffer_shutr_now(buf);
-				buffer_shutw_now(buf);
+				channel_shutr_now(buf);
+				channel_shutw_now(buf);
 			}
 		}
 		else {
@@ -4070,7 +4070,7 @@
 			 * in tunnel mode, so we're left with keep-alive only.
 			 * This mode is currently not implemented, we switch to tunnel mode.
 			 */
-			buffer_auto_read(buf);
+			channel_auto_read(buf);
 			txn->rsp.msg_state = HTTP_MSG_TUNNEL;
 		}
 
@@ -4110,8 +4110,8 @@
 	http_msg_closed:
 		/* drop any pending data */
 		bi_erase(buf);
-		buffer_auto_close(buf);
-		buffer_auto_read(buf);
+		channel_auto_close(buf);
+		channel_auto_read(buf);
 		goto wait_other_side;
 	}
 
@@ -4158,23 +4158,23 @@
 	    (txn->req.msg_state == HTTP_MSG_CLOSED &&
 	     txn->rsp.msg_state == HTTP_MSG_CLOSED)) {
 		s->req->analysers = 0;
-		buffer_auto_close(s->req);
-		buffer_auto_read(s->req);
+		channel_auto_close(s->req);
+		channel_auto_read(s->req);
 		s->rep->analysers = 0;
-		buffer_auto_close(s->rep);
-		buffer_auto_read(s->rep);
+		channel_auto_close(s->rep);
+		channel_auto_read(s->rep);
 	}
 	else if (txn->rsp.msg_state == HTTP_MSG_CLOSED ||
 		 txn->rsp.msg_state == HTTP_MSG_ERROR ||
 		 txn->req.msg_state == HTTP_MSG_ERROR ||
 		 (s->rep->flags & CF_SHUTW)) {
 		s->rep->analysers = 0;
-		buffer_auto_close(s->rep);
-		buffer_auto_read(s->rep);
+		channel_auto_close(s->rep);
+		channel_auto_read(s->rep);
 		s->req->analysers = 0;
-		buffer_abort(s->req);
-		buffer_auto_close(s->req);
-		buffer_auto_read(s->req);
+		channel_abort(s->req);
+		channel_auto_close(s->req);
+		channel_auto_read(s->req);
 		bi_erase(s->req);
 	}
 	else if (txn->req.msg_state == HTTP_MSG_CLOSED &&
@@ -4221,7 +4221,7 @@
 	}
 
 	/* in most states, we should abort in case of early close */
-	buffer_auto_close(req);
+	channel_auto_close(req);
 
 	/* Note that we don't have to send 100-continue back because we don't
 	 * need the data to complete our job, and it's up to the server to
@@ -4254,7 +4254,7 @@
 			msg->sol = msg->sov;
 			msg->next -= bytes; /* will be forwarded */
 			msg->chunk_len += bytes;
-			msg->chunk_len -= buffer_forward(req, msg->chunk_len);
+			msg->chunk_len -= channel_forward(req, msg->chunk_len);
 		}
 
 		if (msg->msg_state == HTTP_MSG_DATA) {
@@ -4321,7 +4321,7 @@
 			/* for keep-alive we don't want to forward closes on DONE */
 			if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_KAL ||
 			    (txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_SCL)
-				buffer_dont_close(req);
+				channel_dont_close(req);
 			if (http_resync_states(s)) {
 				/* some state changes occurred, maybe the analyser
 				 * was disabled too.
@@ -4347,8 +4347,8 @@
 			 * request.
 			 */
 			if (s->be->options & PR_O_ABRT_CLOSE) {
-				buffer_auto_read(req);
-				buffer_auto_close(req);
+				channel_auto_read(req);
+				channel_auto_close(req);
 			}
 			else if (s->txn.meth == HTTP_METH_POST) {
 				/* POST requests may require to read extra CRLF
@@ -4356,7 +4356,7 @@
 				 * an RST to be sent upon close on some systems
 				 * (eg: Linux).
 				 */
-				buffer_auto_read(req);
+				channel_auto_read(req);
 			}
 
 			return 0;
@@ -4391,7 +4391,7 @@
 	 * chunks even if the client has closed, so we don't want to set CF_DONTCLOSE.
 	 */
 	if (msg->flags & HTTP_MSGF_TE_CHNK)
-		buffer_dont_close(req);
+		channel_dont_close(req);
 
 	/* We know that more data are expected, but we couldn't send more that
 	 * what we did. So we always set the CF_EXPECT_MORE flag so that the
@@ -4515,7 +4515,7 @@
 				/* some data has still not left the buffer, wake us once that's done */
 				if (rep->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))
 					goto abort_response;
-				buffer_dont_close(rep);
+				channel_dont_close(rep);
 				rep->flags |= CF_READ_DONTWAIT; /* try to get back here ASAP */
 				return 0;
 			}
@@ -4577,7 +4577,7 @@
 				health_adjust(target_srv(&s->target), HANA_STATUS_HTTP_HDRRSP);
 			}
 		abort_response:
-			buffer_auto_close(rep);
+			channel_auto_close(rep);
 			rep->analysers = 0;
 			txn->status = 502;
 			rep->prod->flags |= SI_FL_NOLINGER;
@@ -4610,7 +4610,7 @@
 				health_adjust(target_srv(&s->target), HANA_STATUS_HTTP_READ_ERROR);
 			}
 
-			buffer_auto_close(rep);
+			channel_auto_close(rep);
 			rep->analysers = 0;
 			txn->status = 502;
 			rep->prod->flags |= SI_FL_NOLINGER;
@@ -4635,7 +4635,7 @@
 				health_adjust(target_srv(&s->target), HANA_STATUS_HTTP_READ_TIMEOUT);
 			}
 
-			buffer_auto_close(rep);
+			channel_auto_close(rep);
 			rep->analysers = 0;
 			txn->status = 504;
 			rep->prod->flags |= SI_FL_NOLINGER;
@@ -4660,7 +4660,7 @@
 				health_adjust(target_srv(&s->target), HANA_STATUS_HTTP_BROKEN_PIPE);
 			}
 
-			buffer_auto_close(rep);
+			channel_auto_close(rep);
 			rep->analysers = 0;
 			txn->status = 502;
 			rep->prod->flags |= SI_FL_NOLINGER;
@@ -4681,7 +4681,7 @@
 
 			s->be->be_counters.failed_resp++;
 			rep->analysers = 0;
-			buffer_auto_close(rep);
+			channel_auto_close(rep);
 
 			if (!(s->flags & SN_ERR_MASK))
 				s->flags |= SN_ERR_CLICL;
@@ -4692,7 +4692,7 @@
 			return 0;
 		}
 
-		buffer_dont_close(rep);
+		channel_dont_close(rep);
 		return 0;
 	}
 
@@ -4888,7 +4888,7 @@
 	/* end of job, return OK */
 	rep->analysers &= ~an_bit;
 	rep->analyse_exp = TICK_ETERNITY;
-	buffer_auto_close(rep);
+	channel_auto_close(rep);
 	return 1;
 }
 
@@ -5066,7 +5066,7 @@
 		 */
 		if (unlikely(txn->status == 100)) {
 			hdr_idx_init(&txn->hdr_idx);
-			msg->next -= buffer_forward(rep, msg->next);
+			msg->next -= channel_forward(rep, msg->next);
 			msg->msg_state = HTTP_MSG_RPBEFORE;
 			txn->status = 0;
 			rep->analysers |= AN_RES_WAIT_HTTP | an_bit;
@@ -5290,7 +5290,7 @@
 	}
 
 	/* in most states, we should abort in case of early close */
-	buffer_auto_close(res);
+	channel_auto_close(res);
 
 	if (msg->msg_state < HTTP_MSG_CHUNK_SIZE) {
 		/* we have msg->sov which points to the first byte of message body.
@@ -5315,7 +5315,7 @@
 			msg->sol = msg->sov;
 			msg->next -= bytes; /* will be forwarded */
 			msg->chunk_len += bytes;
-			msg->chunk_len -= buffer_forward(res, msg->chunk_len);
+			msg->chunk_len -= channel_forward(res, msg->chunk_len);
 		}
 
 		if (msg->msg_state == HTTP_MSG_DATA) {
@@ -5379,7 +5379,7 @@
 			/* for keep-alive we don't want to forward closes on DONE */
 			if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_KAL ||
 			    (txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_SCL)
-				buffer_dont_close(res);
+				channel_dont_close(res);
 			if (http_resync_states(s)) {
 				http_silent_debug(__LINE__, s);
 				/* some state changes occurred, maybe the analyser
@@ -5426,7 +5426,7 @@
 		msg->sol = msg->sov;
 		msg->next -= bytes; /* will be forwarded */
 		msg->chunk_len += bytes;
-		msg->chunk_len -= buffer_forward(res, msg->chunk_len);
+		msg->chunk_len -= channel_forward(res, msg->chunk_len);
 	}
 
 	/* When TE: chunked is used, we need to get there again to parse remaining
@@ -5437,7 +5437,7 @@
 	if ((msg->flags & HTTP_MSGF_TE_CHNK) ||
 	    (txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_KAL ||
 	    (txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_SCL)
-		buffer_dont_close(res);
+		channel_dont_close(res);
 
 	/* We know that more data are expected, but we couldn't send more that
 	 * what we did. So we always set the CF_EXPECT_MORE flag so that the
diff --git a/src/proto_tcp.c b/src/proto_tcp.c
index 22664dd..f6cc6a3 100644
--- a/src/proto_tcp.c
+++ b/src/proto_tcp.c
@@ -821,7 +821,7 @@
 		if (rule->cond) {
 			ret = acl_exec_cond(rule->cond, s->be, s, &s->txn, SMP_OPT_DIR_REQ | partial);
 			if (ret == ACL_PAT_MISS) {
-				buffer_dont_connect(req);
+				channel_dont_connect(req);
 				/* just set the request timeout once at the beginning of the request */
 				if (!tick_isset(req->analyse_exp) && s->be->tcp_req.inspect_delay)
 					req->analyse_exp = tick_add_ifset(now_ms, s->be->tcp_req.inspect_delay);
@@ -836,8 +836,8 @@
 		if (ret) {
 			/* we have a matching rule. */
 			if (rule->action == TCP_ACT_REJECT) {
-				buffer_abort(req);
-				buffer_abort(s->rep);
+				channel_abort(req);
+				channel_abort(s->rep);
 				req->analysers = 0;
 
 				s->be->be_counters.denied_req++;
@@ -953,8 +953,8 @@
 		if (ret) {
 			/* we have a matching rule. */
 			if (rule->action == TCP_ACT_REJECT) {
-				buffer_abort(rep);
-				buffer_abort(s->req);
+				channel_abort(rep);
+				channel_abort(s->req);
 				rep->analysers = 0;
 
 				s->be->be_counters.denied_resp++;
diff --git a/src/session.c b/src/session.c
index a62b42a..e5a8b7e 100644
--- a/src/session.c
+++ b/src/session.c
@@ -217,15 +217,15 @@
 	if (unlikely(fcntl(cfd, F_SETFL, O_NONBLOCK) == -1))
 		goto out_free_task;
 
-	if (unlikely((s->req = pool_alloc2(pool2_buffer)) == NULL))
+	if (unlikely((s->req = pool_alloc2(pool2_channel)) == NULL))
 		goto out_free_task; /* no memory */
 
-	if (unlikely((s->rep = pool_alloc2(pool2_buffer)) == NULL))
+	if (unlikely((s->rep = pool_alloc2(pool2_channel)) == NULL))
 		goto out_free_req; /* no memory */
 
 	/* initialize the request buffer */
 	s->req->buf.size = global.tune.bufsize;
-	buffer_init(s->req);
+	channel_init(s->req);
 	s->req->prod = &s->si[0];
 	s->req->cons = &s->si[1];
 	s->si[0].ib = s->si[1].ob = s->req;
@@ -242,7 +242,7 @@
 
 	/* initialize response buffer */
 	s->rep->buf.size = global.tune.bufsize;
-	buffer_init(s->rep);
+	channel_init(s->rep);
 	s->rep->prod = &s->si[1];
 	s->rep->cons = &s->si[0];
 	s->si[0].ob = s->si[1].ib = s->rep;
@@ -302,9 +302,9 @@
 
 	/* Error unrolling */
  out_free_rep:
-	pool_free2(pool2_buffer, s->rep);
+	pool_free2(pool2_channel, s->rep);
  out_free_req:
-	pool_free2(pool2_buffer, s->req);
+	pool_free2(pool2_channel, s->req);
  out_free_task:
 	p->feconn--;
 	if (s->stkctr1_entry || s->stkctr2_entry)
@@ -363,8 +363,8 @@
 	if (s->rep->pipe)
 		put_pipe(s->rep->pipe);
 
-	pool_free2(pool2_buffer, s->req);
-	pool_free2(pool2_buffer, s->rep);
+	pool_free2(pool2_channel, s->req);
+	pool_free2(pool2_channel, s->rep);
 
 	http_end_txn(s);
 
@@ -399,7 +399,7 @@
 
 	/* We may want to free the maximum amount of pools if the proxy is stopping */
 	if (fe && unlikely(fe->state == PR_STSTOPPED)) {
-		pool_flush2(pool2_buffer);
+		pool_flush2(pool2_channel);
 		pool_flush2(pool2_hdr_idx);
 		pool_flush2(pool2_requri);
 		pool_flush2(pool2_capture);
@@ -1032,8 +1032,8 @@
 
  sw_failed:
 	/* immediately abort this request in case of allocation failure */
-	buffer_abort(s->req);
-	buffer_abort(s->rep);
+	channel_abort(s->req);
+	channel_abort(s->rep);
 
 	if (!(s->flags & SN_ERR_MASK))
 		s->flags |= SN_ERR_RESOURCE;
@@ -1335,13 +1335,13 @@
 		stream_int_check_timeouts(&s->si[0]);
 		stream_int_check_timeouts(&s->si[1]);
 
-		/* check buffer timeouts, and close the corresponding stream interfaces
+		/* check channel timeouts, and close the corresponding stream interfaces
 		 * for future reads or writes. Note: this will also concern upper layers
 		 * but we do not touch any other flag. We must be careful and correctly
 		 * detect state changes when calling them.
 		 */
 
-		buffer_check_timeouts(s->req);
+		channel_check_timeouts(s->req);
 
 		if (unlikely((s->req->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
 			s->req->cons->flags |= SI_FL_NOLINGER;
@@ -1354,7 +1354,7 @@
 			si_shutr(s->req->prod);
 		}
 
-		buffer_check_timeouts(s->rep);
+		channel_check_timeouts(s->rep);
 
 		if (unlikely((s->rep->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
 			s->rep->cons->flags |= SI_FL_NOLINGER;
@@ -1496,9 +1496,9 @@
 			 * enabling them again when it disables itself, so
 			 * that other analysers are called in similar conditions.
 			 */
-			buffer_auto_read(s->req);
-			buffer_auto_connect(s->req);
-			buffer_auto_close(s->req);
+			channel_auto_read(s->req);
+			channel_auto_connect(s->req);
+			channel_auto_close(s->req);
 
 			/* We will call all analysers for which a bit is set in
 			 * s->req->analysers, following the bit order from LSB
@@ -1688,8 +1688,8 @@
 			 * it disables itself, so that other analysers are called
 			 * in similar conditions.
 			 */
-			buffer_auto_read(s->rep);
-			buffer_auto_close(s->rep);
+			channel_auto_read(s->rep);
+			channel_auto_close(s->rep);
 
 			/* We will call all analysers for which a bit is set in
 			 * s->rep->analysers, following the bit order from LSB
@@ -1843,7 +1843,7 @@
 	/* If noone is interested in analysing data, it's time to forward
 	 * everything. We configure the buffer to forward indefinitely.
 	 * Note that we're checking CF_SHUTR_NOW as an indication of a possible
-	 * recent call to buffer_abort().
+	 * recent call to channel_abort().
 	 */
 	if (!s->req->analysers &&
 	    !(s->req->flags & (CF_HIJACK|CF_SHUTW|CF_SHUTR_NOW)) &&
@@ -1853,16 +1853,16 @@
 		 * attached to it. If any data are left in, we'll permit them to
 		 * move.
 		 */
-		buffer_auto_read(s->req);
-		buffer_auto_connect(s->req);
-		buffer_auto_close(s->req);
+		channel_auto_read(s->req);
+		channel_auto_connect(s->req);
+		channel_auto_close(s->req);
 		buffer_flush(&s->req->buf);
 
 		/* We'll let data flow between the producer (if still connected)
 		 * to the consumer (which might possibly not be connected yet).
 		 */
 		if (!(s->req->flags & (CF_SHUTR|CF_SHUTW_NOW)))
-			buffer_forward(s->req, CHN_INFINITE_FORWARD);
+			channel_forward(s->req, CHN_INFINITE_FORWARD);
 	}
 
 	/* check if it is wise to enable kernel splicing to forward request data */
@@ -1890,7 +1890,7 @@
 	 */
 	if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_HIJACK|CF_AUTO_CLOSE|CF_SHUTR)) ==
 		     (CF_AUTO_CLOSE|CF_SHUTR)))
-			buffer_shutw_now(s->req);
+			channel_shutw_now(s->req);
 
 	/* shutdown(write) pending */
 	if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
@@ -1900,7 +1900,7 @@
 	/* shutdown(write) done on server side, we must stop the client too */
 	if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW &&
 		     !s->req->analysers))
-		buffer_shutr_now(s->req);
+		channel_shutr_now(s->req);
 
 	/* shutdown(read) pending */
 	if (unlikely((s->req->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
@@ -1933,8 +1933,8 @@
 		}
 		else {
 			s->req->cons->state = SI_ST_CLO; /* shutw+ini = abort */
-			buffer_shutw_now(s->req);        /* fix buffer flags upon abort */
-			buffer_shutr_now(s->rep);
+			channel_shutw_now(s->req);        /* fix buffer flags upon abort */
+			channel_shutr_now(s->rep);
 		}
 	}
 
@@ -1979,7 +1979,7 @@
 	/* If noone is interested in analysing data, it's time to forward
 	 * everything. We configure the buffer to forward indefinitely.
 	 * Note that we're checking CF_SHUTR_NOW as an indication of a possible
-	 * recent call to buffer_abort().
+	 * recent call to channel_abort().
 	 */
 	if (!s->rep->analysers &&
 	    !(s->rep->flags & (CF_HIJACK|CF_SHUTW|CF_SHUTR_NOW)) &&
@@ -1989,15 +1989,15 @@
 		 * attached to it. If any data are left in, we'll permit them to
 		 * move.
 		 */
-		buffer_auto_read(s->rep);
-		buffer_auto_close(s->rep);
+		channel_auto_read(s->rep);
+		channel_auto_close(s->rep);
 		buffer_flush(&s->rep->buf);
 
 		/* We'll let data flow between the producer (if still connected)
 		 * to the consumer.
 		 */
 		if (!(s->rep->flags & (CF_SHUTR|CF_SHUTW_NOW)))
-			buffer_forward(s->rep, CHN_INFINITE_FORWARD);
+			channel_forward(s->rep, CHN_INFINITE_FORWARD);
 
 		/* if we have no analyser anymore in any direction and have a
 		 * tunnel timeout set, use it now.
@@ -2036,7 +2036,7 @@
 	/* first, let's check if the response buffer needs to shutdown(write) */
 	if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_HIJACK|CF_AUTO_CLOSE|CF_SHUTR)) ==
 		     (CF_AUTO_CLOSE|CF_SHUTR)))
-		buffer_shutw_now(s->rep);
+		channel_shutw_now(s->rep);
 
 	/* shutdown(write) pending */
 	if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
@@ -2046,7 +2046,7 @@
 	/* shutdown(write) done on the client side, we must stop the server too */
 	if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW) &&
 	    !s->rep->analysers)
-		buffer_shutr_now(s->rep);
+		channel_shutr_now(s->rep);
 
 	/* shutdown(read) pending */
 	if (unlikely((s->rep->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
@@ -2313,8 +2313,8 @@
 	if (session->req->flags & (CF_SHUTW|CF_SHUTW_NOW))
 		return;
 
-	buffer_shutw_now(session->req);
-	buffer_shutr_now(session->rep);
+	channel_shutw_now(session->req);
+	channel_shutr_now(session->rep);
 	session->task->nice = 1024;
 	if (!(session->flags & SN_ERR_MASK))
 		session->flags |= why;
diff --git a/src/stream_interface.c b/src/stream_interface.c
index ae633ea..765d04f 100644
--- a/src/stream_interface.c
+++ b/src/stream_interface.c
@@ -108,19 +108,19 @@
  */
 void stream_int_retnclose(struct stream_interface *si, const struct chunk *msg)
 {
-	buffer_auto_read(si->ib);
-	buffer_abort(si->ib);
-	buffer_auto_close(si->ib);
-	buffer_erase(si->ib);
+	channel_auto_read(si->ib);
+	channel_abort(si->ib);
+	channel_auto_close(si->ib);
+	channel_erase(si->ib);
 
 	bi_erase(si->ob);
 	if (likely(msg && msg->len))
 		bo_inject(si->ob, msg->str, msg->len);
 
 	si->ob->wex = tick_add_ifset(now_ms, si->ob->wto);
-	buffer_auto_read(si->ob);
-	buffer_auto_close(si->ob);
-	buffer_shutr_now(si->ob);
+	channel_auto_read(si->ob);
+	channel_auto_close(si->ob);
+	channel_shutr_now(si->ob);
 }
 
 /* default update function for scheduled tasks, not used for embedded tasks */
@@ -1179,7 +1179,7 @@
 	/* we received a shutdown */
 	b->flags |= CF_READ_NULL;
 	if (b->flags & CF_AUTO_CLOSE)
-		buffer_shutw_now(b);
+		channel_shutw_now(b);
 	stream_sock_read0(si);
 	conn_data_read0(conn);
 	return;