CLEANUP: channel: use "channel" instead of "buffer" in function names
This is a massive rename of most functions so that they use the word
"channel" instead of the word "buffer" in their names.
It concerns the following ones (new names):
unsigned long long channel_forward(struct channel *buf, unsigned long long bytes);
static inline void channel_init(struct channel *buf)
static inline int channel_input_closed(struct channel *buf)
static inline int channel_output_closed(struct channel *buf)
static inline void channel_check_timeouts(struct channel *b)
static inline void channel_erase(struct channel *buf)
static inline void channel_shutr_now(struct channel *buf)
static inline void channel_shutw_now(struct channel *buf)
static inline void channel_abort(struct channel *buf)
static inline void channel_stop_hijacker(struct channel *buf)
static inline void channel_auto_connect(struct channel *buf)
static inline void channel_dont_connect(struct channel *buf)
static inline void channel_auto_close(struct channel *buf)
static inline void channel_dont_close(struct channel *buf)
static inline void channel_auto_read(struct channel *buf)
static inline void channel_dont_read(struct channel *buf)
static inline void channel_install_hijacker(struct session *s, struct channel *b, void (*func)(struct session *, struct channel *))
Some functions provided by channel.[ch] have kept their "buffer" name because
they are really designed to act on the buffer according to some information
gathered from the channel. They have been moved together to the same place in
the file for better readability but they were not changed at all.
The "buffer" memory pool was also renamed "channel".
diff --git a/include/proto/channel.h b/include/proto/channel.h
index c6ab718..2f2a913 100644
--- a/include/proto/channel.h
+++ b/include/proto/channel.h
@@ -34,21 +34,22 @@
#include <types/global.h>
-extern struct pool_head *pool2_buffer;
+extern struct pool_head *pool2_channel;
/* perform minimal initializations, report 0 in case of error, 1 if OK. */
-int init_buffer();
+int init_channel();
-/* SI-to-buffer functions : buffer_{get,put}_{char,block,string,chunk} */
-int bo_inject(struct channel *buf, const char *msg, int len);
+unsigned long long channel_forward(struct channel *buf, unsigned long long bytes);
+
+/* SI-to-channel functions working with buffers */
int bi_putblk(struct channel *buf, const char *str, int len);
int bi_putchr(struct channel *buf, char c);
+int bo_inject(struct channel *buf, const char *msg, int len);
int bo_getline(struct channel *buf, char *str, int len);
int bo_getblk(struct channel *buf, char *blk, int len, int offset);
-unsigned long long buffer_forward(struct channel *buf, unsigned long long bytes);
-/* Initialize all fields in the buffer. */
-static inline void buffer_init(struct channel *buf)
+/* Initialize all fields in the channel. */
+static inline void channel_init(struct channel *buf)
{
buf->buf.o = 0;
buf->buf.i = 0;
@@ -61,9 +62,9 @@
buf->flags = 0;
}
-/*****************************************************************/
-/* These functions are used to compute various buffer area sizes */
-/*****************************************************************/
+/*********************************************************************/
+/* These functions are used to compute various channel content sizes */
+/*********************************************************************/
/* Reports non-zero if the channel is empty, which means both its
* buffer and pipe are empty. The construct looks strange but is
@@ -75,31 +76,6 @@
return !(c->buf.o | (long)c->pipe);
}
-/* Return the number of reserved bytes in the buffer, which ensures that once
- * all pending data are forwarded, the buffer still has global.tune.maxrewrite
- * bytes free. The result is between 0 and global.maxrewrite, which is itself
- * smaller than any buf->size.
- */
-static inline int buffer_reserved(const struct channel *buf)
-{
- int ret = global.tune.maxrewrite - buf->to_forward - buf->buf.o;
-
- if (buf->to_forward == CHN_INFINITE_FORWARD)
- return 0;
- if (ret <= 0)
- return 0;
- return ret;
-}
-
-/* Return the max number of bytes the buffer can contain so that once all the
- * pending bytes are forwarded, the buffer still has global.tune.maxrewrite
- * bytes free. The result sits between buf->size - maxrewrite and buf->size.
- */
-static inline int buffer_max_len(const struct channel *buf)
-{
- return buf->buf.size - buffer_reserved(buf);
-}
-
/* Returns non-zero if the buffer input is considered full. The reserved space
* is taken into account if ->to_forward indicates that an end of transfer is
* close to happen. The test is optimized to avoid as many operations as
@@ -125,64 +101,24 @@
return rem <= 0;
}
-/* Returns the amount of space available at the input of the buffer, taking the
- * reserved space into account if ->to_forward indicates that an end of transfer
- * is close to happen. The test is optimized to avoid as many operations as
- * possible for the fast case.
- */
-static inline int bi_avail(const struct channel *b)
-{
- int rem = b->buf.size;
- int rem2;
-
- rem -= b->buf.o;
- rem -= b->buf.i;
- if (!rem)
- return rem; /* buffer already full */
-
- if (b->to_forward >= b->buf.size ||
- (CHN_INFINITE_FORWARD < MAX_RANGE(typeof(b->buf.size)) && // just there to ensure gcc
- b->to_forward == CHN_INFINITE_FORWARD)) // avoids the useless second
- return rem; // test whenever possible
-
- rem2 = rem - global.tune.maxrewrite;
- rem2 += b->buf.o;
- rem2 += b->to_forward;
-
- if (rem > rem2)
- rem = rem2;
- if (rem > 0)
- return rem;
- return 0;
-}
-
-/* Return the amount of bytes that can be written into the buffer at once,
- * excluding reserved space, which is preserved.
- */
-static inline int buffer_contig_space_res(const struct channel *chn)
-{
- return buffer_contig_space_with_res(&chn->buf, buffer_reserved(chn));
-}
-
-/* Returns true if the buffer's input is already closed */
-static inline int buffer_input_closed(struct channel *buf)
+/* Returns true if the channel's input is already closed */
+static inline int channel_input_closed(struct channel *buf)
{
return ((buf->flags & CF_SHUTR) != 0);
}
-/* Returns true if the buffer's output is already closed */
-static inline int buffer_output_closed(struct channel *buf)
+/* Returns true if the channel's output is already closed */
+static inline int channel_output_closed(struct channel *buf)
{
return ((buf->flags & CF_SHUTW) != 0);
}
-/* Check buffer timeouts, and set the corresponding flags. The
- * likely/unlikely have been optimized for fastest normal path.
- * The read/write timeouts are not set if there was activity on the buffer.
- * That way, we don't have to update the timeout on every I/O. Note that the
- * analyser timeout is always checked.
+/* Check channel timeouts, and set the corresponding flags. The likely/unlikely
+ * have been optimized for fastest normal path. The read/write timeouts are not
+ * set if there was activity on the channel. That way, we don't have to update
+ * the timeout on every I/O. Note that the analyser timeout is always checked.
*/
-static inline void buffer_check_timeouts(struct channel *b)
+static inline void channel_check_timeouts(struct channel *b)
{
if (likely(!(b->flags & (CF_SHUTR|CF_READ_TIMEOUT|CF_READ_ACTIVITY|CF_READ_NOEXP))) &&
unlikely(tick_is_expired(b->rex, now_ms)))
@@ -197,11 +133,11 @@
b->flags |= CF_ANA_TIMEOUT;
}
-/* Erase any content from buffer <buf> and adjusts flags accordingly. Note
+/* Erase any content from channel <buf> and adjust flags accordingly. Note
* that any spliced data is not affected since we may not have any access to
* it.
*/
-static inline void buffer_erase(struct channel *buf)
+static inline void channel_erase(struct channel *buf)
{
buf->buf.o = 0;
buf->buf.i = 0;
@@ -209,48 +145,31 @@
buf->buf.p = buf->buf.data;
}
-/* Cut the "tail" of the buffer, which means strip it to the length of unsent
- * data only, and kill any remaining unsent data. Any scheduled forwarding is
- * stopped. This is mainly to be used to send error messages after existing
- * data.
- */
-static inline void bi_erase(struct channel *buf)
-{
- if (!buf->buf.o)
- return buffer_erase(buf);
-
- buf->to_forward = 0;
- if (!buf->buf.i)
- return;
-
- buf->buf.i = 0;
-}
-
-/* marks the buffer as "shutdown" ASAP for reads */
-static inline void buffer_shutr_now(struct channel *buf)
+/* marks the channel as "shutdown" ASAP for reads */
+static inline void channel_shutr_now(struct channel *buf)
{
buf->flags |= CF_SHUTR_NOW;
}
-/* marks the buffer as "shutdown" ASAP for writes */
-static inline void buffer_shutw_now(struct channel *buf)
+/* marks the channel as "shutdown" ASAP for writes */
+static inline void channel_shutw_now(struct channel *buf)
{
buf->flags |= CF_SHUTW_NOW;
}
-/* marks the buffer as "shutdown" ASAP in both directions */
-static inline void buffer_abort(struct channel *buf)
+/* marks the channel as "shutdown" ASAP in both directions */
+static inline void channel_abort(struct channel *buf)
{
buf->flags |= CF_SHUTR_NOW | CF_SHUTW_NOW;
buf->flags &= ~CF_AUTO_CONNECT;
}
-/* Installs <func> as a hijacker on the buffer <b> for session <s>. The hijack
+/* Installs <func> as a hijacker on the channel <b> for session <s>. The hijack
* flag is set, and the function called once. The function is responsible for
* clearing the hijack bit. It is possible that the function clears the flag
* during this first call.
*/
-static inline void buffer_install_hijacker(struct session *s,
+static inline void channel_install_hijacker(struct session *s,
struct channel *b,
void (*func)(struct session *, struct channel *))
{
@@ -259,14 +178,14 @@
func(s, b);
}
-/* Releases the buffer from hijacking mode. Often used by the hijack function */
-static inline void buffer_stop_hijack(struct channel *buf)
+/* Releases the channel from hijacking mode. Often used by the hijack function */
+static inline void channel_stop_hijacker(struct channel *buf)
{
buf->flags &= ~CF_HIJACK;
}
/* allow the consumer to try to establish a new connection. */
-static inline void buffer_auto_connect(struct channel *buf)
+static inline void channel_auto_connect(struct channel *buf)
{
buf->flags |= CF_AUTO_CONNECT;
}
@@ -274,40 +193,129 @@
/* prevent the consumer from trying to establish a new connection, and also
* disable auto shutdown forwarding.
*/
-static inline void buffer_dont_connect(struct channel *buf)
+static inline void channel_dont_connect(struct channel *buf)
{
buf->flags &= ~(CF_AUTO_CONNECT|CF_AUTO_CLOSE);
}
/* allow the producer to forward shutdown requests */
-static inline void buffer_auto_close(struct channel *buf)
+static inline void channel_auto_close(struct channel *buf)
{
buf->flags |= CF_AUTO_CLOSE;
}
/* prevent the producer from forwarding shutdown requests */
-static inline void buffer_dont_close(struct channel *buf)
+static inline void channel_dont_close(struct channel *buf)
{
buf->flags &= ~CF_AUTO_CLOSE;
}
/* allow the producer to read / poll the input */
-static inline void buffer_auto_read(struct channel *buf)
+static inline void channel_auto_read(struct channel *buf)
{
buf->flags &= ~CF_DONT_READ;
}
/* prevent the producer from reading / polling the input */
-static inline void buffer_dont_read(struct channel *buf)
+static inline void channel_dont_read(struct channel *buf)
{
buf->flags |= CF_DONT_READ;
}
+
+/*************************************************/
+/* Buffer operations in the context of a channel */
+/*************************************************/
+
+
+/* Return the number of reserved bytes in the channel's visible
+ * buffer, which ensures that once all pending data are forwarded, the
+ * buffer still has global.tune.maxrewrite bytes free. The result is
+ * between 0 and global.tune.maxrewrite, which is itself smaller than
+ * any buf->size.
+ */
+static inline int buffer_reserved(const struct channel *buf)
+{
+ int ret = global.tune.maxrewrite - buf->to_forward - buf->buf.o;
+
+ if (buf->to_forward == CHN_INFINITE_FORWARD)
+ return 0;
+ if (ret <= 0)
+ return 0;
+ return ret;
+}
+
+/* Return the max number of bytes the buffer can contain so that once all the
+ * pending bytes are forwarded, the buffer still has global.tune.maxrewrite
+ * bytes free. The result sits between buf->size - maxrewrite and buf->size.
+ */
+static inline int buffer_max_len(const struct channel *buf)
+{
+ return buf->buf.size - buffer_reserved(buf);
+}
+
+/* Return the amount of bytes that can be written into the buffer at once,
+ * excluding reserved space, which is preserved.
+ */
+static inline int buffer_contig_space_res(const struct channel *chn)
+{
+ return buffer_contig_space_with_res(&chn->buf, buffer_reserved(chn));
+}
+
+/* Returns the amount of space available at the input of the buffer, taking the
+ * reserved space into account if ->to_forward indicates that an end of transfer
+ * is close to happen. The test is optimized to avoid as many operations as
+ * possible for the fast case.
+ */
+static inline int bi_avail(const struct channel *b)
+{
+ int rem = b->buf.size;
+ int rem2;
+
+ rem -= b->buf.o;
+ rem -= b->buf.i;
+ if (!rem)
+ return rem; /* buffer already full */
+
+ if (b->to_forward >= b->buf.size ||
+ (CHN_INFINITE_FORWARD < MAX_RANGE(typeof(b->buf.size)) && // just there to ensure gcc
+ b->to_forward == CHN_INFINITE_FORWARD)) // avoids the useless second
+ return rem; // test whenever possible
+
+ rem2 = rem - global.tune.maxrewrite;
+ rem2 += b->buf.o;
+ rem2 += b->to_forward;
+
+ if (rem > rem2)
+ rem = rem2;
+ if (rem > 0)
+ return rem;
+ return 0;
+}
+
+/* Cut the "tail" of the channel's buffer, which means strip it to the length
+ * of unsent data only, and kill any remaining unsent data. Any scheduled
+ * forwarding is stopped. This is mainly to be used to send error messages
+ * after existing data.
+ */
+static inline void bi_erase(struct channel *buf)
+{
+ if (!buf->buf.o)
+ return channel_erase(buf);
+
+ buf->to_forward = 0;
+ if (!buf->buf.i)
+ return;
+
+ buf->buf.i = 0;
+}
+
/*
- * Advance the buffer's read pointer by <len> bytes. This is useful when data
- * have been read directly from the buffer. It is illegal to call this function
- * with <len> causing a wrapping at the end of the buffer. It's the caller's
- * responsibility to ensure that <len> is never larger than buf->o.
+ * Advance the channel buffer's read pointer by <len> bytes. This is useful
+ * when data have been read directly from the buffer. It is illegal to call
+ * this function with <len> causing a wrapping at the end of the buffer. It's
+ * the caller's responsibility to ensure that <len> is never larger than
+ * buf->o. Channel flag WRITE_PARTIAL is set.
*/
static inline void bo_skip(struct channel *buf, int len)
{
@@ -320,12 +328,12 @@
buf->flags |= CF_WRITE_PARTIAL;
}
-/* Tries to copy chunk <chunk> into buffer <buf> after length controls.
- * The ->o and to_forward pointers are updated. If the buffer's input is
+/* Tries to copy chunk <chunk> into the channel's buffer after length controls.
+ * The buf->o and to_forward pointers are updated. If the channel's input is
* closed, -2 is returned. If the block is too large for this buffer, -3 is
* returned. If there is not enough room left in the buffer, -1 is returned.
* Otherwise the number of bytes copied is returned (0 being a valid number).
- * Buffer flag READ_PARTIAL is updated if some data can be transferred. The
+ * Channel flag READ_PARTIAL is updated if some data can be transferred. The
* chunk's length is updated with the number of bytes sent.
*/
static inline int bi_putchk(struct channel *buf, struct chunk *chunk)
@@ -338,12 +346,13 @@
return ret;
}
-/* Tries to copy string <str> at once into buffer <buf> after length controls.
- * The ->o and to_forward pointers are updated. If the buffer's input is
- * closed, -2 is returned. If the block is too large for this buffer, -3 is
- * returned. If there is not enough room left in the buffer, -1 is returned.
- * Otherwise the number of bytes copied is returned (0 being a valid number).
- * Buffer flag READ_PARTIAL is updated if some data can be transferred.
+/* Tries to copy string <str> at once into the channel's buffer after length
+ * controls. The buf->o and to_forward pointers are updated. If the channel's
+ * input is closed, -2 is returned. If the block is too large for this buffer,
+ * -3 is returned. If there is not enough room left in the buffer, -1 is
+ * returned. Otherwise the number of bytes copied is returned (0 being a valid
+ * number). Channel flag READ_PARTIAL is updated if some data can be
+ * transferred.
*/
static inline int bi_putstr(struct channel *buf, const char *str)
{
@@ -351,10 +360,11 @@
}
/*
- * Return one char from the buffer. If the buffer is empty and closed, return -2.
- * If the buffer is just empty, return -1. The buffer's pointer is not advanced,
- * it's up to the caller to call bo_skip(buf, 1) when it has consumed the char.
- * Also note that this function respects the ->o limit.
+ * Return one char from the channel's buffer. If the buffer is empty and the
+ * channel is closed, return -2. If the buffer is just empty, return -1. The
+ * buffer's pointer is not advanced, it's up to the caller to call bo_skip(buf,
+ * 1) when it has consumed the char. Also note that this function respects the
+ * buf->o limit.
*/
static inline int bo_getchr(struct channel *buf)
{
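As a numeric illustration of the reserved-space helpers which kept
their "buffer" names above (the values are made up for the example,
assuming global.tune.maxrewrite = 1024 and an 8192-byte buffer):

    /* with to_forward = 200 and buf.o = 300 pending bytes :     */
    /*   buffer_reserved() = 1024 - 200 - 300 = 524 bytes        */
    /*   buffer_max_len()  = 8192 - 524      = 7668 bytes        */
    /* with to_forward == CHN_INFINITE_FORWARD nothing needs to  */
    /* be reserved anymore, so buffer_max_len() returns 8192.    */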
diff --git a/include/types/channel.h b/include/types/channel.h
index 2ef3b3e..c68f4f7 100644
--- a/include/types/channel.h
+++ b/include/types/channel.h
@@ -137,7 +137,7 @@
* Those bits indicate that there is some processing to do on the buffer
* contents. It will probably evolve into a linked list later. Those
* analysers could be compared to higher level processors.
- * The field is blanked by buffer_init() and only by analysers themselves
+ * The field is blanked by channel_init() and only by analysers themselves
* afterwards.
*/
#define AN_REQ_DECODE_PROXY 0x00000001 /* take the proxied address from a 'PROXY' line */
diff --git a/src/channel.c b/src/channel.c
index 1597b0f..65fe665 100644
--- a/src/channel.c
+++ b/src/channel.c
@@ -22,28 +22,25 @@
#include <types/global.h>
-/* Note: this code has not yet been completely cleaned up and still refers to
- * the word "buffer" when "channel" is meant instead.
- */
-struct pool_head *pool2_buffer;
+struct pool_head *pool2_channel;
/* perform minimal initializations, report 0 in case of error, 1 if OK. */
-int init_buffer()
+int init_channel()
{
- pool2_buffer = create_pool("buffer", sizeof(struct channel) + global.tune.bufsize, MEM_F_SHARED);
- return pool2_buffer != NULL;
+ pool2_channel = create_pool("channel", sizeof(struct channel) + global.tune.bufsize, MEM_F_SHARED);
+ return pool2_channel != NULL;
}
-/* Schedule up to <bytes> more bytes to be forwarded by the buffer without notifying
- * the task. Any pending data in the buffer is scheduled to be sent as well,
- * in the limit of the number of bytes to forward. This must be the only method
- * to use to schedule bytes to be sent. If the requested number is too large, it
- * is automatically adjusted. The number of bytes taken into account is returned.
- * Directly touching ->to_forward will cause lockups when ->o goes down to
- * zero if nobody is ready to push the remaining data.
+/* Schedule up to <bytes> more bytes to be forwarded via the channel without
+ * notifying the owner task. Any data pending in the buffer are scheduled to be
+ * sent as well, within the limit of the number of bytes to forward. This must be
+ * the only method to use to schedule bytes to be forwarded. If the requested
+ * number is too large, it is automatically adjusted. The number of bytes taken
+ * into account is returned. Directly touching ->to_forward will cause lockups
+ * when buf->o goes down to zero if nobody is ready to push the remaining data.
*/
-unsigned long long buffer_forward(struct channel *buf, unsigned long long bytes)
+unsigned long long channel_forward(struct channel *buf, unsigned long long bytes)
{
unsigned int new_forward;
unsigned int forwarded;
@@ -94,12 +91,12 @@
return bytes;
}
-/* writes <len> bytes from message <msg> to buffer <buf>. Returns -1 in case of
- * success, -2 if the message is larger than the buffer size, or the number of
- * bytes available otherwise. The send limit is automatically adjusted with the
- * amount of data written. FIXME-20060521: handle unaligned data.
- * Note: this function appends data to the buffer's output and possibly overwrites
- * any pending input data which are assumed not to exist.
+/* writes <len> bytes from message <msg> to the channel's buffer. Returns -1 in
+ * case of success, -2 if the message is larger than the buffer size, or the
+ * number of bytes available otherwise. The send limit is automatically
+ * adjusted to the amount of data written. FIXME-20060521: handle unaligned
+ * data. Note: this function appends data to the buffer's output and possibly
+ * overwrites any pending input data which are assumed not to exist.
*/
int bo_inject(struct channel *buf, const char *msg, int len)
{
@@ -129,15 +126,15 @@
return -1;
}
-/* Tries to copy character <c> into buffer <buf> after length controls. The
- * ->o and to_forward pointers are updated. If the buffer's input is
- * closed, -2 is returned. If there is not enough room left in the buffer, -1
- * is returned. Otherwise the number of bytes copied is returned (1). Buffer
- * flag READ_PARTIAL is updated if some data can be transferred.
+/* Tries to copy character <c> into the channel's buffer after some length
+ * controls. The buf->o and to_forward pointers are updated. If the channel
+ * input is closed, -2 is returned. If there is not enough room left in the
+ * buffer, -1 is returned. Otherwise the number of bytes copied is returned
+ * (1). Channel flag READ_PARTIAL is updated if some data can be transferred.
*/
int bi_putchr(struct channel *buf, char c)
{
- if (unlikely(buffer_input_closed(buf)))
+ if (unlikely(channel_input_closed(buf)))
return -2;
if (channel_full(buf))
@@ -158,18 +155,19 @@
return 1;
}
-/* Tries to copy block <blk> at once into buffer <buf> after length controls.
- * The ->o and to_forward pointers are updated. If the buffer's input is
- * closed, -2 is returned. If the block is too large for this buffer, -3 is
- * returned. If there is not enough room left in the buffer, -1 is returned.
- * Otherwise the number of bytes copied is returned (0 being a valid number).
- * Buffer flag READ_PARTIAL is updated if some data can be transferred.
+/* Tries to copy block <blk> at once into the channel's buffer after length
+ * controls. The buf->o and to_forward pointers are updated. If the channel
+ * input is closed, -2 is returned. If the block is too large for this buffer,
+ * -3 is returned. If there is not enough room left in the buffer, -1 is
+ * returned. Otherwise the number of bytes copied is returned (0 being a valid
+ * number). Channel flag READ_PARTIAL is updated if some data can be
+ * transferred.
*/
int bi_putblk(struct channel *buf, const char *blk, int len)
{
int max;
- if (unlikely(buffer_input_closed(buf)))
+ if (unlikely(channel_input_closed(buf)))
return -2;
max = buffer_max_len(buf);
@@ -210,12 +208,12 @@
return len;
}
-/* Gets one text line out of a buffer from a stream interface.
+/* Gets one text line out of a channel's buffer from a stream interface.
* Return values :
* >0 : number of bytes read. Includes the \n if present before len or end.
* =0 : no '\n' before end found. <str> is left undefined.
* <0 : no more bytes readable because output is shut.
- * The buffer status is not changed. The caller must call bo_skip() to
+ * The channel status is not changed. The caller must call bo_skip() to
* update it. The '\n' is waited for as long as neither the buffer nor the
* output are full. If either of them is full, the string may be returned
* as is, without the '\n'.
@@ -260,12 +258,12 @@
return ret;
}
-/* Gets one full block of data at once from a buffer, optionally from a
- * specific offset. Return values :
+/* Gets one full block of data at once from a channel's buffer, optionally from
+ * a specific offset. Return values :
* >0 : number of bytes read, equal to requested size.
* =0 : not enough data available. <blk> is left undefined.
* <0 : no more bytes readable because output is shut.
- * The buffer status is not changed. The caller must call bo_skip() to
+ * The channel status is not changed. The caller must call bo_skip() to
* update it.
*/
int bo_getblk(struct channel *buf, char *blk, int len, int offset)
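For reference, the usual caller pattern for the renamed forwarding
function, as it appears further down in this patch in proto_http.c :

    /* schedule up to chunk_len bytes ; channel_forward() returns the
     * number of bytes actually taken into account, so whatever could
     * not be scheduled remains in chunk_len for a later call.
     */
    msg->chunk_len -= channel_forward(req, msg->chunk_len);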
diff --git a/src/frontend.c b/src/frontend.c
index 07c4462..8b16836 100644
--- a/src/frontend.c
+++ b/src/frontend.c
@@ -190,8 +190,8 @@
/* note: this should not happen anymore since there's always at least the switching rules */
if (!s->req->analysers) {
- buffer_auto_connect(s->req); /* don't wait to establish connection */
- buffer_auto_close(s->req); /* let the producer forward close requests */
+ channel_auto_connect(s->req); /* don't wait to establish connection */
+ channel_auto_close(s->req); /* let the producer forward close requests */
}
s->req->rto = s->fe->timeout.client;
@@ -400,12 +400,12 @@
if ((req->flags & CF_SHUTR) || buffer_full(&req->buf, global.tune.maxrewrite))
goto fail;
- buffer_dont_connect(s->req);
+ channel_dont_connect(s->req);
return 0;
fail:
- buffer_abort(req);
- buffer_abort(s->rep);
+ channel_abort(req);
+ channel_abort(s->rep);
req->analysers = 0;
s->fe->fe_counters.failed_req++;
diff --git a/src/haproxy.c b/src/haproxy.c
index 3427dcc..5d537df 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -584,8 +584,8 @@
global_listener_queue_task->process = manage_global_listener_queue;
global_listener_queue_task->expire = TICK_ETERNITY;
- /* now we know the buffer size, we can initialize the buffers */
- init_buffer();
+ /* now we know the buffer size, we can initialize the channels and buffers */
+ init_channel();
if (have_appsession)
appsession_init();
@@ -1053,7 +1053,7 @@
}
pool_destroy2(pool2_session);
- pool_destroy2(pool2_buffer);
+ pool_destroy2(pool2_channel);
pool_destroy2(pool2_requri);
pool_destroy2(pool2_task);
pool_destroy2(pool2_capture);
diff --git a/src/peers.c b/src/peers.c
index 386cca9..312c248 100644
--- a/src/peers.c
+++ b/src/peers.c
@@ -1222,11 +1222,11 @@
txn->hdr_idx.v = NULL;
txn->hdr_idx.size = txn->hdr_idx.used = 0;
- if ((s->req = pool_alloc2(pool2_buffer)) == NULL)
+ if ((s->req = pool_alloc2(pool2_channel)) == NULL)
goto out_fail_req; /* no memory */
s->req->buf.size = global.tune.bufsize;
- buffer_init(s->req);
+ channel_init(s->req);
s->req->prod = &s->si[0];
s->req->cons = &s->si[1];
s->si[0].ib = s->si[1].ob = s->req;
@@ -1238,18 +1238,18 @@
/* note: this should not happen anymore since there's always at least the switching rules */
if (!s->req->analysers) {
- buffer_auto_connect(s->req);/* don't wait to establish connection */
- buffer_auto_close(s->req);/* let the producer forward close requests */
+ channel_auto_connect(s->req);/* don't wait to establish connection */
+ channel_auto_close(s->req);/* let the producer forward close requests */
}
s->req->rto = s->fe->timeout.client;
s->req->wto = s->be->timeout.server;
- if ((s->rep = pool_alloc2(pool2_buffer)) == NULL)
+ if ((s->rep = pool_alloc2(pool2_channel)) == NULL)
goto out_fail_rep; /* no memory */
s->rep->buf.size = global.tune.bufsize;
- buffer_init(s->rep);
+ channel_init(s->rep);
s->rep->prod = &s->si[1];
s->rep->cons = &s->si[0];
s->si[0].ob = s->si[1].ib = s->rep;
@@ -1283,7 +1283,7 @@
/* Error unrolling */
out_fail_rep:
- pool_free2(pool2_buffer, s->req);
+ pool_free2(pool2_channel, s->req);
out_fail_req:
task_free(t);
out_free_session:
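The allocation sequence above follows the same pattern in both peers.c
and session.c ; sketched here for the request direction only, with the
error unrolling elided :

    /* allocate the channel from the renamed pool, size its embedded
     * buffer, then initialize all of its fields.
     */
    if ((s->req = pool_alloc2(pool2_channel)) == NULL)
        goto out_fail_req; /* no memory */
    s->req->buf.size = global.tune.bufsize;
    channel_init(s->req);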
diff --git a/src/proto_http.c b/src/proto_http.c
index ce462e1..e671225 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -649,12 +649,12 @@
static void http_server_error(struct session *t, struct stream_interface *si,
int err, int finst, int status, const struct chunk *msg)
{
- buffer_auto_read(si->ob);
- buffer_abort(si->ob);
- buffer_auto_close(si->ob);
- buffer_erase(si->ob);
- buffer_auto_close(si->ib);
- buffer_auto_read(si->ib);
+ channel_auto_read(si->ob);
+ channel_abort(si->ob);
+ channel_auto_close(si->ob);
+ channel_erase(si->ob);
+ channel_auto_close(si->ib);
+ channel_auto_read(si->ib);
if (status > 0 && msg) {
t->txn.status = status;
bo_inject(si->ib, msg->str, msg->len);
@@ -2026,7 +2026,7 @@
if (req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))
goto failed_keep_alive;
/* some data has still not left the buffer, wake us once that's done */
- buffer_dont_connect(req);
+ channel_dont_connect(req);
req->flags |= CF_READ_DONTWAIT; /* try to get back here ASAP */
return 0;
}
@@ -2050,7 +2050,7 @@
if (s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))
goto failed_keep_alive;
/* don't let a connection request be initiated */
- buffer_dont_connect(req);
+ channel_dont_connect(req);
s->rep->flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
s->rep->analysers |= an_bit; /* wake us up once it changes */
return 0;
@@ -2211,7 +2211,7 @@
return 0;
}
- buffer_dont_connect(req);
+ channel_dont_connect(req);
req->flags |= CF_READ_DONTWAIT; /* try to get back here ASAP */
s->rep->flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
#ifdef TCP_QUICKACK
@@ -2784,7 +2784,7 @@
if (unlikely(msg->msg_state < HTTP_MSG_BODY)) {
/* we need more data */
- buffer_dont_connect(req);
+ channel_dont_connect(req);
return 0;
}
@@ -2866,11 +2866,11 @@
* eventually expire. We build the tarpit as an analyser.
*/
if (txn->flags & TX_CLTARPIT) {
- buffer_erase(s->req);
+ channel_erase(s->req);
/* wipe the request out so that we can drop the connection early
* if the client closes first.
*/
- buffer_dont_connect(req);
+ channel_dont_connect(req);
req->analysers = 0; /* remove switching rules etc... */
req->analysers |= AN_REQ_HTTP_TARPIT;
req->analyse_exp = tick_add_ifset(now_ms, s->be->timeout.tarpit);
@@ -3017,7 +3017,7 @@
if (!http_process_req_stat_post(s->rep->prod, txn, req)) {
/* we need more data */
req->analysers |= an_bit;
- buffer_dont_connect(req);
+ channel_dont_connect(req);
return 0;
}
} else {
@@ -3244,7 +3244,7 @@
if (unlikely(msg->msg_state < HTTP_MSG_BODY)) {
/* we need more data */
- buffer_dont_connect(req);
+ channel_dont_connect(req);
return 0;
}
@@ -3454,7 +3454,7 @@
s->txn.meth == HTTP_METH_POST && s->be->url_param_name != NULL &&
s->be->url_param_post_limit != 0 &&
(msg->flags & (HTTP_MSGF_CNT_LEN|HTTP_MSGF_TE_CHNK))) {
- buffer_dont_connect(req);
+ channel_dont_connect(req);
req->analysers |= AN_REQ_HTTP_BODY;
}
@@ -3526,7 +3526,7 @@
* timeout. We just have to check that the client is still
* there and that the timeout has not expired.
*/
- buffer_dont_connect(req);
+ channel_dont_connect(req);
if ((req->flags & (CF_SHUTR|CF_READ_ERROR)) == 0 &&
!tick_is_expired(req->analyse_exp, now_ms))
return 0;
@@ -3660,7 +3660,7 @@
* request timeout once at the beginning of the
* request.
*/
- buffer_dont_connect(req);
+ channel_dont_connect(req);
if (!tick_isset(req->analyse_exp))
req->analyse_exp = tick_add_ifset(now_ms, s->be->timeout.httpreq);
return 0;
@@ -3859,10 +3859,10 @@
}
/* we're removing the analysers, we MUST re-enable events detection */
- buffer_auto_read(s->req);
- buffer_auto_close(s->req);
- buffer_auto_read(s->rep);
- buffer_auto_close(s->rep);
+ channel_auto_read(s->req);
+ channel_auto_close(s->req);
+ channel_auto_read(s->rep);
+ channel_auto_close(s->rep);
s->req->analysers = s->listener->analysers;
s->req->analysers &= ~AN_REQ_DECODE_PROXY;
@@ -3900,7 +3900,7 @@
* (eg: Linux).
*/
if (!(s->be->options & PR_O_ABRT_CLOSE) && txn->meth != HTTP_METH_POST)
- buffer_dont_read(buf);
+ channel_dont_read(buf);
if (txn->rsp.msg_state == HTTP_MSG_ERROR)
goto wait_other_side;
@@ -3914,7 +3914,7 @@
if (txn->rsp.msg_state == HTTP_MSG_TUNNEL) {
/* if any side switches to tunnel mode, the other one does too */
- buffer_auto_read(buf);
+ channel_auto_read(buf);
txn->req.msg_state = HTTP_MSG_TUNNEL;
goto wait_other_side;
}
@@ -3928,7 +3928,7 @@
if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_SCL) {
/* Server-close mode : queue a connection close to the server */
if (!(buf->flags & (CF_SHUTW|CF_SHUTW_NOW)))
- buffer_shutw_now(buf);
+ channel_shutw_now(buf);
}
else if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_CLO) {
/* Option forceclose is set, or either side wants to close,
@@ -3937,8 +3937,8 @@
* once both states are CLOSED.
*/
if (!(buf->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
- buffer_shutr_now(buf);
- buffer_shutw_now(buf);
+ channel_shutr_now(buf);
+ channel_shutw_now(buf);
}
}
else {
@@ -3947,7 +3947,7 @@
* in tunnel mode, so we're left with keep-alive only.
* This mode is currently not implemented, we switch to tunnel mode.
*/
- buffer_auto_read(buf);
+ channel_auto_read(buf);
txn->req.msg_state = HTTP_MSG_TUNNEL;
}
@@ -4017,7 +4017,7 @@
* while the request is being uploaded, so we don't disable
* reading.
*/
- /* buffer_dont_read(buf); */
+ /* channel_dont_read(buf); */
if (txn->req.msg_state == HTTP_MSG_ERROR)
goto wait_other_side;
@@ -4033,7 +4033,7 @@
if (txn->req.msg_state == HTTP_MSG_TUNNEL) {
/* if any side switches to tunnel mode, the other one does too */
- buffer_auto_read(buf);
+ channel_auto_read(buf);
txn->rsp.msg_state = HTTP_MSG_TUNNEL;
goto wait_other_side;
}
@@ -4051,7 +4051,7 @@
* catch that for the final cleanup.
*/
if (!(buf->flags & (CF_SHUTR|CF_SHUTR_NOW)))
- buffer_shutr_now(buf);
+ channel_shutr_now(buf);
}
else if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_CLO) {
/* Option forceclose is set, or either side wants to close,
@@ -4060,8 +4060,8 @@
* once both states are CLOSED.
*/
if (!(buf->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
- buffer_shutr_now(buf);
- buffer_shutw_now(buf);
+ channel_shutr_now(buf);
+ channel_shutw_now(buf);
}
}
else {
@@ -4070,7 +4070,7 @@
* in tunnel mode, so we're left with keep-alive only.
* This mode is currently not implemented, we switch to tunnel mode.
*/
- buffer_auto_read(buf);
+ channel_auto_read(buf);
txn->rsp.msg_state = HTTP_MSG_TUNNEL;
}
@@ -4110,8 +4110,8 @@
http_msg_closed:
/* drop any pending data */
bi_erase(buf);
- buffer_auto_close(buf);
- buffer_auto_read(buf);
+ channel_auto_close(buf);
+ channel_auto_read(buf);
goto wait_other_side;
}
@@ -4158,23 +4158,23 @@
(txn->req.msg_state == HTTP_MSG_CLOSED &&
txn->rsp.msg_state == HTTP_MSG_CLOSED)) {
s->req->analysers = 0;
- buffer_auto_close(s->req);
- buffer_auto_read(s->req);
+ channel_auto_close(s->req);
+ channel_auto_read(s->req);
s->rep->analysers = 0;
- buffer_auto_close(s->rep);
- buffer_auto_read(s->rep);
+ channel_auto_close(s->rep);
+ channel_auto_read(s->rep);
}
else if (txn->rsp.msg_state == HTTP_MSG_CLOSED ||
txn->rsp.msg_state == HTTP_MSG_ERROR ||
txn->req.msg_state == HTTP_MSG_ERROR ||
(s->rep->flags & CF_SHUTW)) {
s->rep->analysers = 0;
- buffer_auto_close(s->rep);
- buffer_auto_read(s->rep);
+ channel_auto_close(s->rep);
+ channel_auto_read(s->rep);
s->req->analysers = 0;
- buffer_abort(s->req);
- buffer_auto_close(s->req);
- buffer_auto_read(s->req);
+ channel_abort(s->req);
+ channel_auto_close(s->req);
+ channel_auto_read(s->req);
bi_erase(s->req);
}
else if (txn->req.msg_state == HTTP_MSG_CLOSED &&
@@ -4221,7 +4221,7 @@
}
/* in most states, we should abort in case of early close */
- buffer_auto_close(req);
+ channel_auto_close(req);
/* Note that we don't have to send 100-continue back because we don't
* need the data to complete our job, and it's up to the server to
@@ -4254,7 +4254,7 @@
msg->sol = msg->sov;
msg->next -= bytes; /* will be forwarded */
msg->chunk_len += bytes;
- msg->chunk_len -= buffer_forward(req, msg->chunk_len);
+ msg->chunk_len -= channel_forward(req, msg->chunk_len);
}
if (msg->msg_state == HTTP_MSG_DATA) {
@@ -4321,7 +4321,7 @@
/* for keep-alive we don't want to forward closes on DONE */
if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_KAL ||
(txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_SCL)
- buffer_dont_close(req);
+ channel_dont_close(req);
if (http_resync_states(s)) {
/* some state changes occurred, maybe the analyser
* was disabled too.
@@ -4347,8 +4347,8 @@
* request.
*/
if (s->be->options & PR_O_ABRT_CLOSE) {
- buffer_auto_read(req);
- buffer_auto_close(req);
+ channel_auto_read(req);
+ channel_auto_close(req);
}
else if (s->txn.meth == HTTP_METH_POST) {
/* POST requests may require to read extra CRLF
@@ -4356,7 +4356,7 @@
* an RST to be sent upon close on some systems
* (eg: Linux).
*/
- buffer_auto_read(req);
+ channel_auto_read(req);
}
return 0;
@@ -4391,7 +4391,7 @@
* chunks even if the client has closed, so we don't want to set CF_DONTCLOSE.
*/
if (msg->flags & HTTP_MSGF_TE_CHNK)
- buffer_dont_close(req);
+ channel_dont_close(req);
/* We know that more data are expected, but we couldn't send more than
* what we did. So we always set the CF_EXPECT_MORE flag so that the
@@ -4515,7 +4515,7 @@
/* some data has still not left the buffer, wake us once that's done */
if (rep->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))
goto abort_response;
- buffer_dont_close(rep);
+ channel_dont_close(rep);
rep->flags |= CF_READ_DONTWAIT; /* try to get back here ASAP */
return 0;
}
@@ -4577,7 +4577,7 @@
health_adjust(target_srv(&s->target), HANA_STATUS_HTTP_HDRRSP);
}
abort_response:
- buffer_auto_close(rep);
+ channel_auto_close(rep);
rep->analysers = 0;
txn->status = 502;
rep->prod->flags |= SI_FL_NOLINGER;
@@ -4610,7 +4610,7 @@
health_adjust(target_srv(&s->target), HANA_STATUS_HTTP_READ_ERROR);
}
- buffer_auto_close(rep);
+ channel_auto_close(rep);
rep->analysers = 0;
txn->status = 502;
rep->prod->flags |= SI_FL_NOLINGER;
@@ -4635,7 +4635,7 @@
health_adjust(target_srv(&s->target), HANA_STATUS_HTTP_READ_TIMEOUT);
}
- buffer_auto_close(rep);
+ channel_auto_close(rep);
rep->analysers = 0;
txn->status = 504;
rep->prod->flags |= SI_FL_NOLINGER;
@@ -4660,7 +4660,7 @@
health_adjust(target_srv(&s->target), HANA_STATUS_HTTP_BROKEN_PIPE);
}
- buffer_auto_close(rep);
+ channel_auto_close(rep);
rep->analysers = 0;
txn->status = 502;
rep->prod->flags |= SI_FL_NOLINGER;
@@ -4681,7 +4681,7 @@
s->be->be_counters.failed_resp++;
rep->analysers = 0;
- buffer_auto_close(rep);
+ channel_auto_close(rep);
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_CLICL;
@@ -4692,7 +4692,7 @@
return 0;
}
- buffer_dont_close(rep);
+ channel_dont_close(rep);
return 0;
}
@@ -4888,7 +4888,7 @@
/* end of job, return OK */
rep->analysers &= ~an_bit;
rep->analyse_exp = TICK_ETERNITY;
- buffer_auto_close(rep);
+ channel_auto_close(rep);
return 1;
}
@@ -5066,7 +5066,7 @@
*/
if (unlikely(txn->status == 100)) {
hdr_idx_init(&txn->hdr_idx);
- msg->next -= buffer_forward(rep, msg->next);
+ msg->next -= channel_forward(rep, msg->next);
msg->msg_state = HTTP_MSG_RPBEFORE;
txn->status = 0;
rep->analysers |= AN_RES_WAIT_HTTP | an_bit;
@@ -5290,7 +5290,7 @@
}
/* in most states, we should abort in case of early close */
- buffer_auto_close(res);
+ channel_auto_close(res);
if (msg->msg_state < HTTP_MSG_CHUNK_SIZE) {
/* we have msg->sov which points to the first byte of message body.
@@ -5315,7 +5315,7 @@
msg->sol = msg->sov;
msg->next -= bytes; /* will be forwarded */
msg->chunk_len += bytes;
- msg->chunk_len -= buffer_forward(res, msg->chunk_len);
+ msg->chunk_len -= channel_forward(res, msg->chunk_len);
}
if (msg->msg_state == HTTP_MSG_DATA) {
@@ -5379,7 +5379,7 @@
/* for keep-alive we don't want to forward closes on DONE */
if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_KAL ||
(txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_SCL)
- buffer_dont_close(res);
+ channel_dont_close(res);
if (http_resync_states(s)) {
http_silent_debug(__LINE__, s);
/* some state changes occurred, maybe the analyser
@@ -5426,7 +5426,7 @@
msg->sol = msg->sov;
msg->next -= bytes; /* will be forwarded */
msg->chunk_len += bytes;
- msg->chunk_len -= buffer_forward(res, msg->chunk_len);
+ msg->chunk_len -= channel_forward(res, msg->chunk_len);
}
/* When TE: chunked is used, we need to get there again to parse remaining
@@ -5437,7 +5437,7 @@
if ((msg->flags & HTTP_MSGF_TE_CHNK) ||
(txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_KAL ||
(txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_SCL)
- buffer_dont_close(res);
+ channel_dont_close(res);
/* We know that more data are expected, but we couldn't send more than
* what we did. So we always set the CF_EXPECT_MORE flag so that the
diff --git a/src/proto_tcp.c b/src/proto_tcp.c
index 22664dd..f6cc6a3 100644
--- a/src/proto_tcp.c
+++ b/src/proto_tcp.c
@@ -821,7 +821,7 @@
if (rule->cond) {
ret = acl_exec_cond(rule->cond, s->be, s, &s->txn, SMP_OPT_DIR_REQ | partial);
if (ret == ACL_PAT_MISS) {
- buffer_dont_connect(req);
+ channel_dont_connect(req);
/* just set the request timeout once at the beginning of the request */
if (!tick_isset(req->analyse_exp) && s->be->tcp_req.inspect_delay)
req->analyse_exp = tick_add_ifset(now_ms, s->be->tcp_req.inspect_delay);
@@ -836,8 +836,8 @@
if (ret) {
/* we have a matching rule. */
if (rule->action == TCP_ACT_REJECT) {
- buffer_abort(req);
- buffer_abort(s->rep);
+ channel_abort(req);
+ channel_abort(s->rep);
req->analysers = 0;
s->be->be_counters.denied_req++;
@@ -953,8 +953,8 @@
if (ret) {
/* we have a matching rule. */
if (rule->action == TCP_ACT_REJECT) {
- buffer_abort(rep);
- buffer_abort(s->req);
+ channel_abort(rep);
+ channel_abort(s->req);
rep->analysers = 0;
s->be->be_counters.denied_resp++;
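As the hunks above show, a TCP reject rule now aborts both directions
at once using the renamed call :

    /* TCP_ACT_REJECT : shut both directions down ASAP and drop the
     * analysers so that nothing processes this session any further.
     */
    channel_abort(req);
    channel_abort(s->rep);
    req->analysers = 0;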
diff --git a/src/session.c b/src/session.c
index a62b42a..e5a8b7e 100644
--- a/src/session.c
+++ b/src/session.c
@@ -217,15 +217,15 @@
if (unlikely(fcntl(cfd, F_SETFL, O_NONBLOCK) == -1))
goto out_free_task;
- if (unlikely((s->req = pool_alloc2(pool2_buffer)) == NULL))
+ if (unlikely((s->req = pool_alloc2(pool2_channel)) == NULL))
goto out_free_task; /* no memory */
- if (unlikely((s->rep = pool_alloc2(pool2_buffer)) == NULL))
+ if (unlikely((s->rep = pool_alloc2(pool2_channel)) == NULL))
goto out_free_req; /* no memory */
/* initialize the request buffer */
s->req->buf.size = global.tune.bufsize;
- buffer_init(s->req);
+ channel_init(s->req);
s->req->prod = &s->si[0];
s->req->cons = &s->si[1];
s->si[0].ib = s->si[1].ob = s->req;
@@ -242,7 +242,7 @@
/* initialize response buffer */
s->rep->buf.size = global.tune.bufsize;
- buffer_init(s->rep);
+ channel_init(s->rep);
s->rep->prod = &s->si[1];
s->rep->cons = &s->si[0];
s->si[0].ob = s->si[1].ib = s->rep;
@@ -302,9 +302,9 @@
/* Error unrolling */
out_free_rep:
- pool_free2(pool2_buffer, s->rep);
+ pool_free2(pool2_channel, s->rep);
out_free_req:
- pool_free2(pool2_buffer, s->req);
+ pool_free2(pool2_channel, s->req);
out_free_task:
p->feconn--;
if (s->stkctr1_entry || s->stkctr2_entry)
@@ -363,8 +363,8 @@
if (s->rep->pipe)
put_pipe(s->rep->pipe);
- pool_free2(pool2_buffer, s->req);
- pool_free2(pool2_buffer, s->rep);
+ pool_free2(pool2_channel, s->req);
+ pool_free2(pool2_channel, s->rep);
http_end_txn(s);
@@ -399,7 +399,7 @@
/* We may want to free the maximum amount of pools if the proxy is stopping */
if (fe && unlikely(fe->state == PR_STSTOPPED)) {
- pool_flush2(pool2_buffer);
+ pool_flush2(pool2_channel);
pool_flush2(pool2_hdr_idx);
pool_flush2(pool2_requri);
pool_flush2(pool2_capture);
@@ -1032,8 +1032,8 @@
sw_failed:
/* immediately abort this request in case of allocation failure */
- buffer_abort(s->req);
- buffer_abort(s->rep);
+ channel_abort(s->req);
+ channel_abort(s->rep);
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_RESOURCE;
@@ -1335,13 +1335,13 @@
stream_int_check_timeouts(&s->si[0]);
stream_int_check_timeouts(&s->si[1]);
- /* check buffer timeouts, and close the corresponding stream interfaces
+ /* check channel timeouts, and close the corresponding stream interfaces
* for future reads or writes. Note: this will also concern upper layers
* but we do not touch any other flag. We must be careful and correctly
* detect state changes when calling them.
*/
- buffer_check_timeouts(s->req);
+ channel_check_timeouts(s->req);
if (unlikely((s->req->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
s->req->cons->flags |= SI_FL_NOLINGER;
@@ -1354,7 +1354,7 @@
si_shutr(s->req->prod);
}
- buffer_check_timeouts(s->rep);
+ channel_check_timeouts(s->rep);
if (unlikely((s->rep->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
s->rep->cons->flags |= SI_FL_NOLINGER;
@@ -1496,9 +1496,9 @@
* enabling them again when it disables itself, so
* that other analysers are called in similar conditions.
*/
- buffer_auto_read(s->req);
- buffer_auto_connect(s->req);
- buffer_auto_close(s->req);
+ channel_auto_read(s->req);
+ channel_auto_connect(s->req);
+ channel_auto_close(s->req);
/* We will call all analysers for which a bit is set in
* s->req->analysers, following the bit order from LSB
@@ -1688,8 +1688,8 @@
* it disables itself, so that other analysers are called
* in similar conditions.
*/
- buffer_auto_read(s->rep);
- buffer_auto_close(s->rep);
+ channel_auto_read(s->rep);
+ channel_auto_close(s->rep);
/* We will call all analysers for which a bit is set in
* s->rep->analysers, following the bit order from LSB
@@ -1843,7 +1843,7 @@
/* If no one is interested in analysing data, it's time to forward
* everything. We configure the buffer to forward indefinitely.
* Note that we're checking CF_SHUTR_NOW as an indication of a possible
- * recent call to buffer_abort().
+ * recent call to channel_abort().
*/
if (!s->req->analysers &&
!(s->req->flags & (CF_HIJACK|CF_SHUTW|CF_SHUTR_NOW)) &&
@@ -1853,16 +1853,16 @@
* attached to it. If any data are left in, we'll permit them to
* move.
*/
- buffer_auto_read(s->req);
- buffer_auto_connect(s->req);
- buffer_auto_close(s->req);
+ channel_auto_read(s->req);
+ channel_auto_connect(s->req);
+ channel_auto_close(s->req);
buffer_flush(&s->req->buf);
/* We'll let data flow between the producer (if still connected)
* to the consumer (which might possibly not be connected yet).
*/
if (!(s->req->flags & (CF_SHUTR|CF_SHUTW_NOW)))
- buffer_forward(s->req, CHN_INFINITE_FORWARD);
+ channel_forward(s->req, CHN_INFINITE_FORWARD);
}
/* check if it is wise to enable kernel splicing to forward request data */
@@ -1890,7 +1890,7 @@
*/
if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_HIJACK|CF_AUTO_CLOSE|CF_SHUTR)) ==
(CF_AUTO_CLOSE|CF_SHUTR)))
- buffer_shutw_now(s->req);
+ channel_shutw_now(s->req);
/* shutdown(write) pending */
if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
@@ -1900,7 +1900,7 @@
/* shutdown(write) done on server side, we must stop the client too */
if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW &&
!s->req->analysers))
- buffer_shutr_now(s->req);
+ channel_shutr_now(s->req);
/* shutdown(read) pending */
if (unlikely((s->req->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
@@ -1933,8 +1933,8 @@
}
else {
s->req->cons->state = SI_ST_CLO; /* shutw+ini = abort */
- buffer_shutw_now(s->req); /* fix buffer flags upon abort */
- buffer_shutr_now(s->rep);
+ channel_shutw_now(s->req); /* fix buffer flags upon abort */
+ channel_shutr_now(s->rep);
}
}
@@ -1979,7 +1979,7 @@
/* If no one is interested in analysing data, it's time to forward
* everything. We configure the buffer to forward indefinitely.
* Note that we're checking CF_SHUTR_NOW as an indication of a possible
- * recent call to buffer_abort().
+ * recent call to channel_abort().
*/
if (!s->rep->analysers &&
!(s->rep->flags & (CF_HIJACK|CF_SHUTW|CF_SHUTR_NOW)) &&
@@ -1989,15 +1989,15 @@
* attached to it. If any data are left in, we'll permit them to
* move.
*/
- buffer_auto_read(s->rep);
- buffer_auto_close(s->rep);
+ channel_auto_read(s->rep);
+ channel_auto_close(s->rep);
buffer_flush(&s->rep->buf);
/* We'll let data flow between the producer (if still connected)
* to the consumer.
*/
if (!(s->rep->flags & (CF_SHUTR|CF_SHUTW_NOW)))
- buffer_forward(s->rep, CHN_INFINITE_FORWARD);
+ channel_forward(s->rep, CHN_INFINITE_FORWARD);
/* if we have no analyser anymore in any direction and have a
* tunnel timeout set, use it now.
@@ -2036,7 +2036,7 @@
/* first, let's check if the response buffer needs to shutdown(write) */
if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_HIJACK|CF_AUTO_CLOSE|CF_SHUTR)) ==
(CF_AUTO_CLOSE|CF_SHUTR)))
- buffer_shutw_now(s->rep);
+ channel_shutw_now(s->rep);
/* shutdown(write) pending */
if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
@@ -2046,7 +2046,7 @@
/* shutdown(write) done on the client side, we must stop the server too */
if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW) &&
!s->rep->analysers)
- buffer_shutr_now(s->rep);
+ channel_shutr_now(s->rep);
/* shutdown(read) pending */
if (unlikely((s->rep->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
@@ -2313,8 +2313,8 @@
if (session->req->flags & (CF_SHUTW|CF_SHUTW_NOW))
return;
- buffer_shutw_now(session->req);
- buffer_shutr_now(session->rep);
+ channel_shutw_now(session->req);
+ channel_shutr_now(session->rep);
session->task->nice = 1024;
if (!(session->flags & SN_ERR_MASK))
session->flags |= why;
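Note in the session.c hunks above how the renamed channel_* calls mix
with buffer_flush(), which kept its "buffer" name because it only acts
on the embedded buffer, as explained at the top of this message.
Condensed as a sketch :

    /* no analyser left : enable reads, connects and closes, flush the
     * buffer, then schedule an unlimited forward if nothing is shut.
     */
    channel_auto_read(s->req);
    channel_auto_connect(s->req);
    channel_auto_close(s->req);
    buffer_flush(&s->req->buf);
    if (!(s->req->flags & (CF_SHUTR|CF_SHUTW_NOW)))
        channel_forward(s->req, CHN_INFINITE_FORWARD);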
diff --git a/src/stream_interface.c b/src/stream_interface.c
index ae633ea..765d04f 100644
--- a/src/stream_interface.c
+++ b/src/stream_interface.c
@@ -108,19 +108,19 @@
*/
void stream_int_retnclose(struct stream_interface *si, const struct chunk *msg)
{
- buffer_auto_read(si->ib);
- buffer_abort(si->ib);
- buffer_auto_close(si->ib);
- buffer_erase(si->ib);
+ channel_auto_read(si->ib);
+ channel_abort(si->ib);
+ channel_auto_close(si->ib);
+ channel_erase(si->ib);
bi_erase(si->ob);
if (likely(msg && msg->len))
bo_inject(si->ob, msg->str, msg->len);
si->ob->wex = tick_add_ifset(now_ms, si->ob->wto);
- buffer_auto_read(si->ob);
- buffer_auto_close(si->ob);
- buffer_shutr_now(si->ob);
+ channel_auto_read(si->ob);
+ channel_auto_close(si->ob);
+ channel_shutr_now(si->ob);
}
/* default update function for scheduled tasks, not used for embedded tasks */
@@ -1179,7 +1179,7 @@
/* we received a shutdown */
b->flags |= CF_READ_NULL;
if (b->flags & CF_AUTO_CLOSE)
- buffer_shutw_now(b);
+ channel_shutw_now(b);
stream_sock_read0(si);
conn_data_read0(conn);
return;