MAJOR: buffer: finalize buffer detachment

Now the buffers only contain the header and a pointer to the storage
area which can be anywhere. This will significantly simplify buffer
swapping and will make it possible to map chunks on buffers as well.

The buf_empty variable was removed, as now it's enough to have size==0
and area==NULL to designate the empty buffer (thus a non-allocated head
is the empty buffer by default). buf_wanted for now is indicated by
size==0 and area==(void *)1.

The channels and the checks now embed the buffer's head, and the only
pointer is to the storage area. This slightly increases the unallocated
buffer size (3 extra ints for the empty buffer) but considerably
simplifies dynamic buffer management. It will also later make it
possible to detach unused checks.

The way the struct buffer is arranged has proven quite efficient on a
number of tests, which makes sense given that size is always accessed
and often first, followed by the other ones.
diff --git a/include/common/buf.h b/include/common/buf.h
index bd58a4d..92db0a1 100644
--- a/include/common/buf.h
+++ b/include/common/buf.h
@@ -32,12 +32,25 @@
 
 /* Structure defining a buffer's head */
 struct buffer {
-	size_t head;                /* start offset of remaining data relative to area */
-	size_t data;                /* amount of data after head including wrapping */
 	size_t size;                /* buffer size in bytes */
-	char   area[0];             /* <size> bytes of stored data */
+	char  *area;                /* points to <size> bytes */
+	size_t data;                /* amount of data after head including wrapping */
+	size_t head;                /* start offset of remaining data relative to area */
 };
 
+/* A buffer may be in 3 different states :
+ *   - unallocated : size == 0, area == 0  (b_is_null() is true)
+ *   - waiting     : size == 0, area != 0
+ *   - allocated   : size  > 0, area  > 0
+ */
+
+/* initializers for certain buffer states. It is important that the NULL buffer
+ * remains the one with all fields initialized to zero so that a calloc() or a
+ * memset() on a struct automatically sets a NULL buffer.
+ */
+#define BUF_NULL   ((struct buffer){ })
+#define BUF_WANTED ((struct buffer){ .area = (char *)1 })
+
 
 /***************************************************************************/
 /* Functions used to compute offsets and pointers. Most of them exist in   */
@@ -46,13 +59,21 @@
 /* offset relative to the storage area.                                    */
 /***************************************************************************/
 
+/* b_is_null() : returns true if (and only if) the buffer is not yet allocated
+ * and thus points to a NULL area.
+ */
+static inline int b_is_null(const struct buffer *buf)
+{
+	return buf->area == NULL;
+}
+
 /* b_orig() : returns the pointer to the origin of the storage, which is the
  * location of byte at offset zero. This is mostly used by functions which
  * handle the wrapping by themselves.
  */
 static inline char *b_orig(const struct buffer *b)
 {
-	return (char *)b->area;
+	return b->area;
 }
 
 /* b_size() : returns the size of the buffer. */
@@ -66,7 +87,7 @@
  */
 static inline char *b_wrap(const struct buffer *b)
 {
-	return (char *)b->area + b->size;
+	return b->area + b->size;
 }
 
 /* b_data() : returns the number of bytes present in the buffer. */
diff --git a/include/common/buffer.h b/include/common/buffer.h
index 1657275..ed9f90b 100644
--- a/include/common/buffer.h
+++ b/include/common/buffer.h
@@ -43,8 +43,6 @@
 };
 
 extern struct pool_head *pool_head_buffer;
-extern struct buffer buf_empty;
-extern struct buffer buf_wanted;
 extern struct list buffer_wq;
 __decl_hathreads(extern HA_SPINLOCK_T buffer_wq_lock);
 
@@ -59,7 +57,7 @@
 /* Return 1 if the buffer has less than 1/4 of its capacity free, otherwise 0 */
 static inline int buffer_almost_full(const struct buffer *buf)
 {
-	if (buf == &buf_empty)
+	if (b_is_null(buf))
 		return 0;
 
 	return b_almost_full(buf);
@@ -69,65 +67,64 @@
 /* Functions below are used for buffer allocation */
 /**************************************************/
 
-/* Allocates a buffer and replaces *buf with this buffer. If no memory is
- * available, &buf_wanted is used instead. No control is made to check if *buf
- * already pointed to another buffer. The allocated buffer is returned, or
- * NULL in case no memory is available.
+/* Allocates a buffer and assigns it to *buf. If no memory is available,
+ * ((char *)1) is assigned instead with a zero size. No control is made to
+ * check if *buf already pointed to another buffer. The allocated buffer is
+ * returned, or NULL in case no memory is available.
  */
-static inline struct buffer *b_alloc(struct buffer **buf)
+static inline struct buffer *b_alloc(struct buffer *buf)
 {
-	struct buffer *b;
+	char *area;
 
-	*buf = &buf_wanted;
-	b = pool_alloc_dirty(pool_head_buffer);
-	if (likely(b)) {
-		b->size = pool_head_buffer->size - sizeof(struct buffer);
-		b_reset(b);
-		*buf = b;
-	}
-	return b;
+	*buf = BUF_WANTED;
+	area = pool_alloc_dirty(pool_head_buffer);
+	if (unlikely(!area))
+		return NULL;
+
+	buf->area = area;
+	buf->size = pool_head_buffer->size;
+	return buf;
 }
 
-/* Allocates a buffer and replaces *buf with this buffer. If no memory is
- * available, &buf_wanted is used instead. No control is made to check if *buf
- * already pointed to another buffer. The allocated buffer is returned, or
- * NULL in case no memory is available. The difference with b_alloc() is that
- * this function only picks from the pool and never calls malloc(), so it can
- * fail even if some memory is available.
+/* Allocates a buffer and assigns it to *buf. If no memory is available,
+ * ((char *)1) is assigned instead with a zero size. No control is made to
+ * check if *buf already pointed to another buffer. The allocated buffer is
+ * returned, or NULL in case no memory is available. The difference with
+ * b_alloc() is that this function only picks from the pool and never calls
+ * malloc(), so it can fail even if some memory is available.
  */
-static inline struct buffer *b_alloc_fast(struct buffer **buf)
+static inline struct buffer *b_alloc_fast(struct buffer *buf)
 {
-	struct buffer *b;
+	char *area;
 
-	*buf = &buf_wanted;
-	b = pool_get_first(pool_head_buffer);
-	if (likely(b)) {
-		b->size = pool_head_buffer->size - sizeof(struct buffer);
-		b_reset(b);
-		*buf = b;
-	}
-	return b;
+	*buf = BUF_WANTED;
+	area = pool_get_first(pool_head_buffer);
+	if (unlikely(!area))
+		return NULL;
+
+	buf->area = area;
+	buf->size = pool_head_buffer->size;
+	return buf;
 }
 
-/* Releases buffer *buf (no check of emptiness) */
-static inline void __b_drop(struct buffer **buf)
+/* Releases buffer <buf> (no check of emptiness) */
+static inline void __b_drop(struct buffer *buf)
 {
-	pool_free(pool_head_buffer, *buf);
+	pool_free(pool_head_buffer, buf->area);
 }
 
-/* Releases buffer *buf if allocated. */
-static inline void b_drop(struct buffer **buf)
+/* Releases buffer <buf> if allocated. */
+static inline void b_drop(struct buffer *buf)
 {
-	if (!(*buf)->size)
-		return;
-	__b_drop(buf);
+	if (buf->size)
+		__b_drop(buf);
 }
 
-/* Releases buffer *buf if allocated, and replaces it with &buf_empty. */
-static inline void b_free(struct buffer **buf)
+/* Releases buffer <buf> if allocated, and marks it empty. */
+static inline void b_free(struct buffer *buf)
 {
 	b_drop(buf);
-	*buf = &buf_empty;
+	*buf = BUF_NULL;
 }
 
 /* Ensures that <buf> is allocated. If an allocation is needed, it ensures that
@@ -141,45 +138,44 @@
  * after the allocation, regardless how many threads that doing it in the same
  * time. So, we use internal and lockless memory functions (prefixed with '__').
  */
-static inline struct buffer *b_alloc_margin(struct buffer **buf, int margin)
+static inline struct buffer *b_alloc_margin(struct buffer *buf, int margin)
 {
-	struct buffer *b;
+	char *area;
 
-	if ((*buf)->size)
-		return *buf;
+	if (buf->size)
+		return buf;
 
-	*buf = &buf_wanted;
+	*buf = BUF_WANTED;
+
 #ifndef CONFIG_HAP_LOCKLESS_POOLS
 	HA_SPIN_LOCK(POOL_LOCK, &pool_head_buffer->lock);
 #endif
 
 	/* fast path */
 	if ((pool_head_buffer->allocated - pool_head_buffer->used) > margin) {
-		b = __pool_get_first(pool_head_buffer);
-		if (likely(b)) {
+		area = __pool_get_first(pool_head_buffer);
+		if (likely(area)) {
 #ifndef CONFIG_HAP_LOCKLESS_POOLS
 			HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
 #endif
-			b->size = pool_head_buffer->size - sizeof(struct buffer);
-			b_reset(b);
-			*buf = b;
-			return b;
+			goto done;
 		}
 	}
 
 	/* slow path, uses malloc() */
-	b = __pool_refill_alloc(pool_head_buffer, margin);
+	area = __pool_refill_alloc(pool_head_buffer, margin);
 
 #ifndef CONFIG_HAP_LOCKLESS_POOLS
 	HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
 #endif
 
-	if (b) {
-		b->size = pool_head_buffer->size - sizeof(struct buffer);
-		b_reset(b);
-		*buf = b;
-	}
-	return b;
+	if (unlikely(!area))
+		return NULL;
+
+ done:
+	buf->area = area;
+	buf->size = pool_head_buffer->size;
+	return buf;
 }
 
 
diff --git a/include/proto/channel.h b/include/proto/channel.h
index 27023ab..d831fcc 100644
--- a/include/proto/channel.h
+++ b/include/proto/channel.h
@@ -87,31 +87,31 @@
 /* c_orig() : returns the pointer to the channel buffer's origin */
 static inline char *c_orig(const struct channel *c)
 {
-	return b_orig(c->buf);
+	return b_orig(&c->buf);
 }
 
 /* c_size() : returns the size of the channel's buffer */
 static inline size_t c_size(const struct channel *c)
 {
-	return b_size(c->buf);
+	return b_size(&c->buf);
 }
 
 /* c_wrap() : returns the pointer to the channel buffer's wrapping point */
 static inline char *c_wrap(const struct channel *c)
 {
-	return b_wrap(c->buf);
+	return b_wrap(&c->buf);
 }
 
 /* c_data() : returns the amount of data in the channel's buffer */
 static inline size_t c_data(const struct channel *c)
 {
-	return b_data(c->buf);
+	return b_data(&c->buf);
 }
 
 /* c_room() : returns the room left in the channel's buffer */
 static inline size_t c_room(const struct channel *c)
 {
-	return b_size(c->buf) - b_data(c->buf);
+	return b_size(&c->buf) - b_data(&c->buf);
 }
 
 /* c_empty() : returns a boolean indicating if the channel's buffer is empty */
@@ -145,11 +145,11 @@
  */
 static inline size_t ci_next_ofs(const struct channel *c, size_t o)
 {
-	return b_next_ofs(c->buf, o);
+	return b_next_ofs(&c->buf, o);
 }
 static inline char *ci_next(const struct channel *c, const char *p)
 {
-	return b_next(c->buf, p);
+	return b_next(&c->buf, p);
 }
 
 
@@ -161,7 +161,7 @@
  */
 static inline char *c_ptr(const struct channel *c, ssize_t ofs)
 {
-	return b_peek(c->buf, co_data(c) + ofs);
+	return b_peek(&c->buf, co_data(c) + ofs);
 }
 
 /* c_adv() : advances the channel's buffer by <adv> bytes, which means that the
@@ -187,13 +187,13 @@
 /* c_realign_if_empty() : realign the channel's buffer if it's empty */
 static inline void c_realign_if_empty(struct channel *chn)
 {
-	b_realign_if_empty(chn->buf);
+	b_realign_if_empty(&chn->buf);
 }
 
 /* Sets the amount of output for the channel */
 static inline void co_set_data(struct channel *c, size_t output)
 {
-	c->buf->data += output - c->output;
+	c->buf.data += output - c->output;
 	c->output = output;
 }
 
@@ -204,19 +204,19 @@
  */
 static inline size_t __co_head_ofs(const struct channel *c)
 {
-	return __b_peek_ofs(c->buf, 0);
+	return __b_peek_ofs(&c->buf, 0);
 }
 static inline char *__co_head(const struct channel *c)
 {
-	return __b_peek(c->buf, 0);
+	return __b_peek(&c->buf, 0);
 }
 static inline size_t co_head_ofs(const struct channel *c)
 {
-	return b_peek_ofs(c->buf, 0);
+	return b_peek_ofs(&c->buf, 0);
 }
 static inline char *co_head(const struct channel *c)
 {
-	return b_peek(c->buf, 0);
+	return b_peek(&c->buf, 0);
 }
 
 
@@ -226,19 +226,19 @@
  */
 static inline size_t __co_tail_ofs(const struct channel *c)
 {
-	return __b_peek_ofs(c->buf, co_data(c));
+	return __b_peek_ofs(&c->buf, co_data(c));
 }
 static inline char *__co_tail(const struct channel *c)
 {
-	return __b_peek(c->buf, co_data(c));
+	return __b_peek(&c->buf, co_data(c));
 }
 static inline size_t co_tail_ofs(const struct channel *c)
 {
-	return b_peek_ofs(c->buf, co_data(c));
+	return b_peek_ofs(&c->buf, co_data(c));
 }
 static inline char *co_tail(const struct channel *c)
 {
-	return b_peek(c->buf, co_data(c));
+	return b_peek(&c->buf, co_data(c));
 }
 
 
@@ -248,19 +248,19 @@
  */
 static inline size_t __ci_head_ofs(const struct channel *c)
 {
-	return __b_peek_ofs(c->buf, co_data(c));
+	return __b_peek_ofs(&c->buf, co_data(c));
 }
 static inline char *__ci_head(const struct channel *c)
 {
-	return __b_peek(c->buf, co_data(c));
+	return __b_peek(&c->buf, co_data(c));
 }
 static inline size_t ci_head_ofs(const struct channel *c)
 {
-	return b_peek_ofs(c->buf, co_data(c));
+	return b_peek_ofs(&c->buf, co_data(c));
 }
 static inline char *ci_head(const struct channel *c)
 {
-	return b_peek(c->buf, co_data(c));
+	return b_peek(&c->buf, co_data(c));
 }
 
 
@@ -270,19 +270,19 @@
  */
 static inline size_t __ci_tail_ofs(const struct channel *c)
 {
-	return __b_peek_ofs(c->buf, c_data(c));
+	return __b_peek_ofs(&c->buf, c_data(c));
 }
 static inline char *__ci_tail(const struct channel *c)
 {
-	return __b_peek(c->buf, c_data(c));
+	return __b_peek(&c->buf, c_data(c));
 }
 static inline size_t ci_tail_ofs(const struct channel *c)
 {
-	return b_peek_ofs(c->buf, c_data(c));
+	return b_peek_ofs(&c->buf, c_data(c));
 }
 static inline char *ci_tail(const struct channel *c)
 {
-	return b_peek(c->buf, c_data(c));
+	return b_peek(&c->buf, c_data(c));
 }
 
 
@@ -292,32 +292,32 @@
  */
 static inline size_t __ci_stop_ofs(const struct channel *c)
 {
-	return __b_stop_ofs(c->buf);
+	return __b_stop_ofs(&c->buf);
 }
 static inline const char *__ci_stop(const struct channel *c)
 {
-	return __b_stop(c->buf);
+	return __b_stop(&c->buf);
 }
 static inline size_t ci_stop_ofs(const struct channel *c)
 {
-	return b_stop_ofs(c->buf);
+	return b_stop_ofs(&c->buf);
 }
 static inline const char *ci_stop(const struct channel *c)
 {
-	return b_stop(c->buf);
+	return b_stop(&c->buf);
 }
 
 
 /* Returns the amount of input data that can contiguously be read at once */
 static inline size_t ci_contig_data(const struct channel *c)
 {
-	return b_contig_data(c->buf, co_data(c));
+	return b_contig_data(&c->buf, co_data(c));
 }
 
 /* Initialize all fields in the channel. */
 static inline void channel_init(struct channel *chn)
 {
-	chn->buf = &buf_empty;
+	chn->buf = BUF_NULL;
 	chn->to_forward = 0;
 	chn->last_read = now_ms;
 	chn->xfer_small = chn->xfer_large = 0;
@@ -382,9 +382,9 @@
  */
 static inline int channel_is_rewritable(const struct channel *chn)
 {
-	int rem = chn->buf->size;
+	int rem = chn->buf.size;
 
-	rem -= b_data(chn->buf);
+	rem -= b_data(&chn->buf);
 	rem -= global.tune.maxrewrite;
 	return rem >= 0;
 }
@@ -402,7 +402,7 @@
  * decide when to stop reading into a buffer when we want to ensure that we
  * leave the reserve untouched after all pending outgoing data are forwarded.
  * The reserved space is taken into account if ->to_forward indicates that an
- * end of transfer is close to happen. Note that both ->buf->o and ->to_forward
+ * end of transfer is close to happen. Note that both ->output and ->to_forward
  * are considered as available since they're supposed to leave the buffer. The
  * test is optimized to avoid as many operations as possible for the fast case
  * and to be used as an "if" condition. Just like channel_recv_limit(), we
@@ -411,12 +411,12 @@
  */
 static inline int channel_may_recv(const struct channel *chn)
 {
-	int rem = chn->buf->size;
+	int rem = chn->buf.size;
 
-	if (chn->buf == &buf_empty)
+	if (b_is_null(&chn->buf))
 		return 1;
 
-	rem -= b_data(chn->buf);
+	rem -= b_data(&chn->buf);
 	if (!rem)
 		return 0; /* buffer already full */
 
@@ -433,7 +433,7 @@
 	 * the reserve, and we want to ensure they're covered by scheduled
 	 * forwards.
 	 */
-	rem = ci_data(chn) + global.tune.maxrewrite - chn->buf->size;
+	rem = ci_data(chn) + global.tune.maxrewrite - chn->buf.size;
 	return rem < 0 || (unsigned int)rem < chn->to_forward;
 }
 
@@ -476,7 +476,7 @@
 static inline void channel_erase(struct channel *chn)
 {
 	chn->to_forward = 0;
-	b_reset(chn->buf);
+	b_reset(&chn->buf);
 }
 
 /* marks the channel as "shutdown" ASAP for reads */
@@ -608,8 +608,8 @@
 	int reserve;
 
 	/* return zero if empty */
-	reserve = chn->buf->size;
-	if (chn->buf == &buf_empty)
+	reserve = chn->buf.size;
+	if (b_is_null(&chn->buf))
 		goto end;
 
 	/* return size - maxrewrite if we can't send */
@@ -630,9 +630,9 @@
 	reserve -= transit;
 	if (transit < chn->to_forward ||                 // addition overflow
 	    transit >= (unsigned)global.tune.maxrewrite) // enough transit data
-		return chn->buf->size;
+		return chn->buf.size;
  end:
-	return chn->buf->size - reserve;
+	return chn->buf.size - reserve;
 }
 
 /* Returns non-zero if the channel's INPUT buffer's is considered full, which
@@ -646,7 +646,7 @@
  */
 static inline int channel_full(const struct channel *c, unsigned int reserve)
 {
-	if (c->buf == &buf_empty)
+	if (b_is_null(&c->buf))
 		return 0;
 
 	return (ci_data(c) + reserve >= c_size(c));
@@ -662,7 +662,7 @@
 {
 	int ret;
 
-	ret = channel_recv_limit(chn) - b_data(chn->buf);
+	ret = channel_recv_limit(chn) - b_data(&chn->buf);
 	if (ret < 0)
 		ret = 0;
 	return ret;
@@ -675,7 +675,7 @@
  */
 static inline int ci_space_for_replace(const struct channel *chn)
 {
-	const struct buffer *buf = chn->buf;
+	const struct buffer *buf = &chn->buf;
 	const char *end;
 
 	/* If the input side data overflows, we cannot insert data contiguously. */
@@ -745,7 +745,7 @@
 	if (!ci_data(chn))
 		return;
 
-	chn->buf->data = co_data(chn);
+	chn->buf.data = co_data(chn);
 }
 
 /* This function realigns a possibly wrapping channel buffer so that the input
@@ -756,7 +756,7 @@
  */
 static inline void channel_slow_realign(struct channel *chn, char *swap)
 {
-	return b_slow_realign(chn->buf, swap, co_data(chn));
+	return b_slow_realign(&chn->buf, swap, co_data(chn));
 }
 
 /*
@@ -768,7 +768,7 @@
  */
 static inline void co_skip(struct channel *chn, int len)
 {
-	b_del(chn->buf, len);
+	b_del(&chn->buf, len);
 	chn->output -= len;
 	c_realign_if_empty(chn);
 
diff --git a/include/proto/stream_interface.h b/include/proto/stream_interface.h
index 3ecfa01..03756ba 100644
--- a/include/proto/stream_interface.h
+++ b/include/proto/stream_interface.h
@@ -74,13 +74,13 @@
 /* returns the buffer which receives data from this stream interface (input channel's buffer) */
 static inline struct buffer *si_ib(struct stream_interface *si)
 {
-	return si_ic(si)->buf;
+	return &si_ic(si)->buf;
 }
 
 /* returns the buffer which feeds data to this stream interface (output channel's buffer) */
 static inline struct buffer *si_ob(struct stream_interface *si)
 {
-	return si_oc(si)->buf;
+	return &si_oc(si)->buf;
 }
 
 /* returns the stream associated to a stream interface */
diff --git a/include/types/channel.h b/include/types/channel.h
index 5205bd6..7879b12 100644
--- a/include/types/channel.h
+++ b/include/types/channel.h
@@ -187,7 +187,7 @@
 struct channel {
 	unsigned int flags;             /* CF_* */
 	unsigned int analysers;         /* bit field indicating what to do on the channel */
-	struct buffer *buf;		/* buffer attached to the channel, always present but may move */
+	struct buffer buf;		/* buffer attached to the channel, always present but may move */
 	struct pipe *pipe;		/* non-NULL only when data present */
 	size_t output;                  /* part of buffer which is to be forwarded */
 	unsigned int to_forward;        /* number of bytes to forward after out without a wake-up */
diff --git a/include/types/checks.h b/include/types/checks.h
index 853a5bf..359b921 100644
--- a/include/types/checks.h
+++ b/include/types/checks.h
@@ -18,6 +18,7 @@
 #include <common/config.h>
 #include <common/mini-clist.h>
 #include <common/regex.h>
+#include <common/buffer.h>
 
 #include <types/connection.h>
 #include <types/obj_type.h>
@@ -157,7 +158,7 @@
 struct check {
 	struct xprt_ops *xprt;			/* transport layer operations for health checks */
 	struct conn_stream *cs;			/* conn_stream state for health checks */
-	struct buffer *bi, *bo;			/* input and output buffers to send/recv check */
+	struct buffer bi, bo;			/* input and output buffers to send/recv check */
 	struct task *task;			/* the task associated to the health check processing, NULL if disabled */
 	struct timeval start;			/* last health check start time */
 	long duration;				/* time in ms took to finish last health check */
diff --git a/include/types/compression.h b/include/types/compression.h
index 9a0cc78..e515aad 100644
--- a/include/types/compression.h
+++ b/include/types/compression.h
@@ -45,7 +45,7 @@
 	struct slz_stream strm;
 	const void *direct_ptr; /* NULL or pointer to beginning of data */
 	int direct_len;         /* length of direct_ptr if not NULL */
-	struct buffer *queued;  /* if not NULL, data already queued */
+	struct buffer queued;   /* if allocated, data already queued */
 #elif defined(USE_ZLIB)
 	z_stream strm; /* zlib stream */
 	void *zlib_deflate_state;
diff --git a/include/types/spoe.h b/include/types/spoe.h
index 2f13d37..a744cd7 100644
--- a/include/types/spoe.h
+++ b/include/types/spoe.h
@@ -306,7 +306,7 @@
 	struct list        *events;       /* List of messages that will be sent during the stream processing */
 	struct list        *groups;       /* List of available SPOE group */
 
-	struct buffer      *buffer;       /* Buffer used to store a encoded messages */
+	struct buffer       buffer;       /* Buffer used to store an encoded message */
 	struct buffer_wait  buffer_wait;  /* position in the list of ressources waiting for a buffer */
 	struct list         list;
 
@@ -357,7 +357,7 @@
 	int                 rlen;           /* reason length */
 #endif
 
-	struct buffer      *buffer;         /* Buffer used to store a encoded messages */
+	struct buffer       buffer;         /* Buffer used to store an encoded message */
 	struct buffer_wait  buffer_wait;    /* position in the list of ressources waiting for a buffer */
 	struct list         waiting_queue;  /* list of streams waiting for a ACK frame, in sync and pipelining mode */
 	struct list         list;           /* next spoe appctx for the same agent */