MINOR: shctx: Shared objects block by block allocation.
This patch makes shctx capable of storing objects in several parts,
each part being made of several blocks. There is no longer any need to
walk through a row up to its last block to append new blocks.
A new struct shared_block pointer member, named last_reserved, has been
added to struct shared_block so as to memorize the last block which was
reserved by shctx_row_reserve_hot(). The same goes for the "last_append" pointer,
which is used to memorize the last block used by shctx_row_data_append() to store the data.
diff --git a/include/proto/shctx.h b/include/proto/shctx.h
index 55cb2a7..13e00c7 100644
--- a/include/proto/shctx.h
+++ b/include/proto/shctx.h
@@ -32,11 +32,13 @@
#endif
int shctx_init(struct shared_context **orig_shctx, int maxblocks, int blocksize, int extra, int shared);
-struct shared_block *shctx_row_reserve_hot(struct shared_context *shctx, int data_len);
+struct shared_block *shctx_row_reserve_hot(struct shared_context *shctx,
+ struct shared_block *last, int data_len);
void shctx_row_inc_hot(struct shared_context *shctx, struct shared_block *first);
void shctx_row_dec_hot(struct shared_context *shctx, struct shared_block *first);
int shctx_row_data_append(struct shared_context *shctx,
- struct shared_block *first, unsigned char *data, int len);
+ struct shared_block *first, struct shared_block *from,
+ unsigned char *data, int len);
int shctx_row_data_get(struct shared_context *shctx, struct shared_block *first,
unsigned char *dst, int offset, int len);
@@ -180,6 +182,19 @@
/* List Macros */
+/*
+ * Insert <s> block after <head> which is not necessarily the head of a list,
+ * so between <head> and the next element after <head>.
+ */
+static inline void shctx_block_append_hot(struct shared_context *shctx,
+ struct list *head,
+ struct shared_block *s)
+{
+ shctx->nbav--;
+ LIST_DEL(&s->list);
+ LIST_ADD(head, &s->list);
+}
+
static inline void shctx_block_set_hot(struct shared_context *shctx,
struct shared_block *s)
{
diff --git a/include/types/shctx.h b/include/types/shctx.h
index 559beba..186f736 100644
--- a/include/types/shctx.h
+++ b/include/types/shctx.h
@@ -24,6 +24,8 @@
unsigned int len; /* data length for the row */
unsigned int block_count; /* number of blocks */
unsigned int refcount;
+ struct shared_block *last_reserved;
+ struct shared_block *last_append;
unsigned char data[0];
};
diff --git a/src/cache.c b/src/cache.c
index 2024642..b549537 100644
--- a/src/cache.c
+++ b/src/cache.c
@@ -224,7 +224,7 @@
/* Skip remaining headers to fill the cache */
c_adv(msg->chn, st->hdrs_len);
ret = shctx_row_data_append(shctx,
- st->first_block,
+ st->first_block, NULL,
(unsigned char *)ci_head(msg->chn),
MIN(ci_contig_data(msg->chn), len - st->hdrs_len));
/* Rewind the buffer to forward all data */
@@ -440,7 +440,7 @@
shctx_lock(shctx);
- first = shctx_row_reserve_hot(shctx, sizeof(struct cache_entry) + msg->sov + msg->body_len);
+ first = shctx_row_reserve_hot(shctx, NULL, sizeof(struct cache_entry) + msg->sov + msg->body_len);
if (!first) {
shctx_unlock(shctx);
goto out;
@@ -465,7 +465,7 @@
/* does not need to be locked because it's in the "hot" list,
* copy the headers */
- if (shctx_row_data_append(shctx, first, (unsigned char *)ci_head(&s->res), msg->sov) < 0)
+ if (shctx_row_data_append(shctx, first, NULL, (unsigned char *)ci_head(&s->res), msg->sov) < 0)
goto out;
/* register the buffer in the filter ctx for filling it with data*/
diff --git a/src/shctx.c b/src/shctx.c
index 59ac8b8..2a149a1 100644
--- a/src/shctx.c
+++ b/src/shctx.c
@@ -25,42 +25,67 @@
#endif
/*
- * Reserve a row, put it in the hotlist, set the refcount to 1
+ * Reserve a new row if <first> is null, put it in the hotlist, set the refcount to 1
+ * or append new blocks to the row with <first> as first block if non null.
*
* Reserve blocks in the avail list and put them in the hot list
* Return the first block put in the hot list or NULL if not enough blocks available
*/
-struct shared_block *shctx_row_reserve_hot(struct shared_context *shctx, int data_len)
+struct shared_block *shctx_row_reserve_hot(struct shared_context *shctx,
+ struct shared_block *first, int data_len)
{
- struct shared_block *block, *sblock, *ret = NULL, *first;
+ struct shared_block *last = NULL, *block, *sblock, *ret = NULL, *next;
int enough = 0;
int freed = 0;
+ int remain;
/* not enough usable blocks */
if (data_len > shctx->nbav * shctx->block_size)
goto out;
+	/* Note that <remain> can be zero only if <first> is not NULL. */
+ remain = 1;
+ if (first) {
+ /* Check that there is some block to reserve.
+ * In this first block of code we compute the remaining room in the
+ * current list of block already reserved for this object.
+ * We return asap if there is enough room to copy <data_len> bytes.
+ */
+ last = first->last_reserved;
+ /* Remaining room. */
+ remain = (shctx->block_size * first->block_count - first->len);
+ if (remain) {
+ if (remain > data_len) {
+ return last ? last : first;
+ } else {
+ data_len -= remain;
+ if (!data_len)
+ return last ? last : first;
+ }
+ }
+ }
+
while (!enough && !LIST_ISEMPTY(&shctx->avail)) {
int count = 0;
int first_count = 0, first_len = 0;
- first = block = LIST_NEXT(&shctx->avail, struct shared_block *, list);
+ next = block = LIST_NEXT(&shctx->avail, struct shared_block *, list);
if (ret == NULL)
- ret = first;
+ ret = next;
- first_count = first->block_count;
- first_len = first->len;
+ first_count = next->block_count;
+ first_len = next->len;
/*
Should never been set to 0.
- if (first->block_count == 0)
- first->block_count = 1;
+ if (next->block_count == 0)
+ next->block_count = 1;
*/
list_for_each_entry_safe_from(block, sblock, &shctx->avail, list) {
/* release callback */
if (first_len && shctx->free_block)
- shctx->free_block(first, block);
+ shctx->free_block(next, block);
block->block_count = 1;
block->len = 0;
@@ -68,22 +93,41 @@
freed++;
data_len -= shctx->block_size;
- if (data_len > 0)
- shctx_block_set_hot(shctx, block);
-
- if (data_len <= 0 && !enough) {
- shctx_block_set_hot(shctx, block);
- ret->block_count = freed;
- ret->refcount = 1;
- enough = 1;
+ if (data_len > 0 || !enough) {
+ if (last) {
+ shctx_block_append_hot(shctx, &last->list, block);
+ last = block;
+ } else {
+ shctx_block_set_hot(shctx, block);
+ }
+ if (!remain) {
+ first->last_append = block;
+ remain = 1;
+ }
+ if (data_len <= 0) {
+ ret->block_count = freed;
+ ret->refcount = 1;
+ ret->last_reserved = block;
+ enough = 1;
+ }
}
-
count++;
if (count >= first_count)
break;
}
}
+ if (first) {
+ first->block_count += ret->block_count;
+ first->last_reserved = ret->last_reserved;
+ /* Reset this block. */
+ ret->last_reserved = NULL;
+ ret->block_count = 1;
+ ret->refcount = 0;
+ /* Return the first block. */
+ ret = first;
+ }
+
out:
return ret;
}
@@ -147,50 +191,44 @@
* Return the amount of appended data if ret >= 0
* or how much more space it needs to contains the data if < 0.
*/
-int shctx_row_data_append(struct shared_context *shctx, struct shared_block *first, unsigned char *data, int len)
+int shctx_row_data_append(struct shared_context *shctx,
+ struct shared_block *first, struct shared_block *from,
+ unsigned char *data, int len)
{
int remain, start;
- int count = 0;
struct shared_block *block;
-
/* return -<len> needed to work */
if (len > first->block_count * shctx->block_size - first->len)
return (first->block_count * shctx->block_size - first->len) - len;
- /* skipping full buffers, stop at the first buffer with remaining space */
- block = first;
+ block = from ? from : first;
list_for_each_entry_from(block, &shctx->hot, list) {
- count++;
-
-
- /* break if there is not enough blocks */
- if (count > first->block_count)
- break;
-
/* end of copy */
if (len <= 0)
break;
- /* skip full buffers */
- if (count * shctx->block_size <= first->len)
- continue;
-
- /* remaining space in the current block which is not full */
- remain = (shctx->block_size * count - first->len) % shctx->block_size;
- /* if remain == 0, previous buffer are full, or first->len == 0 */
- remain = remain ? remain : shctx->block_size;
-
- /* start must be calculated before remain is modified */
- start = shctx->block_size - remain;
+ /* remaining written bytes in the current block. */
+ remain = (shctx->block_size * first->block_count - first->len) % shctx->block_size;
+ /* if remain == 0, previous buffers are full, or first->len == 0 */
+ if (!remain) {
+ remain = shctx->block_size;
+ start = 0;
+ }
+ else {
+ /* start must be calculated before remain is modified */
+ start = shctx->block_size - remain;
+ }
/* must not try to copy more than len */
remain = MIN(remain, len);
memcpy(block->data + start, data, remain);
+
data += remain;
len -= remain;
first->len += remain; /* update len in the head of the row */
+ first->last_append = block;
}
return len;
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index 4183376..ee71369 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -3871,7 +3871,7 @@
struct shared_block *first;
struct sh_ssl_sess_hdr *sh_ssl_sess, *oldsh_ssl_sess;
- first = shctx_row_reserve_hot(ssl_shctx, data_len + sizeof(struct sh_ssl_sess_hdr));
+ first = shctx_row_reserve_hot(ssl_shctx, NULL, data_len + sizeof(struct sh_ssl_sess_hdr));
if (!first) {
/* Could not retrieve enough free blocks to store that session */
return 0;
@@ -3897,7 +3897,7 @@
first->len = sizeof(struct sh_ssl_sess_hdr);
}
- if (shctx_row_data_append(ssl_shctx, first, data, data_len) < 0) {
+ if (shctx_row_data_append(ssl_shctx, first, NULL, data, data_len) < 0) {
shctx_row_dec_hot(ssl_shctx, first);
return 0;
}