blob: bfca085bb9693e696574076d879d0c8da8ce86e0 [file] [log] [blame]
Emeric Brun3e541d12012-09-03 11:14:36 +02001/*
2 * shctx.c - shared context management functions for SSL
3 *
4 * Copyright (C) 2011-2012 EXCELIANCE
5 *
6 * Author: Emeric Brun - emeric@exceliance.fr
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#include <sys/mman.h>
Emeric Brunaf9619d2012-11-28 18:47:52 +010015#include <arpa/inet.h>
Willy Tarreau8d2b7772020-05-27 10:58:19 +020016#include <import/ebmbtree.h>
William Lallemand24a7a752017-10-09 14:17:39 +020017#include <types/global.h>
William Lallemand4f45bb92017-10-30 20:08:51 +010018#include <common/mini-clist.h>
19#include "proto/shctx.h"
William Lallemanded0b5ad2017-10-30 19:36:36 +010020
#if !defined (USE_PRIVATE_CACHE)

/* Set to 1 once a context has been mmap'ed MAP_SHARED, i.e. the cache is
 * shared between processes and must use inter-process locking.
 */
int use_shared_mem = 0;

#endif
Emeric Brun3e541d12012-09-03 11:14:36 +020026
/*
 * Reserve a row of blocks able to hold <data_len> more bytes.
 *
 * If <first> is NULL, reserve a brand new row, put it in the hot list and set
 * its refcount to 1. If <first> is non-NULL, append new blocks to the row
 * whose first block is <first>.
 *
 * Blocks are taken from the avail list and moved to the hot list.
 * Returns the first block of the row, or NULL if not enough blocks are
 * available.
 */
struct shared_block *shctx_row_reserve_hot(struct shared_context *shctx,
                                           struct shared_block *first, int data_len)
{
	struct shared_block *last = NULL, *block, *sblock, *ret = NULL, *next;
	int enough = 0;
	int freed = 0;
	int remain;

	/* not enough usable blocks in the whole cache: fail early */
	if (data_len > shctx->nbav * shctx->block_size)
		goto out;

	/* Check the object size limit (0 means unlimited). */
	if (shctx->max_obj_size > 0) {
		if ((first && first->len + data_len > shctx->max_obj_size) ||
		    (!first && data_len > shctx->max_obj_size))
			goto out;
	}

	/* Note that <remain> is nul only if <first> is not nul. */
	remain = 1;
	if (first) {
		/* Check that there is some block to reserve.
		 * In this first block of code we compute the remaining room in the
		 * current list of block already reserved for this object.
		 * We return asap if there is enough room to copy <data_len> bytes.
		 */
		last = first->last_reserved;
		/* Remaining room in the already-reserved blocks of the row. */
		remain = (shctx->block_size * first->block_count - first->len);
		if (remain) {
			if (remain > data_len) {
				return last ? last : first;
			} else {
				data_len -= remain;
				if (!data_len)
					return last ? last : first;
			}
		}
	}

	/* Steal rows from the head of the avail list until <data_len> bytes
	 * worth of blocks have been moved to the hot list.
	 */
	while (!enough && !LIST_ISEMPTY(&shctx->avail)) {
		int count = 0;
		int first_count = 0, first_len = 0;

		next = block = LIST_NEXT(&shctx->avail, struct shared_block *, list);
		if (ret == NULL)
			ret = next;

		/* remember the geometry of the evicted row before its head
		 * block is reset below.
		 */
		first_count = next->block_count;
		first_len = next->len;
		/*
		Should never been set to 0.
		if (next->block_count == 0)
		next->block_count = 1;
		*/

		list_for_each_entry_safe_from(block, sblock, &shctx->avail, list) {

			/* release callback: let the owner of the evicted row
			 * (len != 0 means it held data) clean up its state.
			 */
			if (first_len && shctx->free_block)
				shctx->free_block(next, block);

			/* reset the block as a standalone empty one */
			block->block_count = 1;
			block->len = 0;

			freed++;
			data_len -= shctx->block_size;

			if (data_len > 0 || !enough) {
				if (last) {
					shctx_block_append_hot(shctx, &last->list, block);
					last = block;
				} else {
					shctx_block_set_hot(shctx, block);
				}
				/* first newly-reserved block after a full row:
				 * record it as the append point of <first>.
				 */
				if (!remain) {
					first->last_append = block;
					remain = 1;
				}
				if (data_len <= 0) {
					/* enough room reserved: finalize the head block */
					ret->block_count = freed;
					ret->refcount = 1;
					ret->last_reserved = block;
					enough = 1;
				}
			}
			count++;
			/* stop at the end of the evicted row */
			if (count >= first_count)
				break;
		}
	}

	if (first) {
		/* merge the newly-reserved blocks into the caller's row and
		 * reset the temporary head block.
		 */
		first->block_count += ret->block_count;
		first->last_reserved = ret->last_reserved;
		/* Reset this block. */
		ret->last_reserved = NULL;
		ret->block_count = 1;
		ret->refcount = 0;
		/* Return the first block. */
		ret = first;
	}

out:
	return ret;
}
Emeric Brun3e541d12012-09-03 11:14:36 +0200141
William Lallemand4f45bb92017-10-30 20:08:51 +0100142/*
143 * if the refcount is 0 move the row to the hot list. Increment the refcount
Emeric Brunaf9619d2012-11-28 18:47:52 +0100144 */
William Lallemand4f45bb92017-10-30 20:08:51 +0100145void shctx_row_inc_hot(struct shared_context *shctx, struct shared_block *first)
Emeric Brun3e541d12012-09-03 11:14:36 +0200146{
William Lallemand4f45bb92017-10-30 20:08:51 +0100147 struct shared_block *block, *sblock;
148 int count = 0;
Emeric Brunaf9619d2012-11-28 18:47:52 +0100149
William Lallemand4f45bb92017-10-30 20:08:51 +0100150 if (first->refcount <= 0) {
151
152 block = first;
153
154 list_for_each_entry_safe_from(block, sblock, &shctx->avail, list) {
155
156 shctx_block_set_hot(shctx, block);
157
158 count++;
159 if (count >= first->block_count)
160 break;
Emeric Brunaf9619d2012-11-28 18:47:52 +0100161 }
Emeric Brunaf9619d2012-11-28 18:47:52 +0100162 }
Emeric Brunaf9619d2012-11-28 18:47:52 +0100163
William Lallemand4f45bb92017-10-30 20:08:51 +0100164 first->refcount++;
Emeric Brunaf9619d2012-11-28 18:47:52 +0100165}
Emeric Brun3e541d12012-09-03 11:14:36 +0200166
William Lallemand4f45bb92017-10-30 20:08:51 +0100167/*
168 * decrement the refcount and move the row at the end of the avail list if it reaches 0.
Emeric Brunaf9619d2012-11-28 18:47:52 +0100169 */
William Lallemand4f45bb92017-10-30 20:08:51 +0100170void shctx_row_dec_hot(struct shared_context *shctx, struct shared_block *first)
Emeric Brunaf9619d2012-11-28 18:47:52 +0100171{
William Lallemand4f45bb92017-10-30 20:08:51 +0100172 struct shared_block *block, *sblock;
173 int count = 0;
Emeric Brunaf9619d2012-11-28 18:47:52 +0100174
William Lallemand4f45bb92017-10-30 20:08:51 +0100175 first->refcount--;
Emeric Brun3e541d12012-09-03 11:14:36 +0200176
William Lallemand4f45bb92017-10-30 20:08:51 +0100177 if (first->refcount <= 0) {
Emeric Brun3e541d12012-09-03 11:14:36 +0200178
William Lallemand4f45bb92017-10-30 20:08:51 +0100179 block = first;
Emeric Brun3e541d12012-09-03 11:14:36 +0200180
William Lallemand4f45bb92017-10-30 20:08:51 +0100181 list_for_each_entry_safe_from(block, sblock, &shctx->hot, list) {
Emeric Brun3e541d12012-09-03 11:14:36 +0200182
William Lallemand4f45bb92017-10-30 20:08:51 +0100183 shctx_block_set_avail(shctx, block);
Emeric Brun3e541d12012-09-03 11:14:36 +0200184
William Lallemand4f45bb92017-10-30 20:08:51 +0100185 count++;
186 if (count >= first->block_count)
Emeric Brunaf9619d2012-11-28 18:47:52 +0100187 break;
Emeric Brunaf9619d2012-11-28 18:47:52 +0100188 }
189 }
Emeric Brun3e541d12012-09-03 11:14:36 +0200190
William Lallemand4f45bb92017-10-30 20:08:51 +0100191}
192
193
/*
 * Append data in the row if there is enough space.
 * The row should be in the hot list.
 *
 * Returns the amount of data left to append if >= 0 (0 means everything was
 * copied), or minus the amount of additional space needed to hold the data
 * if < 0 (in which case nothing is copied).
 */
int shctx_row_data_append(struct shared_context *shctx,
                          struct shared_block *first, struct shared_block *from,
                          unsigned char *data, int len)
{
	int remain, start;
	struct shared_block *block;

	/* return -<len> needed to work */
	if (len > first->block_count * shctx->block_size - first->len)
		return (first->block_count * shctx->block_size - first->len) - len;

	/* resume from <from> when provided, otherwise start at the row head */
	block = from ? from : first;
	list_for_each_entry_from(block, &shctx->hot, list) {
		/* end of copy */
		if (len <= 0)
			break;

		/* free room left in the current (possibly partial) block:
		 * total row free space modulo the block size.
		 */
		remain = (shctx->block_size * first->block_count - first->len) % shctx->block_size;
		/* if remain == 0, previous buffers are full, or first->len == 0 */
		if (!remain) {
			remain = shctx->block_size;
			start = 0;
		}
		else {
			/* start must be calculated before remain is modified */
			start = shctx->block_size - remain;
		}

		/* must not try to copy more than len */
		remain = MIN(remain, len);

		memcpy(block->data + start, data, remain);

		data += remain;
		len -= remain;
		first->len += remain; /* update len in the head of the row */
		first->last_append = block;
	}

	return len;
}
Emeric Brun3e541d12012-09-03 11:14:36 +0200243
/*
 * Copy <len> bytes starting at <offset> from a row of blocks into <dst>.
 * Returns the number of bytes that could not be copied (0 on full success).
 *
 * The row should be in the hot list.
 */
int shctx_row_data_get(struct shared_context *shctx, struct shared_block *first,
                       unsigned char *dst, int offset, int len)
{
	int count = 0, size = 0, start = -1;
	struct shared_block *block;

	/* can't copy more than the row actually holds */
	if (len > first->len)
		len = first->len;

	block = first;
	count = 0;
	/* Pass through the blocks to copy them */
	list_for_each_entry_from(block, &shctx->hot, list) {
		/* stop past the row's last block or once everything is copied */
		if (count >= first->block_count || len <= 0)
			break;

		count++;
		/* continue until we are in right block
		   corresponding to the offset */
		if (count < offset / shctx->block_size + 1)
			continue;

		/* on the first copied block, data may not begin at offset 0
		 * within the block; afterwards start is reset to 0.
		 */
		if (start == -1)
			start = offset - (count - 1) * shctx->block_size;

		/* size can be lower than a block when copying the last block */
		size = MIN(shctx->block_size - start, len);

		memcpy(dst, block->data + start, size);
		dst += size;
		len -= size;
		start = 0;
	}
	return len;
}
Emeric Brun3e541d12012-09-03 11:14:36 +0200287
Emeric Brun3e541d12012-09-03 11:14:36 +0200288/* Allocate shared memory context.
William Lallemand4f45bb92017-10-30 20:08:51 +0100289 * <maxblocks> is maximum blocks.
290 * If <maxblocks> is set to less or equal to 0, ssl cache is disabled.
291 * Returns: -1 on alloc failure, <maxblocks> if it performs context alloc,
Emeric Brunaf9619d2012-11-28 18:47:52 +0100292 * and 0 if cache is already allocated.
293 */
Frédéric Lécailleb7838af2018-10-22 16:21:39 +0200294int shctx_init(struct shared_context **orig_shctx, int maxblocks, int blocksize,
Frédéric Lécailleb80bc272018-10-25 20:31:40 +0200295 unsigned int maxobjsz, int extra, int shared)
Emeric Brun3e541d12012-09-03 11:14:36 +0200296{
297 int i;
William Lallemand3f85c9a2017-10-09 16:30:50 +0200298 struct shared_context *shctx;
299 int ret;
Emeric Brun9faf0712012-09-25 11:11:16 +0200300#ifndef USE_PRIVATE_CACHE
Emeric Bruncd1a5262014-05-07 23:11:42 +0200301#ifdef USE_PTHREAD_PSHARED
Emeric Brun3e541d12012-09-03 11:14:36 +0200302 pthread_mutexattr_t attr;
Emeric Bruncd1a5262014-05-07 23:11:42 +0200303#endif
Emeric Brun9faf0712012-09-25 11:11:16 +0200304#endif
William Lallemand4f45bb92017-10-30 20:08:51 +0100305 void *cur;
Emeric Brun4b3091e2012-09-24 15:48:52 +0200306 int maptype = MAP_PRIVATE;
Emeric Brun3e541d12012-09-03 11:14:36 +0200307
William Lallemand4f45bb92017-10-30 20:08:51 +0100308 if (maxblocks <= 0)
Emeric Brun22890a12012-12-28 14:41:32 +0100309 return 0;
Emeric Brun3e541d12012-09-03 11:14:36 +0200310
Willy Tarreaua7ddab02020-02-21 13:45:58 +0100311 /* make sure to align the records on a pointer size */
312 blocksize = (blocksize + sizeof(void *) - 1) & -sizeof(void *);
313 extra = (extra + sizeof(void *) - 1) & -sizeof(void *);
314
Emeric Brun9faf0712012-09-25 11:11:16 +0200315#ifndef USE_PRIVATE_CACHE
Emeric Brun4b3091e2012-09-24 15:48:52 +0200316 if (shared)
317 maptype = MAP_SHARED;
Emeric Brun9faf0712012-09-25 11:11:16 +0200318#endif
Emeric Brun4b3091e2012-09-24 15:48:52 +0200319
William Lallemand4f45bb92017-10-30 20:08:51 +0100320 shctx = (struct shared_context *)mmap(NULL, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)),
Emeric Brun4b3091e2012-09-24 15:48:52 +0200321 PROT_READ | PROT_WRITE, maptype | MAP_ANON, -1, 0);
Emeric Brun3e541d12012-09-03 11:14:36 +0200322 if (!shctx || shctx == MAP_FAILED) {
323 shctx = NULL;
William Lallemand3f85c9a2017-10-09 16:30:50 +0200324 ret = SHCTX_E_ALLOC_CACHE;
325 goto err;
Emeric Brun3e541d12012-09-03 11:14:36 +0200326 }
327
William Lallemand4f45bb92017-10-30 20:08:51 +0100328 shctx->nbav = 0;
329
Emeric Brun9faf0712012-09-25 11:11:16 +0200330#ifndef USE_PRIVATE_CACHE
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200331 if (maptype == MAP_SHARED) {
Emeric Bruncd1a5262014-05-07 23:11:42 +0200332#ifdef USE_PTHREAD_PSHARED
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200333 if (pthread_mutexattr_init(&attr)) {
William Lallemand4f45bb92017-10-30 20:08:51 +0100334 munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)));
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200335 shctx = NULL;
William Lallemand3f85c9a2017-10-09 16:30:50 +0200336 ret = SHCTX_E_INIT_LOCK;
337 goto err;
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200338 }
339
340 if (pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)) {
341 pthread_mutexattr_destroy(&attr);
William Lallemand4f45bb92017-10-30 20:08:51 +0100342 munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)));
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200343 shctx = NULL;
William Lallemand3f85c9a2017-10-09 16:30:50 +0200344 ret = SHCTX_E_INIT_LOCK;
345 goto err;
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200346 }
347
348 if (pthread_mutex_init(&shctx->mutex, &attr)) {
349 pthread_mutexattr_destroy(&attr);
William Lallemand4f45bb92017-10-30 20:08:51 +0100350 munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)));
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200351 shctx = NULL;
William Lallemand3f85c9a2017-10-09 16:30:50 +0200352 ret = SHCTX_E_INIT_LOCK;
353 goto err;
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200354 }
Emeric Bruncd1a5262014-05-07 23:11:42 +0200355#else
356 shctx->waiters = 0;
Emeric Brun3e541d12012-09-03 11:14:36 +0200357#endif
Emeric Brun4b3091e2012-09-24 15:48:52 +0200358 use_shared_mem = 1;
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200359 }
Emeric Brun9faf0712012-09-25 11:11:16 +0200360#endif
Emeric Brun4b3091e2012-09-24 15:48:52 +0200361
William Lallemand4f45bb92017-10-30 20:08:51 +0100362 LIST_INIT(&shctx->avail);
363 LIST_INIT(&shctx->hot);
Emeric Brun3e541d12012-09-03 11:14:36 +0200364
William Lallemand4f45bb92017-10-30 20:08:51 +0100365 shctx->block_size = blocksize;
Frédéric Lécailleb80bc272018-10-25 20:31:40 +0200366 shctx->max_obj_size = maxobjsz == (unsigned int)-1 ? 0 : maxobjsz;
Emeric Brunaf9619d2012-11-28 18:47:52 +0100367
William Lallemand4f45bb92017-10-30 20:08:51 +0100368 /* init the free blocks after the shared context struct */
369 cur = (void *)shctx + sizeof(struct shared_context) + extra;
370 for (i = 0; i < maxblocks; i++) {
371 struct shared_block *cur_block = (struct shared_block *)cur;
372 cur_block->len = 0;
373 cur_block->refcount = 0;
374 cur_block->block_count = 1;
375 LIST_ADDQ(&shctx->avail, &cur_block->list);
376 shctx->nbav++;
377 cur += sizeof(struct shared_block) + blocksize;
Emeric Brun3e541d12012-09-03 11:14:36 +0200378 }
William Lallemand4f45bb92017-10-30 20:08:51 +0100379 ret = maxblocks;
William Lallemand3f85c9a2017-10-09 16:30:50 +0200380
381err:
382 *orig_shctx = shctx;
383 return ret;
Emeric Brun3e541d12012-09-03 11:14:36 +0200384}
385