/*
 * shctx.c - shared context management functions for SSL
 *
 * Copyright (C) 2011-2012 EXCELIANCE
 *
 * Author: Emeric Brun - emeric@exceliance.fr
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <sys/mman.h>
#include <arpa/inet.h>
#include <import/ebmbtree.h>
#include <haproxy/list.h>
#include <haproxy/shctx.h>

int use_shared_mem = 0;

/*
 * Reserve a new row if <first> is null: put it in the hot list and set its
 * refcount to 1. Otherwise append new blocks to the row whose first block
 * is <first>.
 *
 * Blocks are reserved from the avail list and moved to the hot list.
 * Returns the first block put in the hot list, or NULL if not enough blocks
 * are available.
 */
struct shared_block *shctx_row_reserve_hot(struct shared_context *shctx,
                                           struct shared_block *first, int data_len)
{
	struct shared_block *last = NULL, *block, *sblock, *ret = NULL, *next;
	int enough = 0;
	int freed = 0;
	int remain;

	/* not enough usable blocks */
	if (data_len > shctx->nbav * shctx->block_size)
		goto out;

	/* Check the object size limit. */
	if (shctx->max_obj_size > 0) {
		if ((first && first->len + data_len > shctx->max_obj_size) ||
		    (!first && data_len > shctx->max_obj_size))
			goto out;
	}

	/* Note that <remain> can be zero only if <first> is not null. */
	remain = 1;
	if (first) {
		/* Check that there are some blocks to reserve.
		 * In this first block of code we compute the remaining room in the
		 * list of blocks already reserved for this object.
		 * We return immediately if there is enough room to copy <data_len> bytes.
		 */
		last = first->last_reserved;
		/* Remaining room. */
		remain = (shctx->block_size * first->block_count - first->len);
		if (remain) {
			if (remain > data_len) {
				return last ? last : first;
			} else {
				data_len -= remain;
				if (data_len <= 0)
					return last ? last : first;
			}
		}
	}

	while (!enough && !LIST_ISEMPTY(&shctx->avail)) {
		int count = 0;
		int first_count = 0, first_len = 0;

		next = block = LIST_NEXT(&shctx->avail, struct shared_block *, list);
		if (ret == NULL)
			ret = next;

		first_count = next->block_count;
		first_len = next->len;
		/*
		Should never be set to 0.
		if (next->block_count == 0)
			next->block_count = 1;
		*/

		list_for_each_entry_safe_from(block, sblock, &shctx->avail, list) {

			/* release callback */
			if (first_len && shctx->free_block)
				shctx->free_block(next, block);

			block->block_count = 1;
			block->len = 0;

			freed++;
			data_len -= shctx->block_size;

			if (data_len > 0 || !enough) {
				if (last) {
					shctx_block_append_hot(shctx, &last->list, block);
					last = block;
				} else {
					shctx_block_set_hot(shctx, block);
				}
				if (!remain) {
					first->last_append = block;
					remain = 1;
				}
				if (data_len <= 0) {
					ret->block_count = freed;
					ret->refcount = 1;
					ret->last_reserved = block;
					enough = 1;
					break;
				}
			}
			count++;
			if (count >= first_count)
				break;
		}
	}

	if (first) {
		first->block_count += ret->block_count;
		first->last_reserved = ret->last_reserved;
		/* Reset this block. */
		ret->last_reserved = NULL;
		ret->block_count = 1;
		ret->refcount = 0;
		/* Return the first block. */
		ret = first;
	}

out:
	return ret;
}

/*
 * If the refcount is 0, move the row to the hot list. Increment the refcount.
 */
void shctx_row_inc_hot(struct shared_context *shctx, struct shared_block *first)
{
	struct shared_block *block, *sblock;
	int count = 0;

	if (first->refcount <= 0) {

		block = first;

		list_for_each_entry_safe_from(block, sblock, &shctx->avail, list) {

			shctx_block_set_hot(shctx, block);

			count++;
			if (count >= first->block_count)
				break;
		}
	}

	first->refcount++;
}

/*
 * Decrement the refcount and move the row to the end of the avail list when it reaches 0.
 */
void shctx_row_dec_hot(struct shared_context *shctx, struct shared_block *first)
{
	struct shared_block *block, *sblock;
	int count = 0;

	first->refcount--;

	if (first->refcount <= 0) {

		block = first;

		list_for_each_entry_safe_from(block, sblock, &shctx->hot, list) {

			shctx_block_set_avail(shctx, block);

			count++;
			if (count >= first->block_count)
				break;
		}
	}

}
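
/* Usage sketch (illustration only): the two helpers above implement a simple
 * per-row reference count. A reader that finds an existing row typically pins
 * it in the hot list for the duration of the access, then releases it, under
 * the shared-context lock (shctx_lock()/shctx_unlock() from <haproxy/shctx.h>
 * are assumed here):
 *
 *	shctx_lock(shctx);
 *	shctx_row_inc_hot(shctx, first);
 *	shctx_unlock(shctx);
 *
 *	(read the row, e.g. with shctx_row_data_get())
 *
 *	shctx_lock(shctx);
 *	shctx_row_dec_hot(shctx, first);
 *	shctx_unlock(shctx);
 *
 * The row only returns to the avail list once the last user has called
 * shctx_row_dec_hot().
 */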


/*
 * Append data to the row if there is enough space.
 * The row must be in the hot list.
 *
 * Returns the number of bytes that remain to be appended (0 when everything
 * was copied), or, if the row is too small, a negative value indicating how
 * much more space would be needed to hold the data.
 */
int shctx_row_data_append(struct shared_context *shctx,
                          struct shared_block *first, struct shared_block *from,
                          unsigned char *data, int len)
{
	int remain, start;
	struct shared_block *block;

	/* if the row cannot hold <len> more bytes, return the (negative) missing room */
	if (len > first->block_count * shctx->block_size - first->len)
		return (first->block_count * shctx->block_size - first->len) - len;

	block = from ? from : first;
	list_for_each_entry_from(block, &shctx->hot, list) {
		/* end of copy */
		if (len <= 0)
			break;

		/* remaining free room in the current (last partially filled) block */
		remain = (shctx->block_size * first->block_count - first->len) % shctx->block_size;
		/* if remain == 0, previous buffers are full, or first->len == 0 */
		if (!remain) {
			remain = shctx->block_size;
			start = 0;
		}
		else {
			/* start must be calculated before remain is modified */
			start = shctx->block_size - remain;
		}

		/* must not try to copy more than len */
		remain = MIN(remain, len);

		memcpy(block->data + start, data, remain);

		data += remain;
		len -= remain;
		first->len += remain; /* update len in the head of the row */
		first->last_append = block;
	}

	return len;
}
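
/* Usage sketch (illustration only): filling a reserved row in one or several
 * calls. Passing NULL as <from> starts the walk at the first block; based on
 * how <last_append> is maintained above, a subsequent call may pass
 * first->last_append to resume where the previous write stopped. <buf>,
 * <len>, <buf2> and <len2> are hypothetical:
 *
 *	if (shctx_row_data_append(shctx, first, NULL, buf, len) < 0)
 *		return 0;
 *
 *	shctx_row_data_append(shctx, first, first->last_append, buf2, len2);
 *
 * A negative return value is the amount of extra room that would be needed,
 * which can be used to grow the row with shctx_row_reserve_hot() before
 * retrying.
 */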

/*
 * Copy <len> bytes of data from a row of blocks into <dst>, starting at
 * <offset> within the row; return the number of bytes that remain to be
 * copied. If 0 is returned, all the data has been copied.
 *
 * The row must be in the hot list.
 */
int shctx_row_data_get(struct shared_context *shctx, struct shared_block *first,
                       unsigned char *dst, int offset, int len)
{
	int count = 0, size = 0, start = -1;
	struct shared_block *block;

	/* can't copy more */
	if (len > first->len)
		len = first->len;

	block = first;
	count = 0;
	/* Pass through the blocks to copy them */
	list_for_each_entry_from(block, &shctx->hot, list) {
		if (count >= first->block_count || len <= 0)
			break;

		count++;
		/* skip blocks until we reach the one that contains <offset> */
		if (count < offset / shctx->block_size + 1)
			continue;

		/* in the first copied block, data does not necessarily begin at offset 0 */
		if (start == -1)
			start = offset - (count - 1) * shctx->block_size;

		/* size can be lower than a block when copying the last block */
		size = MIN(shctx->block_size - start, len);

		memcpy(dst, block->data + start, size);
		dst += size;
		len -= size;
		start = 0;
	}
	return len;
}
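
/* Usage sketch (illustration only): reading an object back from its row. The
 * <dst> buffer is hypothetical and the row is expected to have been made hot
 * first (see shctx_row_inc_hot() above):
 *
 *	int missing;
 *
 *	missing = shctx_row_data_get(shctx, first, dst, 0, first->len);
 *
 * <missing> is 0 once the whole object has been copied into <dst>. A non-zero
 * <offset> may be used to skip data stored at the beginning of the row, e.g.
 * an offset of 16 to skip a 16-byte header.
 */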

/* Allocate the shared memory context.
 * <maxblocks> is the maximum number of blocks.
 * If <maxblocks> is less than or equal to 0, the cache is disabled.
 * Returns: a negative SHCTX_E_* value on allocation or lock-init failure,
 * <maxblocks> if it performs the context allocation, and 0 if the cache is
 * disabled.
 */
int shctx_init(struct shared_context **orig_shctx, int maxblocks, int blocksize,
               unsigned int maxobjsz, int extra, int shared)
{
	int i;
	struct shared_context *shctx;
	int ret;
#ifdef USE_PTHREAD_PSHARED
	pthread_mutexattr_t attr;
#endif
	void *cur;
	int maptype = MAP_PRIVATE;

	if (maxblocks <= 0)
		return 0;

	/* make sure to align the records on a pointer size */
	blocksize = (blocksize + sizeof(void *) - 1) & -sizeof(void *);
	extra = (extra + sizeof(void *) - 1) & -sizeof(void *);

	if (shared)
		maptype = MAP_SHARED;

	shctx = (struct shared_context *)mmap(NULL, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)),
	                                      PROT_READ | PROT_WRITE, maptype | MAP_ANON, -1, 0);
	if (!shctx || shctx == MAP_FAILED) {
		shctx = NULL;
		ret = SHCTX_E_ALLOC_CACHE;
		goto err;
	}

	shctx->nbav = 0;

	if (maptype == MAP_SHARED) {
#ifndef USE_PRIVATE_CACHE
#ifdef USE_PTHREAD_PSHARED
		if (pthread_mutexattr_init(&attr)) {
			munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)));
			shctx = NULL;
			ret = SHCTX_E_INIT_LOCK;
			goto err;
		}

		if (pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)) {
			pthread_mutexattr_destroy(&attr);
			munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)));
			shctx = NULL;
			ret = SHCTX_E_INIT_LOCK;
			goto err;
		}

		if (pthread_mutex_init(&shctx->mutex, &attr)) {
			pthread_mutexattr_destroy(&attr);
			munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)));
			shctx = NULL;
			ret = SHCTX_E_INIT_LOCK;
			goto err;
		}
#else
		shctx->waiters = 0;
#endif
#else
		HA_SPIN_INIT(&shctx->lock);
#endif
		use_shared_mem = 1;
	}

	LIST_INIT(&shctx->avail);
	LIST_INIT(&shctx->hot);

	shctx->block_size = blocksize;
	shctx->max_obj_size = maxobjsz == (unsigned int)-1 ? 0 : maxobjsz;

	/* init the free blocks after the shared context struct */
	cur = (void *)shctx + sizeof(struct shared_context) + extra;
	for (i = 0; i < maxblocks; i++) {
		struct shared_block *cur_block = (struct shared_block *)cur;
		cur_block->len = 0;
		cur_block->refcount = 0;
		cur_block->block_count = 1;
		LIST_APPEND(&shctx->avail, &cur_block->list);
		shctx->nbav++;
		cur += sizeof(struct shared_block) + blocksize;
	}
	ret = maxblocks;

err:
	*orig_shctx = shctx;
	return ret;
}
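
/* Usage sketch (illustration only): creating a cache of 4096 blocks of 1kB
 * at startup. The values and the <my_shctx> pointer are hypothetical; a
 * <maxobjsz> of (unsigned int)-1 disables the per-object size limit, <extra>
 * reserves room after the shared_context for the caller's own header, and
 * <shared> selects a MAP_SHARED mapping usable across processes:
 *
 *	struct shared_context *my_shctx;
 *	int ret;
 *
 *	ret = shctx_init(&my_shctx, 4096, 1024, (unsigned int)-1, 0, 1);
 *
 * <ret> is <maxblocks> (4096) on success, 0 when the cache is disabled, and a
 * negative SHCTX_E_* value when the mapping or the lock initialization failed.
 */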