blob: f4002a48e05c7922278bce41a99eaf8ba4256160 [file] [log] [blame]
/*
 * shctx.c - shared context management functions for SSL
 *
 * Copyright (C) 2011-2012 EXCELIANCE
 *
 * Author: Emeric Brun - emeric@exceliance.fr
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
13
#include <sys/mman.h>
#include <arpa/inet.h>
#include <import/ebmbtree.h>
#include <haproxy/list.h>
#include <haproxy/shctx.h>

#if !defined (USE_PRIVATE_CACHE)

/* Non-zero once the context has been mmap()ed with MAP_SHARED (see
 * shctx_init() when <shared> is set), meaning the cache is visible to
 * several processes and inter-process locking must be used.
 */
int use_shared_mem = 0;

#endif
Emeric Brun3e541d12012-09-03 11:14:36 +020025
/*
 * Reserve a new row if <first> is null, put it in the hotlist, set the refcount to 1
 * or append new blocks to the row with <first> as first block if non null.
 *
 * Reserve blocks in the avail list and put them in the hot list.
 * Returns the first block put in the hot list (or <first> itself when
 * appending to an existing row), or NULL if not enough blocks are available.
 */
struct shared_block *shctx_row_reserve_hot(struct shared_context *shctx,
                                           struct shared_block *first, int data_len)
{
	struct shared_block *last = NULL, *block, *sblock, *ret = NULL, *next;
	int enough = 0;
	int freed = 0;
	int remain;

	/* not enough usable blocks overall: fail early */
	if (data_len > shctx->nbav * shctx->block_size)
		goto out;

	/* Check the object size limit (0 means unlimited). */
	if (shctx->max_obj_size > 0) {
		if ((first && first->len + data_len > shctx->max_obj_size) ||
			(!first && data_len > shctx->max_obj_size))
			goto out;
	}

	/* Note that <remain> is nul only if <first> is not nul. */
	remain = 1;
	if (first) {
		/* Check that there is some block to reserve.
		 * In this first block of code we compute the remaining room in the
		 * current list of block already reserved for this object.
		 * We return asap if there is enough room to copy <data_len> bytes.
		 */
		last = first->last_reserved;
		/* Remaining room in the already-reserved blocks of the row. */
		remain = (shctx->block_size * first->block_count - first->len);
		if (remain) {
			if (remain > data_len) {
				/* enough room already reserved, nothing to do */
				return last ? last : first;
			} else {
				data_len -= remain;
				if (!data_len)
					return last ? last : first;
			}
		}
	}

	/* Take rows from the head of the avail list until <data_len> bytes of
	 * block space have been moved to the hot list.
	 */
	while (!enough && !LIST_ISEMPTY(&shctx->avail)) {
		int count = 0;
		int first_count = 0, first_len = 0;

		next = block = LIST_NEXT(&shctx->avail, struct shared_block *, list);
		if (ret == NULL)
			ret = next;

		first_count = next->block_count;
		first_len = next->len;
		/*
		Should never been set to 0.
		if (next->block_count == 0)
		next->block_count = 1;
		*/

		list_for_each_entry_safe_from(block, sblock, &shctx->avail, list) {

			/* release callback: let the user evict the object which
			 * still owns this recycled block */
			if (first_len && shctx->free_block)
				shctx->free_block(next, block);

			block->block_count = 1;
			block->len = 0;

			freed++;
			data_len -= shctx->block_size;

			if (data_len > 0 || !enough) {
				if (last) {
					shctx_block_append_hot(shctx, &last->list, block);
					last = block;
				} else {
					shctx_block_set_hot(shctx, block);
				}
				if (!remain) {
					/* first new block appended to an existing full row */
					first->last_append = block;
					remain = 1;
				}
				if (data_len <= 0) {
					/* enough blocks reserved: finalize the new row head */
					ret->block_count = freed;
					ret->refcount = 1;
					ret->last_reserved = block;
					enough = 1;
				}
			}
			count++;
			if (count >= first_count)
				break;
		}
	}

	if (first) {
		/* Merge the freshly reserved blocks into the caller's row. */
		first->block_count += ret->block_count;
		first->last_reserved = ret->last_reserved;
		/* Reset this block. */
		ret->last_reserved = NULL;
		ret->block_count = 1;
		ret->refcount = 0;
		/* Return the first block. */
		ret = first;
	}

out:
	return ret;
}
Emeric Brun3e541d12012-09-03 11:14:36 +0200140
William Lallemand4f45bb92017-10-30 20:08:51 +0100141/*
142 * if the refcount is 0 move the row to the hot list. Increment the refcount
Emeric Brunaf9619d2012-11-28 18:47:52 +0100143 */
William Lallemand4f45bb92017-10-30 20:08:51 +0100144void shctx_row_inc_hot(struct shared_context *shctx, struct shared_block *first)
Emeric Brun3e541d12012-09-03 11:14:36 +0200145{
William Lallemand4f45bb92017-10-30 20:08:51 +0100146 struct shared_block *block, *sblock;
147 int count = 0;
Emeric Brunaf9619d2012-11-28 18:47:52 +0100148
William Lallemand4f45bb92017-10-30 20:08:51 +0100149 if (first->refcount <= 0) {
150
151 block = first;
152
153 list_for_each_entry_safe_from(block, sblock, &shctx->avail, list) {
154
155 shctx_block_set_hot(shctx, block);
156
157 count++;
158 if (count >= first->block_count)
159 break;
Emeric Brunaf9619d2012-11-28 18:47:52 +0100160 }
Emeric Brunaf9619d2012-11-28 18:47:52 +0100161 }
Emeric Brunaf9619d2012-11-28 18:47:52 +0100162
William Lallemand4f45bb92017-10-30 20:08:51 +0100163 first->refcount++;
Emeric Brunaf9619d2012-11-28 18:47:52 +0100164}
Emeric Brun3e541d12012-09-03 11:14:36 +0200165
William Lallemand4f45bb92017-10-30 20:08:51 +0100166/*
167 * decrement the refcount and move the row at the end of the avail list if it reaches 0.
Emeric Brunaf9619d2012-11-28 18:47:52 +0100168 */
William Lallemand4f45bb92017-10-30 20:08:51 +0100169void shctx_row_dec_hot(struct shared_context *shctx, struct shared_block *first)
Emeric Brunaf9619d2012-11-28 18:47:52 +0100170{
William Lallemand4f45bb92017-10-30 20:08:51 +0100171 struct shared_block *block, *sblock;
172 int count = 0;
Emeric Brunaf9619d2012-11-28 18:47:52 +0100173
William Lallemand4f45bb92017-10-30 20:08:51 +0100174 first->refcount--;
Emeric Brun3e541d12012-09-03 11:14:36 +0200175
William Lallemand4f45bb92017-10-30 20:08:51 +0100176 if (first->refcount <= 0) {
Emeric Brun3e541d12012-09-03 11:14:36 +0200177
William Lallemand4f45bb92017-10-30 20:08:51 +0100178 block = first;
Emeric Brun3e541d12012-09-03 11:14:36 +0200179
William Lallemand4f45bb92017-10-30 20:08:51 +0100180 list_for_each_entry_safe_from(block, sblock, &shctx->hot, list) {
Emeric Brun3e541d12012-09-03 11:14:36 +0200181
William Lallemand4f45bb92017-10-30 20:08:51 +0100182 shctx_block_set_avail(shctx, block);
Emeric Brun3e541d12012-09-03 11:14:36 +0200183
William Lallemand4f45bb92017-10-30 20:08:51 +0100184 count++;
185 if (count >= first->block_count)
Emeric Brunaf9619d2012-11-28 18:47:52 +0100186 break;
Emeric Brunaf9619d2012-11-28 18:47:52 +0100187 }
188 }
Emeric Brun3e541d12012-09-03 11:14:36 +0200189
William Lallemand4f45bb92017-10-30 20:08:51 +0100190}
191
192
/*
 * Append <len> bytes from <data> to row <first> if there is enough reserved
 * space, resuming from block <from> when non-null (otherwise from <first>).
 * The row should be in the hot list.
 *
 * Returns the number of bytes that could NOT be appended: 0 on success, or,
 * when the row is too small, the (negative) difference between the remaining
 * room and <len>, i.e. -<missing bytes>.
 */
int shctx_row_data_append(struct shared_context *shctx,
                          struct shared_block *first, struct shared_block *from,
                          unsigned char *data, int len)
{
	int remain, start;
	struct shared_block *block;

	/* return -<len> needed to work */
	if (len > first->block_count * shctx->block_size - first->len)
		return (first->block_count * shctx->block_size - first->len) - len;

	block = from ? from : first;
	list_for_each_entry_from(block, &shctx->hot, list) {
		/* end of copy */
		if (len <= 0)
			break;

		/* free room in the current (last partially-filled) block:
		 * total reserved room modulo the block size */
		remain = (shctx->block_size * first->block_count - first->len) % shctx->block_size;
		/* if remain == 0, previous buffers are full, or first->len == 0 */
		if (!remain) {
			remain = shctx->block_size;
			start = 0;
		}
		else {
			/* start must be calculated before remain is modified */
			start = shctx->block_size - remain;
		}

		/* must not try to copy more than len */
		remain = MIN(remain, len);

		memcpy(block->data + start, data, remain);

		data += remain;
		len -= remain;
		first->len += remain; /* update len in the head of the row */
		first->last_append = block;
	}

	return len;
}
Emeric Brun3e541d12012-09-03 11:14:36 +0200242
William Lallemand4f45bb92017-10-30 20:08:51 +0100243/*
244 * Copy <len> data from a row of blocks, return the remaining data to copy
Joseph Herlant39526432018-11-25 11:31:31 -0800245 * If 0 is returned, the full data has successfully been copied
William Lallemand4f45bb92017-10-30 20:08:51 +0100246 *
247 * The row should be in the hot list
248 */
249int shctx_row_data_get(struct shared_context *shctx, struct shared_block *first,
250 unsigned char *dst, int offset, int len)
251{
252 int count = 0, size = 0, start = -1;
253 struct shared_block *block;
254
William Lallemand7217c462017-10-31 20:21:46 +0100255 /* can't copy more */
256 if (len > first->len)
257 len = first->len;
258
William Lallemand4f45bb92017-10-30 20:08:51 +0100259 block = first;
260 count = 0;
261 /* Pass through the blocks to copy them */
262 list_for_each_entry_from(block, &shctx->hot, list) {
263 if (count >= first->block_count || len <= 0)
264 break;
265
266 count++;
267 /* continue until we are in right block
268 corresponding to the offset */
269 if (count < offset / shctx->block_size + 1)
270 continue;
271
272 /* on the first block, data won't possibly began at offset 0 */
273 if (start == -1)
274 start = offset - (count - 1) * shctx->block_size;
Emeric Brun3e541d12012-09-03 11:14:36 +0200275
William Lallemand4f45bb92017-10-30 20:08:51 +0100276 /* size can be lower than a block when copying the last block */
277 size = MIN(shctx->block_size - start, len);
278
279 memcpy(dst, block->data + start, size);
280 dst += size;
281 len -= size;
282 start = 0;
283 }
284 return len;
285}
Emeric Brun3e541d12012-09-03 11:14:36 +0200286
Emeric Brun3e541d12012-09-03 11:14:36 +0200287/* Allocate shared memory context.
William Lallemand4f45bb92017-10-30 20:08:51 +0100288 * <maxblocks> is maximum blocks.
289 * If <maxblocks> is set to less or equal to 0, ssl cache is disabled.
290 * Returns: -1 on alloc failure, <maxblocks> if it performs context alloc,
Emeric Brunaf9619d2012-11-28 18:47:52 +0100291 * and 0 if cache is already allocated.
292 */
Frédéric Lécailleb7838af2018-10-22 16:21:39 +0200293int shctx_init(struct shared_context **orig_shctx, int maxblocks, int blocksize,
Frédéric Lécailleb80bc272018-10-25 20:31:40 +0200294 unsigned int maxobjsz, int extra, int shared)
Emeric Brun3e541d12012-09-03 11:14:36 +0200295{
296 int i;
William Lallemand3f85c9a2017-10-09 16:30:50 +0200297 struct shared_context *shctx;
298 int ret;
Emeric Brun9faf0712012-09-25 11:11:16 +0200299#ifndef USE_PRIVATE_CACHE
Emeric Bruncd1a5262014-05-07 23:11:42 +0200300#ifdef USE_PTHREAD_PSHARED
Emeric Brun3e541d12012-09-03 11:14:36 +0200301 pthread_mutexattr_t attr;
Emeric Bruncd1a5262014-05-07 23:11:42 +0200302#endif
Emeric Brun9faf0712012-09-25 11:11:16 +0200303#endif
William Lallemand4f45bb92017-10-30 20:08:51 +0100304 void *cur;
Emeric Brun4b3091e2012-09-24 15:48:52 +0200305 int maptype = MAP_PRIVATE;
Emeric Brun3e541d12012-09-03 11:14:36 +0200306
William Lallemand4f45bb92017-10-30 20:08:51 +0100307 if (maxblocks <= 0)
Emeric Brun22890a12012-12-28 14:41:32 +0100308 return 0;
Emeric Brun3e541d12012-09-03 11:14:36 +0200309
Willy Tarreaua7ddab02020-02-21 13:45:58 +0100310 /* make sure to align the records on a pointer size */
311 blocksize = (blocksize + sizeof(void *) - 1) & -sizeof(void *);
312 extra = (extra + sizeof(void *) - 1) & -sizeof(void *);
313
Emeric Brun9faf0712012-09-25 11:11:16 +0200314#ifndef USE_PRIVATE_CACHE
Emeric Brun4b3091e2012-09-24 15:48:52 +0200315 if (shared)
316 maptype = MAP_SHARED;
Emeric Brun9faf0712012-09-25 11:11:16 +0200317#endif
Emeric Brun4b3091e2012-09-24 15:48:52 +0200318
William Lallemand4f45bb92017-10-30 20:08:51 +0100319 shctx = (struct shared_context *)mmap(NULL, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)),
Emeric Brun4b3091e2012-09-24 15:48:52 +0200320 PROT_READ | PROT_WRITE, maptype | MAP_ANON, -1, 0);
Emeric Brun3e541d12012-09-03 11:14:36 +0200321 if (!shctx || shctx == MAP_FAILED) {
322 shctx = NULL;
William Lallemand3f85c9a2017-10-09 16:30:50 +0200323 ret = SHCTX_E_ALLOC_CACHE;
324 goto err;
Emeric Brun3e541d12012-09-03 11:14:36 +0200325 }
326
William Lallemand4f45bb92017-10-30 20:08:51 +0100327 shctx->nbav = 0;
328
Emeric Brun9faf0712012-09-25 11:11:16 +0200329#ifndef USE_PRIVATE_CACHE
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200330 if (maptype == MAP_SHARED) {
Emeric Bruncd1a5262014-05-07 23:11:42 +0200331#ifdef USE_PTHREAD_PSHARED
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200332 if (pthread_mutexattr_init(&attr)) {
William Lallemand4f45bb92017-10-30 20:08:51 +0100333 munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)));
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200334 shctx = NULL;
William Lallemand3f85c9a2017-10-09 16:30:50 +0200335 ret = SHCTX_E_INIT_LOCK;
336 goto err;
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200337 }
338
339 if (pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)) {
340 pthread_mutexattr_destroy(&attr);
William Lallemand4f45bb92017-10-30 20:08:51 +0100341 munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)));
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200342 shctx = NULL;
William Lallemand3f85c9a2017-10-09 16:30:50 +0200343 ret = SHCTX_E_INIT_LOCK;
344 goto err;
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200345 }
346
347 if (pthread_mutex_init(&shctx->mutex, &attr)) {
348 pthread_mutexattr_destroy(&attr);
William Lallemand4f45bb92017-10-30 20:08:51 +0100349 munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)));
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200350 shctx = NULL;
William Lallemand3f85c9a2017-10-09 16:30:50 +0200351 ret = SHCTX_E_INIT_LOCK;
352 goto err;
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200353 }
Emeric Bruncd1a5262014-05-07 23:11:42 +0200354#else
355 shctx->waiters = 0;
Emeric Brun3e541d12012-09-03 11:14:36 +0200356#endif
Emeric Brun4b3091e2012-09-24 15:48:52 +0200357 use_shared_mem = 1;
Emeric Bruncaa19cc2014-05-07 16:10:18 +0200358 }
Emeric Brun9faf0712012-09-25 11:11:16 +0200359#endif
Emeric Brun4b3091e2012-09-24 15:48:52 +0200360
William Lallemand4f45bb92017-10-30 20:08:51 +0100361 LIST_INIT(&shctx->avail);
362 LIST_INIT(&shctx->hot);
Emeric Brun3e541d12012-09-03 11:14:36 +0200363
William Lallemand4f45bb92017-10-30 20:08:51 +0100364 shctx->block_size = blocksize;
Frédéric Lécailleb80bc272018-10-25 20:31:40 +0200365 shctx->max_obj_size = maxobjsz == (unsigned int)-1 ? 0 : maxobjsz;
Emeric Brunaf9619d2012-11-28 18:47:52 +0100366
William Lallemand4f45bb92017-10-30 20:08:51 +0100367 /* init the free blocks after the shared context struct */
368 cur = (void *)shctx + sizeof(struct shared_context) + extra;
369 for (i = 0; i < maxblocks; i++) {
370 struct shared_block *cur_block = (struct shared_block *)cur;
371 cur_block->len = 0;
372 cur_block->refcount = 0;
373 cur_block->block_count = 1;
374 LIST_ADDQ(&shctx->avail, &cur_block->list);
375 shctx->nbav++;
376 cur += sizeof(struct shared_block) + blocksize;
Emeric Brun3e541d12012-09-03 11:14:36 +0200377 }
William Lallemand4f45bb92017-10-30 20:08:51 +0100378 ret = maxblocks;
William Lallemand3f85c9a2017-10-09 16:30:50 +0200379
380err:
381 *orig_shctx = shctx;
382 return ret;
Emeric Brun3e541d12012-09-03 11:14:36 +0200383}
384