blob: 2a149a1d5972a00b28c2d049a5b627a90a7423ea [file] [log] [blame]
Emeric Brun3e541d12012-09-03 11:14:36 +02001/*
2 * shctx.c - shared context management functions for SSL
3 *
4 * Copyright (C) 2011-2012 EXCELIANCE
5 *
6 * Author: Emeric Brun - emeric@exceliance.fr
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#include <sys/mman.h>
Emeric Brunaf9619d2012-11-28 18:47:52 +010015#include <arpa/inet.h>
Willy Tarreauce3f9132014-05-28 16:47:01 +020016#include <ebmbtree.h>
William Lallemand24a7a752017-10-09 14:17:39 +020017#include <types/global.h>
William Lallemand4f45bb92017-10-30 20:08:51 +010018#include <common/mini-clist.h>
19#include "proto/shctx.h"
William Lallemanded0b5ad2017-10-30 19:36:36 +010020
#if !defined (USE_PRIVATE_CACHE)

/* Non-zero once shctx_init() has created a process-shared (MAP_SHARED)
 * mapping; remains 0 when the cache is private to this process. */
int use_shared_mem = 0;

#endif
Emeric Brun3e541d12012-09-03 11:14:36 +020026
/*
 * Reserve a new row if <first> is null, put it in the hotlist, set the refcount to 1
 * or append new blocks to the row with <first> as first block if non null.
 *
 * Reserve blocks in the avail list and put them in the hot list
 * Return the first block put in the hot list or NULL if not enough blocks available
 */
struct shared_block *shctx_row_reserve_hot(struct shared_context *shctx,
                                           struct shared_block *first, int data_len)
{
	struct shared_block *last = NULL, *block, *sblock, *ret = NULL, *next;
	int enough = 0;
	int freed = 0;
	int remain;

	/* not enough usable blocks */
	if (data_len > shctx->nbav * shctx->block_size)
		goto out;

	/* Note that <remain> is nul only if <first> is not nul. */
	remain = 1;
	if (first) {
		/* Check that there is some block to reserve.
		 * In this first block of code we compute the remaining room in the
		 * current list of block already reserved for this object.
		 * We return asap if there is enough room to copy <data_len> bytes.
		 */
		last = first->last_reserved;
		/* Remaining room. */
		remain = (shctx->block_size * first->block_count - first->len);
		if (remain) {
			if (remain > data_len) {
				return last ? last : first;
			} else {
				data_len -= remain;
				if (!data_len)
					return last ? last : first;
			}
		}
	}

	/* Harvest rows from the head of the avail list until <data_len> bytes
	 * worth of blocks have been moved to the hot list. Each iteration of
	 * the outer loop consumes one full row (first_count blocks). */
	while (!enough && !LIST_ISEMPTY(&shctx->avail)) {
		int count = 0;
		int first_count = 0, first_len = 0;

		next = block = LIST_NEXT(&shctx->avail, struct shared_block *, list);
		if (ret == NULL)
			ret = next;

		first_count = next->block_count;
		first_len = next->len;
		/*
		Should never been set to 0.
		if (next->block_count == 0)
		next->block_count = 1;
		*/

		list_for_each_entry_safe_from(block, sblock, &shctx->avail, list) {

			/* release callback: let the owner of the evicted row drop
			 * any reference to it before its blocks are recycled */
			if (first_len && shctx->free_block)
				shctx->free_block(next, block);

			/* reset the recycled block to a fresh single-block state */
			block->block_count = 1;
			block->len = 0;

			freed++;
			data_len -= shctx->block_size;

			if (data_len > 0 || !enough) {
				/* append to the tail of the row being built, or start it */
				if (last) {
					shctx_block_append_hot(shctx, &last->list, block);
					last = block;
				} else {
					shctx_block_set_hot(shctx, block);
				}
				/* first block appended past <first>'s last full block */
				if (!remain) {
					first->last_append = block;
					remain = 1;
				}
				/* enough blocks gathered: finalize the new row head */
				if (data_len <= 0) {
					ret->block_count = freed;
					ret->refcount = 1;
					ret->last_reserved = block;
					enough = 1;
				}
			}
			/* stop once the whole harvested row has been consumed */
			count++;
			if (count >= first_count)
				break;
		}
	}

	/* appending to an existing row: fold the freshly reserved blocks into
	 * <first>'s accounting and neutralize the temporary head <ret> */
	if (first) {
		first->block_count += ret->block_count;
		first->last_reserved = ret->last_reserved;
		/* Reset this block. */
		ret->last_reserved = NULL;
		ret->block_count = 1;
		ret->refcount = 0;
		/* Return the first block. */
		ret = first;
	}

out:
	return ret;
}
Emeric Brun3e541d12012-09-03 11:14:36 +0200134
William Lallemand4f45bb92017-10-30 20:08:51 +0100135/*
136 * if the refcount is 0 move the row to the hot list. Increment the refcount
Emeric Brunaf9619d2012-11-28 18:47:52 +0100137 */
William Lallemand4f45bb92017-10-30 20:08:51 +0100138void shctx_row_inc_hot(struct shared_context *shctx, struct shared_block *first)
Emeric Brun3e541d12012-09-03 11:14:36 +0200139{
William Lallemand4f45bb92017-10-30 20:08:51 +0100140 struct shared_block *block, *sblock;
141 int count = 0;
Emeric Brunaf9619d2012-11-28 18:47:52 +0100142
William Lallemand4f45bb92017-10-30 20:08:51 +0100143 if (first->refcount <= 0) {
144
145 block = first;
146
147 list_for_each_entry_safe_from(block, sblock, &shctx->avail, list) {
148
149 shctx_block_set_hot(shctx, block);
150
151 count++;
152 if (count >= first->block_count)
153 break;
Emeric Brunaf9619d2012-11-28 18:47:52 +0100154 }
Emeric Brunaf9619d2012-11-28 18:47:52 +0100155 }
Emeric Brunaf9619d2012-11-28 18:47:52 +0100156
William Lallemand4f45bb92017-10-30 20:08:51 +0100157 first->refcount++;
Emeric Brunaf9619d2012-11-28 18:47:52 +0100158}
Emeric Brun3e541d12012-09-03 11:14:36 +0200159
William Lallemand4f45bb92017-10-30 20:08:51 +0100160/*
161 * decrement the refcount and move the row at the end of the avail list if it reaches 0.
Emeric Brunaf9619d2012-11-28 18:47:52 +0100162 */
William Lallemand4f45bb92017-10-30 20:08:51 +0100163void shctx_row_dec_hot(struct shared_context *shctx, struct shared_block *first)
Emeric Brunaf9619d2012-11-28 18:47:52 +0100164{
William Lallemand4f45bb92017-10-30 20:08:51 +0100165 struct shared_block *block, *sblock;
166 int count = 0;
Emeric Brunaf9619d2012-11-28 18:47:52 +0100167
William Lallemand4f45bb92017-10-30 20:08:51 +0100168 first->refcount--;
Emeric Brun3e541d12012-09-03 11:14:36 +0200169
William Lallemand4f45bb92017-10-30 20:08:51 +0100170 if (first->refcount <= 0) {
Emeric Brun3e541d12012-09-03 11:14:36 +0200171
William Lallemand4f45bb92017-10-30 20:08:51 +0100172 block = first;
Emeric Brun3e541d12012-09-03 11:14:36 +0200173
William Lallemand4f45bb92017-10-30 20:08:51 +0100174 list_for_each_entry_safe_from(block, sblock, &shctx->hot, list) {
Emeric Brun3e541d12012-09-03 11:14:36 +0200175
William Lallemand4f45bb92017-10-30 20:08:51 +0100176 shctx_block_set_avail(shctx, block);
Emeric Brun3e541d12012-09-03 11:14:36 +0200177
William Lallemand4f45bb92017-10-30 20:08:51 +0100178 count++;
179 if (count >= first->block_count)
Emeric Brunaf9619d2012-11-28 18:47:52 +0100180 break;
Emeric Brunaf9619d2012-11-28 18:47:52 +0100181 }
182 }
Emeric Brun3e541d12012-09-03 11:14:36 +0200183
William Lallemand4f45bb92017-10-30 20:08:51 +0100184}
185
186
/*
 * Append data in the row if there is enough space.
 * The row should be in the hot list
 *
 * Return the number of bytes that could NOT be appended: 0 on complete
 * success, a positive remainder if the hot list ran out of blocks, or a
 * negative value (how much more space is needed) when the row is too small
 * to ever hold <len> more bytes.
 */
int shctx_row_data_append(struct shared_context *shctx,
                          struct shared_block *first, struct shared_block *from,
                          unsigned char *data, int len)
{
	int remain, start;
	struct shared_block *block;

	/* return -<len> needed to work */
	if (len > first->block_count * shctx->block_size - first->len)
		return (first->block_count * shctx->block_size - first->len) - len;

	/* resume from <from> when given, otherwise start at the row head */
	block = from ? from : first;
	list_for_each_entry_from(block, &shctx->hot, list) {
		/* end of copy */
		if (len <= 0)
			break;

		/* remaining written bytes in the current block. */
		remain = (shctx->block_size * first->block_count - first->len) % shctx->block_size;
		/* if remain == 0, previous buffers are full, or first->len == 0 */
		if (!remain) {
			remain = shctx->block_size;
			start = 0;
		}
		else {
			/* start must be calculated before remain is modified */
			start = shctx->block_size - remain;
		}

		/* must not try to copy more than len */
		remain = MIN(remain, len);

		memcpy(block->data + start, data, remain);

		data += remain;
		len -= remain;
		first->len += remain; /* update len in the head of the row */
		first->last_append = block;
	}

	return len;
}
Emeric Brun3e541d12012-09-03 11:14:36 +0200236
William Lallemand4f45bb92017-10-30 20:08:51 +0100237/*
238 * Copy <len> data from a row of blocks, return the remaining data to copy
239 * If 0 is returned, the full data has successfuly be copied
240 *
241 * The row should be in the hot list
242 */
243int shctx_row_data_get(struct shared_context *shctx, struct shared_block *first,
244 unsigned char *dst, int offset, int len)
245{
246 int count = 0, size = 0, start = -1;
247 struct shared_block *block;
248
William Lallemand7217c462017-10-31 20:21:46 +0100249 /* can't copy more */
250 if (len > first->len)
251 len = first->len;
252
William Lallemand4f45bb92017-10-30 20:08:51 +0100253 block = first;
254 count = 0;
255 /* Pass through the blocks to copy them */
256 list_for_each_entry_from(block, &shctx->hot, list) {
257 if (count >= first->block_count || len <= 0)
258 break;
259
260 count++;
261 /* continue until we are in right block
262 corresponding to the offset */
263 if (count < offset / shctx->block_size + 1)
264 continue;
265
266 /* on the first block, data won't possibly began at offset 0 */
267 if (start == -1)
268 start = offset - (count - 1) * shctx->block_size;
Emeric Brun3e541d12012-09-03 11:14:36 +0200269
William Lallemand4f45bb92017-10-30 20:08:51 +0100270 /* size can be lower than a block when copying the last block */
271 size = MIN(shctx->block_size - start, len);
272
273 memcpy(dst, block->data + start, size);
274 dst += size;
275 len -= size;
276 start = 0;
277 }
278 return len;
279}
Emeric Brun3e541d12012-09-03 11:14:36 +0200280
/* Allocate shared memory context.
 * <maxblocks> is maximum blocks.
 * If <maxblocks> is set to less or equal to 0, ssl cache is disabled.
 * The mapping is sized: shared_context + <extra> + maxblocks * (shared_block + <blocksize>).
 * Returns: a negative SHCTX_E_* code on failure (SHCTX_E_ALLOC_CACHE if the
 * mmap fails, SHCTX_E_INIT_LOCK if the shared mutex cannot be set up),
 * <maxblocks> if it performs context alloc, and 0 if cache is disabled.
 * On return *orig_shctx holds the new context, or NULL on failure.
 */
int shctx_init(struct shared_context **orig_shctx, int maxblocks, int blocksize, int extra, int shared)
{
	int i;
	struct shared_context *shctx;
	int ret;
#ifndef USE_PRIVATE_CACHE
#ifdef USE_PTHREAD_PSHARED
	pthread_mutexattr_t attr;
#endif
#endif
	void *cur;
	int maptype = MAP_PRIVATE;

	if (maxblocks <= 0)
		return 0;

#ifndef USE_PRIVATE_CACHE
	if (shared)
		maptype = MAP_SHARED;
#endif

	/* one anonymous mapping holds the context header, the caller's extra
	 * area, and all blocks (header + data) back to back */
	shctx = (struct shared_context *)mmap(NULL, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)),
	                                      PROT_READ | PROT_WRITE, maptype | MAP_ANON, -1, 0);
	if (!shctx || shctx == MAP_FAILED) {
		shctx = NULL;
		ret = SHCTX_E_ALLOC_CACHE;
		goto err;
	}

	shctx->nbav = 0;

#ifndef USE_PRIVATE_CACHE
	if (maptype == MAP_SHARED) {
#ifdef USE_PTHREAD_PSHARED
		/* inter-process mutex: attribute must be PROCESS_SHARED; any
		 * failure unmaps the whole area and reports SHCTX_E_INIT_LOCK */
		if (pthread_mutexattr_init(&attr)) {
			munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)));
			shctx = NULL;
			ret = SHCTX_E_INIT_LOCK;
			goto err;
		}

		if (pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)) {
			pthread_mutexattr_destroy(&attr);
			munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)));
			shctx = NULL;
			ret = SHCTX_E_INIT_LOCK;
			goto err;
		}

		if (pthread_mutex_init(&shctx->mutex, &attr)) {
			pthread_mutexattr_destroy(&attr);
			munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)));
			shctx = NULL;
			ret = SHCTX_E_INIT_LOCK;
			goto err;
		}
#else
		/* no pshared mutexes available: fall back to the waiter counter */
		shctx->waiters = 0;
#endif
		use_shared_mem = 1;
	}
#endif

	LIST_INIT(&shctx->avail);
	LIST_INIT(&shctx->hot);

	shctx->block_size = blocksize;

	/* init the free blocks after the shared context struct */
	cur = (void *)shctx + sizeof(struct shared_context) + extra;
	for (i = 0; i < maxblocks; i++) {
		struct shared_block *cur_block = (struct shared_block *)cur;
		cur_block->len = 0;
		cur_block->refcount = 0;
		cur_block->block_count = 1;
		LIST_ADDQ(&shctx->avail, &cur_block->list);
		shctx->nbav++;
		cur += sizeof(struct shared_block) + blocksize;
	}
	ret = maxblocks;

err:
	*orig_shctx = shctx;
	return ret;
}
372