blob: b8bfd4a49aa3b43f43238022efb3b74291ea03c3 [file] [log] [blame]
Emeric Brun3e541d12012-09-03 11:14:36 +02001/*
2 * shctx.c - shared context management functions for SSL
3 *
4 * Copyright (C) 2011-2012 EXCELIANCE
5 *
6 * Author: Emeric Brun - emeric@exceliance.fr
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#include <sys/mman.h>
Emeric Brun9faf0712012-09-25 11:11:16 +020015#ifndef USE_PRIVATE_CACHE
Emeric Brun3e541d12012-09-03 11:14:36 +020016#ifdef USE_SYSCALL_FUTEX
17#include <unistd.h>
Willy Tarreau18b20592012-09-04 12:26:26 +020018#ifndef u32
19#define u32 unsigned int
20#endif
Emeric Brun3e541d12012-09-03 11:14:36 +020021#include <linux/futex.h>
22#include <sys/syscall.h>
23#else /* USE_SYSCALL_FUTEX */
24#include <pthread.h>
25#endif /* USE_SYSCALL_FUTEX */
Emeric Brun9faf0712012-09-25 11:11:16 +020026#endif
Emeric Brun3e541d12012-09-03 11:14:36 +020027
28#include "ebmbtree.h"
29#include "proto/shctx.h"
30
/* One cached SSL session entry. Each entry is indexed in an ebtree by its
 * zero-padded session id, and simultaneously chained in a circular doubly
 * linked list (either the active or the free list, see the List Macros).
 */
struct shared_session {
	struct ebmb_node key;                                   /* ebtree node; the lookup key is key_data below */
	unsigned char key_data[SSL_MAX_SSL_SESSION_ID_LENGTH];  /* session id, zero-padded to max length */
	long c_date;                                            /* session creation date (SSL_SESSION_get_time) */
	int data_len;                                           /* length of the ASN1-encoded session in <data> */
	unsigned char data[SHSESS_MAX_DATA_LEN];                /* ASN1-encoded session */
	struct shared_session *p;                               /* previous entry in the circular list */
	struct shared_session *n;                               /* next entry in the circular list */
};
40
41
/* Head of the shared cache. The lock word (or mutex) is followed by the
 * anchors of the active and free session lists; the array of session slots
 * is laid out right after this struct in the same mmap'ed area (see
 * shared_context_init()).
 */
struct shared_context {
#ifndef USE_PRIVATE_CACHE
#ifdef USE_SYSCALL_FUTEX
	unsigned int waiters;          /* futex word: 0 = unlocked, 1 = locked, 2 = locked with possible waiters */
#else /* USE_SYSCALL_FUTEX */
	pthread_mutex_t mutex;         /* process-shared mutex (PTHREAD_PROCESS_SHARED, see init) */
#endif
#endif
	struct shared_session active;  /* anchor of the active list; its key node is the tree root */
	struct shared_session free;    /* anchor of the free (recyclable) list */
};
53
/* Static shared context: single process-wide cache, allocated once by
 * shared_context_init(). */
static struct shared_context *shctx = NULL;
#ifndef USE_PRIVATE_CACHE
/* non-zero when the cache was mmap'ed MAP_SHARED (multi-process mode), in
 * which case the lock/unlock macros actually take the lock */
static int use_shared_mem = 0;
#endif

/* Callbacks */
/* optional user callback invoked after each new session is inserted into the
 * cache (registered through shsess_set_new_cbk()) */
static void (*shared_session_new_cbk)(unsigned char *session, unsigned int session_len, long cdate);
62
/* Lock functions */
#ifdef USE_PRIVATE_CACHE
/* Cache is private to this process: no concurrent access, locking is a no-op. */
#define shared_context_lock(v)
#define shared_context_unlock(v)

#else
#ifdef USE_SYSCALL_FUTEX
#if defined (__i486__) || defined (__i586__) || defined (__i686__) || defined (__x86_64__)
/* Atomically exchange *ptr with <x>; returns the previous value of *ptr. */
static inline unsigned int xchg(unsigned int *ptr, unsigned int x)
{
	__asm volatile("lock xchgl %0,%1"
		     : "=r" (x), "+m" (*ptr)
		     : "0" (x)
		     : "memory");
	return x;
}

/* Atomically store <new> into *ptr if *ptr equals <old>; returns the value
 * observed in *ptr (equal to <old> exactly when the swap succeeded). */
static inline unsigned int cmpxchg(unsigned int *ptr, unsigned int old, unsigned int new)
{
	unsigned int ret;

	__asm volatile("lock cmpxchgl %2,%1"
		     : "=a" (ret), "+m" (*ptr)
		     : "r" (new), "0" (old)
		     : "memory");
	return ret;
}

/* Atomically decrement *ptr; returns non-zero iff the result is non-zero
 * (the "setne" captures the Z flag of the locked decrement). */
static inline unsigned char atomic_dec(unsigned int *ptr)
{
	unsigned char ret;
	__asm volatile("lock decl %0\n"
		     "setne %1\n"
		     : "+m" (*ptr), "=qm" (ret)
		     :
		     : "memory");
	return ret;
}

#else /* if no x86_64 or i586 arch: use less optimized gcc >= 4.1 built-ins */
/* Atomically exchange *ptr with <x>; returns the previous value of *ptr. */
static inline unsigned int xchg(unsigned int *ptr, unsigned int x)
{
	return __sync_lock_test_and_set(ptr, x);
}

/* Atomically store <new> into *ptr if *ptr equals <old>; returns the value
 * observed in *ptr (equal to <old> exactly when the swap succeeded). */
static inline unsigned int cmpxchg(unsigned int *ptr, unsigned int old, unsigned int new)
{
	return __sync_val_compare_and_swap(ptr, old, new);
}

/* Atomically decrement *ptr; returns non-zero iff the result is non-zero. */
static inline unsigned char atomic_dec(unsigned int *ptr)
{
	return __sync_sub_and_fetch(ptr, 1) ? 1 : 0;
}

#endif
119
/* Take the process-shared lock. Classic 3-state futex protocol on
 * shctx->waiters: 0 = unlocked, 1 = locked uncontended, 2 = locked with
 * possible waiters. */
static inline void _shared_context_lock(void)
{
	unsigned int x;

	/* fast path: try to grab a free lock (0 -> 1) */
	x = cmpxchg(&shctx->waiters, 0, 1);
	if (x) {
		/* contended: advertise a waiter by forcing state 2 */
		if (x != 2)
			x = xchg(&shctx->waiters, 2);

		/* sleep while the word stays at 2; on wake-up, re-grab the
		 * lock pessimistically as "contended" (set 2 again) so the
		 * eventual unlock wakes the remaining waiters */
		while (x) {
			syscall(SYS_futex, &shctx->waiters, FUTEX_WAIT, 2, NULL, 0, 0);
			x = xchg(&shctx->waiters, 2);
		}
	}
}

/* Release the process-shared lock and wake one waiter if any. */
static inline void _shared_context_unlock(void)
{
	/* atomic_dec() returns non-zero when the decremented word is still
	 * non-zero, i.e. it was 2 (contended): clear it and wake one waiter */
	if (atomic_dec(&shctx->waiters)) {
		shctx->waiters = 0;
		syscall(SYS_futex, &shctx->waiters, FUTEX_WAKE, 1, NULL, 0, 0);
	}
}

/* Locking is only required when the cache is shared between processes. */
#define shared_context_lock(v) if (use_shared_mem) _shared_context_lock()

#define shared_context_unlock(v) if (use_shared_mem) _shared_context_unlock()

#else /* USE_SYSCALL_FUTEX */

/* fallback: process-shared pthread mutex (attribute set in shared_context_init) */
#define shared_context_lock(v) if (use_shared_mem) pthread_mutex_lock(&shctx->mutex)

#define shared_context_unlock(v) if (use_shared_mem) pthread_mutex_unlock(&shctx->mutex)

#endif
#endif
Emeric Brun3e541d12012-09-03 11:14:36 +0200156
/* List Macros
 *
 * Sessions live in two circular doubly linked lists anchored at
 * shctx->active (cached entries, most recently used at the head) and
 * shctx->free (recyclable entries). All statement-like macros are wrapped
 * in do { } while (0) so they expand to exactly one statement and remain
 * safe in un-braced if/else bodies (the previous multi-statement forms
 * were not).
 */

/* detach <s> from whichever list currently holds it */
#define shsess_unset(s) do { \
		(s)->n->p = (s)->p; \
		(s)->p->n = (s)->n; \
	} while (0)

/* move <s> to the head of the free list */
#define shsess_set_free(s) do { \
		shsess_unset(s); \
		(s)->p = &shctx->free; \
		(s)->n = shctx->free.n; \
		shctx->free.n->p = (s); \
		shctx->free.n = (s); \
	} while (0)

/* move <s> to the head of the active list (marks it most recently used) */
#define shsess_set_active(s) do { \
		shsess_unset(s); \
		(s)->p = &shctx->active; \
		(s)->n = shctx->active.n; \
		shctx->active.n->p = (s); \
		shctx->active.n = (s); \
	} while (0)

/* pick the session slot to (re)use: the oldest free entry, or the oldest
 * active entry when the free list is empty */
#define shsess_get_next() \
	((shctx->free.p == shctx->free.n) ? shctx->active.p : shctx->free.p)

/* Tree Macros */

#define shsess_tree_delete(s) ebmb_delete(&(s)->key)

/* returns the node already indexed under the same key, or <s> itself if
 * none existed; never returns NULL */
#define shsess_tree_insert(s) ((struct shared_session *)ebmb_insert(&shctx->active.key.node.branches, \
                                                                    &(s)->key, SSL_MAX_SSL_SESSION_ID_LENGTH))

#define shsess_tree_lookup(k) ((struct shared_session *)ebmb_lookup(&shctx->active.key.node.branches, \
                                                                    (k), SSL_MAX_SSL_SESSION_ID_LENGTH))

/* Other Macros */

/* store key <k> of length <l> into <s>->key_data, zero-padded up to
 * SSL_MAX_SSL_SESSION_ID_LENGTH */
#define shsess_set_key(s,k,l) do { \
		memcpy((s)->key_data, (k), (l)); \
		if ((l) < SSL_MAX_SSL_SESSION_ID_LENGTH) \
			memset((s)->key_data + (l), 0, SSL_MAX_SSL_SESSION_ID_LENGTH - (l)); \
	} while (0)
194
195
/* SSL context callbacks */

/* SSL callback invoked by openssl each time a new session is negotiated:
 * stores the ASN1-encoded session in the shared cache, then optionally
 * forwards it to the user-level "new session" callback.
 * Always returns 0 so openssl does not keep an extra reference. */
int shctx_new_cb(SSL *ssl, SSL_SESSION *sess)
{
	struct shared_session *shsess;
	unsigned char *data,*p;
	unsigned int data_len;
	/* layout: [session id padded to SSL_MAX_SSL_SESSION_ID_LENGTH][ASN1 data] */
	unsigned char encsess[SHSESS_MAX_ENCODED_LEN];
	(void)ssl;

	/* check if session reserved size in aligned buffer is large enough for the ASN1-encoded session */
	data_len=i2d_SSL_SESSION(sess, NULL);
	if(data_len > SHSESS_MAX_DATA_LEN)
		return 0;

	/* process ASN1 session encoding before the lock: lower cost */
	p = data = encsess+SSL_MAX_SSL_SESSION_ID_LENGTH;
	i2d_SSL_SESSION(sess, &p);

	shared_context_lock();

	/* recycle the oldest free slot, or the oldest active one if none free */
	shsess = shsess_get_next();

	/* de-index the slot before overwriting its key */
	shsess_tree_delete(shsess);

	shsess_set_key(shsess, sess->session_id, sess->session_id_length);

	/* it returns the already existing node or current node if none, never returns null */
	shsess = shsess_tree_insert(shsess);

	/* store ASN1 encoded session into cache */
	shsess->data_len = data_len;
	memcpy(shsess->data, data, data_len);

	/* store creation date */
	shsess->c_date = SSL_SESSION_get_time(sess);

	shsess_set_active(shsess);

	shared_context_unlock();

	if (shared_session_new_cbk) { /* if user level callback is set */
		/* copy sessionid padded with 0 into the sessionid + data aligned buffer */
		memcpy(encsess, sess->session_id, sess->session_id_length);
		if (sess->session_id_length < SSL_MAX_SSL_SESSION_ID_LENGTH)
			memset(encsess+sess->session_id_length, 0, SSL_MAX_SSL_SESSION_ID_LENGTH-sess->session_id_length);

		shared_session_new_cbk(encsess, SSL_MAX_SSL_SESSION_ID_LENGTH+data_len, SSL_SESSION_get_time(sess));
	}

	return 0; /* do not increment session reference count */
}
249
/* SSL callback invoked by openssl when a session id was not found in its
 * internal cache: looks it up in the shared cache, and returns a freshly
 * decoded SSL_SESSION (or NULL if not cached). The ASN1 decode happens
 * outside the lock, on a stack copy of the cached data. */
SSL_SESSION *shctx_get_cb(SSL *ssl, unsigned char *key, int key_len, int *do_copy)
{
	struct shared_session *shsess;
	unsigned char data[SHSESS_MAX_DATA_LEN], *p;
	unsigned char tmpkey[SSL_MAX_SSL_SESSION_ID_LENGTH];
	unsigned int data_len;
	long cdate;
	SSL_SESSION *sess;
	(void)ssl;

	/* allow the session to be freed automatically by openssl */
	*do_copy = 0;

	/* tree key is zeros padded sessionid */
	if (key_len < SSL_MAX_SSL_SESSION_ID_LENGTH) {
		memcpy(tmpkey, key, key_len);
		memset(tmpkey + key_len, 0, SSL_MAX_SSL_SESSION_ID_LENGTH - key_len);
		key = tmpkey;
	}

	/* lock cache */
	shared_context_lock();

	/* lookup for session */
	shsess = shsess_tree_lookup(key);
	if (!shsess) {
		/* no session found: unlock cache and exit */
		shared_context_unlock();
		return NULL;
	}

	/* backup creation date to reset in session after ASN1 decode */
	cdate = shsess->c_date;

	/* copy ASN1 session data to decode outside the lock */
	data_len = shsess->data_len;
	memcpy(data, shsess->data, shsess->data_len);

	/* mark the entry most recently used */
	shsess_set_active(shsess);

	shared_context_unlock();

	/* decode ASN1 session */
	p = data;
	sess = d2i_SSL_SESSION(NULL, (const unsigned char **)&p, data_len);

	/* reset creation date */
	if (sess)
		SSL_SESSION_set_time(sess, cdate);

	return sess;
}
303
304/* SSL callback used to signal session is no more used in internal cache */
305void shctx_remove_cb(SSL_CTX *ctx, SSL_SESSION *sess)
306{
307 struct shared_session *shsess;
308 unsigned char tmpkey[SSL_MAX_SSL_SESSION_ID_LENGTH];
309 unsigned char *key = sess->session_id;
310 (void)ctx;
311
312 /* tree key is zeros padded sessionid */
313 if (sess->session_id_length < SSL_MAX_SSL_SESSION_ID_LENGTH) {
314 memcpy(tmpkey, sess->session_id, sess->session_id_length);
315 memset(tmpkey+sess->session_id_length, 0, SSL_MAX_SSL_SESSION_ID_LENGTH - sess->session_id_length);
316 key = tmpkey;
317 }
318
319 shared_context_lock();
320
321 /* lookup for session */
322 shsess = shsess_tree_lookup(key);
323 if (shsess) {
324 shsess_set_free(shsess);
325 }
326
327 /* unlock cache */
328 shared_context_unlock();
329}
330
331/* User level function called to add a session to the cache (remote updates) */
332void shctx_sess_add(const unsigned char *encsess, unsigned int len, long cdate)
333{
334 struct shared_session *shsess;
335
336 /* check buffer is at least padded key long + 1 byte
337 and data_len not too long */
338 if ((len <= SSL_MAX_SSL_SESSION_ID_LENGTH)
339 || (len > SHSESS_MAX_DATA_LEN+SSL_MAX_SSL_SESSION_ID_LENGTH))
340 return;
341
342 shared_context_lock();
343
344 shsess = shsess_get_next();
345
346 shsess_tree_delete(shsess);
347
348 shsess_set_key(shsess, encsess, SSL_MAX_SSL_SESSION_ID_LENGTH);
349
350 /* it returns the already existing node or current node if none, never returns null */
351 shsess = shsess_tree_insert(shsess);
352
353 /* store into cache and update earlier on session get events */
354 if (cdate)
355 shsess->c_date = (long)cdate;
356
357 /* copy ASN1 session data into cache */
358 shsess->data_len = len-SSL_MAX_SSL_SESSION_ID_LENGTH;
359 memcpy(shsess->data, encsess+SSL_MAX_SSL_SESSION_ID_LENGTH, shsess->data_len);
360
361 shsess_set_active(shsess);
362
363 shared_context_unlock();
364}
365
/* Function used to set a callback on new session creation: <func> receives
 * the padded-id + ASN1 buffer, its total length, and the creation date.
 * It is invoked from shctx_new_cb() after each insertion. */
void shsess_set_new_cbk(void (*func)(unsigned char *, unsigned int, long))
{
	shared_session_new_cbk = func;
}
371
372/* Allocate shared memory context.
373 * size is maximum cached sessions.
374 * if set less or equal to 0, SHCTX_DEFAULT_SIZE is used.
375 * Returns: -1 on alloc failure, size if it performs context alloc,
376 * and 0 if cache is already allocated */
Emeric Brun4b3091e2012-09-24 15:48:52 +0200377int shared_context_init(int size, int shared)
Emeric Brun3e541d12012-09-03 11:14:36 +0200378{
379 int i;
Emeric Brun9faf0712012-09-25 11:11:16 +0200380#ifndef USE_PRIVATE_CACHE
Emeric Brun3e541d12012-09-03 11:14:36 +0200381#ifndef USE_SYSCALL_FUTEX
382 pthread_mutexattr_t attr;
383#endif /* USE_SYSCALL_FUTEX */
Emeric Brun9faf0712012-09-25 11:11:16 +0200384#endif
Emeric Brun3e541d12012-09-03 11:14:36 +0200385 struct shared_session *prev,*cur;
Emeric Brun4b3091e2012-09-24 15:48:52 +0200386 int maptype = MAP_PRIVATE;
Emeric Brun3e541d12012-09-03 11:14:36 +0200387
388 if (shctx)
389 return 0;
390
391 if (size<=0)
392 size = SHCTX_DEFAULT_SIZE;
393
Emeric Brun9faf0712012-09-25 11:11:16 +0200394#ifndef USE_PRIVATE_CACHE
Emeric Brun4b3091e2012-09-24 15:48:52 +0200395 if (shared)
396 maptype = MAP_SHARED;
Emeric Brun9faf0712012-09-25 11:11:16 +0200397#endif
Emeric Brun4b3091e2012-09-24 15:48:52 +0200398
Emeric Brun3e541d12012-09-03 11:14:36 +0200399 shctx = (struct shared_context *)mmap(NULL, sizeof(struct shared_context)+(size*sizeof(struct shared_session)),
Emeric Brun4b3091e2012-09-24 15:48:52 +0200400 PROT_READ | PROT_WRITE, maptype | MAP_ANON, -1, 0);
Emeric Brun3e541d12012-09-03 11:14:36 +0200401 if (!shctx || shctx == MAP_FAILED) {
402 shctx = NULL;
403 return -1;
404 }
405
Emeric Brun9faf0712012-09-25 11:11:16 +0200406#ifndef USE_PRIVATE_CACHE
Emeric Brun3e541d12012-09-03 11:14:36 +0200407#ifdef USE_SYSCALL_FUTEX
408 shctx->waiters = 0;
409#else
410 pthread_mutexattr_init(&attr);
411 pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
412 pthread_mutex_init(&shctx->mutex, &attr);
413#endif
Emeric Brun4b3091e2012-09-24 15:48:52 +0200414 if (maptype == MAP_SHARED)
415 use_shared_mem = 1;
Emeric Brun9faf0712012-09-25 11:11:16 +0200416#endif
Emeric Brun4b3091e2012-09-24 15:48:52 +0200417
Emeric Brun3e541d12012-09-03 11:14:36 +0200418 memset(&shctx->active.key, 0, sizeof(struct ebmb_node));
419 memset(&shctx->free.key, 0, sizeof(struct ebmb_node));
420
421 /* No duplicate authorized in tree: */
422 //shctx->active.key.node.branches.b[1] = (void *)1;
423 shctx->active.key.node.branches = EB_ROOT_UNIQUE;
424
425 cur = &shctx->active;
426 cur->n = cur->p = cur;
427
428 cur = &shctx->free;
429 for (i = 0 ; i < size ; i++) {
430 prev = cur;
431 cur = (struct shared_session *)((char *)prev + sizeof(struct shared_session));
432 prev->n = cur;
433 cur->p = prev;
434 }
435 cur->n = &shctx->free;
436 shctx->free.p = cur;
437
438 return size;
439}
440
441
442/* Set session cache mode to server and disable openssl internal cache.
443 * Set shared cache callbacks on an ssl context.
444 * Shared context MUST be firstly initialized */
445void shared_context_set_cache(SSL_CTX *ctx)
446{
447 SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_SERVER |
448 SSL_SESS_CACHE_NO_INTERNAL |
449 SSL_SESS_CACHE_NO_AUTO_CLEAR);
450 if (!shctx)
451 return;
452
453 /* Set callbacks */
454 SSL_CTX_sess_set_new_cb(ctx, shctx_new_cb);
455 SSL_CTX_sess_set_get_cb(ctx, shctx_get_cb);
456 SSL_CTX_sess_set_remove_cb(ctx, shctx_remove_cb);
457}