/*
 * shctx.c - shared context management functions for SSL
 *
 * Copyright (C) 2011-2012 EXCELIANCE
 *
 * Author: Emeric Brun - emeric@exceliance.fr
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <sys/mman.h>
#ifdef USE_SYSCALL_FUTEX
#include <unistd.h>
#ifndef u32
#define u32 unsigned int
#endif
#include <linux/futex.h>
#include <sys/syscall.h>
#else /* USE_SYSCALL_FUTEX */
#include <pthread.h>
#endif /* USE_SYSCALL_FUTEX */

#include "ebmbtree.h"
#include "proto/shctx.h"

struct shared_session {
	struct ebmb_node key;                                   /* indexing node; the key bytes are stored in key_data below */
	unsigned char key_data[SSL_MAX_SSL_SESSION_ID_LENGTH]; /* zero-padded session id */
	long c_date;                                            /* session creation date */
	int data_len;                                           /* length of the ASN1-encoded session */
	unsigned char data[SHSESS_MAX_DATA_LEN];                /* ASN1-encoded session */
	struct shared_session *p;                               /* previous session in the active or free list */
	struct shared_session *n;                               /* next session in the active or free list */
};


struct shared_context {
#ifdef USE_SYSCALL_FUTEX
	unsigned int waiters;                                   /* futex-based lock word */
#else /* USE_SYSCALL_FUTEX */
	pthread_mutex_t mutex;
#endif
	struct shared_session active;                           /* head of the list of cached sessions, most recently used first */
	struct shared_session free;                             /* head of the list of free session slots */
};

/* Static shared context */
static struct shared_context *shctx = NULL;
static int use_shared_mem = 0;                                  /* non-zero when the cache is shared between processes and needs locking */

/* Callbacks */
static void (*shared_session_new_cbk)(unsigned char *session, unsigned int session_len, long cdate);

/* Lock functions */
#ifdef USE_SYSCALL_FUTEX
#if defined (__i586__) || defined (__x86_64__)
/* atomically exchange <x> with *ptr and return the previous value of *ptr */
static inline unsigned int xchg(unsigned int *ptr, unsigned int x)
{
	__asm volatile("lock xchgl %0,%1"
	               : "=r" (x), "+m" (*ptr)
	               : "0" (x)
	               : "memory");
	return x;
}

/* atomically set *ptr to <new> if it still contains <old>; always return the value previously found in *ptr */
static inline unsigned int cmpxchg(unsigned int *ptr, unsigned int old, unsigned int new)
{
	unsigned int ret;

	__asm volatile("lock cmpxchgl %2,%1"
	               : "=a" (ret), "+m" (*ptr)
	               : "r" (new), "0" (old)
	               : "memory");
	return ret;
}

/* atomically decrement *ptr; return non-zero as long as the result is not zero */
static inline unsigned char atomic_dec(unsigned int *ptr)
{
	unsigned char ret;
	__asm volatile("lock decl %0\n"
	               "setne %1\n"
	               : "+m" (*ptr), "=qm" (ret)
	               :
	               : "memory");
	return ret;
}

#else /* if neither x86_64 nor i586: use the less optimized gcc >= 4.1 built-ins */
static inline unsigned int xchg(unsigned int *ptr, unsigned int x)
{
	return __sync_lock_test_and_set(ptr, x);
}

static inline unsigned int cmpxchg(unsigned int *ptr, unsigned int old, unsigned int new)
{
	return __sync_val_compare_and_swap(ptr, old, new);
}

static inline unsigned char atomic_dec(unsigned int *ptr)
{
	return __sync_sub_and_fetch(ptr, 1) ? 1 : 0;
}

#endif

/*
 * Futex-based lock. The "waiters" word holds one of three states:
 * 0 = unlocked, 1 = locked with no waiter, 2 = locked with possible waiters.
 */
static inline void _shared_context_lock(void)
{
	unsigned int x;

	x = cmpxchg(&shctx->waiters, 0, 1);
	if (x) {
		if (x != 2)
			x = xchg(&shctx->waiters, 2);

		while (x) {
			syscall(SYS_futex, &shctx->waiters, FUTEX_WAIT, 2, NULL, 0, 0);
			x = xchg(&shctx->waiters, 2);
		}
	}
}

static inline void _shared_context_unlock(void)
{
	if (atomic_dec(&shctx->waiters)) {
		/* the lock was contended: reset it and wake one waiter up */
		shctx->waiters = 0;
		syscall(SYS_futex, &shctx->waiters, FUTEX_WAKE, 1, NULL, 0, 0);
	}
}

#define shared_context_lock(v) if (use_shared_mem) _shared_context_lock()

#define shared_context_unlock(v) if (use_shared_mem) _shared_context_unlock()

#else /* USE_SYSCALL_FUTEX */

#define shared_context_lock(v) if (use_shared_mem) pthread_mutex_lock(&shctx->mutex)

#define shared_context_unlock(v) if (use_shared_mem) pthread_mutex_unlock(&shctx->mutex)

#endif

/* List Macros */

#define shsess_unset(s)		(s)->n->p = (s)->p; \
				(s)->p->n = (s)->n;

#define shsess_set_free(s)	shsess_unset(s) \
				(s)->p = &shctx->free; \
				(s)->n = shctx->free.n; \
				shctx->free.n->p = s; \
				shctx->free.n = s;


#define shsess_set_active(s)	shsess_unset(s) \
				(s)->p = &shctx->active; \
				(s)->n = shctx->active.n; \
				shctx->active.n->p = s; \
				shctx->active.n = s;


#define shsess_get_next()	(shctx->free.p == shctx->free.n) ? \
				shctx->active.p : shctx->free.p;

/* Tree Macros */

#define shsess_tree_delete(s)	ebmb_delete(&(s)->key);

#define shsess_tree_insert(s)	(struct shared_session *)ebmb_insert(&shctx->active.key.node.branches, \
				                                     &(s)->key, SSL_MAX_SSL_SESSION_ID_LENGTH);

#define shsess_tree_lookup(k)	(struct shared_session *)ebmb_lookup(&shctx->active.key.node.branches, \
				                                     (k), SSL_MAX_SSL_SESSION_ID_LENGTH);

/* Other Macros */

#define shsess_set_key(s,k,l)	{ memcpy((s)->key_data, (k), (l)); \
				  if ((l) < SSL_MAX_SSL_SESSION_ID_LENGTH) \
				          memset((s)->key_data+(l), 0, SSL_MAX_SSL_SESSION_ID_LENGTH-(l)); };


/* SSL context callbacks */

/* SSL callback used on new session creation */
int shctx_new_cb(SSL *ssl, SSL_SESSION *sess)
{
	struct shared_session *shsess;
	unsigned char *data, *p;
	unsigned int data_len;
	unsigned char encsess[SHSESS_MAX_ENCODED_LEN];
	(void)ssl;

	/* check that the space reserved in the aligned buffer is large enough for the ASN1-encoded session */
	data_len = i2d_SSL_SESSION(sess, NULL);
	if (data_len > SHSESS_MAX_DATA_LEN)
		return 0;

	/* perform the ASN1 session encoding before taking the lock: lower cost */
	p = data = encsess + SSL_MAX_SSL_SESSION_ID_LENGTH;
	i2d_SSL_SESSION(sess, &p);

	shared_context_lock();

	shsess = shsess_get_next();

	shsess_tree_delete(shsess);

	shsess_set_key(shsess, sess->session_id, sess->session_id_length);

	/* returns the already existing node, or the current node if none exists; never returns NULL */
	shsess = shsess_tree_insert(shsess);

	/* store the ASN1-encoded session into the cache */
	shsess->data_len = data_len;
	memcpy(shsess->data, data, data_len);

	/* store the creation date */
	shsess->c_date = SSL_SESSION_get_time(sess);

	shsess_set_active(shsess);

	shared_context_unlock();

	if (shared_session_new_cbk) { /* if a user-level callback is set */
		/* copy the session id, padded with zeros, in front of the encoded session data */
		memcpy(encsess, sess->session_id, sess->session_id_length);
		if (sess->session_id_length < SSL_MAX_SSL_SESSION_ID_LENGTH)
			memset(encsess + sess->session_id_length, 0, SSL_MAX_SSL_SESSION_ID_LENGTH - sess->session_id_length);

		shared_session_new_cbk(encsess, SSL_MAX_SSL_SESSION_ID_LENGTH + data_len, SSL_SESSION_get_time(sess));
	}

	return 0; /* do not increment the session reference count */
}

/* SSL callback used to look up an existing session when none was found in the internal cache */
SSL_SESSION *shctx_get_cb(SSL *ssl, unsigned char *key, int key_len, int *do_copy)
{
	struct shared_session *shsess;
	unsigned char data[SHSESS_MAX_DATA_LEN], *p;
	unsigned char tmpkey[SSL_MAX_SSL_SESSION_ID_LENGTH];
	unsigned int data_len;
	long cdate;
	SSL_SESSION *sess;
	(void)ssl;

	/* allow the session to be freed automatically by openssl */
	*do_copy = 0;

	/* the tree key is the zero-padded session id */
	if (key_len < SSL_MAX_SSL_SESSION_ID_LENGTH) {
		memcpy(tmpkey, key, key_len);
		memset(tmpkey + key_len, 0, SSL_MAX_SSL_SESSION_ID_LENGTH - key_len);
		key = tmpkey;
	}

	/* lock the cache */
	shared_context_lock();

	/* look the session up */
	shsess = shsess_tree_lookup(key);
	if (!shsess) {
		/* no session found: unlock the cache and exit */
		shared_context_unlock();
		return NULL;
	}

	/* back up the creation date to restore it into the session after ASN1 decoding */
	cdate = shsess->c_date;

	/* copy the ASN1 session data so it can be decoded outside the lock */
	data_len = shsess->data_len;
	memcpy(data, shsess->data, shsess->data_len);

	shsess_set_active(shsess);

	shared_context_unlock();

	/* decode the ASN1 session */
	p = data;
	sess = d2i_SSL_SESSION(NULL, (const unsigned char **)&p, data_len);

	/* restore the creation date */
	if (sess)
		SSL_SESSION_set_time(sess, cdate);

	return sess;
}

/* SSL callback used to signal that a session is no longer used in the internal cache */
void shctx_remove_cb(SSL_CTX *ctx, SSL_SESSION *sess)
{
	struct shared_session *shsess;
	unsigned char tmpkey[SSL_MAX_SSL_SESSION_ID_LENGTH];
	unsigned char *key = sess->session_id;
	(void)ctx;

	/* the tree key is the zero-padded session id */
	if (sess->session_id_length < SSL_MAX_SSL_SESSION_ID_LENGTH) {
		memcpy(tmpkey, sess->session_id, sess->session_id_length);
		memset(tmpkey + sess->session_id_length, 0, SSL_MAX_SSL_SESSION_ID_LENGTH - sess->session_id_length);
		key = tmpkey;
	}

	shared_context_lock();

	/* look the session up and move it to the free list */
	shsess = shsess_tree_lookup(key);
	if (shsess) {
		shsess_set_free(shsess);
	}

	/* unlock the cache */
	shared_context_unlock();
}

/* User-level function called to add a session to the cache (remote updates) */
void shctx_sess_add(const unsigned char *encsess, unsigned int len, long cdate)
{
	struct shared_session *shsess;

	/* check that the buffer is at least one byte longer than the padded key
	 * and that data_len is not too long */
	if ((len <= SSL_MAX_SSL_SESSION_ID_LENGTH)
	    || (len > SHSESS_MAX_DATA_LEN + SSL_MAX_SSL_SESSION_ID_LENGTH))
		return;

	shared_context_lock();

	shsess = shsess_get_next();

	shsess_tree_delete(shsess);

	shsess_set_key(shsess, encsess, SSL_MAX_SSL_SESSION_ID_LENGTH);

	/* returns the already existing node, or the current node if none exists; never returns NULL */
	shsess = shsess_tree_insert(shsess);

	/* store the creation date into the cache when one is provided */
	if (cdate)
		shsess->c_date = (long)cdate;

	/* copy the ASN1 session data into the cache */
	shsess->data_len = len - SSL_MAX_SSL_SESSION_ID_LENGTH;
	memcpy(shsess->data, encsess + SSL_MAX_SSL_SESSION_ID_LENGTH, shsess->data_len);

	shsess_set_active(shsess);

	shared_context_unlock();
}

/* Function used to set a callback on new session creation */
void shsess_set_new_cbk(void (*func)(unsigned char *, unsigned int, long))
{
	shared_session_new_cbk = func;
}

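/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * user-level callback that an application could register through
 * shsess_set_new_cbk() to replicate new sessions to its peers.
 * send_to_peers() is an assumed, application-provided transport; on the
 * receiving side the same buffer would simply be passed back to
 * shctx_sess_add(encsess, len, cdate).
 *
 *	static void my_session_broadcast(unsigned char *encsess, unsigned int len, long cdate)
 *	{
 *		send_to_peers(encsess, len, cdate);
 *	}
 *
 *	shsess_set_new_cbk(my_session_broadcast);
 */
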
/* Allocate the shared memory context.
 * <size> is the maximum number of cached sessions;
 * if it is less than or equal to 0, SHCTX_DEFAULT_SIZE is used.
 * Returns -1 on allocation failure, <size> if it performed the allocation,
 * and 0 if the cache was already allocated. */
int shared_context_init(int size, int shared)
{
	int i;
#ifndef USE_SYSCALL_FUTEX
	pthread_mutexattr_t attr;
#endif /* USE_SYSCALL_FUTEX */
	struct shared_session *prev, *cur;
	int maptype = MAP_PRIVATE;

	if (shctx)
		return 0;

	if (size <= 0)
		size = SHCTX_DEFAULT_SIZE;

	if (shared)
		maptype = MAP_SHARED;

	shctx = (struct shared_context *)mmap(NULL, sizeof(struct shared_context) + (size * sizeof(struct shared_session)),
	                                      PROT_READ | PROT_WRITE, maptype | MAP_ANON, -1, 0);
	if (!shctx || shctx == MAP_FAILED) {
		shctx = NULL;
		return -1;
	}

#ifdef USE_SYSCALL_FUTEX
	shctx->waiters = 0;
#else
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	pthread_mutex_init(&shctx->mutex, &attr);
#endif
	if (maptype == MAP_SHARED)
		use_shared_mem = 1;

	memset(&shctx->active.key, 0, sizeof(struct ebmb_node));
	memset(&shctx->free.key, 0, sizeof(struct ebmb_node));

	/* No duplicates allowed in the tree: */
	//shctx->active.key.node.branches.b[1] = (void *)1;
	shctx->active.key.node.branches = EB_ROOT_UNIQUE;

	cur = &shctx->active;
	cur->n = cur->p = cur;

	/* chain the session slots allocated right after the context into the free list */
	cur = &shctx->free;
	for (i = 0 ; i < size ; i++) {
		prev = cur;
		cur = (struct shared_session *)((char *)prev + sizeof(struct shared_session));
		prev->n = cur;
		cur->p = prev;
	}
	cur->n = &shctx->free;
	shctx->free.p = cur;

	return size;
}


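/*
 * Illustrative usage sketch, not part of the original file: a caller would
 * typically size the cache from its own configuration and request a shared
 * mapping when several processes must access the same cache. The variable
 * names below are assumptions made for the example:
 *
 *	int ret = shared_context_init(cfg_cache_size, nb_processes > 1);
 *	if (ret < 0)
 *		fprintf(stderr, "failed to allocate the shared SSL session cache\n");
 */
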
/* Set the session cache mode to server and disable the openssl internal cache.
 * Set the shared cache callbacks on an ssl context.
 * The shared context MUST be initialized first. */
void shared_context_set_cache(SSL_CTX *ctx)
{
	SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_SERVER |
	                                    SSL_SESS_CACHE_NO_INTERNAL |
	                                    SSL_SESS_CACHE_NO_AUTO_CLEAR);
	if (!shctx)
		return;

	/* Set the callbacks */
	SSL_CTX_sess_set_new_cb(ctx, shctx_new_cb);
	SSL_CTX_sess_set_get_cb(ctx, shctx_get_cb);
	SSL_CTX_sess_set_remove_cb(ctx, shctx_remove_cb);
}
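
/*
 * Illustrative end-to-end sketch, not part of the original file, assuming an
 * already configured SSL_CTX named "ctx": allocate the cache once at startup,
 * then attach the shared-cache callbacks to each context that should use it.
 *
 *	if (shared_context_init(SHCTX_DEFAULT_SIZE, 1) >= 0)
 *		shared_context_set_cache(ctx);
 *
 * Note that shared_context_set_cache() disables the OpenSSL internal cache
 * even when the shared context could not be allocated, so checking the
 * return value of shared_context_init() first is advisable.
 */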
439}