blob: 48d95418bb932f4f7ca31e4e96a632b4ffda80bb [file] [log] [blame]
Emeric Brun3e541d12012-09-03 11:14:36 +02001/*
2 * shctx.c - shared context management functions for SSL
3 *
4 * Copyright (C) 2011-2012 EXCELIANCE
5 *
6 * Author: Emeric Brun - emeric@exceliance.fr
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#include <sys/mman.h>
15#ifdef USE_SYSCALL_FUTEX
16#include <unistd.h>
Willy Tarreau18b20592012-09-04 12:26:26 +020017#ifndef u32
18#define u32 unsigned int
19#endif
Emeric Brun3e541d12012-09-03 11:14:36 +020020#include <linux/futex.h>
21#include <sys/syscall.h>
22#else /* USE_SYSCALL_FUTEX */
23#include <pthread.h>
24#endif /* USE_SYSCALL_FUTEX */
25
26#include "ebmbtree.h"
27#include "proto/shctx.h"
28
/* One cache slot: a single SSL session stored in the shared memory
 * segment. Each slot is indexed by <key> in the ebtree rooted in
 * shctx->active and is simultaneously chained into either the "active"
 * (in use, LRU-ordered) or the "free" doubly-linked list. */
struct shared_session {
	struct ebmb_node key;      /* ebtree node; the key bytes live in key_data below */
	unsigned char key_data[SSL_MAX_SSL_SESSION_ID_LENGTH]; /* session id, zero-padded to fixed length */
	long c_date;               /* session creation date (see SSL_SESSION_get_time) */
	int data_len;              /* number of valid bytes in <data> */
	unsigned char data[SHSESS_MAX_DATA_LEN]; /* ASN1-encoded SSL session */
	struct shared_session *p;  /* previous element in the active/free list */
	struct shared_session *n;  /* next element in the active/free list */
};
38
39
/* Head of the shared cache, placed at the start of the mmap'ed shared
 * memory segment; holds the inter-process lock and the roots of the
 * active/free lists (the active head's key doubles as the tree root). */
struct shared_context {
#ifdef USE_SYSCALL_FUTEX
	unsigned int waiters;         /* futex word: 0 = unlocked, 1 = locked, 2 = locked with waiters */
#else /* USE_SYSCALL_FUTEX */
	pthread_mutex_t mutex;        /* process-shared mutex protecting the cache */
#endif
	struct shared_session active; /* dummy head of the active (in-use) list; its key node is the tree root */
	struct shared_session free;   /* dummy head of the free-slot list */
};
49
/* Static shared context: points to the mmap'ed shared memory segment;
 * stays NULL until shared_context_init() succeeds. */
static struct shared_context *shctx = NULL;

/* Callbacks */
/* Optional user-level callback invoked for each session newly added to
 * the local cache (registered via shsess_set_new_cbk()); receives the
 * zero-padded session id immediately followed by the ASN1-encoded
 * session, the total length of that buffer, and the creation date. */
static void (*shared_session_new_cbk)(unsigned char *session, unsigned int session_len, long cdate);
55
56
57/* Lock functions */
58#ifdef USE_SYSCALL_FUTEX
59#if defined (__i586__) || defined (__x86_64__)
/* Atomically exchange <x> with *<ptr> and return the previous value
 * (x86 "lock xchgl"; acts as a full memory barrier). */
static inline unsigned int xchg(unsigned int *ptr, unsigned int x)
{
	__asm volatile("lock xchgl %0,%1"
	               : "=r" (x), "+m" (*ptr)
	               : "0" (x)
	               : "memory");
	return x;
}
68
/* Atomic compare-and-swap: if *<ptr> equals <old>, store <new> into it.
 * Returns the value *<ptr> held before the operation, so the caller can
 * tell whether the swap happened (return value == <old>). */
static inline unsigned int cmpxchg(unsigned int *ptr, unsigned int old, unsigned int new)
{
	unsigned int ret;

	__asm volatile("lock cmpxchgl %2,%1"
	               : "=a" (ret), "+m" (*ptr)
	               : "r" (new), "0" (old)
	               : "memory");
	return ret;
}
79
/* Atomically decrement *<ptr>. Returns non-zero while the result is
 * still non-zero, and zero once the counter reaches zero
 * ("lock decl" followed by setne on the zero flag). */
static inline unsigned char atomic_dec(unsigned int *ptr)
{
	unsigned char ret;
	__asm volatile("lock decl %0\n"
	               "setne %1\n"
	               : "+m" (*ptr), "=qm" (ret)
	               :
	               : "memory");
	return ret;
}
90
91#else /* if no x86_64 or i586 arch: use less optimized gcc >= 4.1 built-ins */
/* Atomically store <x> into *<ptr> and return the value previously held
 * there (gcc >= 4.1 __sync built-in; acquire-barrier semantics). */
static inline unsigned int xchg(unsigned int *ptr, unsigned int x)
{
	unsigned int previous;

	previous = __sync_lock_test_and_set(ptr, x);
	return previous;
}
96
/* Atomic compare-and-swap: when *<ptr> equals <old>, replace it with
 * <new>. Always returns the prior contents of *<ptr>, so the swap
 * succeeded iff the return value equals <old> (gcc >= 4.1 built-in). */
static inline unsigned int cmpxchg(unsigned int *ptr, unsigned int old, unsigned int new)
{
	unsigned int seen;

	seen = __sync_val_compare_and_swap(ptr, old, new);
	return seen;
}
101
/* Atomically decrement *<ptr>; report whether the counter is still
 * non-zero afterwards (1 = non-zero, 0 = reached zero). */
static inline unsigned char atomic_dec(unsigned int *ptr)
{
	if (__sync_sub_and_fetch(ptr, 1) != 0)
		return 1;
	return 0;
}
106
107#endif
108
/* Acquire the shared-context lock (Drepper-style futex mutex).
 * The futex word shctx->waiters encodes: 0 = unlocked, 1 = locked with
 * no waiters, 2 = locked with possible waiters. */
static inline void shared_context_lock(void)
{
	unsigned int x;

	/* fast path: uncontended acquisition moves 0 -> 1 */
	x = cmpxchg(&shctx->waiters, 0, 1);
	if (x) {
		/* contended: force the word to 2 so the holder knows
		 * someone may be sleeping */
		if (x != 2)
			x = xchg(&shctx->waiters, 2);

		/* sleep while the word stays 2; on wakeup the xchg both
		 * attempts the acquisition (previous value 0 means we
		 * got it) and re-marks the lock as contended */
		while (x) {
			syscall(SYS_futex, &shctx->waiters, FUTEX_WAIT, 2, NULL, 0, 0);
			x = xchg(&shctx->waiters, 2);
		}
	}
}
124
/* Release the shared-context lock. atomic_dec() returns non-zero when
 * the word did not drop to zero, i.e. it was 2 and contention was seen:
 * in that case fully clear the word and wake one sleeping waiter. */
static inline void shared_context_unlock(void)
{
	if (atomic_dec(&shctx->waiters)) {
		shctx->waiters = 0;
		syscall(SYS_futex, &shctx->waiters, FUTEX_WAKE, 1, NULL, 0, 0);
	}
}
132
#else /* USE_SYSCALL_FUTEX */

/* Without futexes, fall back to a process-shared pthread mutex
 * (initialized with PTHREAD_PROCESS_SHARED in shared_context_init()). */
#define shared_context_lock(v) pthread_mutex_lock(&shctx->mutex)
#define shared_context_unlock(v) pthread_mutex_unlock(&shctx->mutex)

#endif
139
/* List Macros */

/* Detach <s> from whichever doubly-linked list it is in.
 * NOTE(review): these list macros expand to several bare statements and
 * are not wrapped in do { } while (0); they must only be used where a
 * compound statement is acceptable (never as an unbraced 'if' body). */
#define shsess_unset(s)		(s)->n->p = (s)->p; \
				(s)->p->n = (s)->n;

/* Move <s> to the head of the free list. */
#define shsess_set_free(s)	shsess_unset(s) \
				(s)->p = &shctx->free; \
				(s)->n = shctx->free.n; \
				shctx->free.n->p = s; \
				shctx->free.n = s;


/* Move <s> to the head of the active list (most recently used). */
#define shsess_set_active(s)	shsess_unset(s) \
				(s)->p = &shctx->active; \
				(s)->n = shctx->active.n; \
				shctx->active.n->p = s; \
				shctx->active.n = s;


/* Pick the slot to (re)use: the tail of the free list, or the oldest
 * active entry (LRU tail) when free.p == free.n — which is true for an
 * empty free list (both point at the head).
 * NOTE(review): the test is also true when exactly one free element
 * remains, so that last free slot is skipped in favor of recycling an
 * active entry — confirm this is intended. */
#define shsess_get_next()	(shctx->free.p == shctx->free.n) ? \
				shctx->active.p : shctx->free.p;

/* Tree Macros */

/* Remove <s> from the session-id tree. */
#define shsess_tree_delete(s)	ebmb_delete(&(s)->key);

/* Insert <s> into the tree; on a duplicate key the already existing
 * node is returned instead of <s>. Never returns NULL. */
#define shsess_tree_insert(s)	(struct shared_session *)ebmb_insert(&shctx->active.key.node.branches, \
				&(s)->key, SSL_MAX_SSL_SESSION_ID_LENGTH);

/* Look up a session by its zero-padded id <k>; NULL if not found. */
#define shsess_tree_lookup(k)	(struct shared_session *)ebmb_lookup(&shctx->active.key.node.branches, \
				(k), SSL_MAX_SSL_SESSION_ID_LENGTH);

/* Other Macros */

/* Store key <k> of length <l> into <s>->key_data, zero-padding up to
 * SSL_MAX_SSL_SESSION_ID_LENGTH so every tree key has a fixed size. */
#define shsess_set_key(s,k,l)	{ memcpy((s)->key_data, (k), (l)); \
				if ((l) < SSL_MAX_SSL_SESSION_ID_LENGTH) \
					memset((s)->key_data+(l), 0, SSL_MAX_SSL_SESSION_ID_LENGTH-(l)); };
177
178
179/* SSL context callbacks */
180
/* OpenSSL "new session" callback: ASN1-encode <sess> and insert it into
 * the shared cache keyed by its zero-padded session id, recycling a free
 * or least-recently-used slot. Also forwards the padded-id + encoded
 * session to the optional user-level callback. Always returns 0 so
 * OpenSSL does not keep an extra reference on the session. */
int shctx_new_cb(SSL *ssl, SSL_SESSION *sess)
{
	struct shared_session *shsess;
	unsigned char *data,*p;
	unsigned int data_len;
	unsigned char encsess[SHSESS_MAX_ENCODED_LEN];
	(void)ssl; /* unused */

	/* check that the size reserved in a cache slot is large enough
	 * for the ASN1-encoded session */
	data_len=i2d_SSL_SESSION(sess, NULL);
	if(data_len > SHSESS_MAX_DATA_LEN)
		return 0;

	/* process ASN1 session encoding before the lock: lower cost */
	p = data = encsess+SSL_MAX_SSL_SESSION_ID_LENGTH;
	i2d_SSL_SESSION(sess, &p);

	shared_context_lock();

	/* grab a slot to reuse: a free slot, or the LRU active entry */
	shsess = shsess_get_next();

	/* drop its previous key from the tree before re-keying it */
	shsess_tree_delete(shsess);

	shsess_set_key(shsess, sess->session_id, sess->session_id_length);

	/* it returns the already existing node or current node if none, never returns null */
	shsess = shsess_tree_insert(shsess);

	/* store ASN1 encoded session into cache */
	shsess->data_len = data_len;
	memcpy(shsess->data, data, data_len);

	/* store creation date */
	shsess->c_date = SSL_SESSION_get_time(sess);

	/* mark as most recently used */
	shsess_set_active(shsess);

	shared_context_unlock();

	if (shared_session_new_cbk) { /* if user level callback is set */
		/* copy sessionid padded with 0 into the sessionid + data aligned buffer */
		memcpy(encsess, sess->session_id, sess->session_id_length);
		if (sess->session_id_length < SSL_MAX_SSL_SESSION_ID_LENGTH)
			memset(encsess+sess->session_id_length, 0, SSL_MAX_SSL_SESSION_ID_LENGTH-sess->session_id_length);

		shared_session_new_cbk(encsess, SSL_MAX_SSL_SESSION_ID_LENGTH+data_len, SSL_SESSION_get_time(sess));
	}

	return 0; /* do not increment session reference count */
}
232
/* OpenSSL "get session" callback, invoked on a resumption attempt when
 * the session was not found in OpenSSL's own cache: look the id up in
 * the shared cache and rebuild an SSL_SESSION from the stored ASN1
 * data. Returns the decoded session, or NULL if absent or undecodable. */
SSL_SESSION *shctx_get_cb(SSL *ssl, unsigned char *key, int key_len, int *do_copy)
{
	struct shared_session *shsess;
	unsigned char data[SHSESS_MAX_DATA_LEN], *p;
	unsigned char tmpkey[SSL_MAX_SSL_SESSION_ID_LENGTH];
	unsigned int data_len;
	long cdate;
	SSL_SESSION *sess;
	(void)ssl; /* unused */

	/* allow the session to be freed automatically by openssl */
	*do_copy = 0;

	/* tree key is the zero-padded sessionid */
	if (key_len < SSL_MAX_SSL_SESSION_ID_LENGTH) {
		memcpy(tmpkey, key, key_len);
		memset(tmpkey + key_len, 0, SSL_MAX_SSL_SESSION_ID_LENGTH - key_len);
		key = tmpkey;
	}

	/* lock cache */
	shared_context_lock();

	/* lookup for session */
	shsess = shsess_tree_lookup(key);
	if (!shsess) {
		/* no session found: unlock cache and exit */
		shared_context_unlock();
		return NULL;
	}

	/* backup creation date to restore it in the session after ASN1 decode */
	cdate = shsess->c_date;

	/* copy ASN1 session data so decoding can happen outside the lock */
	data_len = shsess->data_len;
	memcpy(data, shsess->data, shsess->data_len);

	/* refresh the entry's LRU position */
	shsess_set_active(shsess);

	shared_context_unlock();

	/* decode ASN1 session */
	p = data;
	sess = d2i_SSL_SESSION(NULL, (const unsigned char **)&p, data_len);

	/* restore the original creation date */
	if (sess)
		SSL_SESSION_set_time(sess, cdate);

	return sess;
}
286
/* OpenSSL "remove session" callback: the session was evicted or
 * invalidated, so release its shared-cache slot back to the free list.
 * The slot keeps its tree node until it gets recycled and re-keyed. */
void shctx_remove_cb(SSL_CTX *ctx, SSL_SESSION *sess)
{
	struct shared_session *shsess;
	unsigned char tmpkey[SSL_MAX_SSL_SESSION_ID_LENGTH];
	unsigned char *key = sess->session_id;
	(void)ctx; /* unused */

	/* tree key is the zero-padded sessionid */
	if (sess->session_id_length < SSL_MAX_SSL_SESSION_ID_LENGTH) {
		memcpy(tmpkey, sess->session_id, sess->session_id_length);
		memset(tmpkey+sess->session_id_length, 0, SSL_MAX_SSL_SESSION_ID_LENGTH - sess->session_id_length);
		key = tmpkey;
	}

	shared_context_lock();

	/* lookup for session */
	shsess = shsess_tree_lookup(key);
	if (shsess) {
		/* hand the slot back to the free list */
		shsess_set_free(shsess);
	}

	/* unlock cache */
	shared_context_unlock();
}
313
/* User level function called to add a session to the cache (remote updates).
 * <encsess> must hold the zero-padded session id
 * (SSL_MAX_SSL_SESSION_ID_LENGTH bytes) immediately followed by the
 * ASN1-encoded session; <len> is the total length of that buffer.
 * A non-zero <cdate> overrides the stored creation date. Buffers that
 * are too short or too long are silently ignored. */
void shctx_sess_add(const unsigned char *encsess, unsigned int len, long cdate)
{
	struct shared_session *shsess;

	/* check buffer is at least padded key long + 1 byte
	   and data_len not too long */
	if ((len <= SSL_MAX_SSL_SESSION_ID_LENGTH)
	    || (len > SHSESS_MAX_DATA_LEN+SSL_MAX_SSL_SESSION_ID_LENGTH))
		return;

	shared_context_lock();

	/* grab a slot to reuse: a free slot, or the LRU active entry */
	shsess = shsess_get_next();

	/* drop its previous key from the tree before re-keying it */
	shsess_tree_delete(shsess);

	shsess_set_key(shsess, encsess, SSL_MAX_SSL_SESSION_ID_LENGTH);

	/* it returns the already existing node or current node if none, never returns null */
	shsess = shsess_tree_insert(shsess);

	/* store into cache and update earlier on session get events */
	if (cdate)
		shsess->c_date = (long)cdate;

	/* copy ASN1 session data into cache */
	shsess->data_len = len-SSL_MAX_SSL_SESSION_ID_LENGTH;
	memcpy(shsess->data, encsess+SSL_MAX_SSL_SESSION_ID_LENGTH, shsess->data_len);

	/* mark as most recently used */
	shsess_set_active(shsess);

	shared_context_unlock();
}
348
/* Register <func> as the user-level callback invoked each time a new
 * session is added to the local cache (see shctx_new_cb()); it receives
 * the padded-id + ASN1 buffer, its total length, and the creation date.
 * Pass NULL to disable. */
void shsess_set_new_cbk(void (*func)(unsigned char *, unsigned int, long))
{
	shared_session_new_cbk = func;
}
354
/* Allocate shared memory context.
 * size is maximum cached sessions.
 * if set less or equal to 0, SHCTX_DEFAULT_SIZE is used.
 * Returns: -1 on alloc failure, size if it performs context alloc,
 * and 0 if cache is already allocated */
int shared_context_init(int size)
{
	int i;
#ifndef USE_SYSCALL_FUTEX
	pthread_mutexattr_t attr;
#endif /* USE_SYSCALL_FUTEX */
	struct shared_session *prev,*cur;

	/* already initialized: nothing to do */
	if (shctx)
		return 0;

	if (size<=0)
		size = SHCTX_DEFAULT_SIZE;

	/* one anonymous shared mapping: the context header immediately
	 * followed by <size> session slots, shared across fork() */
	shctx = (struct shared_context *)mmap(NULL, sizeof(struct shared_context)+(size*sizeof(struct shared_session)),
	                                      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
	if (!shctx || shctx == MAP_FAILED) {
		shctx = NULL;
		return -1;
	}

#ifdef USE_SYSCALL_FUTEX
	shctx->waiters = 0; /* futex word starts unlocked */
#else
	/* the mutex must be process-shared since the segment is used by
	 * multiple processes */
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	pthread_mutex_init(&shctx->mutex, &attr);
#endif
	memset(&shctx->active.key, 0, sizeof(struct ebmb_node));
	memset(&shctx->free.key, 0, sizeof(struct ebmb_node));

	/* No duplicate authorized in tree: */
	shctx->active.key.node.branches = EB_ROOT_UNIQUE;

	/* the active list initially contains only its dummy head */
	cur = &shctx->active;
	cur->n = cur->p = cur;

	/* chain the <size> slots following the header into the free list */
	cur = &shctx->free;
	for (i = 0 ; i < size ; i++) {
		prev = cur;
		cur = (struct shared_session *)((char *)prev + sizeof(struct shared_session));
		prev->n = cur;
		cur->p = prev;
	}
	/* close the circular list back onto the free head */
	cur->n = &shctx->free;
	shctx->free.p = cur;

	return size;
}
410
411
/* Set session cache mode to server and disable openssl internal cache.
 * Set shared cache callbacks on an ssl context.
 * Shared context MUST be firstly initialized (shared_context_init()):
 * otherwise only the cache mode is changed and no callbacks are set. */
void shared_context_set_cache(SSL_CTX *ctx)
{
	SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_SERVER |
	                                    SSL_SESS_CACHE_NO_INTERNAL |
	                                    SSL_SESS_CACHE_NO_AUTO_CLEAR);
	/* nothing to hook if the shared cache was never allocated */
	if (!shctx)
		return;

	/* Set callbacks */
	SSL_CTX_sess_set_new_cb(ctx, shctx_new_cb);
	SSL_CTX_sess_set_get_cb(ctx, shctx_get_cb);
	SSL_CTX_sess_set_remove_cb(ctx, shctx_remove_cb);
427}