/*
 * shctx.c - shared context management functions for SSL
 *
 * Copyright (C) 2011-2012 EXCELIANCE
 *
 * Author: Emeric Brun - emeric@exceliance.fr
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
13
14#include <sys/mman.h>
15#ifdef USE_SYSCALL_FUTEX
16#include <unistd.h>
17#include <linux/futex.h>
18#include <sys/syscall.h>
19#else /* USE_SYSCALL_FUTEX */
20#include <pthread.h>
21#endif /* USE_SYSCALL_FUTEX */
22
23#include "ebmbtree.h"
24#include "proto/shctx.h"
25
/* One cache slot. Each slot is simultaneously:
 *  - a node of the ebmb lookup tree indexed on the zero-padded session id
 *    (<key_data> supplies the storage directly behind <key> -- presumably
 *    the flexible key array of ebmb_node; keep these two fields adjacent
 *    -- TODO confirm against ebmbtree.h),
 *  - a link of one of the two circular doubly-linked lists (<p>/<n>)
 *    anchored in shared_context: "active" or "free".
 */
struct shared_session {
	struct ebmb_node key;                                  /* tree node, keyed by <key_data> */
	unsigned char key_data[SSL_MAX_SSL_SESSION_ID_LENGTH]; /* zero-padded session id */
	long c_date;                                           /* session creation date */
	int data_len;                                          /* bytes used in <data> */
	unsigned char data[SHSESS_MAX_DATA_LEN];               /* ASN1-encoded session */
	struct shared_session *p;                              /* previous slot in list */
	struct shared_session *n;                              /* next slot in list */
};
35
36
/* Cache head, placed at the start of the shared mmap area.
 * <active> and <free> are the anchors of the two circular session lists;
 * <active>.key.node.branches also serves as the root of the lookup tree. */
struct shared_context {
#ifdef USE_SYSCALL_FUTEX
	unsigned int waiters;  /* futex lock word: 0 unlocked, 1 locked, 2 locked w/ waiters */
#else /* USE_SYSCALL_FUTEX */
	pthread_mutex_t mutex; /* process-shared mutex (see shared_context_init) */
#endif
	struct shared_session active; /* anchor of the in-use list + tree root */
	struct shared_session free;   /* anchor of the recyclable-slot list */
};
46
/* Static shared context, mmap'ed by shared_context_init(); NULL until then */
static struct shared_context *shctx = NULL;

/* Optional user-level callback fired by shctx_new_cb() after each local
 * insertion; installed through shsess_set_new_cbk() */
static void (*shared_session_new_cbk)(unsigned char *session, unsigned int session_len, long cdate);
52
53
54/* Lock functions */
55#ifdef USE_SYSCALL_FUTEX
56#if defined (__i586__) || defined (__x86_64__)
/* Atomically exchange <x> with *ptr and return the previous value.
 * (xchgl with a memory operand is implicitly locked on x86; the explicit
 * lock prefix is redundant but harmless.) */
static inline unsigned int xchg(unsigned int *ptr, unsigned int x)
{
	__asm volatile("lock xchgl %0,%1"
		     : "=r" (x), "+m" (*ptr)
		     : "0" (x)
		     : "memory");
	return x;
}
65
/* Atomically store <new> into *ptr if *ptr equals <old>.
 * Always returns the value *ptr held before the operation
 * (equal to <old> exactly when the swap succeeded). */
static inline unsigned int cmpxchg(unsigned int *ptr, unsigned int old, unsigned int new)
{
	unsigned int ret;

	__asm volatile("lock cmpxchgl %2,%1"
		     : "=a" (ret), "+m" (*ptr)
		     : "r" (new), "0" (old)
		     : "memory");
	return ret;
}
76
/* Atomically decrement *ptr. Returns 1 if the new value is non-zero,
 * 0 when it dropped to zero (setne reads the ZF left by decl). */
static inline unsigned char atomic_dec(unsigned int *ptr)
{
	unsigned char ret;
	__asm volatile("lock decl %0\n"
		     "setne %1\n"
		     : "+m" (*ptr), "=qm" (ret)
		     :
		     : "memory");
	return ret;
}
87
88#else /* if no x86_64 or i586 arch: use less optimized gcc >= 4.1 built-ins */
/* Atomic swap: store <x> into *ptr and return the value it held before.
 * Note: __sync_lock_test_and_set only guarantees acquire ordering, which
 * is sufficient for the lock-taking path it is used on. */
static inline unsigned int xchg(unsigned int *ptr, unsigned int x)
{
	unsigned int prev = __sync_lock_test_and_set(ptr, x);
	return prev;
}
93
/* Atomic compare-and-swap: if *ptr == <old>, store <new>; in all cases
 * return the value observed in *ptr before the operation. */
static inline unsigned int cmpxchg(unsigned int *ptr, unsigned int old, unsigned int new)
{
	unsigned int seen = __sync_val_compare_and_swap(ptr, old, new);
	return seen;
}
98
/* Atomic decrement: lower *ptr by one and report whether the result is
 * still non-zero (1) or reached zero (0). */
static inline unsigned char atomic_dec(unsigned int *ptr)
{
	return (__sync_sub_and_fetch(ptr, 1) != 0);
}
103
104#endif
105
/* Take the shared-context lock (Drepper-style futex mutex).
 * shctx->waiters is the lock word: 0 = unlocked, 1 = locked with no
 * waiter, 2 = locked with possible waiters. */
static inline void shared_context_lock(void)
{
	unsigned int x;

	/* fast path: grab a free lock (0 -> 1) */
	x = cmpxchg(&shctx->waiters, 0, 1);
	if (x) {
		/* contended: advertise waiters by forcing the word to 2 */
		if (x != 2)
			x = xchg(&shctx->waiters, 2);

		while (x) {
			/* sleep only while the word still reads 2, then retry;
			 * xchg leaves 2 behind so the holder knows to wake us */
			syscall(SYS_futex, &shctx->waiters, FUTEX_WAIT, 2, NULL, 0, 0);
			x = xchg(&shctx->waiters, 2);
		}
	}
}
121
/* Release the shared-context lock. atomic_dec() returns non-zero when the
 * word did not drop to 0, i.e. it held 2 (possible sleepers): clear it
 * completely and wake one of them up. The uncontended case (1 -> 0) needs
 * no syscall at all. */
static inline void shared_context_unlock(void)
{
	if (atomic_dec(&shctx->waiters)) {
		shctx->waiters = 0;
		syscall(SYS_futex, &shctx->waiters, FUTEX_WAKE, 1, NULL, 0, 0);
	}
}
129
#else /* USE_SYSCALL_FUTEX */

/* pthread fallback: the mutex is made process-shared in
 * shared_context_init(). The dummy parameter lets call sites keep the
 * same "shared_context_lock()" function-call syntax as the futex build. */
#define shared_context_lock(v) pthread_mutex_lock(&shctx->mutex)
#define shared_context_unlock(v) pthread_mutex_unlock(&shctx->mutex)

#endif
136
/* List Macros
 *
 * The cache keeps two circular doubly-linked lists anchored in shctx:
 * <active> (sessions in use, most recently touched at the head) and
 * <free> (recyclable slots). All of these macros expect the shared
 * context lock to be held. Multi-statement macros are wrapped in
 * do { } while (0) so they expand safely inside unbraced if/else. */

/* Detach <s> from whatever list it is currently linked into. */
#define shsess_unset(s) do { \
		(s)->n->p = (s)->p; \
		(s)->p->n = (s)->n; \
	} while (0)

/* Move <s> to the head of the free list. */
#define shsess_set_free(s) do { \
		shsess_unset(s); \
		(s)->p = &shctx->free; \
		(s)->n = shctx->free.n; \
		shctx->free.n->p = (s); \
		shctx->free.n = (s); \
	} while (0)

/* Move <s> to the head (MRU position) of the active list. */
#define shsess_set_active(s) do { \
		shsess_unset(s); \
		(s)->p = &shctx->active; \
		(s)->n = shctx->active.n; \
		shctx->active.n->p = (s); \
		shctx->active.n = (s); \
	} while (0)

/* Pick the next slot to (re)use: a free slot if available, otherwise the
 * oldest active one (tail of the active list).
 * NOTE(review): free.p == free.n is also true when exactly ONE free slot
 * remains, so that last slot is skipped in favour of recycling the oldest
 * active entry -- preserved as-is, confirm whether this is intentional. */
#define shsess_get_next() \
	((shctx->free.p == shctx->free.n) ? shctx->active.p : shctx->free.p)

/* Tree Macros */

/* Remove <s> from the lookup tree (no-op if it is not linked). */
#define shsess_tree_delete(s) ebmb_delete(&(s)->key)

/* Insert <s> into the lookup tree; returns the already existing node on
 * duplicate key, <s> itself otherwise -- never NULL. */
#define shsess_tree_insert(s) ((struct shared_session *)ebmb_insert(&shctx->active.key.node.branches, \
                                                                    &(s)->key, SSL_MAX_SSL_SESSION_ID_LENGTH))

/* Look up the slot whose padded session id equals <k>, or NULL. */
#define shsess_tree_lookup(k) ((struct shared_session *)ebmb_lookup(&shctx->active.key.node.branches, \
                                                                    (k), SSL_MAX_SSL_SESSION_ID_LENGTH))

/* Other Macros */

/* Copy key <k> of length <l> into <s>->key_data, zero-padding the
 * remainder up to SSL_MAX_SSL_SESSION_ID_LENGTH. */
#define shsess_set_key(s, k, l) do { \
		memcpy((s)->key_data, (k), (l)); \
		if ((l) < SSL_MAX_SSL_SESSION_ID_LENGTH) \
			memset((s)->key_data + (l), 0, SSL_MAX_SSL_SESSION_ID_LENGTH - (l)); \
	} while (0)
174
175
176/* SSL context callbacks */
177
/* SSL callback used on new session creation: stores the ASN1-encoded
 * session in the shared cache (recycling a slot via shsess_get_next),
 * then optionally forwards the padded-id + payload buffer to the user
 * level callback. Always returns 0 so openssl does not keep an extra
 * reference on <sess>.
 * NOTE(review): direct access to sess->session_id implies an openssl
 * version where SSL_SESSION is not opaque -- confirm against the build. */
int shctx_new_cb(SSL *ssl, SSL_SESSION *sess)
{
	struct shared_session *shsess;
	unsigned char *data,*p;
	unsigned int data_len;
	unsigned char encsess[SHSESS_MAX_ENCODED_LEN];
	(void)ssl;

	/* check if session reserved size in aligned buffer is large enough for the ASN1 encoded session */
	data_len=i2d_SSL_SESSION(sess, NULL);
	if(data_len > SHSESS_MAX_DATA_LEN)
		return 0;

	/* process ASN1 session encoding before the lock: lower cost.
	 * the payload is written past the first SSL_MAX_SSL_SESSION_ID_LENGTH
	 * bytes of <encsess> so the id can be prepended later for the cbk */
	p = data = encsess+SSL_MAX_SSL_SESSION_ID_LENGTH;
	i2d_SSL_SESSION(sess, &p);

	shared_context_lock();

	/* get a free slot, or recycle the oldest active one */
	shsess = shsess_get_next();

	/* take the slot out of the tree before overwriting its key */
	shsess_tree_delete(shsess);

	shsess_set_key(shsess, sess->session_id, sess->session_id_length);

	/* it returns the already existing node or current node if none, never returns null */
	shsess = shsess_tree_insert(shsess);

	/* store ASN1 encoded session into cache */
	shsess->data_len = data_len;
	memcpy(shsess->data, data, data_len);

	/* store creation date */
	shsess->c_date = SSL_SESSION_get_time(sess);

	/* promote to the MRU position of the active list */
	shsess_set_active(shsess);

	shared_context_unlock();

	if (shared_session_new_cbk) { /* if user level callback is set */
		/* copy sessionid padded with 0 into the sessionid + data aligned buffer */
		memcpy(encsess, sess->session_id, sess->session_id_length);
		if (sess->session_id_length < SSL_MAX_SSL_SESSION_ID_LENGTH)
			memset(encsess+sess->session_id_length, 0, SSL_MAX_SSL_SESSION_ID_LENGTH-sess->session_id_length);

		shared_session_new_cbk(encsess, SSL_MAX_SSL_SESSION_ID_LENGTH+data_len, SSL_SESSION_get_time(sess));
	}

	return 0; /* do not increment session reference count */
}
229
/* SSL callback used to look up a session that openssl did not find in its
 * internal cache. Returns a freshly decoded SSL_SESSION (owned by openssl,
 * since *do_copy is cleared) or NULL when the id is not cached.
 * The ASN1 payload is copied out under the lock and decoded outside it to
 * keep the critical section short. */
SSL_SESSION *shctx_get_cb(SSL *ssl, unsigned char *key, int key_len, int *do_copy)
{
	struct shared_session *shsess;
	unsigned char data[SHSESS_MAX_DATA_LEN], *p;
	unsigned char tmpkey[SSL_MAX_SSL_SESSION_ID_LENGTH];
	unsigned int data_len;
	long cdate;
	SSL_SESSION *sess;
	(void)ssl;

	/* allow the session to be freed automatically by openssl */
	*do_copy = 0;

	/* tree key is zeros padded sessionid */
	if (key_len < SSL_MAX_SSL_SESSION_ID_LENGTH) {
		memcpy(tmpkey, key, key_len);
		memset(tmpkey + key_len, 0, SSL_MAX_SSL_SESSION_ID_LENGTH - key_len);
		key = tmpkey;
	}

	/* lock cache */
	shared_context_lock();

	/* lookup for session */
	shsess = shsess_tree_lookup(key);
	if (!shsess) {
		/* no session found: unlock cache and exit */
		shared_context_unlock();
		return NULL;
	}

	/* backup creation date to reset in session after ASN1 decode */
	cdate = shsess->c_date;

	/* copy ASN1 session data to decode outside the lock */
	data_len = shsess->data_len;
	memcpy(data, shsess->data, shsess->data_len);

	/* a hit counts as a use: promote the slot to MRU */
	shsess_set_active(shsess);

	shared_context_unlock();

	/* decode ASN1 session */
	p = data;
	sess = d2i_SSL_SESSION(NULL, (const unsigned char **)&p, data_len);

	/* reset original creation date, lost by the encode/decode cycle */
	if (sess)
		SSL_SESSION_set_time(sess, cdate);

	return sess;
}
283
284/* SSL callback used to signal session is no more used in internal cache */
285void shctx_remove_cb(SSL_CTX *ctx, SSL_SESSION *sess)
286{
287 struct shared_session *shsess;
288 unsigned char tmpkey[SSL_MAX_SSL_SESSION_ID_LENGTH];
289 unsigned char *key = sess->session_id;
290 (void)ctx;
291
292 /* tree key is zeros padded sessionid */
293 if (sess->session_id_length < SSL_MAX_SSL_SESSION_ID_LENGTH) {
294 memcpy(tmpkey, sess->session_id, sess->session_id_length);
295 memset(tmpkey+sess->session_id_length, 0, SSL_MAX_SSL_SESSION_ID_LENGTH - sess->session_id_length);
296 key = tmpkey;
297 }
298
299 shared_context_lock();
300
301 /* lookup for session */
302 shsess = shsess_tree_lookup(key);
303 if (shsess) {
304 shsess_set_free(shsess);
305 }
306
307 /* unlock cache */
308 shared_context_unlock();
309}
310
/* User level function called to add a session to the cache (remote updates).
 * <encsess> is the same layout produced for shared_session_new_cbk():
 * SSL_MAX_SSL_SESSION_ID_LENGTH bytes of zero-padded session id followed
 * by the ASN1-encoded session; <len> is the total length of that buffer. */
void shctx_sess_add(const unsigned char *encsess, unsigned int len, long cdate)
{
	struct shared_session *shsess;

	/* check buffer is at least padded key long + 1 byte
		and data_len not too long */
	if ((len <= SSL_MAX_SSL_SESSION_ID_LENGTH)
		 || (len > SHSESS_MAX_DATA_LEN+SSL_MAX_SSL_SESSION_ID_LENGTH))
		return;

	shared_context_lock();

	/* get a free slot, or recycle the oldest active one */
	shsess = shsess_get_next();

	/* take the slot out of the tree before overwriting its key */
	shsess_tree_delete(shsess);

	shsess_set_key(shsess, encsess, SSL_MAX_SSL_SESSION_ID_LENGTH);

	/* it returns the already existing node or current node if none, never returns null */
	shsess = shsess_tree_insert(shsess);

	/* store into cache and update earlier on session get events.
	 * NOTE(review): when <cdate> is 0 the slot keeps whatever c_date the
	 * recycled entry carried -- presumably refreshed later on get events;
	 * confirm against the remote-update protocol. */
	if (cdate)
		shsess->c_date = (long)cdate;

	/* copy ASN1 session data into cache */
	shsess->data_len = len-SSL_MAX_SSL_SESSION_ID_LENGTH;
	memcpy(shsess->data, encsess+SSL_MAX_SSL_SESSION_ID_LENGTH, shsess->data_len);

	/* promote to the MRU position of the active list */
	shsess_set_active(shsess);

	shared_context_unlock();
}
345
346/* Function used to set a callback on new session creation */
347void shsess_set_new_cbk(void (*func)(unsigned char *, unsigned int, long))
348{
349 shared_session_new_cbk = func;
350}
351
/* Allocate shared memory context.
 * <size> is the maximum number of cached sessions.
 * if set less or equal to 0, SHCTX_DEFAULT_SIZE is used.
 * Returns: -1 on alloc failure, <size> if it performs context alloc,
 * and 0 if cache is already allocated */
int shared_context_init(int size)
{
	int i;
#ifndef USE_SYSCALL_FUTEX
	pthread_mutexattr_t attr;
#endif /* USE_SYSCALL_FUTEX */
	struct shared_session *prev,*cur;

	if (shctx)
		return 0;

	if (size<=0)
		size = SHCTX_DEFAULT_SIZE;

	/* one anonymous shared mapping: the context head followed by <size>
	 * contiguous session slots. MAP_SHARED makes the cache visible to
	 * every process forked after this call. */
	shctx = (struct shared_context *)mmap(NULL, sizeof(struct shared_context)+(size*sizeof(struct shared_session)),
	                                      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (!shctx || shctx == MAP_FAILED) {
		shctx = NULL;
		return -1;
	}

#ifdef USE_SYSCALL_FUTEX
	shctx->waiters = 0;
#else
	/* the mutex must be process-shared so forked users serialize properly */
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	pthread_mutex_init(&shctx->mutex, &attr);
#endif
	memset(&shctx->active.key, 0, sizeof(struct ebmb_node));
	memset(&shctx->free.key, 0, sizeof(struct ebmb_node));

	/* No duplicate authorized in tree: */
	shctx->active.key.node.branches = EB_ROOT_UNIQUE;

	/* the active list initially contains only its anchor */
	cur = &shctx->active;
	cur->n = cur->p = cur;

	/* chain the <size> slots laid out right after the context head into
	 * the circular free list (the first step from &shctx->free lands on
	 * the first slot -- relies on <free> being the last member of
	 * shared_context with no trailing padding; TODO confirm) */
	cur = &shctx->free;
	for (i = 0 ; i < size ; i++) {
		prev = cur;
		cur = (struct shared_session *)((char *)prev + sizeof(struct shared_session));
		prev->n = cur;
		cur->p = prev;
	}
	cur->n = &shctx->free;
	shctx->free.p = cur;

	return size;
}
407
408
/* Set session cache mode to server and disable openssl internal cache.
 * Set shared cache callbacks on an ssl context.
 * Shared context MUST be firstly initialized */
void shared_context_set_cache(SSL_CTX *ctx)
{
	/* server-side caching only; keep openssl's own cache out of the way
	 * and let the shared cache handle eviction */
	SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_SERVER |
	                               SSL_SESS_CACHE_NO_INTERNAL |
	                               SSL_SESS_CACHE_NO_AUTO_CLEAR);
	/* without an initialized shared context, leave the default callbacks */
	if (!shctx)
		return;

	/* Set callbacks */
	SSL_CTX_sess_set_new_cb(ctx, shctx_new_cb);
	SSL_CTX_sess_set_get_cb(ctx, shctx_get_cb);
	SSL_CTX_sess_set_remove_cb(ctx, shctx_remove_cb);
}
424}