blob: 55cb2a77260d94cececa92050b670261c867105f [file] [log] [blame]
Emeric Brun3e541d12012-09-03 11:14:36 +02001/*
2 * shctx.h - shared context management functions for SSL
3 *
4 * Copyright (C) 2011-2012 EXCELIANCE
5 *
6 * Author: Emeric Brun - emeric@exceliance.fr
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#ifndef SHCTX_H
15#define SHCTX_H
William Lallemand24a7a752017-10-09 14:17:39 +020016
William Lallemand4f45bb92017-10-30 20:08:51 +010017#include <common/mini-clist.h>
William Lallemand24a7a752017-10-09 14:17:39 +020018#include <types/shctx.h>
19
Emeric Brun3e541d12012-09-03 11:14:36 +020020#include <stdint.h>
21
William Lallemand24a7a752017-10-09 14:17:39 +020022#ifndef USE_PRIVATE_CACHE
23#ifdef USE_PTHREAD_PSHARED
24#include <pthread.h>
25#else
26#ifdef USE_SYSCALL_FUTEX
27#include <unistd.h>
28#include <linux/futex.h>
29#include <sys/syscall.h>
Emeric Brun3e541d12012-09-03 11:14:36 +020030#endif
Emeric Brun3e541d12012-09-03 11:14:36 +020031#endif
Emeric Brun786991e2012-11-26 18:37:12 +010032#endif
33
/* Create a shared context able to hold <maxblocks> blocks of <blocksize>
 * bytes plus <extra> bytes of per-row user storage; <shared> selects a
 * mapping usable across processes.  (Exact return convention lives in
 * shctx.c, not visible from this header.) */
int shctx_init(struct shared_context **orig_shctx, int maxblocks, int blocksize, int extra, int shared);
/* Reserve enough blocks to hold <data_len> bytes as a "hot" (in-use) row;
 * returns the first block of the row — presumably NULL on exhaustion,
 * TODO confirm against shctx.c. */
struct shared_block *shctx_row_reserve_hot(struct shared_context *shctx, int data_len);
/* Move row <first> onto / off of the hot list (see shctx_block_set_hot
 * and shctx_block_set_avail below for the underlying list operations). */
void shctx_row_inc_hot(struct shared_context *shctx, struct shared_block *first);
void shctx_row_dec_hot(struct shared_context *shctx, struct shared_block *first);
/* Append <len> bytes from <data> to the end of row <first>;
 * returns an int status (semantics defined in shctx.c). */
int shctx_row_data_append(struct shared_context *shctx,
                          struct shared_block *first, unsigned char *data, int len);
/* Copy <len> bytes starting at <offset> within row <first> into <dst>;
 * returns an int status (semantics defined in shctx.c). */
int shctx_row_data_get(struct shared_context *shctx, struct shared_block *first,
                       unsigned char *dst, int offset, int len);
Emeric Bruncaa19cc2014-05-07 16:10:18 +020042
William Lallemanded0b5ad2017-10-30 19:36:36 +010043
William Lallemand24a7a752017-10-09 14:17:39 +020044/* Lock functions */
45
46#if defined (USE_PRIVATE_CACHE)
47
/* Private (per-process) cache: no locking is needed at all.  The macros
 * expand to ((void)0) rather than to nothing so that a use without a
 * trailing semicolon fails to compile, exactly like a real function call
 * would (same idiom as assert() under NDEBUG). */
#define shctx_lock(shctx)   ((void)0)
#define shctx_unlock(shctx) ((void)0)
William Lallemand24a7a752017-10-09 14:17:39 +020050
51#elif defined (USE_PTHREAD_PSHARED)
52extern int use_shared_mem;
53
/* Take/release the shared mutex, but only when the cache is really shared
 * between processes (use_shared_mem set at startup).
 *
 * Fixes over the previous version: the bare "if (use_shared_mem) ..." body
 * could capture an 'else' belonging to the calling context (dangling-else);
 * the do { } while (0) wrapper makes the macro behave as a single statement.
 * <shctx> is also parenthesized so any expression may be passed. */
#define shctx_lock(shctx)   do { if (use_shared_mem) pthread_mutex_lock(&(shctx)->mutex); } while (0)
#define shctx_unlock(shctx) do { if (use_shared_mem) pthread_mutex_unlock(&(shctx)->mutex); } while (0)
William Lallemand24a7a752017-10-09 14:17:39 +020056
57#else
58extern int use_shared_mem;
59
60#ifdef USE_SYSCALL_FUTEX
/* Block the caller until *<uaddr> stops holding <value>.  Thin wrapper
 * around FUTEX_WAIT: the kernel re-checks *<uaddr> == <value> before
 * sleeping, so a concurrent unlock cannot be missed (the classic futex
 * race guard).  <count> is unused in this variant; it only exists so the
 * signature matches the spin-wait fallback below. */
static inline void _shctx_wait4lock(unsigned int *count, unsigned int *uaddr, int value)
{
	syscall(SYS_futex, uaddr, FUTEX_WAIT, value, NULL, 0, 0);
}
65
/* Wake exactly one waiter blocked in FUTEX_WAIT on <uaddr>.  No
 * FUTEX_PRIVATE flag is used, so this works across processes sharing
 * the mapping. */
static inline void _shctx_awakelocker(unsigned int *uaddr)
{
	syscall(SYS_futex, uaddr, FUTEX_WAKE, 1, NULL, 0, 0);
}
70
71#else /* internal spin lock */
72
73#if defined (__i486__) || defined (__i586__) || defined (__i686__) || defined (__x86_64__)
/* CPU relaxation hint for spin loops: "rep;nop" encodes the x86 PAUSE
 * instruction, reducing power draw and pipeline pressure while spinning.
 * The "memory" clobber is a compiler barrier forcing the lock word to be
 * re-read on every iteration. */
static inline void relax()
{
	__asm volatile("rep;nop\n" ::: "memory");
}
78#else /* if no x86_64 or i586 arch: use less optimized but generic asm */
/* Generic fallback when no pause-style instruction is available: an empty
 * asm with a "memory" clobber still acts as a compiler barrier, so the
 * spinning loop cannot cache the lock word in a register. */
static inline void relax()
{
	__asm volatile("" ::: "memory");
}
83#endif
84
/* Spin-wait fallback used when neither pthreads nor futexes are enabled:
 * burn roughly *<count> iterations of the CPU relaxation hint, then double
 * the budget for the next round (exponential backoff).  <uaddr> and <value>
 * are accepted but unused, so the signature matches the futex variant.
 *
 * Fix: the loop counter is now unsigned.  It is compared against the
 * unsigned backoff counter, and with the previous 'int i' the increment
 * would hit signed-overflow undefined behavior once repeated doubling
 * pushed *count beyond INT_MAX. */
static inline void _shctx_wait4lock(unsigned int *count, unsigned int *uaddr, int value)
{
	unsigned int i;

	for (i = 0; i < *count; i++) {
		relax();
		relax();
	}
	*count = *count << 1;
}
95
/* The spinning variant needs no explicit wakeup: waiters notice the lock
 * word changing on their own, so this is a no-op. */
#define _shctx_awakelocker(a)
William Lallemand24a7a752017-10-09 14:17:39 +020097
98#endif
99
100#if defined (__i486__) || defined (__i586__) || defined (__i686__) || defined (__x86_64__)
/* Atomic exchange: store <x> into *<ptr> and return the value previously
 * held.  On x86 the lock prefix is actually implicit for xchg with a
 * memory operand, making this a full barrier.  The "0" constraint reuses
 * operand 0's register for the input, so <x> goes in and the old value
 * comes back out in the same place. */
static inline unsigned int xchg(unsigned int *ptr, unsigned int x)
{
	__asm volatile("lock xchgl %0,%1"
		       : "=r" (x), "+m" (*ptr)
		       : "0" (x)
		       : "memory");
	return x;
}
109
/* Atomic compare-and-swap: if *<ptr> == <old>, store <new>; in all cases
 * return the value *<ptr> held before the operation (callers detect
 * success by comparing the result to <old>).  The cmpxchg instruction
 * implicitly uses %eax for the expected value, hence the "=a"/"0"
 * constraints tying <old> and the result to that register. */
static inline unsigned int cmpxchg(unsigned int *ptr, unsigned int old, unsigned int new)
{
	unsigned int ret;

	__asm volatile("lock cmpxchgl %2,%1"
		       : "=a" (ret), "+m" (*ptr)
		       : "r" (new), "0" (old)
		       : "memory");
	return ret;
}
120
/* Atomically decrement *<ptr>.  Returns 1 if the new value is non-zero,
 * 0 when the counter dropped to zero: "setne" captures the ZF flag set
 * by the locked decrement. */
static inline unsigned char atomic_dec(unsigned int *ptr)
{
	unsigned char ret;
	__asm volatile("lock decl %0\n"
		       "setne %1\n"
		       : "+m" (*ptr), "=qm" (ret)
		       :
		       : "memory");
	return ret;
}
131
132#else /* if no x86_64 or i586 arch: use less optimized gcc >= 4.1 built-ins */
/* Atomic exchange: store <x> into *<ptr> and return the value previously
 * held.  Portable variant built on the legacy GCC __sync atomic builtins
 * (gcc >= 4.1). */
static inline unsigned int xchg(unsigned int *ptr, unsigned int x)
{
	unsigned int previous = __sync_lock_test_and_set(ptr, x);

	return previous;
}
137
/* Atomic compare-and-swap: if *<ptr> == <old>, store <new>; in all cases
 * return the value *<ptr> held before the operation.  Portable variant
 * built on the legacy GCC __sync atomic builtins (gcc >= 4.1). */
static inline unsigned int cmpxchg(unsigned int *ptr, unsigned int old, unsigned int new)
{
	unsigned int seen = __sync_val_compare_and_swap(ptr, old, new);

	return seen;
}
142
/* Atomically decrement *<ptr>.  Returns 1 if the new value is non-zero,
 * 0 when the counter dropped to zero.  Portable variant built on the
 * legacy GCC __sync atomic builtins (gcc >= 4.1). */
static inline unsigned char atomic_dec(unsigned int *ptr)
{
	unsigned int remaining = __sync_sub_and_fetch(ptr, 1);

	return remaining != 0;
}
147
148#endif
149
/* Acquire the shared context lock.
 *
 * shctx->waiters is the lock word, following the classic futex mutex
 * pattern (cf. U. Drepper, "Futexes Are Tricky"):
 *   0 = unlocked, 1 = locked/uncontended, 2 = locked/contended.
 * Do not reorder: the exact cmpxchg/xchg sequence is what makes the
 * algorithm correct. */
static inline void _shctx_lock(struct shared_context *shctx)
{
	unsigned int x;
	unsigned int count = 4; /* initial spin budget; the spin-wait doubles it each round */

	/* fast path: try to go 0 -> 1 in one shot */
	x = cmpxchg(&shctx->waiters, 0, 1);
	if (x) {
		/* lock is held: advertise contention (word = 2) unless
		 * another waiter already did */
		if (x != 2)
			x = xchg(&shctx->waiters, 2);

		/* wait while the word stays 2; each wakeup retries by
		 * swapping in 2 again — grabbing the lock if 0 came back */
		while (x) {
			_shctx_wait4lock(&count, &shctx->waiters, 2);
			x = xchg(&shctx->waiters, 2);
		}
	}
}
166
/* Release the shared context lock.  Decrementing leaves 0 when the word
 * was 1 (uncontended: nothing more to do, atomic_dec returns 0).  A
 * non-zero result means the word was 2 (contended): clear it entirely
 * and wake one waiter so it can retry. */
static inline void _shctx_unlock(struct shared_context *shctx)
{
	if (atomic_dec(&shctx->waiters)) {
		shctx->waiters = 0;
		_shctx_awakelocker(&shctx->waiters);
	}
}
174
/* Public lock/unlock entry points for the futex/spinlock build: only lock
 * when the cache is really shared between processes (use_shared_mem set
 * at startup).
 *
 * Fix over the previous version: the bare "if (use_shared_mem) ..." body
 * could capture an 'else' belonging to the calling context (dangling-else
 * hazard); the do { } while (0) wrapper makes each macro behave as a
 * single statement. */
#define shctx_lock(shctx)   do { if (use_shared_mem) _shctx_lock(shctx); } while (0)

#define shctx_unlock(shctx) do { if (use_shared_mem) _shctx_unlock(shctx); } while (0)
William Lallemand24a7a752017-10-09 14:17:39 +0200178
179#endif
180
William Lallemanded0b5ad2017-10-30 19:36:36 +0100181/* List Macros */
182
William Lallemand4f45bb92017-10-30 20:08:51 +0100183static inline void shctx_block_set_hot(struct shared_context *shctx,
William Lallemanded0b5ad2017-10-30 19:36:36 +0100184 struct shared_block *s)
185{
William Lallemand4f45bb92017-10-30 20:08:51 +0100186 shctx->nbav--;
187 LIST_DEL(&s->list);
188 LIST_ADDQ(&shctx->hot, &s->list);
William Lallemanded0b5ad2017-10-30 19:36:36 +0100189}
190
William Lallemand4f45bb92017-10-30 20:08:51 +0100191static inline void shctx_block_set_avail(struct shared_context *shctx,
William Lallemanded0b5ad2017-10-30 19:36:36 +0100192 struct shared_block *s)
193{
William Lallemand4f45bb92017-10-30 20:08:51 +0100194 shctx->nbav++;
195 LIST_DEL(&s->list);
196 LIST_ADDQ(&shctx->avail, &s->list);
William Lallemanded0b5ad2017-10-30 19:36:36 +0100197}
William Lallemand24a7a752017-10-09 14:17:39 +0200198
Emeric Brun3e541d12012-09-03 11:14:36 +0200199#endif /* SHCTX_H */
200