#ifndef PL_ATOMIC_OPS_H
#define PL_ATOMIC_OPS_H


/* compiler-only memory barrier, for use around locks */
static inline void pl_barrier()
{
        asm volatile("" ::: "memory");
}

/* full memory barrier */
static inline void pl_mb()
{
        __sync_synchronize();
}
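
/* Usage sketch (illustrative only, not part of this header): publishing data
 * to another thread. The names <shared_data>, <data_ready> and publish() are
 * hypothetical. pl_mb() orders the payload store before the flag store on
 * architectures that may reorder stores; pl_barrier() would only prevent the
 * compiler, not the CPU, from reordering.
 *
 *   static struct msg shared_data;   // hypothetical payload type
 *   static unsigned int data_ready;  // 0 = empty, 1 = ready
 *
 *   static void publish(const struct msg *m)
 *   {
 *           shared_data = *m;
 *           pl_mb();                 // make the payload visible first
 *           data_ready = 1;
 *   }
 */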

#if defined(__i386__) || defined (__i486__) || defined (__i586__) || defined (__i686__) || defined (__x86_64__)

/*
 * Generic functions common to the x86 family
 */

static inline void pl_cpu_relax()
{
        asm volatile("rep;nop\n");
}
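
/* Usage sketch (illustrative only): busy-waiting on a flag. pl_cpu_relax()
 * inside the loop tells the CPU this is a spin-wait so it can save power and
 * pipeline resources; pl_barrier() forces the flag to be re-read from memory
 * on each iteration. <stop_requested> is a hypothetical flag.
 *
 *   static unsigned int stop_requested;
 *
 *   static void wait_for_stop(void)
 *   {
 *           while (!stop_requested) {
 *                   pl_cpu_relax();
 *                   pl_barrier();
 *           }
 *   }
 */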

/* increment integer value pointed to by pointer <ptr>, and return non-zero if
 * the result is non-zero.
 */
#define pl_inc(ptr) ( \
        (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
                unsigned char ret; \
                asm volatile("lock incq %0\n" \
                             "setne %1\n" \
                             : "+m" (*(ptr)), "=qm" (ret) \
                             : \
                             : "cc"); \
                ret; /* return value */ \
        }) : (sizeof(*(ptr)) == 4) ? ({ \
                unsigned char ret; \
                asm volatile("lock incl %0\n" \
                             "setne %1\n" \
                             : "+m" (*(ptr)), "=qm" (ret) \
                             : \
                             : "cc"); \
                ret; /* return value */ \
        }) : (sizeof(*(ptr)) == 2) ? ({ \
                unsigned char ret; \
                asm volatile("lock incw %0\n" \
                             "setne %1\n" \
                             : "+m" (*(ptr)), "=qm" (ret) \
                             : \
                             : "cc"); \
                ret; /* return value */ \
        }) : (sizeof(*(ptr)) == 1) ? ({ \
                unsigned char ret; \
                asm volatile("lock incb %0\n" \
                             "setne %1\n" \
                             : "+m" (*(ptr)), "=qm" (ret) \
                             : \
                             : "cc"); \
                ret; /* return value */ \
        }) : ({ \
                void __unsupported_argument_size_for_pl_inc__(char *,int); \
                __unsupported_argument_size_for_pl_inc__(__FILE__,__LINE__); \
                0; \
        }) \
)

/* decrement integer value pointed to by pointer <ptr>, and return non-zero if
 * the result is non-zero.
 */
#define pl_dec(ptr) ( \
        (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
                unsigned char ret; \
                asm volatile("lock decq %0\n" \
                             "setne %1\n" \
                             : "+m" (*(ptr)), "=qm" (ret) \
                             : \
                             : "cc"); \
                ret; /* return value */ \
        }) : (sizeof(*(ptr)) == 4) ? ({ \
                unsigned char ret; \
                asm volatile("lock decl %0\n" \
                             "setne %1\n" \
                             : "+m" (*(ptr)), "=qm" (ret) \
                             : \
                             : "cc"); \
                ret; /* return value */ \
        }) : (sizeof(*(ptr)) == 2) ? ({ \
                unsigned char ret; \
                asm volatile("lock decw %0\n" \
                             "setne %1\n" \
                             : "+m" (*(ptr)), "=qm" (ret) \
                             : \
                             : "cc"); \
                ret; /* return value */ \
        }) : (sizeof(*(ptr)) == 1) ? ({ \
                unsigned char ret; \
                asm volatile("lock decb %0\n" \
                             "setne %1\n" \
                             : "+m" (*(ptr)), "=qm" (ret) \
                             : \
                             : "cc"); \
                ret; /* return value */ \
        }) : ({ \
                void __unsupported_argument_size_for_pl_dec__(char *,int); \
                __unsupported_argument_size_for_pl_dec__(__FILE__,__LINE__); \
                0; \
        }) \
)
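
/* Usage sketch (illustrative only): a reference counter. pl_inc()/pl_dec()
 * return non-zero as long as the result is non-zero, so the last holder to
 * call obj_drop() sees pl_dec() return 0 and may release the object. The
 * struct and the destructor below are hypothetical.
 *
 *   struct obj {
 *           unsigned int refcnt;
 *   };
 *
 *   static void obj_take(struct obj *o)
 *   {
 *           pl_inc_noret(&o->refcnt);   // see pl_inc_noret() below
 *   }
 *
 *   static void obj_drop(struct obj *o)
 *   {
 *           if (!pl_dec(&o->refcnt))
 *                   obj_free(o);        // hypothetical destructor
 *   }
 */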

/* increment integer value pointed to by pointer <ptr>, no return */
#define pl_inc_noret(ptr) ({ \
        if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
                asm volatile("lock incq %0\n" \
                             : "+m" (*(ptr)) \
                             : \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 4) { \
                asm volatile("lock incl %0\n" \
                             : "+m" (*(ptr)) \
                             : \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 2) { \
                asm volatile("lock incw %0\n" \
                             : "+m" (*(ptr)) \
                             : \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 1) { \
                asm volatile("lock incb %0\n" \
                             : "+m" (*(ptr)) \
                             : \
                             : "cc"); \
        } else { \
                void __unsupported_argument_size_for_pl_inc_noret__(char *,int); \
                __unsupported_argument_size_for_pl_inc_noret__(__FILE__,__LINE__); \
        } \
})

/* decrement integer value pointed to by pointer <ptr>, no return */
#define pl_dec_noret(ptr) ({ \
        if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
                asm volatile("lock decq %0\n" \
                             : "+m" (*(ptr)) \
                             : \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 4) { \
                asm volatile("lock decl %0\n" \
                             : "+m" (*(ptr)) \
                             : \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 2) { \
                asm volatile("lock decw %0\n" \
                             : "+m" (*(ptr)) \
                             : \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 1) { \
                asm volatile("lock decb %0\n" \
                             : "+m" (*(ptr)) \
                             : \
                             : "cc"); \
        } else { \
                void __unsupported_argument_size_for_pl_dec_noret__(char *,int); \
                __unsupported_argument_size_for_pl_dec_noret__(__FILE__,__LINE__); \
        } \
})

/* add integer constant <x> to integer value pointed to by pointer <ptr>,
 * no return. Size of <x> is not checked.
 */
#define pl_add(ptr, x) ({ \
        if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
                asm volatile("lock addq %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned long)(x)) \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 4) { \
                asm volatile("lock addl %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned int)(x)) \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 2) { \
                asm volatile("lock addw %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned short)(x)) \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 1) { \
                asm volatile("lock addb %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned char)(x)) \
                             : "cc"); \
        } else { \
                void __unsupported_argument_size_for_pl_add__(char *,int); \
                __unsupported_argument_size_for_pl_add__(__FILE__,__LINE__); \
        } \
})

/* subtract integer constant <x> from integer value pointed to by pointer
 * <ptr>, no return. Size of <x> is not checked.
 */
#define pl_sub(ptr, x) ({ \
        if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
                asm volatile("lock subq %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned long)(x)) \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 4) { \
                asm volatile("lock subl %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned int)(x)) \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 2) { \
                asm volatile("lock subw %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned short)(x)) \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 1) { \
                asm volatile("lock subb %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned char)(x)) \
                             : "cc"); \
        } else { \
                void __unsupported_argument_size_for_pl_sub__(char *,int); \
                __unsupported_argument_size_for_pl_sub__(__FILE__,__LINE__); \
        } \
})
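
/* Usage sketch (illustrative only): maintaining a shared gauge such as the
 * number of bytes currently queued. pl_add()/pl_sub() fit when the updated
 * value itself is not needed at the call site. <queued_bytes> is a
 * hypothetical global.
 *
 *   static unsigned long queued_bytes;
 *
 *   static void account_enqueue(unsigned long len)
 *   {
 *           pl_add(&queued_bytes, len);
 *   }
 *
 *   static void account_dequeue(unsigned long len)
 *   {
 *           pl_sub(&queued_bytes, len);
 *   }
 */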

/* binary-AND the integer value pointed to by pointer <ptr> with constant <x>,
 * no return. Size of <x> is not checked.
 */
#define pl_and(ptr, x) ({ \
        if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
                asm volatile("lock andq %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned long)(x)) \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 4) { \
                asm volatile("lock andl %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned int)(x)) \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 2) { \
                asm volatile("lock andw %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned short)(x)) \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 1) { \
                asm volatile("lock andb %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned char)(x)) \
                             : "cc"); \
        } else { \
                void __unsupported_argument_size_for_pl_and__(char *,int); \
                __unsupported_argument_size_for_pl_and__(__FILE__,__LINE__); \
        } \
})

/* binary-OR the integer value pointed to by pointer <ptr> with constant <x>,
 * no return. Size of <x> is not checked.
 */
#define pl_or(ptr, x) ({ \
        if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
                asm volatile("lock orq %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned long)(x)) \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 4) { \
                asm volatile("lock orl %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned int)(x)) \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 2) { \
                asm volatile("lock orw %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned short)(x)) \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 1) { \
                asm volatile("lock orb %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned char)(x)) \
                             : "cc"); \
        } else { \
                void __unsupported_argument_size_for_pl_or__(char *,int); \
                __unsupported_argument_size_for_pl_or__(__FILE__,__LINE__); \
        } \
})

/* binary-XOR the integer value pointed to by pointer <ptr> with constant <x>,
 * no return. Size of <x> is not checked.
 */
#define pl_xor(ptr, x) ({ \
        if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
                asm volatile("lock xorq %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned long)(x)) \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 4) { \
                asm volatile("lock xorl %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned int)(x)) \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 2) { \
                asm volatile("lock xorw %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned short)(x)) \
                             : "cc"); \
        } else if (sizeof(*(ptr)) == 1) { \
                asm volatile("lock xorb %1, %0\n" \
                             : "+m" (*(ptr)) \
                             : "er" ((unsigned char)(x)) \
                             : "cc"); \
        } else { \
                void __unsupported_argument_size_for_pl_xor__(char *,int); \
                __unsupported_argument_size_for_pl_xor__(__FILE__,__LINE__); \
        } \
})
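
/* Usage sketch (illustrative only): atomically setting, clearing and toggling
 * bits in a shared flags word, e.g. marking a task as queued. The flag values
 * and <task_flags> below are hypothetical.
 *
 *   #define TASK_QUEUED   0x01
 *   #define TASK_RUNNING  0x02
 *
 *   static unsigned int task_flags;
 *
 *   static void mark_queued(void)    { pl_or(&task_flags, TASK_QUEUED);   }
 *   static void clear_queued(void)   { pl_and(&task_flags, ~TASK_QUEUED); }
 *   static void toggle_running(void) { pl_xor(&task_flags, TASK_RUNNING); }
 */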

/* test and set bit <bit> in integer value pointed to by pointer <ptr>. Returns
 * 0 if the bit was not set, or ~0 of the same type as *ptr if it was set. Note
 * that there is no 8-bit equivalent operation.
 */
#define pl_bts(ptr, bit) ( \
        (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
                unsigned long ret; \
                asm volatile("lock btsq %2, %0\n\t" \
                             "sbb %1, %1\n\t" \
                             : "+m" (*(ptr)), "=r" (ret) \
                             : "Ir" ((unsigned long)(bit)) \
                             : "cc"); \
                ret; /* return value */ \
        }) : (sizeof(*(ptr)) == 4) ? ({ \
                unsigned int ret; \
                asm volatile("lock btsl %2, %0\n\t" \
                             "sbb %1, %1\n\t" \
                             : "+m" (*(ptr)), "=r" (ret) \
                             : "Ir" ((unsigned int)(bit)) \
                             : "cc"); \
                ret; /* return value */ \
        }) : (sizeof(*(ptr)) == 2) ? ({ \
                unsigned short ret; \
                asm volatile("lock btsw %2, %0\n\t" \
                             "sbb %1, %1\n\t" \
                             : "+m" (*(ptr)), "=r" (ret) \
                             : "Ir" ((unsigned short)(bit)) \
                             : "cc"); \
                ret; /* return value */ \
        }) : ({ \
                void __unsupported_argument_size_for_pl_bts__(char *,int); \
                __unsupported_argument_size_for_pl_bts__(__FILE__,__LINE__); \
                0; \
        }) \
)
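
/* Usage sketch (illustrative only): using pl_bts() so that a one-time
 * initialization is attempted only once. The first caller finds the bit clear
 * (the macro returns 0) and performs the work; later callers see a non-zero
 * return and skip it. Note that losers of the race must not assume the work
 * has already completed. <init_state> and do_init() are hypothetical.
 *
 *   static unsigned long init_state;
 *
 *   static void init_once(void)
 *   {
 *           if (!pl_bts(&init_state, 0))
 *                   do_init();          // hypothetical initialization
 *   }
 */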

/* Note: for an unclear reason, gcc's __sync_fetch_and_add() implementation
 * produces less optimal code than hand-crafted asm, so let's implement here
 * the operations we need for the most common archs.
 */

/* fetch-and-add: fetch the integer value pointed to by <ptr>, add <x> to
 * <*ptr> and return the previous value.
 */
#define pl_xadd(ptr, x) ( \
        (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
                unsigned long ret = (unsigned long)(x); \
                asm volatile("lock xaddq %0, %1\n" \
                             : "=r" (ret), "+m" (*(ptr)) \
                             : "0" (ret) \
                             : "cc"); \
                ret; /* return value */ \
        }) : (sizeof(*(ptr)) == 4) ? ({ \
                unsigned int ret = (unsigned int)(x); \
                asm volatile("lock xaddl %0, %1\n" \
                             : "=r" (ret), "+m" (*(ptr)) \
                             : "0" (ret) \
                             : "cc"); \
                ret; /* return value */ \
        }) : (sizeof(*(ptr)) == 2) ? ({ \
                unsigned short ret = (unsigned short)(x); \
                asm volatile("lock xaddw %0, %1\n" \
                             : "=r" (ret), "+m" (*(ptr)) \
                             : "0" (ret) \
                             : "cc"); \
                ret; /* return value */ \
        }) : (sizeof(*(ptr)) == 1) ? ({ \
                unsigned char ret = (unsigned char)(x); \
                asm volatile("lock xaddb %0, %1\n" \
                             : "=r" (ret), "+m" (*(ptr)) \
                             : "0" (ret) \
                             : "cc"); \
                ret; /* return value */ \
        }) : ({ \
                void __unsupported_argument_size_for_pl_xadd__(char *,int); \
                __unsupported_argument_size_for_pl_xadd__(__FILE__,__LINE__); \
                0; \
        }) \
)
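
/* Usage sketch (illustrative only): allocating unique, monotonically
 * increasing identifiers. pl_xadd() returns the value before the addition,
 * so concurrent callers each get a distinct id. <next_id> is a hypothetical
 * global counter.
 *
 *   static unsigned long next_id;
 *
 *   static unsigned long alloc_id(void)
 *   {
 *           return pl_xadd(&next_id, 1);
 *   }
 */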

/* exchange value <x> with the integer value pointed to by <ptr>, and return
 * the previous <*ptr> value. <x> must be of the same size as <*ptr>.
 */
#define pl_xchg(ptr, x) ( \
        (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
                unsigned long ret = (unsigned long)(x); \
                asm volatile("xchgq %0, %1\n" \
                             : "=r" (ret), "+m" (*(ptr)) \
                             : "0" (ret) \
                             : "cc"); \
                ret; /* return value */ \
        }) : (sizeof(*(ptr)) == 4) ? ({ \
                unsigned int ret = (unsigned int)(x); \
                asm volatile("xchgl %0, %1\n" \
                             : "=r" (ret), "+m" (*(ptr)) \
                             : "0" (ret) \
                             : "cc"); \
                ret; /* return value */ \
        }) : (sizeof(*(ptr)) == 2) ? ({ \
                unsigned short ret = (unsigned short)(x); \
                asm volatile("xchgw %0, %1\n" \
                             : "=r" (ret), "+m" (*(ptr)) \
                             : "0" (ret) \
                             : "cc"); \
                ret; /* return value */ \
        }) : (sizeof(*(ptr)) == 1) ? ({ \
                unsigned char ret = (unsigned char)(x); \
                asm volatile("xchgb %0, %1\n" \
                             : "=r" (ret), "+m" (*(ptr)) \
                             : "0" (ret) \
                             : "cc"); \
                ret; /* return value */ \
        }) : ({ \
                void __unsupported_argument_size_for_pl_xchg__(char *,int); \
                __unsupported_argument_size_for_pl_xchg__(__FILE__,__LINE__); \
                0; \
        }) \
)
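
/* Usage sketch (illustrative only): a minimal test-and-set spinlock built on
 * pl_xchg(). The lock word is 0 when free and 1 when held; pl_xchg() reads
 * the old value and marks the lock as taken in a single atomic step.
 * pl_barrier() keeps the compiler from moving the protected accesses outside
 * the critical section. <lock_word> is a hypothetical lock variable.
 *
 *   static unsigned int lock_word;
 *
 *   static void spin_lock(void)
 *   {
 *           while (pl_xchg(&lock_word, 1))
 *                   pl_cpu_relax();
 *           pl_barrier();
 *   }
 *
 *   static void spin_unlock(void)
 *   {
 *           pl_barrier();
 *           lock_word = 0;
 *   }
 */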

/* compare the integer value <*ptr> with <old> and exchange it with <new> if
 * it matches. Returns the value previously found in <*ptr>, which equals
 * <old> if the exchange was performed. <old> and <new> must be of the same
 * size as <*ptr>.
 */
#define pl_cmpxchg(ptr, old, new) ( \
        (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
                unsigned long ret; \
                asm volatile("lock cmpxchgq %2,%1" \
                             : "=a" (ret), "+m" (*(ptr)) \
                             : "r" ((unsigned long)(new)), \
                               "0" ((unsigned long)(old)) \
                             : "cc"); \
                ret; /* return value */ \
        }) : (sizeof(*(ptr)) == 4) ? ({ \
                unsigned int ret; \
                asm volatile("lock cmpxchgl %2,%1" \
                             : "=a" (ret), "+m" (*(ptr)) \
                             : "r" ((unsigned int)(new)), \
                               "0" ((unsigned int)(old)) \
                             : "cc"); \
                ret; /* return value */ \
        }) : (sizeof(*(ptr)) == 2) ? ({ \
                unsigned short ret; \
                asm volatile("lock cmpxchgw %2,%1" \
                             : "=a" (ret), "+m" (*(ptr)) \
                             : "r" ((unsigned short)(new)), \
                               "0" ((unsigned short)(old)) \
                             : "cc"); \
                ret; /* return value */ \
        }) : (sizeof(*(ptr)) == 1) ? ({ \
                unsigned char ret; \
                asm volatile("lock cmpxchgb %2,%1" \
                             : "=a" (ret), "+m" (*(ptr)) \
                             : "r" ((unsigned char)(new)), \
                               "0" ((unsigned char)(old)) \
                             : "cc"); \
                ret; /* return value */ \
        }) : ({ \
                void __unsupported_argument_size_for_pl_cmpxchg__(char *,int); \
                __unsupported_argument_size_for_pl_cmpxchg__(__FILE__,__LINE__); \
                0; \
        }) \
)
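
/* Usage sketch (illustrative only): a compare-and-swap retry loop, here used
 * to atomically raise a shared maximum. pl_cmpxchg() returns the value that
 * was observed in <*ptr>; the update succeeded when that value equals the
 * expected one, otherwise the loop retries with the fresh value.
 * <shared_max> is a hypothetical global.
 *
 *   static unsigned int shared_max;
 *
 *   static void update_max(unsigned int v)
 *   {
 *           unsigned int cur = shared_max;
 *
 *           while (cur < v) {
 *                   unsigned int prev = pl_cmpxchg(&shared_max, cur, v);
 *                   if (prev == cur)
 *                           break;      // swap succeeded
 *                   cur = prev;         // lost the race, retry with new value
 *           }
 *   }
 */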

#else
/* generic implementations */

static inline void pl_cpu_relax()
{
        asm volatile("");
}

#define pl_inc_noret(ptr)     ({ __sync_add_and_fetch((ptr), 1);   })
#define pl_dec_noret(ptr)     ({ __sync_sub_and_fetch((ptr), 1);   })
#define pl_inc(ptr)           ({ __sync_add_and_fetch((ptr), 1);   })
#define pl_dec(ptr)           ({ __sync_sub_and_fetch((ptr), 1);   })
#define pl_add(ptr, x)        ({ __sync_add_and_fetch((ptr), (x)); })
#define pl_and(ptr, x)        ({ __sync_and_and_fetch((ptr), (x)); })
#define pl_or(ptr, x)         ({ __sync_or_and_fetch((ptr), (x));  })
#define pl_xor(ptr, x)        ({ __sync_xor_and_fetch((ptr), (x)); })
#define pl_sub(ptr, x)        ({ __sync_sub_and_fetch((ptr), (x)); })
#define pl_xadd(ptr, x)       ({ __sync_fetch_and_add((ptr), (x)); })
#define pl_cmpxchg(ptr, o, n) ({ __sync_val_compare_and_swap((ptr), (o), (n)); })
#define pl_xchg(ptr, x)       ({ typeof(*(ptr)) __pl_t; \
                                 do { __pl_t = *(ptr); \
                                 } while (!__sync_bool_compare_and_swap((ptr), __pl_t, (x))); \
                                 __pl_t; \
                              })

#endif

#endif /* PL_ATOMIC_OPS_H */