/**
 * Many similar implementations exist. See for example libwsbm
 * or the Linux kernel's include/atomic.h
 *
 * No copyright claimed on this file.
 *
 */

#include "no_extern_c.h"

#ifndef U_ATOMIC_H
#define U_ATOMIC_H

#include <stdbool.h>
#include <stdint.h>

/* Favor OS-provided implementations.
 *
 * Where no OS-provided implementation is available, fall back to
 * locally coded assembly, compiler intrinsics, or ultimately a
 * mutex-based implementation.
 */
#if defined(__sun)
#define PIPE_ATOMIC_OS_SOLARIS
#elif defined(_MSC_VER)
#define PIPE_ATOMIC_MSVC_INTRINSIC
#elif defined(__GNUC__)
#define PIPE_ATOMIC_GCC_INTRINSIC
#else
#error "Unsupported platform"
#endif
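
/* Whichever backend is selected below, the p_atomic_* macros present the
 * same interface. A minimal usage sketch (illustrative only; `obj`,
 * `refcount` and `destroy` are hypothetical):
 *
 *    p_atomic_set(&obj->refcount, 1);
 *    p_atomic_inc(&obj->refcount);             // take a reference
 *    if (p_atomic_dec_zero(&obj->refcount))    // drop one; true at zero
 *       destroy(obj);
 */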


/* Implementation using GCC-provided synchronization intrinsics
 */
#if defined(PIPE_ATOMIC_GCC_INTRINSIC)

#define PIPE_ATOMIC "GCC Sync Intrinsics"

#if defined(USE_GCC_ATOMIC_BUILTINS)

/* The builtins with explicit memory model are available since GCC 4.7. */
#define p_atomic_set(_v, _i) __atomic_store_n((_v), (_i), __ATOMIC_RELEASE)
#define p_atomic_read(_v) __atomic_load_n((_v), __ATOMIC_ACQUIRE)
#define p_atomic_read_relaxed(_v) __atomic_load_n((_v), __ATOMIC_RELAXED)
#define p_atomic_dec_zero(v) (__atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL) == 0)
#define p_atomic_inc(v) (void) __atomic_add_fetch((v), 1, __ATOMIC_ACQ_REL)
#define p_atomic_dec(v) (void) __atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL)
#define p_atomic_add(v, i) (void) __atomic_add_fetch((v), (i), __ATOMIC_ACQ_REL)
#define p_atomic_inc_return(v) __atomic_add_fetch((v), 1, __ATOMIC_ACQ_REL)
#define p_atomic_dec_return(v) __atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL)
#define p_atomic_add_return(v, i) __atomic_add_fetch((v), (i), __ATOMIC_ACQ_REL)
#define p_atomic_fetch_add(v, i) __atomic_fetch_add((v), (i), __ATOMIC_ACQ_REL)
#define p_atomic_xchg(v, i) __atomic_exchange_n((v), (i), __ATOMIC_ACQ_REL)
#define PIPE_NATIVE_ATOMIC_XCHG

#else

#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
#define p_atomic_read_relaxed(_v) (*(_v))
#define p_atomic_dec_zero(v) (__sync_sub_and_fetch((v), 1) == 0)
#define p_atomic_inc(v) (void) __sync_add_and_fetch((v), 1)
#define p_atomic_dec(v) (void) __sync_sub_and_fetch((v), 1)
#define p_atomic_add(v, i) (void) __sync_add_and_fetch((v), (i))
#define p_atomic_inc_return(v) __sync_add_and_fetch((v), 1)
#define p_atomic_dec_return(v) __sync_sub_and_fetch((v), 1)
#define p_atomic_add_return(v, i) __sync_add_and_fetch((v), (i))
#define p_atomic_fetch_add(v, i) __sync_fetch_and_add((v), (i))

#endif

/* There is no __atomic_* compare and exchange that returns the current value.
 * Also, GCC 5.4 seems unable to optimize a compound statement expression that
 * uses an additional stack variable with __atomic_compare_exchange[_n].
 */
#define p_atomic_cmpxchg(v, old, _new) \
   __sync_val_compare_and_swap((v), (old), (_new))
#define p_atomic_cmpxchg_ptr(v, old, _new) p_atomic_cmpxchg(v, old, _new)
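
/* p_atomic_cmpxchg evaluates to the value observed in *v: the old value on
 * success, the current value on failure. That supports the usual CAS-loop
 * pattern for read-modify-write operations not provided here, e.g. an
 * atomic maximum (illustrative sketch):
 *
 *    uint32_t cur = p_atomic_read(v);
 *    while (cur < n) {
 *       uint32_t prev = p_atomic_cmpxchg(v, cur, n);
 *       if (prev == cur)
 *          break;       // we swapped n in
 *       cur = prev;     // lost a race; retry against the fresh value
 *    }
 */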

#endif


/* Unlocked version for single-threaded environments, such as some
 * Windows kernel modules.
 */
#if defined(PIPE_ATOMIC_OS_UNLOCKED)

#define PIPE_ATOMIC "Unlocked"

#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
#define p_atomic_read_relaxed(_v) (*(_v))
#define p_atomic_dec_zero(_v) (p_atomic_dec_return(_v) == 0)
#define p_atomic_inc(_v) ((void) p_atomic_inc_return(_v))
#define p_atomic_dec(_v) ((void) p_atomic_dec_return(_v))
#define p_atomic_add(_v, _i) ((void) p_atomic_add_return((_v), (_i)))
#define p_atomic_inc_return(_v) (++(*(_v)))
#define p_atomic_dec_return(_v) (--(*(_v)))
#define p_atomic_add_return(_v, _i) (*(_v) = *(_v) + (_i))
#define p_atomic_fetch_add(_v, _i) (*(_v) = *(_v) + (_i), *(_v) - (_i))
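/* Matches the atomic variants' convention: evaluates to (_old) when the
 * compare succeeds, and to the current value of *(_v) when it fails.
 */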
#define p_atomic_cmpxchg(_v, _old, _new) (*(_v) == (_old) ? (*(_v) = (_new), (_old)) : *(_v))
#define p_atomic_cmpxchg_ptr(_v, _old, _new) p_atomic_cmpxchg(_v, _old, _new)

#endif


#if defined(PIPE_ATOMIC_MSVC_INTRINSIC)

#define PIPE_ATOMIC "MSVC Intrinsics"

/* We use the Windows header's Interlocked*64 functions instead of the
 * _Interlocked*64 intrinsics wherever we can, as support for the latter varies
 * with target CPU, whereas Windows headers take care of all portability
 * issues: using intrinsics where available, falling back to library
 * implementations where not.
 */
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN 1
#endif
#include <windows.h>   /* for the Interlocked*64 functions mentioned above */
#include <intrin.h>
#include <assert.h>

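/* MSVC has no 8- or 16-bit add-and-return intrinsic; emulate one with
 * exchange-add, which returns the previous value, plus the addend.
 */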
__forceinline char _interlockedadd8(char volatile * _Addend, char _Value)
{
   return _InterlockedExchangeAdd8(_Addend, _Value) + _Value;
}

__forceinline short _interlockedadd16(short volatile * _Addend, short _Value)
{
   return _InterlockedExchangeAdd16(_Addend, _Value) + _Value;
}

/* MSVC supports the decltype keyword, but only in C++, and it doesn't
 * quite work here; and if a C++-only solution were worthwhile, it would be
 * better to use templates / function overloading instead of decltype magic.
 * Therefore, we rely on implicit casting to LONGLONG for the functions that
 * return a value.
 */

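/* The macros below dispatch on operand size with a chain of sizeof
 * comparisons. The condition in each arm is a compile-time constant, so
 * the unused arms are folded away; the casts keep every arm well-typed.
 */
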
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
#define p_atomic_read_relaxed(_v) (*(_v))

#define p_atomic_dec_zero(_v) \
   (p_atomic_dec_return(_v) == 0)

#define p_atomic_inc(_v) \
   ((void) p_atomic_inc_return(_v))

#define p_atomic_inc_return(_v) (\
   sizeof *(_v) == sizeof(char)    ? p_atomic_add_return((_v), 1) : \
   sizeof *(_v) == sizeof(short)   ? _InterlockedIncrement16((short *)  (_v)) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedIncrement  ((long *)   (_v)) : \
   sizeof *(_v) == sizeof(__int64) ? _interlockedincrement64((__int64 *)(_v)) : \
                                     (assert(!"should not get here"), 0))

#define p_atomic_dec(_v) \
   ((void) p_atomic_dec_return(_v))

#define p_atomic_dec_return(_v) (\
   sizeof *(_v) == sizeof(char)    ? p_atomic_add_return((_v), -1) : \
   sizeof *(_v) == sizeof(short)   ? _InterlockedDecrement16((short *)  (_v)) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedDecrement  ((long *)   (_v)) : \
   sizeof *(_v) == sizeof(__int64) ? _interlockeddecrement64((__int64 *)(_v)) : \
                                     (assert(!"should not get here"), 0))

#define p_atomic_add(_v, _i) \
   ((void) p_atomic_fetch_add((_v), (_i)))

#define p_atomic_add_return(_v, _i) (\
   sizeof *(_v) == sizeof(char)    ? _interlockedadd8 ((char *)   (_v), (_i)) : \
   sizeof *(_v) == sizeof(short)   ? _interlockedadd16((short *)  (_v), (_i)) : \
   sizeof *(_v) == sizeof(long)    ? _interlockedadd  ((long *)   (_v), (_i)) : \
   sizeof *(_v) == sizeof(__int64) ? _interlockedadd64((__int64 *)(_v), (_i)) : \
                                     (assert(!"should not get here"), 0))

#define p_atomic_fetch_add(_v, _i) (\
   sizeof *(_v) == sizeof(char)    ? _InterlockedExchangeAdd8 ((char *)   (_v), (_i)) : \
   sizeof *(_v) == sizeof(short)   ? _InterlockedExchangeAdd16((short *)  (_v), (_i)) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedExchangeAdd  ((long *)   (_v), (_i)) : \
   sizeof *(_v) == sizeof(__int64) ? _interlockedexchangeadd64((__int64 *)(_v), (_i)) : \
                                     (assert(!"should not get here"), 0))

#define p_atomic_cmpxchg(_v, _old, _new) (\
   sizeof *(_v) == sizeof(char)    ? _InterlockedCompareExchange8 ((char *)   (_v), (char)   (_new), (char)   (_old)) : \
   sizeof *(_v) == sizeof(short)   ? _InterlockedCompareExchange16((short *)  (_v), (short)  (_new), (short)  (_old)) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedCompareExchange  ((long *)   (_v), (long)   (_new), (long)   (_old)) : \
   sizeof *(_v) == sizeof(__int64) ? _InterlockedCompareExchange64((__int64 *)(_v), (__int64)(_new), (__int64)(_old)) : \
                                     (assert(!"should not get here"), 0))

#if defined(_WIN64)
#define p_atomic_cmpxchg_ptr(_v, _old, _new) (void *)_InterlockedCompareExchange64((__int64 *)(_v), (__int64)(_new), (__int64)(_old))
#else
#define p_atomic_cmpxchg_ptr(_v, _old, _new) (void *)_InterlockedCompareExchange((long *)(_v), (long)(_new), (long)(_old))
#endif

#define PIPE_NATIVE_ATOMIC_XCHG
#define p_atomic_xchg(_v, _new) (\
   sizeof *(_v) == sizeof(char)    ? _InterlockedExchange8 ((char *)   (_v), (char)   (_new)) : \
   sizeof *(_v) == sizeof(short)   ? _InterlockedExchange16((short *)  (_v), (short)  (_new)) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedExchange  ((long *)   (_v), (long)   (_new)) : \
   sizeof *(_v) == sizeof(__int64) ? _interlockedexchange64((__int64 *)(_v), (__int64)(_new)) : \
                                     (assert(!"should not get here"), 0))

#endif

#if defined(PIPE_ATOMIC_OS_SOLARIS)

#define PIPE_ATOMIC "Solaris OS atomic functions"

#include <atomic.h>
#include <assert.h>

#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))

#define p_atomic_dec_zero(v) (\
   sizeof(*v) == sizeof(uint8_t)  ? atomic_dec_8_nv ((uint8_t  *)(v)) == 0 : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16_nv((uint16_t *)(v)) == 0 : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32_nv((uint32_t *)(v)) == 0 : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64_nv((uint64_t *)(v)) == 0 : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_inc(v) (void) (\
   sizeof(*v) == sizeof(uint8_t)  ? atomic_inc_8 ((uint8_t  *)(v)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_inc_16((uint16_t *)(v)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_inc_32((uint32_t *)(v)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_inc_64((uint64_t *)(v)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_inc_return(v) (__typeof(*v))( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_inc_8_nv ((uint8_t  *)(v)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_inc_16_nv((uint16_t *)(v)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_inc_32_nv((uint32_t *)(v)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_inc_64_nv((uint64_t *)(v)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_dec(v) (void) ( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_dec_8 ((uint8_t  *)(v)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16((uint16_t *)(v)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32((uint32_t *)(v)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64((uint64_t *)(v)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_dec_return(v) (__typeof(*v))( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_dec_8_nv ((uint8_t  *)(v)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16_nv((uint16_t *)(v)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32_nv((uint32_t *)(v)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64_nv((uint64_t *)(v)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_add(v, i) (void) ( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_add_8 ((uint8_t  *)(v), (i)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_add_16((uint16_t *)(v), (i)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_add_32((uint32_t *)(v), (i)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_add_64((uint64_t *)(v), (i)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_add_return(v, i) (__typeof(*v)) ( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_add_8_nv ((uint8_t  *)(v), (i)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_add_16_nv((uint16_t *)(v), (i)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_add_32_nv((uint32_t *)(v), (i)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_add_64_nv((uint64_t *)(v), (i)) : \
                                    (assert(!"should not get here"), 0))

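/* Solaris only provides add-and-fetch (the _nv variants return the new
 * value); recover fetch-and-add semantics by subtracting the addend again.
 */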
#define p_atomic_fetch_add(v, i) (__typeof(*v)) ( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_add_8_nv ((uint8_t  *)(v), (i)) - (i) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_add_16_nv((uint16_t *)(v), (i)) - (i) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_add_32_nv((uint32_t *)(v), (i)) - (i) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_add_64_nv((uint64_t *)(v), (i)) - (i) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_cmpxchg(v, old, _new) (__typeof(*v))( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_cas_8 ((uint8_t  *)(v), (uint8_t )(old), (uint8_t )(_new)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_cas_16((uint16_t *)(v), (uint16_t)(old), (uint16_t)(_new)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_cas_32((uint32_t *)(v), (uint32_t)(old), (uint32_t)(_new)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_cas_64((uint64_t *)(v), (uint64_t)(old), (uint64_t)(_new)) : \
                                    (assert(!"should not get here"), 0))

#if INTPTR_MAX == INT32_MAX
#define p_atomic_cmpxchg_ptr(v, old, _new) (__typeof(*v))(atomic_cas_32((uint32_t *)(v), (uint32_t)(old), (uint32_t)(_new)))
#else
#define p_atomic_cmpxchg_ptr(v, old, _new) (__typeof(*v))(atomic_cas_64((uint64_t *)(v), (uint64_t)(old), (uint64_t)(_new)))
#endif

#endif

#ifndef PIPE_ATOMIC
#error "No pipe_atomic implementation selected"
#endif

#ifndef PIPE_NATIVE_ATOMIC_XCHG
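
/* No native atomic exchange is available: emulate it with a
 * compare-and-swap loop. Each helper re-issues the CAS with the value it
 * last observed until the swap succeeds, then returns that previous value.
 */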
static inline uint8_t p_atomic_xchg_8(uint8_t *v, uint8_t i)
{
   uint8_t actual = p_atomic_read(v);
   uint8_t expected;
   do {
      expected = actual;
      actual = p_atomic_cmpxchg(v, expected, i);
   } while (expected != actual);
   return actual;
}

static inline uint16_t p_atomic_xchg_16(uint16_t *v, uint16_t i)
{
   uint16_t actual = p_atomic_read(v);
   uint16_t expected;
   do {
      expected = actual;
      actual = p_atomic_cmpxchg(v, expected, i);
   } while (expected != actual);
   return actual;
}

static inline uint32_t p_atomic_xchg_32(uint32_t *v, uint32_t i)
{
   uint32_t actual = p_atomic_read(v);
   uint32_t expected;
   do {
      expected = actual;
      actual = p_atomic_cmpxchg(v, expected, i);
   } while (expected != actual);
   return actual;
}

static inline uint64_t p_atomic_xchg_64(uint64_t *v, uint64_t i)
{
   uint64_t actual = p_atomic_read(v);
   uint64_t expected;
   do {
      expected = actual;
      actual = p_atomic_cmpxchg(v, expected, i);
   } while (expected != actual);
   return actual;
}

#define p_atomic_xchg(v, i) (__typeof(*(v)))( \
   sizeof(*(v)) == sizeof(uint8_t)  ? p_atomic_xchg_8 ((uint8_t  *)(v), (uint8_t )(i)) : \
   sizeof(*(v)) == sizeof(uint16_t) ? p_atomic_xchg_16((uint16_t *)(v), (uint16_t)(i)) : \
   sizeof(*(v)) == sizeof(uint32_t) ? p_atomic_xchg_32((uint32_t *)(v), (uint32_t)(i)) : \
   sizeof(*(v)) == sizeof(uint64_t) ? p_atomic_xchg_64((uint64_t *)(v), (uint64_t)(i)) : \
                                      (assert(!"should not get here"), 0))
#endif

/* On x86 we can have sizeof(uint64_t) == 8 but _Alignof(uint64_t) == 4, causing
 * split locks. The implementation does handle that correctly, but with an
 * internal mutex. Extend the alignment to avoid this.
 */
#if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) && defined(USE_GCC_ATOMIC_BUILTINS)
typedef int64_t __attribute__((aligned(_Alignof(_Atomic(int64_t))))) p_atomic_int64_t;
typedef uint64_t __attribute__((aligned(_Alignof(_Atomic(uint64_t))))) p_atomic_uint64_t;
#else
typedef int64_t p_atomic_int64_t;
typedef uint64_t p_atomic_uint64_t;
#endif
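
/* Illustrative sketch (hypothetical `stats` struct): declaring a 64-bit
 * counter with this typedef keeps it naturally aligned even on 32-bit x86,
 * so the atomics above never cross a cache line:
 *
 *    struct stats {
 *       p_atomic_uint64_t bytes_used;
 *    };
 *    p_atomic_add(&s->bytes_used, size);
 */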

#endif /* U_ATOMIC_H */