//===-- atomic.c - Implement support functions for atomic operations.------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// atomic.c defines a set of functions for performing atomic accesses on
// arbitrary-sized memory locations. This design uses locks that should
// be fast in the uncontended case, for two reasons:
//
// 1) This code must work with C programs that do not link to anything
//    (including pthreads) and so it should not depend on any pthread
//    functions.
// 2) Atomic operations, rather than explicit mutexes, are most commonly used
//    on code where contended operations are rare.
//
// To avoid needing a per-object lock, this code allocates an array of
// locks and hashes the object pointers to find the one that it should use.
// For operations that must be atomic on two locations, the lower lock is
// always acquired first, to avoid deadlock.
//
//===----------------------------------------------------------------------===//

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include "assembly.h"

// Clang objects if you redefine a builtin. This little hack allows us to
// define a function with the same name as an intrinsic.
#pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
#pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store)
#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME( \
    __atomic_compare_exchange)

/// Number of locks. This allocates one page on 32-bit platforms, two on
/// 64-bit. This can be specified externally if a different trade-off between
/// memory usage and contention probability is required for a given platform.
#ifndef SPINLOCK_COUNT
#define SPINLOCK_COUNT (1 << 10)
#endif
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;
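// Note: SPINLOCK_MASK is only a sensible mask when SPINLOCK_COUNT is a power
// of two; a non-power-of-two override would leave some locks permanently
// unused. A larger count can be supplied at build time (for example, the
// hypothetical flag -DSPINLOCK_COUNT='(1 << 12)') to trade extra memory for a
// lower probability of unrelated objects contending on the same lock.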

////////////////////////////////////////////////////////////////////////////////
// Platform-specific lock implementation. Falls back to spinlocks if none is
// defined. Each platform should define the Lock type, and corresponding
// lock() and unlock() functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __FreeBSD__
#include <errno.h>
// clang-format off
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
// clang-format on
typedef struct _usem Lock;
/// Unlock the semaphore and wake a waiting thread, if any.
__inline static void unlock(Lock *l) {
  __c11_atomic_store((_Atomic(uint32_t) *)&l->_count, 1, __ATOMIC_RELEASE);
  __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
  if (l->_has_waiters)
    _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
}
/// Lock the semaphore, sleeping in the kernel via _umtx_op() while it is
/// contended.
__inline static void lock(Lock *l) {
  uint32_t old = 1;
  while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t) *)&l->_count,
                                             &old, 0, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED)) {
    _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
    old = 1;
  }
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT] = {[0 ... SPINLOCK_COUNT - 1] = {0, 1, 0}};

#elif defined(__APPLE__)
#include <libkern/OSAtomic.h>
typedef OSSpinLock Lock;
__inline static void unlock(Lock *l) { OSSpinLockUnlock(l); }
/// Locks a lock. In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) { OSSpinLockLock(l); }
static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0

#else
typedef _Atomic(uintptr_t) Lock;
/// Unlock a lock. This is a release operation.
__inline static void unlock(Lock *l) {
  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
}
/// Locks a lock. In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) {
  uintptr_t old = 0;
  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED))
    old = 0;
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT];
#endif

/// Returns a lock to use for a given pointer.
static __inline Lock *lock_for_pointer(void *ptr) {
  intptr_t hash = (intptr_t)ptr;
  // Disregard the lowest 4 bits. We want all values that may be part of the
  // same memory operation to hash to the same value and therefore use the same
  // lock.
  hash >>= 4;
  // Use the next bits as the basis for the hash
  intptr_t low = hash & SPINLOCK_MASK;
  // Now use the high(er) set of bits to perturb the hash, so that we don't
  // get collisions from atomic fields in a single object
  hash >>= 16;
  hash ^= low;
  // Return a pointer to the word to use
  return locks + (hash & SPINLOCK_MASK);
}
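// Illustration of the hashing above: addresses inside the same 16-byte granule
// always map to the same lock (so one wide access takes a single lock), while
// neighbouring granules normally select different locks because their low
// index bits differ before the XOR perturbation is applied.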

/// Macros for determining whether a size is lock free.
#define ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(size, p) \
  (__atomic_always_lock_free(size, p) || \
   (__atomic_always_lock_free(size, 0) && ((uintptr_t)p % size) == 0))
#define IS_LOCK_FREE_1(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(1, p)
#define IS_LOCK_FREE_2(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(2, p)
#define IS_LOCK_FREE_4(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(4, p)
#define IS_LOCK_FREE_8(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(8, p)
#define IS_LOCK_FREE_16(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(16, p)

/// Macro that calls the compiler-generated lock-free versions of functions
/// when they exist.
#define TRY_LOCK_FREE_CASE(n, type, ptr) \
  case n: \
    if (IS_LOCK_FREE_##n(ptr)) { \
      LOCK_FREE_ACTION(type); \
    } \
    break;
#ifdef __SIZEOF_INT128__
#define TRY_LOCK_FREE_CASE_16(p) TRY_LOCK_FREE_CASE(16, __uint128_t, p)
#else
#define TRY_LOCK_FREE_CASE_16(p) /* __uint128_t not available */
#endif

#define LOCK_FREE_CASES(ptr) \
  do { \
    switch (size) { \
      TRY_LOCK_FREE_CASE(1, uint8_t, ptr) \
      TRY_LOCK_FREE_CASE(2, uint16_t, ptr) \
      TRY_LOCK_FREE_CASE(4, uint32_t, ptr) \
      TRY_LOCK_FREE_CASE(8, uint64_t, ptr) \
      TRY_LOCK_FREE_CASE_16(ptr) /* __uint128_t may not be supported */ \
    default: \
      break; \
    } \
  } while (0)
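// For illustration, with size == 4 the switch inside LOCK_FREE_CASES(src)
// contains, roughly:
//
//   case 4:
//     if (IS_LOCK_FREE_4(src)) {
//       LOCK_FREE_ACTION(uint32_t); // expands to whatever action the caller
//     }                             // defined, e.g. a lock-free load
//     break;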

/// An atomic load operation. This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type) \
  *((type *)dest) = __c11_atomic_load((_Atomic(type) *)src, model); \
  return;
  LOCK_FREE_CASES(src);
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(src);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}
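// Illustrative caller (not part of this file): for an atomic object too large
// for a lock-free instruction sequence, e.g.
//
//   typedef struct { char buf[32]; } Big;
//   _Atomic Big obj;
//   Big tmp = obj; // an atomic read of obj
//
// Clang lowers the read to a libcall roughly equivalent to
// __atomic_load(32, &obj, &tmp, __ATOMIC_SEQ_CST), which is the symbol this
// function is emitted under (see the redefine_extname pragmas above).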

/// An atomic store operation. This is atomic with respect to the destination
/// pointer only.
void __atomic_store_c(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type) \
  __c11_atomic_store((_Atomic(type) *)dest, *(type *)src, model); \
  return;
  LOCK_FREE_CASES(dest);
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(dest);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// Atomic compare and exchange operation. If the value at *ptr is identical
/// to the value at *expected, then this copies the value at *desired to *ptr.
/// If they are not, then this stores the current value from *ptr in *expected.
///
/// This function returns 1 if the exchange takes place or 0 if it fails.
int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
                                void *desired, int success, int failure) {
#define LOCK_FREE_ACTION(type) \
  return __c11_atomic_compare_exchange_strong( \
      (_Atomic(type) *)ptr, (type *)expected, *(type *)desired, success, \
      failure)
  LOCK_FREE_CASES(ptr);
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  if (memcmp(ptr, expected, size) == 0) {
    memcpy(ptr, desired, size);
    unlock(l);
    return 1;
  }
  memcpy(expected, ptr, size);
  unlock(l);
  return 0;
}

/// Performs an atomic exchange operation between two pointers. This is atomic
/// with respect to the target address.
void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
#define LOCK_FREE_ACTION(type) \
  *(type *)old = \
      __c11_atomic_exchange((_Atomic(type) *)ptr, *(type *)val, model); \
  return;
  LOCK_FREE_CASES(ptr);
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  memcpy(old, ptr, size);
  memcpy(ptr, val, size);
  unlock(l);
}

////////////////////////////////////////////////////////////////////////////////
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __SIZEOF_INT128__
#define OPTIMISED_CASES \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t) \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t) \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t) \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t) \
  OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
#else
#define OPTIMISED_CASES \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t) \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t) \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t) \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
#endif
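// For illustration: each OPTIMISED_CASE definition below is instantiated once
// per size by OPTIMISED_CASES, so the first definition emits the family
// __atomic_load_1 ... __atomic_load_8 (plus __atomic_load_16 when __uint128_t
// is available), e.g.
//
//   uint32_t __atomic_load_4(uint32_t *src, int model);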

#define OPTIMISED_CASE(n, lockfree, type) \
  type __atomic_load_##n(type *src, int model) { \
    if (lockfree(src)) \
      return __c11_atomic_load((_Atomic(type) *)src, model); \
    Lock *l = lock_for_pointer(src); \
    lock(l); \
    type val = *src; \
    unlock(l); \
    return val; \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type) \
  void __atomic_store_##n(type *dest, type val, int model) { \
    if (lockfree(dest)) { \
      __c11_atomic_store((_Atomic(type) *)dest, val, model); \
      return; \
    } \
    Lock *l = lock_for_pointer(dest); \
    lock(l); \
    *dest = val; \
    unlock(l); \
    return; \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type) \
  type __atomic_exchange_##n(type *dest, type val, int model) { \
    if (lockfree(dest)) \
      return __c11_atomic_exchange((_Atomic(type) *)dest, val, model); \
    Lock *l = lock_for_pointer(dest); \
    lock(l); \
    type tmp = *dest; \
    *dest = val; \
    unlock(l); \
    return tmp; \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type) \
  bool __atomic_compare_exchange_##n(type *ptr, type *expected, type desired, \
                                     int success, int failure) { \
    if (lockfree(ptr)) \
      return __c11_atomic_compare_exchange_strong( \
          (_Atomic(type) *)ptr, expected, desired, success, failure); \
    Lock *l = lock_for_pointer(ptr); \
    lock(l); \
    if (*ptr == *expected) { \
      *ptr = desired; \
      unlock(l); \
      return true; \
    } \
    *expected = *ptr; \
    unlock(l); \
    return false; \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

////////////////////////////////////////////////////////////////////////////////
// Atomic read-modify-write operations for integers of various sizes.
////////////////////////////////////////////////////////////////////////////////
#define ATOMIC_RMW(n, lockfree, type, opname, op) \
  type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) { \
    if (lockfree(ptr)) \
      return __c11_atomic_fetch_##opname((_Atomic(type) *)ptr, val, model); \
    Lock *l = lock_for_pointer(ptr); \
    lock(l); \
    type tmp = *ptr; \
    *ptr = tmp op val; \
    unlock(l); \
    return tmp; \
  }

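// For illustration, the "add" instantiation below generates functions such as
//
//   uint32_t __atomic_fetch_add_4(uint32_t *ptr, uint32_t val, int model);
//
// which either issue a lock-free __c11_atomic_fetch_add or fall back to the
// lock-protected read-modify-write in ATOMIC_RMW above.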
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE