//===-- atomic_helpers.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_ATOMIC_H_
#define SCUDO_ATOMIC_H_

#include "internal_defs.h"

namespace scudo {

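// Mirrors the C++11 memory_order values. The enumerators must match the
// compiler's __ATOMIC_* constants (checked by the static_asserts below) so
// that they can be passed straight through to the __atomic_* builtins.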
enum memory_order {
  memory_order_relaxed = 0,
  memory_order_consume = 1,
  memory_order_acquire = 2,
  memory_order_release = 3,
  memory_order_acq_rel = 4,
  memory_order_seq_cst = 5
};
static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "");
static_assert(memory_order_consume == __ATOMIC_CONSUME, "");
static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
static_assert(memory_order_release == __ATOMIC_RELEASE, "");
static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "");
static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "");

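// Each atomic_* type wraps its value in a struct so that it is only accessed
// through the helpers below; the ValDoNotUse name is a reminder not to touch
// the field directly.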
struct atomic_u8 {
  typedef u8 Type;
  volatile Type ValDoNotUse;
};

struct atomic_u16 {
  typedef u16 Type;
  volatile Type ValDoNotUse;
};

struct atomic_s32 {
  typedef s32 Type;
  volatile Type ValDoNotUse;
};

struct atomic_u32 {
  typedef u32 Type;
  volatile Type ValDoNotUse;
};

struct atomic_u64 {
  typedef u64 Type;
  // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
  alignas(8) volatile Type ValDoNotUse;
};

struct atomic_uptr {
  typedef uptr Type;
  volatile Type ValDoNotUse;
};

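// Atomically loads the value held by A with memory order MO. The DCHECK
// verifies that A is aligned to the size of the atomic object.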
template <typename T>
inline typename T::Type atomic_load(const volatile T *A, memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  typename T::Type V;
  __atomic_load(&A->ValDoNotUse, &V, MO);
  return V;
}

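// Atomically stores V into A with memory order MO.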
template <typename T>
inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  __atomic_store(&A->ValDoNotUse, &V, MO);
}

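// The memory order parameter is only present to mirror the standard
// interface; the fence is always a full barrier via __sync_synchronize().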
inline void atomic_thread_fence(memory_order) { __sync_synchronize(); }

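// Read-modify-write helpers. Each one atomically applies its operation to A
// and returns the value A held immediately before the operation.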
template <typename T>
inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
                                         memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
}

template <typename T>
inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
                                         memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
}

template <typename T>
inline typename T::Type atomic_fetch_and(volatile T *A, typename T::Type V,
                                         memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  return __atomic_fetch_and(&A->ValDoNotUse, V, MO);
}

template <typename T>
inline typename T::Type atomic_fetch_or(volatile T *A, typename T::Type V,
                                        memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  return __atomic_fetch_or(&A->ValDoNotUse, V, MO);
}

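// Atomically replaces the value held by A with V and returns the previous
// value.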
template <typename T>
inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
                                        memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  typename T::Type R;
  __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
  return R;
}

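// Strong compare-and-swap: if A holds *Cmp, it is replaced with Xchg and true
// is returned; otherwise *Cmp is updated with the value actually observed and
// false is returned. The failure ordering is always relaxed.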
template <typename T>
inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
                                           typename T::Type Xchg,
                                           memory_order MO) {
  return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
                                   __ATOMIC_RELAXED);
}

// Clutter-reducing helpers.

template <typename T>
inline typename T::Type atomic_load_relaxed(const volatile T *A) {
  return atomic_load(A, memory_order_relaxed);
}

template <typename T>
inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
  atomic_store(A, V, memory_order_relaxed);
}

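// Compare-and-swap that folds success/failure into the return value: the
// result is the value observed in A, which equals Cmp exactly when the
// exchange succeeded. Acquire ordering on success, relaxed on failure.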
template <typename T>
inline typename T::Type atomic_compare_exchange(volatile T *A,
                                                typename T::Type Cmp,
                                                typename T::Type Xchg) {
  atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
  return Cmp;
}
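
// A minimal usage sketch (illustrative only; Counter and Previous are
// hypothetical names, not part of this header):
//   atomic_u32 Counter = {};
//   atomic_store_relaxed(&Counter, 0U);
//   u32 Previous = atomic_fetch_add(&Counter, 1U, memory_order_relaxed);
//   // Reset the counter, but only if nobody else has modified it since.
//   if (atomic_compare_exchange(&Counter, Previous + 1, 0U) == Previous + 1) {
//     // The CAS succeeded and Counter is now 0.
//   }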

} // namespace scudo

#endif // SCUDO_ATOMIC_H_