/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_ATOMIC_H_
#define ART_RUNTIME_ATOMIC_H_

#include <stdint.h>
#include <atomic>
#include <limits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"

namespace art {

class Mutex;

// QuasiAtomic encapsulates two separate facilities that we are
// trying to move away from: "quasiatomic" 64 bit operations
// and custom memory fences. For the time being, they remain
// exposed. Clients should be converted to use the Atomic class
// below whenever possible, and should eventually use C++11 atomics.
// The two facilities that do not have a good C++11 analog are
// ThreadFenceForConstructor and Atomic::*JavaData.
//
// NOTE: Two "quasiatomic" operations on the exact same memory address
// are guaranteed to operate atomically with respect to each other,
// but no guarantees are made about quasiatomic operations mixed with
// non-quasiatomic operations on the same address, nor about
// quasiatomic operations that are performed on partially-overlapping
// memory.
class QuasiAtomic {
#if defined(__mips__) && !defined(__LP64__)
  static constexpr bool kNeedSwapMutexes = true;
#elif defined(__mips__) && defined(__LP64__)
  // TODO: Does mips64 still need swap mutexes for Cas64?
  static constexpr bool kNeedSwapMutexes = true;
#else
  static constexpr bool kNeedSwapMutexes = false;
#endif

 public:
  static void Startup();

  static void Shutdown();

  // Reads the 64-bit value at "addr" without tearing.
  static int64_t Read64(volatile const int64_t* addr) {
    if (!kNeedSwapMutexes) {
      int64_t value;
#if defined(__LP64__)
      value = *addr;
#else
#if defined(__arm__)
#if defined(__ARM_FEATURE_LPAE)
      // With LPAE support (such as Cortex-A15), ldrd is defined not to tear.
      __asm__ __volatile__("@ QuasiAtomic::Read64\n"
                           "ldrd %0, %H0, %1"
                           : "=r" (value)
                           : "m" (*addr));
#else
      // Exclusive loads are defined not to tear; clearing the exclusive state isn't necessary.
      __asm__ __volatile__("@ QuasiAtomic::Read64\n"
                           "ldrexd %0, %H0, %1"
                           : "=r" (value)
                           : "Q" (*addr));
#endif
#elif defined(__i386__)
      __asm__ __volatile__(
          "movq %1, %0\n"
          : "=x" (value)
          : "m" (*addr));
#else
      LOG(FATAL) << "Unsupported architecture";
#endif
#endif  // defined(__LP64__)
      return value;
    } else {
      return SwapMutexRead64(addr);
    }
  }

  // Writes to the 64-bit value at "addr" without tearing.
  static void Write64(volatile int64_t* addr, int64_t value) {
    if (!kNeedSwapMutexes) {
#if defined(__LP64__)
      *addr = value;
#else
#if defined(__arm__)
#if defined(__ARM_FEATURE_LPAE)
      // With LPAE support (such as Cortex-A15), strd is defined not to tear.
      __asm__ __volatile__("@ QuasiAtomic::Write64\n"
                           "strd %1, %H1, %0"
                           : "=m"(*addr)
                           : "r" (value));
#else
      // The write is done as a swap so that the cache-line is in the exclusive state for the store.
      int64_t prev;
      int status;
      do {
        __asm__ __volatile__("@ QuasiAtomic::Write64\n"
                             "ldrexd %0, %H0, %2\n"
                             "strexd %1, %3, %H3, %2"
                             : "=&r" (prev), "=&r" (status), "+Q"(*addr)
                             : "r" (value)
                             : "cc");
      } while (UNLIKELY(status != 0));
#endif
#elif defined(__i386__)
      __asm__ __volatile__(
          "movq %1, %0"
          : "=m" (*addr)
          : "x" (value));
#else
      LOG(FATAL) << "Unsupported architecture";
#endif
#endif  // defined(__LP64__)
    } else {
      SwapMutexWrite64(addr, value);
    }
  }
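
  // Illustrative usage sketch (not part of this header's API): reading and writing a 64-bit
  // field without tearing but with no ordering guarantees. The field name `value_` below is
  // hypothetical.
  //
  //   volatile int64_t value_;                          // Assumed to be 8-byte aligned.
  //   int64_t snapshot = QuasiAtomic::Read64(&value_);  // Untorn read, no ordering implied.
  //   QuasiAtomic::Write64(&value_, snapshot + 1);      // Untorn write; not an atomic increment.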

  // Atomically compare the value at "addr" to "old_value"; if equal, replace it with "new_value"
  // and return true. Otherwise, don't swap, and return false.
  // This is fully ordered, i.e. it has C++11 memory_order_seq_cst
  // semantics (assuming all other accesses use a mutex if this one does).
  // This has "strong" semantics; if it fails then it is guaranteed that
  // at some point during the execution of Cas64, *addr was not equal to
  // old_value.
  static bool Cas64(int64_t old_value, int64_t new_value, volatile int64_t* addr) {
    if (!kNeedSwapMutexes) {
      return __sync_bool_compare_and_swap(addr, old_value, new_value);
    } else {
      return SwapMutexCas64(old_value, new_value, addr);
    }
  }
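
  // A minimal retry-loop sketch built on Cas64; the member name `counter_` is hypothetical.
  // On CAS failure the current value is re-read and the update retried.
  //
  //   volatile int64_t counter_;
  //   int64_t old_value;
  //   do {
  //     old_value = QuasiAtomic::Read64(&counter_);
  //   } while (!QuasiAtomic::Cas64(old_value, old_value + 1, &counter_));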

  // Does the architecture provide reasonable atomic long operations or do we fall back on mutexes?
  static bool LongAtomicsUseMutexes() {
    return kNeedSwapMutexes;
  }

  static void ThreadFenceAcquire() {
    std::atomic_thread_fence(std::memory_order_acquire);
  }

  static void ThreadFenceRelease() {
    std::atomic_thread_fence(std::memory_order_release);
  }

  static void ThreadFenceForConstructor() {
#if defined(__aarch64__)
    __asm__ __volatile__("dmb ishst" : : : "memory");
#else
    std::atomic_thread_fence(std::memory_order_release);
#endif
  }

  static void ThreadFenceSequentiallyConsistent() {
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }
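
  // Hedged sketch of the publication pattern ThreadFenceForConstructor is intended for:
  // initialize an object's fields, fence, then publish the reference. Readers must still use a
  // suitable acquire (or dependency-ordered) load of the published pointer. The names `obj`,
  // `InitFields` and `published_` are hypothetical.
  //
  //   obj->InitFields();                          // Plain stores to the new object's fields.
  //   QuasiAtomic::ThreadFenceForConstructor();   // Order the field stores before publication.
  //   published_ = obj;                           // Store that makes the object reachable.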

 private:
  static Mutex* GetSwapMutex(const volatile int64_t* addr);
  static int64_t SwapMutexRead64(volatile const int64_t* addr);
  static void SwapMutexWrite64(volatile int64_t* addr, int64_t val);
  static bool SwapMutexCas64(int64_t old_value, int64_t new_value, volatile int64_t* addr);

  // We stripe across a bunch of different mutexes to reduce contention.
  static constexpr size_t kSwapMutexCount = 32;
  static std::vector<Mutex*>* gSwapMutexes;

  DISALLOW_COPY_AND_ASSIGN(QuasiAtomic);
};

template<typename T>
class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
 public:
  Atomic<T>() : std::atomic<T>(0) { }

  explicit Atomic<T>(T value) : std::atomic<T>(value) { }

  // Load from memory without ordering or synchronization constraints.
  T LoadRelaxed() const {
    return this->load(std::memory_order_relaxed);
  }

  // Word tearing allowed, but may race.
  // TODO: Optimize?
  // There has been some discussion of eventually disallowing word
  // tearing for Java data loads.
  T LoadJavaData() const {
    return this->load(std::memory_order_relaxed);
  }

  // Load from memory with a total ordering.
  // Corresponds exactly to a Java volatile load.
  T LoadSequentiallyConsistent() const {
    return this->load(std::memory_order_seq_cst);
  }

  // Store to memory without ordering or synchronization constraints.
  void StoreRelaxed(T desired) {
    this->store(desired, std::memory_order_relaxed);
  }

  // Word tearing allowed, but may race.
  void StoreJavaData(T desired) {
    this->store(desired, std::memory_order_relaxed);
  }

  // Store to memory with release ordering.
  void StoreRelease(T desired) {
    this->store(desired, std::memory_order_release);
  }

  // Store to memory with a total ordering.
  void StoreSequentiallyConsistent(T desired) {
    this->store(desired, std::memory_order_seq_cst);
  }

  // Atomically replace the value with desired value if it matches the expected value.
  // Participates in total ordering of atomic operations.
  bool CompareExchangeStrongSequentiallyConsistent(T expected_value, T desired_value) {
    return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_seq_cst);
  }

  // The same, except it may fail spuriously.
  bool CompareExchangeWeakSequentiallyConsistent(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_seq_cst);
  }

  // Atomically replace the value with desired value if it matches the expected value. Doesn't
  // imply ordering or synchronization constraints.
  bool CompareExchangeStrongRelaxed(T expected_value, T desired_value) {
    return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_relaxed);
  }

  // The same, except it may fail spuriously.
  bool CompareExchangeWeakRelaxed(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_relaxed);
  }

  // Atomically replace the value with desired value if it matches the expected value. Prior writes
  // made to other memory locations by the thread that did the release become visible in this
  // thread.
  bool CompareExchangeWeakAcquire(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_acquire);
  }

  // Atomically replace the value with desired value if it matches the expected value. Prior writes
  // to other memory locations become visible to the threads that do a consume or an acquire on the
  // same location.
  bool CompareExchangeWeakRelease(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_release);
  }
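
  // Sketch of the usual pattern for the weak compare-exchange variants: because they may fail
  // spuriously, they are meant to be retried in a loop. The names `flags` and `kSomeBit` are
  // hypothetical.
  //
  //   Atomic<int32_t> flags;
  //   int32_t expected, desired;
  //   do {
  //     expected = flags.LoadRelaxed();
  //     desired = expected | kSomeBit;
  //   } while (!flags.CompareExchangeWeakRelaxed(expected, desired));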

  T FetchAndAddSequentiallyConsistent(const T value) {
    return this->fetch_add(value, std::memory_order_seq_cst);  // Return old value.
  }

  T FetchAndSubSequentiallyConsistent(const T value) {
    return this->fetch_sub(value, std::memory_order_seq_cst);  // Return old value.
  }

  T FetchAndOrSequentiallyConsistent(const T value) {
    return this->fetch_or(value, std::memory_order_seq_cst);  // Return old value.
  }

  T FetchAndAndSequentiallyConsistent(const T value) {
    return this->fetch_and(value, std::memory_order_seq_cst);  // Return old value.
  }

  volatile T* Address() {
    return reinterpret_cast<T*>(this);
  }

  static T MaxValue() {
    return std::numeric_limits<T>::max();
  }
};

typedef Atomic<int32_t> AtomicInteger;

static_assert(sizeof(AtomicInteger) == sizeof(int32_t), "Weird AtomicInteger size");
static_assert(alignof(AtomicInteger) == alignof(int32_t),
              "AtomicInteger alignment differs from that of underlying type");
static_assert(sizeof(Atomic<int64_t>) == sizeof(int64_t), "Weird Atomic<int64> size");

// Assert the alignment of 64-bit integers is 64-bit. This isn't true on certain 32-bit
// architectures (e.g. x86-32) but we know that 64-bit integers here are arranged to be 8-byte
// aligned.
#if defined(__LP64__)
static_assert(alignof(Atomic<int64_t>) == alignof(int64_t),
              "Atomic<int64> alignment differs from that of underlying type");
#endif
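
// Hedged usage sketch for AtomicInteger (illustration only; the variable name is hypothetical):
//
//   AtomicInteger ref_count;
//   ref_count.StoreRelaxed(0);
//   ref_count.FetchAndAddSequentiallyConsistent(1);             // Returns the old value.
//   int32_t current = ref_count.LoadSequentiallyConsistent();   // Java-volatile-style load.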

}  // namespace art

#endif  // ART_RUNTIME_ATOMIC_H_