// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// This implementation uses C++11 atomics' member functions. The code base is
// currently written assuming atomicity revolves around accesses instead of
// C++11's memory locations. The burden is on the programmer to ensure that all
// memory locations accessed atomically are never accessed non-atomically (tsan
// should help with this).
//
// Of note in this implementation:
//  * All NoBarrier variants are implemented as relaxed.
//  * All Barrier variants are implemented as sequentially consistent.
//  * Compare exchange's failure ordering is always the same as the success one
//    (except for release, which fails as relaxed): using a weaker ordering is
//    only valid under certain uses of compare exchange.
//  * Acquire store doesn't exist in the C11 memory model; it is instead
//    implemented as a relaxed store followed by a sequentially consistent
//    fence (see the sketch below).
//  * Release load doesn't exist in the C11 memory model; it is instead
//    implemented as a sequentially consistent fence followed by a relaxed
//    load (see the sketch below).
//  * Atomic increment is expected to return the post-incremented value,
//    whereas C11 fetch add returns the previous value. The implementation
//    therefore needs to increment twice (which the compiler should be able to
//    detect and optimize).
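//
// As a hedged illustration of the two emulated operations above (these
// helpers are not part of this header's API; the names and the Atomic32
// signatures are for exposition only):
//
//   inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
//     __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
//     std::atomic_thread_fence(std::memory_order_seq_cst);
//   }
//
//   inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
//     std::atomic_thread_fence(std::memory_order_seq_cst);
//     return __atomic_load_n(ptr, __ATOMIC_RELAXED);
//   }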

#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <atomic>

#include "src/base/build_config.h"
#include "src/base/macros.h"

namespace v8 {
namespace base {

// This implementation is transitional and maintains the original API for
// atomicops.h.

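// Emits a full sequentially consistent memory fence.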
inline void SeqCst_MemoryFence() {
#if defined(__GLIBCXX__)
  // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
  // not defined, leading to the linker complaining about undefined references.
  __atomic_thread_fence(std::memory_order_seq_cst);
#else
  std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}

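// Atomically compares *ptr against old_value and, if they match, stores
// new_value. Returns the value of *ptr observed before the operation, which
// equals old_value exactly when the swap succeeded. The same contract holds
// for the Acquire/Release and 64-bit variants below.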
inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;
}

inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}

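// Returns the post-incremented value. __atomic_fetch_add returns the previous
// value, so the increment is applied once more to the result (see the note at
// the top of this file).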
inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
}

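// Uses acquire ordering on both success and failure, per the compare-exchange
// note at the top of this file.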
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  return old_value;
}

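// Uses release ordering on success; a failed exchange performs no store, so
// it falls back to relaxed ordering.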
inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
                                      Atomic8 new_value) {
  bool result = __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                                            __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  USE(result);  // Silence gcc's unused-variable warning.
  return old_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  return old_value;
}

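// Atomic stores with the memory order named by each function.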
inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}

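// Atomic loads with the memory order named by each function.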
inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}

#if defined(V8_HOST_ARCH_64_BIT)

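// 64-bit variants of the operations above, compiled only on 64-bit hosts.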
inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;
}

inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}

inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  return old_value;
}

inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}

inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}

#endif  // defined(V8_HOST_ARCH_64_BIT)

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_