// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASE_ATOMICOPS_INTERNALS_STD_H_
#define V8_BASE_ATOMICOPS_INTERNALS_STD_H_

#include <atomic>

#include "src/base/build_config.h"
#include "src/base/macros.h"

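// This header implements V8's low-level atomic operations (Atomic8,
// Atomic32 and, on 64-bit hosts, Atomic64) in terms of the C++11 <atomic>
// primitives.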
namespace v8 {
namespace base {

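// Helpers that reinterpret plain AtomicN storage as std::atomic<T> so the
// operations below can be expressed with the <atomic> API. This relies on
// std::atomic<T> having the same size and representation as T for the
// integral types used here.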
namespace helper {
template <typename T>
volatile std::atomic<T>* to_std_atomic(volatile T* ptr) {
  return reinterpret_cast<volatile std::atomic<T>*>(ptr);
}
template <typename T>
volatile const std::atomic<T>* to_std_atomic_const(volatile const T* ptr) {
  return reinterpret_cast<volatile const std::atomic<T>*>(ptr);
}
}  // namespace helper

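// Emits a full (sequentially consistent) memory fence.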
inline void SeqCst_MemoryFence() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

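// Atomically compares *ptr with old_value and, on a match, writes new_value.
// Returns the value of *ptr observed by the operation: old_value if the swap
// happened, the current value otherwise. No ordering constraints.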
inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_relaxed, std::memory_order_relaxed);
  return old_value;
}

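// Atomically stores new_value into *ptr and returns the previous value.
// No ordering constraints.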
inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
                                       std::memory_order_relaxed);
}

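// Atomically adds increment to *ptr and returns the new (incremented) value.
// fetch_add returns the old value, hence the added increment. No ordering
// constraints.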
inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
                                                    increment,
                                                    std::memory_order_relaxed);
}

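// Same as Relaxed_AtomicIncrement, but the increment is performed with
// sequentially consistent ordering.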
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
                                                    increment,
                                                    std::memory_order_seq_cst);
}

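// Compare-and-swap with acquire ordering: later memory accesses cannot be
// reordered before the operation.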
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_acquire, std::memory_order_acquire);
  return old_value;
}

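// Compare-and-swap with release ordering: earlier memory accesses cannot be
// reordered past the operation.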
inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
                                      Atomic8 new_value) {
  bool result = std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_release, std::memory_order_relaxed);
  USE(result);  // Avoid an unused-variable warning from gcc.
  return old_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_release, std::memory_order_relaxed);
  return old_value;
}

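// Atomic stores. Relaxed_Store imposes no ordering; Release_Store makes
// earlier memory accesses visible before the store.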
inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_relaxed);
}

inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_relaxed);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_release);
}

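// Atomic loads. Relaxed_Load imposes no ordering; Acquire_Load prevents
// later memory accesses from being reordered before the load.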
inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_relaxed);
}

inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_acquire);
}

#if defined(V8_HOST_ARCH_64_BIT)

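// 64-bit variants of the operations above, available only on 64-bit hosts.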
inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_relaxed, std::memory_order_relaxed);
  return old_value;
}

inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
                                       std::memory_order_relaxed);
}

inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
                                                    increment,
                                                    std::memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
                                                    increment,
                                                    std::memory_order_seq_cst);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_acquire, std::memory_order_acquire);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_release, std::memory_order_relaxed);
  return old_value;
}

inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_relaxed);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_release);
}

inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_acquire);
}

#endif  // defined(V8_HOST_ARCH_64_BIT)
}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_STD_H_