// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain. If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative. You should assume only properties explicitly guaranteed by the
// specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break. If you
// do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The NoBarrier
// versions are provided when no barriers are needed:
//   NoBarrier_Store()
//   NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
//

#ifndef V8_BASE_ATOMICOPS_H_
#define V8_BASE_ATOMICOPS_H_

#include "include/v8stdint.h"
#include "src/base/build_config.h"

#if defined(_WIN32) && defined(V8_HOST_ARCH_64_BIT)
// windows.h #defines this (only on x64). This causes problems because the
// public API also uses MemoryBarrier as the public name for this fence. So, on
// X64, undef it, and call its documented
// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
// implementation directly.
#undef MemoryBarrier
#endif

namespace v8 {
namespace base {

typedef char Atomic8;
typedef int32_t Atomic32;
#ifdef V8_HOST_ARCH_64_BIT
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__ILP32__)
typedef int64_t Atomic64;
#else
typedef intptr_t Atomic64;
#endif
#endif

// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
typedef intptr_t AtomicWord;

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
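
// Illustrative sketch (not part of this header's API): a typical
// compare-and-swap retry loop built on NoBarrier_CompareAndSwap. The helper
// name IncrementViaCas is hypothetical, and NoBarrier_AtomicIncrement below
// already provides this operation directly; the loop only shows how the CAS
// contract composes when no ordering is required.
//
//   Atomic32 IncrementViaCas(volatile Atomic32* counter, Atomic32 delta) {
//     Atomic32 old_value = NoBarrier_Load(counter);  // declared below
//     for (;;) {
//       Atomic32 prev =
//           NoBarrier_CompareAndSwap(counter, old_value, old_value + delta);
//       if (prev == old_value) return old_value + delta;  // swap succeeded
//       old_value = prev;  // lost the race; retry with the value we observed
//     }
//   }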

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);

Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                 Atomic32 increment);

// The following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions. "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access. (An illustrative spin-lock sketch follows the declarations below.)
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);

void MemoryBarrier();
void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value);
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);

Atomic8 NoBarrier_Load(volatile const Atomic8* ptr);
Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);

// 64-bit atomic operations (only available on 64-bit processors).
#ifdef V8_HOST_ARCH_64_BIT
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);

Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif  // V8_HOST_ARCH_64_BIT

} }  // namespace v8::base
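
// Illustrative sketch (not part of this header's API): the Acquire/Release
// routines above are the intended building blocks for locks. A minimal spin
// lock could look like the following; the names SpinLockAcquire and
// SpinLockRelease are hypothetical.
//
//   void SpinLockAcquire(volatile Atomic32* lock) {
//     // 0 = free, 1 = held. Acquire semantics keep accesses inside the
//     // critical section from being reordered before the lock is taken.
//     while (Acquire_CompareAndSwap(lock, 0, 1) != 0) {
//       // Spin; a production lock would back off or yield here.
//     }
//   }
//
//   void SpinLockRelease(volatile Atomic32* lock) {
//     // Release semantics keep accesses inside the critical section from
//     // being reordered after the store that frees the lock.
//     Release_Store(lock, 0);
//   }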

// Include our platform specific implementation.
#if defined(THREAD_SANITIZER)
#include "src/base/atomicops_internals_tsan.h"
#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "src/base/atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__)
#include "src/base/atomicops_internals_mac.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64
#include "src/base/atomicops_internals_arm64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "src/base/atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "src/base/atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
#include "src/base/atomicops_internals_mips_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS64
#include "src/base/atomicops_internals_mips64_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif

// On some platforms we need additional declarations to make
// AtomicWord compatible with our other Atomic* types.
#if defined(__APPLE__) || defined(__OpenBSD__)
#include "src/base/atomicops_internals_atomicword_compat.h"
#endif

#endif  // V8_BASE_ATOMICOPS_H_