1 /* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #ifndef ART_RUNTIME_MIRROR_OBJECT_READBARRIER_INL_H_ 18 #define ART_RUNTIME_MIRROR_OBJECT_READBARRIER_INL_H_ 19 20 #include "object.h" 21 22 #include "base/atomic.h" 23 #include "heap_poisoning.h" 24 #include "lock_word-inl.h" 25 #include "object_reference-inl.h" 26 #include "read_barrier.h" 27 #include "runtime.h" 28 29 namespace art { 30 namespace mirror { 31 32 template<VerifyObjectFlags kVerifyFlags> 33 inline LockWord Object::GetLockWord(bool as_volatile) { 34 if (as_volatile) { 35 return LockWord(GetField32Volatile<kVerifyFlags>(MonitorOffset())); 36 } 37 return LockWord(GetField32<kVerifyFlags>(MonitorOffset())); 38 } 39 40 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> 41 inline bool Object::CasField32(MemberOffset field_offset, 42 int32_t old_value, 43 int32_t new_value, 44 CASMode mode, 45 std::memory_order memory_order) { 46 if (kCheckTransaction) { 47 DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); 48 } 49 if (kTransactionActive) { 50 Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true); 51 } 52 if (kVerifyFlags & kVerifyThis) { 53 VerifyObject(this); 54 } 55 uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value(); 56 AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr); 57 58 return atomic_addr->CompareAndSet(old_value, new_value, mode, memory_order); 59 } 60 61 inline bool Object::CasLockWord(LockWord old_val, 62 LockWord new_val, 63 CASMode mode, 64 std::memory_order memory_order) { 65 // Force use of non-transactional mode and do not check. 66 return CasField32<false, false>(MonitorOffset(), 67 old_val.GetValue(), 68 new_val.GetValue(), 69 mode, 70 memory_order); 71 } 72 73 inline uint32_t Object::GetReadBarrierState(uintptr_t* fake_address_dependency) { 74 if (!kUseBakerReadBarrier) { 75 LOG(FATAL) << "Unreachable"; 76 UNREACHABLE(); 77 } 78 #if defined(__arm__) 79 uintptr_t obj = reinterpret_cast<uintptr_t>(this); 80 uintptr_t result; 81 DCHECK_EQ(OFFSETOF_MEMBER(Object, monitor_), 4U); 82 // Use inline assembly to prevent the compiler from optimizing away the false dependency. 83 __asm__ __volatile__( 84 "ldr %[result], [%[obj], #4]\n\t" 85 // This instruction is enough to "fool the compiler and the CPU" by having `fad` always be 86 // null, without them being able to assume that fact. 87 "eor %[fad], %[result], %[result]\n\t" 88 : [result] "+r" (result), [fad] "=r" (*fake_address_dependency) 89 : [obj] "r" (obj)); 90 DCHECK_EQ(*fake_address_dependency, 0U); 91 LockWord lw(static_cast<uint32_t>(result)); 92 uint32_t rb_state = lw.ReadBarrierState(); 93 return rb_state; 94 #elif defined(__aarch64__) 95 uintptr_t obj = reinterpret_cast<uintptr_t>(this); 96 uintptr_t result; 97 DCHECK_EQ(OFFSETOF_MEMBER(Object, monitor_), 4U); 98 // Use inline assembly to prevent the compiler from optimizing away the false dependency. 99 __asm__ __volatile__( 100 "ldr %w[result], [%[obj], #4]\n\t" 101 // This instruction is enough to "fool the compiler and the CPU" by having `fad` always be 102 // null, without them being able to assume that fact. 103 "eor %[fad], %[result], %[result]\n\t" 104 : [result] "+r" (result), [fad] "=r" (*fake_address_dependency) 105 : [obj] "r" (obj)); 106 DCHECK_EQ(*fake_address_dependency, 0U); 107 LockWord lw(static_cast<uint32_t>(result)); 108 uint32_t rb_state = lw.ReadBarrierState(); 109 return rb_state; 110 #elif defined(__i386__) || defined(__x86_64__) 111 LockWord lw = GetLockWord(false); 112 // i386/x86_64 don't need fake address dependency. Use a compiler fence to avoid compiler 113 // reordering. 114 *fake_address_dependency = 0; 115 std::atomic_signal_fence(std::memory_order_acquire); 116 uint32_t rb_state = lw.ReadBarrierState(); 117 return rb_state; 118 #else 119 UNUSED(fake_address_dependency); 120 LOG(FATAL) << "Unsupported architecture."; 121 UNREACHABLE(); 122 #endif 123 } 124 125 inline uint32_t Object::GetReadBarrierState() { 126 if (!kUseBakerReadBarrier) { 127 LOG(FATAL) << "Unreachable"; 128 UNREACHABLE(); 129 } 130 DCHECK(kUseBakerReadBarrier); 131 LockWord lw(GetFieldPrimitive<uint32_t, /*kIsVolatile=*/false>(MonitorOffset())); 132 uint32_t rb_state = lw.ReadBarrierState(); 133 DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state; 134 return rb_state; 135 } 136 137 inline uint32_t Object::GetReadBarrierStateAcquire() { 138 if (!kUseBakerReadBarrier) { 139 LOG(FATAL) << "Unreachable"; 140 UNREACHABLE(); 141 } 142 LockWord lw(GetFieldAcquire<uint32_t>(MonitorOffset())); 143 uint32_t rb_state = lw.ReadBarrierState(); 144 DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state; 145 return rb_state; 146 } 147 148 template<std::memory_order kMemoryOrder> 149 inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state) { 150 if (!kUseBakerReadBarrier) { 151 LOG(FATAL) << "Unreachable"; 152 UNREACHABLE(); 153 } 154 DCHECK(ReadBarrier::IsValidReadBarrierState(expected_rb_state)) << expected_rb_state; 155 DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state; 156 LockWord expected_lw; 157 LockWord new_lw; 158 do { 159 LockWord lw = GetLockWord(false); 160 if (UNLIKELY(lw.ReadBarrierState() != expected_rb_state)) { 161 // Lost the race. 162 return false; 163 } 164 expected_lw = lw; 165 expected_lw.SetReadBarrierState(expected_rb_state); 166 new_lw = lw; 167 new_lw.SetReadBarrierState(rb_state); 168 // ConcurrentCopying::ProcessMarkStackRef uses this with 169 // `kMemoryOrder` == `std::memory_order_release`. 170 // If `kMemoryOrder` == `std::memory_order_release`, use a CAS release so that when GC updates 171 // all the fields of an object and then changes the object from gray to black (non-gray), the 172 // field updates (stores) will be visible (won't be reordered after this CAS.) 173 } while (!CasLockWord(expected_lw, new_lw, CASMode::kWeak, kMemoryOrder)); 174 return true; 175 } 176 177 inline bool Object::AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit) { 178 LockWord expected_lw; 179 LockWord new_lw; 180 do { 181 LockWord lw = GetLockWord(false); 182 if (UNLIKELY(lw.MarkBitState() != expected_mark_bit)) { 183 // Lost the race. 184 return false; 185 } 186 expected_lw = lw; 187 new_lw = lw; 188 new_lw.SetMarkBitState(mark_bit); 189 // Since this is only set from the mutator, we can use the non-release CAS. 190 } while (!CasLockWord(expected_lw, new_lw, CASMode::kWeak, std::memory_order_relaxed)); 191 return true; 192 } 193 194 } // namespace mirror 195 } // namespace art 196 197 #endif // ART_RUNTIME_MIRROR_OBJECT_READBARRIER_INL_H_ 198