/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_OBJECT_READBARRIER_INL_H_
#define ART_RUNTIME_MIRROR_OBJECT_READBARRIER_INL_H_

#include "object.h"

#include "base/atomic.h"
#include "class_linker.h"
#include "heap_poisoning.h"
#include "lock_word-inl.h"
#include "object_reference-inl.h"
#include "read_barrier.h"
#include "runtime.h"

namespace art HIDDEN {
namespace mirror {

template<VerifyObjectFlags kVerifyFlags>
inline LockWord Object::GetLockWord(bool as_volatile) {
  if (as_volatile) {
    return LockWord(GetField32Volatile<kVerifyFlags>(MonitorOffset()));
  }
  return LockWord(GetField32<kVerifyFlags>(MonitorOffset()));
}

template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline bool Object::CasField32(MemberOffset field_offset,
                               int32_t old_value,
                               int32_t new_value,
                               CASMode mode,
                               std::memory_order memory_order) {
  if (kCheckTransaction) {
    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
  }
  if (kVerifyFlags & kVerifyThis) {
    VerifyObject(this);
  }
  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
  AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);

  bool success = atomic_addr->CompareAndSet(old_value, new_value, mode, memory_order);
  if (kTransactionActive && success) {
    Runtime::Current()->GetClassLinker()->RecordWriteField32(
        this, field_offset, old_value, /*is_volatile=*/ true);
  }
  return success;
}

inline bool Object::CasLockWord(LockWord old_val,
                                LockWord new_val,
                                CASMode mode,
                                std::memory_order memory_order) {
  // Force use of non-transactional mode and do not check.
  return CasField32<false, false>(MonitorOffset(),
                                  old_val.GetValue(),
                                  new_val.GetValue(),
                                  mode,
                                  memory_order);
}
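
// A minimal usage sketch (comment only, not compiled): a hypothetical caller updating the lock
// word via a weak CAS, mirroring the retry loops further below in this file. A false result means
// another thread changed the lock word first; callers typically re-read and retry.
//
//   LockWord expected = obj->GetLockWord(/*as_volatile=*/ false);
//   LockWord desired = expected;
//   desired.SetMarkBitState(1u);
//   bool ok = obj->CasLockWord(expected, desired, CASMode::kWeak, std::memory_order_relaxed);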

inline uint32_t Object::GetReadBarrierState(uintptr_t* fake_address_dependency) {
  if (!kUseBakerReadBarrier) {
    LOG(FATAL) << "Unreachable";
    UNREACHABLE();
  }
#if defined(__arm__)
  uintptr_t obj = reinterpret_cast<uintptr_t>(this);
  uintptr_t result;
  DCHECK_EQ(OFFSETOF_MEMBER(Object, monitor_), 4U);
  // Use inline assembly to prevent the compiler from optimizing away the false dependency.
  __asm__ __volatile__(
      "ldr %[result], [%[obj], #4]\n\t"
      // This instruction is enough to "fool the compiler and the CPU" by having `fad` always be
      // zero, without them being able to assume that fact.
      "eor %[fad], %[result], %[result]\n\t"
      : [result] "+r" (result), [fad] "=r" (*fake_address_dependency)
      : [obj] "r" (obj));
  DCHECK_EQ(*fake_address_dependency, 0U);
  LockWord lw(static_cast<uint32_t>(result));
  uint32_t rb_state = lw.ReadBarrierState();
  return rb_state;
#elif defined(__aarch64__)
  uintptr_t obj = reinterpret_cast<uintptr_t>(this);
  uintptr_t result;
  DCHECK_EQ(OFFSETOF_MEMBER(Object, monitor_), 4U);
  // Use inline assembly to prevent the compiler from optimizing away the false dependency.
  __asm__ __volatile__(
      "ldr %w[result], [%[obj], #4]\n\t"
      // This instruction is enough to "fool the compiler and the CPU" by having `fad` always be
      // zero, without them being able to assume that fact.
      "eor %[fad], %[result], %[result]\n\t"
      : [result] "+r" (result), [fad] "=r" (*fake_address_dependency)
      : [obj] "r" (obj));
  DCHECK_EQ(*fake_address_dependency, 0U);
  LockWord lw(static_cast<uint32_t>(result));
  uint32_t rb_state = lw.ReadBarrierState();
  return rb_state;
#elif defined(__i386__) || defined(__x86_64__) || defined(__riscv)
  // TODO(riscv64): add an arch-specific implementation.
  LockWord lw = GetLockWord(false);
  // i386/x86_64 do not need a fake address dependency. Use a compiler fence to avoid compiler
  // reordering.
  *fake_address_dependency = 0;
  std::atomic_signal_fence(std::memory_order_acquire);
  uint32_t rb_state = lw.ReadBarrierState();
  return rb_state;
#else
  UNUSED(fake_address_dependency);
  LOG(FATAL) << "Unsupported architecture.";
  UNREACHABLE();
#endif
}
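
// A minimal usage sketch (comment only, not compiled): the returned fake address dependency is
// meant to be folded into the address of the subsequent reference load, so the CPU must order
// that load after the lock word load even without a memory barrier. Since `fad` is always zero,
// OR-ing it into the address is a no-op; `MirrorType` and `ref_addr` below are stand-ins for the
// real caller's types:
//
//   uintptr_t fad;
//   uint32_t rb_state = obj->GetReadBarrierState(&fad);
//   ref_addr = reinterpret_cast<MirrorType**>(reinterpret_cast<uintptr_t>(ref_addr) | fad);
//   MirrorType* ref = *ref_addr;  // Ordered after the lock word load by the address dependency.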

inline uint32_t Object::GetReadBarrierState() {
  if (!kUseBakerReadBarrier) {
    LOG(FATAL) << "Unreachable";
    UNREACHABLE();
  }
  DCHECK(kUseBakerReadBarrier);
  LockWord lw(GetFieldPrimitive<uint32_t, /*kIsVolatile=*/false>(MonitorOffset()));
  uint32_t rb_state = lw.ReadBarrierState();
  DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
  return rb_state;
}

inline uint32_t Object::GetReadBarrierStateAcquire() {
  if (!kUseBakerReadBarrier) {
    LOG(FATAL) << "Unreachable";
    UNREACHABLE();
  }
  LockWord lw(GetFieldAcquire<uint32_t>(MonitorOffset()));
  uint32_t rb_state = lw.ReadBarrierState();
  DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
  return rb_state;
}
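
// The acquire load in GetReadBarrierStateAcquire() pairs with a release CAS in
// AtomicSetReadBarrierState() below: a reader that observes the non-gray state through the
// acquire load is guaranteed to also observe the field updates made before the release.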

inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state,
                                              uint32_t rb_state,
                                              std::memory_order order) {
  if (!kUseBakerReadBarrier) {
    LOG(FATAL) << "Unreachable";
    UNREACHABLE();
  }
  DCHECK(ReadBarrier::IsValidReadBarrierState(expected_rb_state)) << expected_rb_state;
  DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
  LockWord expected_lw;
  LockWord new_lw;
  do {
    LockWord lw = GetLockWord(false);
    if (UNLIKELY(lw.ReadBarrierState() != expected_rb_state)) {
      // Lost the race.
      return false;
    }
    expected_lw = lw;
    expected_lw.SetReadBarrierState(expected_rb_state);
    new_lw = lw;
    new_lw.SetReadBarrierState(rb_state);
    // ConcurrentCopying::ProcessMarkStackRef uses this with
    // `order` == `std::memory_order_release`.
    // If `order` == `std::memory_order_release`, the release CAS ensures that when the GC updates
    // all the fields of an object and then changes the object from gray to non-gray, the field
    // updates (stores) are visible, i.e. not reordered past this CAS.
  } while (!CasLockWord(expected_lw, new_lw, CASMode::kWeak, order));
  return true;
}
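
// A minimal usage sketch (comment only, not compiled): the concurrent copying GC changes an
// object from gray to non-gray after scanning its fields, with release order so that readers
// doing an acquire load of the state also see the scanned fields. The state constants are
// assumed to come from ReadBarrier:
//
//   if (obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
//                                      ReadBarrier::NonGrayState(),
//                                      std::memory_order_release)) {
//     // The transition succeeded; prior field updates are visible to acquire readers.
//   }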

inline bool Object::AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit) {
  LockWord expected_lw;
  LockWord new_lw;
  do {
    LockWord lw = GetLockWord(false);
    if (UNLIKELY(lw.MarkBitState() != expected_mark_bit)) {
      // Lost the race.
      return false;
    }
    expected_lw = lw;
    new_lw = lw;
    new_lw.SetMarkBitState(mark_bit);
    // Since this is only set from the mutator, we can use the non-release CAS.
  } while (!CasLockWord(expected_lw, new_lw, CASMode::kWeak, std::memory_order_relaxed));
  return true;
}
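
// A minimal usage sketch (comment only, not compiled): a mutator marking an object through the
// lock word flips the mark bit from 0 to 1; a false return means a racing thread already marked
// the object:
//
//   if (!obj->AtomicSetMarkBit(/*expected_mark_bit=*/ 0u, /*mark_bit=*/ 1u)) {
//     // Already marked by another thread; nothing further to do.
//   }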

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_OBJECT_READBARRIER_INL_H_