/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/collector/concurrent_copying-inl.h"
#include "gc/heap.h"
#include "mirror/object_reference.h"
#include "mirror/object-readbarrier-inl.h"
#include "mirror/reference.h"
#include "runtime.h"
#include "utils.h"

namespace art {

// Disabled for performance reasons.
static constexpr bool kCheckDebugDisallowReadBarrierCount = false;

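// Read barrier for a heap reference field of `obj` at `offset`, whose current value is stored at
// `ref_addr`. Dispatches on the compiled-in barrier kind (Baker, Brooks, or table-lookup). The
// Baker path checks the object's read barrier state and, if the object is gray, routes the
// reference through the Mark() slow path; when kAlwaysUpdateField is true it also CASes the
// to-space reference back into the field.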
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kAlwaysUpdateField>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // fake_address_dependency (must be zero) is used to create an artificial data dependency
      // from the is_gray load to the ref field (ptr) load, to avoid needing a load-load barrier
      // between the two.
      uintptr_t fake_address_dependency;
      bool is_gray = IsGray(obj, &fake_address_dependency);
      if (kEnableReadBarrierInvariantChecks) {
        CHECK_EQ(fake_address_dependency, 0U) << obj << " rb_state=" << obj->GetReadBarrierState();
      }
      ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
          fake_address_dependency | reinterpret_cast<uintptr_t>(ref_addr));
      MirrorType* ref = ref_addr->AsMirrorPtr();
      MirrorType* old_ref = ref;
      if (is_gray) {
        // Slow-path.
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        // If kAlwaysUpdateField is true, update the field atomically. This may fail if a mutator
        // updates the field before us, but that is OK.
        if (kAlwaysUpdateField && ref != old_ref) {
          obj->CasFieldStrongReleaseObjectWithoutWriteBarrier<false, false>(
              offset, old_ref, ref);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref_addr->AsMirrorPtr();
    } else if (kUseTableLookupReadBarrier) {
      MirrorType* ref = ref_addr->AsMirrorPtr();
      MirrorType* old_ref = ref;
      // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
      gc::Heap* heap = Runtime::Current()->GetHeap();
      if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the field atomically. This may fail if a mutator updates the field before us,
        // but that is OK.
        if (ref != old_ref) {
          obj->CasFieldStrongReleaseObjectWithoutWriteBarrier<false, false>(
              offset, old_ref, ref);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    // No read barrier.
    return ref_addr->AsMirrorPtr();
  }
}

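// Read barrier for a GC root stored as a raw MirrorType* pointer. Unlike the field barrier above,
// the Baker path here keys off Thread::GetIsGcMarking() rather than a per-object gray state, and
// the table-lookup path CASes the updated reference back into the root slot.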
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kIsDebugBuild) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        MirrorType* old_ref = ref;
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the root atomically. This may fail if a mutator updates the root before us,
        // but that is OK.
        if (ref != old_ref) {
          Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
          atomic_root->CompareExchangeStrongRelaxed(old_ref, ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    return ref;
  }
}

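// Same as the raw-pointer root barrier above, but for roots stored as compressed references;
// the table-lookup path CASes a CompressedReference value rather than a raw pointer.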
// TODO: Reduce copy paste.
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // TODO: separate the read barrier code from the collector code more.
    Thread* self = Thread::Current();
    if (self != nullptr && self->GetIsGcMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    Thread* self = Thread::Current();
    if (self != nullptr &&
        self->GetIsGcMarking() &&
        Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
      auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      // Update the root atomically. This may fail if a mutator updates the root before us,
      // but that is OK.
      if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
        auto* atomic_root =
            reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
        atomic_root->CompareExchangeStrongRelaxed(old_ref, new_ref);
      }
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else {
    return ref;
  }
}

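// Returns the marked (to-space) version of `ref` if the concurrent copying GC is marking,
// otherwise returns `ref` unchanged. Null is handled here because the collector's IsMarked()
// does not accept it.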
template <typename MirrorType>
inline MirrorType* ReadBarrier::IsMarked(MirrorType* ref) {
  // Only read-barrier configurations can have mutators run while
  // the GC is marking.
  if (!kUseReadBarrier) {
    return ref;
  }
  // IsMarked does not handle null, so handle it here.
  if (ref == nullptr) {
    return nullptr;
  }
  // IsMarked should only be called when the GC is marking.
  if (!Thread::Current()->GetIsGcMarking()) {
    return ref;
  }

  return reinterpret_cast<MirrorType*>(
      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarked(ref));
}

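// Returns true while the runtime is not yet in a state where to-space invariant checks make
// sense: the heap or the concurrent copying collector may not exist yet, or CC is not the
// current collector.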
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

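// Debug check (when kEnableToSpaceInvariantChecks) that the reference read from the field of
// `obj` at `offset` points into the to-space; skipped for null references and during startup.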
inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

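// Same to-space check as above, but for a reference loaded from a GC root, attributing failures
// to `gc_root_source`.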
inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(gc_root_source, ref);
  }
}

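// Slow path shared by the barriers above: asks the concurrent copying collector for the
// to-space copy of `obj`, marking it if needed.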
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->MarkFromReadBarrier(obj);
}

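// Fast-path gray check used by the Baker barrier. The returned fake_address_dependency is always
// zero and exists only to build an address dependency between the state load and the following
// reference load.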
inline bool ReadBarrier::IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency) {
  return obj->GetReadBarrierState(fake_address_dependency) == gray_state_;
}

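// Gray check without the address-dependency trick; relies on load-acquire ordering instead.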
inline bool ReadBarrier::IsGray(mirror::Object* obj) {
  // Use a load-acquire to load the read barrier bit to avoid reordering with the subsequent load.
  // GetReadBarrierStateAcquire() has load-acquire semantics.
  return obj->GetReadBarrierStateAcquire() == gray_state_;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_