/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/collector/concurrent_copying.h"
#include "gc/heap.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"
#include "utils.h"

namespace art {

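// Read barrier on a heap reference field of 'obj' at 'offset'. Returns the
// referent, marking it via the collector when the active barrier kind
// (Baker, Brooks, or table-lookup) requires it; without a read barrier this
// is a plain load of the field.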
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // The high bits of the rb ptr, rb_ptr_high_bits (which must be zero),
    // are used to create an artificial data dependency from the is_gray
    // load to the ref field (ptr) load, to avoid needing a load-load
    // barrier between the two.
    uintptr_t rb_ptr_high_bits;
    bool is_gray = HasGrayReadBarrierPointer(obj, &rb_ptr_high_bits);
    ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
        rb_ptr_high_bits | reinterpret_cast<uintptr_t>(ref_addr));
    MirrorType* ref = ref_addr->AsMirrorPtr();
    if (is_gray) {
      // Slow-path.
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    if (kEnableReadBarrierInvariantChecks) {
      CHECK_EQ(rb_ptr_high_bits, 0U) << obj << " rb_ptr=" << obj->GetReadBarrierPointer();
    }
    AssertToSpaceInvariant(obj, offset, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref_addr->AsMirrorPtr();
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    MirrorType* ref = ref_addr->AsMirrorPtr();
    MirrorType* old_ref = ref;
    // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
    gc::Heap* heap = Runtime::Current()->GetHeap();
    if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
      ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
      // Update the field atomically. This may fail if the mutator updates it
      // before us, but that is OK.
      obj->CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<false, false>(
          offset, old_ref, ref);
    }
    AssertToSpaceInvariant(obj, offset, ref);
    return ref;
  } else {
    // No read barrier.
    return ref_addr->AsMirrorPtr();
  }
}

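// Read barrier on a GC root. 'root' is the address of the slot holding the
// reference; the slot may be updated in place when the referent is marked.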
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    // TODO: separate the read barrier code from the collector code more.
    if (Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    if (Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      MirrorType* old_ref = ref;
      ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
      // Update the root slot atomically. This may fail if the mutator updates
      // it before us, but that is OK.
      Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
      atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, ref);
    }
    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    return ref;
  } else {
    return ref;
  }
}

// TODO: Reduce copy paste
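// Same as the root barrier above, but for a root stored as a compressed
// (32-bit) reference.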
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    // TODO: separate the read barrier code from the collector code more.
    if (Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    if (Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
      auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      // Update the root slot atomically. This may fail if the mutator updates
      // it before us, but that is OK.
      auto* atomic_root =
          reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
      atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
    }
    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    return ref;
  } else {
    return ref;
  }
}

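// Returns true when read barriers should be skipped: during startup the heap
// or the concurrent copying collector may not be set up yet, and CC may not
// be the current collector type.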
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

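// Debug check: asserts that 'ref' is a to-space reference, via the concurrent
// copying collector. A no-op unless to-space invariant checks or a debug
// build are enabled, and skipped for null refs and during startup.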
inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

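// Marks 'obj' via the concurrent copying collector and returns the to-space
// version of the object.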
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->Mark(obj);
}

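// Returns whether obj's read barrier pointer encodes the gray color in its
// low bits (i.e. the slow path is needed). The high bits, which should be
// zero, are returned through 'out_rb_ptr_high_bits' so the caller can check
// them and build the artificial data dependency.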
inline bool ReadBarrier::HasGrayReadBarrierPointer(mirror::Object* obj,
                                                   uintptr_t* out_rb_ptr_high_bits) {
  mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
  uintptr_t rb_ptr_bits = reinterpret_cast<uintptr_t>(rb_ptr);
  uintptr_t rb_ptr_low_bits = rb_ptr_bits & rb_ptr_mask_;
  if (kEnableReadBarrierInvariantChecks) {
    CHECK(rb_ptr_low_bits == white_ptr_ || rb_ptr_low_bits == gray_ptr_ ||
          rb_ptr_low_bits == black_ptr_)
        << "obj=" << obj << " rb_ptr=" << rb_ptr << " " << PrettyTypeOf(obj);
  }
  bool is_gray = rb_ptr_low_bits == gray_ptr_;
  // The high bits are supposed to be zero. We check this on the caller side.
  *out_rb_ptr_high_bits = rb_ptr_bits & ~rb_ptr_mask_;
  return is_gray;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_