1 /*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "concurrent_copying.h"
18
19 #include "art_field-inl.h"
20 #include "gc/accounting/heap_bitmap-inl.h"
21 #include "gc/accounting/space_bitmap-inl.h"
22 #include "gc/space/image_space.h"
23 #include "gc/space/space.h"
24 #include "intern_table.h"
25 #include "mirror/class-inl.h"
26 #include "mirror/object-inl.h"
27 #include "scoped_thread_state_change.h"
28 #include "thread-inl.h"
29 #include "thread_list.h"
30 #include "well_known_classes.h"
31
32 namespace art {
33 namespace gc {
34 namespace collector {
35
36 ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
37 : GarbageCollector(heap,
38 name_prefix + (name_prefix.empty() ? "" : " ") +
39 "concurrent copying + mark sweep"),
40 region_space_(nullptr), gc_barrier_(new Barrier(0)), mark_queue_(2 * MB),
41 is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
42 heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0),
43 skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
44 rb_table_(heap_->GetReadBarrierTable()),
45 force_evacuate_all_(false) {
46 static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
47 "The region space size and the read barrier table region size must match");
48 cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
49 {
50 Thread* self = Thread::Current();
51 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
52 // Cache this so that we won't have to lock heap_bitmap_lock_ in
53 // Mark() which could cause a nested lock on heap_bitmap_lock_
54 // when GC causes a RB while doing GC or a lock order violation
55 // (class_linker_lock_ and heap_bitmap_lock_).
56 heap_mark_bitmap_ = heap->GetMarkBitmap();
57 }
58 }
59
60 ConcurrentCopying::~ConcurrentCopying() {
61 }
62
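// Top-level driver for one collection cycle: InitializePhase and MarkingPhase run
// concurrently with mutators (mutator lock held in shared mode), FlipThreadRoots and the
// optional no-from-space-reference verification involve pauses, and ReclaimPhase frees the
// from-space concurrently.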
63 void ConcurrentCopying::RunPhases() {
64 CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
65 CHECK(!is_active_);
66 is_active_ = true;
67 Thread* self = Thread::Current();
68 Locks::mutator_lock_->AssertNotHeld(self);
69 {
70 ReaderMutexLock mu(self, *Locks::mutator_lock_);
71 InitializePhase();
72 }
73 FlipThreadRoots();
74 {
75 ReaderMutexLock mu(self, *Locks::mutator_lock_);
76 MarkingPhase();
77 }
78 // Verify no from space refs. This causes a pause.
79 if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
80 TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
81 ScopedPause pause(this);
82 CheckEmptyMarkQueue();
83 if (kVerboseMode) {
84 LOG(INFO) << "Verifying no from-space refs";
85 }
86 VerifyNoFromSpaceReferences();
87 if (kVerboseMode) {
88 LOG(INFO) << "Done verifying no from-space refs";
89 }
90 CheckEmptyMarkQueue();
91 }
92 {
93 ReaderMutexLock mu(self, *Locks::mutator_lock_);
94 ReclaimPhase();
95 }
96 FinishPhase();
97 CHECK(is_active_);
98 is_active_ = false;
99 }
100
101 void ConcurrentCopying::BindBitmaps() {
102 Thread* self = Thread::Current();
103 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
104 // Mark all of the spaces we never collect as immune.
105 for (const auto& space : heap_->GetContinuousSpaces()) {
106 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
107 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
108 CHECK(space->IsZygoteSpace() || space->IsImageSpace());
109 CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
110 const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
111 "cc zygote space bitmap";
112 // TODO: try avoiding using bitmaps for image/zygote to save space.
113 accounting::ContinuousSpaceBitmap* bitmap =
114 accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
115 cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
116 cc_bitmaps_.push_back(bitmap);
117 } else if (space == region_space_) {
118 accounting::ContinuousSpaceBitmap* bitmap =
119 accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
120 space->Begin(), space->Capacity());
121 cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
122 cc_bitmaps_.push_back(bitmap);
123 region_space_bitmap_ = bitmap;
124 }
125 }
126 }
127
128 void ConcurrentCopying::InitializePhase() {
129 TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
130 if (kVerboseMode) {
131 LOG(INFO) << "GC InitializePhase";
132 LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
133 << reinterpret_cast<void*>(region_space_->Limit());
134 }
135 CHECK(mark_queue_.IsEmpty());
136 immune_region_.Reset();
137 bytes_moved_.StoreRelaxed(0);
138 objects_moved_.StoreRelaxed(0);
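  // Decide whether to evacuate all regions: do so for explicit GCs, native-allocation GCs,
  // and collections that clear soft references.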
139 if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
140 GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
141 GetCurrentIteration()->GetClearSoftReferences()) {
142 force_evacuate_all_ = true;
143 } else {
144 force_evacuate_all_ = false;
145 }
146 BindBitmaps();
147 if (kVerboseMode) {
148 LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
149 LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End();
150 LOG(INFO) << "GC end of InitializePhase";
151 }
152 }
153
154 // Used to switch the thread roots of a thread from from-space refs to to-space refs.
155 class ThreadFlipVisitor : public Closure {
156 public:
157   explicit ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
158 : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
159 }
160
161   virtual void Run(Thread* thread) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
162 // Note: self is not necessarily equal to thread since thread may be suspended.
163 Thread* self = Thread::Current();
164 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
165 << thread->GetState() << " thread " << thread << " self " << self;
166 if (use_tlab_ && thread->HasTlab()) {
167 if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
168 // This must come before the revoke.
169 size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
170 concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
171 reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
172 FetchAndAddSequentiallyConsistent(thread_local_objects);
173 } else {
174 concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
175 }
176 }
177 if (kUseThreadLocalAllocationStack) {
178 thread->RevokeThreadLocalAllocationStack();
179 }
180 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
181 thread->VisitRoots(concurrent_copying_);
182 concurrent_copying_->GetBarrier().Pass(self);
183 }
184
185 private:
186 ConcurrentCopying* const concurrent_copying_;
187 const bool use_tlab_;
188 };
189
190 // Called back from Runtime::FlipThreadRoots() during a pause.
191 class FlipCallback : public Closure {
192 public:
193   explicit FlipCallback(ConcurrentCopying* concurrent_copying)
194 : concurrent_copying_(concurrent_copying) {
195 }
196
197   virtual void Run(Thread* thread) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
198 ConcurrentCopying* cc = concurrent_copying_;
199 TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
200 // Note: self is not necessarily equal to thread since thread may be suspended.
201 Thread* self = Thread::Current();
202 CHECK(thread == self);
203 Locks::mutator_lock_->AssertExclusiveHeld(self);
204 cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
205 cc->SwapStacks(self);
206 if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
207 cc->RecordLiveStackFreezeSize(self);
208 cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
209 cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
210 }
211 cc->is_marking_ = true;
212 if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
213 CHECK(Runtime::Current()->IsAotCompiler());
214 TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
215 Runtime::Current()->VisitTransactionRoots(cc);
216 }
217 }
218
219 private:
220 ConcurrentCopying* const concurrent_copying_;
221 };
222
223 // Switch the thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
224 void ConcurrentCopying::FlipThreadRoots() {
225 TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
226 if (kVerboseMode) {
227 LOG(INFO) << "time=" << region_space_->Time();
228 region_space_->DumpNonFreeRegions(LOG(INFO));
229 }
230 Thread* self = Thread::Current();
231 Locks::mutator_lock_->AssertNotHeld(self);
232 gc_barrier_->Init(self, 0);
233 ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
234 FlipCallback flip_callback(this);
235 size_t barrier_count = Runtime::Current()->FlipThreadRoots(
236 &thread_flip_visitor, &flip_callback, this);
237 {
238 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
239 gc_barrier_->Increment(self, barrier_count);
240 }
241 is_asserting_to_space_invariant_ = true;
242 QuasiAtomic::ThreadFenceForConstructor();
243 if (kVerboseMode) {
244 LOG(INFO) << "time=" << region_space_->Time();
245 region_space_->DumpNonFreeRegions(LOG(INFO));
246 LOG(INFO) << "GC end of FlipThreadRoots";
247 }
248 }
249
250 void ConcurrentCopying::SwapStacks(Thread* self) {
251 heap_->SwapStacks(self);
252 }
253
254 void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
255 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
256 live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
257 }
258
259 // Used to visit objects in the immune spaces.
260 class ConcurrentCopyingImmuneSpaceObjVisitor {
261 public:
262   explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
263 : collector_(cc) {}
264
265   void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
266 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
267 DCHECK(obj != nullptr);
268 DCHECK(collector_->immune_region_.ContainsObject(obj));
269 accounting::ContinuousSpaceBitmap* cc_bitmap =
270 collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
271 DCHECK(cc_bitmap != nullptr)
272 << "An immune space object must have a bitmap";
273 if (kIsDebugBuild) {
274 DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
275 << "Immune space object must be already marked";
276 }
277 // This may or may not succeed, which is ok.
278 if (kUseBakerReadBarrier) {
279 obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
280 }
281 if (cc_bitmap->AtomicTestAndSet(obj)) {
282 // Already marked. Do nothing.
283 } else {
284 // Newly marked. Set the gray bit and push it onto the mark stack.
285 CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
286 collector_->PushOntoMarkStack<true>(obj);
287 }
288 }
289
290 private:
291 ConcurrentCopying* collector_;
292 };
293
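// A no-op checkpoint closure; IssueEmptyCheckpoint() uses it purely as a synchronization
// point with the mutator threads.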
294 class EmptyCheckpoint : public Closure {
295 public:
296   explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
297 : concurrent_copying_(concurrent_copying) {
298 }
299
300   virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
301 // Note: self is not necessarily equal to thread since thread may be suspended.
302 Thread* self = Thread::Current();
303 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
304 << thread->GetState() << " thread " << thread << " self " << self;
305 // If thread is a running mutator, then act on behalf of the garbage collector.
306 // See the code in ThreadList::RunCheckpoint.
307 if (thread->GetState() == kRunnable) {
308 concurrent_copying_->GetBarrier().Pass(self);
309 }
310 }
311
312 private:
313 ConcurrentCopying* const concurrent_copying_;
314 };
315
316 // Concurrently mark roots that are guarded by read barriers and process the mark stack.
317 void ConcurrentCopying::MarkingPhase() {
318 TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
319 if (kVerboseMode) {
320 LOG(INFO) << "GC MarkingPhase";
321 }
322 {
323 // Mark the image root. The WB-based collectors do not need to
324 // scan the image objects from roots by relying on the card table,
325 // but it's necessary for the RB to-space invariant to hold.
326 TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
327 gc::space::ImageSpace* image = heap_->GetImageSpace();
328 if (image != nullptr) {
329 mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
330 mirror::Object* marked_image_root = Mark(image_root);
331 CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
332 if (ReadBarrier::kEnableToSpaceInvariantChecks) {
333 AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
334 }
335 }
336 }
337 {
338 TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
339 Runtime::Current()->VisitConstantRoots(this);
340 }
341 {
342 TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
343 Runtime::Current()->GetInternTable()->VisitRoots(this, kVisitRootFlagAllRoots);
344 }
345 {
346 TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
347 Runtime::Current()->GetClassLinker()->VisitRoots(this, kVisitRootFlagAllRoots);
348 }
349 {
350 // TODO: don't visit the transaction roots if it's not active.
351 TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
352 Runtime::Current()->VisitNonThreadRoots(this);
353 }
354
355 // Immune spaces.
356 for (auto& space : heap_->GetContinuousSpaces()) {
357 if (immune_region_.ContainsSpace(space)) {
358 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
359 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
360 ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
361 live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
362 reinterpret_cast<uintptr_t>(space->Limit()),
363 visitor);
364 }
365 }
366
367 Thread* self = Thread::Current();
368 {
369 TimingLogger::ScopedTiming split6("ProcessMarkStack", GetTimings());
370 // Process the mark stack and issue an empty check point. If the
371 // mark stack is still empty after the check point, we're
372 // done. Otherwise, repeat.
373 ProcessMarkStack();
374 size_t count = 0;
375 while (!ProcessMarkStack()) {
376 ++count;
377 if (kVerboseMode) {
378 LOG(INFO) << "Issue an empty check point. " << count;
379 }
380 IssueEmptyCheckpoint();
381 }
382 // Need to ensure the mark stack is empty before reference
383 // processing to get rid of non-reference gray objects.
384 CheckEmptyMarkQueue();
385 // Enable the GetReference slow path and disallow access to the system weaks.
386 GetHeap()->GetReferenceProcessor()->EnableSlowPath();
387 Runtime::Current()->DisallowNewSystemWeaks();
388 QuasiAtomic::ThreadFenceForConstructor();
389 // Lock-unlock the system weak locks so that there's no thread in
390 // the middle of accessing system weaks.
391 Runtime::Current()->EnsureNewSystemWeaksDisallowed();
392 // Note: Do not issue a checkpoint from here to the
393 // SweepSystemWeaks call or else a deadlock due to
394 // WaitHoldingLocks() would occur.
395 if (kVerboseMode) {
396 LOG(INFO) << "Enabled the ref proc slow path & disabled access to system weaks.";
397 LOG(INFO) << "ProcessReferences";
398 }
399 ProcessReferences(self, true);
400 CheckEmptyMarkQueue();
401 if (kVerboseMode) {
402 LOG(INFO) << "SweepSystemWeaks";
403 }
404 SweepSystemWeaks(self);
405 if (kVerboseMode) {
406 LOG(INFO) << "SweepSystemWeaks done";
407 }
408 // Because hash_set::Erase() can call the hash function for
409 // arbitrary elements in the weak intern table in
410 // InternTable::Table::SweepWeaks(), the above SweepSystemWeaks()
411 // call may have marked some objects (strings) alive. So process
412 // the mark stack here once again.
413 ProcessMarkStack();
414 CheckEmptyMarkQueue();
415 // Disable marking.
416 if (kUseTableLookupReadBarrier) {
417 heap_->rb_table_->ClearAll();
418 DCHECK(heap_->rb_table_->IsAllCleared());
419 }
420 is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(1);
421 is_marking_ = false;
422 if (kVerboseMode) {
423 LOG(INFO) << "AllowNewSystemWeaks";
424 }
425 Runtime::Current()->AllowNewSystemWeaks();
426 CheckEmptyMarkQueue();
427 }
428
429 if (kVerboseMode) {
430 LOG(INFO) << "GC end of MarkingPhase";
431 }
432 }
433
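// Run an empty checkpoint on all threads and wait for them to pass the barrier. This ensures
// every mutator has reached a synchronization point, so any mark stack pushes they performed
// beforehand are visible to the GC.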
434 void ConcurrentCopying::IssueEmptyCheckpoint() {
435 Thread* self = Thread::Current();
436 EmptyCheckpoint check_point(this);
437 ThreadList* thread_list = Runtime::Current()->GetThreadList();
438 gc_barrier_->Init(self, 0);
439 size_t barrier_count = thread_list->RunCheckpoint(&check_point);
440   // If there are no threads to wait for, all the checkpoint functions have already finished,
441   // so there is no need to release the mutator lock.
442 if (barrier_count == 0) {
443 return;
444 }
445 // Release locks then wait for all mutator threads to pass the barrier.
446 Locks::mutator_lock_->SharedUnlock(self);
447 {
448 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
449 gc_barrier_->Increment(self, barrier_count);
450 }
451 Locks::mutator_lock_->SharedLock(self);
452 }
453
454 mirror::Object* ConcurrentCopying::PopOffMarkStack() {
455 return mark_queue_.Dequeue();
456 }
457
458 template<bool kThreadSafe>
459 void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
460 CHECK_EQ(is_mark_queue_push_disallowed_.LoadRelaxed(), 0)
461 << " " << to_ref << " " << PrettyTypeOf(to_ref);
462 if (kThreadSafe) {
463 CHECK(mark_queue_.Enqueue(to_ref)) << "Mark queue overflow";
464 } else {
465 CHECK(mark_queue_.EnqueueThreadUnsafe(to_ref)) << "Mark queue overflow";
466 }
467 }
468
469 accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
470 return heap_->allocation_stack_.get();
471 }
472
473 accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
474 return heap_->live_stack_.get();
475 }
476
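// Return the to-space address installed in from_ref's lock word, or nullptr if from_ref has
// not been forwarded yet.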
477 inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
478 DCHECK(region_space_->IsInFromSpace(from_ref));
479 LockWord lw = from_ref->GetLockWord(false);
480 if (lw.GetState() == LockWord::kForwardingAddress) {
481 mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
482 CHECK(fwd_ptr != nullptr);
483 return fwd_ptr;
484 } else {
485 return nullptr;
486 }
487 }
488
489 // The following visitors are used to verify that there are no
490 // references to the from-space left after marking.
491 class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
492 public:
493   explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
494 : collector_(collector) {}
495
496   void operator()(mirror::Object* ref) const
497 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
498 if (ref == nullptr) {
499 // OK.
500 return;
501 }
502 collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
503 if (kUseBakerReadBarrier) {
504 if (collector_->RegionSpace()->IsInToSpace(ref)) {
505 CHECK(ref->GetReadBarrierPointer() == nullptr)
506 << "To-space ref " << ref << " " << PrettyTypeOf(ref)
507 << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
508 } else {
509 CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
510 (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
511 collector_->IsOnAllocStack(ref)))
512 << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
513 << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
514 << " but isn't on the alloc stack (and has white rb_ptr)."
515 << " Is it in the non-moving space="
516 << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
517 }
518 }
519 }
520
521   void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
522 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
523 DCHECK(root != nullptr);
524 operator()(root);
525 }
526
527 private:
528 ConcurrentCopying* const collector_;
529 };
530
531 class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
532 public:
533   explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
534 : collector_(collector) {}
535
536   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
537 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
538 mirror::Object* ref =
539 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
540 ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
541 visitor(ref);
542 }
543   void operator()(mirror::Class* klass, mirror::Reference* ref) const
544 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
545 CHECK(klass->IsTypeOfReferenceClass());
546 this->operator()(ref, mirror::Reference::ReferentOffset(), false);
547 }
548
549 private:
550 ConcurrentCopying* collector_;
551 };
552
553 class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
554 public:
555   explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
556 : collector_(collector) {}
557   void operator()(mirror::Object* obj) const
558 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
559 ObjectCallback(obj, collector_);
560 }
561   static void ObjectCallback(mirror::Object* obj, void *arg)
562 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
563 CHECK(obj != nullptr);
564 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
565 space::RegionSpace* region_space = collector->RegionSpace();
566 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
567 ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
568 obj->VisitReferences<true>(visitor, visitor);
569 if (kUseBakerReadBarrier) {
570 if (collector->RegionSpace()->IsInToSpace(obj)) {
571 CHECK(obj->GetReadBarrierPointer() == nullptr)
572 << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
573 } else {
574 CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
575 (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
576 collector->IsOnAllocStack(obj)))
577 << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
578 << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
579 << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
580 << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
581 }
582 }
583 }
584
585 private:
586 ConcurrentCopying* const collector_;
587 };
588
589 // Verify there's no from-space references left after the marking phase.
590 void ConcurrentCopying::VerifyNoFromSpaceReferences() {
591 Thread* self = Thread::Current();
592 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
593 ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
594 // Roots.
595 {
596 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
597 ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
598 Runtime::Current()->VisitRoots(&ref_visitor);
599 }
600 // The to-space.
601 region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
602 this);
603 // Non-moving spaces.
604 {
605 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
606 heap_->GetMarkBitmap()->Visit(visitor);
607 }
608 // The alloc stack.
609 {
610 ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
611 for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
612 it < end; ++it) {
613 mirror::Object* const obj = it->AsMirrorPtr();
614 if (obj != nullptr && obj->GetClass() != nullptr) {
615 // TODO: need to call this only if obj is alive?
616 ref_visitor(obj);
617 visitor(obj);
618 }
619 }
620 }
621 // TODO: LOS. But only refs in LOS are classes.
622 }
623
624 // The following visitors are used to assert the to-space invariant.
625 class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
626 public:
627   explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
628 : collector_(collector) {}
629
630   void operator()(mirror::Object* ref) const
631 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
632 if (ref == nullptr) {
633 // OK.
634 return;
635 }
636 collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
637 }
638   static void RootCallback(mirror::Object** root, void *arg, const RootInfo& /*root_info*/)
639 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
640 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
641 ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector);
642 DCHECK(root != nullptr);
643 visitor(*root);
644 }
645
646 private:
647 ConcurrentCopying* collector_;
648 };
649
650 class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
651 public:
652   explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
653 : collector_(collector) {}
654
655   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
656 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
657 mirror::Object* ref =
658 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
659 ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
660 visitor(ref);
661 }
662   void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const
663 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
664 CHECK(klass->IsTypeOfReferenceClass());
665 }
666
667 private:
668 ConcurrentCopying* collector_;
669 };
670
671 class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
672 public:
673   explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
674 : collector_(collector) {}
675   void operator()(mirror::Object* obj) const
676 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
677 ObjectCallback(obj, collector_);
678 }
679   static void ObjectCallback(mirror::Object* obj, void *arg)
680 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
681 CHECK(obj != nullptr);
682 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
683 space::RegionSpace* region_space = collector->RegionSpace();
684 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
685 collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
686 ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
687 obj->VisitReferences<true>(visitor, visitor);
688 }
689
690 private:
691 ConcurrentCopying* collector_;
692 };
693
694 bool ConcurrentCopying::ProcessMarkStack() {
695 if (kVerboseMode) {
696 LOG(INFO) << "ProcessMarkStack. ";
697 }
698 size_t count = 0;
699 mirror::Object* to_ref;
700 while ((to_ref = PopOffMarkStack()) != nullptr) {
701 ++count;
702 DCHECK(!region_space_->IsInFromSpace(to_ref));
703 if (kUseBakerReadBarrier) {
704 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
705 << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
706 << " is_marked=" << IsMarked(to_ref);
707 }
708 // Scan ref fields.
709 Scan(to_ref);
710 // Mark the gray ref as white or black.
711 if (kUseBakerReadBarrier) {
712 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
713 << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
714 << " is_marked=" << IsMarked(to_ref);
715 }
716 if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
717 to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
718 !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) {
719 // Leave References gray so that GetReferent() will trigger RB.
720 CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
721 } else {
722 #ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
723 if (kUseBakerReadBarrier) {
724 if (region_space_->IsInToSpace(to_ref)) {
725 // If to-space, change from gray to white.
726 bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
727 ReadBarrier::WhitePtr());
728 CHECK(success) << "Must succeed as we won the race.";
729 CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
730 } else {
731 // If non-moving space/unevac from space, change from gray
732 // to black. We can't change gray to white because it's not
733 // safe to use CAS if two threads change values in opposite
734 // directions (A->B and B->A). So, we change it to black to
735 // indicate non-moving objects that have been marked
736 // through. Note we'd need to change from black to white
737 // later (concurrently).
738 bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
739 ReadBarrier::BlackPtr());
740 CHECK(success) << "Must succeed as we won the race.";
741 CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
742 }
743 }
744 #else
745 DCHECK(!kUseBakerReadBarrier);
746 #endif
747 }
748 if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
749 ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
750 visitor(to_ref);
751 }
752 }
753 // Return true if the stack was empty.
754 return count == 0;
755 }
756
757 void ConcurrentCopying::CheckEmptyMarkQueue() {
758 if (!mark_queue_.IsEmpty()) {
759 while (!mark_queue_.IsEmpty()) {
760 mirror::Object* obj = mark_queue_.Dequeue();
761 if (kUseBakerReadBarrier) {
762 mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
763 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
764 << " is_marked=" << IsMarked(obj);
765 } else {
766 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
767 << " is_marked=" << IsMarked(obj);
768 }
769 }
770 LOG(FATAL) << "mark queue is not empty";
771 }
772 }
773
774 void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
775 TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
776 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
777 Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
778 }
779
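// Mark objects on the allocation stack as live, then sweep unmarked objects in the
// non-immune continuous alloc spaces (the region space is skipped) and in the large
// object space.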
780 void ConcurrentCopying::Sweep(bool swap_bitmaps) {
781 {
782 TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
783 accounting::ObjectStack* live_stack = heap_->GetLiveStack();
784 if (kEnableFromSpaceAccountingCheck) {
785 CHECK_GE(live_stack_freeze_size_, live_stack->Size());
786 }
787 heap_->MarkAllocStackAsLive(live_stack);
788 live_stack->Reset();
789 }
790 CHECK(mark_queue_.IsEmpty());
791 TimingLogger::ScopedTiming split("Sweep", GetTimings());
792 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
793 if (space->IsContinuousMemMapAllocSpace()) {
794 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
795 if (space == region_space_ || immune_region_.ContainsSpace(space)) {
796 continue;
797 }
798 TimingLogger::ScopedTiming split2(
799 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
800 RecordFree(alloc_space->Sweep(swap_bitmaps));
801 }
802 }
803 SweepLargeObjects(swap_bitmaps);
804 }
805
806 void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
807 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
808 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
809 }
810
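// Resets the read barrier pointer of a marked non-moving object from black back to white.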
811 class ConcurrentCopyingClearBlackPtrsVisitor {
812 public:
813   explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
814 : collector_(cc) {}
815 #ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
816 NO_RETURN
817 #endif
818   void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
819 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
820 DCHECK(obj != nullptr);
821 DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
822 DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
823 obj->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
824 DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
825 }
826
827 private:
828 ConcurrentCopying* const collector_;
829 };
830
831 // Clear the black ptrs in non-moving objects back to white.
832 void ConcurrentCopying::ClearBlackPtrs() {
833 CHECK(kUseBakerReadBarrier);
834 TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings());
835 ConcurrentCopyingClearBlackPtrsVisitor visitor(this);
836 for (auto& space : heap_->GetContinuousSpaces()) {
837 if (space == region_space_) {
838 continue;
839 }
840 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
841 if (kVerboseMode) {
842 LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap;
843 }
844 mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
845 reinterpret_cast<uintptr_t>(space->Limit()),
846 visitor);
847 }
848 space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
849 large_object_space->GetMarkBitmap()->VisitMarkedRange(
850 reinterpret_cast<uintptr_t>(large_object_space->Begin()),
851 reinterpret_cast<uintptr_t>(large_object_space->End()),
852 visitor);
853 // Objects on the allocation stack?
854 if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
855 size_t count = GetAllocationStack()->Size();
856 auto* it = GetAllocationStack()->Begin();
857 auto* end = GetAllocationStack()->End();
858 for (size_t i = 0; i < count; ++i, ++it) {
859 CHECK_LT(it, end);
860 mirror::Object* obj = it->AsMirrorPtr();
861 if (obj != nullptr) {
862 // Must have been cleared above.
863 CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
864 }
865 }
866 }
867 }
868
869 void ConcurrentCopying::ReclaimPhase() {
870 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
871 if (kVerboseMode) {
872 LOG(INFO) << "GC ReclaimPhase";
873 }
874 Thread* self = Thread::Current();
875
876 {
877 // Double-check that the mark stack is empty.
878 // Note: need to set this after VerifyNoFromSpaceRef().
879 is_asserting_to_space_invariant_ = false;
880 QuasiAtomic::ThreadFenceForConstructor();
881 if (kVerboseMode) {
882 LOG(INFO) << "Issue an empty check point. ";
883 }
884 IssueEmptyCheckpoint();
885 // Disable the check.
886 is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(0);
887 CheckEmptyMarkQueue();
888 }
889
890 {
891 // Record freed objects.
892 TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
893 // Don't include thread-locals that are in the to-space.
894 uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
895 uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
896 uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
897 uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
898 uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
899 uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
900 if (kEnableFromSpaceAccountingCheck) {
901 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
902 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
903 }
904 CHECK_LE(to_objects, from_objects);
905 CHECK_LE(to_bytes, from_bytes);
906 int64_t freed_bytes = from_bytes - to_bytes;
907 int64_t freed_objects = from_objects - to_objects;
908 if (kVerboseMode) {
909 LOG(INFO) << "RecordFree:"
910 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
911 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
912 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
913 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
914 << " from_space size=" << region_space_->FromSpaceSize()
915 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
916 << " to_space size=" << region_space_->ToSpaceSize();
917 LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
918 }
919 RecordFree(ObjectBytePair(freed_objects, freed_bytes));
920 if (kVerboseMode) {
921 LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
922 }
923 }
924
925 {
926 TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
927 ComputeUnevacFromSpaceLiveRatio();
928 }
929
930 {
931 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
932 region_space_->ClearFromSpace();
933 }
934
935 {
936 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
937 if (kUseBakerReadBarrier) {
938 ClearBlackPtrs();
939 }
940 Sweep(false);
941 SwapBitmaps();
942 heap_->UnBindBitmaps();
943
944 // Remove bitmaps for the immune spaces.
945 while (!cc_bitmaps_.empty()) {
946 accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
947 cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
948 delete cc_bitmap;
949 cc_bitmaps_.pop_back();
950 }
951 region_space_bitmap_ = nullptr;
952 }
953
954 if (kVerboseMode) {
955 LOG(INFO) << "GC end of ReclaimPhase";
956 }
957 }
958
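// Adds the region-space-aligned size of each marked unevacuated from-space object to its
// region's live byte count; under the Baker read barrier it also resets the object's black
// pointer to white.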
959 class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
960 public:
961   explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
962 : collector_(cc) {}
963   void operator()(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
964 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
965 DCHECK(ref != nullptr);
966 DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
967 DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
968 if (kUseBakerReadBarrier) {
969 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
970 // Clear the black ptr.
971 ref->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
972 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << ref;
973 }
974 size_t obj_size = ref->SizeOf();
975 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
976 collector_->region_space_->AddLiveBytes(ref, alloc_size);
977 }
978
979 private:
980 ConcurrentCopying* collector_;
981 };
982
983 // Compute how many live bytes are left in each unevacuated from-space region.
984 void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
985 region_space_->AssertAllRegionLiveBytesZeroOrCleared();
986 ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
987 region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
988 reinterpret_cast<uintptr_t>(region_space_->Limit()),
989 visitor);
990 }
991
992 // Assert the to-space invariant.
993 void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
994 mirror::Object* ref) {
995 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
996 if (is_asserting_to_space_invariant_) {
997 if (region_space_->IsInToSpace(ref)) {
998 // OK.
999 return;
1000 } else if (region_space_->IsInUnevacFromSpace(ref)) {
1001 CHECK(region_space_bitmap_->Test(ref)) << ref;
1002 } else if (region_space_->IsInFromSpace(ref)) {
1003 // Not OK. Do extra logging.
1004 if (obj != nullptr) {
1005 if (kUseBakerReadBarrier) {
1006 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
1007 << " holder rb_ptr=" << obj->GetReadBarrierPointer();
1008 } else {
1009 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
1010 }
1011 if (region_space_->IsInFromSpace(obj)) {
1012 LOG(INFO) << "holder is in the from-space.";
1013 } else if (region_space_->IsInToSpace(obj)) {
1014 LOG(INFO) << "holder is in the to-space.";
1015 } else if (region_space_->IsInUnevacFromSpace(obj)) {
1016 LOG(INFO) << "holder is in the unevac from-space.";
1017 if (region_space_bitmap_->Test(obj)) {
1018 LOG(INFO) << "holder is marked in the region space bitmap.";
1019 } else {
1020 LOG(INFO) << "holder is not marked in the region space bitmap.";
1021 }
1022 } else {
1023 // In a non-moving space.
1024 if (immune_region_.ContainsObject(obj)) {
1025 LOG(INFO) << "holder is in the image or the zygote space.";
1026 accounting::ContinuousSpaceBitmap* cc_bitmap =
1027 cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
1028 CHECK(cc_bitmap != nullptr)
1029 << "An immune space object must have a bitmap.";
1030 if (cc_bitmap->Test(obj)) {
1031 LOG(INFO) << "holder is marked in the bit map.";
1032 } else {
1033 LOG(INFO) << "holder is NOT marked in the bit map.";
1034 }
1035 } else {
1036 LOG(INFO) << "holder is in a non-moving (or main) space.";
1037 accounting::ContinuousSpaceBitmap* mark_bitmap =
1038 heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
1039 accounting::LargeObjectBitmap* los_bitmap =
1040 heap_mark_bitmap_->GetLargeObjectBitmap(obj);
1041 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1042 bool is_los = mark_bitmap == nullptr;
1043 if (!is_los && mark_bitmap->Test(obj)) {
1044 LOG(INFO) << "holder is marked in the mark bit map.";
1045 } else if (is_los && los_bitmap->Test(obj)) {
1046 LOG(INFO) << "holder is marked in the los bit map.";
1047 } else {
1048 // If ref is on the allocation stack, then it is considered
1049 // mark/alive (but not necessarily on the live stack.)
1050 if (IsOnAllocStack(obj)) {
1051 LOG(INFO) << "holder is on the alloc stack.";
1052 } else {
1053 LOG(INFO) << "holder is not marked or on the alloc stack.";
1054 }
1055 }
1056 }
1057 }
1058 LOG(INFO) << "offset=" << offset.SizeValue();
1059 }
1060 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1061 } else {
1062     // In a non-moving space. Check that the ref is marked.
1063 if (immune_region_.ContainsObject(ref)) {
1064 accounting::ContinuousSpaceBitmap* cc_bitmap =
1065 cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
1066 CHECK(cc_bitmap != nullptr)
1067 << "An immune space ref must have a bitmap. " << ref;
1068 if (kUseBakerReadBarrier) {
1069 CHECK(cc_bitmap->Test(ref))
1070 << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
1071 << obj->GetReadBarrierPointer() << " ref=" << ref;
1072 } else {
1073 CHECK(cc_bitmap->Test(ref))
1074 << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
1075 }
1076 } else {
1077 accounting::ContinuousSpaceBitmap* mark_bitmap =
1078 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
1079 accounting::LargeObjectBitmap* los_bitmap =
1080 heap_mark_bitmap_->GetLargeObjectBitmap(ref);
1081 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1082 bool is_los = mark_bitmap == nullptr;
1083 if ((!is_los && mark_bitmap->Test(ref)) ||
1084 (is_los && los_bitmap->Test(ref))) {
1085 // OK.
1086 } else {
1087 // If ref is on the allocation stack, then it may not be
1088 // marked live, but considered marked/alive (but not
1089 // necessarily on the live stack).
1090 CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
1091 << "obj=" << obj << " ref=" << ref;
1092 }
1093 }
1094 }
1095 }
1096 }
1097
1098 // Used to scan ref fields of an object.
1099 class ConcurrentCopyingRefFieldsVisitor {
1100 public:
1101   explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
1102 : collector_(collector) {}
1103
1104   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
1105 const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1106 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
1107 collector_->Process(obj, offset);
1108 }
1109
1110   void operator()(mirror::Class* klass, mirror::Reference* ref) const
1111 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
1112 CHECK(klass->IsTypeOfReferenceClass());
1113 collector_->DelayReferenceReferent(klass, ref);
1114 }
1115
1116 private:
1117 ConcurrentCopying* const collector_;
1118 };
1119
1120 // Scan ref fields of an object.
1121 void ConcurrentCopying::Scan(mirror::Object* to_ref) {
1122 DCHECK(!region_space_->IsInFromSpace(to_ref));
1123 ConcurrentCopyingRefFieldsVisitor visitor(this);
1124 to_ref->VisitReferences<true>(visitor, visitor);
1125 }
1126
1127 // Process a field.
1128 inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
1129 mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
1130 if (ref == nullptr || region_space_->IsInToSpace(ref)) {
1131 return;
1132 }
1133 mirror::Object* to_ref = Mark(ref);
1134 if (to_ref == ref) {
1135 return;
1136 }
1137 // This may fail if the mutator writes to the field at the same time. But it's ok.
1138 mirror::Object* expected_ref = ref;
1139 mirror::Object* new_ref = to_ref;
1140 do {
1141 if (expected_ref !=
1142 obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
1143 // It was updated by the mutator.
1144 break;
1145 }
1146 } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<false, false, kVerifyNone>(
1147 offset, expected_ref, new_ref));
1148 }
1149
1150 // Process some roots.
1151 void ConcurrentCopying::VisitRoots(
1152 mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
1153 for (size_t i = 0; i < count; ++i) {
1154 mirror::Object** root = roots[i];
1155 mirror::Object* ref = *root;
1156 if (ref == nullptr || region_space_->IsInToSpace(ref)) {
1157 continue;
1158 }
1159 mirror::Object* to_ref = Mark(ref);
1160 if (to_ref == ref) {
1161 continue;
1162 }
1163 Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
1164 mirror::Object* expected_ref = ref;
1165 mirror::Object* new_ref = to_ref;
1166 do {
1167 if (expected_ref != addr->LoadRelaxed()) {
1168 // It was updated by the mutator.
1169 break;
1170 }
1171 } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
1172 }
1173 }
1174
1175 void ConcurrentCopying::VisitRoots(
1176 mirror::CompressedReference<mirror::Object>** roots, size_t count,
1177 const RootInfo& info ATTRIBUTE_UNUSED) {
1178 for (size_t i = 0; i < count; ++i) {
1179 mirror::CompressedReference<mirror::Object>* root = roots[i];
1180 mirror::Object* ref = root->AsMirrorPtr();
1181 if (ref == nullptr || region_space_->IsInToSpace(ref)) {
1182 continue;
1183 }
1184 mirror::Object* to_ref = Mark(ref);
1185 if (to_ref == ref) {
1186 continue;
1187 }
1188 auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
1189 auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
1190 auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
1191 do {
1192 if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
1193 // It was updated by the mutator.
1194 break;
1195 }
1196 } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
1197 }
1198 }
1199
1200 // Fill the given memory block with a dummy object. Used to fill in a
1201 // copy of an object that was lost in the forwarding race.
1202 void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
1203 CHECK(IsAligned<kObjectAlignment>(byte_size));
1204 memset(dummy_obj, 0, byte_size);
1205 mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
1206 CHECK(int_array_class != nullptr);
1207 AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
1208 size_t component_size = int_array_class->GetComponentSize();
1209 CHECK_EQ(component_size, sizeof(int32_t));
1210 size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
1211 if (data_offset > byte_size) {
1212 // An int array is too big. Use java.lang.Object.
1213 mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
1214 AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
1215 CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
1216 dummy_obj->SetClass(java_lang_Object);
1217 CHECK_EQ(byte_size, dummy_obj->SizeOf());
1218 } else {
1219 // Use an int array.
1220 dummy_obj->SetClass(int_array_class);
1221 CHECK(dummy_obj->IsArrayInstance());
1222 int32_t length = (byte_size - data_offset) / component_size;
1223 dummy_obj->AsArray()->SetLength(length);
1224 CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
1225 << "byte_size=" << byte_size << " length=" << length
1226 << " component_size=" << component_size << " data_offset=" << data_offset;
1227 CHECK_EQ(byte_size, dummy_obj->SizeOf())
1228 << "byte_size=" << byte_size << " length=" << length
1229 << " component_size=" << component_size << " data_offset=" << data_offset;
1230 }
1231 }
1232
1233 // Reuse memory blocks that held copies of objects lost in the forwarding race.
1234 mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
1235 // Try to reuse the blocks that were unused due to CAS failures.
1236 CHECK(IsAligned<space::RegionSpace::kAlignment>(alloc_size));
1237 Thread* self = Thread::Current();
1238 size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
1239 MutexLock mu(self, skipped_blocks_lock_);
1240 auto it = skipped_blocks_map_.lower_bound(alloc_size);
1241 if (it == skipped_blocks_map_.end()) {
1242 // Not found.
1243 return nullptr;
1244 }
1245 {
1246 size_t byte_size = it->first;
1247 CHECK_GE(byte_size, alloc_size);
1248 if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
1249 // If remainder would be too small for a dummy object, retry with a larger request size.
1250 it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
1251 if (it == skipped_blocks_map_.end()) {
1252 // Not found.
1253 return nullptr;
1254 }
1255 CHECK(IsAligned<space::RegionSpace::kAlignment>(it->first - alloc_size));
1256 CHECK_GE(it->first - alloc_size, min_object_size)
1257 << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
1258 }
1259 }
1260 // Found a block.
1261 CHECK(it != skipped_blocks_map_.end());
1262 size_t byte_size = it->first;
1263 uint8_t* addr = it->second;
1264 CHECK_GE(byte_size, alloc_size);
1265 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
1266 CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size));
1267 if (kVerboseMode) {
1268 LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
1269 }
1270 skipped_blocks_map_.erase(it);
1271 memset(addr, 0, byte_size);
1272 if (byte_size > alloc_size) {
1273 // Return the remainder to the map.
1274 CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size - alloc_size));
1275 CHECK_GE(byte_size - alloc_size, min_object_size);
1276 FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
1277 byte_size - alloc_size);
1278 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
1279 skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
1280 }
1281 return reinterpret_cast<mirror::Object*>(addr);
1282 }
1283
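// Copy from_ref into the to-space (falling back to skipped blocks and then the non-moving
// space) and race to install a forwarding pointer in from_ref's lock word. Returns our copy
// on success, or the winner's copy if another thread forwarded the object first.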
1284 mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
1285 DCHECK(region_space_->IsInFromSpace(from_ref));
1286 // No read barrier to avoid nested RB that might violate the to-space
1287 // invariant. Note that from_ref is a from space ref so the SizeOf()
1288 // call will access the from-space meta objects, but it's ok and necessary.
1289 size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
1290 size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1291 size_t region_space_bytes_allocated = 0U;
1292 size_t non_moving_space_bytes_allocated = 0U;
1293 size_t bytes_allocated = 0U;
1294 size_t dummy;
1295 mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
1296       region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
1297 bytes_allocated = region_space_bytes_allocated;
1298 if (to_ref != nullptr) {
1299 DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
1300 }
1301 bool fall_back_to_non_moving = false;
1302 if (UNLIKELY(to_ref == nullptr)) {
1303 // Failed to allocate in the region space. Try the skipped blocks.
1304 to_ref = AllocateInSkippedBlock(region_space_alloc_size);
1305 if (to_ref != nullptr) {
1306       // Succeeded in allocating from a skipped block.
1307 if (heap_->use_tlab_) {
1308 // This is necessary for the tlab case as it's not accounted in the space.
1309 region_space_->RecordAlloc(to_ref);
1310 }
1311 bytes_allocated = region_space_alloc_size;
1312 } else {
1313 // Fall back to the non-moving space.
1314 fall_back_to_non_moving = true;
1315 if (kVerboseMode) {
1316 LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
1317 << to_space_bytes_skipped_.LoadSequentiallyConsistent()
1318 << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
1319 }
1320 fall_back_to_non_moving = true;
1321 to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
1322 &non_moving_space_bytes_allocated, nullptr, &dummy);
1323 CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
1324 bytes_allocated = non_moving_space_bytes_allocated;
1325 // Mark it in the mark bitmap.
1326 accounting::ContinuousSpaceBitmap* mark_bitmap =
1327 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1328 CHECK(mark_bitmap != nullptr);
1329 CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
1330 }
1331 }
1332 DCHECK(to_ref != nullptr);
1333
1334 // Attempt to install the forward pointer. This is in a loop as the
1335 // lock word atomic write can fail.
1336 while (true) {
1337 // Copy the object. TODO: on retries, copy only the lock word?
1338 memcpy(to_ref, from_ref, obj_size);
1339
1340 LockWord old_lock_word = to_ref->GetLockWord(false);
1341
1342 if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
1343 // Lost the race. Another thread (either GC or mutator) stored
1344 // the forwarding pointer first. Make the lost copy (to_ref)
1345 // look like a valid but dead (dummy) object and keep it for
1346 // future reuse.
1347 FillWithDummyObject(to_ref, bytes_allocated);
1348 if (!fall_back_to_non_moving) {
1349 DCHECK(region_space_->IsInToSpace(to_ref));
1350 if (bytes_allocated > space::RegionSpace::kRegionSize) {
1351 // Free the large alloc.
1352 region_space_->FreeLarge(to_ref, bytes_allocated);
1353 } else {
1354 // Record the lost copy for later reuse.
1355 heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
1356 to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
1357 to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
1358 MutexLock mu(Thread::Current(), skipped_blocks_lock_);
1359 skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
1360 reinterpret_cast<uint8_t*>(to_ref)));
1361 }
1362 } else {
1363 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
1364 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
1365 // Free the non-moving-space chunk.
1366 accounting::ContinuousSpaceBitmap* mark_bitmap =
1367 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1368 CHECK(mark_bitmap != nullptr);
1369 CHECK(mark_bitmap->Clear(to_ref));
1370 heap_->non_moving_space_->Free(Thread::Current(), to_ref);
1371 }
1372
1373 // Get the winner's forward ptr.
1374 mirror::Object* lost_fwd_ptr = to_ref;
1375 to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
1376 CHECK(to_ref != nullptr);
1377 CHECK_NE(to_ref, lost_fwd_ptr);
1378 CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
1379 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
1380 return to_ref;
1381 }
1382
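// With the Baker read barrier, the new to-space copy is published gray to
// indicate that its references have not been scanned yet; on a successful
// CAS below it is pushed onto the mark stack and blackened once its fields
// have been scanned.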
1383 // Set the gray ptr.
1384 if (kUseBakerReadBarrier) {
1385 to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
1386 }
1387
1388 LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
1389
1390 // Try to atomically write the fwd ptr.
1391 bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
1392 if (LIKELY(success)) {
1393 // The CAS succeeded.
1394 objects_moved_.FetchAndAddSequentiallyConsistent(1);
1395 bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
1396 if (LIKELY(!fall_back_to_non_moving)) {
1397 DCHECK(region_space_->IsInToSpace(to_ref));
1398 } else {
1399 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
1400 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
1401 }
1402 if (kUseBakerReadBarrier) {
1403 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1404 }
1405 DCHECK(GetFwdPtr(from_ref) == to_ref);
1406 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
1407 PushOntoMarkStack<true>(to_ref);
1408 return to_ref;
1409 } else {
1410 // The CAS failed. Either we lost the race to install the forwarding
1411 // address, or the lock word changed due to monitor/hashcode ops. Either way, retry.
1412 }
1413 }
1414 }
1415
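// Returns the to-space (or in-place marked) equivalent of from_ref if it is
// already marked, or nullptr if it is not. Unlike Mark(), this never marks
// or copies anything; it only queries the current marking state.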
1416 mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
1417 DCHECK(from_ref != nullptr);
1418 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
1419 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
1420 // It's already marked.
1421 return from_ref;
1422 }
1423 mirror::Object* to_ref;
1424 if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
1425 to_ref = GetFwdPtr(from_ref);
1426 DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
1427 heap_->non_moving_space_->HasAddress(to_ref))
1428 << "from_ref=" << from_ref << " to_ref=" << to_ref;
1429 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
1430 if (region_space_bitmap_->Test(from_ref)) {
1431 to_ref = from_ref;
1432 } else {
1433 to_ref = nullptr;
1434 }
1435 } else {
1436 // from_ref is in a non-moving space.
1437 if (immune_region_.ContainsObject(from_ref)) {
1438 accounting::ContinuousSpaceBitmap* cc_bitmap =
1439 cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
1440 DCHECK(cc_bitmap != nullptr)
1441 << "An immune space object must have a bitmap";
1442 if (kIsDebugBuild) {
1443 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
1444 << "Immune space object must be already marked";
1445 }
1446 if (cc_bitmap->Test(from_ref)) {
1447 // Already marked.
1448 to_ref = from_ref;
1449 } else {
1450 // Newly marked.
1451 to_ref = nullptr;
1452 }
1453 } else {
1454 // Non-immune non-moving space. Use the mark bitmap.
1455 accounting::ContinuousSpaceBitmap* mark_bitmap =
1456 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
1457 accounting::LargeObjectBitmap* los_bitmap =
1458 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
1459 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1460 bool is_los = mark_bitmap == nullptr;
1461 if (!is_los && mark_bitmap->Test(from_ref)) {
1462 // Already marked.
1463 to_ref = from_ref;
1464 } else if (is_los && los_bitmap->Test(from_ref)) {
1465 // Already marked in LOS.
1466 to_ref = from_ref;
1467 } else {
1468 // Not marked.
1469 if (IsOnAllocStack(from_ref)) {
1470 // If on the allocation stack, it's considered marked.
1471 to_ref = from_ref;
1472 } else {
1473 // Not marked.
1474 to_ref = nullptr;
1475 }
1476 }
1477 }
1478 }
1479 return to_ref;
1480 }
1481
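// Objects allocated during the collection remain on the allocation stack and
// are treated as implicitly marked by IsMarked() and Mark().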
1482 bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
1483 QuasiAtomic::ThreadFenceAcquire();
1484 accounting::ObjectStack* alloc_stack = GetAllocationStack();
1485 return alloc_stack->Contains(ref);
1486 }
1487
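// Slow path of the read barrier: returns the marked, to-space version of
// from_ref, copying or marking it first if necessary. A caller is expected to
// use it roughly like the following sketch (illustrative only, not the actual
// read barrier code):
//   mirror::Object* ref = ...;                // a reference read from the heap
//   ref = concurrent_copying->Mark(ref);      // now safe under the to-space invariant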
1488 mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
1489 if (from_ref == nullptr) {
1490 return nullptr;
1491 }
1492 DCHECK(from_ref != nullptr);
1493 DCHECK(heap_->collector_type_ == kCollectorTypeCC);
1494 if (kUseBakerReadBarrier && !is_active_) {
1495 // In the lock word forward address state, the read barrier bits
1496 // in the lock word are part of the stored forwarding address and
1497 // invalid. This is usually OK as the from-space copy of objects
1498 // aren't accessed by mutators due to the to-space
1499 // invariant. However, during the dex2oat image writing relocation
1500 // and the zygote compaction, objects can be in the forward
1501 // address state (to store the forward/relocation addresses) and
1502 // they can still be accessed and the invalid read barrier bits
1503 // are consulted. If they happen to look gray when they are not, the
1504 // read barrier slow path can trigger when it shouldn't. To guard
1505 // against this, return here if the CC collector isn't running.
1506 return from_ref;
1507 }
1508 DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
1509 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
1510 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
1511 // It's already marked.
1512 return from_ref;
1513 }
1514 mirror::Object* to_ref;
1515 if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
1516 to_ref = GetFwdPtr(from_ref);
1517 if (kUseBakerReadBarrier) {
1518 DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref;
1519 }
1520 if (to_ref == nullptr) {
1521 // It isn't marked yet. Mark it by copying it to the to-space.
1522 to_ref = Copy(from_ref);
1523 }
1524 DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
1525 << "from_ref=" << from_ref << " to_ref=" << to_ref;
1526 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
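// Unevacuated from-space regions are not copied; objects in them are marked
// in place using the region space bitmap.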
1527 // This may or may not succeed, which is ok.
1528 if (kUseBakerReadBarrier) {
1529 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
1530 }
1531 if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
1532 // Already marked.
1533 to_ref = from_ref;
1534 } else {
1535 // Newly marked.
1536 to_ref = from_ref;
1537 if (kUseBakerReadBarrier) {
1538 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1539 }
1540 PushOntoMarkStack<true>(to_ref);
1541 }
1542 } else {
1543 // from_ref is in a non-moving space.
1544 DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
1545 if (immune_region_.ContainsObject(from_ref)) {
1546 accounting::ContinuousSpaceBitmap* cc_bitmap =
1547 cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
1548 DCHECK(cc_bitmap != nullptr)
1549 << "An immune space object must have a bitmap";
1550 if (kIsDebugBuild) {
1551 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
1552 << "Immune space object must be already marked";
1553 }
1554 // This may or may not succeed, which is ok.
1555 if (kUseBakerReadBarrier) {
1556 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
1557 }
1558 if (cc_bitmap->AtomicTestAndSet(from_ref)) {
1559 // Already marked.
1560 to_ref = from_ref;
1561 } else {
1562 // Newly marked.
1563 to_ref = from_ref;
1564 if (kUseBakerReadBarrier) {
1565 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1566 }
1567 PushOntoMarkStack<true>(to_ref);
1568 }
1569 } else {
1570 // Use the mark bitmap.
1571 accounting::ContinuousSpaceBitmap* mark_bitmap =
1572 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
1573 accounting::LargeObjectBitmap* los_bitmap =
1574 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
1575 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1576 bool is_los = mark_bitmap == nullptr;
1577 if (!is_los && mark_bitmap->Test(from_ref)) {
1578 // Already marked.
1579 to_ref = from_ref;
1580 if (kUseBakerReadBarrier) {
1581 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
1582 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
1583 }
1584 } else if (is_los && los_bitmap->Test(from_ref)) {
1585 // Already marked in LOS.
1586 to_ref = from_ref;
1587 if (kUseBakerReadBarrier) {
1588 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
1589 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
1590 }
1591 } else {
1592 // Not marked.
1593 if (IsOnAllocStack(from_ref)) {
1594 // If it's on the allocation stack, it's considered marked. Keep it white.
1595 to_ref = from_ref;
1596 // Objects on the allocation stack need not be marked.
1597 if (!is_los) {
1598 DCHECK(!mark_bitmap->Test(to_ref));
1599 } else {
1600 DCHECK(!los_bitmap->Test(to_ref));
1601 }
1602 if (kUseBakerReadBarrier) {
1603 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
1604 }
1605 } else {
1606 // Not marked or on the allocation stack. Try to mark it.
1607 // This may or may not succeed, which is ok.
1608 if (kUseBakerReadBarrier) {
1609 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
1610 }
1611 if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) {
1612 // Already marked.
1613 to_ref = from_ref;
1614 } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) {
1615 // Already marked in LOS.
1616 to_ref = from_ref;
1617 } else {
1618 // Newly marked.
1619 to_ref = from_ref;
1620 if (kUseBakerReadBarrier) {
1621 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1622 }
1623 PushOntoMarkStack<true>(to_ref);
1624 }
1625 }
1626 }
1627 }
1628 }
1629 return to_ref;
1630 }
1631
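// Drops per-collection state (mark queue, skipped-blocks map, mark bitmaps)
// so that the next GC cycle starts from a clean slate.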
1632 void ConcurrentCopying::FinishPhase() {
1633 region_space_ = nullptr;
1634 CHECK(mark_queue_.IsEmpty());
1635 mark_queue_.Clear();
1636 {
1637 MutexLock mu(Thread::Current(), skipped_blocks_lock_);
1638 skipped_blocks_map_.clear();
1639 }
1640 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1641 heap_->ClearMarkedObjects();
1642 }
1643
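// The C-style trampolines below adapt the collector's member functions to the
// callback interfaces used by the reference processor and other heap code;
// 'arg' carries the ConcurrentCopying instance.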
1644 mirror::Object* ConcurrentCopying::IsMarkedCallback(mirror::Object* from_ref, void* arg) {
1645 return reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
1646 }
1647
1648 bool ConcurrentCopying::IsHeapReferenceMarkedCallback(
1649 mirror::HeapReference<mirror::Object>* field, void* arg) {
1650 mirror::Object* from_ref = field->AsMirrorPtr();
1651 mirror::Object* to_ref = reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
1652 if (to_ref == nullptr) {
1653 return false;
1654 }
1655 if (from_ref != to_ref) {
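// Ordering: the release fence keeps prior writes (e.g. the object copy)
// ordered before the reference update, and the full fence after the update
// orders it with respect to subsequent accesses.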
1656 QuasiAtomic::ThreadFenceRelease();
1657 field->Assign(to_ref);
1658 QuasiAtomic::ThreadFenceSequentiallyConsistent();
1659 }
1660 return true;
1661 }
1662
1663 mirror::Object* ConcurrentCopying::MarkCallback(mirror::Object* from_ref, void* arg) {
1664 return reinterpret_cast<ConcurrentCopying*>(arg)->Mark(from_ref);
1665 }
1666
1667 void ConcurrentCopying::ProcessMarkStackCallback(void* arg) {
1668 reinterpret_cast<ConcurrentCopying*>(arg)->ProcessMarkStack();
1669 }
1670
1671 void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
1672 heap_->GetReferenceProcessor()->DelayReferenceReferent(
1673 klass, reference, &IsHeapReferenceMarkedCallback, this);
1674 }
1675
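// Delegates java.lang.ref processing to the heap's ReferenceProcessor, using
// the marking callbacks defined above.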
1676 void ConcurrentCopying::ProcessReferences(Thread* self, bool concurrent) {
1677 TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
1678 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1679 GetHeap()->GetReferenceProcessor()->ProcessReferences(
1680 concurrent, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
1681 &IsHeapReferenceMarkedCallback, &MarkCallback, &ProcessMarkStackCallback, this);
1682 }
1683
1684 void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
1685 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1686 region_space_->RevokeAllThreadLocalBuffers();
1687 }
1688
1689 } // namespace collector
1690 } // namespace gc
1691 } // namespace art
1692