1 /*
2  * Copyright (C) 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "concurrent_copying.h"
18 
19 #include "art_field-inl.h"
20 #include "base/stl_util.h"
21 #include "debugger.h"
22 #include "gc/accounting/heap_bitmap-inl.h"
23 #include "gc/accounting/space_bitmap-inl.h"
24 #include "gc/reference_processor.h"
25 #include "gc/space/image_space.h"
26 #include "gc/space/space-inl.h"
27 #include "image-inl.h"
28 #include "intern_table.h"
29 #include "mirror/class-inl.h"
30 #include "mirror/object-inl.h"
31 #include "scoped_thread_state_change.h"
32 #include "thread-inl.h"
33 #include "thread_list.h"
34 #include "well_known_classes.h"
35 
36 namespace art {
37 namespace gc {
38 namespace collector {
39 
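// Initial size of the shared GC mark stack; PushOntoMarkStack() grows it on demand via
// ExpandGcMarkStack().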
40 static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
41 
42 ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
43     : GarbageCollector(heap,
44                        name_prefix + (name_prefix.empty() ? "" : " ") +
45                        "concurrent copying + mark sweep"),
46       region_space_(nullptr), gc_barrier_(new Barrier(0)),
47       gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
48                                                      kDefaultGcMarkStackSize,
49                                                      kDefaultGcMarkStackSize)),
50       mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
51       thread_running_gc_(nullptr),
52       is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
53       heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
54       weak_ref_access_enabled_(true),
55       skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
56       rb_table_(heap_->GetReadBarrierTable()),
57       force_evacuate_all_(false) {
58   static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
59                 "The region space size and the read barrier table region size must match");
60   cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
61   Thread* self = Thread::Current();
62   {
63     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
64     // Cache this so that we won't have to lock heap_bitmap_lock_ in
65     // Mark(), which could cause a nested lock on heap_bitmap_lock_
66     // if a read barrier is triggered while the GC is running, or a
67     // lock order violation (class_linker_lock_ and heap_bitmap_lock_).
68     heap_mark_bitmap_ = heap->GetMarkBitmap();
69   }
70   {
71     MutexLock mu(self, mark_stack_lock_);
72     for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
73       accounting::AtomicStack<mirror::Object>* mark_stack =
74           accounting::AtomicStack<mirror::Object>::Create(
75               "thread local mark stack", kMarkStackSize, kMarkStackSize);
76       pooled_mark_stacks_.push_back(mark_stack);
77     }
78   }
79 }
80 
81 void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) {
82   // Used for preserving soft references. It should be OK not to have a CAS here since there
83   // should be no other threads that can trigger read barriers on the same referent during
84   // reference processing.
85   from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
86   DCHECK(!from_ref->IsNull());
87 }
88 
89 ConcurrentCopying::~ConcurrentCopying() {
90   STLDeleteElements(&pooled_mark_stacks_);
91 }
92 
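// Overall flow: InitializePhase(), MarkingPhase() and ReclaimPhase() run while holding the
// mutator lock in shared mode (concurrently with mutators); FlipThreadRoots() and the optional
// from-space verification below introduce pauses.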
93 void ConcurrentCopying::RunPhases() {
94   CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
95   CHECK(!is_active_);
96   is_active_ = true;
97   Thread* self = Thread::Current();
98   thread_running_gc_ = self;
99   Locks::mutator_lock_->AssertNotHeld(self);
100   {
101     ReaderMutexLock mu(self, *Locks::mutator_lock_);
102     InitializePhase();
103   }
104   FlipThreadRoots();
105   {
106     ReaderMutexLock mu(self, *Locks::mutator_lock_);
107     MarkingPhase();
108   }
109   // Verify no from space refs. This causes a pause.
110   if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
111     TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
112     ScopedPause pause(this);
113     CheckEmptyMarkStack();
114     if (kVerboseMode) {
115       LOG(INFO) << "Verifying no from-space refs";
116     }
117     VerifyNoFromSpaceReferences();
118     if (kVerboseMode) {
119       LOG(INFO) << "Done verifying no from-space refs";
120     }
121     CheckEmptyMarkStack();
122   }
123   {
124     ReaderMutexLock mu(self, *Locks::mutator_lock_);
125     ReclaimPhase();
126   }
127   FinishPhase();
128   CHECK(is_active_);
129   is_active_ = false;
130   thread_running_gc_ = nullptr;
131 }
132 
133 void ConcurrentCopying::BindBitmaps() {
134   Thread* self = Thread::Current();
135   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
136   // Mark all of the spaces we never collect as immune.
137   for (const auto& space : heap_->GetContinuousSpaces()) {
138     if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
139         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
140       CHECK(space->IsZygoteSpace() || space->IsImageSpace());
141       immune_spaces_.AddSpace(space);
142       const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
143           "cc zygote space bitmap";
144       // TODO: try avoiding using bitmaps for image/zygote to save space.
145       accounting::ContinuousSpaceBitmap* bitmap =
146           accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
147       cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
148       cc_bitmaps_.push_back(bitmap);
149     } else if (space == region_space_) {
150       accounting::ContinuousSpaceBitmap* bitmap =
151           accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
152                                                     space->Begin(), space->Capacity());
153       cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
154       cc_bitmaps_.push_back(bitmap);
155       region_space_bitmap_ = bitmap;
156     }
157   }
158 }
159 
160 void ConcurrentCopying::InitializePhase() {
161   TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
162   if (kVerboseMode) {
163     LOG(INFO) << "GC InitializePhase";
164     LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
165               << reinterpret_cast<void*>(region_space_->Limit());
166   }
167   CheckEmptyMarkStack();
168   immune_spaces_.Reset();
169   bytes_moved_.StoreRelaxed(0);
170   objects_moved_.StoreRelaxed(0);
171   if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
172       GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
173       GetCurrentIteration()->GetClearSoftReferences()) {
174     force_evacuate_all_ = true;
175   } else {
176     force_evacuate_all_ = false;
177   }
178   BindBitmaps();
179   if (kVerboseMode) {
180     LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
181     LOG(INFO) << "Largest immune region: " << immune_spaces_.GetLargestImmuneRegion().Begin()
182               << "-" << immune_spaces_.GetLargestImmuneRegion().End();
183     for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
184       LOG(INFO) << "Immune space: " << *space;
185     }
186     LOG(INFO) << "GC end of InitializePhase";
187   }
188 }
189 
190 // Used to switch the thread roots of a thread from from-space refs to to-space refs.
191 class ThreadFlipVisitor : public Closure {
192  public:
193   ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
194       : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
195   }
196 
197   virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
198     // Note: self is not necessarily equal to thread since thread may be suspended.
199     Thread* self = Thread::Current();
200     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
201         << thread->GetState() << " thread " << thread << " self " << self;
202     thread->SetIsGcMarking(true);
203     if (use_tlab_ && thread->HasTlab()) {
204       if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
205         // This must come before the revoke.
206         size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
207         concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
208         reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
209             FetchAndAddSequentiallyConsistent(thread_local_objects);
210       } else {
211         concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
212       }
213     }
214     if (kUseThreadLocalAllocationStack) {
215       thread->RevokeThreadLocalAllocationStack();
216     }
217     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
218     thread->VisitRoots(concurrent_copying_);
219     concurrent_copying_->GetBarrier().Pass(self);
220   }
221 
222  private:
223   ConcurrentCopying* const concurrent_copying_;
224   const bool use_tlab_;
225 };
226 
227 // Called back from Runtime::FlipThreadRoots() during a pause.
228 class FlipCallback : public Closure {
229  public:
230   explicit FlipCallback(ConcurrentCopying* concurrent_copying)
231       : concurrent_copying_(concurrent_copying) {
232   }
233 
234   virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
235     ConcurrentCopying* cc = concurrent_copying_;
236     TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
237     // Note: self is not necessarily equal to thread since thread may be suspended.
238     Thread* self = Thread::Current();
239     CHECK(thread == self);
240     Locks::mutator_lock_->AssertExclusiveHeld(self);
241     cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
242     cc->SwapStacks();
243     if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
244       cc->RecordLiveStackFreezeSize(self);
245       cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
246       cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
247     }
248     cc->is_marking_ = true;
249     cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
250     if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
251       CHECK(Runtime::Current()->IsAotCompiler());
252       TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
253       Runtime::Current()->VisitTransactionRoots(cc);
254     }
255   }
256 
257  private:
258   ConcurrentCopying* const concurrent_copying_;
259 };
260 
261 // Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
262 void ConcurrentCopying::FlipThreadRoots() {
263   TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
264   if (kVerboseMode) {
265     LOG(INFO) << "time=" << region_space_->Time();
266     region_space_->DumpNonFreeRegions(LOG(INFO));
267   }
268   Thread* self = Thread::Current();
269   Locks::mutator_lock_->AssertNotHeld(self);
270   gc_barrier_->Init(self, 0);
271   ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
272   FlipCallback flip_callback(this);
273   heap_->ThreadFlipBegin(self);  // Sync with JNI critical calls.
274   size_t barrier_count = Runtime::Current()->FlipThreadRoots(
275       &thread_flip_visitor, &flip_callback, this);
276   heap_->ThreadFlipEnd(self);
277   {
278     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
279     gc_barrier_->Increment(self, barrier_count);
280   }
281   is_asserting_to_space_invariant_ = true;
282   QuasiAtomic::ThreadFenceForConstructor();
283   if (kVerboseMode) {
284     LOG(INFO) << "time=" << region_space_->Time();
285     region_space_->DumpNonFreeRegions(LOG(INFO));
286     LOG(INFO) << "GC end of FlipThreadRoots";
287   }
288 }
289 
290 void ConcurrentCopying::SwapStacks() {
291   heap_->SwapStacks();
292 }
293 
294 void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
295   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
296   live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
297 }
298 
299 // Used to visit objects in the immune spaces.
300 class ConcurrentCopyingImmuneSpaceObjVisitor {
301  public:
302   explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
303       : collector_(cc) {}
304 
305   void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
306       SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
307     DCHECK(obj != nullptr);
308     DCHECK(collector_->immune_spaces_.ContainsObject(obj));
309     accounting::ContinuousSpaceBitmap* cc_bitmap =
310         collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
311     DCHECK(cc_bitmap != nullptr)
312         << "An immune space object must have a bitmap";
313     if (kIsDebugBuild) {
314       DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
315           << "Immune space object must be already marked";
316     }
317     // This may or may not succeed, which is ok.
318     if (kUseBakerReadBarrier) {
319       obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
320     }
321     if (cc_bitmap->AtomicTestAndSet(obj)) {
322       // Already marked. Do nothing.
323     } else {
324       // Newly marked. Set the gray bit and push it onto the mark stack.
325       CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
326       collector_->PushOntoMarkStack(obj);
327     }
328   }
329 
330  private:
331   ConcurrentCopying* const collector_;
332 };
333 
334 class EmptyCheckpoint : public Closure {
335  public:
336   explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
337       : concurrent_copying_(concurrent_copying) {
338   }
339 
340   virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
341     // Note: self is not necessarily equal to thread since thread may be suspended.
342     Thread* self = Thread::Current();
343     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
344         << thread->GetState() << " thread " << thread << " self " << self;
345     // If thread is a running mutator, then act on behalf of the garbage collector.
346     // See the code in ThreadList::RunCheckpoint.
347     concurrent_copying_->GetBarrier().Pass(self);
348   }
349 
350  private:
351   ConcurrentCopying* const concurrent_copying_;
352 };
353 
354 // Concurrently mark roots that are guarded by read barriers and process the mark stack.
355 void ConcurrentCopying::MarkingPhase() {
356   TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
357   if (kVerboseMode) {
358     LOG(INFO) << "GC MarkingPhase";
359   }
360   CHECK(weak_ref_access_enabled_);
361   {
362     // Mark the image root. The WB-based collectors do not need to
363     // scan the image objects from roots by relying on the card table,
364     // but it's necessary for the RB to-space invariant to hold.
365     TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
366     for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
367       if (space->IsImageSpace()) {
368         gc::space::ImageSpace* image = space->AsImageSpace();
369         if (image != nullptr) {
370           mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
371           mirror::Object* marked_image_root = Mark(image_root);
372           CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
373           if (ReadBarrier::kEnableToSpaceInvariantChecks) {
374             AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
375           }
376         }
377       }
378     }
379   }
380   {
381     TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
382     Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
383   }
384   {
385     // TODO: don't visit the transaction roots if it's not active.
386     TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
387     Runtime::Current()->VisitNonThreadRoots(this);
388   }
389 
390   // Immune spaces.
391   for (auto& space : immune_spaces_.GetSpaces()) {
392     DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
393     accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
394     ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
395     live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
396                                   reinterpret_cast<uintptr_t>(space->Limit()),
397                                   visitor);
398   }
399 
400   Thread* self = Thread::Current();
401   {
402     TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
403     // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
404     // primary reasons are that we need a checkpoint to process thread-local mark stacks; that
405     // after we disable weak ref accesses, we can't use a checkpoint because it could deadlock
406     // (running threads may be blocked at WaitHoldingLocks); and that once we reach the point
407     // where we process weak references, we can avoid using a lock when accessing the GC mark
408     // stack, which makes mark stack processing more efficient.
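    // In short, the mark stack mode transitions are:
    //   kMarkStackModeThreadLocal -> kMarkStackModeShared -> kMarkStackModeGcExclusive
    // (and finally kMarkStackModeOff in DisableMarking()).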
409 
410     // Process the mark stack once in the thread local stack mode. This marks most of the live
411     // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and system
412     // weaks) that may happen concurrently while we are processing the mark stack, and which may
413     // newly mark/gray objects and push refs onto the mark stack.
414     ProcessMarkStack();
415     // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
416     // for the last time before transitioning to the shared mark stack mode, which would process new
417     // refs that may have been concurrently pushed onto the mark stack during the ProcessMarkStack()
418     // call above. At the same time, disable weak ref accesses using a per-thread flag. It's
419     // important to do these together in a single checkpoint so that we can ensure that mutators
420     // won't newly gray objects and push new refs onto the mark stack due to weak ref accesses and
421     // mutators safely transition to the shared mark stack mode (without leaving unprocessed refs on
422     // the thread-local mark stacks), without a race. This is why we use a thread-local weak ref
423     // access flag Thread::tls32_.weak_ref_access_enabled_ instead of the global ones.
424     SwitchToSharedMarkStackMode();
425     CHECK(!self->GetWeakRefAccessEnabled());
426     // Now that weak refs accesses are disabled, once we exhaust the shared mark stack again here
427     // (which may be non-empty if there were refs found on thread-local mark stacks during the above
428     // SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is, mutators
429     // (via read barriers) have no way to produce any more refs to process. Marking converges once
430     // before we process weak refs below.
431     ProcessMarkStack();
432     CheckEmptyMarkStack();
433     // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
434     // lock from this point on.
435     SwitchToGcExclusiveMarkStackMode();
436     CheckEmptyMarkStack();
437     if (kVerboseMode) {
438       LOG(INFO) << "ProcessReferences";
439     }
440     // Process weak references. This may produce new refs to process and have them processed via
441     // ProcessMarkStack (in the GC exclusive mark stack mode).
442     ProcessReferences(self);
443     CheckEmptyMarkStack();
444     if (kVerboseMode) {
445       LOG(INFO) << "SweepSystemWeaks";
446     }
447     SweepSystemWeaks(self);
448     if (kVerboseMode) {
449       LOG(INFO) << "SweepSystemWeaks done";
450     }
451     // Process the mark stack here one last time because the above SweepSystemWeaks() call may have
452     // marked some objects (strings alive) as hash_set::Erase() can call the hash function for
453     // arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
454     ProcessMarkStack();
455     CheckEmptyMarkStack();
456     // Re-enable weak ref accesses.
457     ReenableWeakRefAccess(self);
458     // Free data for class loaders that we unloaded.
459     Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
460     // Marking is done. Disable marking.
461     DisableMarking();
462     CheckEmptyMarkStack();
463   }
464 
465   CHECK(weak_ref_access_enabled_);
466   if (kVerboseMode) {
467     LOG(INFO) << "GC end of MarkingPhase";
468   }
469 }
470 
471 void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
472   if (kVerboseMode) {
473     LOG(INFO) << "ReenableWeakRefAccess";
474   }
475   weak_ref_access_enabled_.StoreRelaxed(true);  // This is for new threads.
476   QuasiAtomic::ThreadFenceForConstructor();
477   // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
478   {
479     MutexLock mu(self, *Locks::thread_list_lock_);
480     std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
481     for (Thread* thread : thread_list) {
482       thread->SetWeakRefAccessEnabled(true);
483     }
484   }
485   // Unblock blocking threads.
486   GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
487   Runtime::Current()->BroadcastForNewSystemWeaks();
488 }
489 
490 class DisableMarkingCheckpoint : public Closure {
491  public:
492   explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
493       : concurrent_copying_(concurrent_copying) {
494   }
495 
496   void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
497     // Note: self is not necessarily equal to thread since thread may be suspended.
498     Thread* self = Thread::Current();
499     DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
500         << thread->GetState() << " thread " << thread << " self " << self;
501     // Disable the thread-local is_gc_marking flag.
502     // Note a thread that has just started right before this checkpoint may already have this flag
503     // set to false, which is ok.
504     thread->SetIsGcMarking(false);
505     // If thread is a running mutator, then act on behalf of the garbage collector.
506     // See the code in ThreadList::RunCheckpoint.
507     concurrent_copying_->GetBarrier().Pass(self);
508   }
509 
510  private:
511   ConcurrentCopying* const concurrent_copying_;
512 };
513 
514 void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
515   Thread* self = Thread::Current();
516   DisableMarkingCheckpoint check_point(this);
517   ThreadList* thread_list = Runtime::Current()->GetThreadList();
518   gc_barrier_->Init(self, 0);
519   size_t barrier_count = thread_list->RunCheckpoint(&check_point);
520   // If there are no threads to wait for, which implies that all the checkpoint functions have
521   // finished, then there is no need to release the mutator lock.
522   if (barrier_count == 0) {
523     return;
524   }
525   // Release locks then wait for all mutator threads to pass the barrier.
526   Locks::mutator_lock_->SharedUnlock(self);
527   {
528     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
529     gc_barrier_->Increment(self, barrier_count);
530   }
531   Locks::mutator_lock_->SharedLock(self);
532 }
533 
534 void ConcurrentCopying::DisableMarking() {
535   // Change the global is_marking flag to false. Do a fence before doing a checkpoint to update the
536   // thread-local flags so that a new thread starting up will get the correct is_marking flag.
537   is_marking_ = false;
538   QuasiAtomic::ThreadFenceForConstructor();
539   // Use a checkpoint to turn off the thread-local is_gc_marking flags and to ensure no threads are
540   // still in the middle of a read barrier which may have a from-space ref cached in a local
541   // variable.
542   IssueDisableMarkingCheckpoint();
543   if (kUseTableLookupReadBarrier) {
544     heap_->rb_table_->ClearAll();
545     DCHECK(heap_->rb_table_->IsAllCleared());
546   }
547   is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
548   mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
549 }
550 
551 void ConcurrentCopying::IssueEmptyCheckpoint() {
552   Thread* self = Thread::Current();
553   EmptyCheckpoint check_point(this);
554   ThreadList* thread_list = Runtime::Current()->GetThreadList();
555   gc_barrier_->Init(self, 0);
556   size_t barrier_count = thread_list->RunCheckpoint(&check_point);
557   // If there are no threads to wait for, which implies that all the checkpoint functions have
558   // finished, then there is no need to release the mutator lock.
559   if (barrier_count == 0) {
560     return;
561   }
562   // Release locks then wait for all mutator threads to pass the barrier.
563   Locks::mutator_lock_->SharedUnlock(self);
564   {
565     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
566     gc_barrier_->Increment(self, barrier_count);
567   }
568   Locks::mutator_lock_->SharedLock(self);
569 }
570 
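// Double the capacity of the GC mark stack. The entries are copied out to a temporary vector
// and re-pushed after the Resize() call.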
571 void ConcurrentCopying::ExpandGcMarkStack() {
572   DCHECK(gc_mark_stack_->IsFull());
573   const size_t new_size = gc_mark_stack_->Capacity() * 2;
574   std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
575                                                    gc_mark_stack_->End());
576   gc_mark_stack_->Resize(new_size);
577   for (auto& ref : temp) {
578     gc_mark_stack_->PushBack(ref.AsMirrorPtr());
579   }
580   DCHECK(!gc_mark_stack_->IsFull());
581 }
582 
583 void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
584   CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
585       << " " << to_ref << " " << PrettyTypeOf(to_ref);
586   Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
587   CHECK(thread_running_gc_ != nullptr);
588   MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
589   if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
590     if (LIKELY(self == thread_running_gc_)) {
591       // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
592       CHECK(self->GetThreadLocalMarkStack() == nullptr);
593       if (UNLIKELY(gc_mark_stack_->IsFull())) {
594         ExpandGcMarkStack();
595       }
596       gc_mark_stack_->PushBack(to_ref);
597     } else {
598       // Otherwise, use a thread-local mark stack.
599       accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
600       if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
601         MutexLock mu(self, mark_stack_lock_);
602         // Get a new thread local mark stack.
603         accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
604         if (!pooled_mark_stacks_.empty()) {
605           // Use a pooled mark stack.
606           new_tl_mark_stack = pooled_mark_stacks_.back();
607           pooled_mark_stacks_.pop_back();
608         } else {
609           // None pooled. Create a new one.
610           new_tl_mark_stack =
611               accounting::AtomicStack<mirror::Object>::Create(
612                   "thread local mark stack", 4 * KB, 4 * KB);
613         }
614         DCHECK(new_tl_mark_stack != nullptr);
615         DCHECK(new_tl_mark_stack->IsEmpty());
616         new_tl_mark_stack->PushBack(to_ref);
617         self->SetThreadLocalMarkStack(new_tl_mark_stack);
618         if (tl_mark_stack != nullptr) {
619           // Store the old full stack into a vector.
620           revoked_mark_stacks_.push_back(tl_mark_stack);
621         }
622       } else {
623         tl_mark_stack->PushBack(to_ref);
624       }
625     }
626   } else if (mark_stack_mode == kMarkStackModeShared) {
627     // Access the shared GC mark stack with a lock.
628     MutexLock mu(self, mark_stack_lock_);
629     if (UNLIKELY(gc_mark_stack_->IsFull())) {
630       ExpandGcMarkStack();
631     }
632     gc_mark_stack_->PushBack(to_ref);
633   } else {
634     CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
635              static_cast<uint32_t>(kMarkStackModeGcExclusive))
636         << "ref=" << to_ref
637         << " self->gc_marking=" << self->GetIsGcMarking()
638         << " cc->is_marking=" << is_marking_;
639     CHECK(self == thread_running_gc_)
640         << "Only GC-running thread should access the mark stack "
641         << "in the GC exclusive mark stack mode";
642     // Access the GC mark stack without a lock.
643     if (UNLIKELY(gc_mark_stack_->IsFull())) {
644       ExpandGcMarkStack();
645     }
646     gc_mark_stack_->PushBack(to_ref);
647   }
648 }
649 
650 accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
651   return heap_->allocation_stack_.get();
652 }
653 
654 accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
655   return heap_->live_stack_.get();
656 }
657 
658 // The following visitors are used to verify that there are no
659 // references to the from-space left after marking.
660 class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
661  public:
662   explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
663       : collector_(collector) {}
664 
665   void operator()(mirror::Object* ref) const
666       SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
667     if (ref == nullptr) {
668       // OK.
669       return;
670     }
671     collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
672     if (kUseBakerReadBarrier) {
673       if (collector_->RegionSpace()->IsInToSpace(ref)) {
674         CHECK(ref->GetReadBarrierPointer() == nullptr)
675             << "To-space ref " << ref << " " << PrettyTypeOf(ref)
676             << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
677       } else {
678         CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
679               (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
680                collector_->IsOnAllocStack(ref)))
681             << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
682             << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
683             << " but isn't on the alloc stack (and has white rb_ptr)."
684             << " Is it in the non-moving space="
685             << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
686       }
687     }
688   }
689 
690   void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
691       OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
692     DCHECK(root != nullptr);
693     operator()(root);
694   }
695 
696  private:
697   ConcurrentCopying* const collector_;
698 };
699 
700 class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
701  public:
702   explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
703       : collector_(collector) {}
704 
705   void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
706       SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
707     mirror::Object* ref =
708         obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
709     ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
710     visitor(ref);
711   }
712   void operator()(mirror::Class* klass, mirror::Reference* ref) const
713       SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
714     CHECK(klass->IsTypeOfReferenceClass());
715     this->operator()(ref, mirror::Reference::ReferentOffset(), false);
716   }
717 
718   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
719       SHARED_REQUIRES(Locks::mutator_lock_) {
720     if (!root->IsNull()) {
721       VisitRoot(root);
722     }
723   }
724 
725   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
726       SHARED_REQUIRES(Locks::mutator_lock_) {
727     ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
728     visitor(root->AsMirrorPtr());
729   }
730 
731  private:
732   ConcurrentCopying* const collector_;
733 };
734 
735 class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
736  public:
737   explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
738       : collector_(collector) {}
739   void operator()(mirror::Object* obj) const
740       SHARED_REQUIRES(Locks::mutator_lock_) {
741     ObjectCallback(obj, collector_);
742   }
743   static void ObjectCallback(mirror::Object* obj, void *arg)
744       SHARED_REQUIRES(Locks::mutator_lock_) {
745     CHECK(obj != nullptr);
746     ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
747     space::RegionSpace* region_space = collector->RegionSpace();
748     CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
749     ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
750     obj->VisitReferences(visitor, visitor);
751     if (kUseBakerReadBarrier) {
752       if (collector->RegionSpace()->IsInToSpace(obj)) {
753         CHECK(obj->GetReadBarrierPointer() == nullptr)
754             << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
755       } else {
756         CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
757               (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
758                collector->IsOnAllocStack(obj)))
759             << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
760             << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
761             << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
762             << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
763       }
764     }
765   }
766 
767  private:
768   ConcurrentCopying* const collector_;
769 };
770 
771 // Verify there's no from-space references left after the marking phase.
772 void ConcurrentCopying::VerifyNoFromSpaceReferences() {
773   Thread* self = Thread::Current();
774   DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
775   // Verify that all threads have is_gc_marking set to false.
776   {
777     MutexLock mu(self, *Locks::thread_list_lock_);
778     std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
779     for (Thread* thread : thread_list) {
780       CHECK(!thread->GetIsGcMarking());
781     }
782   }
783   ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
784   // Roots.
785   {
786     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
787     ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
788     Runtime::Current()->VisitRoots(&ref_visitor);
789   }
790   // The to-space.
791   region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
792                              this);
793   // Non-moving spaces.
794   {
795     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
796     heap_->GetMarkBitmap()->Visit(visitor);
797   }
798   // The alloc stack.
799   {
800     ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
801     for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
802         it < end; ++it) {
803       mirror::Object* const obj = it->AsMirrorPtr();
804       if (obj != nullptr && obj->GetClass() != nullptr) {
805         // TODO: need to call this only if obj is alive?
806         ref_visitor(obj);
807         visitor(obj);
808       }
809     }
810   }
811   // TODO: LOS. But only refs in LOS are classes.
812 }
813 
814 // The following visitors are used to assert the to-space invariant.
815 class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
816  public:
817   explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
818       : collector_(collector) {}
819 
820   void operator()(mirror::Object* ref) const
821       SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
822     if (ref == nullptr) {
823       // OK.
824       return;
825     }
826     collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
827   }
828 
829  private:
830   ConcurrentCopying* const collector_;
831 };
832 
833 class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
834  public:
835   explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
836       : collector_(collector) {}
837 
838   void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
839       SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
840     mirror::Object* ref =
841         obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
842     ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
843     visitor(ref);
844   }
845   void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
846       SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
847     CHECK(klass->IsTypeOfReferenceClass());
848   }
849 
850   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
851       SHARED_REQUIRES(Locks::mutator_lock_) {
852     if (!root->IsNull()) {
853       VisitRoot(root);
854     }
855   }
856 
857   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
858       SHARED_REQUIRES(Locks::mutator_lock_) {
859     ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
860     visitor(root->AsMirrorPtr());
861   }
862 
863  private:
864   ConcurrentCopying* const collector_;
865 };
866 
867 class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
868  public:
869   explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
870       : collector_(collector) {}
871   void operator()(mirror::Object* obj) const
872       SHARED_REQUIRES(Locks::mutator_lock_) {
873     ObjectCallback(obj, collector_);
874   }
875   static void ObjectCallback(mirror::Object* obj, void *arg)
876       SHARED_REQUIRES(Locks::mutator_lock_) {
877     CHECK(obj != nullptr);
878     ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
879     space::RegionSpace* region_space = collector->RegionSpace();
880     CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
881     collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
882     ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
883     obj->VisitReferences(visitor, visitor);
884   }
885 
886  private:
887   ConcurrentCopying* const collector_;
888 };
889 
890 class RevokeThreadLocalMarkStackCheckpoint : public Closure {
891  public:
892   RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
893                                        bool disable_weak_ref_access)
894       : concurrent_copying_(concurrent_copying),
895         disable_weak_ref_access_(disable_weak_ref_access) {
896   }
897 
898   virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
899     // Note: self is not necessarily equal to thread since thread may be suspended.
900     Thread* self = Thread::Current();
901     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
902         << thread->GetState() << " thread " << thread << " self " << self;
903     // Revoke thread local mark stacks.
904     accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
905     if (tl_mark_stack != nullptr) {
906       MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
907       concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
908       thread->SetThreadLocalMarkStack(nullptr);
909     }
910     // Disable weak ref access.
911     if (disable_weak_ref_access_) {
912       thread->SetWeakRefAccessEnabled(false);
913     }
914     // If thread is a running mutator, then act on behalf of the garbage collector.
915     // See the code in ThreadList::RunCheckpoint.
916     concurrent_copying_->GetBarrier().Pass(self);
917   }
918 
919  private:
920   ConcurrentCopying* const concurrent_copying_;
921   const bool disable_weak_ref_access_;
922 };
923 
924 void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) {
925   Thread* self = Thread::Current();
926   RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
927   ThreadList* thread_list = Runtime::Current()->GetThreadList();
928   gc_barrier_->Init(self, 0);
929   size_t barrier_count = thread_list->RunCheckpoint(&check_point);
930   // If there are no threads to wait for, which implies that all the checkpoint functions have
931   // finished, then there is no need to release the mutator lock.
932   if (barrier_count == 0) {
933     return;
934   }
935   Locks::mutator_lock_->SharedUnlock(self);
936   {
937     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
938     gc_barrier_->Increment(self, barrier_count);
939   }
940   Locks::mutator_lock_->SharedLock(self);
941 }
942 
943 void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
944   Thread* self = Thread::Current();
945   CHECK_EQ(self, thread);
946   accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
947   if (tl_mark_stack != nullptr) {
948     CHECK(is_marking_);
949     MutexLock mu(self, mark_stack_lock_);
950     revoked_mark_stacks_.push_back(tl_mark_stack);
951     thread->SetThreadLocalMarkStack(nullptr);
952   }
953 }
954 
955 void ConcurrentCopying::ProcessMarkStack() {
956   if (kVerboseMode) {
957     LOG(INFO) << "ProcessMarkStack. ";
958   }
959   bool empty_prev = false;
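  // Keep draining until the mark stack is observed empty on two consecutive passes; a single
  // empty pass is not enough because ProcessMarkStackOnce() may itself cause more refs to be
  // pushed (e.g. via the thread-local mark stack revocation checkpoint).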
960   while (true) {
961     bool empty = ProcessMarkStackOnce();
962     if (empty_prev && empty) {
963       // Saw empty mark stack for a second time, done.
964       break;
965     }
966     empty_prev = empty;
967   }
968 }
969 
970 bool ConcurrentCopying::ProcessMarkStackOnce() {
971   Thread* self = Thread::Current();
972   CHECK(thread_running_gc_ != nullptr);
973   CHECK(self == thread_running_gc_);
974   CHECK(self->GetThreadLocalMarkStack() == nullptr);
975   size_t count = 0;
976   MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
977   if (mark_stack_mode == kMarkStackModeThreadLocal) {
978     // Process the thread-local mark stacks and the GC mark stack.
979     count += ProcessThreadLocalMarkStacks(false);
980     while (!gc_mark_stack_->IsEmpty()) {
981       mirror::Object* to_ref = gc_mark_stack_->PopBack();
982       ProcessMarkStackRef(to_ref);
983       ++count;
984     }
985     gc_mark_stack_->Reset();
986   } else if (mark_stack_mode == kMarkStackModeShared) {
987     // Process the shared GC mark stack with a lock.
988     {
989       MutexLock mu(self, mark_stack_lock_);
990       CHECK(revoked_mark_stacks_.empty());
991     }
992     while (true) {
993       std::vector<mirror::Object*> refs;
994       {
995         // Copy refs with lock. Note the number of refs should be small.
996         MutexLock mu(self, mark_stack_lock_);
997         if (gc_mark_stack_->IsEmpty()) {
998           break;
999         }
1000         for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
1001              p != gc_mark_stack_->End(); ++p) {
1002           refs.push_back(p->AsMirrorPtr());
1003         }
1004         gc_mark_stack_->Reset();
1005       }
1006       for (mirror::Object* ref : refs) {
1007         ProcessMarkStackRef(ref);
1008         ++count;
1009       }
1010     }
1011   } else {
1012     CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
1013              static_cast<uint32_t>(kMarkStackModeGcExclusive));
1014     {
1015       MutexLock mu(self, mark_stack_lock_);
1016       CHECK(revoked_mark_stacks_.empty());
1017     }
1018     // Process the GC mark stack in the exclusive mode. No need to take the lock.
1019     while (!gc_mark_stack_->IsEmpty()) {
1020       mirror::Object* to_ref = gc_mark_stack_->PopBack();
1021       ProcessMarkStackRef(to_ref);
1022       ++count;
1023     }
1024     gc_mark_stack_->Reset();
1025   }
1026 
1027   // Return true if the stack was empty.
1028   return count == 0;
1029 }
1030 
1031 size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) {
1032   // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
1033   RevokeThreadLocalMarkStacks(disable_weak_ref_access);
1034   size_t count = 0;
1035   std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
1036   {
1037     MutexLock mu(Thread::Current(), mark_stack_lock_);
1038     // Make a copy of the mark stack vector.
1039     mark_stacks = revoked_mark_stacks_;
1040     revoked_mark_stacks_.clear();
1041   }
1042   for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
1043     for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
1044       mirror::Object* to_ref = p->AsMirrorPtr();
1045       ProcessMarkStackRef(to_ref);
1046       ++count;
1047     }
1048     {
1049       MutexLock mu(Thread::Current(), mark_stack_lock_);
1050       if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
1051         // The pool has enough. Delete it.
1052         delete mark_stack;
1053       } else {
1054         // Otherwise, put it into the pool for later reuse.
1055         mark_stack->Reset();
1056         pooled_mark_stacks_.push_back(mark_stack);
1057       }
1058     }
1059   }
1060   return count;
1061 }
1062 
1063 inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
1064   DCHECK(!region_space_->IsInFromSpace(to_ref));
1065   if (kUseBakerReadBarrier) {
1066     DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
1067         << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
1068         << " is_marked=" << IsMarked(to_ref);
1069   }
1070   // Scan ref fields.
1071   Scan(to_ref);
1072   // Mark the gray ref as white or black.
1073   if (kUseBakerReadBarrier) {
1074     DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
1075         << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
1076         << " is_marked=" << IsMarked(to_ref);
1077   }
1078 #ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
1079   if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
1080                 to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
1081                 !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) {
1082     // Leave this Reference gray in the queue so that GetReferent() will trigger a read barrier. We
1083     // will change it to black or white later in ReferenceQueue::DequeuePendingReference().
1084     DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr) << "Left unenqueued ref gray " << to_ref;
1085   } else {
1086     // We may occasionally leave a Reference black or white in the queue if its referent happens to
1087     // be concurrently marked after the Scan() call above has enqueued the Reference, in which case
1088     // the above IsInToSpace() evaluates to true and we change the color from gray to black or white
1089     // here in this else block.
1090     if (kUseBakerReadBarrier) {
1091       if (region_space_->IsInToSpace(to_ref)) {
1092         // If to-space, change from gray to white.
1093         bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
1094             ReadBarrier::GrayPtr(),
1095             ReadBarrier::WhitePtr());
1096         DCHECK(success) << "Must succeed as we won the race.";
1097         DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
1098       } else {
1099         // If non-moving space/unevac from space, change from gray
1100         // to black. We can't change gray to white because it's not
1101         // safe to use CAS if two threads change values in opposite
1102         // directions (A->B and B->A). So, we change it to black to
1103         // indicate non-moving objects that have been marked
1104         // through. Note we'd need to change from black to white
1105         // later (concurrently).
1106         bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
1107             ReadBarrier::GrayPtr(),
1108             ReadBarrier::BlackPtr());
1109         DCHECK(success) << "Must succeed as we won the race.";
1110         DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
1111       }
1112     }
1113   }
1114 #else
1115   DCHECK(!kUseBakerReadBarrier);
1116 #endif
1117   if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
1118     ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
1119     visitor(to_ref);
1120   }
1121 }
1122 
1123 void ConcurrentCopying::SwitchToSharedMarkStackMode() {
1124   Thread* self = Thread::Current();
1125   CHECK(thread_running_gc_ != nullptr);
1126   CHECK_EQ(self, thread_running_gc_);
1127   CHECK(self->GetThreadLocalMarkStack() == nullptr);
1128   MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1129   CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
1130            static_cast<uint32_t>(kMarkStackModeThreadLocal));
1131   mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
1132   CHECK(weak_ref_access_enabled_.LoadRelaxed());
1133   weak_ref_access_enabled_.StoreRelaxed(false);
1134   QuasiAtomic::ThreadFenceForConstructor();
1135   // Process the thread local mark stacks one last time after switching to the shared mark stack
1136   // mode and disable weak ref accesses.
1137   ProcessThreadLocalMarkStacks(true);
1138   if (kVerboseMode) {
1139     LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
1140   }
1141 }
1142 
1143 void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
1144   Thread* self = Thread::Current();
1145   CHECK(thread_running_gc_ != nullptr);
1146   CHECK_EQ(self, thread_running_gc_);
1147   CHECK(self->GetThreadLocalMarkStack() == nullptr);
1148   MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1149   CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
1150            static_cast<uint32_t>(kMarkStackModeShared));
1151   mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
1152   QuasiAtomic::ThreadFenceForConstructor();
1153   if (kVerboseMode) {
1154     LOG(INFO) << "Switched to GC exclusive mark stack mode";
1155   }
1156 }
1157 
1158 void ConcurrentCopying::CheckEmptyMarkStack() {
1159   Thread* self = Thread::Current();
1160   CHECK(thread_running_gc_ != nullptr);
1161   CHECK_EQ(self, thread_running_gc_);
1162   CHECK(self->GetThreadLocalMarkStack() == nullptr);
1163   MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1164   if (mark_stack_mode == kMarkStackModeThreadLocal) {
1165     // Thread-local mark stack mode.
1166     RevokeThreadLocalMarkStacks(false);
1167     MutexLock mu(Thread::Current(), mark_stack_lock_);
1168     if (!revoked_mark_stacks_.empty()) {
1169       for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
1170         while (!mark_stack->IsEmpty()) {
1171           mirror::Object* obj = mark_stack->PopBack();
1172           if (kUseBakerReadBarrier) {
1173             mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
1174             LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
1175                       << " is_marked=" << IsMarked(obj);
1176           } else {
1177             LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
1178                       << " is_marked=" << IsMarked(obj);
1179           }
1180         }
1181       }
1182       LOG(FATAL) << "mark stack is not empty";
1183     }
1184   } else {
1185     // Shared, GC-exclusive, or off.
1186     MutexLock mu(Thread::Current(), mark_stack_lock_);
1187     CHECK(gc_mark_stack_->IsEmpty());
1188     CHECK(revoked_mark_stacks_.empty());
1189   }
1190 }
1191 
1192 void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
1193   TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
1194   ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1195   Runtime::Current()->SweepSystemWeaks(this);
1196 }
1197 
1198 void ConcurrentCopying::Sweep(bool swap_bitmaps) {
1199   {
1200     TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
1201     accounting::ObjectStack* live_stack = heap_->GetLiveStack();
1202     if (kEnableFromSpaceAccountingCheck) {
1203       CHECK_GE(live_stack_freeze_size_, live_stack->Size());
1204     }
1205     heap_->MarkAllocStackAsLive(live_stack);
1206     live_stack->Reset();
1207   }
1208   CheckEmptyMarkStack();
1209   TimingLogger::ScopedTiming split("Sweep", GetTimings());
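       // The region space is reclaimed separately by evacuation (see ClearFromSpace in
       // ReclaimPhase) and immune spaces are never collected, so only the remaining
       // continuous mem-map alloc spaces are swept here.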
1210   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1211     if (space->IsContinuousMemMapAllocSpace()) {
1212       space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
1213       if (space == region_space_ || immune_spaces_.ContainsSpace(space)) {
1214         continue;
1215       }
1216       TimingLogger::ScopedTiming split2(
1217           alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
1218       RecordFree(alloc_space->Sweep(swap_bitmaps));
1219     }
1220   }
1221   SweepLargeObjects(swap_bitmaps);
1222 }
1223 
1224 void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
1225   TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
1226   RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
1227 }
1228 
1229 class ConcurrentCopyingClearBlackPtrsVisitor {
1230  public:
1231   explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
1232       : collector_(cc) {}
1233   void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
1234       SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
1235     DCHECK(obj != nullptr);
1236     DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
1237     DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
1238     obj->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
1239     DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
1240   }
1241 
1242  private:
1243   ConcurrentCopying* const collector_;
1244 };
1245 
1246 // Clear the black ptrs in non-moving objects back to white.
1247 void ConcurrentCopying::ClearBlackPtrs() {
1248   CHECK(kUseBakerReadBarrier);
1249   TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings());
1250   ConcurrentCopyingClearBlackPtrsVisitor visitor(this);
1251   for (auto& space : heap_->GetContinuousSpaces()) {
1252     if (space == region_space_) {
1253       continue;
1254     }
1255     accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1256     if (kVerboseMode) {
1257       LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap;
1258     }
1259     mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
1260                                   reinterpret_cast<uintptr_t>(space->Limit()),
1261                                   visitor);
1262   }
1263   space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
1264   large_object_space->GetMarkBitmap()->VisitMarkedRange(
1265       reinterpret_cast<uintptr_t>(large_object_space->Begin()),
1266       reinterpret_cast<uintptr_t>(large_object_space->End()),
1267       visitor);
1268   // Objects on the allocation stack must already be white; verify in check/debug builds.
1269   if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
1270     size_t count = GetAllocationStack()->Size();
1271     auto* it = GetAllocationStack()->Begin();
1272     auto* end = GetAllocationStack()->End();
1273     for (size_t i = 0; i < count; ++i, ++it) {
1274       CHECK_LT(it, end);
1275       mirror::Object* obj = it->AsMirrorPtr();
1276       if (obj != nullptr) {
1277         // Must have been cleared above.
1278         CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
1279       }
1280     }
1281   }
1282 }
1283 
1284 void ConcurrentCopying::ReclaimPhase() {
1285   TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
1286   if (kVerboseMode) {
1287     LOG(INFO) << "GC ReclaimPhase";
1288   }
1289   Thread* self = Thread::Current();
1290 
1291   {
1292     // Double-check that the mark stack is empty.
1293     // Note: need to set this after VerifyNoFromSpaceRef().
1294     is_asserting_to_space_invariant_ = false;
1295     QuasiAtomic::ThreadFenceForConstructor();
1296     if (kVerboseMode) {
1297       LOG(INFO) << "Issue an empty checkpoint.";
1298     }
1299     IssueEmptyCheckpoint();
1300     // Disable the check.
1301     is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
1302     CheckEmptyMarkStack();
1303   }
1304 
1305   {
1306     // Record freed objects.
1307     TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
1308     // Don't include thread-locals that are in the to-space.
1309     uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
1310     uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
1311     uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
1312     uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
1313     uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
1314     uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
1315     if (kEnableFromSpaceAccountingCheck) {
1316       CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
1317       CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
1318     }
1319     CHECK_LE(to_objects, from_objects);
1320     CHECK_LE(to_bytes, from_bytes);
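         // Everything allocated in the evacuated from-space that was not moved into the
         // to-space has been reclaimed.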
1321     int64_t freed_bytes = from_bytes - to_bytes;
1322     int64_t freed_objects = from_objects - to_objects;
1323     if (kVerboseMode) {
1324       LOG(INFO) << "RecordFree:"
1325                 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
1326                 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
1327                 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
1328                 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
1329                 << " from_space size=" << region_space_->FromSpaceSize()
1330                 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
1331                 << " to_space size=" << region_space_->ToSpaceSize();
1332       LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1333     }
1334     RecordFree(ObjectBytePair(freed_objects, freed_bytes));
1335     if (kVerboseMode) {
1336       LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1337     }
1338   }
1339 
1340   {
1341     TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
1342     ComputeUnevacFromSpaceLiveRatio();
1343   }
1344 
1345   {
1346     TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
1347     region_space_->ClearFromSpace();
1348   }
1349 
1350   {
1351     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1352     if (kUseBakerReadBarrier) {
1353       ClearBlackPtrs();
1354     }
1355     Sweep(false);
1356     SwapBitmaps();
1357     heap_->UnBindBitmaps();
1358 
1359     // Remove bitmaps for the immune spaces.
1360     while (!cc_bitmaps_.empty()) {
1361       accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
1362       cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
1363       delete cc_bitmap;
1364       cc_bitmaps_.pop_back();
1365     }
1366     region_space_bitmap_ = nullptr;
1367   }
1368 
1369   CheckEmptyMarkStack();
1370 
1371   if (kVerboseMode) {
1372     LOG(INFO) << "GC end of ReclaimPhase";
1373   }
1374 }
1375 
1376 class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
1377  public:
1378   explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
1379       : collector_(cc) {}
1380   void operator()(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_)
1381       SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
1382     DCHECK(ref != nullptr);
1383     DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
1384     DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
1385     if (kUseBakerReadBarrier) {
1386       DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
1387       // Clear the black ptr.
1388       ref->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
1389       DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << ref;
1390     }
1391     size_t obj_size = ref->SizeOf();
1392     size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1393     collector_->region_space_->AddLiveBytes(ref, alloc_size);
1394   }
1395 
1396  private:
1397   ConcurrentCopying* const collector_;
1398 };
1399 
1400 // Compute how many live bytes are left in each unevac from-space region.
1401 void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
1402   region_space_->AssertAllRegionLiveBytesZeroOrCleared();
1403   ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
1404   region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
1405                                          reinterpret_cast<uintptr_t>(region_space_->Limit()),
1406                                          visitor);
1407 }
1408 
1409 // Assert the to-space invariant.
1410 void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
1411                                                mirror::Object* ref) {
1412   CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1413   if (is_asserting_to_space_invariant_) {
1414     if (region_space_->IsInToSpace(ref)) {
1415       // OK.
1416       return;
1417     } else if (region_space_->IsInUnevacFromSpace(ref)) {
1418       CHECK(region_space_bitmap_->Test(ref)) << ref;
1419     } else if (region_space_->IsInFromSpace(ref)) {
1420       // Not OK. Do extra logging.
1421       if (obj != nullptr) {
1422         LogFromSpaceRefHolder(obj, offset);
1423       }
1424       ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
1425       CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1426     } else {
1427       AssertToSpaceInvariantInNonMovingSpace(obj, ref);
1428     }
1429   }
1430 }
1431 
1432 class RootPrinter {
1433  public:
1434   RootPrinter() { }
1435 
1436   template <class MirrorType>
1437   ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
1438       SHARED_REQUIRES(Locks::mutator_lock_) {
1439     if (!root->IsNull()) {
1440       VisitRoot(root);
1441     }
1442   }
1443 
1444   template <class MirrorType>
1445   void VisitRoot(mirror::Object** root)
1446       SHARED_REQUIRES(Locks::mutator_lock_) {
1447     LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
1448   }
1449 
1450   template <class MirrorType>
1451   void VisitRoot(mirror::CompressedReference<MirrorType>* root)
1452       SHARED_REQUIRES(Locks::mutator_lock_) {
1453     LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
1454   }
1455 };
1456 
1457 void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
1458                                                mirror::Object* ref) {
1459   CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1460   if (is_asserting_to_space_invariant_) {
1461     if (region_space_->IsInToSpace(ref)) {
1462       // OK.
1463       return;
1464     } else if (region_space_->IsInUnevacFromSpace(ref)) {
1465       CHECK(region_space_bitmap_->Test(ref)) << ref;
1466     } else if (region_space_->IsInFromSpace(ref)) {
1467       // Not OK. Do extra logging.
1468       if (gc_root_source == nullptr) {
1469         // No info.
1470       } else if (gc_root_source->HasArtField()) {
1471         ArtField* field = gc_root_source->GetArtField();
1472         LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
1473         RootPrinter root_printer;
1474         field->VisitRoots(root_printer);
1475       } else if (gc_root_source->HasArtMethod()) {
1476         ArtMethod* method = gc_root_source->GetArtMethod();
1477         LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
1478         RootPrinter root_printer;
1479         method->VisitRoots(root_printer, sizeof(void*));
1480       }
1481       ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
1482       region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
1483       PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
1484       MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
1485       CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1486     } else {
1487       AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
1488     }
1489   }
1490 }
1491 
1492 void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
1493   if (kUseBakerReadBarrier) {
1494     LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
1495               << " holder rb_ptr=" << obj->GetReadBarrierPointer();
1496   } else {
1497     LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
1498   }
1499   if (region_space_->IsInFromSpace(obj)) {
1500     LOG(INFO) << "holder is in the from-space.";
1501   } else if (region_space_->IsInToSpace(obj)) {
1502     LOG(INFO) << "holder is in the to-space.";
1503   } else if (region_space_->IsInUnevacFromSpace(obj)) {
1504     LOG(INFO) << "holder is in the unevac from-space.";
1505     if (region_space_bitmap_->Test(obj)) {
1506       LOG(INFO) << "holder is marked in the region space bitmap.";
1507     } else {
1508       LOG(INFO) << "holder is not marked in the region space bitmap.";
1509     }
1510   } else {
1511     // In a non-moving space.
1512     if (immune_spaces_.ContainsObject(obj)) {
1513       LOG(INFO) << "holder is in an immune image or the zygote space.";
1514       accounting::ContinuousSpaceBitmap* cc_bitmap =
1515           cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
1516       CHECK(cc_bitmap != nullptr)
1517           << "An immune space object must have a bitmap.";
1518       if (cc_bitmap->Test(obj)) {
1519         LOG(INFO) << "holder is marked in the bit map.";
1520       } else {
1521         LOG(INFO) << "holder is NOT marked in the bit map.";
1522       }
1523     } else {
1524       LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
1525       accounting::ContinuousSpaceBitmap* mark_bitmap =
1526           heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
1527       accounting::LargeObjectBitmap* los_bitmap =
1528           heap_mark_bitmap_->GetLargeObjectBitmap(obj);
1529       CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1530       bool is_los = mark_bitmap == nullptr;
1531       if (!is_los && mark_bitmap->Test(obj)) {
1532         LOG(INFO) << "holder is marked in the mark bit map.";
1533       } else if (is_los && los_bitmap->Test(obj)) {
1534         LOG(INFO) << "holder is marked in the los bit map.";
1535       } else {
1536         // If ref is on the allocation stack, then it is considered
1537         // marked/alive (but not necessarily on the live stack).
1538         if (IsOnAllocStack(obj)) {
1539           LOG(INFO) << "holder is on the alloc stack.";
1540         } else {
1541           LOG(INFO) << "holder is not marked or on the alloc stack.";
1542         }
1543       }
1544     }
1545   }
1546   LOG(INFO) << "offset=" << offset.SizeValue();
1547 }
1548 
1549 void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
1550                                                                mirror::Object* ref) {
1551   // In a non-moving space. Check that the ref is marked.
1552   if (immune_spaces_.ContainsObject(ref)) {
1553     accounting::ContinuousSpaceBitmap* cc_bitmap =
1554         cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
1555     CHECK(cc_bitmap != nullptr)
1556         << "An immune space ref must have a bitmap. " << ref;
1557     if (kUseBakerReadBarrier) {
1558       CHECK(cc_bitmap->Test(ref))
1559           << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
1560           << obj->GetReadBarrierPointer() << " ref=" << ref;
1561     } else {
1562       CHECK(cc_bitmap->Test(ref))
1563           << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
1564     }
1565   } else {
1566     accounting::ContinuousSpaceBitmap* mark_bitmap =
1567         heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
1568     accounting::LargeObjectBitmap* los_bitmap =
1569         heap_mark_bitmap_->GetLargeObjectBitmap(ref);
1570     CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1571     bool is_los = mark_bitmap == nullptr;
1572     if ((!is_los && mark_bitmap->Test(ref)) ||
1573         (is_los && los_bitmap->Test(ref))) {
1574       // OK.
1575     } else {
1576       // If ref is on the allocation stack, then it may not be
1577       // marked live, but considered marked/alive (but not
1578       // necessarily on the live stack).
1579       CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
1580                                  << "obj=" << obj << " ref=" << ref;
1581     }
1582   }
1583 }
1584 
1585 // Used to scan ref fields of an object.
1586 class ConcurrentCopyingRefFieldsVisitor {
1587  public:
1588   explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
1589       : collector_(collector) {}
1590 
1591   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
1592       const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
1593       SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
1594     collector_->Process(obj, offset);
1595   }
1596 
1597   void operator()(mirror::Class* klass, mirror::Reference* ref) const
1598       SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
1599     CHECK(klass->IsTypeOfReferenceClass());
1600     collector_->DelayReferenceReferent(klass, ref);
1601   }
1602 
1603   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
1604       ALWAYS_INLINE
1605       SHARED_REQUIRES(Locks::mutator_lock_) {
1606     if (!root->IsNull()) {
1607       VisitRoot(root);
1608     }
1609   }
1610 
1611   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
1612       ALWAYS_INLINE
1613       SHARED_REQUIRES(Locks::mutator_lock_) {
1614     collector_->MarkRoot(root);
1615   }
1616 
1617  private:
1618   ConcurrentCopying* const collector_;
1619 };
1620 
1621 // Scan ref fields of an object.
1622 inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
1623   DCHECK(!region_space_->IsInFromSpace(to_ref));
1624   ConcurrentCopyingRefFieldsVisitor visitor(this);
1625   // Disable the read barrier for performance; the visitor marks the referenced objects explicitly.
1626   to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1627       visitor, visitor);
1628 }
1629 
1630 // Process a field.
1631 inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
1632   mirror::Object* ref = obj->GetFieldObject<
1633       mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
1634   mirror::Object* to_ref = Mark(ref);
1635   if (to_ref == ref) {
1636     return;
1637   }
1638   // This may fail if the mutator writes to the field at the same time. But it's ok.
1639   mirror::Object* expected_ref = ref;
1640   mirror::Object* new_ref = to_ref;
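       // Retry loop: the weak CAS below may fail spuriously or because a mutator updated the
       // field concurrently. Re-read the field each iteration and give up if the mutator has
       // already stored a different reference.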
1641   do {
1642     if (expected_ref !=
1643         obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
1644       // It was updated by the mutator.
1645       break;
1646     }
1647   } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
1648       false, false, kVerifyNone>(offset, expected_ref, new_ref));
1649 }
1650 
1651 // Process some roots.
1652 inline void ConcurrentCopying::VisitRoots(
1653     mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
1654   for (size_t i = 0; i < count; ++i) {
1655     mirror::Object** root = roots[i];
1656     mirror::Object* ref = *root;
1657     mirror::Object* to_ref = Mark(ref);
1658     if (to_ref == ref) {
1659       continue;
1660     }
1661     Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
1662     mirror::Object* expected_ref = ref;
1663     mirror::Object* new_ref = to_ref;
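         // Same retry pattern as Process(): bail out if a mutator already updated the root,
         // otherwise retry the weak CAS until it succeeds.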
1664     do {
1665       if (expected_ref != addr->LoadRelaxed()) {
1666         // It was updated by the mutator.
1667         break;
1668       }
1669     } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
1670   }
1671 }
1672 
1673 inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
1674   DCHECK(!root->IsNull());
1675   mirror::Object* const ref = root->AsMirrorPtr();
1676   mirror::Object* to_ref = Mark(ref);
1677   if (to_ref != ref) {
1678     auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
1679     auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
1680     auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
1681     // If the cas fails, then it was updated by the mutator.
1682     do {
1683       if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
1684         // It was updated by the mutator.
1685         break;
1686       }
1687     } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
1688   }
1689 }
1690 
1691 inline void ConcurrentCopying::VisitRoots(
1692     mirror::CompressedReference<mirror::Object>** roots, size_t count,
1693     const RootInfo& info ATTRIBUTE_UNUSED) {
1694   for (size_t i = 0; i < count; ++i) {
1695     mirror::CompressedReference<mirror::Object>* const root = roots[i];
1696     if (!root->IsNull()) {
1697       MarkRoot(root);
1698     }
1699   }
1700 }
1701 
1702 // Fill the given memory block with a dummy object. Used to fill in copies
1703 // of objects that were lost in the race to install a forwarding pointer.
1704 void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
1705   CHECK_ALIGNED(byte_size, kObjectAlignment);
1706   memset(dummy_obj, 0, byte_size);
1707   mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
1708   CHECK(int_array_class != nullptr);
1709   AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
1710   size_t component_size = int_array_class->GetComponentSize();
1711   CHECK_EQ(component_size, sizeof(int32_t));
1712   size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
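       // Pick a filler type: a java.lang.Object if the block cannot even hold an int array
       // header, otherwise an int array whose length is chosen so that the header plus the
       // elements exactly cover byte_size.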
1713   if (data_offset > byte_size) {
1714     // Even an empty int array would be too big. Use java.lang.Object.
1715     mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
1716     AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
1717     CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
1718     dummy_obj->SetClass(java_lang_Object);
1719     CHECK_EQ(byte_size, dummy_obj->SizeOf());
1720   } else {
1721     // Use an int array.
1722     dummy_obj->SetClass(int_array_class);
1723     CHECK(dummy_obj->IsArrayInstance());
1724     int32_t length = (byte_size - data_offset) / component_size;
1725     dummy_obj->AsArray()->SetLength(length);
1726     CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
1727         << "byte_size=" << byte_size << " length=" << length
1728         << " component_size=" << component_size << " data_offset=" << data_offset;
1729     CHECK_EQ(byte_size, dummy_obj->SizeOf())
1730         << "byte_size=" << byte_size << " length=" << length
1731         << " component_size=" << component_size << " data_offset=" << data_offset;
1732   }
1733 }
1734 
1735 // Reuse the memory blocks that held copies of objects lost in the forwarding race.
1736 mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
1737   // Try to reuse the blocks that were unused due to CAS failures.
1738   CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
1739   Thread* self = Thread::Current();
1740   size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
1741   MutexLock mu(self, skipped_blocks_lock_);
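       // skipped_blocks_map_ is keyed by block size, so lower_bound finds the smallest
       // recorded block that is at least alloc_size bytes.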
1742   auto it = skipped_blocks_map_.lower_bound(alloc_size);
1743   if (it == skipped_blocks_map_.end()) {
1744     // Not found.
1745     return nullptr;
1746   }
1747   {
1748     size_t byte_size = it->first;
1749     CHECK_GE(byte_size, alloc_size);
1750     if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
1751       // If remainder would be too small for a dummy object, retry with a larger request size.
1752       it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
1753       if (it == skipped_blocks_map_.end()) {
1754         // Not found.
1755         return nullptr;
1756       }
1757       CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
1758       CHECK_GE(it->first - alloc_size, min_object_size)
1759           << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
1760     }
1761   }
1762   // Found a block.
1763   CHECK(it != skipped_blocks_map_.end());
1764   size_t byte_size = it->first;
1765   uint8_t* addr = it->second;
1766   CHECK_GE(byte_size, alloc_size);
1767   CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
1768   CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
1769   if (kVerboseMode) {
1770     LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
1771   }
1772   skipped_blocks_map_.erase(it);
1773   memset(addr, 0, byte_size);
1774   if (byte_size > alloc_size) {
1775     // Return the remainder to the map.
1776     CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
1777     CHECK_GE(byte_size - alloc_size, min_object_size);
1778     FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
1779                         byte_size - alloc_size);
1780     CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
1781     skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
1782   }
1783   return reinterpret_cast<mirror::Object*>(addr);
1784 }
1785 
1786 mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
1787   DCHECK(region_space_->IsInFromSpace(from_ref));
1788   // No read barrier to avoid nested RB that might violate the to-space
1789   // invariant. Note that from_ref is a from space ref so the SizeOf()
1790   // call will access the from-space meta objects, but it's ok and necessary.
1791   size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
1792   size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1793   size_t region_space_bytes_allocated = 0U;
1794   size_t non_moving_space_bytes_allocated = 0U;
1795   size_t bytes_allocated = 0U;
1796   size_t dummy;
1797   mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
1798       region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
1799   bytes_allocated = region_space_bytes_allocated;
1800   if (to_ref != nullptr) {
1801     DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
1802   }
1803   bool fall_back_to_non_moving = false;
1804   if (UNLIKELY(to_ref == nullptr)) {
1805     // Failed to allocate in the region space. Try the skipped blocks.
1806     to_ref = AllocateInSkippedBlock(region_space_alloc_size);
1807     if (to_ref != nullptr) {
1808       // Succeeded in allocating from a skipped block.
1809       if (heap_->use_tlab_) {
1810         // This is necessary for the tlab case as it's not accounted in the space.
1811         region_space_->RecordAlloc(to_ref);
1812       }
1813       bytes_allocated = region_space_alloc_size;
1814     } else {
1815       // Fall back to the non-moving space.
1816       fall_back_to_non_moving = true;
1817       if (kVerboseMode) {
1818         LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
1819                   << to_space_bytes_skipped_.LoadSequentiallyConsistent()
1820                   << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
1821       }
1823       to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
1824                                                &non_moving_space_bytes_allocated, nullptr, &dummy);
1825       CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
1826       bytes_allocated = non_moving_space_bytes_allocated;
1827       // Mark it in the mark bitmap.
1828       accounting::ContinuousSpaceBitmap* mark_bitmap =
1829           heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1830       CHECK(mark_bitmap != nullptr);
1831       CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
1832     }
1833   }
1834   DCHECK(to_ref != nullptr);
1835 
1836   // Attempt to install the forward pointer. This is in a loop as the
1837   // lock word atomic write can fail.
1838   while (true) {
1839     // Copy the object. TODO: copy only the lockword in the second iteration and on?
1840     memcpy(to_ref, from_ref, obj_size);
1841 
1842     LockWord old_lock_word = to_ref->GetLockWord(false);
1843 
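         // The lock word was just copied from from_ref by the memcpy above. If it already
         // encodes a forwarding address, another thread won the race to copy this object.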
1844     if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
1845       // Lost the race. Another thread (either GC or mutator) stored
1846       // the forwarding pointer first. Make the lost copy (to_ref)
1847       // look like a valid but dead (dummy) object and keep it for
1848       // future reuse.
1849       FillWithDummyObject(to_ref, bytes_allocated);
1850       if (!fall_back_to_non_moving) {
1851         DCHECK(region_space_->IsInToSpace(to_ref));
1852         if (bytes_allocated > space::RegionSpace::kRegionSize) {
1853           // Free the large alloc.
1854           region_space_->FreeLarge(to_ref, bytes_allocated);
1855         } else {
1856           // Record the lost copy for later reuse.
1857           heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
1858           to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
1859           to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
1860           MutexLock mu(Thread::Current(), skipped_blocks_lock_);
1861           skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
1862                                                     reinterpret_cast<uint8_t*>(to_ref)));
1863         }
1864       } else {
1865         DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
1866         DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
1867         // Free the non-moving-space chunk.
1868         accounting::ContinuousSpaceBitmap* mark_bitmap =
1869             heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1870         CHECK(mark_bitmap != nullptr);
1871         CHECK(mark_bitmap->Clear(to_ref));
1872         heap_->non_moving_space_->Free(Thread::Current(), to_ref);
1873       }
1874 
1875       // Get the winner's forward ptr.
1876       mirror::Object* lost_fwd_ptr = to_ref;
1877       to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
1878       CHECK(to_ref != nullptr);
1879       CHECK_NE(to_ref, lost_fwd_ptr);
1880       CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
1881       CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
1882       return to_ref;
1883     }
1884 
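         // Mark the copy gray before publishing the forwarding address: its fields have not
         // been scanned yet, and it is pushed onto the mark stack below once the CAS succeeds.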
1885     // Set the gray ptr.
1886     if (kUseBakerReadBarrier) {
1887       to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
1888     }
1889 
1890     LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
1891 
1892     // Try to atomically write the fwd ptr.
1893     bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
1894     if (LIKELY(success)) {
1895       // The CAS succeeded.
1896       objects_moved_.FetchAndAddSequentiallyConsistent(1);
1897       bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
1898       if (LIKELY(!fall_back_to_non_moving)) {
1899         DCHECK(region_space_->IsInToSpace(to_ref));
1900       } else {
1901         DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
1902         DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
1903       }
1904       if (kUseBakerReadBarrier) {
1905         DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1906       }
1907       DCHECK(GetFwdPtr(from_ref) == to_ref);
1908       CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
1909       PushOntoMarkStack(to_ref);
1910       return to_ref;
1911     } else {
1912       // The CAS failed. It may have lost the race or may have failed
1913       // due to monitor/hashcode ops. Either way, retry.
1914     }
1915   }
1916 }
1917 
1918 mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
1919   DCHECK(from_ref != nullptr);
1920   space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
1921   if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
1922     // It's already marked.
1923     return from_ref;
1924   }
1925   mirror::Object* to_ref;
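       // A from-space object is marked iff it has a forwarding address; an unevac from-space
       // object is marked iff it is set in the region space bitmap.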
1926   if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
1927     to_ref = GetFwdPtr(from_ref);
1928     DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
1929            heap_->non_moving_space_->HasAddress(to_ref))
1930         << "from_ref=" << from_ref << " to_ref=" << to_ref;
1931   } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
1932     if (region_space_bitmap_->Test(from_ref)) {
1933       to_ref = from_ref;
1934     } else {
1935       to_ref = nullptr;
1936     }
1937   } else {
1938     // from_ref is in a non-moving space.
1939     if (immune_spaces_.ContainsObject(from_ref)) {
1940       accounting::ContinuousSpaceBitmap* cc_bitmap =
1941           cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
1942       DCHECK(cc_bitmap != nullptr)
1943           << "An immune space object must have a bitmap";
1944       if (kIsDebugBuild) {
1945         DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
1946             << "Immune space object must be already marked";
1947       }
1948       if (cc_bitmap->Test(from_ref)) {
1949         // Already marked.
1950         to_ref = from_ref;
1951       } else {
1952         // Newly marked.
1953         to_ref = nullptr;
1954       }
1955     } else {
1956       // Non-immune non-moving space. Use the mark bitmap.
1957       accounting::ContinuousSpaceBitmap* mark_bitmap =
1958           heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
1959       accounting::LargeObjectBitmap* los_bitmap =
1960           heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
1961       CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1962       bool is_los = mark_bitmap == nullptr;
1963       if (!is_los && mark_bitmap->Test(from_ref)) {
1964         // Already marked.
1965         to_ref = from_ref;
1966       } else if (is_los && los_bitmap->Test(from_ref)) {
1967         // Already marked in LOS.
1968         to_ref = from_ref;
1969       } else {
1970         // Not marked.
1971         if (IsOnAllocStack(from_ref)) {
1972           // If on the allocation stack, it's considered marked.
1973           to_ref = from_ref;
1974         } else {
1975           // Not marked.
1976           to_ref = nullptr;
1977         }
1978       }
1979     }
1980   }
1981   return to_ref;
1982 }
1983 
1984 bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
1985   QuasiAtomic::ThreadFenceAcquire();
1986   accounting::ObjectStack* alloc_stack = GetAllocationStack();
1987   return alloc_stack->Contains(ref);
1988 }
1989 
1990 mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
1991   // ref is in a non-moving space (from_ref == to_ref).
1992   DCHECK(!region_space_->HasAddress(ref)) << ref;
1993   if (immune_spaces_.ContainsObject(ref)) {
1994     accounting::ContinuousSpaceBitmap* cc_bitmap =
1995         cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
1996     DCHECK(cc_bitmap != nullptr)
1997         << "An immune space object must have a bitmap";
1998     if (kIsDebugBuild) {
1999       DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref))
2000           << "Immune space object must be already marked";
2001     }
2002     // This may or may not succeed, which is ok.
2003     if (kUseBakerReadBarrier) {
2004       ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
2005     }
2006     if (cc_bitmap->AtomicTestAndSet(ref)) {
2007       // Already marked.
2008     } else {
2009       // Newly marked.
2010       if (kUseBakerReadBarrier) {
2011         DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
2012       }
2013       PushOntoMarkStack(ref);
2014     }
2015   } else {
2016     // Use the mark bitmap.
2017     accounting::ContinuousSpaceBitmap* mark_bitmap =
2018         heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
2019     accounting::LargeObjectBitmap* los_bitmap =
2020         heap_mark_bitmap_->GetLargeObjectBitmap(ref);
2021     CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
2022     bool is_los = mark_bitmap == nullptr;
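         // An object not covered by any continuous-space mark bitmap must live in the large
         // object space, so fall back to the LOS bitmap for it.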
2023     if (!is_los && mark_bitmap->Test(ref)) {
2024       // Already marked.
2025       if (kUseBakerReadBarrier) {
2026         DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
2027                ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
2028       }
2029     } else if (is_los && los_bitmap->Test(ref)) {
2030       // Already marked in LOS.
2031       if (kUseBakerReadBarrier) {
2032         DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
2033                ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
2034       }
2035     } else {
2036       // Not marked.
2037       if (IsOnAllocStack(ref)) {
2038         // If it's on the allocation stack, it's considered marked. Keep it white.
2039         // Objects on the allocation stack need not be marked.
2040         if (!is_los) {
2041           DCHECK(!mark_bitmap->Test(ref));
2042         } else {
2043           DCHECK(!los_bitmap->Test(ref));
2044         }
2045         if (kUseBakerReadBarrier) {
2046           DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
2047         }
2048       } else {
2049         // Not marked or on the allocation stack. Try to mark it.
2050         // This may or may not succeed, which is ok.
2051         if (kUseBakerReadBarrier) {
2052           ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
2053         }
2054         if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
2055           // Already marked.
2056         } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
2057           // Already marked in LOS.
2058         } else {
2059           // Newly marked.
2060           if (kUseBakerReadBarrier) {
2061             DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
2062           }
2063           PushOntoMarkStack(ref);
2064         }
2065       }
2066     }
2067   }
2068   return ref;
2069 }
2070 
2071 void ConcurrentCopying::FinishPhase() {
2072   Thread* const self = Thread::Current();
2073   {
2074     MutexLock mu(self, mark_stack_lock_);
2075     CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
2076   }
2077   region_space_ = nullptr;
2078   {
2079     MutexLock mu(Thread::Current(), skipped_blocks_lock_);
2080     skipped_blocks_map_.clear();
2081   }
2082   ReaderMutexLock mu(self, *Locks::mutator_lock_);
2083   WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
2084   heap_->ClearMarkedObjects();
2085 }
2086 
2087 bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
2088   mirror::Object* from_ref = field->AsMirrorPtr();
2089   mirror::Object* to_ref = IsMarked(from_ref);
2090   if (to_ref == nullptr) {
2091     return false;
2092   }
2093   if (from_ref != to_ref) {
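         // The release fence makes the copied object's contents visible before the new address
         // is stored; the trailing fence orders the updated reference before subsequent accesses.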
2094     QuasiAtomic::ThreadFenceRelease();
2095     field->Assign(to_ref);
2096     QuasiAtomic::ThreadFenceSequentiallyConsistent();
2097   }
2098   return true;
2099 }
2100 
2101 mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
2102   return Mark(from_ref);
2103 }
2104 
2105 void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
2106   heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
2107 }
2108 
2109 void ConcurrentCopying::ProcessReferences(Thread* self) {
2110   TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
2111   // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
2112   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
2113   GetHeap()->GetReferenceProcessor()->ProcessReferences(
2114       true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
2115 }
2116 
2117 void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
2118   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
2119   region_space_->RevokeAllThreadLocalBuffers();
2120 }
2121 
2122 }  // namespace collector
2123 }  // namespace gc
2124 }  // namespace art
2125