1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "mark_sweep.h"
18 
19 #include <atomic>
20 #include <functional>
21 #include <numeric>
22 #include <climits>
23 #include <vector>
24 
25 #define ATRACE_TAG ATRACE_TAG_DALVIK
26 #include "cutils/trace.h"
27 
28 #include "base/bounded_fifo.h"
29 #include "base/logging.h"
30 #include "base/macros.h"
31 #include "base/mutex-inl.h"
32 #include "base/time_utils.h"
33 #include "base/timing_logger.h"
34 #include "gc/accounting/card_table-inl.h"
35 #include "gc/accounting/heap_bitmap-inl.h"
36 #include "gc/accounting/mod_union_table.h"
37 #include "gc/accounting/space_bitmap-inl.h"
38 #include "gc/heap.h"
39 #include "gc/reference_processor.h"
40 #include "gc/space/image_space.h"
41 #include "gc/space/large_object_space.h"
42 #include "gc/space/space-inl.h"
43 #include "mark_sweep-inl.h"
44 #include "mirror/object-inl.h"
45 #include "runtime.h"
46 #include "scoped_thread_state_change.h"
47 #include "thread-inl.h"
48 #include "thread_list.h"
49 
50 using ::art::mirror::Object;
51 
52 namespace art {
53 namespace gc {
54 namespace collector {
55 
56 // Performance options.
57 static constexpr bool kUseRecursiveMark = false;
58 static constexpr bool kUseMarkStackPrefetch = true;
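// Number of freed object pointers to buffer before issuing one batched FreeList() call in
// SweepArray().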
59 static constexpr size_t kSweepArrayChunkFreeSize = 1024;
60 static constexpr bool kPreCleanCards = true;
61 
62 // Parallelism options.
63 static constexpr bool kParallelCardScan = true;
64 static constexpr bool kParallelRecursiveMark = true;
65 // Don't attempt to parallelize mark stack processing unless the mark stack has at least this
66 // many elements. This is temporary until we reduce the overhead caused by allocating tasks, etc.
67 // Not having this can add overhead in ProcessReferences since we may end up doing many calls of
68 // ProcessMarkStack with very small mark stacks.
69 static constexpr size_t kMinimumParallelMarkStackSize = 128;
70 static constexpr bool kParallelProcessMarkStack = true;
71 
72 // Profiling and information flags.
73 static constexpr bool kProfileLargeObjects = false;
74 static constexpr bool kMeasureOverhead = false;
75 static constexpr bool kCountTasks = false;
76 static constexpr bool kCountJavaLangRefs = false;
77 static constexpr bool kCountMarkedObjects = false;
78 
79 // Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
80 static constexpr bool kCheckLocks = kDebugLocking;
81 static constexpr bool kVerifyRootsMarked = kIsDebugBuild;
82 
83 // If true, revoke the rosalloc thread-local buffers at the
84 // checkpoint, as opposed to during the pause.
85 static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;
86 
87 void MarkSweep::BindBitmaps() {
88   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
89   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
90   // Mark all of the spaces we never collect as immune.
91   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
92     if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
93       CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
94     }
95   }
96 }
97 
98 MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
99     : GarbageCollector(heap,
100                        name_prefix +
101                        (is_concurrent ? "concurrent mark sweep": "mark sweep")),
102       current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr),
103       gc_barrier_(new Barrier(0)),
104       mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
105       is_concurrent_(is_concurrent), live_stack_freeze_size_(0) {
106   std::string error_msg;
107   MemMap* mem_map = MemMap::MapAnonymous(
108       "mark sweep sweep array free buffer", nullptr,
109       RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
110       PROT_READ | PROT_WRITE, false, false, &error_msg);
111   CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
112   sweep_array_free_buffer_mem_map_.reset(mem_map);
113 }
114 
115 void MarkSweep::InitializePhase() {
116   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
117   mark_stack_ = heap_->GetMarkStack();
118   DCHECK(mark_stack_ != nullptr);
119   immune_region_.Reset();
120   class_count_.StoreRelaxed(0);
121   array_count_.StoreRelaxed(0);
122   other_count_.StoreRelaxed(0);
123   large_object_test_.StoreRelaxed(0);
124   large_object_mark_.StoreRelaxed(0);
125   overhead_time_.StoreRelaxed(0);
126   work_chunks_created_.StoreRelaxed(0);
127   work_chunks_deleted_.StoreRelaxed(0);
128   reference_count_.StoreRelaxed(0);
129   mark_null_count_.StoreRelaxed(0);
130   mark_immune_count_.StoreRelaxed(0);
131   mark_fastpath_count_.StoreRelaxed(0);
132   mark_slowpath_count_.StoreRelaxed(0);
133   {
134     // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
135     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
136     mark_bitmap_ = heap_->GetMarkBitmap();
137   }
138   if (!GetCurrentIteration()->GetClearSoftReferences()) {
139     // Always clear soft references for non-sticky collections.
140     GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky);
141   }
142 }
143 
144 void MarkSweep::RunPhases() {
145   Thread* self = Thread::Current();
146   InitializePhase();
147   Locks::mutator_lock_->AssertNotHeld(self);
148   if (IsConcurrent()) {
149     GetHeap()->PreGcVerification(this);
150     {
151       ReaderMutexLock mu(self, *Locks::mutator_lock_);
152       MarkingPhase();
153     }
154     ScopedPause pause(this);
155     GetHeap()->PrePauseRosAllocVerification(this);
156     PausePhase();
157     RevokeAllThreadLocalBuffers();
158   } else {
159     ScopedPause pause(this);
160     GetHeap()->PreGcVerificationPaused(this);
161     MarkingPhase();
162     GetHeap()->PrePauseRosAllocVerification(this);
163     PausePhase();
164     RevokeAllThreadLocalBuffers();
165   }
166   {
167     // Sweeping is always done concurrently, even for non-concurrent mark sweep.
168     ReaderMutexLock mu(self, *Locks::mutator_lock_);
169     ReclaimPhase();
170   }
171   GetHeap()->PostGcVerification(this);
172   FinishPhase();
173 }
174 
175 void MarkSweep::ProcessReferences(Thread* self) {
176   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
177   GetHeap()->GetReferenceProcessor()->ProcessReferences(
178       true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
179       &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
180 }
181 
182 void MarkSweep::PausePhase() {
183   TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings());
184   Thread* self = Thread::Current();
185   Locks::mutator_lock_->AssertExclusiveHeld(self);
186   if (IsConcurrent()) {
187     // Handle the dirty objects if we are a concurrent GC.
188     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
189     // Re-mark root set.
190     ReMarkRoots();
191     // Scan dirty objects; this is only required for concurrent GC, since mutators may have
191     // dirtied cards while concurrent marking was in progress.
192     RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
193   }
194   {
195     TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
196     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
197     heap_->SwapStacks(self);
198     live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
199     // Need to revoke all the thread local allocation stacks since we just swapped the allocation
200     // stacks and don't want anybody to allocate into the live stack.
201     RevokeAllThreadLocalAllocationStacks(self);
202   }
203   heap_->PreSweepingGcVerification(this);
204   // Disallow new system weaks to prevent a race which occurs when someone adds a new system
205   // weak before we sweep them. Since this new system weak may not be marked, the GC may
206   // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
207   // reference to a string that is about to be swept.
208   Runtime::Current()->DisallowNewSystemWeaks();
209   // Enable the reference processing slow path, needs to be done with mutators paused since there
210   // is no lock in the GetReferent fast path.
211   GetHeap()->GetReferenceProcessor()->EnableSlowPath();
212 }
213 
214 void MarkSweep::PreCleanCards() {
215   // Don't do this for non-concurrent GCs since they don't have any dirty cards.
216   if (kPreCleanCards && IsConcurrent()) {
217     TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
218     Thread* self = Thread::Current();
219     CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
220     // Process dirty cards and add dirty cards to mod union tables; this also ages the cards.
221     heap_->ProcessCards(GetTimings(), false, true, false);
222     // The checkpoint root marking is required to avoid a race condition which occurs if the
223     // following happens during a reference write:
224     // 1. mutator dirties the card (write barrier)
225     // 2. GC ages the card (the above ProcessCards call)
226     // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
227     // 4. mutator writes the value (corresponding to the write barrier in 1.)
228     // This causes the GC to age the card but not necessarily mark the reference which the mutator
229     // wrote into the object stored in the card.
230     // Having the checkpoint fixes this issue since it ensures that the card mark and the
231     // reference write are visible to the GC before the card is scanned (this is due to locks being
232     // acquired / released in the checkpoint code).
233     // The other roots are also marked to help reduce the pause.
234     MarkRootsCheckpoint(self, false);
235     MarkNonThreadRoots();
236     MarkConcurrentRoots(
237         static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
238     // Process the newly aged cards.
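    // ProcessCards (called above) ages dirty cards down by one, so a minimum age of
    // kCardDirty - 1 picks up both the freshly aged cards and any cards dirtied since.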
239     RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
240     // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
241     // in the next GC.
242   }
243 }
244 
245 void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
246   if (kUseThreadLocalAllocationStack) {
247     TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
248     Locks::mutator_lock_->AssertExclusiveHeld(self);
249     heap_->RevokeAllThreadLocalAllocationStacks(self);
250   }
251 }
252 
253 void MarkSweep::MarkingPhase() {
254   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
255   Thread* self = Thread::Current();
256   BindBitmaps();
257   FindDefaultSpaceBitmap();
258   // Process dirty cards and add dirty cards to mod union tables.
259   // If the GC type is non-sticky, then we just clear the cards instead of aging them.
260   heap_->ProcessCards(GetTimings(), false, true, GetGcType() != kGcTypeSticky);
261   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
262   MarkRoots(self);
263   MarkReachableObjects();
264   // Pre-clean dirtied cards to reduce pauses.
265   PreCleanCards();
266 }
267 
268 void MarkSweep::UpdateAndMarkModUnion() {
269   for (const auto& space : heap_->GetContinuousSpaces()) {
270     if (immune_region_.ContainsSpace(space)) {
271       const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
272           "UpdateAndMarkImageModUnionTable";
273       TimingLogger::ScopedTiming t(name, GetTimings());
274       accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
275       CHECK(mod_union_table != nullptr);
276       mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
277     }
278   }
279 }
280 
281 void MarkSweep::MarkReachableObjects() {
282   UpdateAndMarkModUnion();
283   // Recursively mark all the non-image bits set in the mark bitmap.
284   RecursiveMark();
285 }
286 
287 void MarkSweep::ReclaimPhase() {
288   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
289   Thread* self = Thread::Current();
290   // Process the references concurrently.
291   ProcessReferences(self);
292   SweepSystemWeaks(self);
293   Runtime::Current()->AllowNewSystemWeaks();
294   {
295     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
296     GetHeap()->RecordFreeRevoke();
297     // Reclaim unmarked objects.
298     Sweep(false);
299     // Swap the live and mark bitmaps for each space that we modified. This is an
300     // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
301     // bitmaps.
302     SwapBitmaps();
303     // Unbind the live and mark bitmaps.
304     GetHeap()->UnBindBitmaps();
305   }
306 }
307 
308 void MarkSweep::FindDefaultSpaceBitmap() {
309   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
310   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
311     accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
312     // We want to use the main space instead of the non-moving space if possible.
313     if (bitmap != nullptr &&
314         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
315       current_space_bitmap_ = bitmap;
316       // If this is not the non-moving space, exit the loop early since this bitmap is good enough.
317       if (space != heap_->GetNonMovingSpace()) {
318         break;
319       }
320     }
321   }
322   CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
323       << heap_->DumpSpaces();
324 }
325 
326 void MarkSweep::ExpandMarkStack() {
327   ResizeMarkStack(mark_stack_->Capacity() * 2);
328 }
329 
330 void MarkSweep::ResizeMarkStack(size_t new_size) {
331   // Rare case, no need to have Thread::Current be a parameter.
332   if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
333     // Someone else acquired the lock and expanded the mark stack before us.
334     return;
335   }
336   std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
337   CHECK_LE(mark_stack_->Size(), new_size);
338   mark_stack_->Resize(new_size);
339   for (auto& obj : temp) {
340     mark_stack_->PushBack(obj.AsMirrorPtr());
341   }
342 }
343 
344 inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
345   DCHECK(obj != nullptr);
346   if (MarkObjectParallel(obj)) {
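    // The mark stack is shared between worker threads, so the capacity check and the push are
    // both done under the mark stack lock.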
347     MutexLock mu(Thread::Current(), mark_stack_lock_);
348     if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
349       ExpandMarkStack();
350     }
351     // The object must be pushed on to the mark stack.
352     mark_stack_->PushBack(obj);
353   }
354 }
355 
356 mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
357   MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
358   mark_sweep->MarkObject(obj);
359   return obj;
360 }
361 
362 void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
363   reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
364 }
365 
366 bool MarkSweep::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
367   return reinterpret_cast<MarkSweep*>(arg)->IsMarked(ref->AsMirrorPtr());
368 }
369 
370 class MarkSweepMarkObjectSlowPath {
371  public:
372   explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep, Object* holder = nullptr,
373                                        MemberOffset offset = MemberOffset(0))
374       : mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {
375   }
376 
377   void operator()(const Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
378     if (kProfileLargeObjects) {
379       // TODO: Differentiate between marking and testing somehow.
380       ++mark_sweep_->large_object_test_;
381       ++mark_sweep_->large_object_mark_;
382     }
383     space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
384     if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
385                  (kIsDebugBuild && large_object_space != nullptr &&
386                      !large_object_space->Contains(obj)))) {
387       LOG(INTERNAL_FATAL) << "Tried to mark " << obj << " not contained by any spaces";
388       if (holder_ != nullptr) {
389         size_t holder_size = holder_->SizeOf();
390         ArtField* field = holder_->FindFieldByOffset(offset_);
391         LOG(INTERNAL_FATAL) << "Field info: "
392                             << " holder=" << holder_
393                             << " holder is "
394                             << (mark_sweep_->GetHeap()->IsLiveObjectLocked(holder_)
395                                 ? "alive" : "dead")
396                             << " holder_size=" << holder_size
397                             << " holder_type=" << PrettyTypeOf(holder_)
398                             << " offset=" << offset_.Uint32Value()
399                             << " field=" << (field != nullptr ? field->GetName() : "nullptr")
400                             << " field_type="
401                             << (field != nullptr ? field->GetTypeDescriptor() : "")
402                             << " first_ref_field_offset="
403                             << (holder_->IsClass()
404                                 ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset(
405                                     sizeof(void*))
406                                 : holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
407                             << " num_of_ref_fields="
408                             << (holder_->IsClass()
409                                 ? holder_->AsClass()->NumReferenceStaticFields()
410                                 : holder_->GetClass()->NumReferenceInstanceFields())
411                             << "\n";
412         // Print the memory content of the holder.
413         for (size_t i = 0; i < holder_size / sizeof(uint32_t); ++i) {
414           uint32_t* p = reinterpret_cast<uint32_t*>(holder_);
415           LOG(INTERNAL_FATAL) << &p[i] << ": " << "holder+" << (i * sizeof(uint32_t)) << " = "
416                               << std::hex << p[i];
417         }
418       }
419       PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
420       MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
421       {
422         LOG(INTERNAL_FATAL) << "Attempting see if it's a bad root";
423         Thread* self = Thread::Current();
424         if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
425           mark_sweep_->VerifyRoots();
426         } else {
427           const bool heap_bitmap_exclusive_locked =
428               Locks::heap_bitmap_lock_->IsExclusiveHeld(self);
429           if (heap_bitmap_exclusive_locked) {
430             Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
431           }
432           Locks::mutator_lock_->SharedUnlock(self);
433           ThreadList* tl = Runtime::Current()->GetThreadList();
434           tl->SuspendAll(__FUNCTION__);
435           mark_sweep_->VerifyRoots();
436           tl->ResumeAll();
437           Locks::mutator_lock_->SharedLock(self);
438           if (heap_bitmap_exclusive_locked) {
439             Locks::heap_bitmap_lock_->ExclusiveLock(self);
440           }
441         }
442       }
443       LOG(FATAL) << "Can't mark invalid object";
444     }
445   }
446 
447  private:
448   MarkSweep* const mark_sweep_;
449   mirror::Object* const holder_;
450   MemberOffset offset_;
451 };
452 
453 inline void MarkSweep::MarkObjectNonNull(Object* obj, Object* holder, MemberOffset offset) {
454   DCHECK(obj != nullptr);
455   if (kUseBakerOrBrooksReadBarrier) {
456     // Verify all the objects have the correct pointer installed.
457     obj->AssertReadBarrierPointer();
458   }
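  // Three cases: the object is in the immune region (already marked by definition), covered by
  // the default space bitmap (fast path), or elsewhere (slow path through the full heap mark
  // bitmap, which also validates the address).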
459   if (immune_region_.ContainsObject(obj)) {
460     if (kCountMarkedObjects) {
461       ++mark_immune_count_;
462     }
463     DCHECK(mark_bitmap_->Test(obj));
464   } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
465     if (kCountMarkedObjects) {
466       ++mark_fastpath_count_;
467     }
468     if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
469       PushOnMarkStack(obj);  // This object was not previously marked.
470     }
471   } else {
472     if (kCountMarkedObjects) {
473       ++mark_slowpath_count_;
474     }
475     MarkSweepMarkObjectSlowPath visitor(this, holder, offset);
476     // TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set
477     // will check again.
478     if (!mark_bitmap_->Set(obj, visitor)) {
479       PushOnMarkStack(obj);  // Was not already marked, push.
480     }
481   }
482 }
483 
484 inline void MarkSweep::PushOnMarkStack(Object* obj) {
485   if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
486     // The lock is not needed but is taken anyway to please annotalysis.
487     MutexLock mu(Thread::Current(), mark_stack_lock_);
488     ExpandMarkStack();
489   }
490   // The object must be pushed on to the mark stack.
491   mark_stack_->PushBack(obj);
492 }
493 
494 inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
495   DCHECK(obj != nullptr);
496   if (kUseBakerOrBrooksReadBarrier) {
497     // Verify all the objects have the correct pointer installed.
498     obj->AssertReadBarrierPointer();
499   }
500   if (immune_region_.ContainsObject(obj)) {
501     DCHECK(IsMarked(obj));
502     return false;
503   }
504   // Try to take advantage of locality of references within a space; failing that, find the
505   // space the hard way.
506   accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
507   if (LIKELY(object_bitmap->HasAddress(obj))) {
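    // AtomicTestAndSet returns whether the bit was already set, so only the thread that actually
    // sets the bit reports that the object still needs to be pushed.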
508     return !object_bitmap->AtomicTestAndSet(obj);
509   }
510   MarkSweepMarkObjectSlowPath visitor(this);
511   return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
512 }
513 
514 // Used to mark objects when processing the mark stack. If an object is null, it is not marked.
515 inline void MarkSweep::MarkObject(Object* obj, Object* holder, MemberOffset offset) {
516   if (obj != nullptr) {
517     MarkObjectNonNull(obj, holder, offset);
518   } else if (kCountMarkedObjects) {
519     ++mark_null_count_;
520   }
521 }
522 
523 class VerifyRootMarkedVisitor : public SingleRootVisitor {
524  public:
525   explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }
526 
527   void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
528       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
529     CHECK(collector_->IsMarked(root)) << info.ToString();
530   }
531 
532  private:
533   MarkSweep* const collector_;
534 };
535 
536 void MarkSweep::VisitRoots(mirror::Object*** roots, size_t count,
537                            const RootInfo& info ATTRIBUTE_UNUSED) {
538   for (size_t i = 0; i < count; ++i) {
539     MarkObjectNonNull(*roots[i]);
540   }
541 }
542 
543 void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
544                            const RootInfo& info ATTRIBUTE_UNUSED) {
545   for (size_t i = 0; i < count; ++i) {
546     MarkObjectNonNull(roots[i]->AsMirrorPtr());
547   }
548 }
549 
550 class VerifyRootVisitor : public SingleRootVisitor {
551  public:
552   void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
553       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
554     // See if the root is on any space bitmap.
555     auto* heap = Runtime::Current()->GetHeap();
556     if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
557       space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
558       if (large_object_space != nullptr && !large_object_space->Contains(root)) {
559         LOG(INTERNAL_FATAL) << "Found invalid root: " << root << " " << info;
560       }
561     }
562   }
563 };
564 
565 void MarkSweep::VerifyRoots() {
566   VerifyRootVisitor visitor;
567   Runtime::Current()->GetThreadList()->VisitRoots(&visitor);
568 }
569 
570 void MarkSweep::MarkRoots(Thread* self) {
571   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
572   if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
573     // If we exclusively hold the mutator lock, all threads must be suspended.
574     Runtime::Current()->VisitRoots(this);
575     RevokeAllThreadLocalAllocationStacks(self);
576   } else {
577     MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
578     // At this point the live stack should no longer have any mutators which push into it.
579     MarkNonThreadRoots();
580     MarkConcurrentRoots(
581         static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
582   }
583 }
584 
585 void MarkSweep::MarkNonThreadRoots() {
586   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
587   Runtime::Current()->VisitNonThreadRoots(this);
588 }
589 
590 void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
591   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
592   // Visit all runtime roots and clear dirty flags.
593   Runtime::Current()->VisitConcurrentRoots(
594       this, static_cast<VisitRootFlags>(flags | kVisitRootFlagNonMoving));
595 }
596 
597 class ScanObjectVisitor {
598  public:
599   explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
600       : mark_sweep_(mark_sweep) {}
601 
602   void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
603       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
604     if (kCheckLocks) {
605       Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
606       Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
607     }
608     mark_sweep_->ScanObject(obj);
609   }
610 
611  private:
612   MarkSweep* const mark_sweep_;
613 };
614 
615 class DelayReferenceReferentVisitor {
616  public:
617   explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
618   }
619 
620   void operator()(mirror::Class* klass, mirror::Reference* ref) const
621       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
622       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
623     collector_->DelayReferenceReferent(klass, ref);
624   }
625 
626  private:
627   MarkSweep* const collector_;
628 };
629 
630 template <bool kUseFinger = false>
631 class MarkStackTask : public Task {
632  public:
633   MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
634                 StackReference<Object>* mark_stack)
635       : mark_sweep_(mark_sweep),
636         thread_pool_(thread_pool),
637         mark_stack_pos_(mark_stack_size) {
638     // We may have to copy part of an existing mark stack when another mark stack overflows.
639     if (mark_stack_size != 0) {
640       DCHECK(mark_stack != nullptr);
641       // TODO: Check performance?
642       std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
643     }
644     if (kCountTasks) {
645       ++mark_sweep_->work_chunks_created_;
646     }
647   }
648 
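  // Capacity of each task's local mark stack, in object references.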
649   static const size_t kMaxSize = 1 * KB;
650 
651  protected:
652   class MarkObjectParallelVisitor {
653    public:
654     explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
655                                        MarkSweep* mark_sweep) ALWAYS_INLINE
656             : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
657 
658     void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
659         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
660       mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
661       if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
662         if (kUseFinger) {
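          // References at or above the finger are not pushed: they were just marked in the
          // bitmap, so the range scan that owns the finger will still visit them.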
663           std::atomic_thread_fence(std::memory_order_seq_cst);
664           if (reinterpret_cast<uintptr_t>(ref) >=
665               static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
666             return;
667           }
668         }
669         chunk_task_->MarkStackPush(ref);
670       }
671     }
672 
673    private:
674     MarkStackTask<kUseFinger>* const chunk_task_;
675     MarkSweep* const mark_sweep_;
676   };
677 
678   class ScanObjectParallelVisitor {
679    public:
680     explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
681         : chunk_task_(chunk_task) {}
682 
683     // No thread safety analysis since multiple threads will use this visitor.
684     void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
685         EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
686       MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
687       MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
688       DelayReferenceReferentVisitor ref_visitor(mark_sweep);
689       mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
690     }
691 
692    private:
693     MarkStackTask<kUseFinger>* const chunk_task_;
694   };
695 
696   virtual ~MarkStackTask() {
697     // Make sure that we have cleared our mark stack.
698     DCHECK_EQ(mark_stack_pos_, 0U);
699     if (kCountTasks) {
700       ++mark_sweep_->work_chunks_deleted_;
701     }
702   }
703 
704   MarkSweep* const mark_sweep_;
705   ThreadPool* const thread_pool_;
706   // Thread local mark stack for this task.
707   StackReference<Object> mark_stack_[kMaxSize];
708   // Mark stack position.
709   size_t mark_stack_pos_;
710 
711   ALWAYS_INLINE void MarkStackPush(Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
712     if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
713       // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
714       mark_stack_pos_ /= 2;
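      // The new task takes the upper half of the local buffer; this task keeps the lower half.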
715       auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
716                                      mark_stack_ + mark_stack_pos_);
717       thread_pool_->AddTask(Thread::Current(), task);
718     }
719     DCHECK(obj != nullptr);
720     DCHECK_LT(mark_stack_pos_, kMaxSize);
721     mark_stack_[mark_stack_pos_++].Assign(obj);
722   }
723 
724   virtual void Finalize() {
725     delete this;
726   }
727 
728   // Scans all of the objects
729   virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
730       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
731     UNUSED(self);
732     ScanObjectParallelVisitor visitor(this);
733     // TODO: Tune this.
734     static const size_t kFifoSize = 4;
735     BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
736     for (;;) {
737       Object* obj = nullptr;
738       if (kUseMarkStackPrefetch) {
739         while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
740           Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
741           DCHECK(mark_stack_obj != nullptr);
742           __builtin_prefetch(mark_stack_obj);
743           prefetch_fifo.push_back(mark_stack_obj);
744         }
745         if (UNLIKELY(prefetch_fifo.empty())) {
746           break;
747         }
748         obj = prefetch_fifo.front();
749         prefetch_fifo.pop_front();
750       } else {
751         if (UNLIKELY(mark_stack_pos_ == 0)) {
752           break;
753         }
754         obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
755       }
756       DCHECK(obj != nullptr);
757       visitor(obj);
758     }
759   }
760 };
761 
762 class CardScanTask : public MarkStackTask<false> {
763  public:
764   CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
765                accounting::ContinuousSpaceBitmap* bitmap,
766                uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size,
767                StackReference<Object>* mark_stack_obj, bool clear_card)
768       : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
769         bitmap_(bitmap),
770         begin_(begin),
771         end_(end),
772         minimum_age_(minimum_age), clear_card_(clear_card) {
773   }
774 
775  protected:
776   accounting::ContinuousSpaceBitmap* const bitmap_;
777   uint8_t* const begin_;
778   uint8_t* const end_;
779   const uint8_t minimum_age_;
780   const bool clear_card_;
781 
782   virtual void Finalize() {
783     delete this;
784   }
785 
786   virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
787     ScanObjectParallelVisitor visitor(this);
788     accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
789     size_t cards_scanned = clear_card_ ?
790                            card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_) :
791                            card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_);
792     VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
793         << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
794     // Finish by emptying our local mark stack.
795     MarkStackTask::Run(self);
796   }
797 };
798 
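// Number of threads used for marking, including the calling GC thread; the thread pool supplies
// the remaining thread_count - 1 workers (see the SetMaxActiveWorkers calls below).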
799 size_t MarkSweep::GetThreadCount(bool paused) const {
800   if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
801     return 1;
802   }
803   return (paused ? heap_->GetParallelGCThreadCount() : heap_->GetConcGCThreadCount()) + 1;
804 }
805 
806 void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
807   accounting::CardTable* card_table = GetHeap()->GetCardTable();
808   ThreadPool* thread_pool = GetHeap()->GetThreadPool();
809   size_t thread_count = GetThreadCount(paused);
810   // The parallel version with only one thread is faster for card scanning, TODO: fix.
811   if (kParallelCardScan && thread_count > 1) {
812     Thread* self = Thread::Current();
813     // Can't have a different split for each space since multiple spaces can have their cards being
814     // scanned at the same time.
815     TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
816         GetTimings());
817     // Try to take some of the mark stack since we can pass this off to the worker tasks.
818     StackReference<Object>* mark_stack_begin = mark_stack_->Begin();
819     StackReference<Object>* mark_stack_end = mark_stack_->End();
820     const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
821     // Estimated number of work tasks we will create.
822     const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
823     DCHECK_NE(mark_stack_tasks, 0U);
824     const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
825                                              mark_stack_size / mark_stack_tasks + 1);
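    // Each card scan task is also seeded with up to mark_stack_delta entries taken from the back
    // of the global mark stack, capped at half of a task-local mark stack.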
826     for (const auto& space : GetHeap()->GetContinuousSpaces()) {
827       if (space->GetMarkBitmap() == nullptr) {
828         continue;
829       }
830       uint8_t* card_begin = space->Begin();
831       uint8_t* card_end = space->End();
832       // Align up the end address. For example, the image space's end
833       // may not be card-size-aligned.
834       card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
835       DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
836       DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
837       // Calculate how many bytes of heap we will scan.
838       const size_t address_range = card_end - card_begin;
839       // Calculate how much address range each task gets.
840       const size_t card_delta = RoundUp(address_range / thread_count + 1,
841                                         accounting::CardTable::kCardSize);
842       // If paused and the space is neither zygote nor image space, we could clear the dirty
843       // cards to avoid accumulating them to increase card scanning load in the following GC
844       // cycles. We need to keep dirty cards of image space and zygote space in order to track
845       // references to the other spaces.
846       bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
847       // Create the worker tasks for this space.
848       while (card_begin != card_end) {
849         // Add a range of cards.
850         size_t addr_remaining = card_end - card_begin;
851         size_t card_increment = std::min(card_delta, addr_remaining);
852         // Take from the back of the mark stack.
853         size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
854         size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
855         mark_stack_end -= mark_stack_increment;
856         mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
857         DCHECK_EQ(mark_stack_end, mark_stack_->End());
858         // Add the new task to the thread pool.
859         auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
860                                       card_begin + card_increment, minimum_age,
861                                       mark_stack_increment, mark_stack_end, clear_card);
862         thread_pool->AddTask(self, task);
863         card_begin += card_increment;
864       }
865     }
866 
867     // Note: the card scan below may dirty new cards (and scan them)
868     // as a side effect when a Reference object is encountered and
869     // queued during the marking. See b/11465268.
870     thread_pool->SetMaxActiveWorkers(thread_count - 1);
871     thread_pool->StartWorkers(self);
872     thread_pool->Wait(self, true, true);
873     thread_pool->StopWorkers(self);
874   } else {
875     for (const auto& space : GetHeap()->GetContinuousSpaces()) {
876       if (space->GetMarkBitmap() != nullptr) {
877         // Image spaces are handled properly since live == marked for them.
878         const char* name = nullptr;
879         switch (space->GetGcRetentionPolicy()) {
880         case space::kGcRetentionPolicyNeverCollect:
881           name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects";
882           break;
883         case space::kGcRetentionPolicyFullCollect:
884           name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects";
885           break;
886         case space::kGcRetentionPolicyAlwaysCollect:
887           name = paused ? "(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects";
888           break;
889         default:
890           LOG(FATAL) << "Unreachable";
891           UNREACHABLE();
892         }
893         TimingLogger::ScopedTiming t(name, GetTimings());
894         ScanObjectVisitor visitor(this);
895         bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
896         if (clear_card) {
897           card_table->Scan<true>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
898                                  minimum_age);
899         } else {
900           card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
901                                   minimum_age);
902         }
903       }
904     }
905   }
906 }
907 
908 class RecursiveMarkTask : public MarkStackTask<false> {
909  public:
910   RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
911                     accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
912       : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr), bitmap_(bitmap), begin_(begin),
913         end_(end) {
914   }
915 
916  protected:
917   accounting::ContinuousSpaceBitmap* const bitmap_;
918   const uintptr_t begin_;
919   const uintptr_t end_;
920 
921   virtual void Finalize() {
922     delete this;
923   }
924 
925   // Scans all of the objects
926   virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
927     ScanObjectParallelVisitor visitor(this);
928     bitmap_->VisitMarkedRange(begin_, end_, visitor);
929     // Finish by emptying our local mark stack.
930     MarkStackTask::Run(self);
931   }
932 };
933 
934 // Populates the mark stack based on the set of marked objects and
935 // recursively marks until the mark stack is emptied.
936 void MarkSweep::RecursiveMark() {
937   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
938   // RecursiveMark will build the lists of known instances of the Reference classes. See
939   // DelayReferenceReferent for details.
940   if (kUseRecursiveMark) {
941     const bool partial = GetGcType() == kGcTypePartial;
942     ScanObjectVisitor scan_visitor(this);
943     auto* self = Thread::Current();
944     ThreadPool* thread_pool = heap_->GetThreadPool();
945     size_t thread_count = GetThreadCount(false);
946     const bool parallel = kParallelRecursiveMark && thread_count > 1;
947     mark_stack_->Reset();
948     for (const auto& space : GetHeap()->GetContinuousSpaces()) {
949       if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
950           (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
951         current_space_bitmap_ = space->GetMarkBitmap();
952         if (current_space_bitmap_ == nullptr) {
953           continue;
954         }
955         if (parallel) {
956           // We will use the mark stack in the future.
957           // CHECK(mark_stack_->IsEmpty());
958           // This function does not handle heap end increasing, so we must use the space end.
959           uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
960           uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
961           atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());
962 
963           // Create a few worker tasks.
964           const size_t n = thread_count * 2;
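          // Split the range into roughly n chunks, each rounded up to a KB boundary; if a chunk
          // would be smaller than 16 KB, hand the whole remainder to a single task.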
965           while (begin != end) {
966             uintptr_t start = begin;
967             uintptr_t delta = (end - begin) / n;
968             delta = RoundUp(delta, KB);
969             if (delta < 16 * KB) delta = end - begin;
970             begin += delta;
971             auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
972                                                begin);
973             thread_pool->AddTask(self, task);
974           }
975           thread_pool->SetMaxActiveWorkers(thread_count - 1);
976           thread_pool->StartWorkers(self);
977           thread_pool->Wait(self, true, true);
978           thread_pool->StopWorkers(self);
979         } else {
980           // This function does not handle heap end increasing, so we must use the space end.
981           uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
982           uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
983           current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
984         }
985       }
986     }
987   }
988   ProcessMarkStack(false);
989 }
990 
991 mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
992   if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
993     return object;
994   }
995   return nullptr;
996 }
997 
998 void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
999   ScanGrayObjects(paused, minimum_age);
1000   ProcessMarkStack(paused);
1001 }
1002 
1003 void MarkSweep::ReMarkRoots() {
1004   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1005   Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
1006   Runtime::Current()->VisitRoots(this, static_cast<VisitRootFlags>(
1007       kVisitRootFlagNewRoots | kVisitRootFlagStopLoggingNewRoots | kVisitRootFlagClearRootLog));
1008   if (kVerifyRootsMarked) {
1009     TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings());
1010     VerifyRootMarkedVisitor visitor(this);
1011     Runtime::Current()->VisitRoots(&visitor);
1012   }
1013 }
1014 
1015 void MarkSweep::SweepSystemWeaks(Thread* self) {
1016   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1017   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1018   Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
1019 }
1020 
1021 mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
1022   reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
1023   // We don't actually want to sweep the object, so let's return "marked".
1024   return obj;
1025 }
1026 
1027 void MarkSweep::VerifyIsLive(const Object* obj) {
1028   if (!heap_->GetLiveBitmap()->Test(obj)) {
1029     // TODO: Consider live stack? Has this code bitrotted?
1030     CHECK(!heap_->allocation_stack_->Contains(obj))
1031         << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
1032   }
1033 }
1034 
1035 void MarkSweep::VerifySystemWeaks() {
1036   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1037   // Verify system weaks, uses a special object visitor which returns the input object.
1038   Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
1039 }
1040 
1041 class CheckpointMarkThreadRoots : public Closure, public RootVisitor {
1042  public:
1043   explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
1044                                      bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
1045       : mark_sweep_(mark_sweep),
1046         revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
1047             revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
1048   }
1049 
1050   void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
1051       OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1052       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
1053     for (size_t i = 0; i < count; ++i) {
1054       mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
1055     }
1056   }
1057 
1058   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
1059                   const RootInfo& info ATTRIBUTE_UNUSED)
1060       OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1061       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
1062     for (size_t i = 0; i < count; ++i) {
1063       mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
1064     }
1065   }
1066 
1067   virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1068     ATRACE_BEGIN("Marking thread roots");
1069     // Note: self is not necessarily equal to thread since thread may be suspended.
1070     Thread* const self = Thread::Current();
1071     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1072         << thread->GetState() << " thread " << thread << " self " << self;
1073     thread->VisitRoots(this);
1074     ATRACE_END();
1075     if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
1076       ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
1077       mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
1078       ATRACE_END();
1079     }
1080     // If thread is a running mutator, then act on behalf of the garbage collector.
1081     // See the code in ThreadList::RunCheckpoint.
1082     if (thread->GetState() == kRunnable) {
1083       mark_sweep_->GetBarrier().Pass(self);
1084     }
1085   }
1086 
1087  private:
1088   MarkSweep* const mark_sweep_;
1089   const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
1090 };
1091 
1092 void MarkSweep::MarkRootsCheckpoint(Thread* self,
1093                                     bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
1094   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1095   CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
1096   ThreadList* thread_list = Runtime::Current()->GetThreadList();
1097   // Request that the checkpoint be run on all threads, returning a count of the threads that
1098   // must run through the barrier, including self.
1099   size_t barrier_count = thread_list->RunCheckpoint(&check_point);
1100   // Release locks then wait for all mutator threads to pass the barrier.
1101   // If there are no threads to wait for, which implies that all the checkpoint functions have
1102   // finished, then there is no need to release the locks.
1103   if (barrier_count == 0) {
1104     return;
1105   }
1106   Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
1107   Locks::mutator_lock_->SharedUnlock(self);
1108   {
1109     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1110     gc_barrier_->Increment(self, barrier_count);
1111   }
1112   Locks::mutator_lock_->SharedLock(self);
1113   Locks::heap_bitmap_lock_->ExclusiveLock(self);
1114 }
1115 
1116 void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
1117   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1118   Thread* self = Thread::Current();
1119   mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
1120       sweep_array_free_buffer_mem_map_->BaseBegin());
1121   size_t chunk_free_pos = 0;
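  // Unmarked objects are accumulated in the page-backed free buffer and released in batches of
  // kSweepArrayChunkFreeSize objects with a single FreeList() call per batch.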
1122   ObjectBytePair freed;
1123   ObjectBytePair freed_los;
1124   // How many objects are left in the array, modified after each space is swept.
1125   StackReference<Object>* objects = allocations->Begin();
1126   size_t count = allocations->Size();
1127   // Change the order to ensure that the non-moving space is swept last as an optimization.
1128   std::vector<space::ContinuousSpace*> sweep_spaces;
1129   space::ContinuousSpace* non_moving_space = nullptr;
1130   for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
1131     if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
1132         space->GetLiveBitmap() != nullptr) {
1133       if (space == heap_->GetNonMovingSpace()) {
1134         non_moving_space = space;
1135       } else {
1136         sweep_spaces.push_back(space);
1137       }
1138     }
1139   }
1140   // Unlikely to sweep a significant number of non-movable objects, so we do these after the
1141   // other alloc spaces as an optimization.
1142   if (non_moving_space != nullptr) {
1143     sweep_spaces.push_back(non_moving_space);
1144   }
1145   // Start by sweeping the continuous spaces.
1146   for (space::ContinuousSpace* space : sweep_spaces) {
1147     space::AllocSpace* alloc_space = space->AsAllocSpace();
1148     accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1149     accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1150     if (swap_bitmaps) {
1151       std::swap(live_bitmap, mark_bitmap);
1152     }
1153     StackReference<Object>* out = objects;
1154     for (size_t i = 0; i < count; ++i) {
1155       Object* const obj = objects[i].AsMirrorPtr();
1156       if (kUseThreadLocalAllocationStack && obj == nullptr) {
1157         continue;
1158       }
1159       if (space->HasAddress(obj)) {
1160         // This object is in the space, remove it from the array and add it to the sweep buffer
1161         // if needed.
1162         if (!mark_bitmap->Test(obj)) {
1163           if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
1164             TimingLogger::ScopedTiming t2("FreeList", GetTimings());
1165             freed.objects += chunk_free_pos;
1166             freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
1167             chunk_free_pos = 0;
1168           }
1169           chunk_free_buffer[chunk_free_pos++] = obj;
1170         }
1171       } else {
1172         (out++)->Assign(obj);
1173       }
1174     }
1175     if (chunk_free_pos > 0) {
1176       TimingLogger::ScopedTiming t2("FreeList", GetTimings());
1177       freed.objects += chunk_free_pos;
1178       freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
1179       chunk_free_pos = 0;
1180     }
1181     // All of the references that the space contained are no longer in the allocation stack;
1182     // update the count.
1183     count = out - objects;
1184   }
1185   // Handle the large object space.
1186   space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
1187   if (large_object_space != nullptr) {
1188     accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
1189     accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
1190     if (swap_bitmaps) {
1191       std::swap(large_live_objects, large_mark_objects);
1192     }
    for (size_t i = 0; i < count; ++i) {
      Object* const obj = objects[i].AsMirrorPtr();
      // Handle large objects.
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (!large_mark_objects->Test(obj)) {
        ++freed_los.objects;
        freed_los.bytes += large_object_space->Free(self, obj);
      }
    }
  }
  {
    TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
    RecordFree(freed);
    RecordFreeLOS(freed_los);
    t2.NewTiming("ResetStack");
    allocations->Reset();
  }
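  // The chunk free buffer is no longer needed; release the pages backing it.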
  sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Ensure that nobody inserted items into the live stack after we swapped the stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  {
    TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
    // Mark everything allocated since the last GC as live so that we can sweep concurrently,
    // knowing that new allocations won't be marked as live.
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
    DCHECK(mark_stack_->IsEmpty());
  }
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
  if (los != nullptr) {
    TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
    RecordFreeLOS(los->Sweep(swap_bitmaps));
  }
}

// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, &HeapReferenceMarkedCallback,
                                                         this);
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
  }

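  // Visit one reference field of obj at the given offset and mark the object it refers to.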
  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference.  Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor mark_visitor(this);
  DelayReferenceReferentVisitor ref_visitor(this);
  ScanObjectVisit(obj, mark_visitor, ref_visitor);
}

void MarkSweep::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false);
}

void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
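  // Each task receives at most kMaxSize references; the "+ 1" rounds the division up so that the
  // chunk size is never zero, even for very small mark stacks.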
  // Split the current mark stack up into work tasks.
  for (auto* it = mark_stack_->Begin(), *end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
    it += delta;
  }
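  // Only thread_count - 1 pool workers are activated; the calling thread also helps run tasks
  // while it waits below.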
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
           work_chunks_deleted_.LoadSequentiallyConsistent())
      << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings());
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
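    // The FIFO lets us prefetch an object several pops before it is scanned, hiding some of the
    // cache-miss latency of walking the mark stack.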
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* mark_stack_obj = mark_stack_->PopBack();
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
}

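// An object counts as marked if it lies in the immune region, or if its bit is set in the current
// space bitmap (fast path) or in the heap-wide mark bitmap (slow path).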
inline bool MarkSweep::IsMarked(const Object* object) const {
  if (immune_region_.ContainsObject(object)) {
    return true;
  }
  if (current_space_bitmap_->HasAddress(object)) {
    return current_space_bitmap_->Test(object);
  }
  return mark_bitmap_->Test(object);
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed()
        << " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed();
  }
  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
  }
  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
  }
  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
        << " marked " << large_object_mark_.LoadRelaxed();
  }
  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_.LoadRelaxed();
  }
  if (kCountMarkedObjects) {
    VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
        << " immune=" << mark_immune_count_.LoadRelaxed()
        << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
        << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
  }
  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
  mark_stack_->Reset();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint. Bump pointer space thread-local buffers must
    // not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    GetHeap()->RevokeAllThreadLocalBuffers();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art