1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "mark_sweep.h"
18
19 #include <atomic>
20 #include <functional>
21 #include <numeric>
22 #include <climits>
23 #include <vector>
24
25 #include "base/bounded_fifo.h"
26 #include "base/enums.h"
27 #include "base/logging.h"
28 #include "base/macros.h"
29 #include "base/mutex-inl.h"
30 #include "base/systrace.h"
31 #include "base/time_utils.h"
32 #include "base/timing_logger.h"
33 #include "gc/accounting/card_table-inl.h"
34 #include "gc/accounting/heap_bitmap-inl.h"
35 #include "gc/accounting/mod_union_table.h"
36 #include "gc/accounting/space_bitmap-inl.h"
37 #include "gc/heap.h"
38 #include "gc/reference_processor.h"
39 #include "gc/space/large_object_space.h"
40 #include "gc/space/space-inl.h"
41 #include "mark_sweep-inl.h"
42 #include "mirror/object-inl.h"
43 #include "runtime.h"
44 #include "scoped_thread_state_change-inl.h"
45 #include "thread-inl.h"
46 #include "thread_list.h"
47
48 namespace art {
49 namespace gc {
50 namespace collector {
51
52 // Performance options.
53 static constexpr bool kUseRecursiveMark = false;
54 static constexpr bool kUseMarkStackPrefetch = true;
55 static constexpr size_t kSweepArrayChunkFreeSize = 1024;
56 static constexpr bool kPreCleanCards = true;
57
58 // Parallelism options.
59 static constexpr bool kParallelCardScan = true;
60 static constexpr bool kParallelRecursiveMark = true;
61 // Don't attempt to parallelize mark stack processing unless the mark stack has at least n
62 // elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
63 // having this can add overhead in ProcessReferences since we may end up doing many calls to
64 // ProcessMarkStack with very small mark stacks.
65 static constexpr size_t kMinimumParallelMarkStackSize = 128;
66 static constexpr bool kParallelProcessMarkStack = true;
67
68 // Profiling and information flags.
69 static constexpr bool kProfileLargeObjects = false;
70 static constexpr bool kMeasureOverhead = false;
71 static constexpr bool kCountTasks = false;
72 static constexpr bool kCountMarkedObjects = false;
73
74 // Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
75 static constexpr bool kCheckLocks = kDebugLocking;
76 static constexpr bool kVerifyRootsMarked = kIsDebugBuild;
77
78 // If true, revoke the rosalloc thread-local buffers at the
79 // checkpoint, as opposed to during the pause.
80 static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;
81
82 void MarkSweep::BindBitmaps() {
83 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
84 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
85 // Mark all of the spaces we never collect as immune.
86 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
87 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
88 immune_spaces_.AddSpace(space);
89 }
90 }
91 }
92
93 MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
94 : GarbageCollector(heap,
95 name_prefix +
96 (is_concurrent ? "concurrent mark sweep": "mark sweep")),
97 current_space_bitmap_(nullptr),
98 mark_bitmap_(nullptr),
99 mark_stack_(nullptr),
100 gc_barrier_(new Barrier(0)),
101 mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
102 is_concurrent_(is_concurrent),
103 live_stack_freeze_size_(0) {
104 std::string error_msg;
105 MemMap* mem_map = MemMap::MapAnonymous(
106 "mark sweep sweep array free buffer", nullptr,
107 RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
108 PROT_READ | PROT_WRITE, false, false, &error_msg);
109 CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
110 sweep_array_free_buffer_mem_map_.reset(mem_map);
111 }
112
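// Per-GC setup: grab the heap's mark stack and mark bitmap, reset the immune spaces and all
// profiling counters, and decide whether soft references should be cleared for this collection.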
113 void MarkSweep::InitializePhase() {
114 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
115 mark_stack_ = heap_->GetMarkStack();
116 DCHECK(mark_stack_ != nullptr);
117 immune_spaces_.Reset();
118 no_reference_class_count_.StoreRelaxed(0);
119 normal_count_.StoreRelaxed(0);
120 class_count_.StoreRelaxed(0);
121 object_array_count_.StoreRelaxed(0);
122 other_count_.StoreRelaxed(0);
123 reference_count_.StoreRelaxed(0);
124 large_object_test_.StoreRelaxed(0);
125 large_object_mark_.StoreRelaxed(0);
126 overhead_time_.StoreRelaxed(0);
127 work_chunks_created_.StoreRelaxed(0);
128 work_chunks_deleted_.StoreRelaxed(0);
129 mark_null_count_.StoreRelaxed(0);
130 mark_immune_count_.StoreRelaxed(0);
131 mark_fastpath_count_.StoreRelaxed(0);
132 mark_slowpath_count_.StoreRelaxed(0);
133 {
134 // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
135 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
136 mark_bitmap_ = heap_->GetMarkBitmap();
137 }
138 if (!GetCurrentIteration()->GetClearSoftReferences()) {
139 // Always clear soft references for non-sticky collections.
140 GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky);
141 }
142 }
143
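// Top-level driver for a collection: runs the marking phase (concurrently with mutators when
// enabled), the paused re-mark / stack-swap phase, the concurrent reclaim phase, and FinishPhase.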
144 void MarkSweep::RunPhases() {
145 Thread* self = Thread::Current();
146 InitializePhase();
147 Locks::mutator_lock_->AssertNotHeld(self);
148 if (IsConcurrent()) {
149 GetHeap()->PreGcVerification(this);
150 {
151 ReaderMutexLock mu(self, *Locks::mutator_lock_);
152 MarkingPhase();
153 }
154 ScopedPause pause(this);
155 GetHeap()->PrePauseRosAllocVerification(this);
156 PausePhase();
157 RevokeAllThreadLocalBuffers();
158 } else {
159 ScopedPause pause(this);
160 GetHeap()->PreGcVerificationPaused(this);
161 MarkingPhase();
162 GetHeap()->PrePauseRosAllocVerification(this);
163 PausePhase();
164 RevokeAllThreadLocalBuffers();
165 }
166 {
167 // Sweeping is always done concurrently, even for non-concurrent mark sweep.
168 ReaderMutexLock mu(self, *Locks::mutator_lock_);
169 ReclaimPhase();
170 }
171 GetHeap()->PostGcVerification(this);
172 FinishPhase();
173 }
174
175 void MarkSweep::ProcessReferences(Thread* self) {
176 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
177 GetHeap()->GetReferenceProcessor()->ProcessReferences(
178 true,
179 GetTimings(),
180 GetCurrentIteration()->GetClearSoftReferences(),
181 this);
182 }
183
184 void MarkSweep::PausePhase() {
185 TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings());
186 Thread* self = Thread::Current();
187 Locks::mutator_lock_->AssertExclusiveHeld(self);
188 if (IsConcurrent()) {
189 // Handle the dirty objects if we are a concurrent GC.
190 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
191 // Re-mark root set.
192 ReMarkRoots();
193 // Re-scan dirty objects; this is required for concurrent GCs since mutators may have dirtied cards during marking.
194 RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
195 }
196 {
197 TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
198 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
199 heap_->SwapStacks();
200 live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
201 // Need to revoke all the thread local allocation stacks since we just swapped the allocation
202 // stacks and don't want anybody to allocate into the live stack.
203 RevokeAllThreadLocalAllocationStacks(self);
204 }
205 heap_->PreSweepingGcVerification(this);
206 // Disallow new system weaks to prevent a race which occurs when someone adds a new system
207 // weak before we sweep them. Since this new system weak may not be marked, the GC may
208 // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
209 // reference to a string that is about to be swept.
210 Runtime::Current()->DisallowNewSystemWeaks();
211 // Enable the reference processing slow path, needs to be done with mutators paused since there
212 // is no lock in the GetReferent fast path.
213 GetHeap()->GetReferenceProcessor()->EnableSlowPath();
214 }
215
216 void MarkSweep::PreCleanCards() {
217 // Don't do this for non-concurrent GCs since they don't have any dirty cards.
218 if (kPreCleanCards && IsConcurrent()) {
219 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
220 Thread* self = Thread::Current();
221 CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
222 // Process dirty cards and add dirty cards to mod union tables, also ages cards.
223 heap_->ProcessCards(GetTimings(), false, true, false);
224 // The checkpoint root marking is required to avoid a race condition which occurs if the
225 // following happens during a reference write:
226 // 1. mutator dirties the card (write barrier)
227 // 2. GC ages the card (the above ProcessCards call)
228 // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
229 // 4. mutator writes the value (corresponding to the write barrier in 1.)
230 // This causes the GC to age the card but not necessarily mark the reference which the mutator
231 // wrote into the object stored in the card.
232 // Having the checkpoint fixes this issue since it ensures that the card mark and the
233 // reference write are visible to the GC before the card is scanned (this is due to locks being
234 // acquired / released in the checkpoint code).
235 // The other roots are also marked to help reduce the pause.
236 MarkRootsCheckpoint(self, false);
237 MarkNonThreadRoots();
238 MarkConcurrentRoots(
239 static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
240 // Process the newly aged cards.
241 RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
242 // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
243 // in the next GC.
244 }
245 }
246
247 void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
248 if (kUseThreadLocalAllocationStack) {
249 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
250 Locks::mutator_lock_->AssertExclusiveHeld(self);
251 heap_->RevokeAllThreadLocalAllocationStacks(self);
252 }
253 }
254
255 void MarkSweep::MarkingPhase() {
256 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
257 Thread* self = Thread::Current();
258 BindBitmaps();
259 FindDefaultSpaceBitmap();
260 // Process dirty cards and add dirty cards to mod union tables.
261 // If the GC type is non-sticky, then we just clear the cards instead of aging them.
262 heap_->ProcessCards(GetTimings(), false, true, GetGcType() != kGcTypeSticky);
263 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
264 MarkRoots(self);
265 MarkReachableObjects();
266 // Pre-clean dirtied cards to reduce pauses.
267 PreCleanCards();
268 }
269
270 class MarkSweep::ScanObjectVisitor {
271 public:
272 explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
273 : mark_sweep_(mark_sweep) {}
274
275 void operator()(ObjPtr<mirror::Object> obj) const
276 ALWAYS_INLINE
277 REQUIRES(Locks::heap_bitmap_lock_)
278 REQUIRES_SHARED(Locks::mutator_lock_) {
279 if (kCheckLocks) {
280 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
281 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
282 }
283 mark_sweep_->ScanObject(obj.Ptr());
284 }
285
286 private:
287 MarkSweep* const mark_sweep_;
288 };
289
290 void MarkSweep::UpdateAndMarkModUnion() {
291 for (const auto& space : immune_spaces_.GetSpaces()) {
292 const char* name = space->IsZygoteSpace()
293 ? "UpdateAndMarkZygoteModUnionTable"
294 : "UpdateAndMarkImageModUnionTable";
295 DCHECK(space->IsZygoteSpace() || space->IsImageSpace()) << *space;
296 TimingLogger::ScopedTiming t(name, GetTimings());
297 accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
298 if (mod_union_table != nullptr) {
299 mod_union_table->UpdateAndMarkReferences(this);
300 } else {
301 // No mod-union table, scan all the live bits. This can only occur for app images.
302 space->GetLiveBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
303 reinterpret_cast<uintptr_t>(space->End()),
304 ScanObjectVisitor(this));
305 }
306 }
307 }
308
309 void MarkSweep::MarkReachableObjects() {
310 UpdateAndMarkModUnion();
311 // Recursively mark all the non-image bits set in the mark bitmap.
312 RecursiveMark();
313 }
314
315 void MarkSweep::ReclaimPhase() {
316 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
317 Thread* const self = Thread::Current();
318 // Process the references concurrently.
319 ProcessReferences(self);
320 SweepSystemWeaks(self);
321 Runtime* const runtime = Runtime::Current();
322 runtime->AllowNewSystemWeaks();
323 // Clean up class loaders after system weaks are swept since that is how we know if class
324 // unloading occurred.
325 runtime->GetClassLinker()->CleanupClassLoaders();
326 {
327 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
328 GetHeap()->RecordFreeRevoke();
329 // Reclaim unmarked objects.
330 Sweep(false);
331 // Swap the live and mark bitmaps for each space which we modified. This is an
332 // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
333 // bitmaps.
334 SwapBitmaps();
335 // Unbind the live and mark bitmaps.
336 GetHeap()->UnBindBitmaps();
337 }
338 }
339
340 void MarkSweep::FindDefaultSpaceBitmap() {
341 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
342 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
343 accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
344 // We want to use the main space's bitmap instead of the non-moving space's if possible.
345 if (bitmap != nullptr &&
346 space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
347 current_space_bitmap_ = bitmap;
348 // If this is not the non-moving space, exit the loop early since its bitmap will be good enough.
349 if (space != heap_->GetNonMovingSpace()) {
350 break;
351 }
352 }
353 }
354 CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
355 << heap_->DumpSpaces();
356 }
357
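// Mark stack growth. Expansion doubles the capacity; resizing copies the existing entries aside,
// resizes the underlying stack, and re-pushes the saved entries.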
358 void MarkSweep::ExpandMarkStack() {
359 ResizeMarkStack(mark_stack_->Capacity() * 2);
360 }
361
362 void MarkSweep::ResizeMarkStack(size_t new_size) {
363 // Rare case, no need to have Thread::Current be a parameter.
364 if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
365 // Someone else acquired the lock and expanded the mark stack before us.
366 return;
367 }
368 std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End());
369 CHECK_LE(mark_stack_->Size(), new_size);
370 mark_stack_->Resize(new_size);
371 for (auto& obj : temp) {
372 mark_stack_->PushBack(obj.AsMirrorPtr());
373 }
374 }
375
376 mirror::Object* MarkSweep::MarkObject(mirror::Object* obj) {
377 MarkObject(obj, nullptr, MemberOffset(0));
378 return obj;
379 }
380
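// Thread-safe marking used by parallel tasks and checkpoints: atomically mark the object and, if
// it was not previously marked, push it onto the shared mark stack under mark_stack_lock_.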
381 inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) {
382 DCHECK(obj != nullptr);
383 if (MarkObjectParallel(obj)) {
384 MutexLock mu(Thread::Current(), mark_stack_lock_);
385 if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
386 ExpandMarkStack();
387 }
388 // The object must be pushed on to the mark stack.
389 mark_stack_->PushBack(obj);
390 }
391 }
392
393 bool MarkSweep::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
394 bool do_atomic_update ATTRIBUTE_UNUSED) {
395 mirror::Object* obj = ref->AsMirrorPtr();
396 if (obj == nullptr) {
397 return true;
398 }
399 return IsMarked(obj);
400 }
401
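// Slow-path visitor used when an object is not covered by the current space bitmap. For addresses
// that are not valid large objects it dumps the memory maps, the holder's field information and
// the suspended thread roots, then aborts.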
402 class MarkSweep::MarkObjectSlowPath {
403 public:
404 explicit MarkObjectSlowPath(MarkSweep* mark_sweep,
405 mirror::Object* holder = nullptr,
406 MemberOffset offset = MemberOffset(0))
407 : mark_sweep_(mark_sweep),
408 holder_(holder),
409 offset_(offset) {}
410
411 void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
412 if (kProfileLargeObjects) {
413 // TODO: Differentiate between marking and testing somehow.
414 ++mark_sweep_->large_object_test_;
415 ++mark_sweep_->large_object_mark_;
416 }
417 space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
418 if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
419 (kIsDebugBuild && large_object_space != nullptr &&
420 !large_object_space->Contains(obj)))) {
421 // Lowest priority logging first:
422 PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
423 MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true);
424 // Buffer the output in the string stream since it is more important than the stack traces
425 // and we want it to have log priority. The stack traces are printed from Runtime::Abort
426 // which is called from LOG(FATAL) but before the abort message.
427 std::ostringstream oss;
428 oss << "Tried to mark " << obj << " not contained by any spaces" << std::endl;
429 if (holder_ != nullptr) {
430 size_t holder_size = holder_->SizeOf();
431 ArtField* field = holder_->FindFieldByOffset(offset_);
432 oss << "Field info: "
433 << " holder=" << holder_
434 << " holder is "
435 << (mark_sweep_->GetHeap()->IsLiveObjectLocked(holder_)
436 ? "alive" : "dead")
437 << " holder_size=" << holder_size
438 << " holder_type=" << holder_->PrettyTypeOf()
439 << " offset=" << offset_.Uint32Value()
440 << " field=" << (field != nullptr ? field->GetName() : "nullptr")
441 << " field_type="
442 << (field != nullptr ? field->GetTypeDescriptor() : "")
443 << " first_ref_field_offset="
444 << (holder_->IsClass()
445 ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset(
446 kRuntimePointerSize)
447 : holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
448 << " num_of_ref_fields="
449 << (holder_->IsClass()
450 ? holder_->AsClass()->NumReferenceStaticFields()
451 : holder_->GetClass()->NumReferenceInstanceFields())
452 << std::endl;
453 // Print the memory content of the holder.
454 for (size_t i = 0; i < holder_size / sizeof(uint32_t); ++i) {
455 uint32_t* p = reinterpret_cast<uint32_t*>(holder_);
456 oss << &p[i] << ": " << "holder+" << (i * sizeof(uint32_t)) << " = " << std::hex << p[i]
457 << std::endl;
458 }
459 }
460 oss << "Attempting to see if it's a bad thread root" << std::endl;
461 mark_sweep_->VerifySuspendedThreadRoots(oss);
462 LOG(FATAL) << oss.str();
463 }
464 }
465
466 private:
467 MarkSweep* const mark_sweep_;
468 mirror::Object* const holder_;
469 MemberOffset offset_;
470 };
471
472 inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj,
473 mirror::Object* holder,
474 MemberOffset offset) {
475 DCHECK(obj != nullptr);
476 if (kUseBakerReadBarrier) {
477 // Verify all the objects have the correct state installed.
478 obj->AssertReadBarrierState();
479 }
480 if (immune_spaces_.IsInImmuneRegion(obj)) {
481 if (kCountMarkedObjects) {
482 ++mark_immune_count_;
483 }
484 DCHECK(mark_bitmap_->Test(obj));
485 } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
486 if (kCountMarkedObjects) {
487 ++mark_fastpath_count_;
488 }
489 if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
490 PushOnMarkStack(obj); // This object was not previously marked.
491 }
492 } else {
493 if (kCountMarkedObjects) {
494 ++mark_slowpath_count_;
495 }
496 MarkObjectSlowPath visitor(this, holder, offset);
497 // TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set
498 // will check again.
499 if (!mark_bitmap_->Set(obj, visitor)) {
500 PushOnMarkStack(obj); // Was not already marked, push.
501 }
502 }
503 }
504
505 inline void MarkSweep::PushOnMarkStack(mirror::Object* obj) {
506 if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
507 // Lock is not needed but is here anyway to please annotalysis.
508 MutexLock mu(Thread::Current(), mark_stack_lock_);
509 ExpandMarkStack();
510 }
511 // The object must be pushed on to the mark stack.
512 mark_stack_->PushBack(obj);
513 }
514
515 inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) {
516 DCHECK(obj != nullptr);
517 if (kUseBakerReadBarrier) {
518 // Verify all the objects have the correct state installed.
519 obj->AssertReadBarrierState();
520 }
521 if (immune_spaces_.IsInImmuneRegion(obj)) {
522 DCHECK(IsMarked(obj) != nullptr);
523 return false;
524 }
525 // Try to take advantage of locality of references within a space; failing that, find the space
526 // the hard way.
527 accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
528 if (LIKELY(object_bitmap->HasAddress(obj))) {
529 return !object_bitmap->AtomicTestAndSet(obj);
530 }
531 MarkObjectSlowPath visitor(this);
532 return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
533 }
534
535 void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
536 bool do_atomic_update ATTRIBUTE_UNUSED) {
537 MarkObject(ref->AsMirrorPtr(), nullptr, MemberOffset(0));
538 }
539
540 // Used to mark objects when processing the mark stack. If an object is null, it is not marked.
541 inline void MarkSweep::MarkObject(mirror::Object* obj,
542 mirror::Object* holder,
543 MemberOffset offset) {
544 if (obj != nullptr) {
545 MarkObjectNonNull(obj, holder, offset);
546 } else if (kCountMarkedObjects) {
547 ++mark_null_count_;
548 }
549 }
550
551 class MarkSweep::VerifyRootMarkedVisitor : public SingleRootVisitor {
552 public:
553 explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }
554
555 void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
556 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
557 CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
558 }
559
560 private:
561 MarkSweep* const collector_;
562 };
563
564 void MarkSweep::VisitRoots(mirror::Object*** roots,
565 size_t count,
566 const RootInfo& info ATTRIBUTE_UNUSED) {
567 for (size_t i = 0; i < count; ++i) {
568 MarkObjectNonNull(*roots[i]);
569 }
570 }
571
572 void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
573 size_t count,
574 const RootInfo& info ATTRIBUTE_UNUSED) {
575 for (size_t i = 0; i < count; ++i) {
576 MarkObjectNonNull(roots[i]->AsMirrorPtr());
577 }
578 }
579
580 class MarkSweep::VerifyRootVisitor : public SingleRootVisitor {
581 public:
582 explicit VerifyRootVisitor(std::ostream& os) : os_(os) {}
583
584 void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
585 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
586 // See if the root is on any space bitmap.
587 auto* heap = Runtime::Current()->GetHeap();
588 if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
589 space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
590 if (large_object_space != nullptr && !large_object_space->Contains(root)) {
591 os_ << "Found invalid root: " << root << " " << info << std::endl;
592 }
593 }
594 }
595
596 private:
597 std::ostream& os_;
598 };
599
600 void MarkSweep::VerifySuspendedThreadRoots(std::ostream& os) {
601 VerifyRootVisitor visitor(os);
602 Runtime::Current()->GetThreadList()->VisitRootsForSuspendedThreads(&visitor);
603 }
604
605 void MarkSweep::MarkRoots(Thread* self) {
606 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
607 if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
608 // If we exclusively hold the mutator lock, all threads must be suspended.
609 Runtime::Current()->VisitRoots(this);
610 RevokeAllThreadLocalAllocationStacks(self);
611 } else {
612 MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
613 // At this point the live stack should no longer have any mutators which push into it.
614 MarkNonThreadRoots();
615 MarkConcurrentRoots(
616 static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
617 }
618 }
619
620 void MarkSweep::MarkNonThreadRoots() {
621 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
622 Runtime::Current()->VisitNonThreadRoots(this);
623 }
624
625 void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
626 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
627 // Visit all runtime roots and clear dirty flags.
628 Runtime::Current()->VisitConcurrentRoots(this, flags);
629 }
630
631 class MarkSweep::DelayReferenceReferentVisitor {
632 public:
633 explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {}
634
635 void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
636 REQUIRES(Locks::heap_bitmap_lock_)
637 REQUIRES_SHARED(Locks::mutator_lock_) {
638 collector_->DelayReferenceReferent(klass, ref);
639 }
640
641 private:
642 MarkSweep* const collector_;
643 };
644
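// Thread-pool task that drains a fixed-size, task-local mark stack. When the local stack
// overflows, half of it is split off into a new task so marking proceeds in parallel.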
645 template <bool kUseFinger = false>
646 class MarkSweep::MarkStackTask : public Task {
647 public:
648 MarkStackTask(ThreadPool* thread_pool,
649 MarkSweep* mark_sweep,
650 size_t mark_stack_size,
651 StackReference<mirror::Object>* mark_stack)
652 : mark_sweep_(mark_sweep),
653 thread_pool_(thread_pool),
654 mark_stack_pos_(mark_stack_size) {
655 // We may have to copy part of an existing mark stack when another mark stack overflows.
656 if (mark_stack_size != 0) {
657 DCHECK(mark_stack != nullptr);
658 // TODO: Check performance?
659 std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
660 }
661 if (kCountTasks) {
662 ++mark_sweep_->work_chunks_created_;
663 }
664 }
665
666 static const size_t kMaxSize = 1 * KB;
667
668 protected:
669 class MarkObjectParallelVisitor {
670 public:
671 ALWAYS_INLINE MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
672 MarkSweep* mark_sweep)
673 : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
674
675 ALWAYS_INLINE void operator()(mirror::Object* obj,
676 MemberOffset offset,
677 bool is_static ATTRIBUTE_UNUSED) const
678 REQUIRES_SHARED(Locks::mutator_lock_) {
679 Mark(obj->GetFieldObject<mirror::Object>(offset));
680 }
681
682 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
683 REQUIRES_SHARED(Locks::mutator_lock_) {
684 if (!root->IsNull()) {
685 VisitRoot(root);
686 }
687 }
688
689 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
690 REQUIRES_SHARED(Locks::mutator_lock_) {
691 if (kCheckLocks) {
692 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
693 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
694 }
695 Mark(root->AsMirrorPtr());
696 }
697
698 private:
699 ALWAYS_INLINE void Mark(mirror::Object* ref) const REQUIRES_SHARED(Locks::mutator_lock_) {
700 if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
701 if (kUseFinger) {
702 std::atomic_thread_fence(std::memory_order_seq_cst);
703 if (reinterpret_cast<uintptr_t>(ref) >=
704 static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
705 return;
706 }
707 }
708 chunk_task_->MarkStackPush(ref);
709 }
710 }
711
712 MarkStackTask<kUseFinger>* const chunk_task_;
713 MarkSweep* const mark_sweep_;
714 };
715
716 class ScanObjectParallelVisitor {
717 public:
718 ALWAYS_INLINE explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task)
719 : chunk_task_(chunk_task) {}
720
721 // No thread safety analysis since multiple threads will use this visitor.
722 void operator()(mirror::Object* obj) const
723 REQUIRES(Locks::heap_bitmap_lock_)
724 REQUIRES_SHARED(Locks::mutator_lock_) {
725 MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
726 MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
727 DelayReferenceReferentVisitor ref_visitor(mark_sweep);
728 mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
729 }
730
731 private:
732 MarkStackTask<kUseFinger>* const chunk_task_;
733 };
734
735 virtual ~MarkStackTask() {
736 // Make sure that we have cleared our mark stack.
737 DCHECK_EQ(mark_stack_pos_, 0U);
738 if (kCountTasks) {
739 ++mark_sweep_->work_chunks_deleted_;
740 }
741 }
742
743 MarkSweep* const mark_sweep_;
744 ThreadPool* const thread_pool_;
745 // Thread local mark stack for this task.
746 StackReference<mirror::Object> mark_stack_[kMaxSize];
747 // Mark stack position.
748 size_t mark_stack_pos_;
749
750 ALWAYS_INLINE void MarkStackPush(mirror::Object* obj)
751 REQUIRES_SHARED(Locks::mutator_lock_) {
752 if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
753 // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
754 mark_stack_pos_ /= 2;
755 auto* task = new MarkStackTask(thread_pool_,
756 mark_sweep_,
757 kMaxSize - mark_stack_pos_,
758 mark_stack_ + mark_stack_pos_);
759 thread_pool_->AddTask(Thread::Current(), task);
760 }
761 DCHECK(obj != nullptr);
762 DCHECK_LT(mark_stack_pos_, kMaxSize);
763 mark_stack_[mark_stack_pos_++].Assign(obj);
764 }
765
766 virtual void Finalize() {
767 delete this;
768 }
769
770 // Scans all of the objects
771 virtual void Run(Thread* self ATTRIBUTE_UNUSED)
772 REQUIRES(Locks::heap_bitmap_lock_)
773 REQUIRES_SHARED(Locks::mutator_lock_) {
774 ScanObjectParallelVisitor visitor(this);
775 // TODO: Tune this.
776 static const size_t kFifoSize = 4;
777 BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
778 for (;;) {
779 mirror::Object* obj = nullptr;
780 if (kUseMarkStackPrefetch) {
781 while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
782 mirror::Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
783 DCHECK(mark_stack_obj != nullptr);
784 __builtin_prefetch(mark_stack_obj);
785 prefetch_fifo.push_back(mark_stack_obj);
786 }
787 if (UNLIKELY(prefetch_fifo.empty())) {
788 break;
789 }
790 obj = prefetch_fifo.front();
791 prefetch_fifo.pop_front();
792 } else {
793 if (UNLIKELY(mark_stack_pos_ == 0)) {
794 break;
795 }
796 obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
797 }
798 DCHECK(obj != nullptr);
799 visitor(obj);
800 }
801 }
802 };
803
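// Task that scans a range of cards at or above minimum_age_ against a space bitmap (optionally
// clearing them) and then drains the mark stack chunk it was seeded with.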
804 class MarkSweep::CardScanTask : public MarkStackTask<false> {
805 public:
806 CardScanTask(ThreadPool* thread_pool,
807 MarkSweep* mark_sweep,
808 accounting::ContinuousSpaceBitmap* bitmap,
809 uint8_t* begin,
810 uint8_t* end,
811 uint8_t minimum_age,
812 size_t mark_stack_size,
813 StackReference<mirror::Object>* mark_stack_obj,
814 bool clear_card)
815 : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
816 bitmap_(bitmap),
817 begin_(begin),
818 end_(end),
819 minimum_age_(minimum_age),
820 clear_card_(clear_card) {}
821
822 protected:
823 accounting::ContinuousSpaceBitmap* const bitmap_;
824 uint8_t* const begin_;
825 uint8_t* const end_;
826 const uint8_t minimum_age_;
827 const bool clear_card_;
828
829 virtual void Finalize() {
830 delete this;
831 }
832
833 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
834 ScanObjectParallelVisitor visitor(this);
835 accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
836 size_t cards_scanned = clear_card_
837 ? card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_)
838 : card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_);
839 VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
840 << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
841 // Finish by emptying our local mark stack.
842 MarkStackTask::Run(self);
843 }
844 };
845
846 size_t MarkSweep::GetThreadCount(bool paused) const {
847 // Use fewer threads if we are in a background state (non-jank-perceptible) since we want to leave
848 // more CPU time for the foreground apps.
849 if (heap_->GetThreadPool() == nullptr || !Runtime::Current()->InJankPerceptibleProcessState()) {
850 return 1;
851 }
852 return (paused ? heap_->GetParallelGCThreadCount() : heap_->GetConcGCThreadCount()) + 1;
853 }
854
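// Scans objects on dirty (gray) cards: in parallel by splitting each space's card range across
// CardScanTasks, or serially with a single card table scan per space.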
855 void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
856 accounting::CardTable* card_table = GetHeap()->GetCardTable();
857 ThreadPool* thread_pool = GetHeap()->GetThreadPool();
858 size_t thread_count = GetThreadCount(paused);
859 // The parallel version with only one thread is faster for card scanning, TODO: fix.
860 if (kParallelCardScan && thread_count > 1) {
861 Thread* self = Thread::Current();
862 // Can't have a different split for each space since multiple spaces can have their cards being
863 // scanned at the same time.
864 TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
865 GetTimings());
866 // Try to take some of the mark stack since we can pass this off to the worker tasks.
867 StackReference<mirror::Object>* mark_stack_begin = mark_stack_->Begin();
868 StackReference<mirror::Object>* mark_stack_end = mark_stack_->End();
869 const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
870 // Estimated number of work tasks we will create.
871 const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
872 DCHECK_NE(mark_stack_tasks, 0U);
873 const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
874 mark_stack_size / mark_stack_tasks + 1);
875 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
876 if (space->GetMarkBitmap() == nullptr) {
877 continue;
878 }
879 uint8_t* card_begin = space->Begin();
880 uint8_t* card_end = space->End();
881 // Align up the end address. For example, the image space's end
882 // may not be card-size-aligned.
883 card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
884 DCHECK_ALIGNED(card_begin, accounting::CardTable::kCardSize);
885 DCHECK_ALIGNED(card_end, accounting::CardTable::kCardSize);
886 // Calculate how many bytes of heap we will scan.
887 const size_t address_range = card_end - card_begin;
888 // Calculate how much address range each task gets.
889 const size_t card_delta = RoundUp(address_range / thread_count + 1,
890 accounting::CardTable::kCardSize);
891 // If paused and the space is neither zygote nor image space, we could clear the dirty
892 // cards to avoid accumulating them to increase card scanning load in the following GC
893 // cycles. We need to keep dirty cards of image space and zygote space in order to track
894 // references to the other spaces.
895 bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
896 // Create the worker tasks for this space.
897 while (card_begin != card_end) {
898 // Add a range of cards.
899 size_t addr_remaining = card_end - card_begin;
900 size_t card_increment = std::min(card_delta, addr_remaining);
901 // Take from the back of the mark stack.
902 size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
903 size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
904 mark_stack_end -= mark_stack_increment;
905 mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
906 DCHECK_EQ(mark_stack_end, mark_stack_->End());
907 // Add the new task to the thread pool.
908 auto* task = new CardScanTask(thread_pool,
909 this,
910 space->GetMarkBitmap(),
911 card_begin,
912 card_begin + card_increment,
913 minimum_age,
914 mark_stack_increment,
915 mark_stack_end,
916 clear_card);
917 thread_pool->AddTask(self, task);
918 card_begin += card_increment;
919 }
920 }
921
922 // Note: the card scan below may dirty new cards (and scan them)
923 // as a side effect when a Reference object is encountered and
924 // queued during the marking. See b/11465268.
925 thread_pool->SetMaxActiveWorkers(thread_count - 1);
926 thread_pool->StartWorkers(self);
927 thread_pool->Wait(self, true, true);
928 thread_pool->StopWorkers(self);
929 } else {
930 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
931 if (space->GetMarkBitmap() != nullptr) {
932 // Image spaces are handled properly since live == marked for them.
933 const char* name = nullptr;
934 switch (space->GetGcRetentionPolicy()) {
935 case space::kGcRetentionPolicyNeverCollect:
936 name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects";
937 break;
938 case space::kGcRetentionPolicyFullCollect:
939 name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects";
940 break;
941 case space::kGcRetentionPolicyAlwaysCollect:
942 name = paused ? "(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects";
943 break;
944 default:
945 LOG(FATAL) << "Unreachable";
946 UNREACHABLE();
947 }
948 TimingLogger::ScopedTiming t(name, GetTimings());
949 ScanObjectVisitor visitor(this);
950 bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
951 if (clear_card) {
952 card_table->Scan<true>(space->GetMarkBitmap(),
953 space->Begin(),
954 space->End(),
955 visitor,
956 minimum_age);
957 } else {
958 card_table->Scan<false>(space->GetMarkBitmap(),
959 space->Begin(),
960 space->End(),
961 visitor,
962 minimum_age);
963 }
964 }
965 }
966 }
967 }
968
969 class MarkSweep::RecursiveMarkTask : public MarkStackTask<false> {
970 public:
971 RecursiveMarkTask(ThreadPool* thread_pool,
972 MarkSweep* mark_sweep,
973 accounting::ContinuousSpaceBitmap* bitmap,
974 uintptr_t begin,
975 uintptr_t end)
976 : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr),
977 bitmap_(bitmap),
978 begin_(begin),
979 end_(end) {}
980
981 protected:
982 accounting::ContinuousSpaceBitmap* const bitmap_;
983 const uintptr_t begin_;
984 const uintptr_t end_;
985
986 virtual void Finalize() {
987 delete this;
988 }
989
990 // Scans all of the objects
991 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
992 ScanObjectParallelVisitor visitor(this);
993 bitmap_->VisitMarkedRange(begin_, end_, visitor);
994 // Finish by emptying our local mark stack.
995 MarkStackTask::Run(self);
996 }
997 };
998
999 // Populates the mark stack based on the set of marked objects and
1000 // recursively marks until the mark stack is emptied.
1001 void MarkSweep::RecursiveMark() {
1002 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1003 // RecursiveMark will build the lists of known instances of the Reference classes. See
1004 // DelayReferenceReferent for details.
1005 if (kUseRecursiveMark) {
1006 const bool partial = GetGcType() == kGcTypePartial;
1007 ScanObjectVisitor scan_visitor(this);
1008 auto* self = Thread::Current();
1009 ThreadPool* thread_pool = heap_->GetThreadPool();
1010 size_t thread_count = GetThreadCount(false);
1011 const bool parallel = kParallelRecursiveMark && thread_count > 1;
1012 mark_stack_->Reset();
1013 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1014 if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
1015 (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
1016 current_space_bitmap_ = space->GetMarkBitmap();
1017 if (current_space_bitmap_ == nullptr) {
1018 continue;
1019 }
1020 if (parallel) {
1021 // We will use the mark stack in the future.
1022 // CHECK(mark_stack_->IsEmpty());
1023 // This function does not handle heap end increasing, so we must use the space end.
1024 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
1025 uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
1026 atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());
1027
1028 // Create a few worker tasks.
1029 const size_t n = thread_count * 2;
1030 while (begin != end) {
1031 uintptr_t start = begin;
1032 uintptr_t delta = (end - begin) / n;
1033 delta = RoundUp(delta, KB);
1034 if (delta < 16 * KB) delta = end - begin;
1035 begin += delta;
1036 auto* task = new RecursiveMarkTask(thread_pool,
1037 this,
1038 current_space_bitmap_,
1039 start,
1040 begin);
1041 thread_pool->AddTask(self, task);
1042 }
1043 thread_pool->SetMaxActiveWorkers(thread_count - 1);
1044 thread_pool->StartWorkers(self);
1045 thread_pool->Wait(self, true, true);
1046 thread_pool->StopWorkers(self);
1047 } else {
1048 // This function does not handle heap end increasing, so we must use the space end.
1049 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
1050 uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
1051 current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
1052 }
1053 }
1054 }
1055 }
1056 ProcessMarkStack(false);
1057 }
1058
1059 void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
1060 ScanGrayObjects(paused, minimum_age);
1061 ProcessMarkStack(paused);
1062 }
1063
1064 void MarkSweep::ReMarkRoots() {
1065 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1066 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
1067 Runtime::Current()->VisitRoots(this, static_cast<VisitRootFlags>(
1068 kVisitRootFlagNewRoots | kVisitRootFlagStopLoggingNewRoots | kVisitRootFlagClearRootLog));
1069 if (kVerifyRootsMarked) {
1070 TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings());
1071 VerifyRootMarkedVisitor visitor(this);
1072 Runtime::Current()->VisitRoots(&visitor);
1073 }
1074 }
1075
1076 void MarkSweep::SweepSystemWeaks(Thread* self) {
1077 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1078 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1079 Runtime::Current()->SweepSystemWeaks(this);
1080 }
1081
1082 class MarkSweep::VerifySystemWeakVisitor : public IsMarkedVisitor {
1083 public:
1084 explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
1085
1086 virtual mirror::Object* IsMarked(mirror::Object* obj)
1087 OVERRIDE
1088 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1089 mark_sweep_->VerifyIsLive(obj);
1090 return obj;
1091 }
1092
1093 MarkSweep* const mark_sweep_;
1094 };
1095
1096 void MarkSweep::VerifyIsLive(const mirror::Object* obj) {
1097 if (!heap_->GetLiveBitmap()->Test(obj)) {
1098 // TODO: Consider live stack? Has this code bitrotted?
1099 CHECK(!heap_->allocation_stack_->Contains(obj))
1100 << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
1101 }
1102 }
1103
1104 void MarkSweep::VerifySystemWeaks() {
1105 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1106 // Verify system weaks, uses a special object visitor which returns the input object.
1107 VerifySystemWeakVisitor visitor(this);
1108 Runtime::Current()->SweepSystemWeaks(&visitor);
1109 }
1110
1111 class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor {
1112 public:
1113 CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
1114 bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
1115 : mark_sweep_(mark_sweep),
1116 revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
1117 revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
1118 }
1119
1120 void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
1121 OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
1122 REQUIRES(Locks::heap_bitmap_lock_) {
1123 for (size_t i = 0; i < count; ++i) {
1124 mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
1125 }
1126 }
1127
1128 void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
1129 size_t count,
1130 const RootInfo& info ATTRIBUTE_UNUSED)
1131 OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
1132 REQUIRES(Locks::heap_bitmap_lock_) {
1133 for (size_t i = 0; i < count; ++i) {
1134 mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
1135 }
1136 }
1137
1138 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1139 ScopedTrace trace("Marking thread roots");
1140 // Note: self is not necessarily equal to thread since thread may be suspended.
1141 Thread* const self = Thread::Current();
1142 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1143 << thread->GetState() << " thread " << thread << " self " << self;
1144 thread->VisitRoots(this);
1145 if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
1146 ScopedTrace trace2("RevokeRosAllocThreadLocalBuffers");
1147 mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
1148 }
1149 // If thread is a running mutator, then act on behalf of the garbage collector.
1150 // See the code in ThreadList::RunCheckpoint.
1151 mark_sweep_->GetBarrier().Pass(self);
1152 }
1153
1154 private:
1155 MarkSweep* const mark_sweep_;
1156 const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
1157 };
1158
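// Runs a checkpoint on all threads so each mutator's roots are marked, then releases the heap
// bitmap and mutator locks while waiting for every thread to pass the barrier.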
1159 void MarkSweep::MarkRootsCheckpoint(Thread* self,
1160 bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
1161 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1162 CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
1163 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1164 // Request that the checkpoint is run on all threads, returning a count of the threads that
1165 // must run through the barrier, including self.
1166 size_t barrier_count = thread_list->RunCheckpoint(&check_point);
1167 // Release locks then wait for all mutator threads to pass the barrier.
1168 // If there are no threads to wait for, which implies that all the checkpoint functions are
1169 // finished, then there is no need to release the locks.
1170 if (barrier_count == 0) {
1171 return;
1172 }
1173 Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
1174 Locks::mutator_lock_->SharedUnlock(self);
1175 {
1176 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1177 gc_barrier_->Increment(self, barrier_count);
1178 }
1179 Locks::mutator_lock_->SharedLock(self);
1180 Locks::heap_bitmap_lock_->ExclusiveLock(self);
1181 }
1182
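// Sweeps only the objects recorded in the given allocation stack: unmarked objects are freed in
// batches through the chunk free buffer, while objects outside the space being swept are kept in
// the stack for the following spaces and the large object pass.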
1183 void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
1184 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1185 Thread* self = Thread::Current();
1186 mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
1187 sweep_array_free_buffer_mem_map_->BaseBegin());
1188 size_t chunk_free_pos = 0;
1189 ObjectBytePair freed;
1190 ObjectBytePair freed_los;
1191 // How many objects are left in the array, modified after each space is swept.
1192 StackReference<mirror::Object>* objects = allocations->Begin();
1193 size_t count = allocations->Size();
1194 // Change the order to ensure that the non-moving space is swept last as an optimization.
1195 std::vector<space::ContinuousSpace*> sweep_spaces;
1196 space::ContinuousSpace* non_moving_space = nullptr;
1197 for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
1198 if (space->IsAllocSpace() &&
1199 !immune_spaces_.ContainsSpace(space) &&
1200 space->GetLiveBitmap() != nullptr) {
1201 if (space == heap_->GetNonMovingSpace()) {
1202 non_moving_space = space;
1203 } else {
1204 sweep_spaces.push_back(space);
1205 }
1206 }
1207 }
1208 // Unlikely to sweep a significant amount of non-movable objects, so we do these after the
1209 // other alloc spaces as an optimization.
1210 if (non_moving_space != nullptr) {
1211 sweep_spaces.push_back(non_moving_space);
1212 }
1213 // Start by sweeping the continuous spaces.
1214 for (space::ContinuousSpace* space : sweep_spaces) {
1215 space::AllocSpace* alloc_space = space->AsAllocSpace();
1216 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1217 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1218 if (swap_bitmaps) {
1219 std::swap(live_bitmap, mark_bitmap);
1220 }
1221 StackReference<mirror::Object>* out = objects;
1222 for (size_t i = 0; i < count; ++i) {
1223 mirror::Object* const obj = objects[i].AsMirrorPtr();
1224 if (kUseThreadLocalAllocationStack && obj == nullptr) {
1225 continue;
1226 }
1227 if (space->HasAddress(obj)) {
1228 // This object is in the space, remove it from the array and add it to the sweep buffer
1229 // if needed.
1230 if (!mark_bitmap->Test(obj)) {
1231 if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
1232 TimingLogger::ScopedTiming t2("FreeList", GetTimings());
1233 freed.objects += chunk_free_pos;
1234 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
1235 chunk_free_pos = 0;
1236 }
1237 chunk_free_buffer[chunk_free_pos++] = obj;
1238 }
1239 } else {
1240 (out++)->Assign(obj);
1241 }
1242 }
1243 if (chunk_free_pos > 0) {
1244 TimingLogger::ScopedTiming t2("FreeList", GetTimings());
1245 freed.objects += chunk_free_pos;
1246 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
1247 chunk_free_pos = 0;
1248 }
1249 // All of the references which the space contained are no longer in the allocation stack;
1250 // update the count.
1251 count = out - objects;
1252 }
1253 // Handle the large object space.
1254 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
1255 if (large_object_space != nullptr) {
1256 accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
1257 accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
1258 if (swap_bitmaps) {
1259 std::swap(large_live_objects, large_mark_objects);
1260 }
1261 for (size_t i = 0; i < count; ++i) {
1262 mirror::Object* const obj = objects[i].AsMirrorPtr();
1263 // Handle large objects.
1264 if (kUseThreadLocalAllocationStack && obj == nullptr) {
1265 continue;
1266 }
1267 if (!large_mark_objects->Test(obj)) {
1268 ++freed_los.objects;
1269 freed_los.bytes += large_object_space->Free(self, obj);
1270 }
1271 }
1272 }
1273 {
1274 TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
1275 RecordFree(freed);
1276 RecordFreeLOS(freed_los);
1277 t2.NewTiming("ResetStack");
1278 allocations->Reset();
1279 }
1280 sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
1281 }
1282
1283 void MarkSweep::Sweep(bool swap_bitmaps) {
1284 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1285 // Ensure that nobody inserted items in the live stack after we swapped the stacks.
1286 CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
1287 {
1288 TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
1289 // Mark everything allocated since the last GC as live so that we can sweep concurrently,
1290 // knowing that new allocations won't be marked as live.
1291 accounting::ObjectStack* live_stack = heap_->GetLiveStack();
1292 heap_->MarkAllocStackAsLive(live_stack);
1293 live_stack->Reset();
1294 DCHECK(mark_stack_->IsEmpty());
1295 }
1296 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1297 if (space->IsContinuousMemMapAllocSpace()) {
1298 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
1299 TimingLogger::ScopedTiming split(
1300 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace",
1301 GetTimings());
1302 RecordFree(alloc_space->Sweep(swap_bitmaps));
1303 }
1304 }
1305 SweepLargeObjects(swap_bitmaps);
1306 }
1307
1308 void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
1309 space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
1310 if (los != nullptr) {
1311 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
1312 RecordFreeLOS(los->Sweep(swap_bitmaps));
1313 }
1314 }
1315
1316 // Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
1317 // marked, put it on the appropriate list in the heap for later processing.
1318 void MarkSweep::DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) {
1319 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
1320 }
1321
1322 class MarkVisitor {
1323 public:
1324 ALWAYS_INLINE explicit MarkVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
1325
1326 ALWAYS_INLINE void operator()(mirror::Object* obj,
1327 MemberOffset offset,
1328 bool is_static ATTRIBUTE_UNUSED) const
1329 REQUIRES(Locks::heap_bitmap_lock_)
1330 REQUIRES_SHARED(Locks::mutator_lock_) {
1331 if (kCheckLocks) {
1332 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
1333 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
1334 }
1335 mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
1336 }
1337
1338 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
1339 REQUIRES(Locks::heap_bitmap_lock_)
1340 REQUIRES_SHARED(Locks::mutator_lock_) {
1341 if (!root->IsNull()) {
1342 VisitRoot(root);
1343 }
1344 }
1345
1346 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
1347 REQUIRES(Locks::heap_bitmap_lock_)
1348 REQUIRES_SHARED(Locks::mutator_lock_) {
1349 if (kCheckLocks) {
1350 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
1351 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
1352 }
1353 mark_sweep_->MarkObject(root->AsMirrorPtr());
1354 }
1355
1356 private:
1357 MarkSweep* const mark_sweep_;
1358 };
1359
1360 // Scans an object reference. Determines the type of the reference
1361 // and dispatches to a specialized scanning routine.
1362 void MarkSweep::ScanObject(mirror::Object* obj) {
1363 MarkVisitor mark_visitor(this);
1364 DelayReferenceReferentVisitor ref_visitor(this);
1365 ScanObjectVisit(obj, mark_visitor, ref_visitor);
1366 }
1367
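// Splits the global mark stack into MarkStackTask chunks and drains them on the heap's thread
// pool, then verifies that no work chunks were leaked.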
1368 void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
1369 Thread* self = Thread::Current();
1370 ThreadPool* thread_pool = GetHeap()->GetThreadPool();
1371 const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
1372 static_cast<size_t>(MarkStackTask<false>::kMaxSize));
1373 CHECK_GT(chunk_size, 0U);
1374 // Split the current mark stack up into work tasks.
1375 for (auto* it = mark_stack_->Begin(), *end = mark_stack_->End(); it < end; ) {
1376 const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
1377 thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
1378 it += delta;
1379 }
1380 thread_pool->SetMaxActiveWorkers(thread_count - 1);
1381 thread_pool->StartWorkers(self);
1382 thread_pool->Wait(self, true, true);
1383 thread_pool->StopWorkers(self);
1384 mark_stack_->Reset();
1385 CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
1386 work_chunks_deleted_.LoadSequentiallyConsistent())
1387 << " some of the work chunks were leaked";
1388 }
1389
1390 // Scan anything that's on the mark stack.
1391 void MarkSweep::ProcessMarkStack(bool paused) {
1392 TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings());
1393 size_t thread_count = GetThreadCount(paused);
1394 if (kParallelProcessMarkStack && thread_count > 1 &&
1395 mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
1396 ProcessMarkStackParallel(thread_count);
1397 } else {
1398 // TODO: Tune this.
1399 static const size_t kFifoSize = 4;
1400 BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
1401 for (;;) {
1402 mirror::Object* obj = nullptr;
1403 if (kUseMarkStackPrefetch) {
1404 while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
1405 mirror::Object* mark_stack_obj = mark_stack_->PopBack();
1406 DCHECK(mark_stack_obj != nullptr);
1407 __builtin_prefetch(mark_stack_obj);
1408 prefetch_fifo.push_back(mark_stack_obj);
1409 }
1410 if (prefetch_fifo.empty()) {
1411 break;
1412 }
1413 obj = prefetch_fifo.front();
1414 prefetch_fifo.pop_front();
1415 } else {
1416 if (mark_stack_->IsEmpty()) {
1417 break;
1418 }
1419 obj = mark_stack_->PopBack();
1420 }
1421 DCHECK(obj != nullptr);
1422 ScanObject(obj);
1423 }
1424 }
1425 }
1426
1427 inline mirror::Object* MarkSweep::IsMarked(mirror::Object* object) {
1428 if (immune_spaces_.IsInImmuneRegion(object)) {
1429 return object;
1430 }
1431 if (current_space_bitmap_->HasAddress(object)) {
1432 return current_space_bitmap_->Test(object) ? object : nullptr;
1433 }
1434 return mark_bitmap_->Test(object) ? object : nullptr;
1435 }
1436
1437 void MarkSweep::FinishPhase() {
1438 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1439 if (kCountScannedTypes) {
1440 VLOG(gc)
1441 << "MarkSweep scanned"
1442 << " no reference objects=" << no_reference_class_count_.LoadRelaxed()
1443 << " normal objects=" << normal_count_.LoadRelaxed()
1444 << " classes=" << class_count_.LoadRelaxed()
1445 << " object arrays=" << object_array_count_.LoadRelaxed()
1446 << " references=" << reference_count_.LoadRelaxed()
1447 << " other=" << other_count_.LoadRelaxed();
1448 }
1449 if (kCountTasks) {
1450 VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
1451 }
1452 if (kMeasureOverhead) {
1453 VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
1454 }
1455 if (kProfileLargeObjects) {
1456 VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
1457 << " marked " << large_object_mark_.LoadRelaxed();
1458 }
1459 if (kCountMarkedObjects) {
1460 VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
1461 << " immune=" << mark_immune_count_.LoadRelaxed()
1462 << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
1463 << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
1464 }
1465 CHECK(mark_stack_->IsEmpty()); // Ensure that the mark stack is empty.
1466 mark_stack_->Reset();
1467 Thread* const self = Thread::Current();
1468 ReaderMutexLock mu(self, *Locks::mutator_lock_);
1469 WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
1470 heap_->ClearMarkedObjects();
1471 }
1472
1473 void MarkSweep::RevokeAllThreadLocalBuffers() {
1474 if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
1475 // If concurrent, rosalloc thread-local buffers are revoked at the
1476 // thread checkpoint. Bump pointer space thread-local buffers must
1477 // not be in use.
1478 GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
1479 } else {
1480 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1481 GetHeap()->RevokeAllThreadLocalBuffers();
1482 }
1483 }
1484
1485 } // namespace collector
1486 } // namespace gc
1487 } // namespace art
1488