/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <atomic>
#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls to
// ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      immune_spaces_.AddSpace(space);
    }
  }
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                           (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_space_bitmap_(nullptr),
      mark_bitmap_(nullptr),
      mark_stack_(nullptr),
      gc_barrier_(new Barrier(0)),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent),
      live_stack_freeze_size_(0) {
  std::string error_msg;
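  // Scratch buffer used by SweepArray to batch FreeList calls on chunks of object pointers.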
  MemMap* mem_map = MemMap::MapAnonymous(
      "mark sweep sweep array free buffer", nullptr,
      RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
      PROT_READ | PROT_WRITE, false, false, &error_msg);
  CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
  sweep_array_free_buffer_mem_map_.reset(mem_map);
}

void MarkSweep::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_spaces_.Reset();
  no_reference_class_count_.StoreRelaxed(0);
  normal_count_.StoreRelaxed(0);
  class_count_.StoreRelaxed(0);
  object_array_count_.StoreRelaxed(0);
  other_count_.StoreRelaxed(0);
  reference_count_.StoreRelaxed(0);
  large_object_test_.StoreRelaxed(0);
  large_object_mark_.StoreRelaxed(0);
  overhead_time_.StoreRelaxed(0);
  work_chunks_created_.StoreRelaxed(0);
  work_chunks_deleted_.StoreRelaxed(0);
  mark_null_count_.StoreRelaxed(0);
  mark_immune_count_.StoreRelaxed(0);
  mark_fastpath_count_.StoreRelaxed(0);
  mark_slowpath_count_.StoreRelaxed(0);
  {
    // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (!GetCurrentIteration()->GetClearSoftReferences()) {
    // Always clear soft references for non-sticky collections.
    GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky);
  }
}

void MarkSweep::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  Locks::mutator_lock_->AssertNotHeld(self);
  if (IsConcurrent()) {
    GetHeap()->PreGcVerification(this);
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      MarkingPhase();
    }
    ScopedPause pause(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  } else {
    ScopedPause pause(this);
    GetHeap()->PreGcVerificationPaused(this);
    MarkingPhase();
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  }
  {
    // Sweeping is always done concurrently, even for non-concurrent mark sweep.
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  GetHeap()->PostGcVerification(this);
  FinishPhase();
}

void MarkSweep::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true,
      GetTimings(),
      GetCurrentIteration()->GetClearSoftReferences(),
      this);
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings());
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects; this is only required when doing a concurrent GC since mutators may
    // have dirtied cards during the concurrent marking phase.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  {
    TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks();
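    // After the swap, the live stack holds everything allocated since the last GC.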
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  heap_->PreSweepingGcVerification(this);
  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  // Enable the reference processing slow path, needs to be done with mutators paused since there
  // is no lock in the GetReferent fast path.
  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables; this also ages cards.
    heap_->ProcessCards(GetTimings(), false, true, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  BindBitmaps();
  FindDefaultSpaceBitmap();
  // Process dirty cards and add dirty cards to mod union tables.
  // If the GC type is non-sticky, then we just clear the cards instead of aging them.
  heap_->ProcessCards(GetTimings(), false, true, GetGcType() != kGcTypeSticky);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(mirror::Object* obj) const
      ALWAYS_INLINE
      REQUIRES(Locks::heap_bitmap_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : immune_spaces_.GetSpaces()) {
    const char* name = space->IsZygoteSpace()
        ? "UpdateAndMarkZygoteModUnionTable"
        : "UpdateAndMarkImageModUnionTable";
    DCHECK(space->IsZygoteSpace() || space->IsImageSpace()) << *space;
    TimingLogger::ScopedTiming t(name, GetTimings());
    accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
    if (mod_union_table != nullptr) {
      mod_union_table->UpdateAndMarkReferences(this);
    } else {
      // No mod-union table, scan all the live bits. This can only occur for app images.
      space->GetLiveBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                               reinterpret_cast<uintptr_t>(space->End()),
                                               ScanObjectVisitor(this));
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* const self = Thread::Current();
  // Process the references concurrently.
  ProcessReferences(self);
  SweepSystemWeaks(self);
  Runtime* const runtime = Runtime::Current();
  runtime->AllowNewSystemWeaks();
  // Clean up class loaders after system weaks are swept since that is how we know if class
  // unloading occurred.
  runtime->GetClassLinker()->CleanupClassLoaders();
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    GetHeap()->RecordFreeRevoke();
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space we modified. This is an optimization that
    // enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    SwapBitmaps();
    // Unbind the live and mark bitmaps.
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
    // We want to have the main space rather than the non-moving space if possible.
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      // If this is not the non-moving space, exit the loop early since this will be good enough.
      if (space != heap_->GetNonMovingSpace()) {
        break;
      }
    }
  }
  CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
      << heap_->DumpSpaces();
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
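  // Resize discards the old contents, so push the saved references back onto the new stack.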
  for (auto& obj : temp) {
    mark_stack_->PushBack(obj.AsMirrorPtr());
  }
}

mirror::Object* MarkSweep::MarkObject(mirror::Object* obj) {
  MarkObject(obj, nullptr, MemberOffset(0));
  return obj;
}

inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(obj);
  }
}

bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) {
  return IsMarked(ref->AsMirrorPtr());
}

class MarkSweepMarkObjectSlowPath {
 public:
  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep,
                                       mirror::Object* holder = nullptr,
                                       MemberOffset offset = MemberOffset(0))
      : mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {}

  void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kProfileLargeObjects) {
      // TODO: Differentiate between marking and testing somehow.
      ++mark_sweep_->large_object_test_;
      ++mark_sweep_->large_object_mark_;
    }
    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
    if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
                 (kIsDebugBuild && large_object_space != nullptr &&
                     !large_object_space->Contains(obj)))) {
      LOG(INTERNAL_FATAL) << "Tried to mark " << obj << " not contained by any spaces";
      if (holder_ != nullptr) {
        size_t holder_size = holder_->SizeOf();
        ArtField* field = holder_->FindFieldByOffset(offset_);
        LOG(INTERNAL_FATAL) << "Field info: "
                            << " holder=" << holder_
                            << " holder is "
                            << (mark_sweep_->GetHeap()->IsLiveObjectLocked(holder_)
                                    ? "alive" : "dead")
                            << " holder_size=" << holder_size
                            << " holder_type=" << PrettyTypeOf(holder_)
                            << " offset=" << offset_.Uint32Value()
                            << " field=" << (field != nullptr ? field->GetName() : "nullptr")
                            << " field_type="
                            << (field != nullptr ? field->GetTypeDescriptor() : "")
                            << " first_ref_field_offset="
                            << (holder_->IsClass()
                                    ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset(
                                        sizeof(void*))
                                    : holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
                            << " num_of_ref_fields="
                            << (holder_->IsClass()
                                    ? holder_->AsClass()->NumReferenceStaticFields()
                                    : holder_->GetClass()->NumReferenceInstanceFields())
                            << "\n";
        // Print the memory content of the holder.
        for (size_t i = 0; i < holder_size / sizeof(uint32_t); ++i) {
          uint32_t* p = reinterpret_cast<uint32_t*>(holder_);
          LOG(INTERNAL_FATAL) << &p[i] << ": " << "holder+" << (i * sizeof(uint32_t)) << " = "
                              << std::hex << p[i];
        }
      }
      PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
      MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
      {
        LOG(INTERNAL_FATAL) << "Attempting to see if it's a bad root";
        Thread* self = Thread::Current();
        if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
          mark_sweep_->VerifyRoots();
        } else {
          const bool heap_bitmap_exclusive_locked =
              Locks::heap_bitmap_lock_->IsExclusiveHeld(self);
          if (heap_bitmap_exclusive_locked) {
            Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
          }
          {
            ScopedThreadSuspension sts(self, kSuspended);
            ScopedSuspendAll ssa(__FUNCTION__);
            mark_sweep_->VerifyRoots();
          }
          if (heap_bitmap_exclusive_locked) {
            Locks::heap_bitmap_lock_->ExclusiveLock(self);
          }
        }
      }
      LOG(FATAL) << "Can't mark invalid object";
    }
  }

 private:
  MarkSweep* const mark_sweep_;
  mirror::Object* const holder_;
  MemberOffset offset_;
};

inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj,
                                         mirror::Object* holder,
                                         MemberOffset offset) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_spaces_.IsInImmuneRegion(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(mark_bitmap_->Test(obj));
  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
    if (kCountMarkedObjects) {
      ++mark_fastpath_count_;
    }
    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
      PushOnMarkStack(obj);  // This object was not previously marked.
    }
  } else {
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    MarkSweepMarkObjectSlowPath visitor(this, holder, offset);
    // TODO: We already know that the object is not in the current_space_bitmap_ but
    // MarkBitmap::Set will check again.
    if (!mark_bitmap_->Set(obj, visitor)) {
      PushOnMarkStack(obj);  // Was not already marked, push.
    }
  }
}

inline void MarkSweep::PushOnMarkStack(mirror::Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // Lock is not needed but is here anyways to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_spaces_.IsInImmuneRegion(obj)) {
    DCHECK(IsMarked(obj) != nullptr);
    return false;
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
  if (LIKELY(object_bitmap->HasAddress(obj))) {
    return !object_bitmap->AtomicTestAndSet(obj);
  }
  MarkSweepMarkObjectSlowPath visitor(this);
  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}

void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) {
  MarkObject(ref->AsMirrorPtr(), nullptr, MemberOffset(0));
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(mirror::Object* obj,
                                  mirror::Object* holder,
                                  MemberOffset offset) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj, holder, offset);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

class VerifyRootMarkedVisitor : public SingleRootVisitor {
 public:
  explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }

  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
  }

 private:
  MarkSweep* const collector_;
};

void MarkSweep::VisitRoots(mirror::Object*** roots,
                           size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectNonNull(*roots[i]);
  }
}

void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                           size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectNonNull(roots[i]->AsMirrorPtr());
  }
}

class VerifyRootVisitor : public SingleRootVisitor {
 public:
  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // See if the root is on any space bitmap.
    auto* heap = Runtime::Current()->GetHeap();
    if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
      space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
      if (large_object_space != nullptr && !large_object_space->Contains(root)) {
        LOG(INTERNAL_FATAL) << "Found invalid root: " << root << " " << info;
      }
    }
  }
};

void MarkSweep::VerifyRoots() {
  VerifyRootVisitor visitor;
  Runtime::Current()->GetThreadList()->VisitRoots(&visitor);
}

void MarkSweep::MarkRoots(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    Runtime::Current()->VisitRoots(this);
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitNonThreadRoots(this);
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(
      this, static_cast<VisitRootFlags>(flags | kVisitRootFlagNonMoving));
}

class DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {}

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      REQUIRES(Locks::heap_bitmap_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool,
                MarkSweep* mark_sweep,
                size_t mark_stack_size,
                StackReference<mirror::Object>* mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != nullptr);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    ALWAYS_INLINE MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                            MarkSweep* mark_sweep)
        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    ALWAYS_INLINE void operator()(mirror::Object* obj,
                                  MemberOffset offset,
                                  bool is_static ATTRIBUTE_UNUSED) const
        SHARED_REQUIRES(Locks::mutator_lock_) {
      Mark(obj->GetFieldObject<mirror::Object>(offset));
    }

    void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
        SHARED_REQUIRES(Locks::mutator_lock_) {
      if (!root->IsNull()) {
        VisitRoot(root);
      }
    }

    void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
        SHARED_REQUIRES(Locks::mutator_lock_) {
      if (kCheckLocks) {
        Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
        Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
      }
      Mark(root->AsMirrorPtr());
    }

   private:
    ALWAYS_INLINE void Mark(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_) {
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
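          // Objects at or beyond the atomic finger will still be visited by the in-progress
          // bitmap scan, so they need not be pushed; the fence orders the atomic mark above
          // against the finger load below.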
          std::atomic_thread_fence(std::memory_order_seq_cst);
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    ALWAYS_INLINE explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task)
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(mirror::Object* obj) const
        REQUIRES(Locks::heap_bitmap_lock_)
        SHARED_REQUIRES(Locks::mutator_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  StackReference<mirror::Object> mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  ALWAYS_INLINE void MarkStackPush(mirror::Object* obj)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_,
                                     mark_sweep_,
                                     kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++].Assign(obj);
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self ATTRIBUTE_UNUSED)
      REQUIRES(Locks::heap_bitmap_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
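    // Each object is prefetched a few pops before it is scanned to hide cache-miss latency.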
    for (;;) {
      mirror::Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          mirror::Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool,
               MarkSweep* mark_sweep,
               accounting::ContinuousSpaceBitmap* bitmap,
               uint8_t* begin,
               uint8_t* end,
               uint8_t minimum_age,
               size_t mark_stack_size,
               StackReference<mirror::Object>* mark_stack_obj,
               bool clear_card)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age),
        clear_card_(clear_card) {}

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  uint8_t* const begin_;
  uint8_t* const end_;
  const uint8_t minimum_age_;
  const bool clear_card_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = clear_card_
        ? card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_)
        : card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  // Use fewer threads if we are in a background state (non jank perceptible) since we want to
  // leave more CPU time for the foreground apps.
  if (heap_->GetThreadPool() == nullptr || !Runtime::Current()->InJankPerceptibleProcessState()) {
    return 1;
  }
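  // +1 to include the calling GC thread, which also contributes work while waiting on the pool.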
  return (paused ? heap_->GetParallelGCThreadCount() : heap_->GetConcGCThreadCount()) + 1;
}

void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning, TODO: fix.
  if (kParallelCardScan && thread_count > 1) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
                                 GetTimings());
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    StackReference<mirror::Object>* mark_stack_begin = mark_stack_->Begin();
    StackReference<mirror::Object>* mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      uint8_t* card_begin = space->Begin();
      uint8_t* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK_ALIGNED(card_begin, accounting::CardTable::kCardSize);
      DCHECK_ALIGNED(card_end, accounting::CardTable::kCardSize);
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // If paused and the space is neither zygote nor image space, we could clear the dirty
      // cards to avoid accumulating them to increase card scanning load in the following GC
      // cycles. We need to keep dirty cards of image space and zygote space in order to track
      // references to the other spaces.
      bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool,
                                      this,
                                      space->GetMarkBitmap(),
                                      card_begin,
                                      card_begin + card_increment,
                                      minimum_age,
                                      mark_stack_increment,
                                      mark_stack_end,
                                      clear_card);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        const char* name = nullptr;
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects";
            break;
          case space::kGcRetentionPolicyFullCollect:
            name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects";
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            name = paused ? "(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects";
            break;
          default:
            LOG(FATAL) << "Unreachable";
            UNREACHABLE();
        }
        TimingLogger::ScopedTiming t(name, GetTimings());
        ScanObjectVisitor visitor(this);
        bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
        if (clear_card) {
          card_table->Scan<true>(space->GetMarkBitmap(),
                                 space->Begin(),
                                 space->End(),
                                 visitor,
                                 minimum_age);
        } else {
          card_table->Scan<false>(space->GetMarkBitmap(),
                                  space->Begin(),
                                  space->End(),
                                  visitor,
                                  minimum_age);
        }
      }
    }
  }
}

class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool,
                    MarkSweep* mark_sweep,
                    accounting::ContinuousSpaceBitmap* bitmap,
                    uintptr_t begin,
                    uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {}

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
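            // A remainder smaller than 16 KB is not worth splitting, so the final task takes
            // it all.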
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool,
                                               this,
                                               current_space_bitmap_,
                                               start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  Runtime::Current()->VisitRoots(this, static_cast<VisitRootFlags>(
      kVisitRootFlagNewRoots | kVisitRootFlagStopLoggingNewRoots | kVisitRootFlagClearRootLog));
  if (kVerifyRootsMarked) {
    TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings());
    VerifyRootMarkedVisitor visitor(this);
    Runtime::Current()->VisitRoots(&visitor);
  }
}

void MarkSweep::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(this);
}

class VerifySystemWeakVisitor : public IsMarkedVisitor {
 public:
  explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual mirror::Object* IsMarked(mirror::Object* obj)
      OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    mark_sweep_->VerifyIsLive(obj);
    return obj;
  }

  MarkSweep* const mark_sweep_;
};

void MarkSweep::VerifyIsLive(const mirror::Object* obj) {
  if (!heap_->GetLiveBitmap()->Test(obj)) {
    // TODO: Consider live stack? Has this code bitrotted?
    CHECK(!heap_->allocation_stack_->Contains(obj))
        << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
  }
}

void MarkSweep::VerifySystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Verify system weaks, uses a special object visitor which returns the input object.
  VerifySystemWeakVisitor visitor(this);
  Runtime::Current()->SweepSystemWeaks(&visitor);
}

class CheckpointMarkThreadRoots : public Closure, public RootVisitor {
 public:
  CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
                            bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      : mark_sweep_(mark_sweep),
        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  }

  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
    }
  }

  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
    }
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    ScopedTrace trace("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* const self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(this);
    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
      ScopedTrace trace2("RevokeRosAllocThreadLocalBuffers");
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* const mark_sweep_;
  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks, then wait for all mutator threads to pass the barrier. If there are no
  // threads to wait for, which implies that all the checkpoint functions have already finished,
  // then there is no need to release the locks.
  if (barrier_count == 0) {
    return;
  }
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
      sweep_array_free_buffer_mem_map_->BaseBegin());
  size_t chunk_free_pos = 0;
  ObjectBytePair freed;
  ObjectBytePair freed_los;
  // How many objects are left in the array, modified after each space is swept.
  StackReference<mirror::Object>* objects = allocations->Begin();
  size_t count = allocations->Size();
  // Change the order so that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() &&
        !immune_spaces_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // We are unlikely to sweep a significant number of non-movable objects, so we do these after
  // the other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
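    // References outside this space are compacted toward the front of the array so that later
    // spaces only re-examine the survivors.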
    StackReference<mirror::Object>* out = objects;
    for (size_t i = 0; i < count; ++i) {
      mirror::Object* const obj = objects[i].AsMirrorPtr();
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            TimingLogger::ScopedTiming t2("FreeList", GetTimings());
            freed.objects += chunk_free_pos;
            freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        (out++)->Assign(obj);
      }
    }
    if (chunk_free_pos > 0) {
      TimingLogger::ScopedTiming t2("FreeList", GetTimings());
      freed.objects += chunk_free_pos;
      freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  if (large_object_space != nullptr) {
    accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
    accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(large_live_objects, large_mark_objects);
    }
    for (size_t i = 0; i < count; ++i) {
      mirror::Object* const obj = objects[i].AsMirrorPtr();
      // Handle large objects.
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (!large_mark_objects->Test(obj)) {
        ++freed_los.objects;
        freed_los.bytes += large_object_space->Free(self, obj);
      }
    }
  }
  {
    TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
    RecordFree(freed);
    RecordFreeLOS(freed_los);
    t2.NewTiming("ResetStack");
    allocations->Reset();
  }
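  // Return the scratch buffer's physical pages to the kernel until the next use.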
  sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  {
    TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
    // Mark everything allocated since the last GC as live so that we can sweep concurrently,
    // knowing that new allocations won't be marked as live.
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
    DCHECK(mark_stack_->IsEmpty());
  }
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace",
          GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
  if (los != nullptr) {
    TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
    RecordFreeLOS(los->Sweep(swap_bitmaps));
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
}

class MarkVisitor {
 public:
  ALWAYS_INLINE explicit MarkVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  ALWAYS_INLINE void operator()(mirror::Object* obj,
                                MemberOffset offset,
                                bool is_static ATTRIBUTE_UNUSED) const
      REQUIRES(Locks::heap_bitmap_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES(Locks::heap_bitmap_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES(Locks::heap_bitmap_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(root->AsMirrorPtr());
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(mirror::Object* obj) {
  MarkVisitor mark_visitor(this);
  DelayReferenceReferentVisitor ref_visitor(this);
  ScanObjectVisit(obj, mark_visitor, ref_visitor);
}

void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
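  // Each task gets an even share of the mark stack, capped at one task's local stack capacity.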
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
  for (auto* it = mark_stack_->Begin(), *end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
           work_chunks_deleted_.LoadSequentiallyConsistent())
      << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings());
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      mirror::Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          mirror::Object* mark_stack_obj = mark_stack_->PopBack();
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
}

inline mirror::Object* MarkSweep::IsMarked(mirror::Object* object) {
  if (immune_spaces_.IsInImmuneRegion(object)) {
    return object;
  }
  if (current_space_bitmap_->HasAddress(object)) {
    return current_space_bitmap_->Test(object) ? object : nullptr;
  }
  return mark_bitmap_->Test(object) ? object : nullptr;
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (kCountScannedTypes) {
    VLOG(gc)
        << "MarkSweep scanned"
        << " no reference objects=" << no_reference_class_count_.LoadRelaxed()
        << " normal objects=" << normal_count_.LoadRelaxed()
        << " classes=" << class_count_.LoadRelaxed()
        << " object arrays=" << object_array_count_.LoadRelaxed()
        << " references=" << reference_count_.LoadRelaxed()
        << " other=" << other_count_.LoadRelaxed();
  }
  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
  }
  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
  }
  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
             << " marked " << large_object_mark_.LoadRelaxed();
  }
  if (kCountMarkedObjects) {
    VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
             << " immune=" << mark_immune_count_.LoadRelaxed()
             << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
             << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
  }
  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
  mark_stack_->Reset();
  Thread* const self = Thread::Current();
  ReaderMutexLock mu(self, *Locks::mutator_lock_);
  WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint. Bump pointer space thread-local buffers must
    // not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    GetHeap()->RevokeAllThreadLocalBuffers();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art