/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space-inl.h"

#include <climits>
#include <functional>
#include <numeric>
#include <sstream>
#include <vector>

#include "base/logging.h"  // For VLOG.
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni/jni_internal.h"
#include "mark_sweep-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "mirror/reference-inl.h"
#include "monitor.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "write_barrier-inl.h"

using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

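// kProtectFromSpace: mprotect the from-space after the copy so that stale from-space references
// fault instead of silently reading reclaimed memory.
// kStoreStackTraces: dump thread stacks into the runtime fault message before marking, to help
// diagnose heap-corruption crashes.
// kBytesPromotedThreshold / kLargeObjectBytesAllocatedThreshold: promotion and large-object
// allocation limits that force the next generational collection to be a whole-heap one (see
// FinishPhase()).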
static constexpr bool kProtectFromSpace = true;
static constexpr bool kStoreStackTraces = false;
static constexpr size_t kBytesPromotedThreshold = 4 * MB;
static constexpr size_t kLargeObjectBytesAllocatedThreshold = 16 * MB;

void SemiSpace::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      immune_spaces_.AddSpace(space);
    } else if (space->GetLiveBitmap() != nullptr) {
      // TODO: We can probably also add this space to the immune region.
      if (space == to_space_ || collect_from_space_only_) {
        if (collect_from_space_only_) {
          // Bind the bitmaps of the main free list space and the non-moving space when we are
          // doing a bump pointer space only collection.
          CHECK(space == GetHeap()->GetPrimaryFreeListSpace() ||
                space == GetHeap()->GetNonMovingSpace());
        }
        CHECK(space->IsContinuousMemMapAllocSpace());
        space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      }
    }
  }
  if (collect_from_space_only_) {
    // We won't collect the large object space if this is a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "semispace"),
      mark_stack_(nullptr),
      is_large_object_space_immune_(false),
      to_space_(nullptr),
      to_space_live_bitmap_(nullptr),
      from_space_(nullptr),
      mark_bitmap_(nullptr),
      self_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      bytes_promoted_since_last_whole_heap_collection_(0),
      large_object_bytes_allocated_at_last_whole_heap_collection_(0),
      collect_from_space_only_(generational),
      promo_dest_space_(nullptr),
      fallback_space_(nullptr),
      bytes_moved_(0U),
      objects_moved_(0U),
      saved_bytes_(0U),
      collector_name_(name_),
      swap_semi_spaces_(true) {
}

void SemiSpace::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  // Semi-space collector is special since it is sometimes called with the mutators suspended
  // during the zygote creation and collector transitions. If we already exclusively hold the
  // mutator lock, then we can't lock it again since it will cause a deadlock.
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    GetHeap()->PreGcVerificationPaused(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    MarkingPhase();
    ReclaimPhase();
    GetHeap()->PostGcVerificationPaused(this);
  } else {
    Locks::mutator_lock_->AssertNotHeld(self);
    {
      ScopedPause pause(this);
      GetHeap()->PreGcVerificationPaused(this);
      GetHeap()->PrePauseRosAllocVerification(this);
      MarkingPhase();
    }
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      ReclaimPhase();
    }
    GetHeap()->PostGcVerification(this);
  }
  FinishPhase();
}

void SemiSpace::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_spaces_.Reset();
  is_large_object_space_immune_ = false;
  saved_bytes_ = 0;
  bytes_moved_ = 0;
  objects_moved_ = 0;
  self_ = Thread::Current();
  CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
  {
    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (generational_) {
    promo_dest_space_ = GetHeap()->GetPrimaryFreeListSpace();
  }
  fallback_space_ = GetHeap()->GetNonMovingSpace();
}

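// Have the heap's ReferenceProcessor process the soft, weak, finalizer, and phantom references
// discovered during marking; the semi-space collector does this with the mutators suspended.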
void SemiSpace::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

void SemiSpace::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_));
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }
  // Revoke the thread local buffers since the GC may allocate into a RosAllocSpace and this helps
  // to prevent fragmentation.
  RevokeAllThreadLocalBuffers();
  if (generational_) {
    if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
        GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
        GetCurrentIteration()->GetClearSoftReferences()) {
      // If this is an explicit, native allocation-triggered, or last attempt
      // collection, collect the whole heap.
      collect_from_space_only_ = false;
    }
    if (!collect_from_space_only_) {
      VLOG(heap) << "Whole heap collection";
      name_ = collector_name_ + " whole";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
      name_ = collector_name_ + " bps";
    }
  }

  if (!collect_from_space_only_) {
    // If non-generational, always clear soft references.
    // If generational, clear soft references if a whole heap collection.
    GetCurrentIteration()->SetClearSoftReferences(true);
  }
  Locks::mutator_lock_->AssertExclusiveHeld(self_);
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from the last GC), then point it to the beginning of
    // the from-space. For example, the very first GC or the
    // pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(GetTimings(), kUseRememberedSet && generational_, false, true);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for stop-the-world collectors.
  t.NewTiming("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t2("RevokeAllThreadLocalAllocationStacks", GetTimings());
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks();
  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    MarkRoots();
    // Recursively mark remaining objects.
    MarkReachableObjects();
  }
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  Runtime::Current()->BroadcastForNewSystemWeaks();
  Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
  // Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
  // before they are properly counted.
  RevokeAllThreadLocalBuffers();
  GetHeap()->RecordFreeRevoke();  // This is for the non-moving rosalloc space used by GSS.
  // Record freed memory.
  const int64_t from_bytes = from_space_->GetBytesAllocated();
  const int64_t to_bytes = bytes_moved_;
  const uint64_t from_objects = from_space_->GetObjectsAllocated();
  const uint64_t to_objects = objects_moved_;
  CHECK_LE(to_objects, from_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  RecordFree(ObjectBytePair(from_objects - to_objects, from_bytes - to_bytes));
  // Clear and protect the from space.
  from_space_->Clear();
  // b/31172841. Temporarily disable the from-space protection with the host debug build
  // due to some protection issue in the build server.
  if (kProtectFromSpace && !(kIsDebugBuild && !kIsTargetBuild)) {
    if (!from_space_->IsRosAllocSpace()) {
      // Protect with PROT_NONE.
      VLOG(heap) << "Protecting from_space_ : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_NONE);
    } else {
      // If RosAllocSpace, we'll leave it as PROT_READ here so the
      // rosalloc verification can read the metadata magic number and
      // protect it with PROT_NONE later in FinishPhase().
      VLOG(heap) << "Protecting from_space_ with PROT_READ : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_READ);
    }
  }
  heap_->PreSweepingGcVerification(this);
  if (swap_semi_spaces_) {
    heap_->SwapSemiSpaces();
  }
}

// Used to verify that there are no references to the from-space.
class SemiSpace::VerifyNoFromSpaceReferencesVisitor {
 public:
  explicit VerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space)
      : from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
    if (from_space_->HasAddress(ref)) {
      LOG(FATAL) << ref << " found in from space";
    }
  }

  // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kIsDebugBuild) {
      Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    CHECK(!from_space_->HasAddress(root->AsMirrorPtr()));
  }

 private:
  space::ContinuousMemMapAllocSpace* const from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  VerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences(visitor, VoidFunctor());
}

void SemiSpace::MarkReachableObjects() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  {
    TimingLogger::ScopedTiming t2("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
    if (table != nullptr) {
      // TODO: Improve naming.
      TimingLogger::ScopedTiming t2(
          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                   "UpdateAndMarkImageModUnionTable",
                                   GetTimings());
      table->UpdateAndMarkReferences(this);
      DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
    } else if ((space->IsImageSpace() || collect_from_space_only_) &&
               space->GetLiveBitmap() != nullptr) {
      // If the space has no mod union table (the non-moving space, app image spaces, main spaces
      // when the bump pointer space only collection is enabled), then we need to scan its live
      // bitmap or dirty cards as roots (including the objects on the live stack which have just
      // been marked in the live bitmap above in MarkAllocStackAsLive()).
      accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
      if (!space->IsImageSpace()) {
        DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "collect_from_space_only_=" << collect_from_space_only_;
        // App images currently do not have remembered sets.
        DCHECK_EQ(kUseRememberedSet, rem_set != nullptr);
      } else {
        DCHECK(rem_set == nullptr);
      }
      if (rem_set != nullptr) {
        TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
        rem_set->UpdateAndMarkReferences(from_space_, this);
      } else {
        TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings());
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      [this](mirror::Object* obj)
           REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
          ScanObject(obj);
        });
      }
      if (kIsDebugBuild) {
        // Verify that there are no from-space references that
        // remain in the space, that is, the remembered set (and the
        // card table) didn't miss any from-space references in the
        // space.
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      [this](Object* obj)
            REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
          DCHECK(obj != nullptr);
          VerifyNoFromSpaceReferences(obj);
        });
      }
    }
  }

  CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
  if (is_large_object_space_immune_ && los != nullptr) {
    TimingLogger::ScopedTiming t2("VisitLargeObjects", GetTimings());
    DCHECK(collect_from_space_only_);
    // Delay copying the live set to the marked set until here from
    // BindBitmaps() as the large objects on the allocation stack may
    // be newly added to the live set above in MarkAllocStackAsLive().
    los->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the
    // large objects as roots since they contain references to their
    // classes (primitive array classes) that could move, even though
    // they don't contain any other references.
    accounting::LargeObjectBitmap* large_live_bitmap = los->GetLiveBitmap();
    std::pair<uint8_t*, uint8_t*> range = los->GetBeginEndAtomic();
    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(range.first),
                                        reinterpret_cast<uintptr_t>(range.second),
                                        [this](mirror::Object* obj)
        REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
      ScanObject(obj);
    });
  }
  // Recursively process the mark stack.
  ProcessMarkStack();
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Reclaim unmarked objects.
  Sweep(false);
  // Swap the live and mark bitmaps for each space that we modified. This is an
  // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
  // bitmaps.
  SwapBitmaps();
  // Unbind the live and mark bitmaps.
  GetHeap()->UnBindBitmaps();
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }
  if (generational_) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

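// Resize the mark stack to new_size, preserving the entries already on it.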
void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (auto& obj : temp) {
    mark_stack_->PushBack(obj.AsMirrorPtr());
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

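// Copy size bytes from src to dest, but skip writing any whole destination page whose source
// bytes are all zero; the destination is assumed to be freshly zeroed. Returns the number of
// bytes whose write was avoided, i.e. pages kept clean.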
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  uint8_t* byte_dest = reinterpret_cast<uint8_t*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty; don't bother checking.
  const uint8_t* byte_src = reinterpret_cast<const uint8_t*>(src);
  const uint8_t* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  const size_t object_size = obj->SizeOf();
  size_t bytes_allocated, dummy;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<uint8_t*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move
    // (pseudo-promote) it to the main free list space (as sort
    // of an old generation).
    forward_address = promo_dest_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
                                                           nullptr, &dummy);
    if (UNLIKELY(forward_address == nullptr)) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
                                                     &dummy);
      // No logic for marking the bitmap, so it must be null.
      DCHECK(to_space_live_bitmap_ == nullptr);
    } else {
      bytes_promoted_ += bytes_allocated;
      // Dirty the card at the destination as it may contain
      // references (including the class pointer) to the bump pointer
      // space.
      WriteBarrier::ForEveryFieldWrite(forward_address);
      // Handle the bitmaps marking.
      accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space_->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (collect_from_space_only_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // If a bump pointer space only collection, delay the live
        // bitmap marking of the promoted object until it's popped off
        // the mark stack (ProcessMarkStack()). The rationale: we may
        // be in the middle of scanning the objects in the promo
        // destination space for
        // non-moving-space-to-bump-pointer-space references by
        // iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
                                                   &dummy);
    if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
      to_space_live_bitmap_->Set(forward_address);
    }
  }
  // If it's still null, attempt to use the fallback space.
  if (UNLIKELY(forward_address == nullptr)) {
    forward_address = fallback_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
                                                         nullptr, &dummy);
    CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space.";
    accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap();
    if (bitmap != nullptr) {
      bitmap->Set(forward_address);
    }
  }
  ++objects_moved_;
  bytes_moved_ += bytes_allocated;
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBakerReadBarrier) {
    obj->AssertReadBarrierState();
    forward_address->AssertReadBarrierState();
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         fallback_space_->HasAddress(forward_address) ||
         (generational_ && promo_dest_space_->HasAddress(forward_address)))
      << forward_address << "\n" << GetHeap()->DumpSpaces();
  return forward_address;
}

mirror::Object* SemiSpace::MarkObject(mirror::Object* root) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
  MarkObjectIfNotInToSpace(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
                                  bool do_atomic_update ATTRIBUTE_UNUSED) {
  MarkObject(obj_ptr);
}

void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    auto* root = roots[i];
    auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
    // The root can be in the to-space since we may visit the declaring class of an ArtMethod
    // multiple times if it is on the call stack.
    MarkObjectIfNotInToSpace(&ref);
    if (*root != ref.AsMirrorPtr()) {
      *root = ref.AsMirrorPtr();
    }
  }
}

void SemiSpace::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectIfNotInToSpace(roots[i]);
  }
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitRoots(this);
}

void SemiSpace::SweepSystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->SweepSystemWeaks(this);
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_;
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  DCHECK(mark_stack_->IsEmpty());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
  if (los != nullptr) {
    TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
    RecordFreeLOS(los->Sweep(swap_bitmaps));
  }
}

// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                       ObjPtr<mirror::Reference> reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}

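// Visitor used by ScanObject() to forward each reference field of a scanned object and to
// enqueue java.lang.ref.Reference referents for delayed processing.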
class SemiSpace::MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(SemiSpace* collector) : collector_(collector) {}

  void operator()(ObjPtr<Object> obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Object was already verified when we scanned it.
    collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
  }

  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

  // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kIsDebugBuild) {
      Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    // We may visit the same root multiple times, so avoid marking things in the to-space since
    // this is not handled by the GC.
    collector_->MarkObjectIfNotInToSpace(root);
  }

 private:
  SemiSpace* const collector_;
};

// Visit all of the references of an object and update them.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkObjectVisitor visitor(this);
  // Turn off read barrier. ZygoteCompactingCollector doesn't use it (even in the CC build.)
  obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
      visitor, visitor);
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
  const bool collect_from_space_only = collect_from_space_only_;
  if (collect_from_space_only) {
    // If a bump pointer space only collection (and the promotion is
    // enabled), we delay the live-bitmap marking of promoted objects
    // from MarkObject() until this function.
    live_bitmap = promo_dest_space_->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (collect_from_space_only && promo_dest_space_->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it,
      // which is delayed from MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
}

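// Returns the forwarding address for a from-space object (null if it has not been forwarded),
// obj itself if it lies in a region treated as already marked, or the mark bitmap result
// otherwise.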
mirror::Object* SemiSpace::IsMarked(mirror::Object* obj) {
  // All immune objects are assumed marked.
  if (from_space_->HasAddress(obj)) {
    // Returns either the forwarding address or null.
    return GetForwardingAddressInFromSpace(obj);
  } else if (collect_from_space_only_ ||
             immune_spaces_.IsInImmuneRegion(obj) ||
             to_space_->HasAddress(obj)) {
    return obj;  // Already forwarded, must be marked.
  }
  return mark_bitmap_->Test(obj) ? obj : nullptr;
}

bool SemiSpace::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
                                            // SemiSpace does the GC in a pause. No CAS needed.
                                            bool do_atomic_update ATTRIBUTE_UNUSED) {
  mirror::Object* obj = object->AsMirrorPtr();
  if (obj == nullptr) {
    return true;
  }
  mirror::Object* new_obj = IsMarked(obj);
  if (new_obj == nullptr) {
    return false;
  }
  if (new_obj != obj) {
    // Write barrier is not necessary since it still points to the same object, just at a different
    // address.
    object->Assign(new_obj);
  }
  return true;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

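// Tear down per-collection state: re-protect the from-space (RosAlloc case), null out the space
// pointers, clear the mark bitmaps, and, in generational mode, decide whether the next
// collection should be a whole-heap one.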
void SemiSpace::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // b/31172841. Temporarily disable the from-space protection with the host debug build
  // due to some protection issue in the build server.
  if (kProtectFromSpace && !(kIsDebugBuild && !kIsTargetBuild)) {
    if (from_space_->IsRosAllocSpace()) {
      VLOG(heap) << "Protecting from_space_ with PROT_NONE : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_NONE);
    }
  }
  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;
  CHECK(mark_stack_->IsEmpty());
  mark_stack_->Reset();
  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer
    // space only collection at the next collection by updating
    // collect_from_space_only_.
    if (collect_from_space_only_) {
      // Disable collect_from_space_only_ if the bytes promoted since the
      // last whole heap collection or the large object bytes
      // allocated exceed a threshold.
      bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
      bool bytes_promoted_threshold_exceeded =
          bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold;
      uint64_t current_los_bytes_allocated = los != nullptr ? los->GetBytesAllocated() : 0U;
      uint64_t last_los_bytes_allocated =
          large_object_bytes_allocated_at_last_whole_heap_collection_;
      bool large_object_bytes_threshold_exceeded =
          current_los_bytes_allocated >=
          last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
      if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
        collect_from_space_only_ = false;
      }
    } else {
      // Reset the counters.
      bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
      large_object_bytes_allocated_at_last_whole_heap_collection_ =
          los != nullptr ? los->GetBytesAllocated() : 0U;
      collect_from_space_only_ = true;
    }
  }
  // Clear all of the spaces' mark bitmaps.
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  GetHeap()->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art