/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space-inl.h"

#include <climits>
#include <functional>
#include <numeric>
#include <sstream>
#include <vector>

#include "base/logging.h"  // For VLOG.
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni/jni_internal.h"
#include "mark_sweep-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "mirror/reference-inl.h"
#include "monitor.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "write_barrier-inl.h"

using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

// If true, mprotect the from-space after the copy so that stale from-space references
// fault immediately instead of silently reading garbage.
static constexpr bool kProtectFromSpace = true;
// If true, dump all thread stacks into the runtime fault message before collecting, to
// help diagnose heap-corruption crashes.
static constexpr bool kStoreStackTraces = false;

void SemiSpace::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      immune_spaces_.AddSpace(space);
    } else if (space->GetLiveBitmap() != nullptr) {
      // TODO: We can probably also add this space to the immune region.
      if (space == to_space_) {
        CHECK(space->IsContinuousMemMapAllocSpace());
        space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      }
    }
  }
}

SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "semispace"),
      mark_stack_(nullptr),
      to_space_(nullptr),
      to_space_live_bitmap_(nullptr),
      from_space_(nullptr),
      mark_bitmap_(nullptr),
      self_(nullptr),
      fallback_space_(nullptr),
      bytes_moved_(0U),
      objects_moved_(0U),
      saved_bytes_(0U),
      collector_name_(name_),
      swap_semi_spaces_(true) {
}

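// Runs the full collection. The semi-space collector is also invoked from within an
// existing pause (zygote space creation, collector transitions), in which case the
// mutator lock is already held exclusively and no ScopedPause is needed; otherwise the
// collector pauses the mutators itself for the marking phase and then runs reclamation
// while holding the mutator lock only for reading.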
void SemiSpace::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  // The semi-space collector is special since it is sometimes called with the mutators
  // suspended during zygote creation and collector transitions. If we already hold the
  // mutator lock exclusively, we cannot lock it again without deadlocking.
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    GetHeap()->PreGcVerificationPaused(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    MarkingPhase();
    ReclaimPhase();
    GetHeap()->PostGcVerificationPaused(this);
  } else {
    Locks::mutator_lock_->AssertNotHeld(self);
    {
      ScopedPause pause(this);
      GetHeap()->PreGcVerificationPaused(this);
      GetHeap()->PrePauseRosAllocVerification(this);
      MarkingPhase();
    }
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      ReclaimPhase();
    }
    GetHeap()->PostGcVerification(this);
  }
  FinishPhase();
}

void SemiSpace::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_spaces_.Reset();
  saved_bytes_ = 0;
  bytes_moved_ = 0;
  objects_moved_ = 0;
  self_ = Thread::Current();
  CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
  {
    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  fallback_space_ = GetHeap()->GetNonMovingSpace();
}

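// Delegates java.lang.ref processing to the heap's ReferenceProcessor. Note that this
// collector always requests that soft references be cleared (see MarkingPhase()).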
void SemiSpace::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

void SemiSpace::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_));
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }
  // Revoke the thread local buffers since the GC may allocate into a RosAllocSpace and this helps
  // to prevent fragmentation.
  RevokeAllThreadLocalBuffers();

  // Always clear soft references.
  GetCurrentIteration()->SetClearSoftReferences(true);
  Locks::mutator_lock_->AssertExclusiveHeld(self_);
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(GetTimings(), /*use_rem_sets=*/false, false, true);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  t.NewTiming("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t2("RevokeAllThreadLocalAllocationStacks", GetTimings());
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks();
  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    MarkRoots();
    // Recursively mark remaining objects.
    MarkReachableObjects();
  }
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  Runtime::Current()->BroadcastForNewSystemWeaks();
  Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
  // Revoke buffers before measuring how many objects were moved, since the TLABs need to be
  // revoked before they are properly counted.
  RevokeAllThreadLocalBuffers();
  GetHeap()->RecordFreeRevoke();  // This is for the non-moving rosalloc space.
  // Record freed memory.
  const int64_t from_bytes = from_space_->GetBytesAllocated();
  const int64_t to_bytes = bytes_moved_;
  const uint64_t from_objects = from_space_->GetObjectsAllocated();
  const uint64_t to_objects = objects_moved_;
  CHECK_LE(to_objects, from_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  RecordFree(ObjectBytePair(from_objects - to_objects, from_bytes - to_bytes));
  // Clear and protect the from space.
  from_space_->Clear();
  // b/31172841. Temporarily disable the from-space protection on host debug builds
  // due to a protection issue on the build server.
  if (kProtectFromSpace && !(kIsDebugBuild && !kIsTargetBuild)) {
    if (!from_space_->IsRosAllocSpace()) {
      // Protect with PROT_NONE.
      VLOG(heap) << "Protecting from_space_ : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_NONE);
    } else {
      // If RosAllocSpace, we'll leave it as PROT_READ here so the
      // rosalloc verification can read the metadata magic number and
      // protect it with PROT_NONE later in FinishPhase().
      VLOG(heap) << "Protecting from_space_ with PROT_READ : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_READ);
    }
  }
  heap_->PreSweepingGcVerification(this);
  if (swap_semi_spaces_) {
    heap_->SwapSemiSpaces();
  }
}

// Used to verify that there are no references to the from-space.
class SemiSpace::VerifyNoFromSpaceReferencesVisitor {
 public:
  explicit VerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space)
      : from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
    if (from_space_->HasAddress(ref)) {
      LOG(FATAL) << ref << " found in from space";
    }
  }

  // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kIsDebugBuild) {
      Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    CHECK(!from_space_->HasAddress(root->AsMirrorPtr()));
  }

 private:
  space::ContinuousMemMapAllocSpace* const from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  VerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences(visitor, VoidFunctor());
}

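// Marks everything transitively reachable from the roots:
//  - Objects on the allocation stack are first marked as live.
//  - Immune spaces are not scanned directly; their mod-union tables record the dirty
//    cards, i.e. the only places that can hold references into the collected spaces.
//  - Spaces without a mod-union table are scanned through their remembered set if they
//    have one, otherwise by visiting their entire live bitmap.
// Finally the mark stack is drained, which copies every reachable from-space object.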
void SemiSpace::MarkReachableObjects() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  {
    TimingLogger::ScopedTiming t2("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
    if (table != nullptr) {
      // TODO: Improve naming.
      TimingLogger::ScopedTiming t2(
          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                   "UpdateAndMarkImageModUnionTable",
                                   GetTimings());
      table->UpdateAndMarkReferences(this);
      DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
    } else if (space->IsImageSpace() && space->GetLiveBitmap() != nullptr) {
      // If the space has no mod-union table (the non-moving space, app image spaces, and main
      // spaces when the bump-pointer-space-only collection is enabled), then we need to scan its
      // live bitmap or dirty cards as roots (including the objects on the live stack which have
      // just been marked in the live bitmap above in MarkAllocStackAsLive().)
      accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
      if (!space->IsImageSpace()) {
        DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
            << "Space " << space->GetName();
        // App images currently do not have remembered sets.
      } else {
        DCHECK(rem_set == nullptr);
      }
      if (rem_set != nullptr) {
        TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
        rem_set->UpdateAndMarkReferences(from_space_, this);
      } else {
        TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings());
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      [this](mirror::Object* obj)
           REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
          ScanObject(obj);
        });
      }
      if (kIsDebugBuild) {
        // Verify that there are no from-space references that
        // remain in the space, that is, the remembered set (and the
        // card table) didn't miss any from-space references in the
        // space.
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      [this](Object* obj)
            REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
          DCHECK(obj != nullptr);
          VerifyNoFromSpaceReferences(obj);
        });
      }
    }
  }
  // Recursively process the mark stack.
  ProcessMarkStack();
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Reclaim unmarked objects.
  Sweep(false);
  // Swap the live and mark bitmaps for each space which we modified. This is an
  // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
  // bitmaps.
  SwapBitmaps();
  // Unbind the live and mark bitmaps.
  GetHeap()->UnBindBitmaps();
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }
}

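// Grows the mark stack to new_size. Resize() discards the existing contents, so the
// entries are saved to a temporary vector first and pushed back afterwards.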
void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (auto& obj : temp) {
    mark_stack_->PushBack(obj.AsMirrorPtr());
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed onto the mark stack.
  mark_stack_->PushBack(obj);
}

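// memcpy variant that tries to avoid dirtying pages in the destination. The to-space
// is freshly mapped, so its pages start out as clean zero pages; writing to a page,
// even writing zeros, dirties it. For each whole page in the middle of the copy, only
// the non-zero words are written, so a source page that is entirely zero leaves the
// corresponding destination page untouched. Returns the number of bytes saved from
// being dirtied this way; the partial pages at either end of the range are always
// copied with a plain memcpy.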
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  uint8_t* byte_dest = reinterpret_cast<uint8_t*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, so don't bother checking.
  const uint8_t* byte_src = reinterpret_cast<const uint8_t*>(src);
  const uint8_t* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

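// Copies a from-space object that does not yet have a forwarding address. The copy is
// allocated in the to-space, or in the non-moving fallback space if the to-space is
// full. The caller (see semi_space-inl.h) installs the returned forwarding address in
// the from-space object's lock word and pushes the copy on the mark stack so that its
// references are updated later.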
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  const size_t object_size = obj->SizeOf();
  size_t bytes_allocated, dummy;
  // Copy it to the to-space.
  mirror::Object* forward_address = to_space_->AllocThreadUnsafe(self_,
                                                                 object_size,
                                                                 &bytes_allocated,
                                                                 nullptr,
                                                                 &dummy);

  if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  // If it's still null, attempt to use the fallback space.
  if (UNLIKELY(forward_address == nullptr)) {
    forward_address = fallback_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
                                                         nullptr, &dummy);
    CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space.";
    accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap();
    if (bitmap != nullptr) {
      bitmap->Set(forward_address);
    }
  }
  ++objects_moved_;
  bytes_moved_ += bytes_allocated;
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBakerReadBarrier) {
    obj->AssertReadBarrierState();
    forward_address->AssertReadBarrierState();
  }
  DCHECK(to_space_->HasAddress(forward_address) || fallback_space_->HasAddress(forward_address))
      << forward_address << "\n" << GetHeap()->DumpSpaces();
  return forward_address;
}

mirror::Object* SemiSpace::MarkObject(mirror::Object* root) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
  MarkObjectIfNotInToSpace(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
                                  bool do_atomic_update ATTRIBUTE_UNUSED) {
  MarkObject(obj_ptr);
}

void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    auto* root = roots[i];
    auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
    // The root can be in the to-space since we may visit the declaring class of an ArtMethod
    // multiple times if it is on the call stack.
    MarkObjectIfNotInToSpace(&ref);
    if (*root != ref.AsMirrorPtr()) {
      *root = ref.AsMirrorPtr();
    }
  }
}

void SemiSpace::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectIfNotInToSpace(roots[i]);
  }
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitRoots(this);
}

void SemiSpace::SweepSystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->SweepSystemWeaks(this);
}

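// Only the remaining spaces need sweeping: the from-space is reclaimed wholesale by
// Clear() in MarkingPhase(), and the to-space contains only the objects that were just
// copied, all of which are live.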
bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_;
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  DCHECK(mark_stack_->IsEmpty());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
  if (los != nullptr) {
    TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
    RecordFreeLOS(los->Sweep(swap_bitmaps));
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                       ObjPtr<mirror::Reference> reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}

class SemiSpace::MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(SemiSpace* collector) : collector_(collector) {}

  void operator()(ObjPtr<Object> obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Object was already verified when we scanned it.
    collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
  }

  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

  // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kIsDebugBuild) {
      Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    // We may visit the same root multiple times, so avoid marking things in the to-space since
    // this is not handled by the GC.
    collector_->MarkObjectIfNotInToSpace(root);
  }

 private:
  SemiSpace* const collector_;
};

// Visit all of the references of an object and update them.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkObjectVisitor visitor(this);
  // Turn off read barriers. ZygoteCompactingCollector doesn't use them (even in the CC build).
  obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
      visitor, visitor);
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    ScanObject(obj);
  }
}

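// Returns the post-collection address of obj if it is marked, null otherwise:
//  - a from-space object is marked iff a forwarding address has been installed in its
//    lock word (see semi_space-inl.h),
//  - immune-space and to-space objects are always treated as marked,
//  - anything else is looked up in the mark bitmap.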
mirror::Object* SemiSpace::IsMarked(mirror::Object* obj) {
  // All immune objects are assumed marked.
  if (from_space_->HasAddress(obj)) {
    // Returns either the forwarding address or null.
    return GetForwardingAddressInFromSpace(obj);
  } else if (immune_spaces_.IsInImmuneRegion(obj) || to_space_->HasAddress(obj)) {
    return obj;  // Already forwarded, must be marked.
  }
  return mark_bitmap_->Test(obj) ? obj : nullptr;
}

bool SemiSpace::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
                                            // SemiSpace does the GC in a pause. No CAS needed.
                                            bool do_atomic_update ATTRIBUTE_UNUSED) {
  mirror::Object* obj = object->AsMirrorPtr();
  if (obj == nullptr) {
    return true;
  }
  mirror::Object* new_obj = IsMarked(obj);
  if (new_obj == nullptr) {
    return false;
  }
  if (new_obj != obj) {
    // Write barrier is not necessary since it still points to the same object, just at a different
    // address.
    object->Assign(new_obj);
  }
  return true;
}

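// The heap configures both semi-spaces before each collection. Neither setter takes
// ownership; FinishPhase() nulls the pointers again once the collection completes.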
void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // b/31172841. Temporarily disable the from-space protection on host debug builds
  // due to a protection issue on the build server.
  if (kProtectFromSpace && !(kIsDebugBuild && !kIsTargetBuild)) {
    if (from_space_->IsRosAllocSpace()) {
      VLOG(heap) << "Protecting from_space_ with PROT_NONE : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_NONE);
    }
  }
  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;
  CHECK(mark_stack_->IsEmpty());
  mark_stack_->Reset();
  // Clear all of the spaces' mark bitmaps.
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  GetHeap()->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art