/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space-inl.h"

#include <climits>
#include <functional>
#include <numeric>
#include <sstream>
#include <vector>

#include "base/logging.h"  // For VLOG.
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni/jni_internal.h"
#include "mark_sweep-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "mirror/reference-inl.h"
#include "monitor.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "write_barrier-inl.h"

using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

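// kProtectFromSpace: mprotect() the from-space once evacuation completes, so any
// stale from-space pointer faults immediately instead of silently reading
// reclaimed memory.
// kStoreStackTraces: dump all thread stacks into the runtime fault message
// before marking, as a breadcrumb for diagnosing heap-corruption crashes.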
static constexpr bool kProtectFromSpace = true;
static constexpr bool kStoreStackTraces = false;

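// Decide, per space, how this collection treats it: spaces we never (or only
// fully) collect become immune, and the to-space gets its live bitmap bound as
// its mark bitmap so marking and liveness share one bitmap.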
void SemiSpace::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      immune_spaces_.AddSpace(space);
    } else if (space->GetLiveBitmap() != nullptr) {
      // TODO: We can probably also add this space to the immune region.
      if (space == to_space_) {
        CHECK(space->IsContinuousMemMapAllocSpace());
        space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      }
    }
  }
}

SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "semispace"),
      mark_stack_(nullptr),
      to_space_(nullptr),
      to_space_live_bitmap_(nullptr),
      from_space_(nullptr),
      mark_bitmap_(nullptr),
      self_(nullptr),
      fallback_space_(nullptr),
      bytes_moved_(0U),
      objects_moved_(0U),
      saved_bytes_(0U),
      collector_name_(name_),
      swap_semi_spaces_(true) {
}

void SemiSpace::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  // Semi-space collector is special since it is sometimes called with the mutators suspended
  // during the zygote creation and collector transitions. If we already exclusively hold the
  // mutator lock, then we can't lock it again since it will cause a deadlock.
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    GetHeap()->PreGcVerificationPaused(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    MarkingPhase();
    ReclaimPhase();
    GetHeap()->PostGcVerificationPaused(this);
  } else {
    Locks::mutator_lock_->AssertNotHeld(self);
    {
      ScopedPause pause(this);
      GetHeap()->PreGcVerificationPaused(this);
      GetHeap()->PrePauseRosAllocVerification(this);
      MarkingPhase();
    }
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      ReclaimPhase();
    }
    GetHeap()->PostGcVerification(this);
  }
  FinishPhase();
}

void SemiSpace::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_spaces_.Reset();
  saved_bytes_ = 0;
  bytes_moved_ = 0;
  objects_moved_ = 0;
  self_ = Thread::Current();
  CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
  {
    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  fallback_space_ = GetHeap()->GetNonMovingSpace();
}

void SemiSpace::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      /*concurrent=*/false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

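// The entire mark/copy step runs in a single stop-the-world pause: roots are
// marked, every reachable object is copied into the to-space (with a forwarding
// address left behind in the old object), references are processed, and system
// weaks are swept.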
void SemiSpace::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_));
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }
  // Revoke the thread local buffers since the GC may allocate into a RosAllocSpace and this helps
  // to prevent fragmentation.
  RevokeAllThreadLocalBuffers();

  // Always clear soft references.
  GetCurrentIteration()->SetClearSoftReferences(true);
  Locks::mutator_lock_->AssertExclusiveHeld(self_);
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(GetTimings(), /*use_rem_sets=*/false, false, true);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  t.NewTiming("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t2("RevokeAllThreadLocalAllocationStacks", GetTimings());
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks();
  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    MarkRoots();
    // Recursively mark remaining objects.
    MarkReachableObjects();
  }
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  Runtime::Current()->BroadcastForNewSystemWeaks();
  Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
  // Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
  // before they are properly counted.
  RevokeAllThreadLocalBuffers();
  GetHeap()->RecordFreeRevoke();  // This is for the non-moving rosalloc space.
  // Record freed memory.
  const int64_t from_bytes = from_space_->GetBytesAllocated();
  const int64_t to_bytes = bytes_moved_;
  const uint64_t from_objects = from_space_->GetObjectsAllocated();
  const uint64_t to_objects = objects_moved_;
  CHECK_LE(to_objects, from_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  RecordFree(ObjectBytePair(from_objects - to_objects, from_bytes - to_bytes));
  // Clear and protect the from space.
  from_space_->Clear();
  // b/31172841. Temporarily disable the from-space protection with host debug build
  // due to some protection issue in the build server.
  if (kProtectFromSpace && !(kIsDebugBuild && !kIsTargetBuild)) {
    if (!from_space_->IsRosAllocSpace()) {
      // Protect with PROT_NONE.
      VLOG(heap) << "Protecting from_space_ : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_NONE);
    } else {
      // If RosAllocSpace, we'll leave it as PROT_READ here so the
      // rosalloc verification can read the metadata magic number and
      // protect it with PROT_NONE later in FinishPhase().
      VLOG(heap) << "Protecting from_space_ with PROT_READ : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_READ);
    }
  }
  heap_->PreSweepingGcVerification(this);
  if (swap_semi_spaces_) {
    heap_->SwapSemiSpaces();
  }
}

// Used to verify that there are no references to the from-space.
class SemiSpace::VerifyNoFromSpaceReferencesVisitor {
 public:
  explicit VerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space)
      : from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
    if (from_space_->HasAddress(ref)) {
      LOG(FATAL) << ref << " found in from space";
    }
  }

  // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kIsDebugBuild) {
      Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    CHECK(!from_space_->HasAddress(root->AsMirrorPtr()));
  }

 private:
  space::ContinuousMemMapAllocSpace* const from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  VerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences(visitor, VoidFunctor());
}

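// Transitively marks everything reachable: first the objects in the live stack,
// then cross-space references recorded by mod-union tables and remembered sets
// (falling back to a full live-bitmap scan when a space has neither), and
// finally drains the mark stack.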
void SemiSpace::MarkReachableObjects() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  {
    TimingLogger::ScopedTiming t2("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
    if (table != nullptr) {
      // TODO: Improve naming.
      TimingLogger::ScopedTiming t2(
          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                   "UpdateAndMarkImageModUnionTable",
          GetTimings());
      table->UpdateAndMarkReferences(this);
      DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
    } else if (space->IsImageSpace() && space->GetLiveBitmap() != nullptr) {
      // If the space has no mod union table (the non-moving space, app image spaces, main spaces
      // when the bump pointer space only collection is enabled,) then we need to scan its live
      // bitmap or dirty cards as roots (including the objects on the live stack which have just
      // been marked in the live bitmap above in MarkAllocStackAsLive().)
      accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
      if (!space->IsImageSpace()) {
        DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
            << "Space " << space->GetName();
        // App images currently do not have remembered sets.
      } else {
        DCHECK(rem_set == nullptr);
      }
      if (rem_set != nullptr) {
        TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
        rem_set->UpdateAndMarkReferences(from_space_, this);
      } else {
        TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings());
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      [this](mirror::Object* obj)
                                          REQUIRES(Locks::mutator_lock_,
                                                   Locks::heap_bitmap_lock_) {
                                        ScanObject(obj);
                                      });
      }
      if (kIsDebugBuild) {
        // Verify that there are no from-space references that
        // remain in the space, that is, the remembered set (and the
        // card table) didn't miss any from-space references in the
        // space.
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      [this](Object* obj)
                                          REQUIRES_SHARED(Locks::heap_bitmap_lock_,
                                                          Locks::mutator_lock_) {
                                        DCHECK(obj != nullptr);
                                        VerifyNoFromSpaceReferences(obj);
                                      });
      }
    }
  }
  // Recursively process the mark stack.
  ProcessMarkStack();
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Reclaim unmarked objects.
  Sweep(false);
  // Swap the live and mark bitmaps for each space that we modified. This is an
  // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
  // bitmaps.
  SwapBitmaps();
  // Unbind the live and mark bitmaps.
  GetHeap()->UnBindBitmaps();
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }
}

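// Resize() reallocates the stack's backing storage, so existing entries are
// copied out first and re-pushed afterwards.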
void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (auto& obj : temp) {
    mark_stack_->PushBack(obj.AsMirrorPtr());
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

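// memcpy() variant that compares page-sized chunks word by word and skips
// writing pages whose source is entirely zero. Since the to-space is freshly
// zero-mapped, untouched pages stay clean and the kernel never needs to commit
// them. Returns the number of bytes whose copy was avoided. The destination
// must be zero-filled already (checked below in debug builds).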
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  uint8_t* byte_dest = reinterpret_cast<uint8_t*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const uint8_t* byte_src = reinterpret_cast<const uint8_t*>(src);
  const uint8_t* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

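// Evacuates an object that has no forwarding address yet: allocate a copy in
// the to-space (or, if that fails, the non-moving fallback space), copy the
// contents, and return the new address. The caller is expected to install the
// forwarding address in the old object's lock word (see MarkObject() in
// semi_space-inl.h).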
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  const size_t object_size = obj->SizeOf();
  size_t bytes_allocated, unused_bytes_tl_bulk_allocated;
  // Copy it to the to-space.
  mirror::Object* forward_address = to_space_->AllocThreadUnsafe(
      self_, object_size, &bytes_allocated, nullptr, &unused_bytes_tl_bulk_allocated);

  if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  // If it's still null, attempt to use the fallback space.
  if (UNLIKELY(forward_address == nullptr)) {
    forward_address = fallback_space_->AllocThreadUnsafe(
        self_, object_size, &bytes_allocated, nullptr, &unused_bytes_tl_bulk_allocated);
    CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space.";
    accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap();
    if (bitmap != nullptr) {
      bitmap->Set(forward_address);
    }
  }
  ++objects_moved_;
  bytes_moved_ += bytes_allocated;
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBakerReadBarrier) {
    obj->AssertReadBarrierState();
    forward_address->AssertReadBarrierState();
  }
  DCHECK(to_space_->HasAddress(forward_address) || fallback_space_->HasAddress(forward_address))
      << forward_address << "\n" << GetHeap()->DumpSpaces();
  return forward_address;
}

mirror::Object* SemiSpace::MarkObject(mirror::Object* root) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
  MarkObjectIfNotInToSpace(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
                                  bool do_atomic_update ATTRIBUTE_UNUSED) {
  MarkObject(obj_ptr);
}

void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    auto* root = roots[i];
    auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
    // The root can be in the to-space since we may visit the declaring class of an ArtMethod
    // multiple times if it is on the call stack.
    MarkObjectIfNotInToSpace(&ref);
    if (*root != ref.AsMirrorPtr()) {
      *root = ref.AsMirrorPtr();
    }
  }
}

void SemiSpace::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectIfNotInToSpace(roots[i]);
  }
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitRoots(this);
}

void SemiSpace::SweepSystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->SweepSystemWeaks(this);
}

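// The semi-spaces themselves are never swept: the from-space is reclaimed
// wholesale via Clear() in MarkingPhase() and the to-space holds only the
// objects that were just copied, i.e. only live objects.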
bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_;
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  DCHECK(mark_stack_->IsEmpty());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
  if (los != nullptr) {
    TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
    RecordFreeLOS(los->Sweep(swap_bitmaps));
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                       ObjPtr<mirror::Reference> reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}

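// Reference visitor used by ScanObject(): forwards every reference field of a
// scanned object so it points at the to-space copy, and queues Reference
// referents for delayed processing.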
class SemiSpace::MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(SemiSpace* collector) : collector_(collector) {}

  void operator()(ObjPtr<Object> obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Object was already verified when we scanned it.
    collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
  }

  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

  // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kIsDebugBuild) {
      Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    // We may visit the same root multiple times, so avoid marking things in the to-space since
    // this is not handled by the GC.
    collector_->MarkObjectIfNotInToSpace(root);
  }

 private:
  SemiSpace* const collector_;
};

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkObjectVisitor visitor(this);
  // Turn off read barrier. ZygoteCompactingCollector doesn't use it (even in the CC build.)
  obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
      visitor, visitor);
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    ScanObject(obj);
  }
}

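// Returns the post-collection address of |obj| if it is marked, null otherwise.
// From-space objects are marked iff they already have a forwarding address;
// immune-space and to-space objects are always treated as marked; everything
// else consults the mark bitmap.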
mirror::Object* SemiSpace::IsMarked(mirror::Object* obj) {
  // All immune objects are assumed marked.
  if (from_space_->HasAddress(obj)) {
    // Returns either the forwarding address or null.
    return GetForwardingAddressInFromSpace(obj);
  } else if (immune_spaces_.IsInImmuneRegion(obj) || to_space_->HasAddress(obj)) {
    return obj;  // Already forwarded, must be marked.
  }
  return mark_bitmap_->Test(obj) ? obj : nullptr;
}

bool SemiSpace::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
                                            // SemiSpace does the GC in a pause. No CAS needed.
                                            bool do_atomic_update ATTRIBUTE_UNUSED) {
  mirror::Object* obj = object->AsMirrorPtr();
  if (obj == nullptr) {
    return true;
  }
  mirror::Object* new_obj = IsMarked(obj);
  if (new_obj == nullptr) {
    return false;
  }
  if (new_obj != obj) {
    // Write barrier is not necessary since it still points to the same object, just at a different
    // address.
    object->Assign(new_obj);
  }
  return true;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // b/31172841. Temporarily disable the from-space protection with host debug build
  // due to some protection issue in the build server.
  if (kProtectFromSpace && !(kIsDebugBuild && !kIsTargetBuild)) {
    if (from_space_->IsRosAllocSpace()) {
      VLOG(heap) << "Protecting from_space_ with PROT_NONE : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_NONE);
    }
  }
  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;
  CHECK(mark_stack_->IsEmpty());
  mark_stack_->Reset();
  // Clear all of the spaces' mark bitmaps.
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  GetHeap()->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art