/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space-inl.h"

#include <climits>
#include <functional>
#include <numeric>
#include <sstream>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kStoreStackTraces = false;
static constexpr size_t kBytesPromotedThreshold = 4 * MB;
static constexpr size_t kLargeObjectBytesAllocatedThreshold = 16 * MB;
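// Note: the two byte thresholds above feed the policy in FinishPhase(): once the bytes promoted
// since the last whole heap collection, or the large object bytes allocated since then, cross a
// threshold, the next generational collection is widened from a bump pointer space only
// collection to a whole heap collection.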

void SemiSpace::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      immune_spaces_.AddSpace(space);
    } else if (space->GetLiveBitmap() != nullptr) {
      // TODO: We can probably also add this space to the immune region.
      if (space == to_space_ || collect_from_space_only_) {
        if (collect_from_space_only_) {
          // Bind the bitmaps of the main free list space and the non-moving space when we are
          // doing a bump pointer space only collection.
          CHECK(space == GetHeap()->GetPrimaryFreeListSpace() ||
                space == GetHeap()->GetNonMovingSpace());
        }
        CHECK(space->IsContinuousMemMapAllocSpace());
        space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      }
    }
  }
  if (collect_from_space_only_) {
    // We won't collect the large object space if this is a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "semispace"),
      mark_stack_(nullptr),
      is_large_object_space_immune_(false),
      to_space_(nullptr),
      to_space_live_bitmap_(nullptr),
      from_space_(nullptr),
      mark_bitmap_(nullptr),
      self_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      bytes_promoted_since_last_whole_heap_collection_(0),
      large_object_bytes_allocated_at_last_whole_heap_collection_(0),
      collect_from_space_only_(generational),
      promo_dest_space_(nullptr),
      fallback_space_(nullptr),
      bytes_moved_(0U),
      objects_moved_(0U),
      saved_bytes_(0U),
      collector_name_(name_),
      swap_semi_spaces_(true) {
}

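// Phase ordering for one collection: InitializePhase() -> MarkingPhase() (mutators suspended) ->
// ReclaimPhase() -> FinishPhase(). RunPhases() below only arranges the locking around these
// steps: if the caller already holds the mutator lock exclusively (zygote creation, collector
// transitions), the phases run directly; otherwise the marking phase runs under a ScopedPause.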
void SemiSpace::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  // Semi-space collector is special since it is sometimes called with the mutators suspended
  // during the zygote creation and collector transitions. If we already exclusively hold the
  // mutator lock, then we can't lock it again since it will cause a deadlock.
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    GetHeap()->PreGcVerificationPaused(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    MarkingPhase();
    ReclaimPhase();
    GetHeap()->PostGcVerificationPaused(this);
  } else {
    Locks::mutator_lock_->AssertNotHeld(self);
    {
      ScopedPause pause(this);
      GetHeap()->PreGcVerificationPaused(this);
      GetHeap()->PrePauseRosAllocVerification(this);
      MarkingPhase();
    }
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      ReclaimPhase();
    }
    GetHeap()->PostGcVerification(this);
  }
  FinishPhase();
}

void SemiSpace::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_spaces_.Reset();
  is_large_object_space_immune_ = false;
  saved_bytes_ = 0;
  bytes_moved_ = 0;
  objects_moved_ = 0;
  self_ = Thread::Current();
  CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
  {
    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (generational_) {
    promo_dest_space_ = GetHeap()->GetPrimaryFreeListSpace();
  }
  fallback_space_ = GetHeap()->GetNonMovingSpace();
}

void SemiSpace::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

void SemiSpace::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_));
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }
  // Revoke the thread local buffers since the GC may allocate into a RosAllocSpace and this helps
  // to prevent fragmentation.
  RevokeAllThreadLocalBuffers();
  if (generational_) {
    if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
        GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
        GetCurrentIteration()->GetClearSoftReferences()) {
      // If this is an explicit, native allocation-triggered, or last attempt
      // collection, collect the whole heap.
      collect_from_space_only_ = false;
    }
    if (!collect_from_space_only_) {
      VLOG(heap) << "Whole heap collection";
      name_ = collector_name_ + " whole";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
      name_ = collector_name_ + " bps";
    }
  }

  if (!collect_from_space_only_) {
    // If non-generational, always clear soft references.
    // If generational, clear soft references if a whole heap collection.
    GetCurrentIteration()->SetClearSoftReferences(true);
  }
  Locks::mutator_lock_->AssertExclusiveHeld(self_);
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from the last GC), then point it to the beginning of
    // the from-space. For example, the very first GC or the
    // pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(GetTimings(), kUseRememberedSet && generational_, false, true);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  t.NewTiming("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t2("RevokeAllThreadLocalAllocationStacks", GetTimings());
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks();
  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    MarkRoots();
    // Recursively mark remaining objects.
    MarkReachableObjects();
  }
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
  // Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
  // before they are properly counted.
  RevokeAllThreadLocalBuffers();
  GetHeap()->RecordFreeRevoke();  // This is for the non-moving rosalloc space used by GSS.
  // Record freed memory.
  const int64_t from_bytes = from_space_->GetBytesAllocated();
  const int64_t to_bytes = bytes_moved_;
  const uint64_t from_objects = from_space_->GetObjectsAllocated();
  const uint64_t to_objects = objects_moved_;
  CHECK_LE(to_objects, from_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  RecordFree(ObjectBytePair(from_objects - to_objects, from_bytes - to_bytes));
  // Clear and protect the from space.
  from_space_->Clear();
  // b/31172841. Temporarily disable the from-space protection with host debug build
  // due to some protection issue in the build server.
  if (kProtectFromSpace && !(kIsDebugBuild && !kIsTargetBuild)) {
    if (!from_space_->IsRosAllocSpace()) {
      // Protect with PROT_NONE.
      VLOG(heap) << "Protecting from_space_ : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_NONE);
    } else {
      // If RosAllocSpace, we'll leave it as PROT_READ here so the
      // rosalloc verification can read the metadata magic number and
      // protect it with PROT_NONE later in FinishPhase().
      VLOG(heap) << "Protecting from_space_ with PROT_READ : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_READ);
    }
  }
  heap_->PreSweepingGcVerification(this);
  if (swap_semi_spaces_) {
    heap_->SwapSemiSpaces();
  }
}

// Used to verify that there are no references to the from-space.
class SemiSpace::VerifyNoFromSpaceReferencesVisitor {
 public:
  explicit VerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space)
      : from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
    if (from_space_->HasAddress(ref)) {
      LOG(FATAL) << ref << " found in from space";
    }
  }

  // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kIsDebugBuild) {
      Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    CHECK(!from_space_->HasAddress(root->AsMirrorPtr()));
  }

 private:
  space::ContinuousMemMapAllocSpace* const from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  VerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences(visitor, VoidFunctor());
}

void SemiSpace::MarkReachableObjects() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  {
    TimingLogger::ScopedTiming t2("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
    if (table != nullptr) {
      // TODO: Improve naming.
      TimingLogger::ScopedTiming t2(
          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                   "UpdateAndMarkImageModUnionTable",
          GetTimings());
      table->UpdateAndMarkReferences(this);
      DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
    } else if ((space->IsImageSpace() || collect_from_space_only_) &&
               space->GetLiveBitmap() != nullptr) {
      // If the space has no mod union table (the non-moving space, app image spaces, and the main
      // spaces when the bump pointer space only collection is enabled), then we need to scan its
      // live bitmap or dirty cards as roots (including the objects on the live stack which have
      // just been marked in the live bitmap above in MarkAllocStackAsLive().)
      accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
      if (!space->IsImageSpace()) {
        DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "collect_from_space_only_=" << collect_from_space_only_;
        // App images currently do not have remembered sets.
        DCHECK_EQ(kUseRememberedSet, rem_set != nullptr);
      } else {
        DCHECK(rem_set == nullptr);
      }
      if (rem_set != nullptr) {
        TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
        rem_set->UpdateAndMarkReferences(from_space_, this);
      } else {
        TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings());
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      [this](mirror::Object* obj)
                                          REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
                                        ScanObject(obj);
                                      });
      }
      if (kIsDebugBuild) {
        // Verify that there are no from-space references that
        // remain in the space, that is, the remembered set (and the
        // card table) didn't miss any from-space references in the
        // space.
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      [this](Object* obj)
                                          REQUIRES_SHARED(Locks::heap_bitmap_lock_,
                                                          Locks::mutator_lock_) {
                                        DCHECK(obj != nullptr);
                                        VerifyNoFromSpaceReferences(obj);
                                      });
      }
    }
  }

  CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
  if (is_large_object_space_immune_ && los != nullptr) {
    TimingLogger::ScopedTiming t2("VisitLargeObjects", GetTimings());
    DCHECK(collect_from_space_only_);
    // Delay copying the live set to the marked set until here from
    // BindBitmaps() as the large objects on the allocation stack may
    // be newly added to the live set above in MarkAllocStackAsLive().
    los->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the
    // large objects as roots as they contain references to their
    // classes (primitive array classes) that could move even though they
    // don't contain any other references.
    accounting::LargeObjectBitmap* large_live_bitmap = los->GetLiveBitmap();
    std::pair<uint8_t*, uint8_t*> range = los->GetBeginEndAtomic();
    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(range.first),
                                        reinterpret_cast<uintptr_t>(range.second),
                                        [this](mirror::Object* obj)
                                            REQUIRES(Locks::mutator_lock_,
                                                     Locks::heap_bitmap_lock_) {
                                          ScanObject(obj);
                                        });
  }
  // Recursively process the mark stack.
  ProcessMarkStack();
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Reclaim unmarked objects.
  Sweep(false);
  // Swap the live and mark bitmaps for each space that we modified. This is an
  // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
  // bitmaps.
  SwapBitmaps();
  // Unbind the live and mark bitmaps.
  GetHeap()->UnBindBitmaps();
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }
  if (generational_) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

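// Grow the mark stack to new_size: the existing entries are saved to a temporary vector, the
// stack is resized, and the entries are pushed back, since Resize() is not assumed to preserve
// the stack's contents.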
void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (auto& obj : temp) {
    mark_stack_->PushBack(obj.AsMirrorPtr());
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

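// Copy an object while skipping whole source pages that are entirely zero, so that destination
// pages of the pre-zeroed target space which would only receive zeros are never dirtied. For
// illustration only: copying a large array whose tail is still zero-filled can, assuming the zero
// tail covers whole aligned pages, leave several destination pages untouched; those pages are
// counted in the returned saved-bytes value, which ReclaimPhase() reports via VLOG(heap).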
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  uint8_t* byte_dest = reinterpret_cast<uint8_t*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const uint8_t* byte_src = reinterpret_cast<const uint8_t*>(src);
  const uint8_t* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

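// Copy an object that does not yet have a forwarding address and return its new location. In
// generational mode, objects that survived the previous GC (i.e. those below last_gc_to_space_end_)
// are pseudo-promoted into the main free list space; everything else is copied into the to-space,
// with the non-moving space as a fallback when either target is full. The caller (see MarkObject()
// in semi_space-inl.h) then records the returned address as the forwarding address in the old
// object's lock word.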
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  const size_t object_size = obj->SizeOf();
  size_t bytes_allocated, dummy;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<uint8_t*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move
    // (pseudo-promote) it to the main free list space (as sort
    // of an old generation.)
    forward_address = promo_dest_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
                                                           nullptr, &dummy);
    if (UNLIKELY(forward_address == nullptr)) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
                                                     &dummy);
      // No logic for marking the bitmap, so it must be null.
      DCHECK(to_space_live_bitmap_ == nullptr);
    } else {
      bytes_promoted_ += bytes_allocated;
      // Dirty the card at the destination as it may contain
      // references (including the class pointer) to the bump pointer
      // space.
      GetHeap()->WriteBarrierEveryFieldOf(forward_address);
      // Handle the bitmaps marking.
      accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space_->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (collect_from_space_only_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // If a bump pointer space only collection, delay the live
        // bitmap marking of the promoted object until it's popped off
        // the mark stack (ProcessMarkStack()). The rationale: we may
        // be in the middle of scanning the objects in the promo
        // destination space for
        // non-moving-space-to-bump-pointer-space references by
        // iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
                                                   &dummy);
    if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
      to_space_live_bitmap_->Set(forward_address);
    }
  }
  // If it's still null, attempt to use the fallback space.
  if (UNLIKELY(forward_address == nullptr)) {
    forward_address = fallback_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
                                                         nullptr, &dummy);
    CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space.";
    accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap();
    if (bitmap != nullptr) {
      bitmap->Set(forward_address);
    }
  }
  ++objects_moved_;
  bytes_moved_ += bytes_allocated;
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBakerReadBarrier) {
    obj->AssertReadBarrierState();
    forward_address->AssertReadBarrierState();
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         fallback_space_->HasAddress(forward_address) ||
         (generational_ && promo_dest_space_->HasAddress(forward_address)))
      << forward_address << "\n" << GetHeap()->DumpSpaces();
  return forward_address;
}

mirror::Object* SemiSpace::MarkObject(mirror::Object* root) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
  MarkObjectIfNotInToSpace(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
                                  bool do_atomic_update ATTRIBUTE_UNUSED) {
  MarkObject(obj_ptr);
}

void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    auto* root = roots[i];
    auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
    // The root can be in the to-space since we may visit the declaring class of an ArtMethod
    // multiple times if it is on the call stack.
    MarkObjectIfNotInToSpace(&ref);
    if (*root != ref.AsMirrorPtr()) {
      *root = ref.AsMirrorPtr();
    }
  }
}

void SemiSpace::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectIfNotInToSpace(roots[i]);
  }
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitRoots(this);
}

void SemiSpace::SweepSystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->SweepSystemWeaks(this);
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_;
}

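// Sweep unmarked objects out of the mem-map backed alloc spaces. The semi-spaces themselves are
// skipped (ShouldSweepSpace()): from-space is reclaimed wholesale via Clear() in MarkingPhase()
// and to-space only contains objects that were just copied, so neither needs a per-object sweep.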
void SemiSpace::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  DCHECK(mark_stack_->IsEmpty());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
  if (los != nullptr) {
    TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
    RecordFreeLOS(los->Sweep(swap_bitmaps));
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                       ObjPtr<mirror::Reference> reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}

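// Visitor used by ScanObject(): forwards every reference field of the visited object (updating
// the field in place to point at the to-space or promoted copy) and funnels java.lang.ref.Reference
// referents into DelayReferenceReferent() for later processing.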
class SemiSpace::MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(SemiSpace* collector) : collector_(collector) {}

  void operator()(ObjPtr<Object> obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Object was already verified when we scanned it.
    collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
  }

  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

  // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kIsDebugBuild) {
      Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    // We may visit the same root multiple times, so avoid marking things in the to-space since
    // this is not handled by the GC.
    collector_->MarkObjectIfNotInToSpace(root);
  }

 private:
  SemiSpace* const collector_;
};

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkObjectVisitor visitor(this);
  // Turn off read barrier. ZygoteCompactingCollector doesn't use it (even in the CC build.)
  obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
      visitor, visitor);
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
  if (collect_from_space_only_) {
    // If a bump pointer space only collection (and the promotion is
    // enabled,) we delay the live-bitmap marking of promoted objects
    // from MarkObject() until this function.
    live_bitmap = promo_dest_space_->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (collect_from_space_only_ && promo_dest_space_->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it,
      // which is delayed from MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
}

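// Returns the marked (forwarded) address of obj, or null if it is unmarked. For from-space
// objects, "marked" means a forwarding address has already been installed. Objects outside the
// from-space are treated as marked when they are immune, already in the to-space, or when this is
// a from-space-only (generational) collection; otherwise the mark bitmap decides.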
mirror::Object* SemiSpace::IsMarked(mirror::Object* obj) {
  // All immune objects are assumed marked.
  if (from_space_->HasAddress(obj)) {
    // Returns either the forwarding address or null.
    return GetForwardingAddressInFromSpace(obj);
  } else if (collect_from_space_only_ ||
             immune_spaces_.IsInImmuneRegion(obj) ||
             to_space_->HasAddress(obj)) {
    return obj;  // Already forwarded, must be marked.
  }
  return mark_bitmap_->Test(obj) ? obj : nullptr;
}

bool SemiSpace::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
                                            // SemiSpace does the GC in a pause. No CAS needed.
                                            bool do_atomic_update ATTRIBUTE_UNUSED) {
  mirror::Object* obj = object->AsMirrorPtr();
  if (obj == nullptr) {
    return true;
  }
  mirror::Object* new_obj = IsMarked(obj);
  if (new_obj == nullptr) {
    return false;
  }
  if (new_obj != obj) {
    // Write barrier is not necessary since it still points to the same object, just at a different
    // address.
    object->Assign(new_obj);
  }
  return true;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // b/31172841. Temporarily disable the from-space protection with host debug build
  // due to some protection issue in the build server.
  if (kProtectFromSpace && !(kIsDebugBuild && !kIsTargetBuild)) {
    if (from_space_->IsRosAllocSpace()) {
      VLOG(heap) << "Protecting from_space_ with PROT_NONE : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_NONE);
    }
  }
  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;
  CHECK(mark_stack_->IsEmpty());
  mark_stack_->Reset();
  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer
    // space only collection at the next collection by updating
    // collect_from_space_only_.
    if (collect_from_space_only_) {
      // Disable collect_from_space_only_ if the bytes promoted since the
      // last whole heap collection or the large object bytes
      // allocated exceed a threshold.
      bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
      bool bytes_promoted_threshold_exceeded =
          bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold;
      uint64_t current_los_bytes_allocated = los != nullptr ? los->GetBytesAllocated() : 0U;
      uint64_t last_los_bytes_allocated =
          large_object_bytes_allocated_at_last_whole_heap_collection_;
      bool large_object_bytes_threshold_exceeded =
          current_los_bytes_allocated >=
          last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
      if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
        collect_from_space_only_ = false;
      }
    } else {
      // Reset the counters.
      bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
      large_object_bytes_allocated_at_last_whole_heap_collection_ =
          los != nullptr ? los->GetBytesAllocated() : 0U;
      collect_from_space_only_ = true;
    }
  }
  // Clear all of the spaces' mark bitmaps.
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  GetHeap()->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art