1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/heap/mark-compact.h"
6 
7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h"
9 #include "src/base/sys-info.h"
10 #include "src/code-stubs.h"
11 #include "src/compilation-cache.h"
12 #include "src/deoptimizer.h"
13 #include "src/execution.h"
14 #include "src/frames-inl.h"
15 #include "src/gdb-jit.h"
16 #include "src/global-handles.h"
17 #include "src/heap/array-buffer-tracker.h"
18 #include "src/heap/gc-tracer.h"
19 #include "src/heap/incremental-marking.h"
20 #include "src/heap/mark-compact-inl.h"
21 #include "src/heap/object-stats.h"
22 #include "src/heap/objects-visiting.h"
23 #include "src/heap/objects-visiting-inl.h"
24 #include "src/heap/slots-buffer.h"
25 #include "src/heap/spaces-inl.h"
26 #include "src/ic/ic.h"
27 #include "src/ic/stub-cache.h"
28 #include "src/profiler/cpu-profiler.h"
29 #include "src/v8.h"
30 
31 namespace v8 {
32 namespace internal {
33 
34 
35 const char* Marking::kWhiteBitPattern = "00";
36 const char* Marking::kBlackBitPattern = "11";
37 const char* Marking::kGreyBitPattern = "10";
38 const char* Marking::kImpossibleBitPattern = "01";
39 
40 
41 // The following has to hold in order for {Marking::MarkBitFrom} not to produce
42 // an invalid {kImpossibleBitPattern} in the marking bitmap via overlapping bits.
43 STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
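// Illustrative example (an assumption about the bitmap layout, not taken from
// the original comments): with one mark bit per word and an object's color
// encoded in the two bits at its start, a hypothetical one-word object would
// share its second mark bit with the first mark bit of the next object, so
// marking that next object grey or black ("10"/"11") would make the previous
// object read as the impossible pattern "01".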
44 
45 
46 // -------------------------------------------------------------------------
47 // MarkCompactCollector
48 
49 MarkCompactCollector::MarkCompactCollector(Heap* heap)
50     :  // NOLINT
51 #ifdef DEBUG
52       state_(IDLE),
53 #endif
54       marking_parity_(ODD_MARKING_PARITY),
55       was_marked_incrementally_(false),
56       evacuation_(false),
57       slots_buffer_allocator_(nullptr),
58       migration_slots_buffer_(nullptr),
59       heap_(heap),
60       marking_deque_memory_(NULL),
61       marking_deque_memory_committed_(0),
62       code_flusher_(nullptr),
63       have_code_to_deoptimize_(false),
64       compacting_(false),
65       sweeping_in_progress_(false),
66       compaction_in_progress_(false),
67       pending_sweeper_tasks_semaphore_(0),
68       pending_compaction_tasks_semaphore_(0) {
69 }
70 
71 #ifdef VERIFY_HEAP
72 class VerifyMarkingVisitor : public ObjectVisitor {
73  public:
74   explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
75 
76   void VisitPointers(Object** start, Object** end) override {
77     for (Object** current = start; current < end; current++) {
78       if ((*current)->IsHeapObject()) {
79         HeapObject* object = HeapObject::cast(*current);
80         CHECK(heap_->mark_compact_collector()->IsMarked(object));
81       }
82     }
83   }
84 
85   void VisitEmbeddedPointer(RelocInfo* rinfo) override {
86     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
87     if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
88       Object* p = rinfo->target_object();
89       VisitPointer(&p);
90     }
91   }
92 
93   void VisitCell(RelocInfo* rinfo) override {
94     Code* code = rinfo->host();
95     DCHECK(rinfo->rmode() == RelocInfo::CELL);
96     if (!code->IsWeakObject(rinfo->target_cell())) {
97       ObjectVisitor::VisitCell(rinfo);
98     }
99   }
100 
101  private:
102   Heap* heap_;
103 };
104 
105 
106 static void VerifyMarking(Heap* heap, Address bottom, Address top) {
107   VerifyMarkingVisitor visitor(heap);
108   HeapObject* object;
109   Address next_object_must_be_here_or_later = bottom;
110 
111   for (Address current = bottom; current < top; current += kPointerSize) {
112     object = HeapObject::FromAddress(current);
113     if (MarkCompactCollector::IsMarked(object)) {
114       CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
115       CHECK(current >= next_object_must_be_here_or_later);
116       object->Iterate(&visitor);
117       next_object_must_be_here_or_later = current + object->Size();
118       // The next word for sure belongs to the current object, jump over it.
119       current += kPointerSize;
120     }
121   }
122 }
123 
124 
125 static void VerifyMarking(NewSpace* space) {
126   Address end = space->top();
127   NewSpacePageIterator it(space->bottom(), end);
128   // The bottom position is at the start of its page. Allows us to use
129   // page->area_start() as start of range on all pages.
130   CHECK_EQ(space->bottom(),
131            NewSpacePage::FromAddress(space->bottom())->area_start());
132   while (it.has_next()) {
133     NewSpacePage* page = it.next();
134     Address limit = it.has_next() ? page->area_end() : end;
135     CHECK(limit == end || !page->Contains(end));
136     VerifyMarking(space->heap(), page->area_start(), limit);
137   }
138 }
139 
140 
141 static void VerifyMarking(PagedSpace* space) {
142   PageIterator it(space);
143 
144   while (it.has_next()) {
145     Page* p = it.next();
146     VerifyMarking(space->heap(), p->area_start(), p->area_end());
147   }
148 }
149 
150 
151 static void VerifyMarking(Heap* heap) {
152   VerifyMarking(heap->old_space());
153   VerifyMarking(heap->code_space());
154   VerifyMarking(heap->map_space());
155   VerifyMarking(heap->new_space());
156 
157   VerifyMarkingVisitor visitor(heap);
158 
159   LargeObjectIterator it(heap->lo_space());
160   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
161     if (MarkCompactCollector::IsMarked(obj)) {
162       obj->Iterate(&visitor);
163     }
164   }
165 
166   heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
167 }
168 
169 
170 class VerifyEvacuationVisitor : public ObjectVisitor {
171  public:
172   void VisitPointers(Object** start, Object** end) override {
173     for (Object** current = start; current < end; current++) {
174       if ((*current)->IsHeapObject()) {
175         HeapObject* object = HeapObject::cast(*current);
176         CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
177       }
178     }
179   }
180 };
181 
182 
183 static void VerifyEvacuation(Page* page) {
184   VerifyEvacuationVisitor visitor;
185   HeapObjectIterator iterator(page);
186   for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
187        heap_object = iterator.Next()) {
188     // We skip free space objects.
189     if (!heap_object->IsFiller()) {
190       heap_object->Iterate(&visitor);
191     }
192   }
193 }
194 
195 
196 static void VerifyEvacuation(NewSpace* space) {
197   NewSpacePageIterator it(space->bottom(), space->top());
198   VerifyEvacuationVisitor visitor;
199 
200   while (it.has_next()) {
201     NewSpacePage* page = it.next();
202     Address current = page->area_start();
203     Address limit = it.has_next() ? page->area_end() : space->top();
204     CHECK(limit == space->top() || !page->Contains(space->top()));
205     while (current < limit) {
206       HeapObject* object = HeapObject::FromAddress(current);
207       object->Iterate(&visitor);
208       current += object->Size();
209     }
210   }
211 }
212 
213 
214 static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
215   if (FLAG_use_allocation_folding && (space == heap->old_space())) {
216     return;
217   }
218   PageIterator it(space);
219 
220   while (it.has_next()) {
221     Page* p = it.next();
222     if (p->IsEvacuationCandidate()) continue;
223     VerifyEvacuation(p);
224   }
225 }
226 
227 
228 static void VerifyEvacuation(Heap* heap) {
229   VerifyEvacuation(heap, heap->old_space());
230   VerifyEvacuation(heap, heap->code_space());
231   VerifyEvacuation(heap, heap->map_space());
232   VerifyEvacuation(heap->new_space());
233 
234   VerifyEvacuationVisitor visitor;
235   heap->IterateStrongRoots(&visitor, VISIT_ALL);
236 }
237 #endif  // VERIFY_HEAP
238 
239 
240 void MarkCompactCollector::SetUp() {
241   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
242   DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
243   DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
244   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
245 
246   free_list_old_space_.Reset(new FreeList(heap_->old_space()));
247   free_list_code_space_.Reset(new FreeList(heap_->code_space()));
248   free_list_map_space_.Reset(new FreeList(heap_->map_space()));
249   EnsureMarkingDequeIsReserved();
250   EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
251   slots_buffer_allocator_ = new SlotsBufferAllocator();
252 
253   if (FLAG_flush_code) {
254     code_flusher_ = new CodeFlusher(isolate());
255     if (FLAG_trace_code_flushing) {
256       PrintF("[code-flushing is now on]\n");
257     }
258   }
259 }
260 
261 
262 void MarkCompactCollector::TearDown() {
263   AbortCompaction();
264   delete marking_deque_memory_;
265   delete slots_buffer_allocator_;
266   delete code_flusher_;
267 }
268 
269 
270 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
271   DCHECK(!p->NeverEvacuate());
272   p->MarkEvacuationCandidate();
273   evacuation_candidates_.Add(p);
274 }
275 
276 
277 static void TraceFragmentation(PagedSpace* space) {
278   int number_of_pages = space->CountTotalPages();
279   intptr_t reserved = (number_of_pages * space->AreaSize());
280   intptr_t free = reserved - space->SizeOfObjects();
281   PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
282          AllocationSpaceName(space->identity()), number_of_pages,
283          static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
284 }
285 
286 
287 bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
288   if (!compacting_) {
289     DCHECK(evacuation_candidates_.length() == 0);
290 
291     CollectEvacuationCandidates(heap()->old_space());
292 
293     if (FLAG_compact_code_space) {
294       CollectEvacuationCandidates(heap()->code_space());
295     } else if (FLAG_trace_fragmentation) {
296       TraceFragmentation(heap()->code_space());
297     }
298 
299     if (FLAG_trace_fragmentation) {
300       TraceFragmentation(heap()->map_space());
301     }
302 
303     heap()->old_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
304     heap()->code_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
305 
306     compacting_ = evacuation_candidates_.length() > 0;
307   }
308 
309   return compacting_;
310 }
311 
312 
313 void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
314   {
315     GCTracer::Scope gc_scope(heap()->tracer(),
316                              GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
317     heap_->store_buffer()->ClearInvalidStoreBufferEntries();
318   }
319 
320   {
321     GCTracer::Scope gc_scope(heap()->tracer(),
322                              GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
323     int number_of_pages = evacuation_candidates_.length();
324     for (int i = 0; i < number_of_pages; i++) {
325       Page* p = evacuation_candidates_[i];
326       SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
327     }
328   }
329 #ifdef VERIFY_HEAP
330   if (FLAG_verify_heap) {
331     VerifyValidStoreAndSlotsBufferEntries();
332   }
333 #endif
334 }
335 
336 
337 #ifdef VERIFY_HEAP
338 static void VerifyValidSlotsBufferEntries(Heap* heap, PagedSpace* space) {
339   PageIterator it(space);
340   while (it.has_next()) {
341     Page* p = it.next();
342     SlotsBuffer::VerifySlots(heap, p->slots_buffer());
343   }
344 }
345 
346 
347 void MarkCompactCollector::VerifyValidStoreAndSlotsBufferEntries() {
348   heap()->store_buffer()->VerifyValidStoreBufferEntries();
349 
350   VerifyValidSlotsBufferEntries(heap(), heap()->old_space());
351   VerifyValidSlotsBufferEntries(heap(), heap()->code_space());
352   VerifyValidSlotsBufferEntries(heap(), heap()->map_space());
353 
354   LargeObjectIterator it(heap()->lo_space());
355   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
356     MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
357     SlotsBuffer::VerifySlots(heap(), chunk->slots_buffer());
358   }
359 }
360 #endif
361 
362 
363 void MarkCompactCollector::CollectGarbage() {
364   // Make sure that Prepare() has been called. The individual steps below will
365   // update the state as they proceed.
366   DCHECK(state_ == PREPARE_GC);
367 
368   MarkLiveObjects();
369 
370   DCHECK(heap_->incremental_marking()->IsStopped());
371 
372   ClearNonLiveReferences();
373 
374 #ifdef VERIFY_HEAP
375   if (FLAG_verify_heap) {
376     VerifyMarking(heap_);
377   }
378 #endif
379 
380   SweepSpaces();
381 
382   EvacuateNewSpaceAndCandidates();
383 
384   Finish();
385 }
386 
387 
388 #ifdef VERIFY_HEAP
389 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
390   PageIterator it(space);
391 
392   while (it.has_next()) {
393     Page* p = it.next();
394     CHECK(p->markbits()->IsClean());
395     CHECK_EQ(0, p->LiveBytes());
396   }
397 }
398 
399 
400 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
401   NewSpacePageIterator it(space->bottom(), space->top());
402 
403   while (it.has_next()) {
404     NewSpacePage* p = it.next();
405     CHECK(p->markbits()->IsClean());
406     CHECK_EQ(0, p->LiveBytes());
407   }
408 }
409 
410 
411 void MarkCompactCollector::VerifyMarkbitsAreClean() {
412   VerifyMarkbitsAreClean(heap_->old_space());
413   VerifyMarkbitsAreClean(heap_->code_space());
414   VerifyMarkbitsAreClean(heap_->map_space());
415   VerifyMarkbitsAreClean(heap_->new_space());
416 
417   LargeObjectIterator it(heap_->lo_space());
418   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
419     MarkBit mark_bit = Marking::MarkBitFrom(obj);
420     CHECK(Marking::IsWhite(mark_bit));
421     CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
422   }
423 }
424 
425 
426 void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
427   HeapObjectIterator code_iterator(heap()->code_space());
428   for (HeapObject* obj = code_iterator.Next(); obj != NULL;
429        obj = code_iterator.Next()) {
430     Code* code = Code::cast(obj);
431     if (!code->is_optimized_code()) continue;
432     if (WillBeDeoptimized(code)) continue;
433     code->VerifyEmbeddedObjectsDependency();
434   }
435 }
436 
437 
438 void MarkCompactCollector::VerifyOmittedMapChecks() {
439   HeapObjectIterator iterator(heap()->map_space());
440   for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
441     Map* map = Map::cast(obj);
442     map->VerifyOmittedMapChecks();
443   }
444 }
445 #endif  // VERIFY_HEAP
446 
447 
448 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
449   PageIterator it(space);
450 
451   while (it.has_next()) {
452     Bitmap::Clear(it.next());
453   }
454 }
455 
456 
457 static void ClearMarkbitsInNewSpace(NewSpace* space) {
458   NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
459 
460   while (it.has_next()) {
461     Bitmap::Clear(it.next());
462   }
463 }
464 
465 
466 void MarkCompactCollector::ClearMarkbits() {
467   ClearMarkbitsInPagedSpace(heap_->code_space());
468   ClearMarkbitsInPagedSpace(heap_->map_space());
469   ClearMarkbitsInPagedSpace(heap_->old_space());
470   ClearMarkbitsInNewSpace(heap_->new_space());
471 
472   LargeObjectIterator it(heap_->lo_space());
473   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
474     Marking::MarkWhite(Marking::MarkBitFrom(obj));
475     Page::FromAddress(obj->address())->ResetProgressBar();
476     Page::FromAddress(obj->address())->ResetLiveBytes();
477   }
478 }
479 
480 
481 class MarkCompactCollector::CompactionTask : public CancelableTask {
482  public:
483   explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
484       : CancelableTask(heap->isolate()), spaces_(spaces) {}
485 
486   virtual ~CompactionTask() {}
487 
488  private:
489   // v8::internal::CancelableTask overrides.
490   void RunInternal() override {
491     MarkCompactCollector* mark_compact =
492         isolate()->heap()->mark_compact_collector();
493     SlotsBuffer* evacuation_slots_buffer = nullptr;
494     mark_compact->EvacuatePages(spaces_, &evacuation_slots_buffer);
495     mark_compact->AddEvacuationSlotsBufferSynchronized(evacuation_slots_buffer);
496     mark_compact->pending_compaction_tasks_semaphore_.Signal();
497   }
498 
499   CompactionSpaceCollection* spaces_;
500 
501   DISALLOW_COPY_AND_ASSIGN(CompactionTask);
502 };
503 
504 
505 class MarkCompactCollector::SweeperTask : public v8::Task {
506  public:
507   SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
508 
509   virtual ~SweeperTask() {}
510 
511  private:
512   // v8::Task overrides.
513   void Run() override {
514     heap_->mark_compact_collector()->SweepInParallel(space_, 0);
515     heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal();
516   }
517 
518   Heap* heap_;
519   PagedSpace* space_;
520 
521   DISALLOW_COPY_AND_ASSIGN(SweeperTask);
522 };
523 
524 
525 void MarkCompactCollector::StartSweeperThreads() {
526   DCHECK(free_list_old_space_.get()->IsEmpty());
527   DCHECK(free_list_code_space_.get()->IsEmpty());
528   DCHECK(free_list_map_space_.get()->IsEmpty());
529   V8::GetCurrentPlatform()->CallOnBackgroundThread(
530       new SweeperTask(heap(), heap()->old_space()),
531       v8::Platform::kShortRunningTask);
532   V8::GetCurrentPlatform()->CallOnBackgroundThread(
533       new SweeperTask(heap(), heap()->code_space()),
534       v8::Platform::kShortRunningTask);
535   V8::GetCurrentPlatform()->CallOnBackgroundThread(
536       new SweeperTask(heap(), heap()->map_space()),
537       v8::Platform::kShortRunningTask);
538 }
539 
540 
541 void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
542   PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
543   if (!page->SweepingCompleted()) {
544     SweepInParallel(page, owner);
545     if (!page->SweepingCompleted()) {
546       // We were not able to sweep that page, i.e., a concurrent
547       // sweeper thread currently owns this page. Wait for the sweeper
548       // thread to be done with this page.
549       page->WaitUntilSweepingCompleted();
550     }
551   }
552 }
553 
554 
555 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
556   if (FLAG_concurrent_sweeping && !IsSweepingCompleted()) {
557     SweepInParallel(heap()->paged_space(space->identity()), 0);
558     space->RefillFreeList();
559   }
560 }
561 
562 
563 void MarkCompactCollector::EnsureSweepingCompleted() {
564   DCHECK(sweeping_in_progress_ == true);
565 
566   // If sweeping is not completed or not running at all, we try to complete it
567   // here.
568   if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
569     SweepInParallel(heap()->paged_space(OLD_SPACE), 0);
570     SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
571     SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
572   }
573 
574   if (FLAG_concurrent_sweeping) {
575     pending_sweeper_tasks_semaphore_.Wait();
576     pending_sweeper_tasks_semaphore_.Wait();
577     pending_sweeper_tasks_semaphore_.Wait();
578   }
579 
580   ParallelSweepSpacesComplete();
581   sweeping_in_progress_ = false;
582   heap()->old_space()->RefillFreeList();
583   heap()->code_space()->RefillFreeList();
584   heap()->map_space()->RefillFreeList();
585 
586 #ifdef VERIFY_HEAP
587   if (FLAG_verify_heap && !evacuation()) {
588     VerifyEvacuation(heap_);
589   }
590 #endif
591 }
592 
593 
594 bool MarkCompactCollector::IsSweepingCompleted() {
595   if (!pending_sweeper_tasks_semaphore_.WaitFor(
596           base::TimeDelta::FromSeconds(0))) {
597     return false;
598   }
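  // At least one sweeper task has signaled; restore the permit we just
  // consumed so that EnsureSweepingCompleted() can still Wait() once per task.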
599   pending_sweeper_tasks_semaphore_.Signal();
600   return true;
601 }
602 
603 
604 void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
605   // This is only used when resizing an object.
606   DCHECK(MemoryChunk::FromAddress(old_start) ==
607          MemoryChunk::FromAddress(new_start));
608 
609   if (!heap->incremental_marking()->IsMarking()) return;
610 
611   // If the mark doesn't move, we don't check the color of the object.
612   // It doesn't matter whether the object is black, since it hasn't changed
613   // size, so the adjustment to the live data count will be zero anyway.
614   if (old_start == new_start) return;
615 
616   MarkBit new_mark_bit = MarkBitFrom(new_start);
617   MarkBit old_mark_bit = MarkBitFrom(old_start);
618 
619 #ifdef DEBUG
620   ObjectColor old_color = Color(old_mark_bit);
621 #endif
622 
623   if (Marking::IsBlack(old_mark_bit)) {
624     Marking::BlackToWhite(old_mark_bit);
625     Marking::MarkBlack(new_mark_bit);
626     return;
627   } else if (Marking::IsGrey(old_mark_bit)) {
628     Marking::GreyToWhite(old_mark_bit);
629     heap->incremental_marking()->WhiteToGreyAndPush(
630         HeapObject::FromAddress(new_start), new_mark_bit);
631     heap->incremental_marking()->RestartIfNotMarking();
632   }
633 
634 #ifdef DEBUG
635   ObjectColor new_color = Color(new_mark_bit);
636   DCHECK(new_color == old_color);
637 #endif
638 }
639 
640 
641 const char* AllocationSpaceName(AllocationSpace space) {
642   switch (space) {
643     case NEW_SPACE:
644       return "NEW_SPACE";
645     case OLD_SPACE:
646       return "OLD_SPACE";
647     case CODE_SPACE:
648       return "CODE_SPACE";
649     case MAP_SPACE:
650       return "MAP_SPACE";
651     case LO_SPACE:
652       return "LO_SPACE";
653     default:
654       UNREACHABLE();
655   }
656 
657   return NULL;
658 }
659 
660 
661 void MarkCompactCollector::ComputeEvacuationHeuristics(
662     int area_size, int* target_fragmentation_percent,
663     int* max_evacuated_bytes) {
664   // For memory reducing mode we directly define both constants.
665   const int kTargetFragmentationPercentForReduceMemory = 20;
666   const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
667 
668   // For regular mode (which is latency critical) we define less aggressive
669   // defaults to start and switch to a trace-based (using compaction speed)
670   // approach as soon as we have enough samples.
671   const int kTargetFragmentationPercent = 70;
672   const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
673   // Time to take for a single area (=payload of page). Used as soon as there
674   // exist enough compaction speed samples.
675   const int kTargetMsPerArea = 1;
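  // Illustrative example (hypothetical numbers) for the non-memory-reducing
  // branch below: with an area size of 500 KB and a traced compaction speed of
  // 100 KB/ms, estimated_ms_per_area is 1 + 500 / 100 = 6, so
  // target_fragmentation_percent becomes 100 - 100 * 1 / 6 = 84 and is only
  // clamped if it falls below 20.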
676 
677   if (heap()->ShouldReduceMemory()) {
678     *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
679     *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
680   } else {
681     const intptr_t estimated_compaction_speed =
682         heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
683     if (estimated_compaction_speed != 0) {
684       // Estimate the target fragmentation based on traced compaction speed
685       // and a goal for a single page.
686       const intptr_t estimated_ms_per_area =
687           1 + static_cast<intptr_t>(area_size) / estimated_compaction_speed;
688       *target_fragmentation_percent =
689           100 - 100 * kTargetMsPerArea / estimated_ms_per_area;
690       if (*target_fragmentation_percent <
691           kTargetFragmentationPercentForReduceMemory) {
692         *target_fragmentation_percent =
693             kTargetFragmentationPercentForReduceMemory;
694       }
695     } else {
696       *target_fragmentation_percent = kTargetFragmentationPercent;
697     }
698     *max_evacuated_bytes = kMaxEvacuatedBytes;
699   }
700 }
701 
702 
703 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
704   DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
705 
706   int number_of_pages = space->CountTotalPages();
707   int area_size = space->AreaSize();
708 
709   // Pairs of (live_bytes_in_page, page).
710   typedef std::pair<int, Page*> LiveBytesPagePair;
711   std::vector<LiveBytesPagePair> pages;
712   pages.reserve(number_of_pages);
713 
714   PageIterator it(space);
715   while (it.has_next()) {
716     Page* p = it.next();
717     if (p->NeverEvacuate()) continue;
718     if (p->IsFlagSet(Page::POPULAR_PAGE)) {
719       // This page had slots buffer overflow on previous GC, skip it.
720       p->ClearFlag(Page::POPULAR_PAGE);
721       continue;
722     }
723     // Invariant: Evacuation candidates are only created when marking is
724     // started. At the end of a GC all evacuation candidates are cleared and
725     // their slot buffers are released.
726     CHECK(!p->IsEvacuationCandidate());
727     CHECK(p->slots_buffer() == NULL);
728     DCHECK(p->area_size() == area_size);
729     int live_bytes =
730         p->WasSwept() ? p->LiveBytesFromFreeList() : p->LiveBytes();
731     pages.push_back(std::make_pair(live_bytes, p));
732   }
733 
734   int candidate_count = 0;
735   int total_live_bytes = 0;
736 
737   const bool reduce_memory = heap()->ShouldReduceMemory();
738   if (FLAG_manual_evacuation_candidates_selection) {
739     for (size_t i = 0; i < pages.size(); i++) {
740       Page* p = pages[i].second;
741       if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
742         candidate_count++;
743         total_live_bytes += pages[i].first;
744         p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
745         AddEvacuationCandidate(p);
746       }
747     }
748   } else if (FLAG_stress_compaction) {
749     for (size_t i = 0; i < pages.size(); i++) {
750       Page* p = pages[i].second;
751       if (i % 2 == 0) {
752         candidate_count++;
753         total_live_bytes += pages[i].first;
754         AddEvacuationCandidate(p);
755       }
756     }
757   } else {
758     // The following approach determines the pages that should be evacuated.
759     //
760     // We use two conditions to decide whether a page qualifies as an evacuation
761     // candidate, or not:
762     // * Target fragmentation: How fragmented is a page, i.e., the ratio
763     //   between live bytes and the capacity of this page (= area).
764     // * Evacuation quota: A global quota determining how many bytes should be
765     //   compacted.
766     //
767     // The algorithm sorts all pages by live bytes and then iterates through
768     // them starting with the page with the most free memory, adding them to the
769     // set of evacuation candidates as long as both conditions (fragmentation
770     // and quota) hold.
771     int max_evacuated_bytes;
772     int target_fragmentation_percent;
773     ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
774                                 &max_evacuated_bytes);
775 
776     const intptr_t free_bytes_threshold =
777         target_fragmentation_percent * (area_size / 100);
778 
779     // Sort pages from the most free to the least free, then select
780     // the first n pages for evacuation such that:
781     // - the total size of evacuated objects does not exceed the specified
782     // limit.
783     // - fragmentation of (n+1)-th page does not exceed the specified limit.
784     std::sort(pages.begin(), pages.end(),
785               [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
786                 return a.first < b.first;
787               });
788     for (size_t i = 0; i < pages.size(); i++) {
789       int live_bytes = pages[i].first;
790       int free_bytes = area_size - live_bytes;
791       if (FLAG_always_compact ||
792           ((free_bytes >= free_bytes_threshold) &&
793            ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
794         candidate_count++;
795         total_live_bytes += live_bytes;
796       }
797       if (FLAG_trace_fragmentation_verbose) {
798         PrintIsolate(isolate(),
799                      "compaction-selection-page: space=%s free_bytes_page=%d "
800                      "fragmentation_limit_kb=%d fragmentation_limit_percent=%d "
801                      "sum_compaction_kb=%d "
802                      "compaction_limit_kb=%d\n",
803                      AllocationSpaceName(space->identity()), free_bytes / KB,
804                      free_bytes_threshold / KB, target_fragmentation_percent,
805                      total_live_bytes / KB, max_evacuated_bytes / KB);
806       }
807     }
808     // How many pages we will allocate for the evacuated objects
809     // in the worst case: ceil(total_live_bytes / area_size)
810     int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
811     DCHECK_LE(estimated_new_pages, candidate_count);
812     int estimated_released_pages = candidate_count - estimated_new_pages;
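    // Example (illustrative): three candidates whose live data adds up to
    // 1.2 areas give estimated_new_pages == 2 and estimated_released_pages
    // == 1, so compaction is still considered worthwhile.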
813     // Avoid (compact -> expand) cycles.
814     if ((estimated_released_pages == 0) && !FLAG_always_compact) {
815       candidate_count = 0;
816     }
817     for (int i = 0; i < candidate_count; i++) {
818       AddEvacuationCandidate(pages[i].second);
819     }
820   }
821 
822   if (FLAG_trace_fragmentation) {
823     PrintIsolate(isolate(),
824                  "compaction-selection: space=%s reduce_memory=%d pages=%d "
825                  "total_live_bytes=%d\n",
826                  AllocationSpaceName(space->identity()), reduce_memory,
827                  candidate_count, total_live_bytes / KB);
828   }
829 }
830 
831 
832 void MarkCompactCollector::AbortCompaction() {
833   if (compacting_) {
834     int npages = evacuation_candidates_.length();
835     for (int i = 0; i < npages; i++) {
836       Page* p = evacuation_candidates_[i];
837       slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
838       p->ClearEvacuationCandidate();
839       p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
840     }
841     compacting_ = false;
842     evacuation_candidates_.Rewind(0);
843   }
844   DCHECK_EQ(0, evacuation_candidates_.length());
845 }
846 
847 
848 void MarkCompactCollector::Prepare() {
849   was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
850 
851 #ifdef DEBUG
852   DCHECK(state_ == IDLE);
853   state_ = PREPARE_GC;
854 #endif
855 
856   DCHECK(!FLAG_never_compact || !FLAG_always_compact);
857 
858   if (sweeping_in_progress()) {
859     // Instead of waiting we could also abort the sweeper threads here.
860     EnsureSweepingCompleted();
861   }
862 
863   // If concurrent unmapping tasks are still running, we should wait for
864   // them here.
865   heap()->WaitUntilUnmappingOfFreeChunksCompleted();
866 
867   // Clear marking bits if incremental marking is aborted.
868   if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
869     heap()->incremental_marking()->Stop();
870     ClearMarkbits();
871     AbortWeakCollections();
872     AbortWeakCells();
873     AbortTransitionArrays();
874     AbortCompaction();
875     was_marked_incrementally_ = false;
876   }
877 
878   // Don't start compaction if we are in the middle of an incremental
879   // marking cycle. We did not collect any slots.
880   if (!FLAG_never_compact && !was_marked_incrementally_) {
881     StartCompaction(NON_INCREMENTAL_COMPACTION);
882   }
883 
884   PagedSpaces spaces(heap());
885   for (PagedSpace* space = spaces.next(); space != NULL;
886        space = spaces.next()) {
887     space->PrepareForMarkCompact();
888   }
889 
890 #ifdef VERIFY_HEAP
891   if (!was_marked_incrementally_ && FLAG_verify_heap) {
892     VerifyMarkbitsAreClean();
893   }
894 #endif
895 }
896 
897 
898 void MarkCompactCollector::Finish() {
899   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_FINISH);
900 
901   // The hashing of weak_object_to_code_table is no longer valid.
902   heap()->weak_object_to_code_table()->Rehash(
903       heap()->isolate()->factory()->undefined_value());
904 
905   // Clear the marking state of live large objects.
906   heap_->lo_space()->ClearMarkingStateOfLiveObjects();
907 
908 #ifdef DEBUG
909   DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
910   state_ = IDLE;
911 #endif
912   heap_->isolate()->inner_pointer_to_code_cache()->Flush();
913 
914   // The stub cache is not traversed during GC; clear the cache to
915   // force lazy re-initialization of it. This must be done after the
916   // GC, because it relies on the new address of certain old space
917   // objects (empty string, illegal builtin).
918   isolate()->stub_cache()->Clear();
919 
920   if (have_code_to_deoptimize_) {
921     // Some code objects were marked for deoptimization during the GC.
922     Deoptimizer::DeoptimizeMarkedCode(isolate());
923     have_code_to_deoptimize_ = false;
924   }
925 
926   heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
927 
928   if (marking_parity_ == EVEN_MARKING_PARITY) {
929     marking_parity_ = ODD_MARKING_PARITY;
930   } else {
931     DCHECK(marking_parity_ == ODD_MARKING_PARITY);
932     marking_parity_ = EVEN_MARKING_PARITY;
933   }
934 }
935 
936 
937 // -------------------------------------------------------------------------
938 // Phase 1: tracing and marking live objects.
939 //   before: all objects are in normal state.
940 //   after: live objects are marked black ("11") in the marking bitmap.
941 
942 // Marking all live objects in the heap as part of mark-sweep or mark-compact
943 // collection.  Before marking, all objects are in their normal state.  After
944 // marking, live objects are marked black in the marking bitmap, indicating
945 // that they have been found reachable.
946 //
947 // The marking algorithm is a (mostly) depth-first (because of possible stack
948 // overflow) traversal of the graph of objects reachable from the roots.  It
949 // uses an explicit stack of pointers rather than recursion.  The young
950 // generation's inactive ('from') space is used as a marking stack.  The
951 // objects in the marking stack are the ones that have been reached and marked
952 // but their children have not yet been visited.
953 //
954 // The marking stack can overflow during traversal.  In that case, we set an
955 // overflow flag.  When the overflow flag is set, we continue marking objects
956 // reachable from the objects on the marking stack, but no longer push them on
957 // the marking stack.  Instead, we mark them as both marked and overflowed.
958 // When the stack is in the overflowed state, objects marked as overflowed
959 // have been reached and marked but their children have not been visited yet.
960 // After emptying the marking stack, we clear the overflow flag and traverse
961 // the heap looking for objects marked as overflowed, push them on the stack,
962 // and continue with marking.  This process repeats until all reachable
963 // objects have been marked.
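//
// A rough sketch of the resulting driver loop (illustrative only; the helpers
// named here are assumed to correspond to the ones defined further down in
// this file):
//
//   EmptyMarkingDeque();
//   while (marking_deque()->overflowed()) {
//     RefillMarkingDeque();   // rescan the heap for objects marked overflowed
//     EmptyMarkingDeque();
//   }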
964 
965 void CodeFlusher::ProcessJSFunctionCandidates() {
966   Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
967   Object* undefined = isolate_->heap()->undefined_value();
968 
969   JSFunction* candidate = jsfunction_candidates_head_;
970   JSFunction* next_candidate;
971   while (candidate != NULL) {
972     next_candidate = GetNextCandidate(candidate);
973     ClearNextCandidate(candidate, undefined);
974 
975     SharedFunctionInfo* shared = candidate->shared();
976 
977     Code* code = shared->code();
978     MarkBit code_mark = Marking::MarkBitFrom(code);
979     if (Marking::IsWhite(code_mark)) {
980       if (FLAG_trace_code_flushing && shared->is_compiled()) {
981         PrintF("[code-flushing clears: ");
982         shared->ShortPrint();
983         PrintF(" - age: %d]\n", code->GetAge());
984       }
985       // Always flush the optimized code map if there is one.
986       if (!shared->OptimizedCodeMapIsCleared()) {
987         shared->ClearOptimizedCodeMap();
988       }
989       shared->set_code(lazy_compile);
990       candidate->set_code(lazy_compile);
991     } else {
992       DCHECK(Marking::IsBlack(code_mark));
993       candidate->set_code(code);
994     }
995 
996     // We are in the middle of a GC cycle so the write barrier in the code
997     // setter did not record the slot update and we have to do that manually.
998     Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
999     Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
1000     isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(
1001         candidate, slot, target);
1002 
1003     Object** shared_code_slot =
1004         HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
1005     isolate_->heap()->mark_compact_collector()->RecordSlot(
1006         shared, shared_code_slot, *shared_code_slot);
1007 
1008     candidate = next_candidate;
1009   }
1010 
1011   jsfunction_candidates_head_ = NULL;
1012 }
1013 
1014 
1015 void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
1016   Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
1017 
1018   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1019   SharedFunctionInfo* next_candidate;
1020   while (candidate != NULL) {
1021     next_candidate = GetNextCandidate(candidate);
1022     ClearNextCandidate(candidate);
1023 
1024     Code* code = candidate->code();
1025     MarkBit code_mark = Marking::MarkBitFrom(code);
1026     if (Marking::IsWhite(code_mark)) {
1027       if (FLAG_trace_code_flushing && candidate->is_compiled()) {
1028         PrintF("[code-flushing clears: ");
1029         candidate->ShortPrint();
1030         PrintF(" - age: %d]\n", code->GetAge());
1031       }
1032       // Always flush the optimized code map if there is one.
1033       if (!candidate->OptimizedCodeMapIsCleared()) {
1034         candidate->ClearOptimizedCodeMap();
1035       }
1036       candidate->set_code(lazy_compile);
1037     }
1038 
1039     Object** code_slot =
1040         HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
1041     isolate_->heap()->mark_compact_collector()->RecordSlot(candidate, code_slot,
1042                                                            *code_slot);
1043 
1044     candidate = next_candidate;
1045   }
1046 
1047   shared_function_info_candidates_head_ = NULL;
1048 }
1049 
1050 
1051 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
1052   // Make sure previous flushing decisions are revisited.
1053   isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
1054 
1055   if (FLAG_trace_code_flushing) {
1056     PrintF("[code-flushing abandons function-info: ");
1057     shared_info->ShortPrint();
1058     PrintF("]\n");
1059   }
1060 
1061   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1062   SharedFunctionInfo* next_candidate;
1063   if (candidate == shared_info) {
1064     next_candidate = GetNextCandidate(shared_info);
1065     shared_function_info_candidates_head_ = next_candidate;
1066     ClearNextCandidate(shared_info);
1067   } else {
1068     while (candidate != NULL) {
1069       next_candidate = GetNextCandidate(candidate);
1070 
1071       if (next_candidate == shared_info) {
1072         next_candidate = GetNextCandidate(shared_info);
1073         SetNextCandidate(candidate, next_candidate);
1074         ClearNextCandidate(shared_info);
1075         break;
1076       }
1077 
1078       candidate = next_candidate;
1079     }
1080   }
1081 }
1082 
1083 
1084 void CodeFlusher::EvictCandidate(JSFunction* function) {
1085   DCHECK(!function->next_function_link()->IsUndefined());
1086   Object* undefined = isolate_->heap()->undefined_value();
1087 
1088   // Make sure previous flushing decisions are revisited.
1089   isolate_->heap()->incremental_marking()->RecordWrites(function);
1090   isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
1091 
1092   if (FLAG_trace_code_flushing) {
1093     PrintF("[code-flushing abandons closure: ");
1094     function->shared()->ShortPrint();
1095     PrintF("]\n");
1096   }
1097 
1098   JSFunction* candidate = jsfunction_candidates_head_;
1099   JSFunction* next_candidate;
1100   if (candidate == function) {
1101     next_candidate = GetNextCandidate(function);
1102     jsfunction_candidates_head_ = next_candidate;
1103     ClearNextCandidate(function, undefined);
1104   } else {
1105     while (candidate != NULL) {
1106       next_candidate = GetNextCandidate(candidate);
1107 
1108       if (next_candidate == function) {
1109         next_candidate = GetNextCandidate(function);
1110         SetNextCandidate(candidate, next_candidate);
1111         ClearNextCandidate(function, undefined);
1112         break;
1113       }
1114 
1115       candidate = next_candidate;
1116     }
1117   }
1118 }
1119 
1120 
1121 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1122   Heap* heap = isolate_->heap();
1123 
1124   JSFunction** slot = &jsfunction_candidates_head_;
1125   JSFunction* candidate = jsfunction_candidates_head_;
1126   while (candidate != NULL) {
1127     if (heap->InFromSpace(candidate)) {
1128       v->VisitPointer(reinterpret_cast<Object**>(slot));
1129     }
1130     candidate = GetNextCandidate(*slot);
1131     slot = GetNextCandidateSlot(*slot);
1132   }
1133 }
1134 
1135 
1136 class MarkCompactMarkingVisitor
1137     : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
1138  public:
1139   static void Initialize();
1140 
1141   INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
1142     MarkObjectByPointer(heap->mark_compact_collector(), object, p);
1143   }
1144 
1145   INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
1146                                    Object** start, Object** end)) {
1147     // Mark all objects pointed to in [start, end).
1148     const int kMinRangeForMarkingRecursion = 64;
1149     if (end - start >= kMinRangeForMarkingRecursion) {
1150       if (VisitUnmarkedObjects(heap, object, start, end)) return;
1151       // We are close to a stack overflow, so just mark the objects.
1152     }
1153     MarkCompactCollector* collector = heap->mark_compact_collector();
1154     for (Object** p = start; p < end; p++) {
1155       MarkObjectByPointer(collector, object, p);
1156     }
1157   }
1158 
1159   // Marks the object black and pushes it on the marking stack.
1160   INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1161     MarkBit mark = Marking::MarkBitFrom(object);
1162     heap->mark_compact_collector()->MarkObject(object, mark);
1163   }
1164 
1165   // Marks the object black without pushing it on the marking stack.
1166   // Returns true if object needed marking and false otherwise.
1167   INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1168     MarkBit mark_bit = Marking::MarkBitFrom(object);
1169     if (Marking::IsWhite(mark_bit)) {
1170       heap->mark_compact_collector()->SetMark(object, mark_bit);
1171       return true;
1172     }
1173     return false;
1174   }
1175 
1176   // Mark object pointed to by p.
1177   INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1178                                          HeapObject* object, Object** p)) {
1179     if (!(*p)->IsHeapObject()) return;
1180     HeapObject* target_object = HeapObject::cast(*p);
1181     collector->RecordSlot(object, p, target_object);
1182     MarkBit mark = Marking::MarkBitFrom(target_object);
1183     collector->MarkObject(target_object, mark);
1184   }
1185 
1186 
1187   // Visit an unmarked object.
1188   INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1189                                          HeapObject* obj)) {
1190 #ifdef DEBUG
1191     DCHECK(collector->heap()->Contains(obj));
1192     DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
1193 #endif
1194     Map* map = obj->map();
1195     Heap* heap = obj->GetHeap();
1196     MarkBit mark = Marking::MarkBitFrom(obj);
1197     heap->mark_compact_collector()->SetMark(obj, mark);
1198     // Mark the map pointer and the body.
1199     MarkBit map_mark = Marking::MarkBitFrom(map);
1200     heap->mark_compact_collector()->MarkObject(map, map_mark);
1201     IterateBody(map, obj);
1202   }
1203 
1204   // Visit all unmarked objects pointed to by [start, end).
1205   // Returns false if the operation fails (lack of stack space).
1206   INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object,
1207                                           Object** start, Object** end)) {
1208     // Return false if we are close to the stack limit.
1209     StackLimitCheck check(heap->isolate());
1210     if (check.HasOverflowed()) return false;
1211 
1212     MarkCompactCollector* collector = heap->mark_compact_collector();
1213     // Visit the unmarked objects.
1214     for (Object** p = start; p < end; p++) {
1215       Object* o = *p;
1216       if (!o->IsHeapObject()) continue;
1217       collector->RecordSlot(object, p, o);
1218       HeapObject* obj = HeapObject::cast(o);
1219       MarkBit mark = Marking::MarkBitFrom(obj);
1220       if (Marking::IsBlackOrGrey(mark)) continue;
1221       VisitUnmarkedObject(collector, obj);
1222     }
1223     return true;
1224   }
1225 
1226  private:
1227   template <int id>
1228   static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
1229 
1230   // Code flushing support.
1231 
1232   static const int kRegExpCodeThreshold = 5;
1233 
1234   static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
1235                                           bool is_one_byte) {
1236     // Make sure that the fixed array is in fact initialized on the RegExp.
1237     // We could potentially trigger a GC when initializing the RegExp.
1238     if (HeapObject::cast(re->data())->map()->instance_type() !=
1239         FIXED_ARRAY_TYPE)
1240       return;
1241 
1242     // Make sure this is a RegExp that actually contains code.
1243     if (re->TypeTag() != JSRegExp::IRREGEXP) return;
1244 
1245     Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
1246     if (!code->IsSmi() &&
1247         HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1248       // Save a copy that can be reinstated if we need the code again.
1249       re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);
1250 
1251       // Saving a copy might create a pointer into a compaction candidate
1252       // that was not observed by the marker.  This might happen if the JSRegExp
1253       // data was marked through the compilation cache before the marker reached
1254       // the JSRegExp object.
1255       FixedArray* data = FixedArray::cast(re->data());
1256       Object** slot =
1257           data->data_start() + JSRegExp::saved_code_index(is_one_byte);
1258       heap->mark_compact_collector()->RecordSlot(data, slot, code);
1259 
1260       // Set a number in the 0-255 range to guarantee no smi overflow.
1261       re->SetDataAt(JSRegExp::code_index(is_one_byte),
1262                     Smi::FromInt(heap->ms_count() & 0xff));
1263     } else if (code->IsSmi()) {
1264       int value = Smi::cast(code)->value();
1265       // The regexp has not been compiled yet or there was a compilation error.
1266       if (value == JSRegExp::kUninitializedValue ||
1267           value == JSRegExp::kCompilationErrorValue) {
1268         return;
1269       }
1270 
1271       // Check if we should flush now.
1272       if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) {
1273         re->SetDataAt(JSRegExp::code_index(is_one_byte),
1274                       Smi::FromInt(JSRegExp::kUninitializedValue));
1275         re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
1276                       Smi::FromInt(JSRegExp::kUninitializedValue));
1277       }
1278     }
1279   }
1280 
1281 
1282   // Works by setting the current sweep generation (as a Smi) in the code
1283   // object slot of the RegExp's data array and keeping a copy around that
1284   // can be reinstated if we reuse the RegExp before flushing.
1285   // If the code has not been used for kRegExpCodeThreshold mark-sweep GCs,
1286   // we flush it.
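  // Example (illustrative numbers): if the code object was last seen at
  // ms_count == 12, the code slot now holds Smi(12 & 0xff).  With
  // kRegExpCodeThreshold == 5, the code is flushed during the GC where
  // ms_count == 17, unless the RegExp was used again (reinstating the code
  // from the saved slot) in the meantime.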
1287   static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1288     Heap* heap = map->GetHeap();
1289     MarkCompactCollector* collector = heap->mark_compact_collector();
1290     if (!collector->is_code_flushing_enabled()) {
1291       VisitJSRegExp(map, object);
1292       return;
1293     }
1294     JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1295     // Flush code or set age on both one byte and two byte code.
1296     UpdateRegExpCodeAgeAndFlush(heap, re, true);
1297     UpdateRegExpCodeAgeAndFlush(heap, re, false);
1298     // Visit the fields of the RegExp, including the updated FixedArray.
1299     VisitJSRegExp(map, object);
1300   }
1301 };
1302 
1303 
1304 void MarkCompactMarkingVisitor::Initialize() {
1305   StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1306 
1307   table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
1308 
1309   if (FLAG_track_gc_object_stats) {
1310     ObjectStatsVisitor::Initialize(&table_);
1311   }
1312 }
1313 
1314 
1315 class CodeMarkingVisitor : public ThreadVisitor {
1316  public:
1317   explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1318       : collector_(collector) {}
1319 
1320   void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1321     collector_->PrepareThreadForCodeFlushing(isolate, top);
1322   }
1323 
1324  private:
1325   MarkCompactCollector* collector_;
1326 };
1327 
1328 
1329 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1330  public:
1331   explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1332       : collector_(collector) {}
1333 
1334   void VisitPointers(Object** start, Object** end) override {
1335     for (Object** p = start; p < end; p++) VisitPointer(p);
1336   }
1337 
1338   void VisitPointer(Object** slot) override {
1339     Object* obj = *slot;
1340     if (obj->IsSharedFunctionInfo()) {
1341       SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1342       MarkBit shared_mark = Marking::MarkBitFrom(shared);
1343       MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1344       collector_->MarkObject(shared->code(), code_mark);
1345       collector_->MarkObject(shared, shared_mark);
1346     }
1347   }
1348 
1349  private:
1350   MarkCompactCollector* collector_;
1351 };
1352 
1353 
1354 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1355                                                         ThreadLocalTop* top) {
1356   for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1357     // Note: for the frame that has a pending lazy deoptimization,
1358     // StackFrame::unchecked_code will return a non-optimized code object for
1359     // the outermost function and StackFrame::LookupCode will return the
1360     // actual optimized code object.
1361     StackFrame* frame = it.frame();
1362     Code* code = frame->unchecked_code();
1363     MarkBit code_mark = Marking::MarkBitFrom(code);
1364     MarkObject(code, code_mark);
1365     if (frame->is_optimized()) {
1366       Code* optimized_code = frame->LookupCode();
1367       MarkBit optimized_code_mark = Marking::MarkBitFrom(optimized_code);
1368       MarkObject(optimized_code, optimized_code_mark);
1369     }
1370   }
1371 }
1372 
1373 
1374 void MarkCompactCollector::PrepareForCodeFlushing() {
1375   // If code flushing is disabled, there is no need to prepare for it.
1376   if (!is_code_flushing_enabled()) return;
1377 
1378   // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
1379   // relies on it being marked before any other descriptor array.
1380   HeapObject* descriptor_array = heap()->empty_descriptor_array();
1381   MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1382   MarkObject(descriptor_array, descriptor_array_mark);
1383 
1384   // Make sure we are not referencing the code from the stack.
1385   DCHECK(this == heap()->mark_compact_collector());
1386   PrepareThreadForCodeFlushing(heap()->isolate(),
1387                                heap()->isolate()->thread_local_top());
1388 
1389   // Iterate the archived stacks in all threads to check if
1390   // the code is referenced.
1391   CodeMarkingVisitor code_marking_visitor(this);
1392   heap()->isolate()->thread_manager()->IterateArchivedThreads(
1393       &code_marking_visitor);
1394 
1395   SharedFunctionInfoMarkingVisitor visitor(this);
1396   heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1397   heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1398 
1399   ProcessMarkingDeque();
1400 }
1401 
1402 
1403 // Visitor class for marking heap roots.
1404 class RootMarkingVisitor : public ObjectVisitor {
1405  public:
1406   explicit RootMarkingVisitor(Heap* heap)
1407       : collector_(heap->mark_compact_collector()) {}
1408 
1409   void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
1410 
1411   void VisitPointers(Object** start, Object** end) override {
1412     for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1413   }
1414 
1415   // Skip the weak next code link in a code object, which is visited in
1416   // ProcessTopOptimizedFrame.
1417   void VisitNextCodeLink(Object** p) override {}
1418 
1419  private:
1420   void MarkObjectByPointer(Object** p) {
1421     if (!(*p)->IsHeapObject()) return;
1422 
1423     // Replace flat cons strings in place.
1424     HeapObject* object = HeapObject::cast(*p);
1425     MarkBit mark_bit = Marking::MarkBitFrom(object);
1426     if (Marking::IsBlackOrGrey(mark_bit)) return;
1427 
1428     Map* map = object->map();
1429     // Mark the object.
1430     collector_->SetMark(object, mark_bit);
1431 
1432     // Mark the map pointer and body, and push them on the marking stack.
1433     MarkBit map_mark = Marking::MarkBitFrom(map);
1434     collector_->MarkObject(map, map_mark);
1435     MarkCompactMarkingVisitor::IterateBody(map, object);
1436 
1437     // Mark all the objects reachable from the map and body.  May leave
1438     // overflowed objects in the heap.
1439     collector_->EmptyMarkingDeque();
1440   }
1441 
1442   MarkCompactCollector* collector_;
1443 };
1444 
1445 
1446 // Helper class for pruning the string table.
1447 template <bool finalize_external_strings>
1448 class StringTableCleaner : public ObjectVisitor {
1449  public:
1450   explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
1451 
1452   void VisitPointers(Object** start, Object** end) override {
1453     // Visit all HeapObject pointers in [start, end).
1454     for (Object** p = start; p < end; p++) {
1455       Object* o = *p;
1456       if (o->IsHeapObject() &&
1457           Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) {
1458         if (finalize_external_strings) {
1459           DCHECK(o->IsExternalString());
1460           heap_->FinalizeExternalString(String::cast(*p));
1461         } else {
1462           pointers_removed_++;
1463         }
1464         // Set the entry to the_hole_value (as deleted).
1465         *p = heap_->the_hole_value();
1466       }
1467     }
1468   }
1469 
1470   int PointersRemoved() {
1471     DCHECK(!finalize_external_strings);
1472     return pointers_removed_;
1473   }
1474 
1475  private:
1476   Heap* heap_;
1477   int pointers_removed_;
1478 };
1479 
1480 
1481 typedef StringTableCleaner<false> InternalizedStringTableCleaner;
1482 typedef StringTableCleaner<true> ExternalStringTableCleaner;
1483 
1484 
1485 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1486 // are retained.
1487 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1488  public:
1489   virtual Object* RetainAs(Object* object) {
1490     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(object));
1491     DCHECK(!Marking::IsGrey(mark_bit));
1492     if (Marking::IsBlack(mark_bit)) {
1493       return object;
1494     } else if (object->IsAllocationSite() &&
1495                !(AllocationSite::cast(object)->IsZombie())) {
1496       // "dead" AllocationSites need to live long enough for a traversal of new
1497       // space. These sites get a one-time reprieve.
1498       AllocationSite* site = AllocationSite::cast(object);
1499       site->MarkZombie();
1500       site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1501       return object;
1502     } else {
1503       return NULL;
1504     }
1505   }
1506 };
1507 
1508 
1509 // Fill the marking stack with overflowed objects returned by the given
1510 // iterator.  Stop when the marking stack is filled or the end of the space
1511 // is reached, whichever comes first.
1512 template <class T>
1513 void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
1514   // The caller should ensure that the marking stack is initially not full,
1515   // so that we don't waste effort pointlessly scanning for objects.
1516   DCHECK(!marking_deque()->IsFull());
1517 
1518   Map* filler_map = heap()->one_pointer_filler_map();
1519   for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
1520     MarkBit markbit = Marking::MarkBitFrom(object);
1521     if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1522       Marking::GreyToBlack(markbit);
1523       PushBlack(object);
1524       if (marking_deque()->IsFull()) return;
1525     }
1526   }
1527 }
1528 
1529 
1530 void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
1531   DCHECK(!marking_deque()->IsFull());
1532   LiveObjectIterator<kGreyObjects> it(p);
1533   HeapObject* object = NULL;
1534   while ((object = it.Next()) != NULL) {
1535     MarkBit markbit = Marking::MarkBitFrom(object);
1536     DCHECK(Marking::IsGrey(markbit));
1537     Marking::GreyToBlack(markbit);
1538     PushBlack(object);
1539     if (marking_deque()->IsFull()) return;
1540   }
1541 }
1542 
1543 
1544 class MarkCompactCollector::HeapObjectVisitor {
1545  public:
1546   virtual ~HeapObjectVisitor() {}
1547   virtual bool Visit(HeapObject* object) = 0;
1548 };
1549 
1550 
1551 class MarkCompactCollector::EvacuateVisitorBase
1552     : public MarkCompactCollector::HeapObjectVisitor {
1553  public:
1554   EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer)
1555       : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {}
1556 
1557   bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
1558                          HeapObject** target_object) {
1559     int size = object->Size();
1560     AllocationAlignment alignment = object->RequiredAlignment();
1561     AllocationResult allocation = target_space->AllocateRaw(size, alignment);
1562     if (allocation.To(target_object)) {
1563       heap_->mark_compact_collector()->MigrateObject(
1564           *target_object, object, size, target_space->identity(),
1565           evacuation_slots_buffer_);
1566       return true;
1567     }
1568     return false;
1569   }
1570 
1571  protected:
1572   Heap* heap_;
1573   SlotsBuffer** evacuation_slots_buffer_;
1574 };
1575 
1576 
1577 class MarkCompactCollector::EvacuateNewSpaceVisitor final
1578     : public MarkCompactCollector::EvacuateVisitorBase {
1579  public:
1580   static const intptr_t kLabSize = 4 * KB;
1581   static const intptr_t kMaxLabObjectSize = 256;
1582 
1583   explicit EvacuateNewSpaceVisitor(Heap* heap,
1584                                    SlotsBuffer** evacuation_slots_buffer,
1585                                    HashMap* local_pretenuring_feedback)
1586       : EvacuateVisitorBase(heap, evacuation_slots_buffer),
1587         buffer_(LocalAllocationBuffer::InvalidBuffer()),
1588         space_to_allocate_(NEW_SPACE),
1589         promoted_size_(0),
1590         semispace_copied_size_(0),
1591         local_pretenuring_feedback_(local_pretenuring_feedback) {}
1592 
1593   bool Visit(HeapObject* object) override {
1594     heap_->UpdateAllocationSite(object, local_pretenuring_feedback_);
1595     int size = object->Size();
1596     HeapObject* target_object = nullptr;
1597     if (heap_->ShouldBePromoted(object->address(), size) &&
1598         TryEvacuateObject(heap_->old_space(), object, &target_object)) {
1599       // If we end up needing more special cases, we should factor this out.
1600       if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
1601         heap_->array_buffer_tracker()->Promote(
1602             JSArrayBuffer::cast(target_object));
1603       }
1604       promoted_size_ += size;
1605       return true;
1606     }
1607     HeapObject* target = nullptr;
1608     AllocationSpace space = AllocateTargetObject(object, &target);
1609     heap_->mark_compact_collector()->MigrateObject(
1610         HeapObject::cast(target), object, size, space,
1611         (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_);
1612     if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
1613       heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
1614     }
1615     semispace_copied_size_ += size;
1616     return true;
1617   }
1618 
1619   intptr_t promoted_size() { return promoted_size_; }
1620   intptr_t semispace_copied_size() { return semispace_copied_size_; }
1621 
1622  private:
1623   enum NewSpaceAllocationMode {
1624     kNonstickyBailoutOldSpace,
1625     kStickyBailoutOldSpace,
1626   };
1627 
1628   inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
1629                                               HeapObject** target_object) {
1630     const int size = old_object->Size();
1631     AllocationAlignment alignment = old_object->RequiredAlignment();
1632     AllocationResult allocation;
1633     if (space_to_allocate_ == NEW_SPACE) {
1634       if (size > kMaxLabObjectSize) {
1635         allocation =
1636             AllocateInNewSpace(size, alignment, kNonstickyBailoutOldSpace);
1637       } else {
1638         allocation = AllocateInLab(size, alignment);
1639       }
1640     }
1641     if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
1642       allocation = AllocateInOldSpace(size, alignment);
1643     }
1644     bool ok = allocation.To(target_object);
1645     DCHECK(ok);
1646     USE(ok);
1647     return space_to_allocate_;
1648   }
1649 
1650   inline bool NewLocalAllocationBuffer() {
1651     AllocationResult result =
1652         AllocateInNewSpace(kLabSize, kWordAligned, kStickyBailoutOldSpace);
1653     LocalAllocationBuffer saved_old_buffer = buffer_;
1654     buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
1655     if (buffer_.IsValid()) {
1656       buffer_.TryMerge(&saved_old_buffer);
1657       return true;
1658     }
1659     return false;
1660   }
1661 
1662   inline AllocationResult AllocateInNewSpace(int size_in_bytes,
1663                                              AllocationAlignment alignment,
1664                                              NewSpaceAllocationMode mode) {
1665     AllocationResult allocation =
1666         heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
1667     if (allocation.IsRetry()) {
1668       if (!heap_->new_space()->AddFreshPageSynchronized()) {
1669         if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
1670       } else {
1671         allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
1672                                                                  alignment);
1673         if (allocation.IsRetry()) {
1674           if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
1675         }
1676       }
1677     }
1678     return allocation;
1679   }
1680 
1681   inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1682                                              AllocationAlignment alignment) {
1683     AllocationResult allocation =
1684         heap_->old_space()->AllocateRaw(size_in_bytes, alignment);
1685     if (allocation.IsRetry()) {
1686       FatalProcessOutOfMemory(
1687           "MarkCompactCollector: semi-space copy, fallback in old gen\n");
1688     }
1689     return allocation;
1690   }
1691 
1692   inline AllocationResult AllocateInLab(int size_in_bytes,
1693                                         AllocationAlignment alignment) {
1694     AllocationResult allocation;
1695     if (!buffer_.IsValid()) {
1696       if (!NewLocalAllocationBuffer()) {
1697         space_to_allocate_ = OLD_SPACE;
1698         return AllocationResult::Retry(OLD_SPACE);
1699       }
1700     }
1701     allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1702     if (allocation.IsRetry()) {
1703       if (!NewLocalAllocationBuffer()) {
1704         space_to_allocate_ = OLD_SPACE;
1705         return AllocationResult::Retry(OLD_SPACE);
1706       } else {
1707         allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1708         if (allocation.IsRetry()) {
1709           space_to_allocate_ = OLD_SPACE;
1710           return AllocationResult::Retry(OLD_SPACE);
1711         }
1712       }
1713     }
1714     return allocation;
1715   }
1716 
1717   LocalAllocationBuffer buffer_;
1718   AllocationSpace space_to_allocate_;
1719   intptr_t promoted_size_;
1720   intptr_t semispace_copied_size_;
1721   HashMap* local_pretenuring_feedback_;
1722 };
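
// A small standalone sketch (not V8 code, kept out of the build with #if 0) of
// the local-allocation-buffer strategy EvacuateNewSpaceVisitor uses above:
// small objects are bump-allocated from a thread-local chunk so that only
// chunk refills touch the shared space, while oversized objects and refill
// failures fall back to a slower shared path (old space in the collector).
// ToyLab and the refill callback are illustrative assumptions; the real code
// additionally handles alignment and merging of buffers.
#if 0
#include <cstddef>
#include <cstdint>
#include <functional>

class ToyLab {
 public:
  static const size_t kLabSize = 4 * 1024;      // refill granularity
  static const size_t kMaxLabObjectSize = 256;  // larger objects bypass the LAB

  // `refill` stands in for a shared allocator such as
  // NewSpace::AllocateRawSynchronized; it may fail and return nullptr.
  explicit ToyLab(std::function<uint8_t*(size_t)> refill)
      : refill_(std::move(refill)) {}

  // Returns a pointer into the LAB, or nullptr to signal "use the fallback".
  uint8_t* Allocate(size_t size) {
    if (size > kMaxLabObjectSize) return nullptr;  // would waste LAB space
    if (remaining_ < size && !Refill()) return nullptr;
    uint8_t* result = top_;
    top_ += size;
    remaining_ -= size;
    return result;
  }

 private:
  bool Refill() {
    uint8_t* chunk = refill_(kLabSize);
    if (chunk == nullptr) return false;
    top_ = chunk;
    remaining_ = kLabSize;
    return true;
  }

  std::function<uint8_t*(size_t)> refill_;
  uint8_t* top_ = nullptr;
  size_t remaining_ = 0;
};
#endif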
1723 
1724 
1725 class MarkCompactCollector::EvacuateOldSpaceVisitor final
1726     : public MarkCompactCollector::EvacuateVisitorBase {
1727  public:
1728   EvacuateOldSpaceVisitor(Heap* heap,
1729                           CompactionSpaceCollection* compaction_spaces,
1730                           SlotsBuffer** evacuation_slots_buffer)
1731       : EvacuateVisitorBase(heap, evacuation_slots_buffer),
1732         compaction_spaces_(compaction_spaces) {}
1733 
1734   bool Visit(HeapObject* object) override {
1735     CompactionSpace* target_space = compaction_spaces_->Get(
1736         Page::FromAddress(object->address())->owner()->identity());
1737     HeapObject* target_object = nullptr;
1738     if (TryEvacuateObject(target_space, object, &target_object)) {
1739       DCHECK(object->map_word().IsForwardingAddress());
1740       return true;
1741     }
1742     return false;
1743   }
1744 
1745  private:
1746   CompactionSpaceCollection* compaction_spaces_;
1747 };
1748 
1749 
1750 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
1751   PageIterator it(space);
1752   while (it.has_next()) {
1753     Page* p = it.next();
1754     DiscoverGreyObjectsOnPage(p);
1755     if (marking_deque()->IsFull()) return;
1756   }
1757 }
1758 
1759 
1760 void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
1761   NewSpace* space = heap()->new_space();
1762   NewSpacePageIterator it(space->bottom(), space->top());
1763   while (it.has_next()) {
1764     NewSpacePage* page = it.next();
1765     DiscoverGreyObjectsOnPage(page);
1766     if (marking_deque()->IsFull()) return;
1767   }
1768 }
1769 
1770 
1771 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1772   Object* o = *p;
1773   if (!o->IsHeapObject()) return false;
1774   HeapObject* heap_object = HeapObject::cast(o);
1775   MarkBit mark = Marking::MarkBitFrom(heap_object);
1776   return Marking::IsWhite(mark);
1777 }
1778 
1779 
1780 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
1781                                                         Object** p) {
1782   Object* o = *p;
1783   DCHECK(o->IsHeapObject());
1784   HeapObject* heap_object = HeapObject::cast(o);
1785   MarkBit mark = Marking::MarkBitFrom(heap_object);
1786   return Marking::IsWhite(mark);
1787 }
1788 
1789 
1790 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
1791   StringTable* string_table = heap()->string_table();
1792   // Mark the string table itself.
1793   MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
1794   if (Marking::IsWhite(string_table_mark)) {
1795     // The string table could already be marked by visiting the handles list.
1796     SetMark(string_table, string_table_mark);
1797   }
1798   // Explicitly mark the prefix.
1799   string_table->IteratePrefix(visitor);
1800   ProcessMarkingDeque();
1801 }
1802 
1803 
1804 void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
1805   MarkBit mark_bit = Marking::MarkBitFrom(site);
1806   SetMark(site, mark_bit);
1807 }
1808 
1809 
1810 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
1811   // Mark the heap roots including global variables, stack variables,
1812   // etc., and all objects reachable from them.
1813   heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
1814 
1815   // Handle the string table specially.
1816   MarkStringTable(visitor);
1817 
1818   // There may be overflowed objects in the heap.  Visit them now.
1819   while (marking_deque_.overflowed()) {
1820     RefillMarkingDeque();
1821     EmptyMarkingDeque();
1822   }
1823 }
1824 
1825 
1826 void MarkCompactCollector::MarkImplicitRefGroups(
1827     MarkObjectFunction mark_object) {
1828   List<ImplicitRefGroup*>* ref_groups =
1829       isolate()->global_handles()->implicit_ref_groups();
1830 
1831   int last = 0;
1832   for (int i = 0; i < ref_groups->length(); i++) {
1833     ImplicitRefGroup* entry = ref_groups->at(i);
1834     DCHECK(entry != NULL);
1835 
1836     if (!IsMarked(*entry->parent)) {
1837       (*ref_groups)[last++] = entry;
1838       continue;
1839     }
1840 
1841     Object*** children = entry->children;
1842     // A parent object is marked, so mark all child heap objects.
1843     for (size_t j = 0; j < entry->length; ++j) {
1844       if ((*children[j])->IsHeapObject()) {
1845         mark_object(heap(), HeapObject::cast(*children[j]));
1846       }
1847     }
1848 
1849     // Once the entire group has been marked, dispose it because it's
1850     // not needed anymore.
1851     delete entry;
1852   }
1853   ref_groups->Rewind(last);
1854 }
1855 
1856 
1857 // Mark all objects reachable from the objects on the marking stack.
1858 // Before: the marking stack contains zero or more heap object pointers.
1859 // After: the marking stack is empty, and all objects reachable from the
1860 // marking stack have been marked, or are overflowed in the heap.
1861 void MarkCompactCollector::EmptyMarkingDeque() {
1862   Map* filler_map = heap_->one_pointer_filler_map();
1863   while (!marking_deque_.IsEmpty()) {
1864     HeapObject* object = marking_deque_.Pop();
1865     // Explicitly skip one word fillers. Incremental markbit patterns are
1866     // correct only for objects that occupy at least two words.
1867     Map* map = object->map();
1868     if (map == filler_map) continue;
1869 
1870     DCHECK(object->IsHeapObject());
1871     DCHECK(heap()->Contains(object));
1872     DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object)));
1873 
1874     MarkBit map_mark = Marking::MarkBitFrom(map);
1875     MarkObject(map, map_mark);
1876 
1877     MarkCompactMarkingVisitor::IterateBody(map, object);
1878   }
1879 }
1880 
1881 
1882 // Sweep the heap for overflowed objects, clear their overflow bits, and
1883 // push them on the marking stack.  Stop early if the marking stack fills
1884 // before sweeping completes.  If sweeping completes, there are no remaining
1885 // overflowed objects in the heap so the overflow flag on the marking stack
1886 // is cleared.
1887 void MarkCompactCollector::RefillMarkingDeque() {
1888   isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
1889   DCHECK(marking_deque_.overflowed());
1890 
1891   DiscoverGreyObjectsInNewSpace();
1892   if (marking_deque_.IsFull()) return;
1893 
1894   DiscoverGreyObjectsInSpace(heap()->old_space());
1895   if (marking_deque_.IsFull()) return;
1896 
1897   DiscoverGreyObjectsInSpace(heap()->code_space());
1898   if (marking_deque_.IsFull()) return;
1899 
1900   DiscoverGreyObjectsInSpace(heap()->map_space());
1901   if (marking_deque_.IsFull()) return;
1902 
1903   LargeObjectIterator lo_it(heap()->lo_space());
1904   DiscoverGreyObjectsWithIterator(&lo_it);
1905   if (marking_deque_.IsFull()) return;
1906 
1907   marking_deque_.ClearOverflowed();
1908 }
1909 
1910 
1911 // Mark all objects reachable (transitively) from objects on the marking
1912 // stack.  Before: the marking stack contains zero or more heap object
1913 // pointers.  After: the marking stack is empty and there are no overflowed
1914 // objects in the heap.
1915 void MarkCompactCollector::ProcessMarkingDeque() {
1916   EmptyMarkingDeque();
1917   while (marking_deque_.overflowed()) {
1918     RefillMarkingDeque();
1919     EmptyMarkingDeque();
1920   }
1921 }
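
// A standalone sketch (not V8 code, excluded from the build with #if 0) of the
// overflow protocol implemented by EmptyMarkingDeque, RefillMarkingDeque and
// ProcessMarkingDeque above: a bounded worklist sets an overflow flag instead
// of growing, and overflowed objects simply stay grey in the heap until a
// rescan rediscovers them. Node, ToyMarker and the colors are illustrative
// assumptions, not V8 types.
#if 0
#include <cstddef>
#include <vector>

enum Color { kWhite, kGrey, kBlack };

struct Node {
  Color color = kWhite;
  std::vector<Node*> children;
};

class ToyMarker {
 public:
  ToyMarker(size_t capacity, const std::vector<Node*>& heap)
      : capacity_(capacity), heap_(heap) {}

  void MarkRoot(Node* root) {
    MaybePush(root);
    Process();
  }

 private:
  // Grey the node and try to push it; on a full deque just record the
  // overflow -- the node stays grey and is found again by the rescan.
  void MaybePush(Node* n) {
    if (n->color != kWhite) return;
    n->color = kGrey;
    if (deque_.size() < capacity_) {
      deque_.push_back(n);
    } else {
      overflowed_ = true;
    }
  }

  // Pop and blacken nodes, greying their children (EmptyMarkingDeque).
  void Drain() {
    while (!deque_.empty()) {
      Node* n = deque_.back();
      deque_.pop_back();
      n->color = kBlack;
      for (Node* child : n->children) MaybePush(child);
    }
  }

  // Rescan the heap for grey nodes that fell off the deque (the analogue of
  // RefillMarkingDeque) and repeat until no overflow remains
  // (ProcessMarkingDeque).
  void Process() {
    Drain();
    while (overflowed_) {
      overflowed_ = false;
      for (Node* n : heap_) {
        if (n->color != kGrey) continue;
        if (deque_.size() < capacity_) {
          deque_.push_back(n);
        } else {
          overflowed_ = true;
          break;
        }
      }
      Drain();
    }
  }

  size_t capacity_;
  std::vector<Node*> heap_;
  bool overflowed_ = false;
  std::vector<Node*> deque_;
};
#endif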
1922 
1923 
1924 // Mark all objects reachable (transitively) from objects on the marking
1925 // stack including references only considered in the atomic marking pause.
1926 void MarkCompactCollector::ProcessEphemeralMarking(
1927     ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
1928   bool work_to_do = true;
1929   DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed());
1930   while (work_to_do) {
1931     if (!only_process_harmony_weak_collections) {
1932       isolate()->global_handles()->IterateObjectGroups(
1933           visitor, &IsUnmarkedHeapObjectWithHeap);
1934       MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
1935     }
1936     ProcessWeakCollections();
1937     work_to_do = !marking_deque_.IsEmpty();
1938     ProcessMarkingDeque();
1939   }
1940 }
1941 
1942 
1943 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
1944   for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
1945        !it.done(); it.Advance()) {
1946     if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
1947       return;
1948     }
1949     if (it.frame()->type() == StackFrame::OPTIMIZED) {
1950       Code* code = it.frame()->LookupCode();
1951       if (!code->CanDeoptAt(it.frame()->pc())) {
1952         Code::BodyDescriptor::IterateBody(code, visitor);
1953       }
1954       ProcessMarkingDeque();
1955       return;
1956     }
1957   }
1958 }
1959 
1960 
1961 void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
1962   DCHECK(!marking_deque_.in_use());
1963   if (marking_deque_memory_ == NULL) {
1964     marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
1965     marking_deque_memory_committed_ = 0;
1966   }
1967   if (marking_deque_memory_ == NULL) {
1968     V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
1969   }
1970 }
1971 
1972 
1973 void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
1974   // If the marking deque is too small, we try to commit a bigger backing
1975   // store. If that fails, make do with a smaller one.
1976   CHECK(!marking_deque_.in_use());
1977   for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
1978     base::VirtualMemory* memory = marking_deque_memory_;
1979     size_t currently_committed = marking_deque_memory_committed_;
1980 
1981     if (currently_committed == size) return;
1982 
1983     if (currently_committed > size) {
1984       bool success = marking_deque_memory_->Uncommit(
1985           reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
1986           currently_committed - size);
1987       if (success) {
1988         marking_deque_memory_committed_ = size;
1989         return;
1990       }
1991       UNREACHABLE();
1992     }
1993 
1994     bool success = memory->Commit(
1995         reinterpret_cast<Address>(memory->address()) + currently_committed,
1996         size - currently_committed,
1997         false);  // Not executable.
1998     if (success) {
1999       marking_deque_memory_committed_ = size;
2000       return;
2001     }
2002   }
2003   V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
2004 }
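
// A minimal sketch (not V8 code, excluded with #if 0) of the back-off pattern
// used above: try to commit the preferred size and halve on failure down to a
// minimum, reporting out-of-memory only if even the minimum fails. TryCommit
// is an assumed callback standing in for base::VirtualMemory::Commit; the real
// loop above also uncommits memory when shrinking an already committed deque.
#if 0
#include <cstddef>
#include <functional>

inline size_t CommitWithBackoff(size_t max_size, size_t min_size,
                                const std::function<bool(size_t)>& TryCommit) {
  for (size_t size = max_size; size >= min_size && size > 0; size >>= 1) {
    if (TryCommit(size)) return size;  // committed this much
  }
  return 0;  // caller treats 0 as out-of-memory
}
#endif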
2005 
2006 
2007 void MarkCompactCollector::InitializeMarkingDeque() {
2008   DCHECK(!marking_deque_.in_use());
2009   DCHECK(marking_deque_memory_committed_ > 0);
2010   Address addr = static_cast<Address>(marking_deque_memory_->address());
2011   size_t size = marking_deque_memory_committed_;
2012   if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
2013   marking_deque_.Initialize(addr, addr + size);
2014 }
2015 
2016 
2017 void MarkingDeque::Initialize(Address low, Address high) {
2018   DCHECK(!in_use_);
2019   HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
2020   HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
2021   array_ = obj_low;
2022   mask_ = base::bits::RoundDownToPowerOfTwo32(
2023               static_cast<uint32_t>(obj_high - obj_low)) -
2024           1;
2025   top_ = bottom_ = 0;
2026   overflowed_ = false;
2027   in_use_ = true;
2028 }
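
// Illustrative note (not V8 code, excluded with #if 0): Initialize() rounds
// the element count down to a power of two so that deque indices can wrap with
// a cheap bitwise AND against mask_ instead of a modulo. With an assumed
// power-of-two capacity the wrap looks like this:
#if 0
#include <cassert>
#include <cstdint>

inline uint32_t NextIndex(uint32_t index, uint32_t capacity) {
  assert(capacity != 0 && (capacity & (capacity - 1)) == 0);  // power of two
  uint32_t mask = capacity - 1;  // e.g. 8 -> 0b0111
  return (index + 1) & mask;     // 7 wraps to 0
}
#endif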
2029 
2030 
2031 void MarkingDeque::Uninitialize(bool aborting) {
2032   if (!aborting) {
2033     DCHECK(IsEmpty());
2034     DCHECK(!overflowed_);
2035   }
2036   DCHECK(in_use_);
2037   top_ = bottom_ = 0xdecbad;
2038   in_use_ = false;
2039 }
2040 
2041 
2042 void MarkCompactCollector::MarkLiveObjects() {
2043   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
2044   double start_time = 0.0;
2045   if (FLAG_print_cumulative_gc_stat) {
2046     start_time = heap_->MonotonicallyIncreasingTimeInMs();
2047   }
2048   // The recursive GC marker detects when it is nearing stack overflow,
2049   // and switches to a different marking system.  JS interrupts interfere
2050   // with the C stack limit check.
2051   PostponeInterruptsScope postpone(isolate());
2052 
2053   {
2054     GCTracer::Scope gc_scope(heap()->tracer(),
2055                              GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
2056     IncrementalMarking* incremental_marking = heap_->incremental_marking();
2057     if (was_marked_incrementally_) {
2058       incremental_marking->Finalize();
2059     } else {
2060       // Abort any pending incremental activities e.g. incremental sweeping.
2061       incremental_marking->Stop();
2062       if (marking_deque_.in_use()) {
2063         marking_deque_.Uninitialize(true);
2064       }
2065     }
2066   }
2067 
2068 #ifdef DEBUG
2069   DCHECK(state_ == PREPARE_GC);
2070   state_ = MARK_LIVE_OBJECTS;
2071 #endif
2072 
2073   EnsureMarkingDequeIsCommittedAndInitialize(
2074       MarkCompactCollector::kMaxMarkingDequeSize);
2075 
2076   {
2077     GCTracer::Scope gc_scope(heap()->tracer(),
2078                              GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
2079     PrepareForCodeFlushing();
2080   }
2081 
2082   RootMarkingVisitor root_visitor(heap());
2083 
2084   {
2085     GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
2086     MarkRoots(&root_visitor);
2087     ProcessTopOptimizedFrame(&root_visitor);
2088   }
2089 
2090   {
2091     GCTracer::Scope gc_scope(heap()->tracer(),
2092                              GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
2093 
2094     // The objects reachable from the roots are marked, yet unreachable
2095     // objects are unmarked.  Mark objects reachable due to host
2096     // application specific logic or through Harmony weak maps.
2097     ProcessEphemeralMarking(&root_visitor, false);
2098 
2099     // The objects reachable from the roots, weak maps or object groups
2100     // are marked. Objects pointed to only by weak global handles cannot be
2101     // immediately reclaimed. Instead, we have to mark them as pending and mark
2102     // objects reachable from them.
2103     //
2104     // First we identify nonlive weak handles and mark them as pending
2105     // destruction.
2106     heap()->isolate()->global_handles()->IdentifyWeakHandles(
2107         &IsUnmarkedHeapObject);
2108     // Then we mark the objects.
2109     heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2110     ProcessMarkingDeque();
2111 
2112     // Repeat Harmony weak maps marking to mark unmarked objects reachable from
2113     // the weak roots we just marked as pending destruction.
2114     //
2115     // We only process harmony collections, as all object groups have been fully
2116     // processed and no weakly reachable node can discover new object groups.
2117     ProcessEphemeralMarking(&root_visitor, true);
2118   }
2119 
2120   if (FLAG_print_cumulative_gc_stat) {
2121     heap_->tracer()->AddMarkingTime(heap_->MonotonicallyIncreasingTimeInMs() -
2122                                     start_time);
2123   }
2124   if (FLAG_track_gc_object_stats) {
2125     if (FLAG_trace_gc_object_stats) {
2126       heap()->object_stats_->TraceObjectStats();
2127     }
2128     heap()->object_stats_->CheckpointObjectStats();
2129   }
2130 }
2131 
2132 
2133 void MarkCompactCollector::ClearNonLiveReferences() {
2134   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
2135 
2136   {
2137     GCTracer::Scope gc_scope(heap()->tracer(),
2138                              GCTracer::Scope::MC_CLEAR_STRING_TABLE);
2139 
2140     // Prune the string table removing all strings only pointed to by the
2141     // string table.  Cannot use string_table() here because the string
2142     // table is marked.
2143     StringTable* string_table = heap()->string_table();
2144     InternalizedStringTableCleaner internalized_visitor(heap());
2145     string_table->IterateElements(&internalized_visitor);
2146     string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
2147 
2148     ExternalStringTableCleaner external_visitor(heap());
2149     heap()->external_string_table_.Iterate(&external_visitor);
2150     heap()->external_string_table_.CleanUp();
2151   }
2152 
2153   {
2154     GCTracer::Scope gc_scope(heap()->tracer(),
2155                              GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
2156     // Process the weak references.
2157     MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2158     heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
2159   }
2160 
2161   {
2162     GCTracer::Scope gc_scope(heap()->tracer(),
2163                              GCTracer::Scope::MC_CLEAR_GLOBAL_HANDLES);
2164 
2165     // Remove object groups after marking phase.
2166     heap()->isolate()->global_handles()->RemoveObjectGroups();
2167     heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2168   }
2169 
2170   // Flush code from collected candidates.
2171   if (is_code_flushing_enabled()) {
2172     GCTracer::Scope gc_scope(heap()->tracer(),
2173                              GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
2174     code_flusher_->ProcessCandidates();
2175   }
2176 
2177 
2178   DependentCode* dependent_code_list;
2179   Object* non_live_map_list;
2180   ClearWeakCells(&non_live_map_list, &dependent_code_list);
2181 
2182   {
2183     GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
2184     ClearSimpleMapTransitions(non_live_map_list);
2185     ClearFullMapTransitions();
2186   }
2187 
2188   MarkDependentCodeForDeoptimization(dependent_code_list);
2189 
2190   ClearWeakCollections();
2191 
2192   ClearInvalidStoreAndSlotsBufferEntries();
2193 }
2194 
2195 
2196 void MarkCompactCollector::MarkDependentCodeForDeoptimization(
2197     DependentCode* list_head) {
2198   GCTracer::Scope gc_scope(heap()->tracer(),
2199                            GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
2200   Isolate* isolate = this->isolate();
2201   DependentCode* current = list_head;
2202   while (current->length() > 0) {
2203     have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
2204         isolate, DependentCode::kWeakCodeGroup);
2205     current = current->next_link();
2206   }
2207 
2208   WeakHashTable* table = heap_->weak_object_to_code_table();
2209   uint32_t capacity = table->Capacity();
2210   for (uint32_t i = 0; i < capacity; i++) {
2211     uint32_t key_index = table->EntryToIndex(i);
2212     Object* key = table->get(key_index);
2213     if (!table->IsKey(key)) continue;
2214     uint32_t value_index = table->EntryToValueIndex(i);
2215     Object* value = table->get(value_index);
2216     DCHECK(key->IsWeakCell());
2217     if (WeakCell::cast(key)->cleared()) {
2218       have_code_to_deoptimize_ |=
2219           DependentCode::cast(value)->MarkCodeForDeoptimization(
2220               isolate, DependentCode::kWeakCodeGroup);
2221       table->set(key_index, heap_->the_hole_value());
2222       table->set(value_index, heap_->the_hole_value());
2223       table->ElementRemoved();
2224     }
2225   }
2226 }
2227 
2228 
2229 void MarkCompactCollector::ClearSimpleMapTransitions(
2230     Object* non_live_map_list) {
2231   Object* the_hole_value = heap()->the_hole_value();
2232   Object* weak_cell_obj = non_live_map_list;
2233   while (weak_cell_obj != Smi::FromInt(0)) {
2234     WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
2235     Map* map = Map::cast(weak_cell->value());
2236     DCHECK(Marking::IsWhite(Marking::MarkBitFrom(map)));
2237     Object* potential_parent = map->constructor_or_backpointer();
2238     if (potential_parent->IsMap()) {
2239       Map* parent = Map::cast(potential_parent);
2240       if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent)) &&
2241           parent->raw_transitions() == weak_cell) {
2242         ClearSimpleMapTransition(parent, map);
2243       }
2244     }
2245     weak_cell->clear();
2246     weak_cell_obj = weak_cell->next();
2247     weak_cell->clear_next(the_hole_value);
2248   }
2249 }
2250 
2251 
2252 void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
2253                                                     Map* dead_transition) {
2254   // A previously existing simple transition (stored in a WeakCell) is going
2255   // to be cleared. Clear the useless cell pointer, and take ownership
2256   // of the descriptor array.
2257   map->set_raw_transitions(Smi::FromInt(0));
2258   int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2259   DescriptorArray* descriptors = map->instance_descriptors();
2260   if (descriptors == dead_transition->instance_descriptors() &&
2261       number_of_own_descriptors > 0) {
2262     TrimDescriptorArray(map, descriptors);
2263     DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2264     map->set_owns_descriptors(true);
2265   }
2266 }
2267 
2268 
2269 void MarkCompactCollector::ClearFullMapTransitions() {
2270   HeapObject* undefined = heap()->undefined_value();
2271   Object* obj = heap()->encountered_transition_arrays();
2272   while (obj != Smi::FromInt(0)) {
2273     TransitionArray* array = TransitionArray::cast(obj);
2274     int num_transitions = array->number_of_entries();
2275     DCHECK_EQ(TransitionArray::NumberOfTransitions(array), num_transitions);
2276     if (num_transitions > 0) {
2277       Map* map = array->GetTarget(0);
2278       Map* parent = Map::cast(map->constructor_or_backpointer());
2279       bool parent_is_alive =
2280           Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent));
2281       DescriptorArray* descriptors =
2282           parent_is_alive ? parent->instance_descriptors() : nullptr;
2283       bool descriptors_owner_died =
2284           CompactTransitionArray(parent, array, descriptors);
2285       if (descriptors_owner_died) {
2286         TrimDescriptorArray(parent, descriptors);
2287       }
2288     }
2289     obj = array->next_link();
2290     array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2291   }
2292   heap()->set_encountered_transition_arrays(Smi::FromInt(0));
2293 }
2294 
2295 
2296 bool MarkCompactCollector::CompactTransitionArray(
2297     Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
2298   int num_transitions = transitions->number_of_entries();
2299   bool descriptors_owner_died = false;
2300   int transition_index = 0;
2301   // Compact all live transitions to the left.
2302   for (int i = 0; i < num_transitions; ++i) {
2303     Map* target = transitions->GetTarget(i);
2304     DCHECK_EQ(target->constructor_or_backpointer(), map);
2305     if (Marking::IsWhite(Marking::MarkBitFrom(target))) {
2306       if (descriptors != nullptr &&
2307           target->instance_descriptors() == descriptors) {
2308         descriptors_owner_died = true;
2309       }
2310     } else {
2311       if (i != transition_index) {
2312         Name* key = transitions->GetKey(i);
2313         transitions->SetKey(transition_index, key);
2314         Object** key_slot = transitions->GetKeySlot(transition_index);
2315         RecordSlot(transitions, key_slot, key);
2316         // Target slots do not need to be recorded since maps are not compacted.
2317         transitions->SetTarget(transition_index, transitions->GetTarget(i));
2318       }
2319       transition_index++;
2320     }
2321   }
2322   // If there are no transitions to be cleared, return.
2323   if (transition_index == num_transitions) {
2324     DCHECK(!descriptors_owner_died);
2325     return false;
2326   }
2327   // Note that we never eliminate a transition array, though we might right-trim
2328   // such that number_of_transitions() == 0. If this assumption changes,
2329   // TransitionArray::Insert() will need to deal with the case that a transition
2330   // array disappeared during GC.
2331   int trim = TransitionArray::Capacity(transitions) - transition_index;
2332   if (trim > 0) {
2333     heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2334         transitions, trim * TransitionArray::kTransitionSize);
2335     transitions->SetNumberOfTransitions(transition_index);
2336   }
2337   return descriptors_owner_died;
2338 }
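
// A standalone sketch (not V8 code, excluded with #if 0) of the compaction
// idiom used by CompactTransitionArray above and by MarkImplicitRefGroups
// earlier: live entries are copied left over dead ones, their relative order
// is preserved, and the dead tail is trimmed afterwards. IsLive is an assumed
// predicate standing in for the mark-bit check; resize() plays the role of
// RightTrimFixedArray.
#if 0
#include <cstddef>
#include <vector>

template <typename T, typename Pred>
void CompactLeft(std::vector<T>* entries, Pred IsLive) {
  size_t live_index = 0;
  for (size_t i = 0; i < entries->size(); ++i) {
    if (IsLive((*entries)[i])) {
      if (i != live_index) (*entries)[live_index] = (*entries)[i];
      ++live_index;
    }
  }
  entries->resize(live_index);  // drop the dead tail
}
#endif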
2339 
2340 
2341 void MarkCompactCollector::TrimDescriptorArray(Map* map,
2342                                                DescriptorArray* descriptors) {
2343   int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2344   if (number_of_own_descriptors == 0) {
2345     DCHECK(descriptors == heap_->empty_descriptor_array());
2346     return;
2347   }
2348 
2349   int number_of_descriptors = descriptors->number_of_descriptors_storage();
2350   int to_trim = number_of_descriptors - number_of_own_descriptors;
2351   if (to_trim > 0) {
2352     heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2353         descriptors, to_trim * DescriptorArray::kDescriptorSize);
2354     descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
2355 
2356     if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
2357     descriptors->Sort();
2358 
2359     if (FLAG_unbox_double_fields) {
2360       LayoutDescriptor* layout_descriptor = map->layout_descriptor();
2361       layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
2362                                                   number_of_own_descriptors);
2363       SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
2364     }
2365   }
2366   DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2367   map->set_owns_descriptors(true);
2368 }
2369 
2370 
2371 void MarkCompactCollector::TrimEnumCache(Map* map,
2372                                          DescriptorArray* descriptors) {
2373   int live_enum = map->EnumLength();
2374   if (live_enum == kInvalidEnumCacheSentinel) {
2375     live_enum =
2376         map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
2377   }
2378   if (live_enum == 0) return descriptors->ClearEnumCache();
2379 
2380   FixedArray* enum_cache = descriptors->GetEnumCache();
2381 
2382   int to_trim = enum_cache->length() - live_enum;
2383   if (to_trim <= 0) return;
2384   heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2385       descriptors->GetEnumCache(), to_trim);
2386 
2387   if (!descriptors->HasEnumIndicesCache()) return;
2388   FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
2389   heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(enum_indices_cache,
2390                                                           to_trim);
2391 }
2392 
2393 
2394 void MarkCompactCollector::ProcessWeakCollections() {
2395   Object* weak_collection_obj = heap()->encountered_weak_collections();
2396   while (weak_collection_obj != Smi::FromInt(0)) {
2397     JSWeakCollection* weak_collection =
2398         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2399     DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2400     if (weak_collection->table()->IsHashTable()) {
2401       ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2402       for (int i = 0; i < table->Capacity(); i++) {
2403         if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2404           Object** key_slot =
2405               table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
2406           RecordSlot(table, key_slot, *key_slot);
2407           Object** value_slot =
2408               table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
2409           MarkCompactMarkingVisitor::MarkObjectByPointer(this, table,
2410                                                          value_slot);
2411         }
2412       }
2413     }
2414     weak_collection_obj = weak_collection->next();
2415   }
2416 }
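
// A standalone sketch (not V8 code, excluded with #if 0) of the "encountered"
// list idiom used by the weak-collection and weak-cell code in this file: the
// marker threads a singly linked list through a next field of the objects
// themselves, terminated by a sentinel, and the clearing pass walks the list,
// unlinks each entry, and resets the head. ToyEntry, Encounter and
// ProcessEncountered are illustrative names; V8 uses Smi::FromInt(0) as the
// sentinel and undefined for the unlinked next field.
#if 0
struct ToyEntry {
  ToyEntry* next = nullptr;
  bool cleared = false;
};

ToyEntry* const kEndSentinel = nullptr;
ToyEntry* g_encountered_head = kEndSentinel;

// Marking-time hook: push an entry onto the intrusive list.
inline void Encounter(ToyEntry* entry) {
  entry->next = g_encountered_head;
  g_encountered_head = entry;
}

// Clearing-time walk: visit every encountered entry, unlink it, and reset the
// head so the next GC cycle starts from an empty list.
inline void ProcessEncountered() {
  ToyEntry* current = g_encountered_head;
  while (current != kEndSentinel) {
    ToyEntry* next = current->next;
    current->cleared = true;  // stand-in for dropping dead entries
    current->next = nullptr;  // analogue of set_next(undefined)
    current = next;
  }
  g_encountered_head = kEndSentinel;
}
#endif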
2417 
2418 
2419 void MarkCompactCollector::ClearWeakCollections() {
2420   GCTracer::Scope gc_scope(heap()->tracer(),
2421                            GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
2422   Object* weak_collection_obj = heap()->encountered_weak_collections();
2423   while (weak_collection_obj != Smi::FromInt(0)) {
2424     JSWeakCollection* weak_collection =
2425         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2426     DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2427     if (weak_collection->table()->IsHashTable()) {
2428       ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2429       for (int i = 0; i < table->Capacity(); i++) {
2430         HeapObject* key = HeapObject::cast(table->KeyAt(i));
2431         if (!MarkCompactCollector::IsMarked(key)) {
2432           table->RemoveEntry(i);
2433         }
2434       }
2435     }
2436     weak_collection_obj = weak_collection->next();
2437     weak_collection->set_next(heap()->undefined_value());
2438   }
2439   heap()->set_encountered_weak_collections(Smi::FromInt(0));
2440 }
2441 
2442 
2443 void MarkCompactCollector::AbortWeakCollections() {
2444   Object* weak_collection_obj = heap()->encountered_weak_collections();
2445   while (weak_collection_obj != Smi::FromInt(0)) {
2446     JSWeakCollection* weak_collection =
2447         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2448     weak_collection_obj = weak_collection->next();
2449     weak_collection->set_next(heap()->undefined_value());
2450   }
2451   heap()->set_encountered_weak_collections(Smi::FromInt(0));
2452 }
2453 
2454 
2455 void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
2456                                           DependentCode** dependent_code_list) {
2457   Heap* heap = this->heap();
2458   GCTracer::Scope gc_scope(heap->tracer(),
2459                            GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
2460   Object* weak_cell_obj = heap->encountered_weak_cells();
2461   Object* the_hole_value = heap->the_hole_value();
2462   DependentCode* dependent_code_head =
2463       DependentCode::cast(heap->empty_fixed_array());
2464   Object* non_live_map_head = Smi::FromInt(0);
2465   while (weak_cell_obj != Smi::FromInt(0)) {
2466     WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
2467     Object* next_weak_cell = weak_cell->next();
2468     bool clear_value = true;
2469     bool clear_next = true;
2470     // We do not insert cleared weak cells into the list, so the value
2471     // cannot be a Smi here.
2472     HeapObject* value = HeapObject::cast(weak_cell->value());
2473     if (!MarkCompactCollector::IsMarked(value)) {
2474       // Cells for new-space objects embedded in optimized code are wrapped in
2475       // WeakCell and put into Heap::weak_object_to_code_table.
2476       // Such cells do not have any strong references but we want to keep them
2477       // alive as long as the cell value is alive.
2478       // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
2479       if (value->IsCell()) {
2480         Object* cell_value = Cell::cast(value)->value();
2481         if (cell_value->IsHeapObject() &&
2482             MarkCompactCollector::IsMarked(HeapObject::cast(cell_value))) {
2483           // Resurrect the cell.
2484           MarkBit mark = Marking::MarkBitFrom(value);
2485           SetMark(value, mark);
2486           Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
2487           RecordSlot(value, slot, *slot);
2488           slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
2489           RecordSlot(weak_cell, slot, *slot);
2490           clear_value = false;
2491         }
2492       }
2493       if (value->IsMap()) {
2494         // The map is non-live.
2495         Map* map = Map::cast(value);
2496         // Add dependent code to the dependent_code_list.
2497         DependentCode* candidate = map->dependent_code();
2498         // We rely on the fact that the weak code group comes first.
2499         STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
2500         if (candidate->length() > 0 &&
2501             candidate->group() == DependentCode::kWeakCodeGroup) {
2502           candidate->set_next_link(dependent_code_head);
2503           dependent_code_head = candidate;
2504         }
2505         // Add the weak cell to the non_live_map list.
2506         weak_cell->set_next(non_live_map_head);
2507         non_live_map_head = weak_cell;
2508         clear_value = false;
2509         clear_next = false;
2510       }
2511     } else {
2512       // The value of the weak cell is alive.
2513       Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
2514       RecordSlot(weak_cell, slot, *slot);
2515       clear_value = false;
2516     }
2517     if (clear_value) {
2518       weak_cell->clear();
2519     }
2520     if (clear_next) {
2521       weak_cell->clear_next(the_hole_value);
2522     }
2523     weak_cell_obj = next_weak_cell;
2524   }
2525   heap->set_encountered_weak_cells(Smi::FromInt(0));
2526   *non_live_map_list = non_live_map_head;
2527   *dependent_code_list = dependent_code_head;
2528 }
2529 
2530 
2531 void MarkCompactCollector::AbortWeakCells() {
2532   Object* the_hole_value = heap()->the_hole_value();
2533   Object* weak_cell_obj = heap()->encountered_weak_cells();
2534   while (weak_cell_obj != Smi::FromInt(0)) {
2535     WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
2536     weak_cell_obj = weak_cell->next();
2537     weak_cell->clear_next(the_hole_value);
2538   }
2539   heap()->set_encountered_weak_cells(Smi::FromInt(0));
2540 }
2541 
2542 
2543 void MarkCompactCollector::AbortTransitionArrays() {
2544   HeapObject* undefined = heap()->undefined_value();
2545   Object* obj = heap()->encountered_transition_arrays();
2546   while (obj != Smi::FromInt(0)) {
2547     TransitionArray* array = TransitionArray::cast(obj);
2548     obj = array->next_link();
2549     array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2550   }
2551   heap()->set_encountered_transition_arrays(Smi::FromInt(0));
2552 }
2553 
2554 
2555 void MarkCompactCollector::RecordMigratedSlot(
2556     Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) {
2557   // When parallel compaction is in progress, store and slots buffer entries
2558   // require synchronization.
2559   if (heap_->InNewSpace(value)) {
2560     if (compaction_in_progress_) {
2561       heap_->store_buffer()->MarkSynchronized(slot);
2562     } else {
2563       heap_->store_buffer()->Mark(slot);
2564     }
2565   } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2566     SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
2567                        reinterpret_cast<Object**>(slot),
2568                        SlotsBuffer::IGNORE_OVERFLOW);
2569   }
2570 }
2571 
2572 
2573 void MarkCompactCollector::RecordMigratedCodeEntrySlot(
2574     Address code_entry, Address code_entry_slot,
2575     SlotsBuffer** evacuation_slots_buffer) {
2576   if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2577     SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
2578                        SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
2579                        SlotsBuffer::IGNORE_OVERFLOW);
2580   }
2581 }
2582 
2583 
2584 void MarkCompactCollector::RecordMigratedCodeObjectSlot(
2585     Address code_object, SlotsBuffer** evacuation_slots_buffer) {
2586   SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
2587                      SlotsBuffer::RELOCATED_CODE_OBJECT, code_object,
2588                      SlotsBuffer::IGNORE_OVERFLOW);
2589 }
2590 
2591 
2592 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
2593   if (RelocInfo::IsCodeTarget(rmode)) {
2594     return SlotsBuffer::CODE_TARGET_SLOT;
2595   } else if (RelocInfo::IsCell(rmode)) {
2596     return SlotsBuffer::CELL_TARGET_SLOT;
2597   } else if (RelocInfo::IsEmbeddedObject(rmode)) {
2598     return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
2599   } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
2600     return SlotsBuffer::DEBUG_TARGET_SLOT;
2601   }
2602   UNREACHABLE();
2603   return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
2604 }
2605 
2606 
2607 static inline SlotsBuffer::SlotType DecodeSlotType(
2608     SlotsBuffer::ObjectSlot slot) {
2609   return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
2610 }
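
// Illustrative sketch (not V8 code, excluded with #if 0) of the encoding that
// DecodeSlotType above undoes: a typed slot appears to be stored as a pair of
// buffer entries, a small integer tag smuggled through a pointer-sized slot
// followed by the address it refers to. This is an assumption inferred from
// the reinterpret_cast here and the (type, address) AddTo calls, not a
// definitive description of SlotsBuffer's layout.
#if 0
#include <cstdint>
#include <vector>

enum ToySlotType { kCodeTargetSlot = 0, kEmbeddedObjectSlot = 1 };
using ToySlot = void*;

inline void AddTypedSlot(std::vector<ToySlot>* buffer, ToySlotType type,
                         void* addr) {
  buffer->push_back(reinterpret_cast<ToySlot>(static_cast<intptr_t>(type)));
  buffer->push_back(addr);
}

inline ToySlotType DecodeToySlotType(ToySlot slot) {
  return static_cast<ToySlotType>(reinterpret_cast<intptr_t>(slot));
}
#endif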
2611 
2612 
2613 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
2614   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
2615   RelocInfo::Mode rmode = rinfo->rmode();
2616   if (target_page->IsEvacuationCandidate() &&
2617       (rinfo->host() == NULL ||
2618        !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
2619     Address addr = rinfo->pc();
2620     SlotsBuffer::SlotType slot_type = SlotTypeForRMode(rmode);
2621     if (rinfo->IsInConstantPool()) {
2622       addr = rinfo->constant_pool_entry_address();
2623       if (RelocInfo::IsCodeTarget(rmode)) {
2624         slot_type = SlotsBuffer::CODE_ENTRY_SLOT;
2625       } else {
2626         DCHECK(RelocInfo::IsEmbeddedObject(rmode));
2627         slot_type = SlotsBuffer::OBJECT_SLOT;
2628       }
2629     }
2630     bool success = SlotsBuffer::AddTo(
2631         slots_buffer_allocator_, target_page->slots_buffer_address(), slot_type,
2632         addr, SlotsBuffer::FAIL_ON_OVERFLOW);
2633     if (!success) {
2634       EvictPopularEvacuationCandidate(target_page);
2635     }
2636   }
2637 }
2638 
2639 
2640 class RecordMigratedSlotVisitor final : public ObjectVisitor {
2641  public:
2642   RecordMigratedSlotVisitor(MarkCompactCollector* collector,
2643                             SlotsBuffer** evacuation_slots_buffer)
2644       : collector_(collector),
2645         evacuation_slots_buffer_(evacuation_slots_buffer) {}
2646 
2647   V8_INLINE void VisitPointer(Object** p) override {
2648     collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
2649                                    evacuation_slots_buffer_);
2650   }
2651 
2652   V8_INLINE void VisitPointers(Object** start, Object** end) override {
2653     while (start < end) {
2654       collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
2655                                      evacuation_slots_buffer_);
2656       ++start;
2657     }
2658   }
2659 
2660   V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
2661     if (collector_->compacting_) {
2662       Address code_entry = Memory::Address_at(code_entry_slot);
2663       collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
2664                                               evacuation_slots_buffer_);
2665     }
2666   }
2667 
2668  private:
2669   MarkCompactCollector* collector_;
2670   SlotsBuffer** evacuation_slots_buffer_;
2671 };
2672 
2673 
2674 // We scavenge new space simultaneously with sweeping. This is done in two
2675 // passes.
2676 //
2677 // The first pass migrates all live objects from one semispace to another or
2678 // promotes them to old space.  The forwarding address is written directly
2679 // into the first word of the object without any encoding.  If the object is
2680 // dead we write NULL as a forwarding address.
2681 //
2682 // The second pass updates pointers to new space in all spaces.  It is possible
2683 // to encounter pointers to dead new space objects during traversal of pointers
2684 // to new space.  We should clear them to avoid encountering them during next
2685 // pointer iteration.  This is an issue if the store buffer overflows and we
2686 // have to scan the entire old space, including dead objects, looking for
2687 // pointers to new space.
2688 void MarkCompactCollector::MigrateObject(
2689     HeapObject* dst, HeapObject* src, int size, AllocationSpace dest,
2690     SlotsBuffer** evacuation_slots_buffer) {
2691   Address dst_addr = dst->address();
2692   Address src_addr = src->address();
2693   DCHECK(heap()->AllowedToBeMigrated(src, dest));
2694   DCHECK(dest != LO_SPACE);
2695   if (dest == OLD_SPACE) {
2696     DCHECK_OBJECT_SIZE(size);
2697     DCHECK(evacuation_slots_buffer != nullptr);
2698     DCHECK(IsAligned(size, kPointerSize));
2699 
2700     heap()->MoveBlock(dst->address(), src->address(), size);
2701     RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer);
2702     dst->IterateBody(&visitor);
2703   } else if (dest == CODE_SPACE) {
2704     DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
2705     DCHECK(evacuation_slots_buffer != nullptr);
2706     PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
2707     heap()->MoveBlock(dst_addr, src_addr, size);
2708     RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer);
2709     Code::cast(dst)->Relocate(dst_addr - src_addr);
2710   } else {
2711     DCHECK_OBJECT_SIZE(size);
2712     DCHECK(evacuation_slots_buffer == nullptr);
2713     DCHECK(dest == NEW_SPACE);
2714     heap()->MoveBlock(dst_addr, src_addr, size);
2715   }
2716   heap()->OnMoveEvent(dst, src, size);
2717   Memory::Address_at(src_addr) = dst_addr;
2718 }
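
// A minimal sketch (not V8 code, excluded with #if 0) of the forwarding scheme
// described in the comment above MigrateObject: once an object has been
// copied, its new address is written into the first word of the old copy, so
// the second pass can update a stale pointer simply by reading that word.
// ToyObject, Forward and UpdatePointer are illustrative names; the sketch
// assumes the slot is already known to point at an evacuated object.
#if 0
#include <cstdint>
#include <cstring>

struct ToyObject {
  uintptr_t first_word;  // payload, overwritten with the forwarding address
  uintptr_t rest[3];
};

// First pass: copy src to dst and leave a forwarding address behind in src.
inline void Forward(ToyObject* src, ToyObject* dst) {
  std::memcpy(dst, src, sizeof(ToyObject));
  src->first_word = reinterpret_cast<uintptr_t>(dst);
}

// Second pass: chase the forwarding address stored in the evacuated object.
inline void UpdatePointer(ToyObject** slot) {
  *slot = reinterpret_cast<ToyObject*>((*slot)->first_word);
}
#endif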
2719 
2720 
2721 static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
2722                               SlotsBuffer::SlotType slot_type, Address addr) {
2723   switch (slot_type) {
2724     case SlotsBuffer::CODE_TARGET_SLOT: {
2725       RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
2726       rinfo.Visit(isolate, v);
2727       break;
2728     }
2729     case SlotsBuffer::CELL_TARGET_SLOT: {
2730       RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
2731       rinfo.Visit(isolate, v);
2732       break;
2733     }
2734     case SlotsBuffer::CODE_ENTRY_SLOT: {
2735       v->VisitCodeEntry(addr);
2736       break;
2737     }
2738     case SlotsBuffer::RELOCATED_CODE_OBJECT: {
2739       HeapObject* obj = HeapObject::FromAddress(addr);
2740       Code::BodyDescriptor::IterateBody(obj, v);
2741       break;
2742     }
2743     case SlotsBuffer::DEBUG_TARGET_SLOT: {
2744       RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
2745                       NULL);
2746       if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
2747       break;
2748     }
2749     case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
2750       RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
2751       rinfo.Visit(isolate, v);
2752       break;
2753     }
2754     case SlotsBuffer::OBJECT_SLOT: {
2755       v->VisitPointer(reinterpret_cast<Object**>(addr));
2756       break;
2757     }
2758     default:
2759       UNREACHABLE();
2760       break;
2761   }
2762 }
2763 
2764 
2765 // Visitor for updating pointers from live objects in old spaces to new space.
2766 // It does not expect to encounter pointers to dead objects.
2767 class PointersUpdatingVisitor : public ObjectVisitor {
2768  public:
2769   explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
2770 
2771   void VisitPointer(Object** p) override { UpdatePointer(p); }
2772 
2773   void VisitPointers(Object** start, Object** end) override {
2774     for (Object** p = start; p < end; p++) UpdatePointer(p);
2775   }
2776 
2777   void VisitCell(RelocInfo* rinfo) override {
2778     DCHECK(rinfo->rmode() == RelocInfo::CELL);
2779     Object* cell = rinfo->target_cell();
2780     Object* old_cell = cell;
2781     VisitPointer(&cell);
2782     if (cell != old_cell) {
2783       rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
2784     }
2785   }
2786 
2787   void VisitEmbeddedPointer(RelocInfo* rinfo) override {
2788     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2789     Object* target = rinfo->target_object();
2790     Object* old_target = target;
2791     VisitPointer(&target);
2792     // Avoid unnecessary changes that might unnecessarily flush the instruction
2793     // cache.
2794     if (target != old_target) {
2795       rinfo->set_target_object(target);
2796     }
2797   }
2798 
2799   void VisitCodeTarget(RelocInfo* rinfo) override {
2800     DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
2801     Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2802     Object* old_target = target;
2803     VisitPointer(&target);
2804     if (target != old_target) {
2805       rinfo->set_target_address(Code::cast(target)->instruction_start());
2806     }
2807   }
2808 
2809   void VisitCodeAgeSequence(RelocInfo* rinfo) override {
2810     DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
2811     Object* stub = rinfo->code_age_stub();
2812     DCHECK(stub != NULL);
2813     VisitPointer(&stub);
2814     if (stub != rinfo->code_age_stub()) {
2815       rinfo->set_code_age_stub(Code::cast(stub));
2816     }
2817   }
2818 
2819   void VisitDebugTarget(RelocInfo* rinfo) override {
2820     DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2821            rinfo->IsPatchedDebugBreakSlotSequence());
2822     Object* target =
2823         Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
2824     VisitPointer(&target);
2825     rinfo->set_debug_call_address(Code::cast(target)->instruction_start());
2826   }
2827 
2828   static inline void UpdateSlot(Heap* heap, Object** slot) {
2829     Object* obj = reinterpret_cast<Object*>(
2830         base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
2831 
2832     if (!obj->IsHeapObject()) return;
2833 
2834     HeapObject* heap_obj = HeapObject::cast(obj);
2835 
2836     MapWord map_word = heap_obj->map_word();
2837     if (map_word.IsForwardingAddress()) {
2838       DCHECK(heap->InFromSpace(heap_obj) ||
2839              MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
2840              Page::FromAddress(heap_obj->address())
2841                  ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
2842       HeapObject* target = map_word.ToForwardingAddress();
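      // Install the forwarding target with a compare-and-swap: the write only
      // happens if the slot still holds the old from-space value, so a slot
      // recorded in more than one buffer is not overwritten a second time
      // with a stale pointer.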
2843       base::NoBarrier_CompareAndSwap(
2844           reinterpret_cast<base::AtomicWord*>(slot),
2845           reinterpret_cast<base::AtomicWord>(obj),
2846           reinterpret_cast<base::AtomicWord>(target));
2847       DCHECK(!heap->InFromSpace(target) &&
2848              !MarkCompactCollector::IsOnEvacuationCandidate(target));
2849     }
2850   }
2851 
2852  private:
2853   inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
2854 
2855   Heap* heap_;
2856 };
2857 
2858 
2859 void MarkCompactCollector::UpdateSlots(SlotsBuffer* buffer) {
2860   PointersUpdatingVisitor v(heap_);
2861   size_t buffer_size = buffer->Size();
2862 
2863   for (size_t slot_idx = 0; slot_idx < buffer_size; ++slot_idx) {
2864     SlotsBuffer::ObjectSlot slot = buffer->Get(slot_idx);
2865     if (!SlotsBuffer::IsTypedSlot(slot)) {
2866       PointersUpdatingVisitor::UpdateSlot(heap_, slot);
2867     } else {
2868       ++slot_idx;
2869       DCHECK(slot_idx < buffer_size);
2870       UpdateSlot(heap_->isolate(), &v, DecodeSlotType(slot),
2871                  reinterpret_cast<Address>(buffer->Get(slot_idx)));
2872     }
2873   }
2874 }
2875 
2876 
2877 void MarkCompactCollector::UpdateSlotsRecordedIn(SlotsBuffer* buffer) {
2878   while (buffer != NULL) {
2879     UpdateSlots(buffer);
2880     buffer = buffer->next();
2881   }
2882 }
2883 
2884 
2885 static void UpdatePointer(HeapObject** address, HeapObject* object) {
2886   MapWord map_word = object->map_word();
2887   // The store buffer can still contain stale pointers in dead large objects.
2888   // Ignore these pointers here.
2889   DCHECK(map_word.IsForwardingAddress() ||
2890          object->GetHeap()->lo_space()->FindPage(
2891              reinterpret_cast<Address>(address)) != NULL);
2892   if (map_word.IsForwardingAddress()) {
2893     // Update the corresponding slot.
2894     *address = map_word.ToForwardingAddress();
2895   }
2896 }
2897 
2898 
2899 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2900                                                          Object** p) {
2901   MapWord map_word = HeapObject::cast(*p)->map_word();
2902 
2903   if (map_word.IsForwardingAddress()) {
2904     return String::cast(map_word.ToForwardingAddress());
2905   }
2906 
2907   return String::cast(*p);
2908 }
2909 
2910 
2911 bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
2912                                                HeapObject** out_object) {
2913   Space* owner = p->owner();
2914   if (owner == heap_->lo_space() || owner == NULL) {
2915     Object* large_object = heap_->lo_space()->FindObject(slot);
2916     // This object has to exist, otherwise we would not have recorded a slot
2917     // for it.
2918     CHECK(large_object->IsHeapObject());
2919     HeapObject* large_heap_object = HeapObject::cast(large_object);
2920     if (IsMarked(large_heap_object)) {
2921       *out_object = large_heap_object;
2922       return true;
2923     }
2924     return false;
2925   }
2926 
2927   uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
2928   unsigned int cell_index = mark_bit_index >> Bitmap::kBitsPerCellLog2;
2929   MarkBit::CellType index_mask = 1u << Bitmap::IndexInCell(mark_bit_index);
2930   MarkBit::CellType* cells = p->markbits()->cells();
2931   Address base_address = p->area_start();
2932   unsigned int base_address_cell_index = Bitmap::IndexToCell(
2933       Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(base_address)));
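  // One mark bit corresponds to one pointer-sized word and the bits are
  // grouped into cells of Bitmap::kBitsPerCell (32) bits.  For example, a
  // slot whose mark bit index is 40 lives in bit 8 of cell 1
  // (40 == 1 * 32 + 8), so index_mask == 1u << 8 for that slot.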
2934 
2935   // Check if the slot points to the start of an object. This can happen e.g.
2936   // when we left trim a fixed array. Such slots are invalid and we can remove
2937   // them.
2938   if (index_mask > 1) {
2939     if ((cells[cell_index] & index_mask) != 0 &&
2940         (cells[cell_index] & (index_mask >> 1)) == 0) {
2941       return false;
2942     }
2943   } else {
2944     // Left trimming moves the mark bits so we cannot be in the very first cell.
2945     DCHECK(cell_index != base_address_cell_index);
2946     if ((cells[cell_index] & index_mask) != 0 &&
2947         (cells[cell_index - 1] & (1u << Bitmap::kBitIndexMask)) == 0) {
2948       return false;
2949     }
2950   }
2951 
2952   // Check if the object is in the current cell.
2953   MarkBit::CellType slot_mask;
2954   if ((cells[cell_index] == 0) ||
2955       (base::bits::CountTrailingZeros32(cells[cell_index]) >
2956        base::bits::CountTrailingZeros32(cells[cell_index] | index_mask))) {
2957     // If we are already in the first cell, there is no live object.
2958     if (cell_index == base_address_cell_index) return false;
2959 
2960     // If not, find a cell in a preceding cell slot that has a mark bit set.
2961     do {
2962       cell_index--;
2963     } while (cell_index > base_address_cell_index && cells[cell_index] == 0);
2964 
2965     // The slot must be in a dead object if there are no preceding cells that
2966     // have mark bits set.
2967     if (cells[cell_index] == 0) {
2968       return false;
2969     }
2970 
2971     // The object is in a preceding cell. Set the mask to find any object.
2972     slot_mask = ~0u;
2973   } else {
2974     // We are interested in object mark bits right before the slot.
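    // For example, if index_mask == 0b0001'0000 then
    // index_mask + (index_mask - 1) == 0b0001'1111, i.e. the slot's own bit
    // plus every lower bit in the cell.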
2975     slot_mask = index_mask + (index_mask - 1);
2976   }
2977 
2978   MarkBit::CellType current_cell = cells[cell_index];
2979   CHECK(current_cell != 0);
2980 
2981   // Find the last live object in the cell.
2982   unsigned int leading_zeros =
2983       base::bits::CountLeadingZeros32(current_cell & slot_mask);
2984   CHECK(leading_zeros != Bitmap::kBitsPerCell);
2985   int offset = static_cast<int>(Bitmap::kBitIndexMask - leading_zeros) - 1;
2986 
2987   base_address += (cell_index - base_address_cell_index) *
2988                   Bitmap::kBitsPerCell * kPointerSize;
2989   Address address = base_address + offset * kPointerSize;
2990   HeapObject* object = HeapObject::FromAddress(address);
2991   CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
2992   CHECK(object->address() < reinterpret_cast<Address>(slot));
2993   if ((object->address() + kPointerSize) <= slot &&
2994       (object->address() + object->Size()) > slot) {
2995     // If the slot is within the last found object in the cell, the slot is
2996     // in a live object.
2997     // Slots pointing to the first word of an object are invalid and removed.
2998     // This can happen when we move the object header while left trimming.
2999     *out_object = object;
3000     return true;
3001   }
3002   return false;
3003 }
3004 
3005 
3006 bool MarkCompactCollector::IsSlotInBlackObjectSlow(Page* p, Address slot) {
3007   // Large objects are handled by checking the mark bit of the whole object.
3008   Space* owner = p->owner();
3009   if (owner == heap_->lo_space() || owner == NULL) {
3010     Object* large_object = heap_->lo_space()->FindObject(slot);
3011     // This object has to exist, otherwise we would not have recorded a slot
3012     // for it.
3013     CHECK(large_object->IsHeapObject());
3014     HeapObject* large_heap_object = HeapObject::cast(large_object);
3015     if (IsMarked(large_heap_object)) {
3016       return true;
3017     }
3018     return false;
3019   }
3020 
3021   LiveObjectIterator<kBlackObjects> it(p);
3022   HeapObject* object = NULL;
3023   while ((object = it.Next()) != NULL) {
3024     int size = object->Size();
3025 
3026     if (object->address() > slot) return false;
3027     if (object->address() <= slot && slot < (object->address() + size)) {
3028       return true;
3029     }
3030   }
3031   return false;
3032 }
3033 
3034 
3035 bool MarkCompactCollector::IsSlotInLiveObject(Address slot) {
3036   HeapObject* object = NULL;
3037   // The target object is black but we don't know if the source slot is black.
3038   // The source object could have died and the slot could be part of a free
3039   // space. Find out based on mark bits if the slot is part of a live object.
3040   if (!IsSlotInBlackObject(Page::FromAddress(slot), slot, &object)) {
3041     return false;
3042   }
3043 
3044   DCHECK(object != NULL);
3045   int offset = static_cast<int>(slot - object->address());
3046   return object->IsValidSlot(offset);
3047 }
3048 
3049 
3050 void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
3051                                                     HeapObject* object) {
3052   // The target object has to be black.
3053   CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3054 
3055   // The target object is black but we don't know if the source slot is black.
3056   // The source object could have died and the slot could be part of a free
3057   // space. Use the mark bit iterator to find out about liveness of the slot.
3058   CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot));
3059 }
3060 
3061 
3062 void MarkCompactCollector::EvacuateNewSpacePrologue() {
3063   // There are soft limits in the allocation code, designed to trigger a
3064   // mark-sweep collection by failing allocations.  But since we are already in
3065   // a mark-sweep allocation, there is no sense in trying to trigger one.
3066   AlwaysAllocateScope scope(isolate());
3067 
3068   NewSpace* new_space = heap()->new_space();
3069 
3070   // Store allocation range before flipping semispaces.
3071   Address from_bottom = new_space->bottom();
3072   Address from_top = new_space->top();
3073 
3074   // Flip the semispaces.  After flipping, to space is empty, from space has
3075   // live objects.
3076   new_space->Flip();
3077   new_space->ResetAllocationInfo();
3078 
3079   newspace_evacuation_candidates_.Clear();
3080   NewSpacePageIterator it(from_bottom, from_top);
3081   while (it.has_next()) {
3082     newspace_evacuation_candidates_.Add(it.next());
3083   }
3084 }
3085 
3086 
3087 HashMap* MarkCompactCollector::EvacuateNewSpaceInParallel() {
3088   HashMap* local_pretenuring_feedback = new HashMap(
3089       HashMap::PointersMatch, kInitialLocalPretenuringFeedbackCapacity);
3090   EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_,
3091                                             local_pretenuring_feedback);
3092   // First pass: traverse all objects in inactive semispace, remove marks,
3093   // migrate live objects and write forwarding addresses.  This stage puts
3094   // new entries in the store buffer and may cause some pages to be marked
3095   // scan-on-scavenge.
3096   for (int i = 0; i < newspace_evacuation_candidates_.length(); i++) {
3097     NewSpacePage* p =
3098         reinterpret_cast<NewSpacePage*>(newspace_evacuation_candidates_[i]);
3099     bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits);
3100     USE(ok);
3101     DCHECK(ok);
3102   }
3103   heap_->IncrementPromotedObjectsSize(
3104       static_cast<int>(new_space_visitor.promoted_size()));
3105   heap_->IncrementSemiSpaceCopiedObjectSize(
3106       static_cast<int>(new_space_visitor.semispace_copied_size()));
3107   heap_->IncrementYoungSurvivorsCounter(
3108       static_cast<int>(new_space_visitor.promoted_size()) +
3109       static_cast<int>(new_space_visitor.semispace_copied_size()));
3110   return local_pretenuring_feedback;
3111 }
3112 
3113 
3114 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
3115     SlotsBuffer* evacuation_slots_buffer) {
3116   base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
3117   evacuation_slots_buffers_.Add(evacuation_slots_buffer);
3118 }
3119 
3120 
3121 int MarkCompactCollector::NumberOfParallelCompactionTasks() {
3122   if (!FLAG_parallel_compaction) return 1;
3123   // Compute the number of needed tasks based on a target compaction time, the
3124   // profiled compaction speed and marked live memory.
3125   //
3126   // The number of parallel compaction tasks is limited by:
3127   // - #evacuation pages
3128   // - (#cores - 1)
3129   // - a hard limit
3130   const double kTargetCompactionTimeInMs = 1;
3131   const int kMaxCompactionTasks = 8;
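  // Worked example with illustrative numbers: 4 MB of live bytes on the
  // evacuation candidates at a measured speed of 1 MB/ms and the 1 ms target
  // give 1 + 4 = 5 tasks, which is then capped by the number of candidate
  // pages, by #cores - 1, and finally by kMaxCompactionTasks.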
3132 
3133   intptr_t compaction_speed =
3134       heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3135   if (compaction_speed == 0) return 1;
3136 
3137   intptr_t live_bytes = 0;
3138   for (Page* page : evacuation_candidates_) {
3139     live_bytes += page->LiveBytes();
3140   }
3141 
3142   const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1);
3143   const int tasks =
3144       1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed /
3145                            kTargetCompactionTimeInMs);
3146   const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks);
3147   const int tasks_capped_cores = Min(cores, tasks_capped_pages);
3148   const int tasks_capped_hard = Min(kMaxCompactionTasks, tasks_capped_cores);
3149   return tasks_capped_hard;
3150 }
3151 
3152 
3153 void MarkCompactCollector::EvacuatePagesInParallel() {
3154   const int num_pages = evacuation_candidates_.length();
3155   if (num_pages == 0) return;
3156 
3157   // Used for trace summary.
3158   intptr_t live_bytes = 0;
3159   intptr_t compaction_speed = 0;
3160   if (FLAG_trace_fragmentation) {
3161     for (Page* page : evacuation_candidates_) {
3162       live_bytes += page->LiveBytes();
3163     }
3164     compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3165   }
3166   const int num_tasks = NumberOfParallelCompactionTasks();
3167 
3168   // Set up compaction spaces.
3169   CompactionSpaceCollection** compaction_spaces_for_tasks =
3170       new CompactionSpaceCollection*[num_tasks];
3171   for (int i = 0; i < num_tasks; i++) {
3172     compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
3173   }
3174 
3175   heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
3176                                                   num_tasks);
3177   heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
3178                                                    num_tasks);
3179 
3180   uint32_t* task_ids = new uint32_t[num_tasks - 1];
3181   // Kick off parallel tasks.
3182   StartParallelCompaction(compaction_spaces_for_tasks, task_ids, num_tasks);
3183   // Wait for unfinished and not-yet-started tasks.
3184   WaitUntilCompactionCompleted(task_ids, num_tasks - 1);
3185   delete[] task_ids;
3186 
3187   double compaction_duration = 0.0;
3188   intptr_t compacted_memory = 0;
3189   // Merge back memory (compacted and unused) from compaction spaces.
3190   for (int i = 0; i < num_tasks; i++) {
3191     heap()->old_space()->MergeCompactionSpace(
3192         compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
3193     heap()->code_space()->MergeCompactionSpace(
3194         compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
3195     compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted();
3196     compaction_duration += compaction_spaces_for_tasks[i]->duration();
3197     delete compaction_spaces_for_tasks[i];
3198   }
3199   delete[] compaction_spaces_for_tasks;
3200   heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory);
3201 
3202   // Finalize sequentially.
3203   int abandoned_pages = 0;
3204   for (int i = 0; i < num_pages; i++) {
3205     Page* p = evacuation_candidates_[i];
3206     switch (p->parallel_compaction_state().Value()) {
3207       case MemoryChunk::ParallelCompactingState::kCompactingAborted:
3208         // We have partially compacted the page, i.e., some objects may have
3209         // moved, others are still in place.
3210         // We need to:
3211         // - Leave the evacuation candidate flag for later processing of
3212         //   slots buffer entries.
3213         // - Leave the slots buffer there for processing of entries added by
3214         //   the write barrier.
3215         // - Rescan the page as slot recording in the migration buffer only
3216         //   happens upon moving (which we potentially didn't do).
3217         // - Leave the page in the list of pages of a space since we could not
3218         //   fully evacuate it.
3219         // - Mark them for rescanning for store buffer entries as we otherwise
3220         //   might have stale store buffer entries that become "valid" again
3221         //   after reusing the memory. Note that all existing store buffer
3222         //   entries of such pages are filtered before rescanning.
3223         DCHECK(p->IsEvacuationCandidate());
3224         p->SetFlag(Page::COMPACTION_WAS_ABORTED);
3225         p->set_scan_on_scavenge(true);
3226         abandoned_pages++;
3227         break;
3228       case MemoryChunk::kCompactingFinalize:
3229         DCHECK(p->IsEvacuationCandidate());
3230         p->SetWasSwept();
3231         p->Unlink();
3232         break;
3233       case MemoryChunk::kCompactingDone:
3234         DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
3235         DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3236         break;
3237       default:
3238         // Only kCompactingInProgress is left; we should never observe it here.
3239         UNREACHABLE();
3240     }
3241     p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
3242   }
3243   if (FLAG_trace_fragmentation) {
3244     PrintIsolate(isolate(),
3245                  "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d "
3246                  "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
3247                  "d compaction_speed=%" V8_PTR_PREFIX "d\n",
3248                  isolate()->time_millis_since_init(), FLAG_parallel_compaction,
3249                  num_pages, abandoned_pages, num_tasks,
3250                  base::SysInfo::NumberOfProcessors(), live_bytes,
3251                  compaction_speed);
3252   }
3253 }
3254 
3255 
3256 void MarkCompactCollector::StartParallelCompaction(
3257     CompactionSpaceCollection** compaction_spaces, uint32_t* task_ids,
3258     int len) {
3259   compaction_in_progress_ = true;
3260   for (int i = 1; i < len; i++) {
3261     CompactionTask* task = new CompactionTask(heap(), compaction_spaces[i]);
3262     task_ids[i - 1] = task->id();
3263     V8::GetCurrentPlatform()->CallOnBackgroundThread(
3264         task, v8::Platform::kShortRunningTask);
3265   }
3266 
3267   // Contribute in main thread.
3268   EvacuatePages(compaction_spaces[0], &migration_slots_buffer_);
3269 }
3270 
3271 
3272 void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
3273                                                         int len) {
3274   // Try to cancel compaction tasks that have not been run (as they might be
3275   // stuck in a worker queue). Tasks that cannot be canceled have either
3276   // already completed or are still running; hence we need to wait for their
3277   // semaphore signal.
3278   for (int i = 0; i < len; i++) {
3279     if (!heap()->isolate()->cancelable_task_manager()->TryAbort(task_ids[i])) {
3280       pending_compaction_tasks_semaphore_.Wait();
3281     }
3282   }
3283   compaction_in_progress_ = false;
3284 }
3285 
3286 
3287 void MarkCompactCollector::EvacuatePages(
3288     CompactionSpaceCollection* compaction_spaces,
3289     SlotsBuffer** evacuation_slots_buffer) {
3290   EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
3291                                   evacuation_slots_buffer);
3292   for (int i = 0; i < evacuation_candidates_.length(); i++) {
3293     Page* p = evacuation_candidates_[i];
3294     DCHECK(p->IsEvacuationCandidate() ||
3295            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3296     DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) ==
3297            MemoryChunk::kSweepingDone);
3298     if (p->parallel_compaction_state().TrySetValue(
3299             MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
3300       if (p->IsEvacuationCandidate()) {
3301         DCHECK_EQ(p->parallel_compaction_state().Value(),
3302                   MemoryChunk::kCompactingInProgress);
3303         double start = heap()->MonotonicallyIncreasingTimeInMs();
3304         intptr_t live_bytes = p->LiveBytes();
3305         AlwaysAllocateScope always_allocate(isolate());
3306         if (VisitLiveObjects(p, &visitor, kClearMarkbits)) {
3307           p->ResetLiveBytes();
3308           p->parallel_compaction_state().SetValue(
3309               MemoryChunk::kCompactingFinalize);
3310           compaction_spaces->ReportCompactionProgress(
3311               heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes);
3312         } else {
3313           p->parallel_compaction_state().SetValue(
3314               MemoryChunk::kCompactingAborted);
3315         }
3316       } else {
3317         // There could be popular pages in the list of evacuation candidates
3318         // which we do not compact.
3319         p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
3320       }
3321     }
3322   }
3323 }
3324 
3325 
3326 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3327  public:
3328   virtual Object* RetainAs(Object* object) {
3329     if (object->IsHeapObject()) {
3330       HeapObject* heap_object = HeapObject::cast(object);
3331       MapWord map_word = heap_object->map_word();
3332       if (map_word.IsForwardingAddress()) {
3333         return map_word.ToForwardingAddress();
3334       }
3335     }
3336     return object;
3337   }
3338 };
3339 
3340 
3341 enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
3342 
3343 
3344 enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
3345 
3346 
3347 enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
3348 
3349 
3350 template <MarkCompactCollector::SweepingParallelism mode>
3351 static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
3352                      int size) {
3353   if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
3354     DCHECK(free_list == NULL);
3355     return space->Free(start, size);
3356   } else {
3357     return size - free_list->Free(start, size);
3358   }
3359 }
3360 
3361 
3362 // Sweeps a page. After sweeping the page can be iterated.
3363 // Slots in live objects pointing into evacuation candidates are updated
3364 // if requested.
3365 // Returns the size of the biggest contiguous freed memory chunk in bytes.
3366 template <SweepingMode sweeping_mode,
3367           MarkCompactCollector::SweepingParallelism parallelism,
3368           SkipListRebuildingMode skip_list_mode,
3369           FreeSpaceTreatmentMode free_space_mode>
3370 static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
3371                  ObjectVisitor* v) {
3372   DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
3373   DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3374             space->identity() == CODE_SPACE);
3375   DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3376   DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
3377          sweeping_mode == SWEEP_ONLY);
3378 
3379   Address free_start = p->area_start();
3380   DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3381 
3382   // If we use the skip list for code space pages, we have to lock the skip
3383   // list because it could be accessed concurrently by the runtime or the
3384   // deoptimizer.
3385   SkipList* skip_list = p->skip_list();
3386   if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3387     skip_list->Clear();
3388   }
3389 
3390   intptr_t freed_bytes = 0;
3391   intptr_t max_freed_bytes = 0;
3392   int curr_region = -1;
3393 
3394   LiveObjectIterator<kBlackObjects> it(p);
3395   HeapObject* object = NULL;
3396   while ((object = it.Next()) != NULL) {
3397     DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3398     Address free_end = object->address();
3399     if (free_end != free_start) {
3400       int size = static_cast<int>(free_end - free_start);
3401       if (free_space_mode == ZAP_FREE_SPACE) {
3402         memset(free_start, 0xcc, size);
3403       }
3404       freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3405       max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3406     }
3407     Map* map = object->synchronized_map();
3408     int size = object->SizeFromMap(map);
3409     if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3410       object->IterateBody(map->instance_type(), size, v);
3411     }
3412     if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3413       int new_region_start = SkipList::RegionNumber(free_end);
3414       int new_region_end =
3415           SkipList::RegionNumber(free_end + size - kPointerSize);
3416       if (new_region_start != curr_region || new_region_end != curr_region) {
3417         skip_list->AddObject(free_end, size);
3418         curr_region = new_region_end;
3419       }
3420     }
3421     free_start = free_end + size;
3422   }
3423 
3424   // Clear the mark bits of that page and reset live bytes count.
3425   Bitmap::Clear(p);
3426 
3427   if (free_start != p->area_end()) {
3428     int size = static_cast<int>(p->area_end() - free_start);
3429     if (free_space_mode == ZAP_FREE_SPACE) {
3430       memset(free_start, 0xcc, size);
3431     }
3432     freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3433     max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3434   }
3435 
3436   if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
3437     // When concurrent sweeping is active, the page will be marked as swept
3438     // by the main thread after sweeping finishes.
3439     p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingFinalize);
3440   } else {
3441     p->SetWasSwept();
3442   }
3443   return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
3444 }
3445 
3446 
3447 void MarkCompactCollector::InvalidateCode(Code* code) {
3448   if (heap_->incremental_marking()->IsCompacting() &&
3449       !ShouldSkipEvacuationSlotRecording(code)) {
3450     DCHECK(compacting_);
3451 
3452     // If the object is white then no slots were recorded on it yet.
3453     MarkBit mark_bit = Marking::MarkBitFrom(code);
3454     if (Marking::IsWhite(mark_bit)) return;
3455 
3456     // Ignore all slots that might have been recorded in the body of the
3457     // deoptimized code object. Assumption: no slots will be recorded for
3458     // this object after invalidating it.
3459     RemoveObjectSlots(code->instruction_start(),
3460                       code->address() + code->Size());
3461   }
3462 }
3463 
3464 
3465 // Return true if the given code is deoptimized or will be deoptimized.
3466 bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3467   return code->is_optimized_code() && code->marked_for_deoptimization();
3468 }
3469 
3470 
3471 void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
3472                                              Address end_slot) {
3473   // Remove entries by replacing them with an old-space slot containing a smi
3474   // that is located in an unmovable page.
3475   int npages = evacuation_candidates_.length();
3476   for (int i = 0; i < npages; i++) {
3477     Page* p = evacuation_candidates_[i];
3478     DCHECK(p->IsEvacuationCandidate() ||
3479            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3480     if (p->IsEvacuationCandidate()) {
3481       SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot,
3482                                      end_slot);
3483     }
3484   }
3485 }
3486 
3487 
3488 #ifdef VERIFY_HEAP
3489 static void VerifyAllBlackObjects(MemoryChunk* page) {
3490   LiveObjectIterator<kAllLiveObjects> it(page);
3491   HeapObject* object = NULL;
3492   while ((object = it.Next()) != NULL) {
3493     CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3494   }
3495 }
3496 #endif  // VERIFY_HEAP
3497 
3498 
3499 bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
3500                                             HeapObjectVisitor* visitor,
3501                                             IterationMode mode) {
3502 #ifdef VERIFY_HEAP
3503   VerifyAllBlackObjects(page);
3504 #endif  // VERIFY_HEAP
3505 
3506   LiveObjectIterator<kBlackObjects> it(page);
3507   HeapObject* object = nullptr;
3508   while ((object = it.Next()) != nullptr) {
3509     DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3510     if (!visitor->Visit(object)) {
3511       if (mode == kClearMarkbits) {
3512         page->markbits()->ClearRange(
3513             page->AddressToMarkbitIndex(page->area_start()),
3514             page->AddressToMarkbitIndex(object->address()));
3515         RecomputeLiveBytes(page);
3516       }
3517       return false;
3518     }
3519   }
3520   if (mode == kClearMarkbits) {
3521     Bitmap::Clear(page);
3522   }
3523   return true;
3524 }
3525 
3526 
3527 void MarkCompactCollector::RecomputeLiveBytes(MemoryChunk* page) {
3528   LiveObjectIterator<kBlackObjects> it(page);
3529   int new_live_size = 0;
3530   HeapObject* object = nullptr;
3531   while ((object = it.Next()) != nullptr) {
3532     new_live_size += object->Size();
3533   }
3534   page->SetLiveBytes(new_live_size);
3535 }
3536 
3537 
3538 void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
3539                                                 ObjectVisitor* visitor) {
3540 #ifdef VERIFY_HEAP
3541   VerifyAllBlackObjects(page);
3542 #endif  // VERIFY_HEAP
3543 
3544   LiveObjectIterator<kBlackObjects> it(page);
3545   HeapObject* object = NULL;
3546   while ((object = it.Next()) != NULL) {
3547     DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3548     Map* map = object->synchronized_map();
3549     int size = object->SizeFromMap(map);
3550     object->IterateBody(map->instance_type(), size, visitor);
3551   }
3552 }
3553 
3554 
3555 void MarkCompactCollector::SweepAbortedPages() {
3556   // Second pass on aborted pages.
3557   for (int i = 0; i < evacuation_candidates_.length(); i++) {
3558     Page* p = evacuation_candidates_[i];
3559     if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3560       p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
3561       PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3562       switch (space->identity()) {
3563         case OLD_SPACE:
3564           Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
3565                 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
3566           break;
3567         case CODE_SPACE:
3568           if (FLAG_zap_code_space) {
3569             Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
3570                   ZAP_FREE_SPACE>(space, NULL, p, nullptr);
3571           } else {
3572             Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
3573                   IGNORE_FREE_SPACE>(space, NULL, p, nullptr);
3574           }
3575           break;
3576         default:
3577           UNREACHABLE();
3578           break;
3579       }
3580     }
3581   }
3582 }
3583 
3584 
3585 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3586   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
3587   Heap::RelocationLock relocation_lock(heap());
3588 
3589   HashMap* local_pretenuring_feedback = nullptr;
3590   {
3591     GCTracer::Scope gc_scope(heap()->tracer(),
3592                              GCTracer::Scope::MC_EVACUATE_NEW_SPACE);
3593     EvacuationScope evacuation_scope(this);
3594     EvacuateNewSpacePrologue();
3595     local_pretenuring_feedback = EvacuateNewSpaceInParallel();
3596     heap_->new_space()->set_age_mark(heap_->new_space()->top());
3597   }
3598 
3599   {
3600     GCTracer::Scope gc_scope(heap()->tracer(),
3601                              GCTracer::Scope::MC_EVACUATE_CANDIDATES);
3602     EvacuationScope evacuation_scope(this);
3603     EvacuatePagesInParallel();
3604   }
3605 
3606   {
3607     heap_->MergeAllocationSitePretenuringFeedback(*local_pretenuring_feedback);
3608     delete local_pretenuring_feedback;
3609   }
3610 
3611   UpdatePointersAfterEvacuation();
3612 
3613   {
3614     GCTracer::Scope gc_scope(heap()->tracer(),
3615                              GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
3616     // After updating all pointers, we can finally sweep the aborted pages,
3617     // effectively overwriting any forwarding pointers left in dead objects.
3618     SweepAbortedPages();
3619 
3620     // EvacuateNewSpaceAndCandidates iterates over new space objects and for
3621     // ArrayBuffers either re-registers them as live or promotes them. This is
3622     // needed to properly free them.
3623     heap()->array_buffer_tracker()->FreeDead(false);
3624 
3625     // Deallocate evacuated candidate pages.
3626     ReleaseEvacuationCandidates();
3627   }
3628 
3629 #ifdef VERIFY_HEAP
3630   if (FLAG_verify_heap && !sweeping_in_progress_) {
3631     VerifyEvacuation(heap());
3632   }
3633 #endif
3634 }
3635 
3636 
3637 void MarkCompactCollector::UpdatePointersAfterEvacuation() {
3638   GCTracer::Scope gc_scope(heap()->tracer(),
3639                            GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
3640   {
3641     GCTracer::Scope gc_scope(
3642         heap()->tracer(),
3643         GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
3644     UpdateSlotsRecordedIn(migration_slots_buffer_);
3645     if (FLAG_trace_fragmentation_verbose) {
3646       PrintF("  migration slots buffer: %d\n",
3647              SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3648     }
3649     slots_buffer_allocator_->DeallocateChain(&migration_slots_buffer_);
3650     DCHECK(migration_slots_buffer_ == NULL);
3651 
3652     // TODO(hpayer): Process the slots buffers in parallel. This has to be done
3653     // after evacuation of all pages finishes.
3654     int buffers = evacuation_slots_buffers_.length();
3655     for (int i = 0; i < buffers; i++) {
3656       SlotsBuffer* buffer = evacuation_slots_buffers_[i];
3657       UpdateSlotsRecordedIn(buffer);
3658       slots_buffer_allocator_->DeallocateChain(&buffer);
3659     }
3660     evacuation_slots_buffers_.Rewind(0);
3661   }
3662 
3663   // Second pass: find pointers to new space and update them.
3664   PointersUpdatingVisitor updating_visitor(heap());
3665 
3666   {
3667     GCTracer::Scope gc_scope(
3668         heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
3669     // Update pointers in to space.
3670     SemiSpaceIterator to_it(heap()->new_space());
3671     for (HeapObject* object = to_it.Next(); object != NULL;
3672          object = to_it.Next()) {
3673       Map* map = object->map();
3674       object->IterateBody(map->instance_type(), object->SizeFromMap(map),
3675                           &updating_visitor);
3676     }
3677     // Update roots.
3678     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3679 
3680     StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
3681                                   &Heap::ScavengeStoreBufferCallback);
3682     heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
3683   }
3684 
3685   int npages = evacuation_candidates_.length();
3686   {
3687     GCTracer::Scope gc_scope(
3688         heap()->tracer(),
3689         GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
3690     for (int i = 0; i < npages; i++) {
3691       Page* p = evacuation_candidates_[i];
3692       DCHECK(p->IsEvacuationCandidate() ||
3693              p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3694 
3695       if (p->IsEvacuationCandidate()) {
3696         UpdateSlotsRecordedIn(p->slots_buffer());
3697         if (FLAG_trace_fragmentation_verbose) {
3698           PrintF("  page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
3699                  SlotsBuffer::SizeOfChain(p->slots_buffer()));
3700         }
3701         slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
3702 
3703         // Important: the skip list should be cleared only after roots were
3704         // updated, because root iteration traverses the stack and might have to
3705         // find code objects for a not-yet-updated pc pointing into an evacuation candidate.
3706         SkipList* list = p->skip_list();
3707         if (list != NULL) list->Clear();
3708 
3709         // First pass on aborted pages, fixing up all live objects.
3710         if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3711           p->ClearEvacuationCandidate();
3712           VisitLiveObjectsBody(p, &updating_visitor);
3713         }
3714       }
3715 
3716       if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3717         if (FLAG_gc_verbose) {
3718           PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3719                  reinterpret_cast<intptr_t>(p));
3720         }
3721         PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3722         p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3723 
3724         switch (space->identity()) {
3725           case OLD_SPACE:
3726             Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3727                   IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
3728                                                        &updating_visitor);
3729             break;
3730           case CODE_SPACE:
3731             if (FLAG_zap_code_space) {
3732               Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3733                     REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p,
3734                                                        &updating_visitor);
3735             } else {
3736               Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3737                     REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
3738                                                           &updating_visitor);
3739             }
3740             break;
3741           default:
3742             UNREACHABLE();
3743             break;
3744         }
3745       }
3746     }
3747   }
3748 
3749   {
3750     GCTracer::Scope gc_scope(heap()->tracer(),
3751                              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
3752     heap_->string_table()->Iterate(&updating_visitor);
3753 
3754     // Update pointers from external string table.
3755     heap_->UpdateReferencesInExternalStringTable(
3756         &UpdateReferenceInExternalStringTableEntry);
3757 
3758     EvacuationWeakObjectRetainer evacuation_object_retainer;
3759     heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
3760   }
3761 }
3762 
3763 
3764 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
3765   int npages = evacuation_candidates_.length();
3766   for (int i = 0; i < npages; i++) {
3767     Page* p = evacuation_candidates_[i];
3768     if (!p->IsEvacuationCandidate()) continue;
3769     p->Unlink();
3770     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3771     p->InsertAfter(space->LastPage());
3772   }
3773 }
3774 
3775 
3776 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3777   int npages = evacuation_candidates_.length();
3778   for (int i = 0; i < npages; i++) {
3779     Page* p = evacuation_candidates_[i];
3780     if (!p->IsEvacuationCandidate()) continue;
3781     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3782     space->Free(p->area_start(), p->area_size());
3783     p->set_scan_on_scavenge(false);
3784     p->ResetLiveBytes();
3785     CHECK(p->WasSwept());
3786     space->ReleasePage(p);
3787   }
3788   evacuation_candidates_.Rewind(0);
3789   compacting_ = false;
3790   heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages();
3791   heap()->FreeQueuedChunks();
3792 }
3793 
3794 
3795 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
3796                                           int required_freed_bytes) {
3797   int max_freed = 0;
3798   int max_freed_overall = 0;
3799   PageIterator it(space);
3800   while (it.has_next()) {
3801     Page* p = it.next();
3802     max_freed = SweepInParallel(p, space);
3803     DCHECK(max_freed >= 0);
3804     if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
3805       return max_freed;
3806     }
3807     max_freed_overall = Max(max_freed, max_freed_overall);
3808     if (p == space->end_of_unswept_pages()) break;
3809   }
3810   return max_freed_overall;
3811 }
3812 
3813 
3814 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
3815   int max_freed = 0;
3816   if (page->TryLock()) {
3817     // If this page was already swept in the meantime, we can return here.
3818     if (page->parallel_sweeping_state().Value() !=
3819         MemoryChunk::kSweepingPending) {
3820       page->mutex()->Unlock();
3821       return 0;
3822     }
3823     page->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingInProgress);
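    // Sweep into a thread-private free list and concatenate it into the
    // space's shared free list once at the end, so the shared list is not
    // touched for every individual freed block.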
3824     FreeList* free_list;
3825     FreeList private_free_list(space);
3826     if (space->identity() == OLD_SPACE) {
3827       free_list = free_list_old_space_.get();
3828       max_freed =
3829           Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
3830                 IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
3831     } else if (space->identity() == CODE_SPACE) {
3832       free_list = free_list_code_space_.get();
3833       max_freed =
3834           Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
3835                 IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
3836     } else {
3837       free_list = free_list_map_space_.get();
3838       max_freed =
3839           Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
3840                 IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
3841     }
3842     free_list->Concatenate(&private_free_list);
3843     page->mutex()->Unlock();
3844   }
3845   return max_freed;
3846 }
3847 
3848 
3849 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
3850   space->ClearStats();
3851 
3852   // We defensively initialize end_of_unswept_pages_ here with the first page
3853   // of the pages list.
3854   space->set_end_of_unswept_pages(space->FirstPage());
3855 
3856   PageIterator it(space);
3857 
3858   int pages_swept = 0;
3859   bool unused_page_present = false;
3860   bool parallel_sweeping_active = false;
3861 
3862   while (it.has_next()) {
3863     Page* p = it.next();
3864     DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
3865 
3866     // Clear the swept flag; the mark bits on this page are still intact.
3867     p->ClearWasSwept();
3868 
3869     if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
3870         p->IsEvacuationCandidate()) {
3871       // Will be processed in EvacuateNewSpaceAndCandidates.
3872       DCHECK(evacuation_candidates_.length() > 0);
3873       continue;
3874     }
3875 
3876     if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
3877       // We need to sweep the page to get it into an iterable state again. Note
3878       // that this adds unusable memory into the free list that is later on
3879       // (in the free list) dropped again. Since we only use the flag for
3880       // testing this is fine.
3881       Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
3882             IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
3883       continue;
3884     }
3885 
3886     // One unused page is kept; all further unused pages are released before sweeping.
3887     if (p->LiveBytes() == 0) {
3888       if (unused_page_present) {
3889         if (FLAG_gc_verbose) {
3890           PrintIsolate(isolate(), "sweeping: released page: %p", p);
3891         }
3892         space->ReleasePage(p);
3893         continue;
3894       }
3895       unused_page_present = true;
3896     }
3897 
3898     if (!parallel_sweeping_active) {
3899       if (FLAG_gc_verbose) {
3900         PrintIsolate(isolate(), "sweeping: %p", p);
3901       }
3902       if (space->identity() == CODE_SPACE) {
3903         if (FLAG_zap_code_space) {
3904           Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
3905                 ZAP_FREE_SPACE>(space, NULL, p, NULL);
3906         } else {
3907           Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
3908                 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
3909         }
3910       } else {
3911         Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
3912               IGNORE_FREE_SPACE>(space, NULL, p, NULL);
3913       }
3914       pages_swept++;
3915       parallel_sweeping_active = true;
3916     } else {
3917       if (FLAG_gc_verbose) {
3918         PrintIsolate(isolate(), "sweeping: initialized for parallel: %p", p);
3919       }
3920       p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
3921       int to_sweep = p->area_size() - p->LiveBytes();
3922       space->accounting_stats_.ShrinkSpace(to_sweep);
3923     }
3924     space->set_end_of_unswept_pages(p);
3925   }
3926 
3927   if (FLAG_gc_verbose) {
3928     PrintIsolate(isolate(), "sweeping: space=%s pages_swept=%d",
3929                  AllocationSpaceName(space->identity()), pages_swept);
3930   }
3931 }
3932 
3933 
3934 void MarkCompactCollector::SweepSpaces() {
3935   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
3936   double start_time = 0.0;
3937   if (FLAG_print_cumulative_gc_stat) {
3938     start_time = heap_->MonotonicallyIncreasingTimeInMs();
3939   }
3940 
3941 #ifdef DEBUG
3942   state_ = SWEEP_SPACES;
3943 #endif
3944 
3945   MoveEvacuationCandidatesToEndOfPagesList();
3946 
3947   {
3948     sweeping_in_progress_ = true;
3949     {
3950       GCTracer::Scope sweep_scope(heap()->tracer(),
3951                                   GCTracer::Scope::MC_SWEEP_OLD);
3952       StartSweepSpace(heap()->old_space());
3953     }
3954     {
3955       GCTracer::Scope sweep_scope(heap()->tracer(),
3956                                   GCTracer::Scope::MC_SWEEP_CODE);
3957       StartSweepSpace(heap()->code_space());
3958     }
3959     {
3960       GCTracer::Scope sweep_scope(heap()->tracer(),
3961                                   GCTracer::Scope::MC_SWEEP_MAP);
3962       StartSweepSpace(heap()->map_space());
3963     }
3964     if (FLAG_concurrent_sweeping) {
3965       StartSweeperThreads();
3966     }
3967   }
3968 
3969   // Deallocate unmarked large objects.
3970   heap_->lo_space()->FreeUnmarkedObjects();
3971 
3972   // Give pages that are queued to be freed back to the OS. Invalid store
3973   // buffer entries are already filtered out. We can just release the memory.
3974   heap()->FreeQueuedChunks();
3975 
3976   if (FLAG_print_cumulative_gc_stat) {
3977     heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() -
3978                                      start_time);
3979   }
3980 }
3981 
3982 
3983 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
3984   PageIterator it(space);
3985   while (it.has_next()) {
3986     Page* p = it.next();
3987     if (p->parallel_sweeping_state().Value() ==
3988         MemoryChunk::kSweepingFinalize) {
3989       p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingDone);
3990       p->SetWasSwept();
3991     }
3992     DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
3993   }
3994 }
3995 
3996 
3997 void MarkCompactCollector::ParallelSweepSpacesComplete() {
3998   ParallelSweepSpaceComplete(heap()->old_space());
3999   ParallelSweepSpaceComplete(heap()->code_space());
4000   ParallelSweepSpaceComplete(heap()->map_space());
4001 }
4002 
4003 
4004 // TODO(1466) ReportDeleteIfNeeded is not called currently.
4005 // Our profiling tools do not expect intersections between
4006 // code objects. We should either reenable it or change our tools.
4007 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
4008                                                 Isolate* isolate) {
4009   if (obj->IsCode()) {
4010     PROFILE(isolate, CodeDeleteEvent(obj->address()));
4011   }
4012 }
4013 
4014 
4015 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
4016 
4017 
4018 void MarkCompactCollector::Initialize() {
4019   MarkCompactMarkingVisitor::Initialize();
4020   IncrementalMarking::Initialize();
4021 }
4022 
4023 
4024 void MarkCompactCollector::EvictPopularEvacuationCandidate(Page* page) {
4025   if (FLAG_trace_fragmentation) {
4026     PrintF("Page %p is too popular. Disabling evacuation.\n",
4027            reinterpret_cast<void*>(page));
4028   }
4029 
4030   isolate()->CountUsage(v8::Isolate::UseCounterFeature::kSlotsBufferOverflow);
4031 
4032   // TODO(gc) If all evacuation candidates are too popular we
4033   // should stop slots recording entirely.
4034   page->ClearEvacuationCandidate();
4035 
4036   DCHECK(!page->IsFlagSet(Page::POPULAR_PAGE));
4037   page->SetFlag(Page::POPULAR_PAGE);
4038 
4039   // We were not collecting slots on this page that point
4040   // to other evacuation candidates, thus we have to
4041   // rescan the page after evacuation to discover and update all
4042   // pointers to evacuated objects.
4043   page->SetFlag(Page::RESCAN_ON_EVACUATION);
4044 }
4045 
4046 
4047 void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* object, Address slot,
4048                                                Code* target) {
4049   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4050   if (target_page->IsEvacuationCandidate() &&
4051       !ShouldSkipEvacuationSlotRecording(object)) {
4052     if (!SlotsBuffer::AddTo(slots_buffer_allocator_,
4053                             target_page->slots_buffer_address(),
4054                             SlotsBuffer::CODE_ENTRY_SLOT, slot,
4055                             SlotsBuffer::FAIL_ON_OVERFLOW)) {
4056       EvictPopularEvacuationCandidate(target_page);
4057     }
4058   }
4059 }
4060 
4061 
4062 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
4063   DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
4064   if (is_compacting()) {
4065     Code* host =
4066         isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
4067             pc);
4068     MarkBit mark_bit = Marking::MarkBitFrom(host);
4069     if (Marking::IsBlack(mark_bit)) {
4070       RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
4071       RecordRelocSlot(&rinfo, target);
4072     }
4073   }
4074 }
4075 
4076 }  // namespace internal
4077 }  // namespace v8
4078