// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/gdb-jit.h"
#include "src/global-handles.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper-thread.h"
#include "src/heap-profiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "10";
const char* Marking::kGreyBitPattern = "11";
const char* Marking::kImpossibleBitPattern = "01";


// -------------------------------------------------------------------------
// MarkCompactCollector
38 
MarkCompactCollector::MarkCompactCollector(Heap* heap)
40     :  // NOLINT
41 #ifdef DEBUG
42       state_(IDLE),
43 #endif
44       reduce_memory_footprint_(false),
45       abort_incremental_marking_(false),
46       marking_parity_(ODD_MARKING_PARITY),
47       compacting_(false),
48       was_marked_incrementally_(false),
49       sweeping_in_progress_(false),
50       pending_sweeper_jobs_semaphore_(0),
51       sequential_sweeping_(false),
52       migration_slots_buffer_(NULL),
53       heap_(heap),
54       code_flusher_(NULL),
55       have_code_to_deoptimize_(false) {
56 }
57 
58 #ifdef VERIFY_HEAP
59 class VerifyMarkingVisitor : public ObjectVisitor {
60  public:
  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
62 
  void VisitPointers(Object** start, Object** end) {
64     for (Object** current = start; current < end; current++) {
65       if ((*current)->IsHeapObject()) {
66         HeapObject* object = HeapObject::cast(*current);
67         CHECK(heap_->mark_compact_collector()->IsMarked(object));
68       }
69     }
70   }
71 
  void VisitEmbeddedPointer(RelocInfo* rinfo) {
73     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
74     if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
75       Object* p = rinfo->target_object();
76       VisitPointer(&p);
77     }
78   }
79 
  void VisitCell(RelocInfo* rinfo) {
81     Code* code = rinfo->host();
82     DCHECK(rinfo->rmode() == RelocInfo::CELL);
83     if (!code->IsWeakObject(rinfo->target_cell())) {
84       ObjectVisitor::VisitCell(rinfo);
85     }
86   }
87 
88  private:
89   Heap* heap_;
90 };
91 
92 
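// Checks that every marked object in [bottom, top) references only marked
// objects and that the marked objects do not overlap.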
static void VerifyMarking(Heap* heap, Address bottom, Address top) {
94   VerifyMarkingVisitor visitor(heap);
95   HeapObject* object;
96   Address next_object_must_be_here_or_later = bottom;
97 
98   for (Address current = bottom; current < top; current += kPointerSize) {
99     object = HeapObject::FromAddress(current);
100     if (MarkCompactCollector::IsMarked(object)) {
101       CHECK(current >= next_object_must_be_here_or_later);
102       object->Iterate(&visitor);
103       next_object_must_be_here_or_later = current + object->Size();
104     }
105   }
106 }
107 
108 
static void VerifyMarking(NewSpace* space) {
110   Address end = space->top();
111   NewSpacePageIterator it(space->bottom(), end);
  // The bottom position is at the start of its page, which allows us to use
  // page->area_start() as the start of the range on all pages.
114   CHECK_EQ(space->bottom(),
115            NewSpacePage::FromAddress(space->bottom())->area_start());
116   while (it.has_next()) {
117     NewSpacePage* page = it.next();
118     Address limit = it.has_next() ? page->area_end() : end;
119     CHECK(limit == end || !page->Contains(end));
120     VerifyMarking(space->heap(), page->area_start(), limit);
121   }
122 }
123 
124 
static void VerifyMarking(PagedSpace* space) {
126   PageIterator it(space);
127 
128   while (it.has_next()) {
129     Page* p = it.next();
130     VerifyMarking(space->heap(), p->area_start(), p->area_end());
131   }
132 }
133 
134 
static void VerifyMarking(Heap* heap) {
136   VerifyMarking(heap->old_pointer_space());
137   VerifyMarking(heap->old_data_space());
138   VerifyMarking(heap->code_space());
139   VerifyMarking(heap->cell_space());
140   VerifyMarking(heap->property_cell_space());
141   VerifyMarking(heap->map_space());
142   VerifyMarking(heap->new_space());
143 
144   VerifyMarkingVisitor visitor(heap);
145 
146   LargeObjectIterator it(heap->lo_space());
147   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
148     if (MarkCompactCollector::IsMarked(obj)) {
149       obj->Iterate(&visitor);
150     }
151   }
152 
153   heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
154 }
155 
156 
157 class VerifyEvacuationVisitor : public ObjectVisitor {
158  public:
  void VisitPointers(Object** start, Object** end) {
160     for (Object** current = start; current < end; current++) {
161       if ((*current)->IsHeapObject()) {
162         HeapObject* object = HeapObject::cast(*current);
163         CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
164       }
165     }
166   }
167 };
168 
169 
static void VerifyEvacuation(Page* page) {
171   VerifyEvacuationVisitor visitor;
172   HeapObjectIterator iterator(page, NULL);
173   for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
174        heap_object = iterator.Next()) {
175     // We skip free space objects.
176     if (!heap_object->IsFiller()) {
177       heap_object->Iterate(&visitor);
178     }
179   }
180 }
181 
182 
static void VerifyEvacuation(NewSpace* space) {
184   NewSpacePageIterator it(space->bottom(), space->top());
185   VerifyEvacuationVisitor visitor;
186 
187   while (it.has_next()) {
188     NewSpacePage* page = it.next();
189     Address current = page->area_start();
190     Address limit = it.has_next() ? page->area_end() : space->top();
191     CHECK(limit == space->top() || !page->Contains(space->top()));
192     while (current < limit) {
193       HeapObject* object = HeapObject::FromAddress(current);
194       object->Iterate(&visitor);
195       current += object->Size();
196     }
197   }
198 }
199 
200 
static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
202   if (FLAG_use_allocation_folding &&
203       (space == heap->old_pointer_space() || space == heap->old_data_space())) {
204     return;
205   }
206   PageIterator it(space);
207 
208   while (it.has_next()) {
209     Page* p = it.next();
210     if (p->IsEvacuationCandidate()) continue;
211     VerifyEvacuation(p);
212   }
213 }
214 
215 
static void VerifyEvacuation(Heap* heap) {
217   VerifyEvacuation(heap, heap->old_pointer_space());
218   VerifyEvacuation(heap, heap->old_data_space());
219   VerifyEvacuation(heap, heap->code_space());
220   VerifyEvacuation(heap, heap->cell_space());
221   VerifyEvacuation(heap, heap->property_cell_space());
222   VerifyEvacuation(heap, heap->map_space());
223   VerifyEvacuation(heap->new_space());
224 
225   VerifyEvacuationVisitor visitor;
226   heap->IterateStrongRoots(&visitor, VISIT_ALL);
227 }
228 #endif  // VERIFY_HEAP
229 
230 
231 #ifdef DEBUG
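// Visitor that checks that all contexts reachable from a code object refer
// to one and the same native context.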
232 class VerifyNativeContextSeparationVisitor : public ObjectVisitor {
233  public:
  VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
235 
  void VisitPointers(Object** start, Object** end) {
237     for (Object** current = start; current < end; current++) {
238       if ((*current)->IsHeapObject()) {
239         HeapObject* object = HeapObject::cast(*current);
240         if (object->IsString()) continue;
241         switch (object->map()->instance_type()) {
242           case JS_FUNCTION_TYPE:
243             CheckContext(JSFunction::cast(object)->context());
244             break;
245           case JS_GLOBAL_PROXY_TYPE:
246             CheckContext(JSGlobalProxy::cast(object)->native_context());
247             break;
248           case JS_GLOBAL_OBJECT_TYPE:
249           case JS_BUILTINS_OBJECT_TYPE:
250             CheckContext(GlobalObject::cast(object)->native_context());
251             break;
252           case JS_ARRAY_TYPE:
253           case JS_DATE_TYPE:
254           case JS_OBJECT_TYPE:
255           case JS_REGEXP_TYPE:
256             VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
257             break;
258           case MAP_TYPE:
259             VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
260             VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
261             break;
262           case FIXED_ARRAY_TYPE:
263             if (object->IsContext()) {
264               CheckContext(object);
265             } else {
266               FixedArray* array = FixedArray::cast(object);
267               int length = array->length();
              // Set the array length to zero to prevent cycles while iterating
              // over array bodies; this is easier than intrusive marking.
270               array->set_length(0);
271               array->IterateBody(FIXED_ARRAY_TYPE, FixedArray::SizeFor(length),
272                                  this);
273               array->set_length(length);
274             }
275             break;
276           case CELL_TYPE:
277           case JS_PROXY_TYPE:
278           case JS_VALUE_TYPE:
279           case TYPE_FEEDBACK_INFO_TYPE:
280             object->Iterate(this);
281             break;
282           case DECLARED_ACCESSOR_INFO_TYPE:
283           case EXECUTABLE_ACCESSOR_INFO_TYPE:
284           case BYTE_ARRAY_TYPE:
285           case CALL_HANDLER_INFO_TYPE:
286           case CODE_TYPE:
287           case FIXED_DOUBLE_ARRAY_TYPE:
288           case HEAP_NUMBER_TYPE:
289           case MUTABLE_HEAP_NUMBER_TYPE:
290           case INTERCEPTOR_INFO_TYPE:
291           case ODDBALL_TYPE:
292           case SCRIPT_TYPE:
293           case SHARED_FUNCTION_INFO_TYPE:
294             break;
295           default:
296             UNREACHABLE();
297         }
298       }
299     }
300   }
301 
302  private:
  void CheckContext(Object* context) {
304     if (!context->IsContext()) return;
305     Context* native_context = Context::cast(context)->native_context();
306     if (current_native_context_ == NULL) {
307       current_native_context_ = native_context;
308     } else {
309       CHECK_EQ(current_native_context_, native_context);
310     }
311   }
312 
313   Context* current_native_context_;
314 };
315 
316 
static void VerifyNativeContextSeparation(Heap* heap) {
318   HeapObjectIterator it(heap->code_space());
319 
320   for (Object* object = it.Next(); object != NULL; object = it.Next()) {
321     VerifyNativeContextSeparationVisitor visitor;
322     Code::cast(object)->CodeIterateBody(&visitor);
323   }
324 }
325 #endif
326 
327 
void MarkCompactCollector::SetUp() {
329   free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
330   free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
331 }
332 
333 
void MarkCompactCollector::TearDown() { AbortCompaction(); }
335 
336 
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
338   p->MarkEvacuationCandidate();
339   evacuation_candidates_.Add(p);
340 }
341 
342 
static void TraceFragmentation(PagedSpace* space) {
344   int number_of_pages = space->CountTotalPages();
345   intptr_t reserved = (number_of_pages * space->AreaSize());
346   intptr_t free = reserved - space->SizeOfObjects();
347   PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
348          AllocationSpaceName(space->identity()), number_of_pages,
349          static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
350 }
351 
352 
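// Collects evacuation candidates for the old pointer, old data and
// (depending on flags) code spaces, evicts them from the free lists and
// enables compaction if any candidates were found. Returns whether the
// upcoming collection will compact.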
bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
354   if (!compacting_) {
355     DCHECK(evacuation_candidates_.length() == 0);
356 
357 #ifdef ENABLE_GDB_JIT_INTERFACE
    // If the GDBJIT interface is active, disable compaction.
359     if (FLAG_gdbjit) return false;
360 #endif
361 
362     CollectEvacuationCandidates(heap()->old_pointer_space());
363     CollectEvacuationCandidates(heap()->old_data_space());
364 
365     if (FLAG_compact_code_space && (mode == NON_INCREMENTAL_COMPACTION ||
366                                     FLAG_incremental_code_compaction)) {
367       CollectEvacuationCandidates(heap()->code_space());
368     } else if (FLAG_trace_fragmentation) {
369       TraceFragmentation(heap()->code_space());
370     }
371 
372     if (FLAG_trace_fragmentation) {
373       TraceFragmentation(heap()->map_space());
374       TraceFragmentation(heap()->cell_space());
375       TraceFragmentation(heap()->property_cell_space());
376     }
377 
378     heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
379     heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
380     heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
381 
382     compacting_ = evacuation_candidates_.length() > 0;
383   }
384 
385   return compacting_;
386 }
387 
388 
void MarkCompactCollector::CollectGarbage() {
390   // Make sure that Prepare() has been called. The individual steps below will
391   // update the state as they proceed.
392   DCHECK(state_ == PREPARE_GC);
393 
394   MarkLiveObjects();
395   DCHECK(heap_->incremental_marking()->IsStopped());
396 
397   if (FLAG_collect_maps) ClearNonLiveReferences();
398 
399   ClearWeakCollections();
400 
401 #ifdef VERIFY_HEAP
402   if (FLAG_verify_heap) {
403     VerifyMarking(heap_);
404   }
405 #endif
406 
407   SweepSpaces();
408 
409 #ifdef DEBUG
410   if (FLAG_verify_native_context_separation) {
411     VerifyNativeContextSeparation(heap_);
412   }
413 #endif
414 
415 #ifdef VERIFY_HEAP
416   if (heap()->weak_embedded_objects_verification_enabled()) {
417     VerifyWeakEmbeddedObjectsInCode();
418   }
419   if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
420     VerifyOmittedMapChecks();
421   }
422 #endif
423 
424   Finish();
425 
426   if (marking_parity_ == EVEN_MARKING_PARITY) {
427     marking_parity_ = ODD_MARKING_PARITY;
428   } else {
429     DCHECK(marking_parity_ == ODD_MARKING_PARITY);
430     marking_parity_ = EVEN_MARKING_PARITY;
431   }
432 }
433 
434 
435 #ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
437   PageIterator it(space);
438 
439   while (it.has_next()) {
440     Page* p = it.next();
441     CHECK(p->markbits()->IsClean());
442     CHECK_EQ(0, p->LiveBytes());
443   }
444 }
445 
446 
void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
448   NewSpacePageIterator it(space->bottom(), space->top());
449 
450   while (it.has_next()) {
451     NewSpacePage* p = it.next();
452     CHECK(p->markbits()->IsClean());
453     CHECK_EQ(0, p->LiveBytes());
454   }
455 }
456 
457 
void MarkCompactCollector::VerifyMarkbitsAreClean() {
459   VerifyMarkbitsAreClean(heap_->old_pointer_space());
460   VerifyMarkbitsAreClean(heap_->old_data_space());
461   VerifyMarkbitsAreClean(heap_->code_space());
462   VerifyMarkbitsAreClean(heap_->cell_space());
463   VerifyMarkbitsAreClean(heap_->property_cell_space());
464   VerifyMarkbitsAreClean(heap_->map_space());
465   VerifyMarkbitsAreClean(heap_->new_space());
466 
467   LargeObjectIterator it(heap_->lo_space());
468   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
469     MarkBit mark_bit = Marking::MarkBitFrom(obj);
470     CHECK(Marking::IsWhite(mark_bit));
471     CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
472   }
473 }
474 
475 
void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
477   HeapObjectIterator code_iterator(heap()->code_space());
478   for (HeapObject* obj = code_iterator.Next(); obj != NULL;
479        obj = code_iterator.Next()) {
480     Code* code = Code::cast(obj);
481     if (!code->is_optimized_code() && !code->is_weak_stub()) continue;
482     if (WillBeDeoptimized(code)) continue;
483     code->VerifyEmbeddedObjectsDependency();
484   }
485 }
486 
487 
void MarkCompactCollector::VerifyOmittedMapChecks() {
489   HeapObjectIterator iterator(heap()->map_space());
490   for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
491     Map* map = Map::cast(obj);
492     map->VerifyOmittedMapChecks();
493   }
494 }
495 #endif  // VERIFY_HEAP
496 
497 
static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
499   PageIterator it(space);
500 
501   while (it.has_next()) {
502     Bitmap::Clear(it.next());
503   }
504 }
505 
506 
static void ClearMarkbitsInNewSpace(NewSpace* space) {
508   NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
509 
510   while (it.has_next()) {
511     Bitmap::Clear(it.next());
512   }
513 }
514 
515 
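// Clears the mark bits of every page in all spaces, including the large
// object space, and resets the large pages' progress bars and live bytes.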
void MarkCompactCollector::ClearMarkbits() {
517   ClearMarkbitsInPagedSpace(heap_->code_space());
518   ClearMarkbitsInPagedSpace(heap_->map_space());
519   ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
520   ClearMarkbitsInPagedSpace(heap_->old_data_space());
521   ClearMarkbitsInPagedSpace(heap_->cell_space());
522   ClearMarkbitsInPagedSpace(heap_->property_cell_space());
523   ClearMarkbitsInNewSpace(heap_->new_space());
524 
525   LargeObjectIterator it(heap_->lo_space());
526   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
527     MarkBit mark_bit = Marking::MarkBitFrom(obj);
528     mark_bit.Clear();
529     mark_bit.Next().Clear();
530     Page::FromAddress(obj->address())->ResetProgressBar();
531     Page::FromAddress(obj->address())->ResetLiveBytes();
532   }
533 }
534 
535 
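// Background task that sweeps a single paged space in parallel and signals
// |pending_sweeper_jobs_semaphore_| when it is done.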
536 class MarkCompactCollector::SweeperTask : public v8::Task {
537  public:
  SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
539 
  virtual ~SweeperTask() {}
541 
542  private:
543   // v8::Task overrides.
  virtual void Run() OVERRIDE {
545     heap_->mark_compact_collector()->SweepInParallel(space_, 0);
546     heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
547   }
548 
549   Heap* heap_;
550   PagedSpace* space_;
551 
552   DISALLOW_COPY_AND_ASSIGN(SweeperTask);
553 };
554 
555 
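// Starts concurrent sweeping on the dedicated sweeper threads and, when
// job-based sweeping is enabled, posts one background task per old space.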
void MarkCompactCollector::StartSweeperThreads() {
557   DCHECK(free_list_old_pointer_space_.get()->IsEmpty());
558   DCHECK(free_list_old_data_space_.get()->IsEmpty());
559   sweeping_in_progress_ = true;
560   for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
561     isolate()->sweeper_threads()[i]->StartSweeping();
562   }
563   if (FLAG_job_based_sweeping) {
564     V8::GetCurrentPlatform()->CallOnBackgroundThread(
565         new SweeperTask(heap(), heap()->old_data_space()),
566         v8::Platform::kShortRunningTask);
567     V8::GetCurrentPlatform()->CallOnBackgroundThread(
568         new SweeperTask(heap(), heap()->old_pointer_space()),
569         v8::Platform::kShortRunningTask);
570   }
571 }
572 
573 
void MarkCompactCollector::EnsureSweepingCompleted() {
575   DCHECK(sweeping_in_progress_ == true);
576 
  // If sweeping is not completed, we try to complete it here. If we do not
  // have sweeper threads, we have to complete the sweeping here because we do
  // not have a good indicator for a swept space in that case.
580   if (!AreSweeperThreadsActivated() || !IsSweepingCompleted()) {
581     SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
582     SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
583   }
584 
585   for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
586     isolate()->sweeper_threads()[i]->WaitForSweeperThread();
587   }
588   if (FLAG_job_based_sweeping) {
589     // Wait twice for both jobs.
590     pending_sweeper_jobs_semaphore_.Wait();
591     pending_sweeper_jobs_semaphore_.Wait();
592   }
593   ParallelSweepSpacesComplete();
594   sweeping_in_progress_ = false;
595   RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
596   RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
597   heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
598   heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
599 
600 #ifdef VERIFY_HEAP
601   if (FLAG_verify_heap) {
602     VerifyEvacuation(heap_);
603   }
604 #endif
605 }
606 
607 
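// Checks, without blocking, whether concurrent sweeping has finished. The
// zero-timeout semaphore wait is immediately re-signalled so that the count
// stays intact for EnsureSweepingCompleted().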
bool MarkCompactCollector::IsSweepingCompleted() {
609   for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
610     if (!isolate()->sweeper_threads()[i]->SweepingCompleted()) {
611       return false;
612     }
613   }
614 
615   if (FLAG_job_based_sweeping) {
616     if (!pending_sweeper_jobs_semaphore_.WaitFor(
617             base::TimeDelta::FromSeconds(0))) {
618       return false;
619     }
620     pending_sweeper_jobs_semaphore_.Signal();
621   }
622 
623   return true;
624 }
625 
626 
void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
628   FreeList* free_list;
629 
630   if (space == heap()->old_pointer_space()) {
631     free_list = free_list_old_pointer_space_.get();
632   } else if (space == heap()->old_data_space()) {
633     free_list = free_list_old_data_space_.get();
634   } else {
635     // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
636     // to only refill them for old data and pointer spaces.
637     return;
638   }
639 
640   intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
641   space->AddToAccountingStats(freed_bytes);
642   space->DecrementUnsweptFreeBytes(freed_bytes);
643 }
644 
645 
bool MarkCompactCollector::AreSweeperThreadsActivated() {
647   return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
648 }
649 
650 
void Marking::TransferMark(Address old_start, Address new_start) {
652   // This is only used when resizing an object.
653   DCHECK(MemoryChunk::FromAddress(old_start) ==
654          MemoryChunk::FromAddress(new_start));
655 
656   if (!heap_->incremental_marking()->IsMarking()) return;
657 
658   // If the mark doesn't move, we don't check the color of the object.
659   // It doesn't matter whether the object is black, since it hasn't changed
660   // size, so the adjustment to the live data count will be zero anyway.
661   if (old_start == new_start) return;
662 
663   MarkBit new_mark_bit = MarkBitFrom(new_start);
664   MarkBit old_mark_bit = MarkBitFrom(old_start);
665 
666 #ifdef DEBUG
667   ObjectColor old_color = Color(old_mark_bit);
668 #endif
669 
670   if (Marking::IsBlack(old_mark_bit)) {
671     old_mark_bit.Clear();
672     DCHECK(IsWhite(old_mark_bit));
673     Marking::MarkBlack(new_mark_bit);
674     return;
675   } else if (Marking::IsGrey(old_mark_bit)) {
676     old_mark_bit.Clear();
677     old_mark_bit.Next().Clear();
678     DCHECK(IsWhite(old_mark_bit));
679     heap_->incremental_marking()->WhiteToGreyAndPush(
680         HeapObject::FromAddress(new_start), new_mark_bit);
681     heap_->incremental_marking()->RestartIfNotMarking();
682   }
683 
684 #ifdef DEBUG
685   ObjectColor new_color = Color(new_mark_bit);
686   DCHECK(new_color == old_color);
687 #endif
688 }
689 
690 
const char* AllocationSpaceName(AllocationSpace space) {
692   switch (space) {
693     case NEW_SPACE:
694       return "NEW_SPACE";
695     case OLD_POINTER_SPACE:
696       return "OLD_POINTER_SPACE";
697     case OLD_DATA_SPACE:
698       return "OLD_DATA_SPACE";
699     case CODE_SPACE:
700       return "CODE_SPACE";
701     case MAP_SPACE:
702       return "MAP_SPACE";
703     case CELL_SPACE:
704       return "CELL_SPACE";
705     case PROPERTY_CELL_SPACE:
706       return "PROPERTY_CELL_SPACE";
707     case LO_SPACE:
708       return "LO_SPACE";
709     default:
710       UNREACHABLE();
711   }
712 
713   return NULL;
714 }
715 
716 
717 // Returns zero for pages that have so little fragmentation that it is not
718 // worth defragmenting them.  Otherwise a positive integer that gives an
719 // estimate of fragmentation on an arbitrary scale.
static int FreeListFragmentation(PagedSpace* space, Page* p) {
721   // If page was not swept then there are no free list items on it.
722   if (!p->WasSwept()) {
723     if (FLAG_trace_fragmentation) {
724       PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
725              AllocationSpaceName(space->identity()), p->LiveBytes());
726     }
727     return 0;
728   }
729 
730   PagedSpace::SizeStats sizes;
731   space->ObtainFreeListStatistics(p, &sizes);
732 
733   intptr_t ratio;
734   intptr_t ratio_threshold;
735   intptr_t area_size = space->AreaSize();
736   if (space->identity() == CODE_SPACE) {
737     ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size;
738     ratio_threshold = 10;
739   } else {
740     ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size;
741     ratio_threshold = 15;
742   }
743 
744   if (FLAG_trace_fragmentation) {
745     PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
746            reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
747            static_cast<int>(sizes.small_size_),
748            static_cast<double>(sizes.small_size_ * 100) / area_size,
749            static_cast<int>(sizes.medium_size_),
750            static_cast<double>(sizes.medium_size_ * 100) / area_size,
751            static_cast<int>(sizes.large_size_),
752            static_cast<double>(sizes.large_size_ * 100) / area_size,
753            static_cast<int>(sizes.huge_size_),
754            static_cast<double>(sizes.huge_size_ * 100) / area_size,
755            (ratio > ratio_threshold) ? "[fragmented]" : "");
756   }
757 
758   if (FLAG_always_compact && sizes.Total() != area_size) {
759     return 1;
760   }
761 
762   if (ratio <= ratio_threshold) return 0;  // Not fragmented.
763 
764   return static_cast<int>(ratio - ratio_threshold);
765 }
766 
767 
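// Selects up to roughly sqrt(#pages / 2) evacuation candidates for |space|.
// When reducing the memory footprint, mostly free pages (>= 50% free) are
// preferred; otherwise pages are ranked by their free list fragmentation.
// The first page of a space is never compacted.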
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
769   DCHECK(space->identity() == OLD_POINTER_SPACE ||
770          space->identity() == OLD_DATA_SPACE ||
771          space->identity() == CODE_SPACE);
772 
773   static const int kMaxMaxEvacuationCandidates = 1000;
774   int number_of_pages = space->CountTotalPages();
775   int max_evacuation_candidates =
776       static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
777 
778   if (FLAG_stress_compaction || FLAG_always_compact) {
779     max_evacuation_candidates = kMaxMaxEvacuationCandidates;
780   }
781 
782   class Candidate {
783    public:
784     Candidate() : fragmentation_(0), page_(NULL) {}
785     Candidate(int f, Page* p) : fragmentation_(f), page_(p) {}
786 
787     int fragmentation() { return fragmentation_; }
788     Page* page() { return page_; }
789 
790    private:
791     int fragmentation_;
792     Page* page_;
793   };
794 
795   enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT };
796 
797   CompactionMode mode = COMPACT_FREE_LISTS;
798 
799   intptr_t reserved = number_of_pages * space->AreaSize();
800   intptr_t over_reserved = reserved - space->SizeOfObjects();
801   static const intptr_t kFreenessThreshold = 50;
802 
803   if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
804     // If reduction of memory footprint was requested, we are aggressive
805     // about choosing pages to free.  We expect that half-empty pages
806     // are easier to compact so slightly bump the limit.
807     mode = REDUCE_MEMORY_FOOTPRINT;
808     max_evacuation_candidates += 2;
809   }
810 
811 
812   if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
813     // If over-usage is very high (more than a third of the space), we
814     // try to free all mostly empty pages.  We expect that almost empty
815     // pages are even easier to compact so bump the limit even more.
816     mode = REDUCE_MEMORY_FOOTPRINT;
817     max_evacuation_candidates *= 2;
818   }
819 
820   if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
821     PrintF(
822         "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
823         "evacuation candidate limit: %d\n",
824         static_cast<double>(over_reserved) / MB,
825         static_cast<double>(reserved) / MB,
826         static_cast<int>(kFreenessThreshold), max_evacuation_candidates);
827   }
828 
829   intptr_t estimated_release = 0;
830 
831   Candidate candidates[kMaxMaxEvacuationCandidates];
832 
833   max_evacuation_candidates =
834       Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
835 
836   int count = 0;
837   int fragmentation = 0;
838   Candidate* least = NULL;
839 
840   PageIterator it(space);
841   if (it.has_next()) it.next();  // Never compact the first page.
842 
843   while (it.has_next()) {
844     Page* p = it.next();
845     p->ClearEvacuationCandidate();
846 
847     if (FLAG_stress_compaction) {
848       unsigned int counter = space->heap()->ms_count();
849       uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
850       if ((counter & 1) == (page_number & 1)) fragmentation = 1;
851     } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
852       // Don't try to release too many pages.
853       if (estimated_release >= over_reserved) {
854         continue;
855       }
856 
857       intptr_t free_bytes = 0;
858 
859       if (!p->WasSwept()) {
860         free_bytes = (p->area_size() - p->LiveBytes());
861       } else {
862         PagedSpace::SizeStats sizes;
863         space->ObtainFreeListStatistics(p, &sizes);
864         free_bytes = sizes.Total();
865       }
866 
867       int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
868 
869       if (free_pct >= kFreenessThreshold) {
870         estimated_release += free_bytes;
871         fragmentation = free_pct;
872       } else {
873         fragmentation = 0;
874       }
875 
876       if (FLAG_trace_fragmentation) {
877         PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
878                AllocationSpaceName(space->identity()),
879                static_cast<int>(free_bytes),
880                static_cast<double>(free_bytes * 100) / p->area_size(),
881                (fragmentation > 0) ? "[fragmented]" : "");
882       }
883     } else {
884       fragmentation = FreeListFragmentation(space, p);
885     }
886 
887     if (fragmentation != 0) {
888       if (count < max_evacuation_candidates) {
889         candidates[count++] = Candidate(fragmentation, p);
890       } else {
891         if (least == NULL) {
892           for (int i = 0; i < max_evacuation_candidates; i++) {
893             if (least == NULL ||
894                 candidates[i].fragmentation() < least->fragmentation()) {
895               least = candidates + i;
896             }
897           }
898         }
899         if (least->fragmentation() < fragmentation) {
900           *least = Candidate(fragmentation, p);
901           least = NULL;
902         }
903       }
904     }
905   }
906 
907   for (int i = 0; i < count; i++) {
908     AddEvacuationCandidate(candidates[i].page());
909   }
910 
911   if (count > 0 && FLAG_trace_fragmentation) {
912     PrintF("Collected %d evacuation candidates for space %s\n", count,
913            AllocationSpaceName(space->identity()));
914   }
915 }
916 
917 
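// Gives up an already started compaction: deallocates the slots buffers of
// all evacuation candidates and clears their evacuation and rescan flags.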
void MarkCompactCollector::AbortCompaction() {
919   if (compacting_) {
920     int npages = evacuation_candidates_.length();
921     for (int i = 0; i < npages; i++) {
922       Page* p = evacuation_candidates_[i];
923       slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
924       p->ClearEvacuationCandidate();
925       p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
926     }
927     compacting_ = false;
928     evacuation_candidates_.Rewind(0);
929     invalidated_code_.Rewind(0);
930   }
931   DCHECK_EQ(0, evacuation_candidates_.length());
932 }
933 
934 
void MarkCompactCollector::Prepare() {
936   was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
937 
938 #ifdef DEBUG
939   DCHECK(state_ == IDLE);
940   state_ = PREPARE_GC;
941 #endif
942 
943   DCHECK(!FLAG_never_compact || !FLAG_always_compact);
944 
945   if (sweeping_in_progress()) {
946     // Instead of waiting we could also abort the sweeper threads here.
947     EnsureSweepingCompleted();
948   }
949 
950   // Clear marking bits if incremental marking is aborted.
951   if (was_marked_incrementally_ && abort_incremental_marking_) {
952     heap()->incremental_marking()->Abort();
953     ClearMarkbits();
954     AbortWeakCollections();
955     AbortCompaction();
956     was_marked_incrementally_ = false;
957   }
958 
  // Don't start compaction if we are in the middle of an incremental
  // marking cycle. We did not collect any slots.
961   if (!FLAG_never_compact && !was_marked_incrementally_) {
962     StartCompaction(NON_INCREMENTAL_COMPACTION);
963   }
964 
965   PagedSpaces spaces(heap());
966   for (PagedSpace* space = spaces.next(); space != NULL;
967        space = spaces.next()) {
968     space->PrepareForMarkCompact();
969   }
970 
971 #ifdef VERIFY_HEAP
972   if (!was_marked_incrementally_ && FLAG_verify_heap) {
973     VerifyMarkbitsAreClean();
974   }
975 #endif
976 }
977 
978 
void MarkCompactCollector::Finish() {
980 #ifdef DEBUG
981   DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
982   state_ = IDLE;
983 #endif
984   // The stub cache is not traversed during GC; clear the cache to
985   // force lazy re-initialization of it. This must be done after the
986   // GC, because it relies on the new address of certain old space
987   // objects (empty string, illegal builtin).
988   isolate()->stub_cache()->Clear();
989 
990   if (have_code_to_deoptimize_) {
991     // Some code objects were marked for deoptimization during the GC.
992     Deoptimizer::DeoptimizeMarkedCode(isolate());
993     have_code_to_deoptimize_ = false;
994   }
995 }
996 
997 
// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection.  Before marking, all objects are in their normal state.  After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots.  It
// uses an explicit stack of pointers rather than recursion.  The young
// generation's inactive ('from') space is used as a marking stack.  The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal.  In that case, we set an
// overflow flag.  When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack.  Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking.  This process repeats until all reachable
// objects have been marked.
1025 
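// Walks the list of JSFunction flushing candidates. Functions whose code was
// not marked during this GC are reset to the lazy-compile stub; the others
// keep their compiled code. The updated slots are recorded manually because
// the write barrier does not run during GC.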
void CodeFlusher::ProcessJSFunctionCandidates() {
1027   Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
1028   Object* undefined = isolate_->heap()->undefined_value();
1029 
1030   JSFunction* candidate = jsfunction_candidates_head_;
1031   JSFunction* next_candidate;
1032   while (candidate != NULL) {
1033     next_candidate = GetNextCandidate(candidate);
1034     ClearNextCandidate(candidate, undefined);
1035 
1036     SharedFunctionInfo* shared = candidate->shared();
1037 
1038     Code* code = shared->code();
1039     MarkBit code_mark = Marking::MarkBitFrom(code);
1040     if (!code_mark.Get()) {
1041       if (FLAG_trace_code_flushing && shared->is_compiled()) {
1042         PrintF("[code-flushing clears: ");
1043         shared->ShortPrint();
1044         PrintF(" - age: %d]\n", code->GetAge());
1045       }
1046       shared->set_code(lazy_compile);
1047       candidate->set_code(lazy_compile);
1048     } else {
1049       candidate->set_code(code);
1050     }
1051 
1052     // We are in the middle of a GC cycle so the write barrier in the code
1053     // setter did not record the slot update and we have to do that manually.
1054     Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
1055     Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
1056     isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(slot,
1057                                                                     target);
1058 
1059     Object** shared_code_slot =
1060         HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
1061     isolate_->heap()->mark_compact_collector()->RecordSlot(
1062         shared_code_slot, shared_code_slot, *shared_code_slot);
1063 
1064     candidate = next_candidate;
1065   }
1066 
1067   jsfunction_candidates_head_ = NULL;
1068 }
1069 
1070 
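// Same as above, but for SharedFunctionInfo candidates: code that was not
// marked is replaced by the lazy-compile stub and the code slot is recorded.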
void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
1072   Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
1073 
1074   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1075   SharedFunctionInfo* next_candidate;
1076   while (candidate != NULL) {
1077     next_candidate = GetNextCandidate(candidate);
1078     ClearNextCandidate(candidate);
1079 
1080     Code* code = candidate->code();
1081     MarkBit code_mark = Marking::MarkBitFrom(code);
1082     if (!code_mark.Get()) {
1083       if (FLAG_trace_code_flushing && candidate->is_compiled()) {
1084         PrintF("[code-flushing clears: ");
1085         candidate->ShortPrint();
1086         PrintF(" - age: %d]\n", code->GetAge());
1087       }
1088       candidate->set_code(lazy_compile);
1089     }
1090 
1091     Object** code_slot =
1092         HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
1093     isolate_->heap()->mark_compact_collector()->RecordSlot(code_slot, code_slot,
1094                                                            *code_slot);
1095 
1096     candidate = next_candidate;
1097   }
1098 
1099   shared_function_info_candidates_head_ = NULL;
1100 }
1101 
1102 
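// Compacts each candidate holder's optimized code map in place, keeping only
// entries whose cached code is still marked, and trims off the unused tail.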
void CodeFlusher::ProcessOptimizedCodeMaps() {
1104   STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
1105 
1106   SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1107   SharedFunctionInfo* next_holder;
1108 
1109   while (holder != NULL) {
1110     next_holder = GetNextCodeMap(holder);
1111     ClearNextCodeMap(holder);
1112 
1113     FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
1114     int new_length = SharedFunctionInfo::kEntriesStart;
1115     int old_length = code_map->length();
1116     for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
1117          i += SharedFunctionInfo::kEntryLength) {
1118       Code* code =
1119           Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
1120       if (!Marking::MarkBitFrom(code).Get()) continue;
1121 
1122       // Move every slot in the entry.
1123       for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
1124         int dst_index = new_length++;
1125         Object** slot = code_map->RawFieldOfElementAt(dst_index);
1126         Object* object = code_map->get(i + j);
1127         code_map->set(dst_index, object);
1128         if (j == SharedFunctionInfo::kOsrAstIdOffset) {
1129           DCHECK(object->IsSmi());
1130         } else {
1131           DCHECK(
1132               Marking::IsBlack(Marking::MarkBitFrom(HeapObject::cast(*slot))));
1133           isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,
1134                                                                  *slot);
1135         }
1136       }
1137     }
1138 
1139     // Trim the optimized code map if entries have been removed.
1140     if (new_length < old_length) {
1141       holder->TrimOptimizedCodeMap(old_length - new_length);
1142     }
1143 
1144     holder = next_holder;
1145   }
1146 
1147   optimized_code_map_holder_head_ = NULL;
1148 }
1149 
1150 
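// Removes |shared_info| from the singly-linked list of shared function info
// flushing candidates.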
void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
1152   // Make sure previous flushing decisions are revisited.
1153   isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
1154 
1155   if (FLAG_trace_code_flushing) {
1156     PrintF("[code-flushing abandons function-info: ");
1157     shared_info->ShortPrint();
1158     PrintF("]\n");
1159   }
1160 
1161   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1162   SharedFunctionInfo* next_candidate;
1163   if (candidate == shared_info) {
1164     next_candidate = GetNextCandidate(shared_info);
1165     shared_function_info_candidates_head_ = next_candidate;
1166     ClearNextCandidate(shared_info);
1167   } else {
1168     while (candidate != NULL) {
1169       next_candidate = GetNextCandidate(candidate);
1170 
1171       if (next_candidate == shared_info) {
1172         next_candidate = GetNextCandidate(shared_info);
1173         SetNextCandidate(candidate, next_candidate);
1174         ClearNextCandidate(shared_info);
1175         break;
1176       }
1177 
1178       candidate = next_candidate;
1179     }
1180   }
1181 }
1182 
1183 
void CodeFlusher::EvictCandidate(JSFunction* function) {
1185   DCHECK(!function->next_function_link()->IsUndefined());
1186   Object* undefined = isolate_->heap()->undefined_value();
1187 
1188   // Make sure previous flushing decisions are revisited.
1189   isolate_->heap()->incremental_marking()->RecordWrites(function);
1190   isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
1191 
1192   if (FLAG_trace_code_flushing) {
1193     PrintF("[code-flushing abandons closure: ");
1194     function->shared()->ShortPrint();
1195     PrintF("]\n");
1196   }
1197 
1198   JSFunction* candidate = jsfunction_candidates_head_;
1199   JSFunction* next_candidate;
1200   if (candidate == function) {
1201     next_candidate = GetNextCandidate(function);
1202     jsfunction_candidates_head_ = next_candidate;
1203     ClearNextCandidate(function, undefined);
1204   } else {
1205     while (candidate != NULL) {
1206       next_candidate = GetNextCandidate(candidate);
1207 
1208       if (next_candidate == function) {
1209         next_candidate = GetNextCandidate(function);
1210         SetNextCandidate(candidate, next_candidate);
1211         ClearNextCandidate(function, undefined);
1212         break;
1213       }
1214 
1215       candidate = next_candidate;
1216     }
1217   }
1218 }
1219 
1220 
void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
1222   DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())
1223               ->get(SharedFunctionInfo::kNextMapIndex)
1224               ->IsUndefined());
1225 
1226   // Make sure previous flushing decisions are revisited.
1227   isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
1228 
1229   if (FLAG_trace_code_flushing) {
1230     PrintF("[code-flushing abandons code-map: ");
1231     code_map_holder->ShortPrint();
1232     PrintF("]\n");
1233   }
1234 
1235   SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1236   SharedFunctionInfo* next_holder;
1237   if (holder == code_map_holder) {
1238     next_holder = GetNextCodeMap(code_map_holder);
1239     optimized_code_map_holder_head_ = next_holder;
1240     ClearNextCodeMap(code_map_holder);
1241   } else {
1242     while (holder != NULL) {
1243       next_holder = GetNextCodeMap(holder);
1244 
1245       if (next_holder == code_map_holder) {
1246         next_holder = GetNextCodeMap(code_map_holder);
1247         SetNextCodeMap(holder, next_holder);
1248         ClearNextCodeMap(code_map_holder);
1249         break;
1250       }
1251 
1252       holder = next_holder;
1253     }
1254   }
1255 }
1256 
1257 
void CodeFlusher::EvictJSFunctionCandidates() {
1259   JSFunction* candidate = jsfunction_candidates_head_;
1260   JSFunction* next_candidate;
1261   while (candidate != NULL) {
1262     next_candidate = GetNextCandidate(candidate);
1263     EvictCandidate(candidate);
1264     candidate = next_candidate;
1265   }
1266   DCHECK(jsfunction_candidates_head_ == NULL);
1267 }
1268 
1269 
void CodeFlusher::EvictSharedFunctionInfoCandidates() {
1271   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1272   SharedFunctionInfo* next_candidate;
1273   while (candidate != NULL) {
1274     next_candidate = GetNextCandidate(candidate);
1275     EvictCandidate(candidate);
1276     candidate = next_candidate;
1277   }
1278   DCHECK(shared_function_info_candidates_head_ == NULL);
1279 }
1280 
1281 
void CodeFlusher::EvictOptimizedCodeMaps() {
1283   SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1284   SharedFunctionInfo* next_holder;
1285   while (holder != NULL) {
1286     next_holder = GetNextCodeMap(holder);
1287     EvictOptimizedCodeMap(holder);
1288     holder = next_holder;
1289   }
1290   DCHECK(optimized_code_map_holder_head_ == NULL);
1291 }
1292 
1293 
void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1295   Heap* heap = isolate_->heap();
1296 
1297   JSFunction** slot = &jsfunction_candidates_head_;
1298   JSFunction* candidate = jsfunction_candidates_head_;
1299   while (candidate != NULL) {
1300     if (heap->InFromSpace(candidate)) {
1301       v->VisitPointer(reinterpret_cast<Object**>(slot));
1302     }
1303     candidate = GetNextCandidate(*slot);
1304     slot = GetNextCandidateSlot(*slot);
1305   }
1306 }
1307 
1308 
MarkCompactCollector::~MarkCompactCollector() {
1310   if (code_flusher_ != NULL) {
1311     delete code_flusher_;
1312     code_flusher_ = NULL;
1313   }
1314 }
1315 
1316 
static inline HeapObject* ShortCircuitConsString(Object** p) {
1318   // Optimization: If the heap object pointed to by p is a non-internalized
1319   // cons string whose right substring is HEAP->empty_string, update
1320   // it in place to its left substring.  Return the updated value.
1321   //
1322   // Here we assume that if we change *p, we replace it with a heap object
1323   // (i.e., the left substring of a cons string is always a heap object).
1324   //
1325   // The check performed is:
1326   //   object->IsConsString() && !object->IsInternalizedString() &&
1327   //   (ConsString::cast(object)->second() == HEAP->empty_string())
1328   // except the maps for the object and its possible substrings might be
1329   // marked.
1330   HeapObject* object = HeapObject::cast(*p);
1331   if (!FLAG_clever_optimizations) return object;
1332   Map* map = object->map();
1333   InstanceType type = map->instance_type();
1334   if (!IsShortcutCandidate(type)) return object;
1335 
1336   Object* second = reinterpret_cast<ConsString*>(object)->second();
1337   Heap* heap = map->GetHeap();
1338   if (second != heap->empty_string()) {
1339     return object;
1340   }
1341 
1342   // Since we don't have the object's start, it is impossible to update the
1343   // page dirty marks. Therefore, we only replace the string with its left
1344   // substring when page dirty marks do not change.
1345   Object* first = reinterpret_cast<ConsString*>(object)->first();
1346   if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
1347 
1348   *p = first;
1349   return HeapObject::cast(first);
1350 }
1351 
1352 
1353 class MarkCompactMarkingVisitor
1354     : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
1355  public:
1356   static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id, Map* map,
1357                                    HeapObject* obj);
1358 
1359   static void ObjectStatsCountFixedArray(
1360       FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
1361       FixedArraySubInstanceType dictionary_type);
1362 
1363   template <MarkCompactMarkingVisitor::VisitorId id>
1364   class ObjectStatsTracker {
1365    public:
1366     static inline void Visit(Map* map, HeapObject* obj);
1367   };
1368 
1369   static void Initialize();
1370 
  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
1372     MarkObjectByPointer(heap->mark_compact_collector(), p, p);
1373   }
1374 
  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
1376     // Mark all objects pointed to in [start, end).
1377     const int kMinRangeForMarkingRecursion = 64;
1378     if (end - start >= kMinRangeForMarkingRecursion) {
1379       if (VisitUnmarkedObjects(heap, start, end)) return;
1380       // We are close to a stack overflow, so just mark the objects.
1381     }
1382     MarkCompactCollector* collector = heap->mark_compact_collector();
1383     for (Object** p = start; p < end; p++) {
1384       MarkObjectByPointer(collector, start, p);
1385     }
1386   }
1387 
1388   // Marks the object black and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1390     MarkBit mark = Marking::MarkBitFrom(object);
1391     heap->mark_compact_collector()->MarkObject(object, mark);
1392   }
1393 
1394   // Marks the object black without pushing it on the marking stack.
1395   // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1397     MarkBit mark_bit = Marking::MarkBitFrom(object);
1398     if (!mark_bit.Get()) {
1399       heap->mark_compact_collector()->SetMark(object, mark_bit);
1400       return true;
1401     }
1402     return false;
1403   }
1404 
1405   // Mark object pointed to by p.
  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
                                         Object** anchor_slot, Object** p)) {
1408     if (!(*p)->IsHeapObject()) return;
1409     HeapObject* object = ShortCircuitConsString(p);
1410     collector->RecordSlot(anchor_slot, p, object);
1411     MarkBit mark = Marking::MarkBitFrom(object);
1412     collector->MarkObject(object, mark);
1413   }
1414 
1415 
1416   // Visit an unmarked object.
  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
                                         HeapObject* obj)) {
1419 #ifdef DEBUG
1420     DCHECK(collector->heap()->Contains(obj));
1421     DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
1422 #endif
1423     Map* map = obj->map();
1424     Heap* heap = obj->GetHeap();
1425     MarkBit mark = Marking::MarkBitFrom(obj);
1426     heap->mark_compact_collector()->SetMark(obj, mark);
1427     // Mark the map pointer and the body.
1428     MarkBit map_mark = Marking::MarkBitFrom(map);
1429     heap->mark_compact_collector()->MarkObject(map, map_mark);
1430     IterateBody(map, obj);
1431   }
1432 
1433   // Visit all unmarked objects pointed to by [start, end).
1434   // Returns false if the operation fails (lack of stack space).
  INLINE(static bool VisitUnmarkedObjects(Heap* heap, Object** start,
                                          Object** end)) {
    // Return false if we are close to the stack limit.
1438     StackLimitCheck check(heap->isolate());
1439     if (check.HasOverflowed()) return false;
1440 
1441     MarkCompactCollector* collector = heap->mark_compact_collector();
1442     // Visit the unmarked objects.
1443     for (Object** p = start; p < end; p++) {
1444       Object* o = *p;
1445       if (!o->IsHeapObject()) continue;
1446       collector->RecordSlot(start, p, o);
1447       HeapObject* obj = HeapObject::cast(o);
1448       MarkBit mark = Marking::MarkBitFrom(obj);
1449       if (mark.Get()) continue;
1450       VisitUnmarkedObject(collector, obj);
1451     }
1452     return true;
1453   }
1454 
1455  private:
1456   template <int id>
1457   static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
1458 
1459   // Code flushing support.
1460 
1461   static const int kRegExpCodeThreshold = 5;
1462 
  static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
                                          bool is_one_byte) {
1465     // Make sure that the fixed array is in fact initialized on the RegExp.
1466     // We could potentially trigger a GC when initializing the RegExp.
1467     if (HeapObject::cast(re->data())->map()->instance_type() !=
1468         FIXED_ARRAY_TYPE)
1469       return;
1470 
1471     // Make sure this is a RegExp that actually contains code.
1472     if (re->TypeTag() != JSRegExp::IRREGEXP) return;
1473 
1474     Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
1475     if (!code->IsSmi() &&
1476         HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1477       // Save a copy that can be reinstated if we need the code again.
1478       re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);
1479 
      // Saving a copy might create a pointer into a compaction candidate
      // that was not observed by the marker.  This might happen if the
      // JSRegExp data was marked through the compilation cache before the
      // marker reached the JSRegExp object.
1484       FixedArray* data = FixedArray::cast(re->data());
1485       Object** slot =
1486           data->data_start() + JSRegExp::saved_code_index(is_one_byte);
1487       heap->mark_compact_collector()->RecordSlot(slot, slot, code);
1488 
1489       // Set a number in the 0-255 range to guarantee no smi overflow.
1490       re->SetDataAt(JSRegExp::code_index(is_one_byte),
1491                     Smi::FromInt(heap->sweep_generation() & 0xff));
1492     } else if (code->IsSmi()) {
1493       int value = Smi::cast(code)->value();
1494       // The regexp has not been compiled yet or there was a compilation error.
1495       if (value == JSRegExp::kUninitializedValue ||
1496           value == JSRegExp::kCompilationErrorValue) {
1497         return;
1498       }
1499 
1500       // Check if we should flush now.
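      // The slot holds the sweep_generation (mod 256) at which the code was
      // aged above, so equality here means kRegExpCodeThreshold mark-sweep
      // GCs have passed without the RegExp being used again.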
1501       if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
1502         re->SetDataAt(JSRegExp::code_index(is_one_byte),
1503                       Smi::FromInt(JSRegExp::kUninitializedValue));
1504         re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
1505                       Smi::FromInt(JSRegExp::kUninitializedValue));
1506       }
1507     }
1508   }
1509 
1510 
1511   // Works by storing the current sweep_generation (as a smi) in the
1512   // code slot of the RegExp's data array and keeping a copy around that
1513   // can be reinstated if we reuse the RegExp before flushing.
1514   // If we did not use the code for kRegExpCodeThreshold mark-sweep GCs,
1515   // we flush the code.
1516   static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1517     Heap* heap = map->GetHeap();
1518     MarkCompactCollector* collector = heap->mark_compact_collector();
1519     if (!collector->is_code_flushing_enabled()) {
1520       VisitJSRegExp(map, object);
1521       return;
1522     }
1523     JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1524     // Flush code or set age on both one byte and two byte code.
1525     UpdateRegExpCodeAgeAndFlush(heap, re, true);
1526     UpdateRegExpCodeAgeAndFlush(heap, re, false);
1527     // Visit the fields of the RegExp, including the updated FixedArray.
1528     VisitJSRegExp(map, object);
1529   }
1530 
1531   static VisitorDispatchTable<Callback> non_count_table_;
1532 };
1533 
1534 
1535 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
1536     FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
1537     FixedArraySubInstanceType dictionary_type) {
1538   Heap* heap = fixed_array->map()->GetHeap();
1539   if (fixed_array->map() != heap->fixed_cow_array_map() &&
1540       fixed_array->map() != heap->fixed_double_array_map() &&
1541       fixed_array != heap->empty_fixed_array()) {
1542     if (fixed_array->IsDictionary()) {
1543       heap->RecordFixedArraySubTypeStats(dictionary_type, fixed_array->Size());
1544     } else {
1545       heap->RecordFixedArraySubTypeStats(fast_type, fixed_array->Size());
1546     }
1547   }
1548 }
1549 
1550 
1551 void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
1552     MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
1553   Heap* heap = map->GetHeap();
1554   int object_size = obj->Size();
1555   heap->RecordObjectStats(map->instance_type(), object_size);
1556   non_count_table_.GetVisitorById(id)(map, obj);
1557   if (obj->IsJSObject()) {
1558     JSObject* object = JSObject::cast(obj);
1559     ObjectStatsCountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
1560                                FAST_ELEMENTS_SUB_TYPE);
1561     ObjectStatsCountFixedArray(object->properties(),
1562                                DICTIONARY_PROPERTIES_SUB_TYPE,
1563                                FAST_PROPERTIES_SUB_TYPE);
1564   }
1565 }
1566 
1567 
1568 template <MarkCompactMarkingVisitor::VisitorId id>
1569 void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(Map* map,
1570                                                               HeapObject* obj) {
1571   ObjectStatsVisitBase(id, map, obj);
1572 }
1573 
1574 
1575 template <>
1576 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1577     MarkCompactMarkingVisitor::kVisitMap> {
1578  public:
1579   static inline void Visit(Map* map, HeapObject* obj) {
1580     Heap* heap = map->GetHeap();
1581     Map* map_obj = Map::cast(obj);
1582     DCHECK(map->instance_type() == MAP_TYPE);
1583     DescriptorArray* array = map_obj->instance_descriptors();
1584     if (map_obj->owns_descriptors() &&
1585         array != heap->empty_descriptor_array()) {
1586       int fixed_array_size = array->Size();
1587       heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
1588                                          fixed_array_size);
1589     }
1590     if (map_obj->HasTransitionArray()) {
1591       int fixed_array_size = map_obj->transitions()->Size();
1592       heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
1593                                          fixed_array_size);
1594     }
1595     if (map_obj->has_code_cache()) {
1596       CodeCache* cache = CodeCache::cast(map_obj->code_cache());
1597       heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
1598                                          cache->default_cache()->Size());
1599       if (!cache->normal_type_cache()->IsUndefined()) {
1600         heap->RecordFixedArraySubTypeStats(
1601             MAP_CODE_CACHE_SUB_TYPE,
1602             FixedArray::cast(cache->normal_type_cache())->Size());
1603       }
1604     }
1605     ObjectStatsVisitBase(kVisitMap, map, obj);
1606   }
1607 };
1608 
1609 
1610 template <>
1611 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1612     MarkCompactMarkingVisitor::kVisitCode> {
1613  public:
1614   static inline void Visit(Map* map, HeapObject* obj) {
1615     Heap* heap = map->GetHeap();
1616     int object_size = obj->Size();
1617     DCHECK(map->instance_type() == CODE_TYPE);
1618     Code* code_obj = Code::cast(obj);
1619     heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
1620                                  object_size);
1621     ObjectStatsVisitBase(kVisitCode, map, obj);
1622   }
1623 };
1624 
1625 
1626 template <>
1627 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1628     MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
1629  public:
1630   static inline void Visit(Map* map, HeapObject* obj) {
1631     Heap* heap = map->GetHeap();
1632     SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
1633     if (sfi->scope_info() != heap->empty_fixed_array()) {
1634       heap->RecordFixedArraySubTypeStats(
1635           SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
1636     }
1637     ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
1638   }
1639 };
1640 
1641 
1642 template <>
1643 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1644     MarkCompactMarkingVisitor::kVisitFixedArray> {
1645  public:
1646   static inline void Visit(Map* map, HeapObject* obj) {
1647     Heap* heap = map->GetHeap();
1648     FixedArray* fixed_array = FixedArray::cast(obj);
1649     if (fixed_array == heap->string_table()) {
1650       heap->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
1651                                          fixed_array->Size());
1652     }
1653     ObjectStatsVisitBase(kVisitFixedArray, map, obj);
1654   }
1655 };
1656 
1657 
1658 void MarkCompactMarkingVisitor::Initialize() {
1659   StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1660 
1661   table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
1662 
1663   if (FLAG_track_gc_object_stats) {
1664     // Copy the visitor table to make call-through possible.
1665     non_count_table_.CopyFrom(&table_);
1666 #define VISITOR_ID_COUNT_FUNCTION(id) \
1667   table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
1668     VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
1669 #undef VISITOR_ID_COUNT_FUNCTION
1670   }
1671 }
1672 
1673 
1674 VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
1675     MarkCompactMarkingVisitor::non_count_table_;
1676 
1677 
1678 class CodeMarkingVisitor : public ThreadVisitor {
1679  public:
1680   explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1681       : collector_(collector) {}
1682 
1683   void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1684     collector_->PrepareThreadForCodeFlushing(isolate, top);
1685   }
1686 
1687  private:
1688   MarkCompactCollector* collector_;
1689 };
1690 
1691 
1692 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1693  public:
1694   explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1695       : collector_(collector) {}
1696 
1697   void VisitPointers(Object** start, Object** end) {
1698     for (Object** p = start; p < end; p++) VisitPointer(p);
1699   }
1700 
1701   void VisitPointer(Object** slot) {
1702     Object* obj = *slot;
1703     if (obj->IsSharedFunctionInfo()) {
1704       SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1705       MarkBit shared_mark = Marking::MarkBitFrom(shared);
1706       MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1707       collector_->MarkObject(shared->code(), code_mark);
1708       collector_->MarkObject(shared, shared_mark);
1709     }
1710   }
1711 
1712  private:
1713   MarkCompactCollector* collector_;
1714 };
1715 
1716 
1717 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1718                                                         ThreadLocalTop* top) {
1719   for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1720     // Note: for the frame that has a pending lazy deoptimization
1721     // StackFrame::unchecked_code will return a non-optimized code object for
1722     // the outermost function and StackFrame::LookupCode will return
1723     // actual optimized code object.
1724     StackFrame* frame = it.frame();
1725     Code* code = frame->unchecked_code();
1726     MarkBit code_mark = Marking::MarkBitFrom(code);
1727     MarkObject(code, code_mark);
1728     if (frame->is_optimized()) {
1729       MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
1730                                                           frame->LookupCode());
1731     }
1732   }
1733 }
1734 
1735 
1736 void MarkCompactCollector::PrepareForCodeFlushing() {
1737   // Enable code flushing for non-incremental cycles.
1738   if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
1739     EnableCodeFlushing(!was_marked_incrementally_);
1740   }
1741 
1742   // If code flushing is disabled, there is no need to prepare for it.
1743   if (!is_code_flushing_enabled()) return;
1744 
1745   // Ensure that the empty descriptor array is marked. MarkDescriptorArray
1746   // relies on it being marked before any other descriptor array.
1747   HeapObject* descriptor_array = heap()->empty_descriptor_array();
1748   MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1749   MarkObject(descriptor_array, descriptor_array_mark);
1750 
1751   // Make sure we are not referencing the code from the stack.
1752   DCHECK(this == heap()->mark_compact_collector());
1753   PrepareThreadForCodeFlushing(heap()->isolate(),
1754                                heap()->isolate()->thread_local_top());
1755 
1756   // Iterate the archived stacks in all threads to check if
1757   // the code is referenced.
1758   CodeMarkingVisitor code_marking_visitor(this);
1759   heap()->isolate()->thread_manager()->IterateArchivedThreads(
1760       &code_marking_visitor);
1761 
1762   SharedFunctionInfoMarkingVisitor visitor(this);
1763   heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1764   heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1765 
1766   ProcessMarkingDeque();
1767 }
1768 
1769 
1770 // Visitor class for marking heap roots.
1771 class RootMarkingVisitor : public ObjectVisitor {
1772  public:
1773   explicit RootMarkingVisitor(Heap* heap)
1774       : collector_(heap->mark_compact_collector()) {}
1775 
1776   void VisitPointer(Object** p) { MarkObjectByPointer(p); }
1777 
1778   void VisitPointers(Object** start, Object** end) {
1779     for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1780   }
1781 
1782   // Skip the weak next code link in a code object, which is visited in
1783   // ProcessTopOptimizedFrame.
1784   void VisitNextCodeLink(Object** p) {}
1785 
1786  private:
1787   void MarkObjectByPointer(Object** p) {
1788     if (!(*p)->IsHeapObject()) return;
1789 
1790     // Replace flat cons strings in place.
1791     HeapObject* object = ShortCircuitConsString(p);
1792     MarkBit mark_bit = Marking::MarkBitFrom(object);
1793     if (mark_bit.Get()) return;
1794 
1795     Map* map = object->map();
1796     // Mark the object.
1797     collector_->SetMark(object, mark_bit);
1798 
1799     // Mark the map pointer and body, and push them on the marking stack.
1800     MarkBit map_mark = Marking::MarkBitFrom(map);
1801     collector_->MarkObject(map, map_mark);
1802     MarkCompactMarkingVisitor::IterateBody(map, object);
1803 
1804     // Mark all the objects reachable from the map and body.  May leave
1805     // overflowed objects in the heap.
1806     collector_->EmptyMarkingDeque();
1807   }
1808 
1809   MarkCompactCollector* collector_;
1810 };
1811 
1812 
1813 // Helper class for pruning the string table.
1814 template <bool finalize_external_strings>
1815 class StringTableCleaner : public ObjectVisitor {
1816  public:
1817   explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
1818 
1819   virtual void VisitPointers(Object** start, Object** end) {
1820     // Visit all HeapObject pointers in [start, end).
1821     for (Object** p = start; p < end; p++) {
1822       Object* o = *p;
1823       if (o->IsHeapObject() &&
1824           !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
1825         if (finalize_external_strings) {
1826           DCHECK(o->IsExternalString());
1827           heap_->FinalizeExternalString(String::cast(*p));
1828         } else {
1829           pointers_removed_++;
1830         }
1831         // Set the entry to the_hole_value (as deleted).
1832         *p = heap_->the_hole_value();
1833       }
1834     }
1835   }
1836 
1837   int PointersRemoved() {
1838     DCHECK(!finalize_external_strings);
1839     return pointers_removed_;
1840   }
1841 
1842  private:
1843   Heap* heap_;
1844   int pointers_removed_;
1845 };
1846 
1847 
1848 typedef StringTableCleaner<false> InternalizedStringTableCleaner;
1849 typedef StringTableCleaner<true> ExternalStringTableCleaner;
1850 
1851 
1852 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1853 // are retained.
1854 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1855  public:
1856   virtual Object* RetainAs(Object* object) {
1857     if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
1858       return object;
1859     } else if (object->IsAllocationSite() &&
1860                !(AllocationSite::cast(object)->IsZombie())) {
1861       // "dead" AllocationSites need to live long enough for a traversal of new
1862       // space. These sites get a one-time reprieve.
1863       AllocationSite* site = AllocationSite::cast(object);
1864       site->MarkZombie();
1865       site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1866       return object;
1867     } else {
1868       return NULL;
1869     }
1870   }
1871 };
1872 
1873 
1874 // Fill the marking stack with overflowed objects returned by the given
1875 // iterator.  Stop when the marking stack is filled or the end of the space
1876 // is reached, whichever comes first.
1877 template <class T>
1878 static void DiscoverGreyObjectsWithIterator(Heap* heap,
1879                                             MarkingDeque* marking_deque,
1880                                             T* it) {
1881   // The caller should ensure that the marking stack is initially not full,
1882   // so that we don't waste effort pointlessly scanning for objects.
1883   DCHECK(!marking_deque->IsFull());
1884 
1885   Map* filler_map = heap->one_pointer_filler_map();
1886   for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
1887     MarkBit markbit = Marking::MarkBitFrom(object);
1888     if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1889       Marking::GreyToBlack(markbit);
1890       MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1891       marking_deque->PushBlack(object);
1892       if (marking_deque->IsFull()) return;
1893     }
1894   }
1895 }
1896 
1897 
1898 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
1899 
1900 
1901 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
1902                                       MemoryChunk* p) {
1903   DCHECK(!marking_deque->IsFull());
1904   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1905   DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
1906   DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
1907   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1908 
1909   for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1910     Address cell_base = it.CurrentCellBase();
1911     MarkBit::CellType* cell = it.CurrentCell();
1912 
1913     const MarkBit::CellType current_cell = *cell;
1914     if (current_cell == 0) continue;
1915 
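    // Grey objects have both of their mark bits set ("11", see the DCHECKs
    // above), so ANDing each mark bit with the next higher bit leaves a 1
    // exactly at the first bit of every grey pair.  For the topmost bit of a
    // cell the second bit lives in the next cell, hence the special case.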
1916     MarkBit::CellType grey_objects;
1917     if (it.HasNext()) {
1918       const MarkBit::CellType next_cell = *(cell + 1);
1919       grey_objects = current_cell & ((current_cell >> 1) |
1920                                      (next_cell << (Bitmap::kBitsPerCell - 1)));
1921     } else {
1922       grey_objects = current_cell & (current_cell >> 1);
1923     }
1924 
1925     int offset = 0;
1926     while (grey_objects != 0) {
1927       int trailing_zeros = base::bits::CountTrailingZeros32(grey_objects);
1928       grey_objects >>= trailing_zeros;
1929       offset += trailing_zeros;
1930       MarkBit markbit(cell, 1 << offset, false);
1931       DCHECK(Marking::IsGrey(markbit));
1932       Marking::GreyToBlack(markbit);
1933       Address addr = cell_base + offset * kPointerSize;
1934       HeapObject* object = HeapObject::FromAddress(addr);
1935       MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1936       marking_deque->PushBlack(object);
1937       if (marking_deque->IsFull()) return;
1938       offset += 2;
1939       grey_objects >>= 2;
1940     }
1941 
1942     grey_objects >>= (Bitmap::kBitsPerCell - 1);
1943   }
1944 }
1945 
1946 
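// Walk the mark bitmap of a new-space page and, for every live object found,
// either promote it to old space or evacuate it within new space.  Returns
// the total size of the surviving objects.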
1947 int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
1948     NewSpace* new_space, NewSpacePage* p) {
1949   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1950   DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
1951   DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
1952   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1953 
1954   MarkBit::CellType* cells = p->markbits()->cells();
1955   int survivors_size = 0;
1956 
1957   for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1958     Address cell_base = it.CurrentCellBase();
1959     MarkBit::CellType* cell = it.CurrentCell();
1960 
1961     MarkBit::CellType current_cell = *cell;
1962     if (current_cell == 0) continue;
1963 
1964     int offset = 0;
1965     while (current_cell != 0) {
1966       int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
1967       current_cell >>= trailing_zeros;
1968       offset += trailing_zeros;
1969       Address address = cell_base + offset * kPointerSize;
1970       HeapObject* object = HeapObject::FromAddress(address);
1971 
1972       int size = object->Size();
1973       survivors_size += size;
1974 
1975       Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
1976 
1977       offset++;
1978       current_cell >>= 1;
1979 
1980       // TODO(hpayer): Refactor EvacuateObject and call this function instead.
1981       if (heap()->ShouldBePromoted(object->address(), size) &&
1982           TryPromoteObject(object, size)) {
1983         continue;
1984       }
1985 
1986       AllocationResult allocation = new_space->AllocateRaw(size);
1987       if (allocation.IsRetry()) {
1988         if (!new_space->AddFreshPage()) {
1989           // Shouldn't happen. We are sweeping linearly, and to-space
1990           // has the same number of pages as from-space, so there is
1991           // always room.
1992           UNREACHABLE();
1993         }
1994         allocation = new_space->AllocateRaw(size);
1995         DCHECK(!allocation.IsRetry());
1996       }
1997       Object* target = allocation.ToObjectChecked();
1998 
1999       MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE);
2000       heap()->IncrementSemiSpaceCopiedObjectSize(size);
2001     }
2002     *cells = 0;
2003   }
2004   return survivors_size;
2005 }
2006 
2007 
2008 static void DiscoverGreyObjectsInSpace(Heap* heap, MarkingDeque* marking_deque,
2009                                        PagedSpace* space) {
2010   PageIterator it(space);
2011   while (it.has_next()) {
2012     Page* p = it.next();
2013     DiscoverGreyObjectsOnPage(marking_deque, p);
2014     if (marking_deque->IsFull()) return;
2015   }
2016 }
2017 
2018 
2019 static void DiscoverGreyObjectsInNewSpace(Heap* heap,
2020                                           MarkingDeque* marking_deque) {
2021   NewSpace* space = heap->new_space();
2022   NewSpacePageIterator it(space->bottom(), space->top());
2023   while (it.has_next()) {
2024     NewSpacePage* page = it.next();
2025     DiscoverGreyObjectsOnPage(marking_deque, page);
2026     if (marking_deque->IsFull()) return;
2027   }
2028 }
2029 
2030 
2031 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
2032   Object* o = *p;
2033   if (!o->IsHeapObject()) return false;
2034   HeapObject* heap_object = HeapObject::cast(o);
2035   MarkBit mark = Marking::MarkBitFrom(heap_object);
2036   return !mark.Get();
2037 }
2038 
2039 
2040 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
2041                                                         Object** p) {
2042   Object* o = *p;
2043   DCHECK(o->IsHeapObject());
2044   HeapObject* heap_object = HeapObject::cast(o);
2045   MarkBit mark = Marking::MarkBitFrom(heap_object);
2046   return !mark.Get();
2047 }
2048 
2049 
2050 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
2051   StringTable* string_table = heap()->string_table();
2052   // Mark the string table itself.
2053   MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
2054   if (!string_table_mark.Get()) {
2055     // String table could have already been marked by visiting the handles list.
2056     SetMark(string_table, string_table_mark);
2057   }
2058   // Explicitly mark the prefix.
2059   string_table->IteratePrefix(visitor);
2060   ProcessMarkingDeque();
2061 }
2062 
2063 
2064 void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
2065   MarkBit mark_bit = Marking::MarkBitFrom(site);
2066   SetMark(site, mark_bit);
2067 }
2068 
2069 
2070 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
2071   // Mark the heap roots including global variables, stack variables,
2072   // etc., and all objects reachable from them.
2073   heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
2074 
2075   // Handle the string table specially.
2076   MarkStringTable(visitor);
2077 
2078   MarkWeakObjectToCodeTable();
2079 
2080   // There may be overflowed objects in the heap.  Visit them now.
2081   while (marking_deque_.overflowed()) {
2082     RefillMarkingDeque();
2083     EmptyMarkingDeque();
2084   }
2085 }
2086 
2087 
2088 void MarkCompactCollector::MarkImplicitRefGroups() {
2089   List<ImplicitRefGroup*>* ref_groups =
2090       isolate()->global_handles()->implicit_ref_groups();
2091 
2092   int last = 0;
2093   for (int i = 0; i < ref_groups->length(); i++) {
2094     ImplicitRefGroup* entry = ref_groups->at(i);
2095     DCHECK(entry != NULL);
2096 
2097     if (!IsMarked(*entry->parent)) {
2098       (*ref_groups)[last++] = entry;
2099       continue;
2100     }
2101 
2102     Object*** children = entry->children;
2103     // A parent object is marked, so mark all child heap objects.
2104     for (size_t j = 0; j < entry->length; ++j) {
2105       if ((*children[j])->IsHeapObject()) {
2106         HeapObject* child = HeapObject::cast(*children[j]);
2107         MarkBit mark = Marking::MarkBitFrom(child);
2108         MarkObject(child, mark);
2109       }
2110     }
2111 
2112     // Once the entire group has been marked, dispose it because it's
2113     // not needed anymore.
2114     delete entry;
2115   }
2116   ref_groups->Rewind(last);
2117 }
2118 
2119 
2120 void MarkCompactCollector::MarkWeakObjectToCodeTable() {
2121   HeapObject* weak_object_to_code_table =
2122       HeapObject::cast(heap()->weak_object_to_code_table());
2123   if (!IsMarked(weak_object_to_code_table)) {
2124     MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
2125     SetMark(weak_object_to_code_table, mark);
2126   }
2127 }
2128 
2129 
2130 // Mark all objects reachable from the objects on the marking stack.
2131 // Before: the marking stack contains zero or more heap object pointers.
2132 // After: the marking stack is empty, and all objects reachable from the
2133 // marking stack have been marked, or are overflowed in the heap.
2134 void MarkCompactCollector::EmptyMarkingDeque() {
2135   while (!marking_deque_.IsEmpty()) {
2136     HeapObject* object = marking_deque_.Pop();
2137     DCHECK(object->IsHeapObject());
2138     DCHECK(heap()->Contains(object));
2139     DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
2140 
2141     Map* map = object->map();
2142     MarkBit map_mark = Marking::MarkBitFrom(map);
2143     MarkObject(map, map_mark);
2144 
2145     MarkCompactMarkingVisitor::IterateBody(map, object);
2146   }
2147 }
2148 
2149 
2150 // Sweep the heap for overflowed objects, clear their overflow bits, and
2151 // push them on the marking stack.  Stop early if the marking stack fills
2152 // before sweeping completes.  If sweeping completes, there are no remaining
2153 // overflowed objects in the heap so the overflow flag on the marking stack
2154 // is cleared.
2155 void MarkCompactCollector::RefillMarkingDeque() {
2156   DCHECK(marking_deque_.overflowed());
2157 
2158   DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
2159   if (marking_deque_.IsFull()) return;
2160 
2161   DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
2162                              heap()->old_pointer_space());
2163   if (marking_deque_.IsFull()) return;
2164 
2165   DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->old_data_space());
2166   if (marking_deque_.IsFull()) return;
2167 
2168   DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->code_space());
2169   if (marking_deque_.IsFull()) return;
2170 
2171   DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->map_space());
2172   if (marking_deque_.IsFull()) return;
2173 
2174   DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->cell_space());
2175   if (marking_deque_.IsFull()) return;
2176 
2177   DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
2178                              heap()->property_cell_space());
2179   if (marking_deque_.IsFull()) return;
2180 
2181   LargeObjectIterator lo_it(heap()->lo_space());
2182   DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &lo_it);
2183   if (marking_deque_.IsFull()) return;
2184 
2185   marking_deque_.ClearOverflowed();
2186 }
2187 
2188 
2189 // Mark all objects reachable (transitively) from objects on the marking
2190 // stack.  Before: the marking stack contains zero or more heap object
2191 // pointers.  After: the marking stack is empty and there are no overflowed
2192 // objects in the heap.
2193 void MarkCompactCollector::ProcessMarkingDeque() {
2194   EmptyMarkingDeque();
2195   while (marking_deque_.overflowed()) {
2196     RefillMarkingDeque();
2197     EmptyMarkingDeque();
2198   }
2199 }
2200 
2201 
2202 // Mark all objects reachable (transitively) from objects on the marking
2203 // stack including references only considered in the atomic marking pause.
2204 void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
2205   bool work_to_do = true;
2206   DCHECK(marking_deque_.IsEmpty());
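  // Iterate to a fixed point: marking the values of already-marked weak
  // collection keys or of object groups can make more objects (and thus more
  // keys) reachable, so repeat until no new work is discovered.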
2207   while (work_to_do) {
2208     isolate()->global_handles()->IterateObjectGroups(
2209         visitor, &IsUnmarkedHeapObjectWithHeap);
2210     MarkImplicitRefGroups();
2211     ProcessWeakCollections();
2212     work_to_do = !marking_deque_.IsEmpty();
2213     ProcessMarkingDeque();
2214   }
2215 }
2216 
2217 
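// Walk the stack from the top; if the first JavaScript frame found is
// optimized and its code cannot be deoptimized at the current pc, visit that
// code's body with the visitor (see RootMarkingVisitor::VisitNextCodeLink).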
2218 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
2219   for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2220        !it.done(); it.Advance()) {
2221     if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2222       return;
2223     }
2224     if (it.frame()->type() == StackFrame::OPTIMIZED) {
2225       Code* code = it.frame()->LookupCode();
2226       if (!code->CanDeoptAt(it.frame()->pc())) {
2227         code->CodeIterateBody(visitor);
2228       }
2229       ProcessMarkingDeque();
2230       return;
2231     }
2232   }
2233 }
2234 
2235 
2236 void MarkCompactCollector::MarkLiveObjects() {
2237   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
2238   double start_time = 0.0;
2239   if (FLAG_print_cumulative_gc_stat) {
2240     start_time = base::OS::TimeCurrentMillis();
2241   }
2242   // The recursive GC marker detects when it is nearing stack overflow,
2243   // and switches to a different marking system.  JS interrupts interfere
2244   // with the C stack limit check.
2245   PostponeInterruptsScope postpone(isolate());
2246 
2247   bool incremental_marking_overflowed = false;
2248   IncrementalMarking* incremental_marking = heap_->incremental_marking();
2249   if (was_marked_incrementally_) {
2250     // Finalize the incremental marking and check whether we had an overflow.
2251     // Both markers use grey color to mark overflowed objects so
2252     // the non-incremental marker can deal with them as if overflow
2253     // occurred during normal marking.
2254     // But the incremental marker uses a separate marking deque,
2255     // so we have to explicitly copy its overflow state.
2256     incremental_marking->Finalize();
2257     incremental_marking_overflowed =
2258         incremental_marking->marking_deque()->overflowed();
2259     incremental_marking->marking_deque()->ClearOverflowed();
2260   } else {
2261     // Abort any pending incremental activities e.g. incremental sweeping.
2262     incremental_marking->Abort();
2263   }
2264 
2265 #ifdef DEBUG
2266   DCHECK(state_ == PREPARE_GC);
2267   state_ = MARK_LIVE_OBJECTS;
2268 #endif
2269   // The to space contains live objects, and a page in from space is used
2270   // as a marking stack.
2271   Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
2272   Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
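  // When FLAG_force_marking_deque_overflows is set, the deque is shrunk to 64
  // words so that the overflow handling below is exercised.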
2273   if (FLAG_force_marking_deque_overflows) {
2274     marking_deque_end = marking_deque_start + 64 * kPointerSize;
2275   }
2276   marking_deque_.Initialize(marking_deque_start, marking_deque_end);
2277   DCHECK(!marking_deque_.overflowed());
2278 
2279   if (incremental_marking_overflowed) {
2280     // There are overflowed objects left in the heap after incremental marking.
2281     marking_deque_.SetOverflowed();
2282   }
2283 
2284   PrepareForCodeFlushing();
2285 
2286   if (was_marked_incrementally_) {
2287     // There is no write barrier on cells so we have to scan them now at the end
2288     // of the incremental marking.
2289     {
2290       HeapObjectIterator cell_iterator(heap()->cell_space());
2291       HeapObject* cell;
2292       while ((cell = cell_iterator.Next()) != NULL) {
2293         DCHECK(cell->IsCell());
2294         if (IsMarked(cell)) {
2295           int offset = Cell::kValueOffset;
2296           MarkCompactMarkingVisitor::VisitPointer(
2297               heap(), reinterpret_cast<Object**>(cell->address() + offset));
2298         }
2299       }
2300     }
2301     {
2302       HeapObjectIterator js_global_property_cell_iterator(
2303           heap()->property_cell_space());
2304       HeapObject* cell;
2305       while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
2306         DCHECK(cell->IsPropertyCell());
2307         if (IsMarked(cell)) {
2308           MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
2309         }
2310       }
2311     }
2312   }
2313 
2314   RootMarkingVisitor root_visitor(heap());
2315   MarkRoots(&root_visitor);
2316 
2317   ProcessTopOptimizedFrame(&root_visitor);
2318 
2319   // The objects reachable from the roots are marked, yet unreachable
2320   // objects are unmarked.  Mark objects reachable due to host
2321   // application specific logic or through Harmony weak maps.
2322   ProcessEphemeralMarking(&root_visitor);
2323 
2324   // The objects reachable from the roots, weak maps or object groups
2325   // are marked, yet unreachable objects are unmarked.  Mark objects
2326   // reachable only from weak global handles.
2327   //
2328   // First we identify nonlive weak handles and mark them as pending
2329   // destruction.
2330   heap()->isolate()->global_handles()->IdentifyWeakHandles(
2331       &IsUnmarkedHeapObject);
2332   // Then we mark the objects and process the transitive closure.
2333   heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2334   while (marking_deque_.overflowed()) {
2335     RefillMarkingDeque();
2336     EmptyMarkingDeque();
2337   }
2338 
2339   // Repeat host application specific and Harmony weak maps marking to
2340   // mark unmarked objects reachable from the weak roots.
2341   ProcessEphemeralMarking(&root_visitor);
2342 
2343   AfterMarking();
2344 
2345   if (FLAG_print_cumulative_gc_stat) {
2346     heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time);
2347   }
2348 }
2349 
2350 
2351 void MarkCompactCollector::AfterMarking() {
2352   // Object literal map caches reference strings (cache keys) and maps
2353   // (cache values). At this point still useful maps have already been
2354   // marked. Mark the keys for the alive values before we process the
2355   // string table.
2356   ProcessMapCaches();
2357 
2358   // Prune the string table removing all strings only pointed to by the
2359   // string table.  Cannot use string_table() here because the string
2360   // table is marked.
2361   StringTable* string_table = heap()->string_table();
2362   InternalizedStringTableCleaner internalized_visitor(heap());
2363   string_table->IterateElements(&internalized_visitor);
2364   string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
2365 
2366   ExternalStringTableCleaner external_visitor(heap());
2367   heap()->external_string_table_.Iterate(&external_visitor);
2368   heap()->external_string_table_.CleanUp();
2369 
2370   // Process the weak references.
2371   MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2372   heap()->ProcessWeakReferences(&mark_compact_object_retainer);
2373 
2374   // Remove object groups after marking phase.
2375   heap()->isolate()->global_handles()->RemoveObjectGroups();
2376   heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2377 
2378   // Flush code from collected candidates.
2379   if (is_code_flushing_enabled()) {
2380     code_flusher_->ProcessCandidates();
2381     // If the incremental marker does not support code flushing, we need to
2382     // disable it before the incremental marking steps of the next cycle.
2383     if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
2384       EnableCodeFlushing(false);
2385     }
2386   }
2387 
2388   if (FLAG_track_gc_object_stats) {
2389     heap()->CheckpointObjectStats();
2390   }
2391 }
2392 
2393 
2394 void MarkCompactCollector::ProcessMapCaches() {
2395   Object* raw_context = heap()->native_contexts_list();
2396   while (raw_context != heap()->undefined_value()) {
2397     Context* context = reinterpret_cast<Context*>(raw_context);
2398     if (IsMarked(context)) {
2399       HeapObject* raw_map_cache =
2400           HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
2401       // A map cache may be reachable from the stack. In this case
2402       // it's already transitively marked and it's too late to clean
2403       // up its parts.
2404       if (!IsMarked(raw_map_cache) &&
2405           raw_map_cache != heap()->undefined_value()) {
2406         MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
2407         int existing_elements = map_cache->NumberOfElements();
2408         int used_elements = 0;
2409         for (int i = MapCache::kElementsStartIndex; i < map_cache->length();
2410              i += MapCache::kEntrySize) {
2411           Object* raw_key = map_cache->get(i);
2412           if (raw_key == heap()->undefined_value() ||
2413               raw_key == heap()->the_hole_value())
2414             continue;
2415           STATIC_ASSERT(MapCache::kEntrySize == 2);
2416           Object* raw_map = map_cache->get(i + 1);
2417           if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
2418             ++used_elements;
2419           } else {
2420             // Delete useless entries with unmarked maps.
2421             DCHECK(raw_map->IsMap());
2422             map_cache->set_the_hole(i);
2423             map_cache->set_the_hole(i + 1);
2424           }
2425         }
2426         if (used_elements == 0) {
2427           context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2428         } else {
2429           // Note: we don't actually shrink the cache here to avoid
2430           // extra complexity during GC. We rely on subsequent cache
2431           // usages (EnsureCapacity) to do this.
2432           map_cache->ElementsRemoved(existing_elements - used_elements);
2433           MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2434           MarkObject(map_cache, map_cache_markbit);
2435         }
2436       }
2437     }
2438     // Move to next element in the list.
2439     raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2440   }
2441   ProcessMarkingDeque();
2442 }
2443 
2444 
2445 void MarkCompactCollector::ClearNonLiveReferences() {
2446   // Iterate over the map space, setting map transitions that go from
2447   // a marked map to an unmarked map to null transitions.  This action
2448   // is carried out only on maps of JSObjects and related subtypes.
2449   HeapObjectIterator map_iterator(heap()->map_space());
2450   for (HeapObject* obj = map_iterator.Next(); obj != NULL;
2451        obj = map_iterator.Next()) {
2452     Map* map = Map::cast(obj);
2453 
2454     if (!map->CanTransition()) continue;
2455 
2456     MarkBit map_mark = Marking::MarkBitFrom(map);
2457     ClearNonLivePrototypeTransitions(map);
2458     ClearNonLiveMapTransitions(map, map_mark);
2459 
2460     if (map_mark.Get()) {
2461       ClearNonLiveDependentCode(map->dependent_code());
2462     } else {
2463       ClearDependentCode(map->dependent_code());
2464       map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
2465     }
2466   }
2467 
2468   // Iterate over property cell space, removing dependent code that is not
2469   // otherwise kept alive by strong references.
2470   HeapObjectIterator cell_iterator(heap_->property_cell_space());
2471   for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
2472        cell = cell_iterator.Next()) {
2473     if (IsMarked(cell)) {
2474       ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
2475     }
2476   }
2477 
2478   // Iterate over allocation sites, removing dependent code that is not
2479   // otherwise kept alive by strong references.
2480   Object* undefined = heap()->undefined_value();
2481   for (Object* site = heap()->allocation_sites_list(); site != undefined;
2482        site = AllocationSite::cast(site)->weak_next()) {
2483     if (IsMarked(site)) {
2484       ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
2485     }
2486   }
2487 
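  // Process the weak object-to-code table: a dead Cell key is revived if the
  // object it holds is still live, entries with live keys get their dependent
  // code cleaned of dead entries, and entries with dead keys have their
  // dependent code cleared and are removed from the table.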
2488   if (heap_->weak_object_to_code_table()->IsHashTable()) {
2489     WeakHashTable* table =
2490         WeakHashTable::cast(heap_->weak_object_to_code_table());
2491     uint32_t capacity = table->Capacity();
2492     for (uint32_t i = 0; i < capacity; i++) {
2493       uint32_t key_index = table->EntryToIndex(i);
2494       Object* key = table->get(key_index);
2495       if (!table->IsKey(key)) continue;
2496       uint32_t value_index = table->EntryToValueIndex(i);
2497       Object* value = table->get(value_index);
2498       if (key->IsCell() && !IsMarked(key)) {
2499         Cell* cell = Cell::cast(key);
2500         Object* object = cell->value();
2501         if (IsMarked(object)) {
2502           MarkBit mark = Marking::MarkBitFrom(cell);
2503           SetMark(cell, mark);
2504           Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
2505           RecordSlot(value_slot, value_slot, *value_slot);
2506         }
2507       }
2508       if (IsMarked(key)) {
2509         if (!IsMarked(value)) {
2510           HeapObject* obj = HeapObject::cast(value);
2511           MarkBit mark = Marking::MarkBitFrom(obj);
2512           SetMark(obj, mark);
2513         }
2514         ClearNonLiveDependentCode(DependentCode::cast(value));
2515       } else {
2516         ClearDependentCode(DependentCode::cast(value));
2517         table->set(key_index, heap_->the_hole_value());
2518         table->set(value_index, heap_->the_hole_value());
2519         table->ElementRemoved();
2520       }
2521     }
2522   }
2523 }
2524 
2525 
2526 void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
2527   int number_of_transitions = map->NumberOfProtoTransitions();
2528   FixedArray* prototype_transitions = map->GetPrototypeTransitions();
2529 
2530   int new_number_of_transitions = 0;
2531   const int header = Map::kProtoTransitionHeaderSize;
2532   const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2533   const int map_offset = header + Map::kProtoTransitionMapOffset;
2534   const int step = Map::kProtoTransitionElementsPerEntry;
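  // Compact the live (prototype, cached map) pairs to the front of the array,
  // recording the slot of each retained prototype.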
2535   for (int i = 0; i < number_of_transitions; i++) {
2536     Object* prototype = prototype_transitions->get(proto_offset + i * step);
2537     Object* cached_map = prototype_transitions->get(map_offset + i * step);
2538     if (IsMarked(prototype) && IsMarked(cached_map)) {
2539       DCHECK(!prototype->IsUndefined());
2540       int proto_index = proto_offset + new_number_of_transitions * step;
2541       int map_index = map_offset + new_number_of_transitions * step;
2542       if (new_number_of_transitions != i) {
2543         prototype_transitions->set(proto_index, prototype,
2544                                    UPDATE_WRITE_BARRIER);
2545         prototype_transitions->set(map_index, cached_map, SKIP_WRITE_BARRIER);
2546       }
2547       Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
2548       RecordSlot(slot, slot, prototype);
2549       new_number_of_transitions++;
2550     }
2551   }
2552 
2553   if (new_number_of_transitions != number_of_transitions) {
2554     map->SetNumberOfProtoTransitions(new_number_of_transitions);
2555   }
2556 
2557   // Fill slots that became free with undefined value.
2558   for (int i = new_number_of_transitions * step;
2559        i < number_of_transitions * step; i++) {
2560     prototype_transitions->set_undefined(header + i);
2561   }
2562 }
2563 
2564 
2565 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2566                                                       MarkBit map_mark) {
2567   Object* potential_parent = map->GetBackPointer();
2568   if (!potential_parent->IsMap()) return;
2569   Map* parent = Map::cast(potential_parent);
2570 
2571   // Follow the back pointer to check whether this is a transition from a
2572   // live map to a dead path and, if so, clear the parent's transitions.
2573   bool current_is_alive = map_mark.Get();
2574   bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
2575   if (!current_is_alive && parent_is_alive) {
2576     ClearMapTransitions(parent);
2577   }
2578 }
2579 
2580 
2581 // Clear a possible back pointer in case the transition leads to a dead map.
2582 // Return true in case a back pointer has been cleared and false otherwise.
2583 bool MarkCompactCollector::ClearMapBackPointer(Map* target) {
2584   if (Marking::MarkBitFrom(target).Get()) return false;
2585   target->SetBackPointer(heap_->undefined_value(), SKIP_WRITE_BARRIER);
2586   return true;
2587 }
2588 
2589 
2590 void MarkCompactCollector::ClearMapTransitions(Map* map) {
2591   // If there are no transitions to be cleared, return.
2592   // TODO(verwaest) Should be an assert, otherwise back pointers are not
2593   // properly cleared.
2594   if (!map->HasTransitionArray()) return;
2595 
2596   TransitionArray* t = map->transitions();
2597 
2598   int transition_index = 0;
2599 
2600   DescriptorArray* descriptors = map->instance_descriptors();
2601   bool descriptors_owner_died = false;
2602 
2603   // Compact all live transitions to the left.
2604   for (int i = 0; i < t->number_of_transitions(); ++i) {
2605     Map* target = t->GetTarget(i);
2606     if (ClearMapBackPointer(target)) {
2607       if (target->instance_descriptors() == descriptors) {
2608         descriptors_owner_died = true;
2609       }
2610     } else {
2611       if (i != transition_index) {
2612         Name* key = t->GetKey(i);
2613         t->SetKey(transition_index, key);
2614         Object** key_slot = t->GetKeySlot(transition_index);
2615         RecordSlot(key_slot, key_slot, key);
2616         // Target slots do not need to be recorded since maps are not compacted.
2617         t->SetTarget(transition_index, t->GetTarget(i));
2618       }
2619       transition_index++;
2620     }
2621   }
2622 
2623   // If there are no transitions to be cleared, return.
2624   // TODO(verwaest) Should be an assert, otherwise back pointers are not
2625   // properly cleared.
2626   if (transition_index == t->number_of_transitions()) return;
2627 
2628   int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2629 
2630   if (descriptors_owner_died) {
2631     if (number_of_own_descriptors > 0) {
2632       TrimDescriptorArray(map, descriptors, number_of_own_descriptors);
2633       DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2634       map->set_owns_descriptors(true);
2635     } else {
2636       DCHECK(descriptors == heap_->empty_descriptor_array());
2637     }
2638   }
2639 
2640   // Note that we never eliminate a transition array, though we might right-trim
2641   // such that number_of_transitions() == 0. If this assumption changes,
2642   // TransitionArray::CopyInsert() will need to deal with the case that a
2643   // transition array disappeared during GC.
2644   int trim = t->number_of_transitions() - transition_index;
2645   if (trim > 0) {
2646     heap_->RightTrimFixedArray<Heap::FROM_GC>(
2647         t, t->IsSimpleTransition() ? trim
2648                                    : trim * TransitionArray::kTransitionSize);
2649   }
2650   DCHECK(map->HasTransitionArray());
2651 }
2652 
2653 
2654 void MarkCompactCollector::TrimDescriptorArray(Map* map,
2655                                                DescriptorArray* descriptors,
2656                                                int number_of_own_descriptors) {
2657   int number_of_descriptors = descriptors->number_of_descriptors_storage();
2658   int to_trim = number_of_descriptors - number_of_own_descriptors;
2659   if (to_trim == 0) return;
2660 
2661   heap_->RightTrimFixedArray<Heap::FROM_GC>(
2662       descriptors, to_trim * DescriptorArray::kDescriptorSize);
2663   descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
2664 
2665   if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
2666   descriptors->Sort();
2667 }
2668 
2669 
2670 void MarkCompactCollector::TrimEnumCache(Map* map,
2671                                          DescriptorArray* descriptors) {
2672   int live_enum = map->EnumLength();
2673   if (live_enum == kInvalidEnumCacheSentinel) {
2674     live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
2675   }
2676   if (live_enum == 0) return descriptors->ClearEnumCache();
2677 
2678   FixedArray* enum_cache = descriptors->GetEnumCache();
2679 
2680   int to_trim = enum_cache->length() - live_enum;
2681   if (to_trim <= 0) return;
2682   heap_->RightTrimFixedArray<Heap::FROM_GC>(descriptors->GetEnumCache(),
2683                                             to_trim);
2684 
2685   if (!descriptors->HasEnumIndicesCache()) return;
2686   FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
2687   heap_->RightTrimFixedArray<Heap::FROM_GC>(enum_indices_cache, to_trim);
2688 }
2689 
2690 
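// Walk the list of dependent weak IC stubs starting at |head|, invalidate the
// maps embedded in stubs that are still live, and unlink every stub from the
// list.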
2691 void MarkCompactCollector::ClearDependentICList(Object* head) {
2692   Object* current = head;
2693   Object* undefined = heap()->undefined_value();
2694   while (current != undefined) {
2695     Code* code = Code::cast(current);
2696     if (IsMarked(code)) {
2697       DCHECK(code->is_weak_stub());
2698       IC::InvalidateMaps(code);
2699     }
2700     current = code->next_code_link();
2701     code->set_next_code_link(undefined);
2702   }
2703 }
2704 
2705 
2706 void MarkCompactCollector::ClearDependentCode(DependentCode* entries) {
2707   DisallowHeapAllocation no_allocation;
2708   DependentCode::GroupStartIndexes starts(entries);
2709   int number_of_entries = starts.number_of_entries();
2710   if (number_of_entries == 0) return;
2711   int g = DependentCode::kWeakICGroup;
2712   if (starts.at(g) != starts.at(g + 1)) {
2713     int i = starts.at(g);
2714     DCHECK(i + 1 == starts.at(g + 1));
2715     Object* head = entries->object_at(i);
2716     ClearDependentICList(head);
2717   }
2718   g = DependentCode::kWeakCodeGroup;
2719   for (int i = starts.at(g); i < starts.at(g + 1); i++) {
2720     // If the entry is compilation info then the map must be alive,
2721     // and ClearDependentCode shouldn't be called.
2722     DCHECK(entries->is_code_at(i));
2723     Code* code = entries->code_at(i);
2724     if (IsMarked(code) && !code->marked_for_deoptimization()) {
2725       DependentCode::SetMarkedForDeoptimization(
2726           code, static_cast<DependentCode::DependencyGroup>(g));
2727       code->InvalidateEmbeddedObjects();
2728       have_code_to_deoptimize_ = true;
2729     }
2730   }
2731   for (int i = 0; i < number_of_entries; i++) {
2732     entries->clear_at(i);
2733   }
2734 }
2735 
2736 
2737 int MarkCompactCollector::ClearNonLiveDependentCodeInGroup(
2738     DependentCode* entries, int group, int start, int end, int new_start) {
2739   int survived = 0;
2740   if (group == DependentCode::kWeakICGroup) {
2741     // Dependent weak IC stubs form a linked list and only the head is stored
2742     // in the dependent code array.
2743     if (start != end) {
2744       DCHECK(start + 1 == end);
2745       Object* old_head = entries->object_at(start);
2746       MarkCompactWeakObjectRetainer retainer;
2747       Object* head = VisitWeakList<Code>(heap(), old_head, &retainer);
2748       entries->set_object_at(new_start, head);
2749       Object** slot = entries->slot_at(new_start);
2750       RecordSlot(slot, slot, head);
2751       // We do not compact this group even if the head is undefined;
2752       // more dependent ICs are likely to be added later.
2753       survived = 1;
2754     }
2755   } else {
2756     for (int i = start; i < end; i++) {
2757       Object* obj = entries->object_at(i);
2758       DCHECK(obj->IsCode() || IsMarked(obj));
2759       if (IsMarked(obj) &&
2760           (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
2761         if (new_start + survived != i) {
2762           entries->set_object_at(new_start + survived, obj);
2763         }
2764         Object** slot = entries->slot_at(new_start + survived);
2765         RecordSlot(slot, slot, obj);
2766         survived++;
2767       }
2768     }
2769   }
2770   entries->set_number_of_entries(
2771       static_cast<DependentCode::DependencyGroup>(group), survived);
2772   return survived;
2773 }
2774 
2775 
2776 void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
2777   DisallowHeapAllocation no_allocation;
2778   DependentCode::GroupStartIndexes starts(entries);
2779   int number_of_entries = starts.number_of_entries();
2780   if (number_of_entries == 0) return;
2781   int new_number_of_entries = 0;
2782   // Go through all groups, remove dead codes and compact.
2783   for (int g = 0; g < DependentCode::kGroupCount; g++) {
2784     int survived = ClearNonLiveDependentCodeInGroup(
2785         entries, g, starts.at(g), starts.at(g + 1), new_number_of_entries);
2786     new_number_of_entries += survived;
2787   }
2788   for (int i = new_number_of_entries; i < number_of_entries; i++) {
2789     entries->clear_at(i);
2790   }
2791 }
2792 
2793 
2794 void MarkCompactCollector::ProcessWeakCollections() {
2795   GCTracer::Scope gc_scope(heap()->tracer(),
2796                            GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
2797   Object* weak_collection_obj = heap()->encountered_weak_collections();
2798   while (weak_collection_obj != Smi::FromInt(0)) {
2799     JSWeakCollection* weak_collection =
2800         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2801     DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2802     if (weak_collection->table()->IsHashTable()) {
2803       ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2804       Object** anchor = reinterpret_cast<Object**>(table->address());
2805       for (int i = 0; i < table->Capacity(); i++) {
2806         if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2807           Object** key_slot =
2808               table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
2809           RecordSlot(anchor, key_slot, *key_slot);
2810           Object** value_slot =
2811               table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
2812           MarkCompactMarkingVisitor::MarkObjectByPointer(this, anchor,
2813                                                          value_slot);
2814         }
2815       }
2816     }
2817     weak_collection_obj = weak_collection->next();
2818   }
2819 }
2820 
2821 
2822 void MarkCompactCollector::ClearWeakCollections() {
2823   GCTracer::Scope gc_scope(heap()->tracer(),
2824                            GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
2825   Object* weak_collection_obj = heap()->encountered_weak_collections();
2826   while (weak_collection_obj != Smi::FromInt(0)) {
2827     JSWeakCollection* weak_collection =
2828         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2829     DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2830     if (weak_collection->table()->IsHashTable()) {
2831       ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2832       for (int i = 0; i < table->Capacity(); i++) {
2833         HeapObject* key = HeapObject::cast(table->KeyAt(i));
2834         if (!MarkCompactCollector::IsMarked(key)) {
2835           table->RemoveEntry(i);
2836         }
2837       }
2838     }
2839     weak_collection_obj = weak_collection->next();
2840     weak_collection->set_next(heap()->undefined_value());
2841   }
2842   heap()->set_encountered_weak_collections(Smi::FromInt(0));
2843 }
2844 
2845 
2846 void MarkCompactCollector::AbortWeakCollections() {
2847   GCTracer::Scope gc_scope(heap()->tracer(),
2848                            GCTracer::Scope::MC_WEAKCOLLECTION_ABORT);
2849   Object* weak_collection_obj = heap()->encountered_weak_collections();
2850   while (weak_collection_obj != Smi::FromInt(0)) {
2851     JSWeakCollection* weak_collection =
2852         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2853     weak_collection_obj = weak_collection->next();
2854     weak_collection->set_next(heap()->undefined_value());
2855   }
2856   heap()->set_encountered_weak_collections(Smi::FromInt(0));
2857 }
2858 
2859 
2860 void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
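  // A slot that now points into new space must be recorded in the store
  // buffer; a slot pointing into an evacuation candidate is recorded in the
  // migration slots buffer so it can be updated after evacuation.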
2861   if (heap_->InNewSpace(value)) {
2862     heap_->store_buffer()->Mark(slot);
2863   } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2864     SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2865                        reinterpret_cast<Object**>(slot),
2866                        SlotsBuffer::IGNORE_OVERFLOW);
2867   }
2868 }
2869 
2870 
2871 // We scavenge new space simultaneously with sweeping. This is done in two
2872 // passes.
2873 //
2874 // The first pass migrates all alive objects from one semispace to another or
2875 // promotes them to old space.  The forwarding address is written directly
2876 // into the first word of the object without any encoding.  If the object is
2877 // dead we write NULL as a forwarding address.
2878 //
2879 // The second pass updates pointers to new space in all spaces.  It is possible
2880 // to encounter pointers to dead new space objects during traversal of pointers
2881 // to new space.  We should clear them to avoid encountering them during next
2882 // pointer iteration.  This is an issue if the store buffer overflows and we
2883 // have to scan the entire old space, including dead objects, looking for
2884 // pointers to new space.
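//
// Schematically, the two passes look like this (an illustrative sketch, not
// the literal control flow in this file):
//
//   // Pass 1: evacuate live objects and leave forwarding addresses behind.
//   for each live object src in from-space:
//     dst = allocate space in to-space or old space
//     MigrateObject(dst, src, src->Size(), destination_space);
//     // MigrateObject stores dst into the first word of src.
//
//   // Pass 2: chase forwarding addresses through recorded slots.
//   for each slot pointing into new space:
//     if ((*slot)->map_word().IsForwardingAddress())
//       *slot = (*slot)->map_word().ToForwardingAddress();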
2885 void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
2886                                          int size, AllocationSpace dest) {
2887   Address dst_addr = dst->address();
2888   Address src_addr = src->address();
2889   DCHECK(heap()->AllowedToBeMigrated(src, dest));
2890   DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
2891   if (dest == OLD_POINTER_SPACE) {
2892     Address src_slot = src_addr;
2893     Address dst_slot = dst_addr;
2894     DCHECK(IsAligned(size, kPointerSize));
2895 
2896     for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2897       Object* value = Memory::Object_at(src_slot);
2898 
2899       Memory::Object_at(dst_slot) = value;
2900 
2901       if (!src->MayContainRawValues()) {
2902         RecordMigratedSlot(value, dst_slot);
2903       }
2904 
2905       src_slot += kPointerSize;
2906       dst_slot += kPointerSize;
2907     }
2908 
2909     if (compacting_ && dst->IsJSFunction()) {
2910       Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
2911       Address code_entry = Memory::Address_at(code_entry_slot);
2912 
2913       if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2914         SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2915                            SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
2916                            SlotsBuffer::IGNORE_OVERFLOW);
2917       }
2918     } else if (dst->IsConstantPoolArray()) {
2919       // We special case ConstantPoolArrays since they could contain integer
2920       // value entries which look like tagged pointers.
2921       // TODO(mstarzinger): restructure this code to avoid this special-casing.
2922       ConstantPoolArray* array = ConstantPoolArray::cast(dst);
2923       ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
2924       while (!code_iter.is_finished()) {
2925         Address code_entry_slot =
2926             dst_addr + array->OffsetOfElementAt(code_iter.next_index());
2927         Address code_entry = Memory::Address_at(code_entry_slot);
2928 
2929         if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2930           SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2931                              SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
2932                              SlotsBuffer::IGNORE_OVERFLOW);
2933         }
2934       }
2935       ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
2936       while (!heap_iter.is_finished()) {
2937         Address heap_slot =
2938             dst_addr + array->OffsetOfElementAt(heap_iter.next_index());
2939         Object* value = Memory::Object_at(heap_slot);
2940         RecordMigratedSlot(value, heap_slot);
2941       }
2942     }
2943   } else if (dest == CODE_SPACE) {
2944     PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
2945     heap()->MoveBlock(dst_addr, src_addr, size);
2946     SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2947                        SlotsBuffer::RELOCATED_CODE_OBJECT, dst_addr,
2948                        SlotsBuffer::IGNORE_OVERFLOW);
2949     Code::cast(dst)->Relocate(dst_addr - src_addr);
2950   } else {
2951     DCHECK(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2952     heap()->MoveBlock(dst_addr, src_addr, size);
2953   }
2954   heap()->OnMoveEvent(dst, src, size);
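  // Leave the forwarding address in the first word of the source object; the
  // pointer-updating passes read it back through the object's map word.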
2955   Memory::Address_at(src_addr) = dst_addr;
2956 }
2957 
2958 
2959 // Visitor for updating pointers from live objects in old spaces to new space.
2960 // It does not expect to encounter pointers to dead objects.
2961 class PointersUpdatingVisitor : public ObjectVisitor {
2962  public:
2963   explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
2964 
2965   void VisitPointer(Object** p) { UpdatePointer(p); }
2966 
2967   void VisitPointers(Object** start, Object** end) {
2968     for (Object** p = start; p < end; p++) UpdatePointer(p);
2969   }
2970 
2971   void VisitEmbeddedPointer(RelocInfo* rinfo) {
2972     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2973     Object* target = rinfo->target_object();
2974     Object* old_target = target;
2975     VisitPointer(&target);
2976     // Avoid unnecessary changes that might unnecessarily flush the instruction
2977     // cache.
2978     if (target != old_target) {
2979       rinfo->set_target_object(target);
2980     }
2981   }
2982 
2983   void VisitCodeTarget(RelocInfo* rinfo) {
2984     DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
2985     Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2986     Object* old_target = target;
2987     VisitPointer(&target);
2988     if (target != old_target) {
2989       rinfo->set_target_address(Code::cast(target)->instruction_start());
2990     }
2991   }
2992 
2993   void VisitCodeAgeSequence(RelocInfo* rinfo) {
2994     DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
2995     Object* stub = rinfo->code_age_stub();
2996     DCHECK(stub != NULL);
2997     VisitPointer(&stub);
2998     if (stub != rinfo->code_age_stub()) {
2999       rinfo->set_code_age_stub(Code::cast(stub));
3000     }
3001   }
3002 
3003   void VisitDebugTarget(RelocInfo* rinfo) {
3004     DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
3005             rinfo->IsPatchedReturnSequence()) ||
3006            (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
3007             rinfo->IsPatchedDebugBreakSlotSequence()));
3008     Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
3009     VisitPointer(&target);
3010     rinfo->set_call_address(Code::cast(target)->instruction_start());
3011   }
3012 
3013   static inline void UpdateSlot(Heap* heap, Object** slot) {
3014     Object* obj = *slot;
3015 
3016     if (!obj->IsHeapObject()) return;
3017 
3018     HeapObject* heap_obj = HeapObject::cast(obj);
3019 
3020     MapWord map_word = heap_obj->map_word();
3021     if (map_word.IsForwardingAddress()) {
3022       DCHECK(heap->InFromSpace(heap_obj) ||
3023              MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
3024       HeapObject* target = map_word.ToForwardingAddress();
3025       *slot = target;
3026       DCHECK(!heap->InFromSpace(target) &&
3027              !MarkCompactCollector::IsOnEvacuationCandidate(target));
3028     }
3029   }
3030 
3031  private:
3032   inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
3033 
3034   Heap* heap_;
3035 };
3036 
3037 
3038 static void UpdatePointer(HeapObject** address, HeapObject* object) {
3039   Address new_addr = Memory::Address_at(object->address());
3040 
3041   // The new space sweep will overwrite the map word of dead objects
3042   // with NULL. In this case we do not need to transfer this entry to
3043   // the store buffer which we are rebuilding.
3044   // We perform the pointer update with a no barrier compare-and-swap. The
3045   // compare and swap may fail in the case where the pointer update tries to
3046   // update garbage memory which was concurrently accessed by the sweeper.
3047   if (new_addr != NULL) {
3048     base::NoBarrier_CompareAndSwap(
3049         reinterpret_cast<base::AtomicWord*>(address),
3050         reinterpret_cast<base::AtomicWord>(object),
3051         reinterpret_cast<base::AtomicWord>(HeapObject::FromAddress(new_addr)));
3052   }
3053 }
3054 
3055 
3056 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
3057                                                          Object** p) {
3058   MapWord map_word = HeapObject::cast(*p)->map_word();
3059 
3060   if (map_word.IsForwardingAddress()) {
3061     return String::cast(map_word.ToForwardingAddress());
3062   }
3063 
3064   return String::cast(*p);
3065 }
3066 
3067 
3068 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
3069                                             int object_size) {
3070   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3071 
3072   OldSpace* target_space = heap()->TargetSpace(object);
3073 
3074   DCHECK(target_space == heap()->old_pointer_space() ||
3075          target_space == heap()->old_data_space());
3076   HeapObject* target;
3077   AllocationResult allocation = target_space->AllocateRaw(object_size);
3078   if (allocation.To(&target)) {
3079     MigrateObject(target, object, object_size, target_space->identity());
3080     heap()->IncrementPromotedObjectsSize(object_size);
3081     return true;
3082   }
3083 
3084   return false;
3085 }
3086 
3087 
3088 void MarkCompactCollector::EvacuateNewSpace() {
3089   // There are soft limits in the allocation code, designed to trigger a
3090   // mark-sweep collection by failing allocations.  But since we are already in
3091   // a mark-sweep allocation, there is no sense in trying to trigger one.
3092   AlwaysAllocateScope scope(isolate());
3093 
3094   NewSpace* new_space = heap()->new_space();
3095 
3096   // Store allocation range before flipping semispaces.
3097   Address from_bottom = new_space->bottom();
3098   Address from_top = new_space->top();
3099 
3100   // Flip the semispaces.  After flipping, to space is empty, from space has
3101   // live objects.
3102   new_space->Flip();
3103   new_space->ResetAllocationInfo();
3104 
3105   int survivors_size = 0;
3106 
3107   // First pass: traverse all objects in inactive semispace, remove marks,
3108   // migrate live objects and write forwarding addresses.  This stage puts
3109   // new entries in the store buffer and may cause some pages to be marked
3110   // scan-on-scavenge.
3111   NewSpacePageIterator it(from_bottom, from_top);
3112   while (it.has_next()) {
3113     NewSpacePage* p = it.next();
3114     survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
3115   }
3116 
3117   heap_->IncrementYoungSurvivorsCounter(survivors_size);
3118   new_space->set_age_mark(new_space->top());
3119 }
3120 
3121 
3122 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
3123   AlwaysAllocateScope always_allocate(isolate());
3124   PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3125   DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
3126   p->SetWasSwept();
3127 
3128   int offsets[16];
3129 
3130   for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3131     Address cell_base = it.CurrentCellBase();
3132     MarkBit::CellType* cell = it.CurrentCell();
3133 
3134     if (*cell == 0) continue;
3135 
3136     int live_objects = MarkWordToObjectStarts(*cell, offsets);
3137     for (int i = 0; i < live_objects; i++) {
3138       Address object_addr = cell_base + offsets[i] * kPointerSize;
3139       HeapObject* object = HeapObject::FromAddress(object_addr);
3140       DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3141 
3142       int size = object->Size();
3143 
3144       HeapObject* target_object;
3145       AllocationResult allocation = space->AllocateRaw(size);
3146       if (!allocation.To(&target_object)) {
3147         // If allocation failed, use emergency memory and re-try allocation.
3148         CHECK(space->HasEmergencyMemory());
3149         space->UseEmergencyMemory();
3150         allocation = space->AllocateRaw(size);
3151       }
3152       if (!allocation.To(&target_object)) {
3153         // OS refused to give us memory.
3154         V8::FatalProcessOutOfMemory("Evacuation");
3155         return;
3156       }
3157 
3158       MigrateObject(target_object, object, size, space->identity());
3159       DCHECK(object->map_word().IsForwardingAddress());
3160     }
3161 
3162     // Clear marking bits for current cell.
3163     *cell = 0;
3164   }
3165   p->ResetLiveBytes();
3166 }
3167 
3168 
3169 void MarkCompactCollector::EvacuatePages() {
3170   int npages = evacuation_candidates_.length();
3171   for (int i = 0; i < npages; i++) {
3172     Page* p = evacuation_candidates_[i];
3173     DCHECK(p->IsEvacuationCandidate() ||
3174            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3175     DCHECK(static_cast<int>(p->parallel_sweeping()) ==
3176            MemoryChunk::SWEEPING_DONE);
3177     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3178     // Allocate emergency memory for the case when compaction fails due to
3179     // running out of memory.
3180     if (!space->HasEmergencyMemory()) {
3181       space->CreateEmergencyMemory();
3182     }
3183     if (p->IsEvacuationCandidate()) {
3184       // During compaction we might have to request a new page. Check that we
3185       // have an emergency page and the space still has room for that.
3186       if (space->HasEmergencyMemory() && space->CanExpand()) {
3187         EvacuateLiveObjectsFromPage(p);
3188       } else {
3189         // Without room for expansion evacuation is not guaranteed to succeed.
3190         // Pessimistically abandon unevacuated pages.
3191         for (int j = i; j < npages; j++) {
3192           Page* page = evacuation_candidates_[j];
3193           slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
3194           page->ClearEvacuationCandidate();
3195           page->SetFlag(Page::RESCAN_ON_EVACUATION);
3196         }
3197         break;
3198       }
3199     }
3200   }
3201   if (npages > 0) {
3202     // Release emergency memory.
3203     PagedSpaces spaces(heap());
3204     for (PagedSpace* space = spaces.next(); space != NULL;
3205          space = spaces.next()) {
3206       if (space->HasEmergencyMemory()) {
3207         space->FreeEmergencyMemory();
3208       }
3209     }
3210   }
3211 }
3212 
3213 
3214 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3215  public:
3216   virtual Object* RetainAs(Object* object) {
3217     if (object->IsHeapObject()) {
3218       HeapObject* heap_object = HeapObject::cast(object);
3219       MapWord map_word = heap_object->map_word();
3220       if (map_word.IsForwardingAddress()) {
3221         return map_word.ToForwardingAddress();
3222       }
3223     }
3224     return object;
3225   }
3226 };
3227 
3228 
3229 static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
3230                               SlotsBuffer::SlotType slot_type, Address addr) {
3231   switch (slot_type) {
3232     case SlotsBuffer::CODE_TARGET_SLOT: {
3233       RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
3234       rinfo.Visit(isolate, v);
3235       break;
3236     }
3237     case SlotsBuffer::CODE_ENTRY_SLOT: {
3238       v->VisitCodeEntry(addr);
3239       break;
3240     }
3241     case SlotsBuffer::RELOCATED_CODE_OBJECT: {
3242       HeapObject* obj = HeapObject::FromAddress(addr);
3243       Code::cast(obj)->CodeIterateBody(v);
3244       break;
3245     }
3246     case SlotsBuffer::DEBUG_TARGET_SLOT: {
3247       RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
3248       if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
3249       break;
3250     }
3251     case SlotsBuffer::JS_RETURN_SLOT: {
3252       RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
3253       if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
3254       break;
3255     }
3256     case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
3257       RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
3258       rinfo.Visit(isolate, v);
3259       break;
3260     }
3261     default:
3262       UNREACHABLE();
3263       break;
3264   }
3265 }
3266 
3267 
3268 enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
3269 
3270 
3271 enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
3272 
3273 
3274 enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
3275 
3276 
3277 template <MarkCompactCollector::SweepingParallelism mode>
3278 static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
3279                      int size) {
3280   if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
3281     DCHECK(free_list == NULL);
3282     return space->Free(start, size);
3283   } else {
3284     // TODO(hpayer): account for wasted bytes in concurrent sweeping too.
3285     return size - free_list->Free(start, size);
3286   }
3287 }
3288 
3289 
3290 // Sweeps a page. After sweeping, the page can be iterated.
3291 // Slots in live objects pointing into evacuation candidates are updated
3292 // if requested.
3293 // Returns the size of the largest contiguous freed memory chunk in bytes.
3294 template <SweepingMode sweeping_mode,
3295           MarkCompactCollector::SweepingParallelism parallelism,
3296           SkipListRebuildingMode skip_list_mode,
3297           FreeSpaceTreatmentMode free_space_mode>
3298 static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
3299                  ObjectVisitor* v) {
3300   DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
3301   DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3302             space->identity() == CODE_SPACE);
3303   DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3304   DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
3305          sweeping_mode == SWEEP_ONLY);
3306 
3307   Address free_start = p->area_start();
3308   DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3309   int offsets[16];
3310 
3311   SkipList* skip_list = p->skip_list();
3312   int curr_region = -1;
3313   if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3314     skip_list->Clear();
3315   }
3316 
3317   intptr_t freed_bytes = 0;
3318   intptr_t max_freed_bytes = 0;
3319 
3320   for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3321     Address cell_base = it.CurrentCellBase();
3322     MarkBit::CellType* cell = it.CurrentCell();
3323     int live_objects = MarkWordToObjectStarts(*cell, offsets);
3324     int live_index = 0;
3325     for (; live_objects != 0; live_objects--) {
3326       Address free_end = cell_base + offsets[live_index++] * kPointerSize;
3327       if (free_end != free_start) {
3328         int size = static_cast<int>(free_end - free_start);
3329         if (free_space_mode == ZAP_FREE_SPACE) {
3330           memset(free_start, 0xcc, size);
3331         }
3332         freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3333         max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3334 #ifdef ENABLE_GDB_JIT_INTERFACE
3335         if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3336           GDBJITInterface::RemoveCodeRange(free_start, free_end);
3337         }
3338 #endif
3339       }
3340       HeapObject* live_object = HeapObject::FromAddress(free_end);
3341       DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3342       Map* map = live_object->map();
3343       int size = live_object->SizeFromMap(map);
3344       if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3345         live_object->IterateBody(map->instance_type(), size, v);
3346       }
3347       if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3348         int new_region_start = SkipList::RegionNumber(free_end);
3349         int new_region_end =
3350             SkipList::RegionNumber(free_end + size - kPointerSize);
3351         if (new_region_start != curr_region || new_region_end != curr_region) {
3352           skip_list->AddObject(free_end, size);
3353           curr_region = new_region_end;
3354         }
3355       }
3356       free_start = free_end + size;
3357     }
3358     // Clear marking bits for current cell.
3359     *cell = 0;
3360   }
3361   if (free_start != p->area_end()) {
3362     int size = static_cast<int>(p->area_end() - free_start);
3363     if (free_space_mode == ZAP_FREE_SPACE) {
3364       memset(free_start, 0xcc, size);
3365     }
3366     freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3367     max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3368 #ifdef ENABLE_GDB_JIT_INTERFACE
3369     if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3370       GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
3371     }
3372 #endif
3373   }
3374   p->ResetLiveBytes();
3375 
3376   if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
3377     // When concurrent sweeping is active, the page will be marked after
3378     // sweeping by the main thread.
3379     p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
3380   } else {
3381     p->SetWasSwept();
3382   }
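  // Report the largest freed chunk conservatively, as a size the free list
  // can actually guarantee to hand back out in a single allocation.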
3383   return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
3384 }
3385 
3386 
3387 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3388   Page* p = Page::FromAddress(code->address());
3389 
3390   if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3391     return false;
3392   }
3393 
3394   Address code_start = code->address();
3395   Address code_end = code_start + code->Size();
3396 
3397   uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
3398   uint32_t end_index =
3399       MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
3400 
3401   Bitmap* b = p->markbits();
3402 
3403   MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
3404   MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
3405 
3406   MarkBit::CellType* start_cell = start_mark_bit.cell();
3407   MarkBit::CellType* end_cell = end_mark_bit.cell();
3408 
3409   if (value) {
3410     MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
3411     MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
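    // For example, if start_mark_bit.mask() == 0x8 (bit 3), start_mask is
    // ~(0x8 - 1), i.e. bits 3..31; if end_mark_bit.mask() == 0x10 (bit 4),
    // end_mask is (0x10 << 1) - 1 == 0x1f, i.e. bits 0..4.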
3412 
3413     if (start_cell == end_cell) {
3414       *start_cell |= start_mask & end_mask;
3415     } else {
3416       *start_cell |= start_mask;
3417       for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
3418         *cell = ~0;
3419       }
3420       *end_cell |= end_mask;
3421     }
3422   } else {
3423     for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
3424       *cell = 0;
3425     }
3426   }
3427 
3428   return true;
3429 }
3430 
3431 
3432 static bool IsOnInvalidatedCodeObject(Address addr) {
3433   // We did not record any slots in large objects, thus
3434   // we can safely go to the page from the slot address.
3435   Page* p = Page::FromAddress(addr);
3436 
3437   // First check owner's identity because old pointer and old data spaces
3438   // are swept lazily and might still have non-zero mark-bits on some
3439   // pages.
3440   if (p->owner()->identity() != CODE_SPACE) return false;
3441 
3442   // In code space, the only non-zero mark bits are on evacuation candidates
3443   // (where we do not record any slots) and under invalidated code objects.
3444   MarkBit mark_bit =
3445       p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
3446 
3447   return mark_bit.Get();
3448 }
3449 
3450 
3451 void MarkCompactCollector::InvalidateCode(Code* code) {
3452   if (heap_->incremental_marking()->IsCompacting() &&
3453       !ShouldSkipEvacuationSlotRecording(code)) {
3454     DCHECK(compacting_);
3455 
3456     // If the object is white then no slots have been recorded on it yet.
3457     MarkBit mark_bit = Marking::MarkBitFrom(code);
3458     if (Marking::IsWhite(mark_bit)) return;
3459 
3460     invalidated_code_.Add(code);
3461   }
3462 }
3463 
3464 
3465 // Return true if the given code is deoptimized or will be deoptimized.
3466 bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3467   return code->is_optimized_code() && code->marked_for_deoptimization();
3468 }
3469 
3470 
3471 bool MarkCompactCollector::MarkInvalidatedCode() {
3472   bool code_marked = false;
3473 
3474   int length = invalidated_code_.length();
3475   for (int i = 0; i < length; i++) {
3476     Code* code = invalidated_code_[i];
3477 
3478     if (SetMarkBitsUnderInvalidatedCode(code, true)) {
3479       code_marked = true;
3480     }
3481   }
3482 
3483   return code_marked;
3484 }
3485 
3486 
3487 void MarkCompactCollector::RemoveDeadInvalidatedCode() {
3488   int length = invalidated_code_.length();
3489   for (int i = 0; i < length; i++) {
3490     if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
3491   }
3492 }
3493 
3494 
3495 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
3496   int length = invalidated_code_.length();
3497   for (int i = 0; i < length; i++) {
3498     Code* code = invalidated_code_[i];
3499     if (code != NULL) {
3500       code->Iterate(visitor);
3501       SetMarkBitsUnderInvalidatedCode(code, false);
3502     }
3503   }
3504   invalidated_code_.Rewind(0);
3505 }
3506 
3507 
3508 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3509   Heap::RelocationLock relocation_lock(heap());
3510 
3511   bool code_slots_filtering_required;
3512   {
3513     GCTracer::Scope gc_scope(heap()->tracer(),
3514                              GCTracer::Scope::MC_SWEEP_NEWSPACE);
3515     code_slots_filtering_required = MarkInvalidatedCode();
3516     EvacuateNewSpace();
3517   }
3518 
3519   {
3520     GCTracer::Scope gc_scope(heap()->tracer(),
3521                              GCTracer::Scope::MC_EVACUATE_PAGES);
3522     EvacuatePages();
3523   }
3524 
3525   // Second pass: find pointers to new space and update them.
3526   PointersUpdatingVisitor updating_visitor(heap());
3527 
3528   {
3529     GCTracer::Scope gc_scope(heap()->tracer(),
3530                              GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
3531     // Update pointers in to space.
3532     SemiSpaceIterator to_it(heap()->new_space()->bottom(),
3533                             heap()->new_space()->top());
3534     for (HeapObject* object = to_it.Next(); object != NULL;
3535          object = to_it.Next()) {
3536       Map* map = object->map();
3537       object->IterateBody(map->instance_type(), object->SizeFromMap(map),
3538                           &updating_visitor);
3539     }
3540   }
3541 
3542   {
3543     GCTracer::Scope gc_scope(heap()->tracer(),
3544                              GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
3545     // Update roots.
3546     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3547   }
3548 
3549   {
3550     GCTracer::Scope gc_scope(heap()->tracer(),
3551                              GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
3552     StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
3553                                   &Heap::ScavengeStoreBufferCallback);
3554     heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
3555         &UpdatePointer);
3556   }
3557 
3558   {
3559     GCTracer::Scope gc_scope(heap()->tracer(),
3560                              GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
3561     SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_,
3562                                        code_slots_filtering_required);
3563     if (FLAG_trace_fragmentation) {
3564       PrintF("  migration slots buffer: %d\n",
3565              SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3566     }
3567 
3568     if (compacting_ && was_marked_incrementally_) {
3569       // It's difficult to filter out slots recorded for large objects.
3570       LargeObjectIterator it(heap_->lo_space());
3571       for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3572         // LargeObjectSpace is not swept yet thus we have to skip
3573         // dead objects explicitly.
3574         if (!IsMarked(obj)) continue;
3575 
3576         Page* p = Page::FromAddress(obj->address());
3577         if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3578           obj->Iterate(&updating_visitor);
3579           p->ClearFlag(Page::RESCAN_ON_EVACUATION);
3580         }
3581       }
3582     }
3583   }
3584 
3585   int npages = evacuation_candidates_.length();
3586   {
3587     GCTracer::Scope gc_scope(
3588         heap()->tracer(),
3589         GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3590     for (int i = 0; i < npages; i++) {
3591       Page* p = evacuation_candidates_[i];
3592       DCHECK(p->IsEvacuationCandidate() ||
3593              p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3594 
3595       if (p->IsEvacuationCandidate()) {
3596         SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(),
3597                                            code_slots_filtering_required);
3598         if (FLAG_trace_fragmentation) {
3599           PrintF("  page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
3600                  SlotsBuffer::SizeOfChain(p->slots_buffer()));
3601         }
3602 
3603         // Important: skip list should be cleared only after roots were updated
3604         // because root iteration traverses the stack and might have to find
3605         // code objects from a non-updated pc pointing into an evacuation candidate.
3606         SkipList* list = p->skip_list();
3607         if (list != NULL) list->Clear();
3608       } else {
3609         if (FLAG_gc_verbose) {
3610           PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3611                  reinterpret_cast<intptr_t>(p));
3612         }
3613         PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3614         p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3615 
3616         switch (space->identity()) {
3617           case OLD_DATA_SPACE:
3618             Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3619                   IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
3620                                                        &updating_visitor);
3621             break;
3622           case OLD_POINTER_SPACE:
3623             Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3624                   IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
3625                                                        &updating_visitor);
3626             break;
3627           case CODE_SPACE:
3628             if (FLAG_zap_code_space) {
3629               Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3630                     REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p,
3631                                                        &updating_visitor);
3632             } else {
3633               Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3634                     REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
3635                                                           &updating_visitor);
3636             }
3637             break;
3638           default:
3639             UNREACHABLE();
3640             break;
3641         }
3642       }
3643     }
3644   }
3645 
3646   GCTracer::Scope gc_scope(heap()->tracer(),
3647                            GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
3648 
3649   // Update pointers from cells.
3650   HeapObjectIterator cell_iterator(heap_->cell_space());
3651   for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
3652        cell = cell_iterator.Next()) {
3653     if (cell->IsCell()) {
3654       Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3655     }
3656   }
3657 
3658   HeapObjectIterator js_global_property_cell_iterator(
3659       heap_->property_cell_space());
3660   for (HeapObject* cell = js_global_property_cell_iterator.Next(); cell != NULL;
3661        cell = js_global_property_cell_iterator.Next()) {
3662     if (cell->IsPropertyCell()) {
3663       PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3664     }
3665   }
3666 
3667   heap_->string_table()->Iterate(&updating_visitor);
3668   updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
3669   if (heap_->weak_object_to_code_table()->IsHashTable()) {
3670     WeakHashTable* table =
3671         WeakHashTable::cast(heap_->weak_object_to_code_table());
3672     table->Iterate(&updating_visitor);
3673     table->Rehash(heap_->isolate()->factory()->undefined_value());
3674   }
3675 
3676   // Update pointers from external string table.
3677   heap_->UpdateReferencesInExternalStringTable(
3678       &UpdateReferenceInExternalStringTableEntry);
3679 
3680   EvacuationWeakObjectRetainer evacuation_object_retainer;
3681   heap()->ProcessWeakReferences(&evacuation_object_retainer);
3682 
3683   // Visit invalidated code (we ignored all slots on it) and clear mark-bits
3684   // under it.
3685   ProcessInvalidatedCode(&updating_visitor);
3686 
3687   heap_->isolate()->inner_pointer_to_code_cache()->Flush();
3688 
3689   slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
3690   DCHECK(migration_slots_buffer_ == NULL);
3691 }
3692 
3693 
3694 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
3695   int npages = evacuation_candidates_.length();
3696   for (int i = 0; i < npages; i++) {
3697     Page* p = evacuation_candidates_[i];
3698     if (!p->IsEvacuationCandidate()) continue;
3699     p->Unlink();
3700     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3701     p->InsertAfter(space->LastPage());
3702   }
3703 }
3704 
3705 
3706 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3707   int npages = evacuation_candidates_.length();
3708   for (int i = 0; i < npages; i++) {
3709     Page* p = evacuation_candidates_[i];
3710     if (!p->IsEvacuationCandidate()) continue;
3711     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3712     space->Free(p->area_start(), p->area_size());
3713     p->set_scan_on_scavenge(false);
3714     slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
3715     p->ResetLiveBytes();
3716     space->ReleasePage(p);
3717   }
3718   evacuation_candidates_.Rewind(0);
3719   compacting_ = false;
3720   heap()->FreeQueuedChunks();
3721 }
3722 
3723 
3724 static const int kStartTableEntriesPerLine = 5;
3725 static const int kStartTableLines = 171;
3726 static const int kStartTableInvalidLine = 127;
3727 static const int kStartTableUnusedEntry = 126;
3728 
3729 #define _ kStartTableUnusedEntry
3730 #define X kStartTableInvalidLine
3731 // Mark-bit to object start offset table.
3732 //
3733 // The line is indexed by the mark bits in a byte.  The first number on
3734 // the line describes the number of live object starts for the line and the
3735 // other numbers on the line describe the offsets (in words) of the object
3736 // starts.
3737 //
3738 // Since objects are at least 2 words large we don't have entries for two
3739 // consecutive 1 bits.  All entries after 170 have at least 2 consecutive 1 bits.
3740 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
3741     0, _, _,
3742     _, _,  // 0
3743     1, 0, _,
3744     _, _,  // 1
3745     1, 1, _,
3746     _, _,  // 2
3747     X, _, _,
3748     _, _,  // 3
3749     1, 2, _,
3750     _, _,  // 4
3751     2, 0, 2,
3752     _, _,  // 5
3753     X, _, _,
3754     _, _,  // 6
3755     X, _, _,
3756     _, _,  // 7
3757     1, 3, _,
3758     _, _,  // 8
3759     2, 0, 3,
3760     _, _,  // 9
3761     2, 1, 3,
3762     _, _,  // 10
3763     X, _, _,
3764     _, _,  // 11
3765     X, _, _,
3766     _, _,  // 12
3767     X, _, _,
3768     _, _,  // 13
3769     X, _, _,
3770     _, _,  // 14
3771     X, _, _,
3772     _, _,  // 15
3773     1, 4, _,
3774     _, _,  // 16
3775     2, 0, 4,
3776     _, _,  // 17
3777     2, 1, 4,
3778     _, _,  // 18
3779     X, _, _,
3780     _, _,  // 19
3781     2, 2, 4,
3782     _, _,  // 20
3783     3, 0, 2,
3784     4, _,  // 21
3785     X, _, _,
3786     _, _,  // 22
3787     X, _, _,
3788     _, _,  // 23
3789     X, _, _,
3790     _, _,  // 24
3791     X, _, _,
3792     _, _,  // 25
3793     X, _, _,
3794     _, _,  // 26
3795     X, _, _,
3796     _, _,  // 27
3797     X, _, _,
3798     _, _,  // 28
3799     X, _, _,
3800     _, _,  // 29
3801     X, _, _,
3802     _, _,  // 30
3803     X, _, _,
3804     _, _,  // 31
3805     1, 5, _,
3806     _, _,  // 32
3807     2, 0, 5,
3808     _, _,  // 33
3809     2, 1, 5,
3810     _, _,  // 34
3811     X, _, _,
3812     _, _,  // 35
3813     2, 2, 5,
3814     _, _,  // 36
3815     3, 0, 2,
3816     5, _,  // 37
3817     X, _, _,
3818     _, _,  // 38
3819     X, _, _,
3820     _, _,  // 39
3821     2, 3, 5,
3822     _, _,  // 40
3823     3, 0, 3,
3824     5, _,  // 41
3825     3, 1, 3,
3826     5, _,  // 42
3827     X, _, _,
3828     _, _,  // 43
3829     X, _, _,
3830     _, _,  // 44
3831     X, _, _,
3832     _, _,  // 45
3833     X, _, _,
3834     _, _,  // 46
3835     X, _, _,
3836     _, _,  // 47
3837     X, _, _,
3838     _, _,  // 48
3839     X, _, _,
3840     _, _,  // 49
3841     X, _, _,
3842     _, _,  // 50
3843     X, _, _,
3844     _, _,  // 51
3845     X, _, _,
3846     _, _,  // 52
3847     X, _, _,
3848     _, _,  // 53
3849     X, _, _,
3850     _, _,  // 54
3851     X, _, _,
3852     _, _,  // 55
3853     X, _, _,
3854     _, _,  // 56
3855     X, _, _,
3856     _, _,  // 57
3857     X, _, _,
3858     _, _,  // 58
3859     X, _, _,
3860     _, _,  // 59
3861     X, _, _,
3862     _, _,  // 60
3863     X, _, _,
3864     _, _,  // 61
3865     X, _, _,
3866     _, _,  // 62
3867     X, _, _,
3868     _, _,  // 63
3869     1, 6, _,
3870     _, _,  // 64
3871     2, 0, 6,
3872     _, _,  // 65
3873     2, 1, 6,
3874     _, _,  // 66
3875     X, _, _,
3876     _, _,  // 67
3877     2, 2, 6,
3878     _, _,  // 68
3879     3, 0, 2,
3880     6, _,  // 69
3881     X, _, _,
3882     _, _,  // 70
3883     X, _, _,
3884     _, _,  // 71
3885     2, 3, 6,
3886     _, _,  // 72
3887     3, 0, 3,
3888     6, _,  // 73
3889     3, 1, 3,
3890     6, _,  // 74
3891     X, _, _,
3892     _, _,  // 75
3893     X, _, _,
3894     _, _,  // 76
3895     X, _, _,
3896     _, _,  // 77
3897     X, _, _,
3898     _, _,  // 78
3899     X, _, _,
3900     _, _,  // 79
3901     2, 4, 6,
3902     _, _,  // 80
3903     3, 0, 4,
3904     6, _,  // 81
3905     3, 1, 4,
3906     6, _,  // 82
3907     X, _, _,
3908     _, _,  // 83
3909     3, 2, 4,
3910     6, _,  // 84
3911     4, 0, 2,
3912     4, 6,  // 85
3913     X, _, _,
3914     _, _,  // 86
3915     X, _, _,
3916     _, _,  // 87
3917     X, _, _,
3918     _, _,  // 88
3919     X, _, _,
3920     _, _,  // 89
3921     X, _, _,
3922     _, _,  // 90
3923     X, _, _,
3924     _, _,  // 91
3925     X, _, _,
3926     _, _,  // 92
3927     X, _, _,
3928     _, _,  // 93
3929     X, _, _,
3930     _, _,  // 94
3931     X, _, _,
3932     _, _,  // 95
3933     X, _, _,
3934     _, _,  // 96
3935     X, _, _,
3936     _, _,  // 97
3937     X, _, _,
3938     _, _,  // 98
3939     X, _, _,
3940     _, _,  // 99
3941     X, _, _,
3942     _, _,  // 100
3943     X, _, _,
3944     _, _,  // 101
3945     X, _, _,
3946     _, _,  // 102
3947     X, _, _,
3948     _, _,  // 103
3949     X, _, _,
3950     _, _,  // 104
3951     X, _, _,
3952     _, _,  // 105
3953     X, _, _,
3954     _, _,  // 106
3955     X, _, _,
3956     _, _,  // 107
3957     X, _, _,
3958     _, _,  // 108
3959     X, _, _,
3960     _, _,  // 109
3961     X, _, _,
3962     _, _,  // 110
3963     X, _, _,
3964     _, _,  // 111
3965     X, _, _,
3966     _, _,  // 112
3967     X, _, _,
3968     _, _,  // 113
3969     X, _, _,
3970     _, _,  // 114
3971     X, _, _,
3972     _, _,  // 115
3973     X, _, _,
3974     _, _,  // 116
3975     X, _, _,
3976     _, _,  // 117
3977     X, _, _,
3978     _, _,  // 118
3979     X, _, _,
3980     _, _,  // 119
3981     X, _, _,
3982     _, _,  // 120
3983     X, _, _,
3984     _, _,  // 121
3985     X, _, _,
3986     _, _,  // 122
3987     X, _, _,
3988     _, _,  // 123
3989     X, _, _,
3990     _, _,  // 124
3991     X, _, _,
3992     _, _,  // 125
3993     X, _, _,
3994     _, _,  // 126
3995     X, _, _,
3996     _, _,  // 127
3997     1, 7, _,
3998     _, _,  // 128
3999     2, 0, 7,
4000     _, _,  // 129
4001     2, 1, 7,
4002     _, _,  // 130
4003     X, _, _,
4004     _, _,  // 131
4005     2, 2, 7,
4006     _, _,  // 132
4007     3, 0, 2,
4008     7, _,  // 133
4009     X, _, _,
4010     _, _,  // 134
4011     X, _, _,
4012     _, _,  // 135
4013     2, 3, 7,
4014     _, _,  // 136
4015     3, 0, 3,
4016     7, _,  // 137
4017     3, 1, 3,
4018     7, _,  // 138
4019     X, _, _,
4020     _, _,  // 139
4021     X, _, _,
4022     _, _,  // 140
4023     X, _, _,
4024     _, _,  // 141
4025     X, _, _,
4026     _, _,  // 142
4027     X, _, _,
4028     _, _,  // 143
4029     2, 4, 7,
4030     _, _,  // 144
4031     3, 0, 4,
4032     7, _,  // 145
4033     3, 1, 4,
4034     7, _,  // 146
4035     X, _, _,
4036     _, _,  // 147
4037     3, 2, 4,
4038     7, _,  // 148
4039     4, 0, 2,
4040     4, 7,  // 149
4041     X, _, _,
4042     _, _,  // 150
4043     X, _, _,
4044     _, _,  // 151
4045     X, _, _,
4046     _, _,  // 152
4047     X, _, _,
4048     _, _,  // 153
4049     X, _, _,
4050     _, _,  // 154
4051     X, _, _,
4052     _, _,  // 155
4053     X, _, _,
4054     _, _,  // 156
4055     X, _, _,
4056     _, _,  // 157
4057     X, _, _,
4058     _, _,  // 158
4059     X, _, _,
4060     _, _,  // 159
4061     2, 5, 7,
4062     _, _,  // 160
4063     3, 0, 5,
4064     7, _,  // 161
4065     3, 1, 5,
4066     7, _,  // 162
4067     X, _, _,
4068     _, _,  // 163
4069     3, 2, 5,
4070     7, _,  // 164
4071     4, 0, 2,
4072     5, 7,  // 165
4073     X, _, _,
4074     _, _,  // 166
4075     X, _, _,
4076     _, _,  // 167
4077     3, 3, 5,
4078     7, _,  // 168
4079     4, 0, 3,
4080     5, 7,  // 169
4081     4, 1, 3,
4082     5, 7  // 170
4083 };
4084 #undef _
4085 #undef X
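// For example, the byte 0x25 (0b00100101, line 37 above) has mark bits at
// positions 0, 2 and 5, so its entry reads "3, 0, 2, 5, _": three object
// starts, at word offsets 0, 2 and 5 within the 8-word region.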
4086 
4087 
4088 // Takes a word of mark bits.  Returns the number of objects that start in the
4089 // range.  Puts the offsets of the words in the supplied array.
4090 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
4091   int objects = 0;
4092   int offset = 0;
4093 
4094   // No consecutive 1 bits.
4095   DCHECK((mark_bits & 0x180) != 0x180);
4096   DCHECK((mark_bits & 0x18000) != 0x18000);
4097   DCHECK((mark_bits & 0x1800000) != 0x1800000);
4098 
4099   while (mark_bits != 0) {
4100     int byte = (mark_bits & 0xff);
4101     mark_bits >>= 8;
4102     if (byte != 0) {
4103       DCHECK(byte < kStartTableLines);  // No consecutive 1 bits.
4104       char* table = kStartTable + byte * kStartTableEntriesPerLine;
4105       int objects_in_these_8_words = table[0];
4106       DCHECK(objects_in_these_8_words != kStartTableInvalidLine);
4107       DCHECK(objects_in_these_8_words < kStartTableEntriesPerLine);
4108       for (int i = 0; i < objects_in_these_8_words; i++) {
4109         starts[objects++] = offset + table[1 + i];
4110       }
4111     }
4112     offset += 8;
4113   }
4114   return objects;
4115 }
4116 
4117 
4118 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
4119                                           int required_freed_bytes) {
4120   int max_freed = 0;
4121   int max_freed_overall = 0;
4122   PageIterator it(space);
4123   while (it.has_next()) {
4124     Page* p = it.next();
4125     max_freed = SweepInParallel(p, space);
4126     DCHECK(max_freed >= 0);
4127     if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
4128       return max_freed;
4129     }
4130     max_freed_overall = Max(max_freed, max_freed_overall);
4131     if (p == space->end_of_unswept_pages()) break;
4132   }
4133   return max_freed_overall;
4134 }
4135 
4136 
4137 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
4138   int max_freed = 0;
4139   if (page->TryParallelSweeping()) {
4140     FreeList* free_list = space == heap()->old_pointer_space()
4141                               ? free_list_old_pointer_space_.get()
4142                               : free_list_old_data_space_.get();
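    // Sweep into a private free list and concatenate it onto the space's
    // shared free list afterwards, so concurrent sweepers do not contend on
    // the shared list for every freed block.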
4143     FreeList private_free_list(space);
4144     max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
4145                       IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
4146     free_list->Concatenate(&private_free_list);
4147   }
4148   return max_freed;
4149 }
4150 
4151 
4152 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
4153   space->ClearStats();
4154 
4155   // We defensively initialize end_of_unswept_pages_ here with the first page
4156   // of the pages list.
4157   space->set_end_of_unswept_pages(space->FirstPage());
4158 
4159   PageIterator it(space);
4160 
4161   int pages_swept = 0;
4162   bool unused_page_present = false;
4163   bool parallel_sweeping_active = false;
4164 
4165   while (it.has_next()) {
4166     Page* p = it.next();
4167     DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
4168 
4169     // Clear sweeping flags indicating that marking bits are still intact.
4170     p->ClearWasSwept();
4171 
4172     if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
4173         p->IsEvacuationCandidate()) {
4174       // Will be processed in EvacuateNewSpaceAndCandidates.
4175       DCHECK(evacuation_candidates_.length() > 0);
4176       continue;
4177     }
4178 
4179     // One unused page is kept; all further ones are released before being swept.
4180     if (p->LiveBytes() == 0) {
4181       if (unused_page_present) {
4182         if (FLAG_gc_verbose) {
4183           PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
4184                  reinterpret_cast<intptr_t>(p));
4185         }
4186         // Adjust unswept free bytes because releasing a page expects said
4187         // counter to be accurate for unswept pages.
4188         space->IncreaseUnsweptFreeBytes(p);
4189         space->ReleasePage(p);
4190         continue;
4191       }
4192       unused_page_present = true;
4193     }
4194 
4195     switch (sweeper) {
4196       case CONCURRENT_SWEEPING:
4197       case PARALLEL_SWEEPING:
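        // The first such page is swept right away on the main thread so that
        // swept memory is available for allocation immediately; subsequent
        // pages are only queued for the sweeper threads.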
4198         if (!parallel_sweeping_active) {
4199           if (FLAG_gc_verbose) {
4200             PrintF("Sweeping 0x%" V8PRIxPTR ".\n",
4201                    reinterpret_cast<intptr_t>(p));
4202           }
4203           Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
4204                 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
4205           pages_swept++;
4206           parallel_sweeping_active = true;
4207         } else {
4208           if (FLAG_gc_verbose) {
4209             PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n",
4210                    reinterpret_cast<intptr_t>(p));
4211           }
4212           p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
4213           space->IncreaseUnsweptFreeBytes(p);
4214         }
4215         space->set_end_of_unswept_pages(p);
4216         break;
4217       case SEQUENTIAL_SWEEPING: {
4218         if (FLAG_gc_verbose) {
4219           PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p));
4220         }
4221         if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
4222           Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
4223                 ZAP_FREE_SPACE>(space, NULL, p, NULL);
4224         } else if (space->identity() == CODE_SPACE) {
4225           Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
4226                 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
4227         } else {
4228           Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
4229                 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
4230         }
4231         pages_swept++;
4232         break;
4233       }
4234       default: { UNREACHABLE(); }
4235     }
4236   }
4237 
4238   if (FLAG_gc_verbose) {
4239     PrintF("SweepSpace: %s (%d pages swept)\n",
4240            AllocationSpaceName(space->identity()), pages_swept);
4241   }
4242 
4243   // Give pages that are queued to be freed back to the OS.
4244   heap()->FreeQueuedChunks();
4245 }
4246 
4247 
4248 static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) {
4249   return type == MarkCompactCollector::PARALLEL_SWEEPING ||
4250          type == MarkCompactCollector::CONCURRENT_SWEEPING;
4251 }
4252 
4253 
4254 static bool ShouldWaitForSweeperThreads(
4255     MarkCompactCollector::SweeperType type) {
4256   return type == MarkCompactCollector::PARALLEL_SWEEPING;
4257 }
4258 
4259 
4260 void MarkCompactCollector::SweepSpaces() {
4261   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
4262   double start_time = 0.0;
4263   if (FLAG_print_cumulative_gc_stat) {
4264     start_time = base::OS::TimeCurrentMillis();
4265   }
4266 
4267 #ifdef DEBUG
4268   state_ = SWEEP_SPACES;
4269 #endif
4270   SweeperType how_to_sweep = CONCURRENT_SWEEPING;
4271   if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_SWEEPING;
4272   if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_SWEEPING;
4273 
4274   MoveEvacuationCandidatesToEndOfPagesList();
4275 
4276   // Noncompacting collections simply sweep the spaces to clear the mark
4277   // bits and free the nonlive blocks (for old and map spaces).  We sweep
4278   // the map space last because freeing non-live maps overwrites them and
4279   // the other spaces rely on possibly non-live maps to get the sizes for
4280   // non-live objects.
4281   {
4282     GCTracer::Scope sweep_scope(heap()->tracer(),
4283                                 GCTracer::Scope::MC_SWEEP_OLDSPACE);
4284     {
4285       SequentialSweepingScope scope(this);
4286       SweepSpace(heap()->old_pointer_space(), how_to_sweep);
4287       SweepSpace(heap()->old_data_space(), how_to_sweep);
4288     }
4289 
4290     if (ShouldStartSweeperThreads(how_to_sweep)) {
4291       StartSweeperThreads();
4292     }
4293 
4294     if (ShouldWaitForSweeperThreads(how_to_sweep)) {
4295       EnsureSweepingCompleted();
4296     }
4297   }
4298   RemoveDeadInvalidatedCode();
4299 
4300   {
4301     GCTracer::Scope sweep_scope(heap()->tracer(),
4302                                 GCTracer::Scope::MC_SWEEP_CODE);
4303     SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING);
4304   }
4305 
4306   {
4307     GCTracer::Scope sweep_scope(heap()->tracer(),
4308                                 GCTracer::Scope::MC_SWEEP_CELL);
4309     SweepSpace(heap()->cell_space(), SEQUENTIAL_SWEEPING);
4310     SweepSpace(heap()->property_cell_space(), SEQUENTIAL_SWEEPING);
4311   }
4312 
4313   EvacuateNewSpaceAndCandidates();
4314 
4315   // ClearNonLiveTransitions depends on precise sweeping of map space to
4316   // detect whether an unmarked map became dead in this collection or in one
4317   // of the previous ones.
4318   {
4319     GCTracer::Scope sweep_scope(heap()->tracer(),
4320                                 GCTracer::Scope::MC_SWEEP_MAP);
4321     SweepSpace(heap()->map_space(), SEQUENTIAL_SWEEPING);
4322   }
4323 
4324   // Deallocate unmarked objects and clear marked bits for marked objects.
4325   heap_->lo_space()->FreeUnmarkedObjects();
4326 
4327   // Deallocate evacuated candidate pages.
4328   ReleaseEvacuationCandidates();
4329 
4330   if (FLAG_print_cumulative_gc_stat) {
4331     heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
4332                                      start_time);
4333   }
4334 }
4335 
4336 
4337 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
4338   PageIterator it(space);
4339   while (it.has_next()) {
4340     Page* p = it.next();
4341     if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
4342       p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
4343       p->SetWasSwept();
4344     }
4345     DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
4346   }
4347 }
4348 
4349 
4350 void MarkCompactCollector::ParallelSweepSpacesComplete() {
4351   ParallelSweepSpaceComplete(heap()->old_pointer_space());
4352   ParallelSweepSpaceComplete(heap()->old_data_space());
4353 }
4354 
4355 
4356 void MarkCompactCollector::EnableCodeFlushing(bool enable) {
4357   if (isolate()->debug()->is_loaded() ||
4358       isolate()->debug()->has_break_points()) {
4359     enable = false;
4360   }
4361 
4362   if (enable) {
4363     if (code_flusher_ != NULL) return;
4364     code_flusher_ = new CodeFlusher(isolate());
4365   } else {
4366     if (code_flusher_ == NULL) return;
4367     code_flusher_->EvictAllCandidates();
4368     delete code_flusher_;
4369     code_flusher_ = NULL;
4370   }
4371 
4372   if (FLAG_trace_code_flushing) {
4373     PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
4374   }
4375 }
4376 
4377 
4378 // TODO(1466) ReportDeleteIfNeeded is not called currently.
4379 // Our profiling tools do not expect intersections between
4380 // code objects. We should either reenable it or change our tools.
ReportDeleteIfNeeded(HeapObject * obj,Isolate * isolate)4381 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
4382                                                 Isolate* isolate) {
4383   if (obj->IsCode()) {
4384     PROFILE(isolate, CodeDeleteEvent(obj->address()));
4385   }
4386 }
4387 
4388 
isolate() const4389 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
4390 
4391 
Initialize()4392 void MarkCompactCollector::Initialize() {
4393   MarkCompactMarkingVisitor::Initialize();
4394   IncrementalMarking::Initialize();
4395 }
4396 
4397 
IsTypedSlot(ObjectSlot slot)4398 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
4399   return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
4400 }
4401 
4402 
AddTo(SlotsBufferAllocator * allocator,SlotsBuffer ** buffer_address,SlotType type,Address addr,AdditionMode mode)4403 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
4404                         SlotsBuffer** buffer_address, SlotType type,
4405                         Address addr, AdditionMode mode) {
4406   SlotsBuffer* buffer = *buffer_address;
4407   if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
4408     if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
4409       allocator->DeallocateChain(buffer_address);
4410       return false;
4411     }
4412     buffer = allocator->AllocateBuffer(buffer);
4413     *buffer_address = buffer;
4414   }
4415   DCHECK(buffer->HasSpaceForTypedSlot());
4416   buffer->Add(reinterpret_cast<ObjectSlot>(type));
4417   buffer->Add(reinterpret_cast<ObjectSlot>(addr));
4418   return true;
4419 }
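
// Added note (illustrative sketch): a "typed" record occupies two consecutive
// entries in slots_.  The first entry is the SlotType smuggled through an
// ObjectSlot cast, the second is the payload address, e.g.:
//
//   slots_[i]     = reinterpret_cast<ObjectSlot>(CODE_TARGET_SLOT);  // tag
//   slots_[i + 1] = reinterpret_cast<ObjectSlot>(addr);              // payload
//
// IsTypedSlot() can tell a tag from a real slot pointer because every
// SlotType value is below NUMBER_OF_SLOT_TYPES, whereas genuine heap slot
// addresses are assumed to be much larger.  UpdateSlots() below relies on
// exactly this layout when replaying the buffer.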


static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
  if (RelocInfo::IsCodeTarget(rmode)) {
    return SlotsBuffer::CODE_TARGET_SLOT;
  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
    return SlotsBuffer::DEBUG_TARGET_SLOT;
  } else if (RelocInfo::IsJSReturn(rmode)) {
    return SlotsBuffer::JS_RETURN_SLOT;
  }
  UNREACHABLE();
  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
}


void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  RelocInfo::Mode rmode = rinfo->rmode();
  if (target_page->IsEvacuationCandidate() &&
      (rinfo->host() == NULL ||
       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
    bool success;
    if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
      // This doesn't need to be typed since it is just a normal heap pointer.
      Object** target_pointer =
          reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
      success = SlotsBuffer::AddTo(
          &slots_buffer_allocator_, target_page->slots_buffer_address(),
          target_pointer, SlotsBuffer::FAIL_ON_OVERFLOW);
    } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
      success = SlotsBuffer::AddTo(
          &slots_buffer_allocator_, target_page->slots_buffer_address(),
          SlotsBuffer::CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address(),
          SlotsBuffer::FAIL_ON_OVERFLOW);
    } else {
      success = SlotsBuffer::AddTo(
          &slots_buffer_allocator_, target_page->slots_buffer_address(),
          SlotTypeForRMode(rmode), rinfo->pc(), SlotsBuffer::FAIL_ON_OVERFLOW);
    }
    if (!success) {
      EvictEvacuationCandidate(target_page);
    }
  }
}
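
// Added note (illustrative): RecordRelocSlot() stores the slot in one of two
// shapes, matching the SlotsBuffer encoding described above:
//
//   untyped  - AddTo(..., target_pointer, FAIL_ON_OVERFLOW)
//              a plain Object** (constant pool entry holding a heap object)
//   typed    - AddTo(..., SlotTypeForRMode(rmode), rinfo->pc(),
//                    FAIL_ON_OVERFLOW)
//              the pc plus a tag telling the updater how to rewrite the
//              relocated instruction later
//
// If AddTo() fails because the buffer chain has grown past its threshold,
// the page is simply evicted from the evacuation candidate set rather than
// being compacted.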


void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  if (target_page->IsEvacuationCandidate() &&
      !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
                            target_page->slots_buffer_address(),
                            SlotsBuffer::CODE_ENTRY_SLOT, slot,
                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
      EvictEvacuationCandidate(target_page);
    }
  }
}


void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
  DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
  if (is_compacting()) {
    Code* host =
        isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
            pc);
    MarkBit mark_bit = Marking::MarkBitFrom(host);
    if (Marking::IsBlack(mark_bit)) {
      RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
      RecordRelocSlot(&rinfo, target);
    }
  }
}


static inline SlotsBuffer::SlotType DecodeSlotType(
    SlotsBuffer::ObjectSlot slot) {
  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
}


void SlotsBuffer::UpdateSlots(Heap* heap) {
  PointersUpdatingVisitor v(heap);

  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
    ObjectSlot slot = slots_[slot_idx];
    if (!IsTypedSlot(slot)) {
      PointersUpdatingVisitor::UpdateSlot(heap, slot);
    } else {
      ++slot_idx;
      DCHECK(slot_idx < idx_);
      UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
                 reinterpret_cast<Address>(slots_[slot_idx]));
    }
  }
}
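
// Added note (illustrative): UpdateSlots() mirrors the AddTo() encoding; a
// typed record consumes two entries, which is why slot_idx is advanced a
// second time in the else branch:
//
//   [untyped][untyped][tag][addr][untyped]...
//                      \_________/ handled via DecodeSlotType()
//
// UpdateSlotsWithFilter() below is the same walk, except that entries lying
// on invalidated code objects are skipped.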


void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
  PointersUpdatingVisitor v(heap);

  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
    ObjectSlot slot = slots_[slot_idx];
    if (!IsTypedSlot(slot)) {
      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
        PointersUpdatingVisitor::UpdateSlot(heap, slot);
      }
    } else {
      ++slot_idx;
      DCHECK(slot_idx < idx_);
      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
      if (!IsOnInvalidatedCodeObject(pc)) {
        UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
                   reinterpret_cast<Address>(slots_[slot_idx]));
      }
    }
  }
}


SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
  return new SlotsBuffer(next_buffer);
}


void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
  delete buffer;
}


void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
  SlotsBuffer* buffer = *buffer_address;
  while (buffer != NULL) {
    SlotsBuffer* next_buffer = buffer->next();
    DeallocateBuffer(buffer);
    buffer = next_buffer;
  }
  *buffer_address = NULL;
}
}
}  // namespace v8::internal