// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
  return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                     IncrementalMarking::FORCE_MARKING,
                     IncrementalMarking::DO_NOT_FORCE_COMPLETION);
}


IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      observer_(*this, kAllocatedThreshold),
      state_(STOPPED),
      is_compacting_(false),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      bytes_rescanned_(0),
      should_hurry_(false),
      marking_speed_(0),
      bytes_scanned_(0),
      allocated_(0),
      write_barriers_invoked_since_last_step_(0),
      idle_marking_delay_counter_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0),
      was_activated_(false),
      finalize_marking_completed_(false),
      incremental_marking_finalization_rounds_(0),
      request_type_(COMPLETE_MARKING) {}


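// Incremental write barrier helper: when a black (already scanned) object is
// written a pointer to a white (unmarked) object, the target is greyed and
// pushed so it will be scanned later. The return value tells callers whether
// the slot must additionally be recorded for the compaction phase.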
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
  HeapObject* value_heap_obj = HeapObject::cast(value);
  MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
  DCHECK(!Marking::IsImpossible(value_bit));

  MarkBit obj_bit = Marking::MarkBitFrom(obj);
  DCHECK(!Marking::IsImpossible(obj_bit));
  bool is_black = Marking::IsBlack(obj_bit);

  if (is_black && Marking::IsWhite(value_bit)) {
    WhiteToGreyAndPush(value_heap_obj, value_bit);
    RestartIfNotMarking();
  }
  return is_compacting_ && is_black;
}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, value) && slot != NULL) {
    // The object is not going to be rescanned, so we need to record the slot.
    heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
  }
}


void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}


void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        host, reinterpret_cast<Address>(slot), value);
  }
}


void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  if (BaseRecordWrite(obj, value)) {
      // Object is not going to be rescanned.  We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
  }
}


void IncrementalMarking::RecordWrites(HeapObject* obj) {
  if (IsMarking()) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
        chunk->set_progress_bar(0);
      }
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
  }
}


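// Demotes a black object back to grey and puts it at the front of the marking
// deque so it is rescanned. Live-byte and bytes-scanned counters are adjusted,
// and if too many bytes have been queued for rescanning the marker concludes
// it cannot keep up with the mutator and switches to maximum marking speed.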
void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
                                               MarkBit mark_bit) {
  DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
  DCHECK(obj->Size() >= 2 * kPointerSize);
  DCHECK(IsMarking());
  Marking::BlackToGrey(mark_bit);
  int obj_size = obj->Size();
  MemoryChunk::IncrementLiveBytesFromGC(obj, -obj_size);
  bytes_scanned_ -= obj_size;
  int64_t old_bytes_rescanned = bytes_rescanned_;
  bytes_rescanned_ = old_bytes_rescanned + obj_size;
  if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
      // If we have queued twice the heap size for rescanning then we are
      // going around in circles, scanning the same objects again and again
      // as the program mutates the heap faster than we can incrementally
      // trace it.  In this case we switch to non-incremental marking in
      // order to finish off this marking phase.
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(
            heap()->isolate(),
            "Hurrying incremental marking because of lack of progress\n");
      }
      marking_speed_ = kMaxMarkingSpeed;
    }
  }

  heap_->mark_compact_collector()->marking_deque()->Unshift(obj);
}


void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
  Marking::WhiteToGrey(mark_bit);
  heap_->mark_compact_collector()->marking_deque()->Push(obj);
}


static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj, -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object, size);
}


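// Marking visitor used while incremental marking is running. It records every
// visited slot for the evacuation phase and greys referenced objects instead
// of scanning them eagerly; large fixed arrays in the large-object space are
// scanned in chunks via a per-page progress bar so a single huge array cannot
// blow the step budget.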
class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk of
      // the array and try to push it onto the marking deque again until it is
      // fully scanned. Fall back to scanning it through to the end in case this
      // fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointers(heap, object, HeapObject::RawField(object, start_offset),
                      HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end =
            heap->mark_compact_collector()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        if (Marking::IsGrey(Marking::MarkBitFrom(object))) {
          heap->mark_compact_collector()->marking_deque()->Unshift(object);
        } else {
          DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
          heap->mark_compact_collector()->UnshiftBlack(object);
        }
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark cache black with a separate pass when we finish marking.
    // Note that GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
    Object* target = *p;
    if (target->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(object, p, target);
      MarkObject(heap, target);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
                                   Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* target = *p;
      if (target->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(object, p, target);
        MarkObject(heap, target);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    IncrementalMarking::MarkObject(heap, HeapObject::cast(obj));
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      Marking::MarkBlack(mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(heap_object, heap_object->Size());
      return true;
    }
    return false;
  }
};


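// Root visitor for incremental marking: greys every heap object reachable from
// a strong root so the marker works from an up-to-date root set.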
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitPointer(Object** p) override { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    IncrementalMarking::MarkObject(heap_, HeapObject::cast(obj));
  }

  Heap* heap_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


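// Page-flag helpers used when incremental marking starts and stops. Old-space
// pages always keep POINTERS_FROM_HERE_ARE_INTERESTING and toggle
// POINTERS_TO_HERE_ARE_INTERESTING with marking; new-space pages always keep
// POINTERS_TO_HERE_ARE_INTERESTING (and SCAN_ON_SCAVENGE) and toggle
// POINTERS_FROM_HERE_ARE_INTERESTING with marking.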
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
  return CanBeActivated() &&
         heap_->HeapIsFullEnoughToStartIncrementalMarking(
             heap_->old_generation_allocation_limit());
}


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::CanBeActivated() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  // Don't switch on for very small heaps.
  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled() &&
         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially stub is generated in STORE_BUFFER_ONLY mode thus
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


void IncrementalMarking::NotifyOfHighPromotionRate() {
  if (IsMarking()) {
    if (marking_speed_ < kFastMarking) {
      if (FLAG_trace_gc) {
        PrintIsolate(heap()->isolate(),
                     "Increasing marking speed to %d "
                     "due to high promotion rate\n",
                     static_cast<int>(kFastMarking));
      }
      marking_speed_ = kFastMarking;
    }
  }
}


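// Walks the heap's code-stub dictionary and switches every RecordWrite stub to
// the given mode, so generated code uses the write barrier that matches the
// current marking/compaction state.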
static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


void IncrementalMarking::Start(const char* reason) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start (%s)\n",
           (reason == nullptr) ? "unknown reason" : reason);
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking_start());
  ResetStepCounters();

  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking();
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->AddInlineAllocationObserver(&observer_);

  incremental_marking_job()->Start(heap_);
}


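// Transitions into the MARKING state: decides whether to compact, patches the
// RecordWrite stubs accordingly, sets up the marking deque, activates the
// incremental write barrier on all pages, and greys the strong roots.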
void IncrementalMarking::StartMarking() {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact &&
                   heap_->mark_compact_collector()->StartCompaction(
                       MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize(
      MarkCompactCollector::kMaxMarkingDequeSize);

  ActivateIncrementalWriteBarrier();

// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}


void IncrementalMarking::MarkRoots() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


void IncrementalMarking::MarkObjectGroups() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);
  heap_->isolate()->global_handles()->IterateObjectGroups(
      &visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);
  heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
  heap_->isolate()->global_handles()->RemoveObjectGroups();
}


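// Walks the list of weak cells encountered so far. Cells whose values are
// already marked are unlinked from the list (they need no clearing during the
// atomic pause), recording the value slot in case it points into an evacuation
// candidate; the remaining cells stay linked for later processing.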
void IncrementalMarking::ProcessWeakCells() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  Object* the_hole_value = heap()->the_hole_value();
  Object* weak_cell_obj = heap()->encountered_weak_cells();
  Object* weak_cell_head = Smi::FromInt(0);
  WeakCell* prev_weak_cell_obj = NULL;
  while (weak_cell_obj != Smi::FromInt(0)) {
    WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
    // We do not insert cleared weak cells into the list, so the value
    // cannot be a Smi here.
    HeapObject* value = HeapObject::cast(weak_cell->value());
    // Remove weak cells with live objects from the list, they do not need
    // clearing.
    if (MarkCompactCollector::IsMarked(value)) {
      // Record slot, if value is pointing to an evacuation candidate.
      Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
      heap_->mark_compact_collector()->RecordSlot(weak_cell, slot, *slot);
      // Remove entry somewhere after top.
      if (prev_weak_cell_obj != NULL) {
        prev_weak_cell_obj->set_next(weak_cell->next());
      }
      weak_cell_obj = weak_cell->next();
      weak_cell->clear_next(the_hole_value);
    } else {
      if (weak_cell_head == Smi::FromInt(0)) {
        weak_cell_head = weak_cell;
      }
      prev_weak_cell_obj = weak_cell;
      weak_cell_obj = weak_cell->next();
    }
  }
  // Top may have changed.
  heap()->set_encountered_weak_cells(weak_cell_head);
}


bool ShouldRetainMap(Map* map, int age) {
  if (age == 0) {
    // The map has aged. Do not retain this map.
    return false;
  }
  Object* constructor = map->GetConstructor();
  if (!constructor->IsHeapObject() ||
      Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(constructor)))) {
    // The constructor is dead, no new objects with this map can
    // be created. Do not retain this map.
    return false;
  }
  return true;
}


void IncrementalMarking::RetainMaps() {
  // Do not retain dead maps if the flag disables it, or if there is
  // - memory pressure (reduce_memory_footprint_), or
  // - a GC requested by tests or dev-tools (abort_incremental_marking_).
  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
                                   heap()->ShouldAbortIncrementalMarking() ||
                                   FLAG_retain_maps_for_n_gc == 0;
  ArrayList* retained_maps = heap()->retained_maps();
  int length = retained_maps->Length();
  // The number_of_disposed_maps separates maps in the retained_maps
  // array that were created before and after context disposal.
  // We do not age and retain disposed maps to avoid memory leaks.
  int number_of_disposed_maps = heap()->number_of_disposed_maps_;
  for (int i = 0; i < length; i += 2) {
    DCHECK(retained_maps->Get(i)->IsWeakCell());
    WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
    if (cell->cleared()) continue;
    int age = Smi::cast(retained_maps->Get(i + 1))->value();
    int new_age;
    Map* map = Map::cast(cell->value());
    MarkBit map_mark = Marking::MarkBitFrom(map);
    if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
        Marking::IsWhite(map_mark)) {
      if (ShouldRetainMap(map, age)) {
        MarkObject(heap(), map);
      }
      Object* prototype = map->prototype();
      if (age > 0 && prototype->IsHeapObject() &&
          Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) {
        // The prototype is not marked, age the map.
        new_age = age - 1;
      } else {
        // The prototype and the constructor are marked; this map keeps only
        // the transition tree alive, not JSObjects. Do not age the map.
        new_age = age;
      }
    } else {
      new_age = FLAG_retain_maps_for_n_gc;
    }
    // Compact the array and update the age.
    if (new_age != age) {
      retained_maps->Set(i + 1, Smi::FromInt(new_age));
    }
  }
}


void IncrementalMarking::FinalizeIncrementally() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  double start = heap_->MonotonicallyIncreasingTimeInMs();

  int old_marking_deque_top =
      heap_->mark_compact_collector()->marking_deque()->top();

  // After finishing incremental marking, we try to discover all unmarked
  // objects to reduce the marking load in the final pause.
  // 1) We scan and mark the roots again to find all changes to the root set.
  // 2) We mark the object groups.
  // 3) Age and retain maps embedded in optimized code.
  // 4) Remove weak cells with live values from the list of weak cells; they
  // do not need processing during GC.
  MarkRoots();
  MarkObjectGroups();
  if (incremental_marking_finalization_rounds_ == 0) {
    // Map retaining is needed for performance, not correctness,
    // so we can do it only once at the beginning of the finalization.
    RetainMaps();
  }
  ProcessWeakCells();

  int marking_progress =
      abs(old_marking_deque_top -
          heap_->mark_compact_collector()->marking_deque()->top());

  double end = heap_->MonotonicallyIncreasingTimeInMs();
  double delta = end - start;
  heap_->tracer()->AddMarkingTime(delta);
  heap_->tracer()->AddIncrementalMarkingFinalizationStep(delta);
  if (FLAG_trace_incremental_marking) {
    PrintF(
        "[IncrementalMarking] Finalize incrementally round %d, "
        "spent %d ms, marking progress %d.\n",
        incremental_marking_finalization_rounds_, static_cast<int>(delta),
        marking_progress);
  }

  ++incremental_marking_finalization_rounds_;
  if ((incremental_marking_finalization_rounds_ >=
       FLAG_max_incremental_marking_finalization_rounds) ||
      (marking_progress <
       FLAG_min_progress_during_incremental_marking_finalization)) {
    finalize_marking_completed_ = true;
  }
}


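// After a scavenge, entries in the marking deque may refer to objects that
// were moved or died. Forwarded new-space entries are rewritten to their new
// locations; dead objects and one-word fillers are dropped, compacting the
// deque in place.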
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  int current = marking_deque->bottom();
  int mask = marking_deque->mask();
  int limit = marking_deque->top();
  HeapObject** array = marking_deque->array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one word filler objects that appear on the
      // stack when we perform in place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque->set_top(new_top);
}


void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkObject(heap_, map);

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}


void IncrementalMarking::MarkObject(Heap* heap, HeapObject* obj) {
  MarkBit mark_bit = Marking::MarkBitFrom(obj);
  if (Marking::IsWhite(mark_bit)) {
    heap->incremental_marking()->WhiteToGreyAndPush(obj, mark_bit);
  }
}


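// Drains the marking deque until it is empty or roughly bytes_to_process bytes
// of object bodies have been visited; returns the number of bytes actually
// processed (excluding the unscanned tail of partially scanned large objects).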
intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_processed += size - unscanned_bytes_of_large_object_;
  }
  return bytes_processed;
}


void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty()) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}


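// Finishes marking synchronously: if marking is in progress, the entire
// marking deque is drained in one go. Afterwards the caches that were
// deliberately kept grey (polymorphic code cache, normalized map caches)
// are turned black.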
void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = heap_->MonotonicallyIncreasingTimeInMs();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = heap_->MonotonicallyIncreasingTimeInMs();
      double delta = end - start;
      heap_->tracer()->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache,
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache, cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void IncrementalMarking::Stop() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Stopping.\n");
  }

  heap_->new_space()->RemoveInlineAllocationObserver(&observer_);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;

  heap_->new_space()->RemoveInlineAllocationObserver(&observer_);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  DCHECK(heap_->mark_compact_collector()->marking_deque()->IsEmpty());
  heap_->isolate()->stack_guard()->ClearGC();
}


void IncrementalMarking::FinalizeMarking(CompletionAction action) {
  DCHECK(!finalize_marking_completed_);
  if (FLAG_trace_incremental_marking) {
    PrintF(
        "[IncrementalMarking] requesting finalization of incremental "
        "marking.\n");
  }
  request_type_ = FINALIZATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now.  This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context).  If a few things get allocated between now and then
  // that shouldn't make us do a scavenge and keep being incremental, so we set
  // the should-hurry flag to indicate that there can't be much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  finalize_marking_completed_ = false;
  incremental_marking_finalization_rounds_ = 0;
}


double IncrementalMarking::AdvanceIncrementalMarking(
    intptr_t step_size_in_bytes, double deadline_in_ms,
    IncrementalMarking::StepActions step_actions) {
  DCHECK(!IsStopped());

  if (step_size_in_bytes == 0) {
    step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
        static_cast<size_t>(GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs),
        static_cast<size_t>(
            heap()
                ->tracer()
                ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()));
  }

  double remaining_time_in_ms = 0.0;
  do {
    Step(step_size_in_bytes, step_actions.completion_action,
         step_actions.force_marking, step_actions.force_completion);
    remaining_time_in_ms =
        deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
  } while (remaining_time_in_ms >=
               2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
           !IsComplete() &&
           !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
  return remaining_time_in_ms;
}


void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
    heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags,
                                    "old space step");
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}


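// Heuristically raises marking_speed_ when the marker appears to be falling
// behind: periodically after a fixed number of steps, when old-generation
// headroom is nearly exhausted, when the heap has grown substantially since
// marking started, or when promotion outpaces the bytes scanned so far.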
void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(), "Speed up marking after %d steps\n",
                   static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_incremental_marking)
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice as fast as we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(),
                     "Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(), "Marking speed increased to %d\n",
                     marking_speed_);
      }
    }
  }
}


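// Performs one incremental marking step, driven by the bytes allocated and the
// write-barrier activity since the last step. In the SWEEPING state it helps
// finish sweeping and then starts marking; in the MARKING state it processes
// part of the marking deque and, once the deque is empty, requests
// finalization or completion. Returns the number of bytes processed.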
intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                  CompletionAction action,
                                  ForceMarkingAction marking,
                                  ForceCompletionAction completion) {
  DCHECK(allocated_bytes >= 0);

  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return 0;
  }

  allocated_ += allocated_bytes;

  if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return 0;
  }

  // If an idle notification happened recently, we delay marking steps.
  if (marking == DO_NOT_FORCE_MARKING &&
      heap_->RecentIdleNotificationHappened()) {
    return 0;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return 0;

  intptr_t bytes_processed = 0;
  {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    double start = heap_->MonotonicallyIncreasingTimeInMs();

    // The marking speed is driven either by the allocation rate or by the rate
    // at which we are having to check the color of objects in the write
    // barrier.
    // It is possible for a tight non-allocating loop to run a lot of write
    // barriers before we get here and check them (marking can only take place
    // on allocation), so to reduce the lumpiness we don't use the write
    // barriers invoked since last step directly to determine the amount of
    // work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
        Max(allocated_, write_barriers_invoked_since_last_step_);
    allocated_ = 0;
    write_barriers_invoked_since_last_step_ = 0;

    bytes_scanned_ += bytes_to_process;

    if (state_ == SWEEPING) {
      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
          (heap_->mark_compact_collector()->IsSweepingCompleted() ||
           !FLAG_concurrent_sweeping)) {
        heap_->mark_compact_collector()->EnsureSweepingCompleted();
      }
      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
        bytes_scanned_ = 0;
        StartMarking();
      }
    } else if (state_ == MARKING) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
        if (completion == FORCE_COMPLETION ||
            IsIdleMarkingDelayCounterLimitReached()) {
          if (!finalize_marking_completed_) {
            FinalizeMarking(action);
          } else {
            MarkingComplete(action);
          }
        } else {
          IncrementIdleMarkingDelayCounter();
        }
      }
    }

    steps_count_++;

    // Speed up marking if we are marking too slow or if we are almost done
    // with marking.
    SpeedUp();

    double end = heap_->MonotonicallyIncreasingTimeInMs();
    double duration = (end - start);
    // Note that we report zero bytes here when sweeping was in progress or
    // when we just started incremental marking. In these cases we did not
    // process the marking deque.
    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  }
  return bytes_processed;
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}


bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}


void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
  idle_marking_delay_counter_++;
}


void IncrementalMarking::ClearIdleMarkingDelayCounter() {
  idle_marking_delay_counter_ = 0;
}
}  // namespace internal
}  // namespace v8
1256