// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      initial_old_generation_size_(0),
      bytes_marked_ahead_of_schedule_(0),
      unscanned_bytes_of_large_object_(0),
      idle_marking_delay_counter_(0),
      incremental_marking_finalization_rounds_(0),
      is_compacting_(false),
      should_hurry_(false),
      was_activated_(false),
      black_allocation_(false),
      finalize_marking_completed_(false),
      request_type_(NONE),
      new_generation_observer_(*this, kAllocatedThreshold),
      old_generation_observer_(*this, kAllocatedThreshold) {}

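// Write-barrier slow path shared by the RecordWrite* variants below. If a
// black |obj| now references a white |value|, the value is marked grey and
// pushed onto the marking deque so it gets scanned. Returns true when the
// caller should additionally record the slot for the compactor, i.e. when
// compaction is active and |obj| is black.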
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
  HeapObject* value_heap_obj = HeapObject::cast(value);
  MarkBit value_bit = ObjectMarking::MarkBitFrom(value_heap_obj);
  DCHECK(!Marking::IsImpossible(value_bit));

  MarkBit obj_bit = ObjectMarking::MarkBitFrom(obj);
  DCHECK(!Marking::IsImpossible(obj_bit));
  bool is_black = Marking::IsBlack(obj_bit);

  if (is_black && Marking::IsWhite(value_bit)) {
    WhiteToGreyAndPush(value_heap_obj, value_bit);
    RestartIfNotMarking();
  }
  return is_compacting_ && is_black;
}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, value) && slot != NULL) {
    // Object is not going to be rescanned. We need to record the slot.
    heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
  }
}


void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  isolate->heap()->incremental_marking()->RecordWrite(obj, slot, *slot);
}

// static
void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host,
                                                        Object** slot,
                                                        Isolate* isolate) {
  DCHECK(host->IsJSFunction());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  Code* value = Code::cast(
      Code::GetObjectFromEntryAddress(reinterpret_cast<Address>(slot)));
  marking->RecordWriteOfCodeEntry(host, slot, value);
}

void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        host, reinterpret_cast<Address>(slot), value);
  }
}

void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
                                                 Object* value) {
  if (BaseRecordWrite(host, value)) {
    // Object is not going to be rescanned. We need to record the slot.
    heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
  }
}

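// Transitions |obj| from white to grey and pushes it onto the marking deque
// so that its body is scanned later.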
void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
  Marking::WhiteToGrey(mark_bit);
  heap_->mark_compact_collector()->marking_deque()->Push(obj);
}


static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = ObjectMarking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj, -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}

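// Moves the mark of an object that is being resized in place from
// |old_start| to |new_start| on the same page: a black object stays black at
// the new address, while a grey object is re-pushed onto the marking deque so
// it is scanned from its new start address.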
void IncrementalMarking::TransferMark(Heap* heap, Address old_start,
                                      Address new_start) {
  // This is only used when resizing an object.
  DCHECK(MemoryChunk::FromAddress(old_start) ==
         MemoryChunk::FromAddress(new_start));

  if (!heap->incremental_marking()->IsMarking()) return;

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (old_start == new_start) return;

  MarkBit new_mark_bit = ObjectMarking::MarkBitFrom(new_start);
  MarkBit old_mark_bit = ObjectMarking::MarkBitFrom(old_start);

#ifdef DEBUG
  Marking::ObjectColor old_color = Marking::Color(old_mark_bit);
#endif

  if (Marking::IsBlack(old_mark_bit)) {
    Marking::BlackToWhite(old_mark_bit);
    Marking::MarkBlack(new_mark_bit);
    return;
  } else if (Marking::IsGrey(old_mark_bit)) {
    Marking::GreyToWhite(old_mark_bit);
    heap->incremental_marking()->WhiteToGreyAndPush(
        HeapObject::FromAddress(new_start), new_mark_bit);
    heap->incremental_marking()->RestartIfNotMarking();
  }

#ifdef DEBUG
  Marking::ObjectColor new_color = Marking::Color(new_mark_bit);
  DCHECK(new_color == old_color);
#endif
}

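// Marking visitor used while incremental marking is running. It differs from
// the full mark-compact visitor in two ways: large FixedArrays are scanned in
// chunks via a per-page progress bar, and the normalized map cache of a
// native context is only marked grey (without being enqueued) so it can be
// handled in a separate pass at the end of marking.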
class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      DCHECK(!FLAG_use_marking_progress_bar ||
             chunk->owner()->identity() == LO_SPACE);
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk of
      // the array and try to push it onto the marking deque again until it is
      // fully scanned. Fall back to scanning it through to the end in case
      // this fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointers(heap, object, HeapObject::RawField(object, start_offset),
                      HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end =
            heap->mark_compact_collector()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        if (Marking::IsGrey(ObjectMarking::MarkBitFrom(object))) {
          heap->mark_compact_collector()->marking_deque()->Unshift(object);
        } else {
          DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
          heap->mark_compact_collector()->UnshiftBlack(object);
        }
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark the cache black with a separate pass when we finish
    // marking. Note that GC can happen when the context is not fully
    // initialized, so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined(map->GetIsolate())) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
    Object* target = *p;
    if (target->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(object, p, target);
      MarkObject(heap, target);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
                                   Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* target = *p;
      if (target->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(object, p, target);
        MarkObject(heap, target);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    IncrementalMarking::MarkGrey(heap, HeapObject::cast(obj));
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = ObjectMarking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      Marking::MarkBlack(mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(heap_object, heap_object->Size());
      return true;
    }
    return false;
  }
};

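// Re-visits an object that is already black so that any references it
// currently holds are marked grey. For objects in the large object space the
// progress bar is reset first, since the whole object must be visited.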
void IncrementalMarking::IterateBlackObject(HeapObject* object) {
  if (IsMarking() && Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
    Page* page = Page::FromAddress(object->address());
    if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
      // IterateBlackObject requires us to visit the whole object.
      page->ResetProgressBar();
    }
    Map* map = object->map();
    MarkGrey(heap_, map);
    IncrementalMarkingMarkingVisitor::IterateBody(map, object);
  }
}

class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitPointer(Object** p) override { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    IncrementalMarking::MarkGrey(heap_, HeapObject::cast(obj));
  }

  Heap* heap_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  for (Page* p : *space) {
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  for (Page* p : *space) {
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  for (LargePage* lop : *heap_->lo_space()) {
    SetOldSpacePageFlags(lop, false, false);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  for (Page* p : *space) {
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  for (Page* p : *space) {
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  for (LargePage* lop : *heap_->lo_space()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
  }
}


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::CanBeActivated() {
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled();
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially the stub is generated in STORE_BUFFER_ONLY mode, so we don't
    // need to do anything if incremental marking is not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  Isolate* isolate = heap->isolate();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(isolate, k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}

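// Starts incremental marking for the given |gc_reason|. Allocation observers
// are installed on all spaces so that marking makes progress as the mutator
// allocates. If sweeping from the previous mark-compact is still in progress,
// we only enter the SWEEPING state here; marking starts once sweeping has
// finished.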
void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
  if (FLAG_trace_incremental_marking) {
    int old_generation_size_mb =
        static_cast<int>(heap()->PromotedSpaceSizeOfObjects() / MB);
    int old_generation_limit_mb =
        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start (%s): old generation %dMB, limit %dMB, "
        "slack %dMB\n",
        Heap::GarbageCollectionReasonToString(gc_reason),
        old_generation_size_mb, old_generation_limit_mb,
        Max(0, old_generation_limit_mb - old_generation_size_mb));
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  Counters* counters = heap_->isolate()->counters();

  counters->incremental_marking_reason()->AddSample(
      static_cast<int>(gc_reason));
  HistogramTimerScope incremental_marking_scope(
      counters->gc_incremental_marking_start());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
  heap_->tracer()->NotifyIncrementalMarkingStart();

  start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
  initial_old_generation_size_ = heap_->PromotedSpaceSizeOfObjects();
  old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
  bytes_allocated_ = 0;
  bytes_marked_ahead_of_schedule_ = 0;
  should_hurry_ = false;
  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking();
  } else {
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  SpaceIterator it(heap_);
  while (it.has_next()) {
    Space* space = it.next();
    if (space == heap_->new_space()) {
      space->AddAllocationObserver(&new_generation_observer_);
    } else {
      space->AddAllocationObserver(&old_generation_observer_);
    }
  }

  incremental_marking_job()->Start(heap_);
}

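// Transitions into the MARKING state: decides whether to compact, patches the
// RecordWrite stubs, activates the incremental write barrier, and marks the
// strong roots grey as the initial marking work.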
void IncrementalMarking::StartMarking() {
  if (heap_->isolate()->serializer_enabled()) {
    // Black allocation currently starts when we start incremental marking,
    // but we cannot enable black allocation while deserializing. Hence, we
    // have to delay the start of incremental marking in that case.
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start delayed - serializer\n");
    }
    return;
  }
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start marking\n");
  }

  is_compacting_ =
      !FLAG_never_compact && heap_->mark_compact_collector()->StartCompaction();

  state_ = MARKING;

  if (heap_->UsingEmbedderHeapTracer()) {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE);
    heap_->embedder_heap_tracer()->TracePrologue();
  }

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  heap_->mark_compact_collector()->marking_deque()->StartUsing();

  ActivateIncrementalWriteBarrier();

  // Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
  }
}

void IncrementalMarking::StartBlackAllocation() {
  DCHECK(FLAG_black_allocation);
  DCHECK(IsMarking());
  black_allocation_ = true;
  heap()->old_space()->MarkAllocationInfoBlack();
  heap()->map_space()->MarkAllocationInfoBlack();
  heap()->code_space()->MarkAllocationInfoBlack();
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation started\n");
  }
}

void IncrementalMarking::FinishBlackAllocation() {
  if (black_allocation_) {
    black_allocation_ = false;
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Black allocation finished\n");
    }
  }
}

void IncrementalMarking::AbortBlackAllocation() {
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation aborted\n");
  }
}

void IncrementalMarking::MarkRoots() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


void IncrementalMarking::MarkObjectGroups() {
  TRACE_GC(heap_->tracer(),
           GCTracer::Scope::MC_INCREMENTAL_FINALIZE_OBJECT_GROUPING);

  DCHECK(!heap_->UsingEmbedderHeapTracer());
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkGrey);
  heap_->isolate()->global_handles()->IterateObjectGroups(
      &visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);
  heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
  heap_->isolate()->global_handles()->RemoveObjectGroups();
}

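// Walks the list of encountered weak cells and unlinks those whose values are
// already marked; they will not need clearing later. Weak cells whose values
// are still unmarked remain on the list.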
void IncrementalMarking::ProcessWeakCells() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  Object* the_hole_value = heap()->the_hole_value();
  Object* weak_cell_obj = heap()->encountered_weak_cells();
  Object* weak_cell_head = Smi::kZero;
  WeakCell* prev_weak_cell_obj = NULL;
  while (weak_cell_obj != Smi::kZero) {
    WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
    // We do not insert cleared weak cells into the list, so the value
    // cannot be a Smi here.
    HeapObject* value = HeapObject::cast(weak_cell->value());
    // Remove weak cells with live objects from the list; they do not need
    // clearing.
    if (MarkCompactCollector::IsMarked(value)) {
      // Record slot, if value is pointing to an evacuation candidate.
      Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
      heap_->mark_compact_collector()->RecordSlot(weak_cell, slot, *slot);
      // Remove entry somewhere after top.
      if (prev_weak_cell_obj != NULL) {
        prev_weak_cell_obj->set_next(weak_cell->next());
      }
      weak_cell_obj = weak_cell->next();
      weak_cell->clear_next(the_hole_value);
    } else {
      if (weak_cell_head == Smi::kZero) {
        weak_cell_head = weak_cell;
      }
      prev_weak_cell_obj = weak_cell;
      weak_cell_obj = weak_cell->next();
    }
  }
  // Top may have changed.
  heap()->set_encountered_weak_cells(weak_cell_head);
}


bool ShouldRetainMap(Map* map, int age) {
  if (age == 0) {
    // The map has aged. Do not retain this map.
    return false;
  }
  Object* constructor = map->GetConstructor();
  if (!constructor->IsHeapObject() ||
      Marking::IsWhite(
          ObjectMarking::MarkBitFrom(HeapObject::cast(constructor)))) {
    // The constructor is dead, no new objects with this map can
    // be created. Do not retain this map.
    return false;
  }
  return true;
}


void IncrementalMarking::RetainMaps() {
  // Do not retain dead maps if the flag disables it or there is
  // - memory pressure (reduce_memory_footprint_),
  // - GC is requested by tests or dev-tools (abort_incremental_marking_).
  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
                                   heap()->ShouldAbortIncrementalMarking() ||
                                   FLAG_retain_maps_for_n_gc == 0;
  ArrayList* retained_maps = heap()->retained_maps();
  int length = retained_maps->Length();
  // The number_of_disposed_maps separates maps in the retained_maps
  // array that were created before and after context disposal.
  // We do not age and retain disposed maps to avoid memory leaks.
  int number_of_disposed_maps = heap()->number_of_disposed_maps_;
  for (int i = 0; i < length; i += 2) {
    DCHECK(retained_maps->Get(i)->IsWeakCell());
    WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
    if (cell->cleared()) continue;
    int age = Smi::cast(retained_maps->Get(i + 1))->value();
    int new_age;
    Map* map = Map::cast(cell->value());
    MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
    if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
        Marking::IsWhite(map_mark)) {
      if (ShouldRetainMap(map, age)) {
        MarkGrey(heap(), map);
      }
      Object* prototype = map->prototype();
      if (age > 0 && prototype->IsHeapObject() &&
          Marking::IsWhite(
              ObjectMarking::MarkBitFrom(HeapObject::cast(prototype)))) {
        // The prototype is not marked, age the map.
        new_age = age - 1;
      } else {
        // The prototype and the constructor are marked, this map keeps only
        // the transition tree alive, not JSObjects. Do not age the map.
        new_age = age;
      }
    } else {
      new_age = FLAG_retain_maps_for_n_gc;
    }
    // Compact the array and update the age.
    if (new_age != age) {
      retained_maps->Set(i + 1, Smi::FromInt(new_age));
    }
  }
}

void IncrementalMarking::FinalizeIncrementally() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE_BODY);
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  double start = heap_->MonotonicallyIncreasingTimeInMs();

  int old_marking_deque_top =
      heap_->mark_compact_collector()->marking_deque()->top();

  // After finishing incremental marking, we try to discover all unmarked
  // objects to reduce the marking load in the final pause.
  // 1) We scan and mark the roots again to find all changes to the root set.
  // 2) We mark the object groups.
  // 3) Age and retain maps embedded in optimized code.
  // 4) Remove weak cells with live values from the list of weak cells; they
  //    do not need processing during GC.
  MarkRoots();
  if (!heap_->UsingEmbedderHeapTracer()) {
    MarkObjectGroups();
  }
  if (incremental_marking_finalization_rounds_ == 0) {
    // Map retaining is needed for performance, not correctness,
    // so we can do it only once at the beginning of the finalization.
    RetainMaps();
  }
  ProcessWeakCells();

  int marking_progress =
      abs(old_marking_deque_top -
          heap_->mark_compact_collector()->marking_deque()->top());

  marking_progress += static_cast<int>(heap_->wrappers_to_trace());

  double end = heap_->MonotonicallyIncreasingTimeInMs();
  double delta = end - start;
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Finalize incrementally round %d, "
        "spent %d ms, marking progress %d.\n",
        incremental_marking_finalization_rounds_, static_cast<int>(delta),
        marking_progress);
  }

  ++incremental_marking_finalization_rounds_;
  if ((incremental_marking_finalization_rounds_ >=
       FLAG_max_incremental_marking_finalization_rounds) ||
      (marking_progress <
       FLAG_min_progress_during_incremental_marking_finalization)) {
    finalize_marking_completed_ = true;
  }

  if (FLAG_black_allocation && !heap()->ShouldReduceMemory() &&
      !black_allocation_) {
    // TODO(hpayer): Move to an earlier point as soon as we make faster marking
    // progress.
    StartBlackAllocation();
  }
}

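// After a scavenge, entries on the marking deque that point into from-space
// must be rewritten to their forwarding addresses (or dropped if the object
// died or its copy is already black). One-word fillers left by in-place array
// operations are skipped as well.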
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  int current = marking_deque->bottom();
  int mask = marking_deque->mask();
  int limit = marking_deque->top();
  HeapObject** array = marking_deque->array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    // Only pointers to from space have to be updated.
    if (heap_->InFromSpace(obj)) {
      MapWord map_word = obj->map_word();
      // There may be objects on the marking deque that do not exist anymore,
      // e.g. left trimmed objects or objects from the root set (frames).
      // If these objects are dead at scavenging time, their marking deque
      // entries will not point to forwarding addresses. Hence, we can discard
      // them.
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        if (Marking::IsBlack(ObjectMarking::MarkBitFrom(dest->address())))
          continue;
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
        MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one-word filler objects that appear on the
      // stack when we perform in-place array shifts.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
      MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque->set_top(new_top);
}


void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkGrey(heap_, map);

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

#if ENABLE_SLOW_DCHECKS
  MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlack(obj, size);
}

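// Marks |object| grey and pushes it onto the marking deque if it is still
// white; grey and black objects are left untouched.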
void IncrementalMarking::MarkGrey(Heap* heap, HeapObject* object) {
  MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
  if (Marking::IsWhite(mark_bit)) {
    heap->incremental_marking()->WhiteToGreyAndPush(object, mark_bit);
  }
}

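// Transitions a grey |obj| to black and accounts |size| bytes as live on its
// page; objects that are already black are left as they are.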
void IncrementalMarking::MarkBlack(HeapObject* obj, int size) {
  MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
  if (Marking::IsBlack(mark_bit)) return;
  Marking::GreyToBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(obj, size);
}

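// Pops objects off the marking deque and visits them until either
// |bytes_to_process| bytes have been processed or the deque is empty; with
// FORCE_COMPLETION the byte budget is ignored and the deque is drained.
// Returns the number of bytes actually processed.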
intptr_t IncrementalMarking::ProcessMarkingDeque(
    intptr_t bytes_to_process, ForceCompletionAction completion) {
  intptr_t bytes_processed = 0;
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty() && (bytes_processed < bytes_to_process ||
                                       completion == FORCE_COMPLETION)) {
    HeapObject* obj = marking_deque->Pop();

    // Left trimming may result in white filler objects on the marking deque.
    // Ignore these objects.
    if (obj->IsFiller()) {
      DCHECK(Marking::IsImpossible(ObjectMarking::MarkBitFrom(obj)) ||
             Marking::IsWhite(ObjectMarking::MarkBitFrom(obj)));
      continue;
    }

    Map* map = obj->map();
    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_processed += size - unscanned_bytes_of_large_object_;
  }
  return bytes_processed;
}


void IncrementalMarking::Hurry() {
  // A scavenge may have pushed new objects on the marking deque (due to black
  // allocation) even in COMPLETE state. This may happen if scavenges are
  // forced e.g. in tests. It should not happen when COMPLETE was set when
  // incremental marking finished and a regular GC was triggered after that
  // because should_hurry_ will force a full GC.
  if (!heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking) {
      start = heap_->MonotonicallyIncreasingTimeInMs();
      if (FLAG_trace_incremental_marking) {
        heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque(0, FORCE_COMPLETION);
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking) {
      double end = heap_->MonotonicallyIncreasingTimeInMs();
      double delta = end - start;
      if (FLAG_trace_incremental_marking) {
        heap()->isolate()->PrintWithTimestamp(
            "[IncrementalMarking] Complete (hurry), spent %d ms.\n",
            static_cast<int>(delta));
      }
    }
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined(heap_->isolate())) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined(heap_->isolate())) {
      MarkBit mark_bit = ObjectMarking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache, cache->Size());
      }
    }
    context = Context::cast(context)->next_context_link();
  }
}


void IncrementalMarking::Stop() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    int old_generation_size_mb =
        static_cast<int>(heap()->PromotedSpaceSizeOfObjects() / MB);
    int old_generation_limit_mb =
        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
        "overshoot %dMB\n",
        old_generation_size_mb, old_generation_limit_mb,
        Max(0, old_generation_size_mb - old_generation_limit_mb));
  }

  SpaceIterator it(heap_);
  while (it.has_next()) {
    Space* space = it.next();
    if (space == heap_->new_space()) {
      space->RemoveAllocationObserver(&new_generation_observer_);
    } else {
      space->RemoveAllocationObserver(&old_generation_observer_);
    }
  }

  IncrementalMarking::set_should_hurry(false);
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
  FinishBlackAllocation();
}


void IncrementalMarking::Finalize() {
  Hurry();
  Stop();
}


void IncrementalMarking::FinalizeMarking(CompletionAction action) {
  DCHECK(!finalize_marking_completed_);
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] requesting finalization of incremental "
        "marking.\n");
  }
  request_type_ = FINALIZATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and then
  // that shouldn't make us do a scavenge and keep being incremental, so we set
  // the should-hurry flag to indicate that there can't be much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  finalize_marking_completed_ = false;
  incremental_marking_finalization_rounds_ = 0;
}

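// Performs marking steps of roughly kStepSizeInMs each until |deadline_in_ms|
// is reached, marking completes, or the marking deque is drained. Returns the
// remaining time until the deadline.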
double IncrementalMarking::AdvanceIncrementalMarking(
    double deadline_in_ms, CompletionAction completion_action,
    ForceCompletionAction force_completion, StepOrigin step_origin) {
  DCHECK(!IsStopped());

  double remaining_time_in_ms = 0.0;
  intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
      kStepSizeInMs,
      heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());

  do {
    Step(step_size_in_bytes, completion_action, force_completion, step_origin);
    remaining_time_in_ms =
        deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
  } while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
           !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
  return remaining_time_in_ms;
}


void IncrementalMarking::FinalizeSweeping() {
  DCHECK(state_ == SWEEPING);
  if (heap_->mark_compact_collector()->sweeping_in_progress() &&
      (!FLAG_concurrent_sweeping ||
       !heap_->mark_compact_collector()->sweeper().AreSweeperTasksRunning())) {
    heap_->mark_compact_collector()->EnsureSweepingCompleted();
  }
  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking();
  }
}

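// Returns the number of old-generation bytes allocated since the last step;
// marking has to process at least this much to keep up with the mutator.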
size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
  // Update bytes_allocated_ based on the allocation counter.
  size_t current_counter = heap_->OldGenerationAllocationCounter();
  bytes_allocated_ += current_counter - old_generation_allocation_counter_;
  old_generation_allocation_counter_ = current_counter;
  return bytes_allocated_;
}

size_t IncrementalMarking::StepSizeToMakeProgress() {
  // We increase step size gradually based on the time passed in order to
  // leave marking work to standalone tasks. The ramp up duration and the
  // target step count are chosen based on benchmarks.
  const int kRampUpIntervalMs = 300;
  const size_t kTargetStepCount = 128;
  const size_t kTargetStepCountAtOOM = 16;
  size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;

  if (heap()->IsCloseToOutOfMemory(oom_slack)) {
    return heap()->PromotedSpaceSizeOfObjects() / kTargetStepCountAtOOM;
  }

  size_t step_size = Max(initial_old_generation_size_ / kTargetStepCount,
                         IncrementalMarking::kAllocatedThreshold);
  double time_passed_ms =
      heap_->MonotonicallyIncreasingTimeInMs() - start_time_ms_;
  double factor = Min(time_passed_ms / kRampUpIntervalMs, 1.0);
  return static_cast<size_t>(factor * step_size);
}

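// Performs a marking step in response to old-generation allocation. The step
// size combines the bytes allocated since the last step with a
// progress-making component; work already done by background tasks (tracked
// in bytes_marked_ahead_of_schedule_) is credited against it.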
void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

  size_t bytes_to_process =
      StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();

  if (bytes_to_process >= IncrementalMarking::kAllocatedThreshold) {
    // The first step after Scavenge will see many allocated bytes.
    // Cap the step size to distribute the marking work more uniformly.
    size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
        kMaxStepSizeInMs,
        heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
    bytes_to_process = Min(bytes_to_process, max_step_size);

    size_t bytes_processed = 0;
    if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
      // Steps performed in tasks have put us ahead of schedule.
      // We skip processing of the marking deque here and thus
      // shift marking time from inside V8 to standalone tasks.
      bytes_marked_ahead_of_schedule_ -= bytes_to_process;
      bytes_processed = bytes_to_process;
    } else {
      bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
                             FORCE_COMPLETION, StepOrigin::kV8);
    }
    bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
  }
}

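// Performs a single marking step: finalizes sweeping if necessary, then
// either processes up to |bytes_to_process| bytes from the marking deque or
// advances embedder (wrapper) tracing. When both the deque and the wrapper
// work are exhausted, finalization or completion is requested via |action|.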
size_t IncrementalMarking::Step(size_t bytes_to_process,
                                CompletionAction action,
                                ForceCompletionAction completion,
                                StepOrigin step_origin) {
  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
  double start = heap_->MonotonicallyIncreasingTimeInMs();

  if (state_ == SWEEPING) {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
    FinalizeSweeping();
  }

  size_t bytes_processed = 0;
  if (state_ == MARKING) {
    const bool incremental_wrapper_tracing =
        FLAG_incremental_marking_wrappers && heap_->UsingEmbedderHeapTracer();
    const bool process_wrappers =
        incremental_wrapper_tracing &&
        (heap_->RequiresImmediateWrapperProcessing() ||
         heap_->mark_compact_collector()->marking_deque()->IsEmpty());
    bool wrapper_work_left = incremental_wrapper_tracing;
    if (!process_wrappers) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (step_origin == StepOrigin::kTask) {
        bytes_marked_ahead_of_schedule_ += bytes_processed;
      }
    } else {
      const double wrapper_deadline =
          heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
      TRACE_GC(heap()->tracer(),
               GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
      heap_->RegisterWrappersWithEmbedderHeapTracer();
      wrapper_work_left = heap_->embedder_heap_tracer()->AdvanceTracing(
          wrapper_deadline, EmbedderHeapTracer::AdvanceTracingActions(
                                EmbedderHeapTracer::ForceCompletionAction::
                                    DO_NOT_FORCE_COMPLETION));
    }

    if (heap_->mark_compact_collector()->marking_deque()->IsEmpty() &&
        !wrapper_work_left) {
      if (completion == FORCE_COMPLETION ||
          IsIdleMarkingDelayCounterLimitReached()) {
        if (!finalize_marking_completed_) {
          FinalizeMarking(action);
        } else {
          MarkingComplete(action);
        }
      } else {
        IncrementIdleMarkingDelayCounter();
      }
    }
  }

  double end = heap_->MonotonicallyIncreasingTimeInMs();
  double duration = (end - start);
  // Note that we report zero bytes here when sweeping was in progress or
  // when we just started incremental marking. In these cases we did not
  // process the marking deque.
  heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Step %s %zu bytes (%zu) in %.1f\n",
        step_origin == StepOrigin::kV8 ? "in v8" : "in task", bytes_processed,
        bytes_to_process, duration);
  }
  return bytes_processed;
}


bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}


void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
  idle_marking_delay_counter_++;
}


void IncrementalMarking::ClearIdleMarkingDelayCounter() {
  idle_marking_delay_counter_ = 0;
}

}  // namespace internal
}  // namespace v8