// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
#define V8_HEAP_INCREMENTAL_MARKING_H_


#include "src/execution.h"
#include "src/heap/mark-compact.h"
#include "src/objects.h"

namespace v8 {
namespace internal {


class IncrementalMarking {
 public:
  enum State { STOPPED, SWEEPING, MARKING, COMPLETE };

  enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };

  explicit IncrementalMarking(Heap* heap);

  static void Initialize();

  void TearDown();

  State state() {
    DCHECK(state_ == STOPPED || FLAG_incremental_marking);
    return state_;
  }

  bool should_hurry() { return should_hurry_; }
  void set_should_hurry(bool val) { should_hurry_ = val; }

  inline bool IsStopped() { return state() == STOPPED; }

  INLINE(bool IsMarking()) { return state() >= MARKING; }

  inline bool IsMarkingIncomplete() { return state() == MARKING; }

  inline bool IsComplete() { return state() == COMPLETE; }

  bool WorthActivating();

  bool ShouldActivate();

  enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };

  void Start(CompactionFlag flag = ALLOW_COMPACTION);

  void Stop();

  void PrepareForScavenge();

  void UpdateMarkingDequeAfterScavenge();

  void Hurry();

  void Finalize();

  void Abort();

  void MarkingComplete(CompletionAction action);

  // It's hard to know how much work the incremental marker should do to make
  // progress in the face of the mutator creating new work for it. We start
  // off at a moderate rate of work and gradually increase the speed of the
  // incremental marker until it completes.
  // Do some marking every time this much memory has been allocated or that
  // many heavy (color-checking) write barriers have been invoked.
  static const intptr_t kAllocatedThreshold = 65536;
  static const intptr_t kWriteBarriersInvokedThreshold = 32768;
  // Start off by marking this many times more memory than has been allocated.
  static const intptr_t kInitialMarkingSpeed = 1;
  // But if we are promoting a lot of data we need to mark faster to keep up
  // with the data that is entering the old space through promotion.
  static const intptr_t kFastMarking = 3;
  // After this many steps we increase the marking/allocating factor.
  static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
  // This is how much we increase the marking/allocating factor by.
  static const intptr_t kMarkingSpeedAccelleration = 2;
  static const intptr_t kMaxMarkingSpeed = 1000;
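
  // A minimal sketch of the pacing these constants describe (illustrative
  // only; the real logic lives in Step() and SpeedUp() in
  // incremental-marking.cc, and `allocated` and `steps_count` are
  // hypothetical names here):
  //
  //   // Runs once kAllocatedThreshold bytes have been allocated (or
  //   // kWriteBarriersInvokedThreshold write barriers have fired) since
  //   // the last step.
  //   intptr_t bytes_to_mark = marking_speed_ * allocated;
  //   // Periodically accelerate, never beyond kMaxMarkingSpeed.
  //   if (steps_count % kMarkingSpeedAccellerationInterval == 0) {
  //     marking_speed_ = static_cast<int>(
  //         Min(static_cast<intptr_t>(marking_speed_) +
  //                 kMarkingSpeedAccelleration,
  //             kMaxMarkingSpeed));
  //   }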

  void OldSpaceStep(intptr_t allocated);

  void Step(intptr_t allocated, CompletionAction action,
            bool force_marking = false);

  inline void RestartIfNotMarking() {
    if (state_ == COMPLETE) {
      state_ = MARKING;
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
      }
    }
  }

  static void RecordWriteFromCode(HeapObject* obj, Object** slot,
                                  Isolate* isolate);

  // Record a slot for compaction. Returns false for objects that are
  // guaranteed to be rescanned or not guaranteed to survive.
  //
  // No slots in white objects should be recorded, as some slots are typed and
  // cannot be interpreted correctly if the underlying object does not survive
  // the incremental cycle (stays white).
  INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
  INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
  INLINE(void RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
                                  Object* value));
  INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
                                     Code* value));


  void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
  void RecordWriteIntoCodeSlow(HeapObject* obj, RelocInfo* rinfo,
                               Object* value);
  void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
  void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
  void RecordCodeTargetPatch(Address pc, HeapObject* value);

  inline void RecordWrites(HeapObject* obj);

  inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);

  inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);

  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
    SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
  }

  inline void SetNewSpacePageFlags(NewSpacePage* chunk) {
    SetNewSpacePageFlags(chunk, IsMarking());
  }

  MarkingDeque* marking_deque() { return &marking_deque_; }

  bool IsCompacting() { return IsMarking() && is_compacting_; }

  void ActivateGeneratedStub(Code* stub);

  void NotifyOfHighPromotionRate() {
    if (IsMarking()) {
      if (marking_speed_ < kFastMarking) {
        if (FLAG_trace_gc) {
          PrintPID(
              "Increasing marking speed to %d "
              "due to high promotion rate\n",
              static_cast<int>(kFastMarking));
        }
        marking_speed_ = kFastMarking;
      }
    }
  }

  void EnterNoMarkingScope() { no_marking_scope_depth_++; }

  void LeaveNoMarkingScope() { no_marking_scope_depth_--; }

  void UncommitMarkingDeque();

  void NotifyIncompleteScanOfObject(int unscanned_bytes) {
    unscanned_bytes_of_large_object_ = unscanned_bytes;
  }
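
  // Usage sketch for the write-barrier hooks above (illustrative only;
  // StoreField is a hypothetical caller, not a V8 function, and real stores
  // go through V8's write-barrier macros). Each mutator store of a heap
  // pointer is followed by a RecordWrite call so that marking never misses
  // an edge created while it is in progress:
  //
  //   void StoreField(Heap* heap, HeapObject* obj, Object** slot,
  //                   Object* value) {
  //     *slot = value;
  //     heap->incremental_marking()->RecordWrite(obj, slot, value);
  //   }
  //
  // When a white `value` is written into a black `obj`, the barrier greys
  // `value` (see WhiteToGreyAndPush) so it is scanned before marking
  // completes, preserving the tri-color invariant.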

 private:
  int64_t SpaceLeftInOldSpace();

  void SpeedUp();

  void ResetStepCounters();

  void StartMarking(CompactionFlag flag);

  void ActivateIncrementalWriteBarrier(PagedSpace* space);
  static void ActivateIncrementalWriteBarrier(NewSpace* space);
  void ActivateIncrementalWriteBarrier();

  static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
  static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
  void DeactivateIncrementalWriteBarrier();

  static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking,
                                   bool is_compacting);

  static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);

  void EnsureMarkingDequeIsCommitted();

  INLINE(void ProcessMarkingDeque());

  INLINE(intptr_t ProcessMarkingDeque(intptr_t bytes_to_process));

  INLINE(void VisitObject(Map* map, HeapObject* obj, int size));

  Heap* heap_;

  State state_;
  bool is_compacting_;

  base::VirtualMemory* marking_deque_memory_;
  bool marking_deque_memory_committed_;
  MarkingDeque marking_deque_;

  int steps_count_;
  int64_t old_generation_space_available_at_start_of_incremental_;
  int64_t old_generation_space_used_at_start_of_incremental_;
  int64_t bytes_rescanned_;
  bool should_hurry_;
  int marking_speed_;
  intptr_t bytes_scanned_;
  intptr_t allocated_;
  intptr_t write_barriers_invoked_since_last_step_;

  int no_marking_scope_depth_;

  int unscanned_bytes_of_large_object_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
}
}  // namespace v8::internal

#endif  // V8_HEAP_INCREMENTAL_MARKING_H_