// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/memory-reducer.h"

#include "src/flags.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/utils.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

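// Delays and limits used by the Step() state machine below: kLongDelayMs and
// kShortDelayMs space out successive GC attempts, kWatchdogDelayMs bounds how
// long the reducer waits after the last full GC before forcing one, and
// kMaxNumberOfGCs caps the GCs started within one memory-reducer cycle.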
const int MemoryReducer::kLongDelayMs = 8000;
const int MemoryReducer::kShortDelayMs = 500;
const int MemoryReducer::kWatchdogDelayMs = 100000;
const int MemoryReducer::kMaxNumberOfGCs = 3;

MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
    : CancelableTask(memory_reducer->heap()->isolate()),
      memory_reducer_(memory_reducer) {}

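// Timer task body: samples the allocation counters, decides whether the
// mutator looks idle or the isolate should optimize for memory, and forwards
// the result to NotifyTimer() as a kTimer event.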
void MemoryReducer::TimerTask::RunInternal() {
  Heap* heap = memory_reducer_->heap();
  Event event;
  double time_ms = heap->MonotonicallyIncreasingTimeInMs();
  heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
                                   heap->OldGenerationAllocationCounter());
  bool low_allocation_rate = heap->HasLowAllocationRate();
  bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
  if (FLAG_trace_gc_verbose) {
    heap->isolate()->PrintWithTimestamp(
        "Memory reducer: %s, %s\n",
        low_allocation_rate ? "low alloc" : "high alloc",
        optimize_for_memory ? "background" : "foreground");
  }
  event.type = kTimer;
  event.time_ms = time_ms;
  // The memory reducer will start incremental marking if
  // 1) the mutator is likely idle: the JS call rate and the allocation rate
  //    are low, or
  // 2) the mutator is in the background: the optimize-for-memory flag is set.
  event.should_start_incremental_gc =
      low_allocation_rate || optimize_for_memory;
  event.can_start_incremental_gc =
      heap->incremental_marking()->IsStopped() &&
      (heap->incremental_marking()->CanBeActivated() || optimize_for_memory);
  memory_reducer_->NotifyTimer(event);
}

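// Advances the state machine on a timer event. In kRun this starts an idle
// incremental GC; in kWait it optionally makes progress on already running
// incremental marking (for memory-priority isolates that get no idle time)
// and re-schedules the timer.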
void MemoryReducer::NotifyTimer(const Event& event) {
  DCHECK_EQ(kTimer, event.type);
  DCHECK_EQ(kWait, state_.action);
  state_ = Step(state_, event);
  if (state_.action == kRun) {
    DCHECK(heap()->incremental_marking()->IsStopped());
    DCHECK(FLAG_incremental_marking);
    if (FLAG_trace_gc_verbose) {
      heap()->isolate()->PrintWithTimestamp("Memory reducer: started GC #%d\n",
                                            state_.started_gcs);
    }
    heap()->StartIdleIncrementalMarking(
        GarbageCollectionReason::kMemoryReducer);
  } else if (state_.action == kWait) {
    if (!heap()->incremental_marking()->IsStopped() &&
        heap()->ShouldOptimizeForMemoryUsage()) {
      // Make progress with pending incremental marking if memory usage has
      // higher priority than latency. This is important for background tabs
      // that do not send idle notifications.
      const int kIncrementalMarkingDelayMs = 500;
      double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
                        kIncrementalMarkingDelayMs;
      heap()->incremental_marking()->AdvanceIncrementalMarking(
          deadline, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
          IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask);
      heap()->FinalizeIncrementalMarkingIfComplete(
          GarbageCollectionReason::kFinalizeMarkingViaTask);
    }
    // Re-schedule the timer.
    ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
    if (FLAG_trace_gc_verbose) {
      heap()->isolate()->PrintWithTimestamp(
          "Memory reducer: waiting for %.f ms\n",
          state_.next_gc_start_ms - event.time_ms);
    }
  }
}

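// Advances the state machine after a full mark-compact GC and, when the new
// state is kWait (entered from kDone or kRun), schedules the timer for the
// next GC attempt.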
void MemoryReducer::NotifyMarkCompact(const Event& event) {
  DCHECK_EQ(kMarkCompact, event.type);
  Action old_action = state_.action;
  state_ = Step(state_, event);
  if (old_action != kWait && state_.action == kWait) {
    // If we are transitioning to the WAIT state, start the timer.
    ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
  }
  if (old_action == kRun) {
    if (FLAG_trace_gc_verbose) {
      heap()->isolate()->PrintWithTimestamp(
          "Memory reducer: finished GC #%d (%s)\n", state_.started_gcs,
          state_.action == kWait ? "will do more" : "done");
    }
  }
}

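// Handles an external hint that garbage may have become reclaimable. Per
// Step(), this only has an effect in the kDone state, where it enters kWait
// and starts the timer.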
void MemoryReducer::NotifyPossibleGarbage(const Event& event) {
  DCHECK_EQ(kPossibleGarbage, event.type);
  Action old_action = state_.action;
  state_ = Step(state_, event);
  if (old_action != kWait && state_.action == kWait) {
    // If we are transitioning to the WAIT state, start the timer.
    ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
  }
}

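// Returns true if more than kWatchdogDelayMs have passed since the last full
// GC, which lets Step() force a GC even when the allocation rate never drops
// low enough to look idle.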
bool MemoryReducer::WatchdogGC(const State& state, const Event& event) {
  return state.last_gc_time_ms != 0 &&
         event.time_ms > state.last_gc_time_ms + kWatchdogDelayMs;
}

// For specification of this function see the comment for MemoryReducer class.
MemoryReducer::State MemoryReducer::Step(const State& state,
                                         const Event& event) {
  if (!FLAG_incremental_marking || !FLAG_memory_reducer) {
    return State(kDone, 0, 0, state.last_gc_time_ms);
  }
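  // A State bundles (action, started_gcs, next_gc_start_ms, last_gc_time_ms);
  // see memory-reducer.h for the definition. The cases below dispatch on the
  // current action and then on the incoming event type.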
  switch (state.action) {
    case kDone:
      if (event.type == kTimer) {
        return state;
      } else {
        DCHECK(event.type == kPossibleGarbage || event.type == kMarkCompact);
        return State(
            kWait, 0, event.time_ms + kLongDelayMs,
            event.type == kMarkCompact ? event.time_ms : state.last_gc_time_ms);
      }
    case kWait:
      switch (event.type) {
        case kPossibleGarbage:
          return state;
        case kTimer:
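          // Stop after kMaxNumberOfGCs. Otherwise start a GC only if it can
          // be started, looks worthwhile (idle mutator, background mode, or
          // watchdog timeout) and the scheduled start time has been reached;
          // if starting is not warranted yet, push the next attempt out by
          // kLongDelayMs.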
          if (state.started_gcs >= kMaxNumberOfGCs) {
            return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms);
          } else if (event.can_start_incremental_gc &&
                     (event.should_start_incremental_gc ||
                      WatchdogGC(state, event))) {
            if (state.next_gc_start_ms <= event.time_ms) {
              return State(kRun, state.started_gcs + 1, 0.0,
                           state.last_gc_time_ms);
            } else {
              return state;
            }
          } else {
            return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
                         state.last_gc_time_ms);
          }
        case kMarkCompact:
          return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
                       event.time_ms);
      }
    case kRun:
      if (event.type != kMarkCompact) {
        return state;
      } else {
        if (state.started_gcs < kMaxNumberOfGCs &&
            (event.next_gc_likely_to_collect_more || state.started_gcs == 1)) {
          return State(kWait, state.started_gcs, event.time_ms + kShortDelayMs,
                       event.time_ms);
        } else {
          return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms);
        }
      }
  }
  UNREACHABLE();
  return State(kDone, 0, 0, 0.0);  // Make the compiler happy.
}

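// Posts a TimerTask to run on the foreground thread after the given delay,
// padded with some slack to absorb scheduler imprecision.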
void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
  DCHECK(delay_ms > 0);
  // Leave some room for precision error in the task scheduler.
  const double kSlackMs = 100;
  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
  auto timer_task = new MemoryReducer::TimerTask(this);
  V8::GetCurrentPlatform()->CallDelayedOnForegroundThread(
      isolate, timer_task, (delay_ms + kSlackMs) / 1000.0);
}

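// Resets the memory reducer to the initial kDone state, discarding the
// progress of any cycle that is in flight.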
void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0); }

}  // namespace internal
}  // namespace v8