// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"

#include "src/base/atomicops.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/full-codegen/full-codegen.h"
#include "src/isolate.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

namespace {

void DisposeCompilationJob(CompilationJob* job, bool restore_function_code) {
  if (restore_function_code) {
    Handle<JSFunction> function = job->info()->closure();
    function->ReplaceCode(function->shared()->code());
    // TODO(mvstanton): We can't call EnsureLiterals here due to allocation,
    // but we probably shouldn't call ReplaceCode either, as this
    // sometimes runs on the worker thread!
    // JSFunction::EnsureLiterals(function);
  }
  delete job;
}

}  // namespace

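// CompileTask runs a single recompilation job on a background thread. Each
// task increments the dispatcher's ref count on construction and decrements
// it at the end of Run(), so that Flush() and Stop() can wait on
// ref_count_zero_ until all in-flight background work has drained.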
class OptimizingCompileDispatcher::CompileTask : public v8::Task {
 public:
  explicit CompileTask(Isolate* isolate) : isolate_(isolate) {
    OptimizingCompileDispatcher* dispatcher =
        isolate_->optimizing_compile_dispatcher();
    base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
    ++dispatcher->ref_count_;
  }

  virtual ~CompileTask() {}

 private:
  // v8::Task overrides.
  void Run() override {
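    // Off-thread compilation must not touch the V8 heap: forbid allocation,
    // handle creation, and handle dereferencing for the whole run.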
    DisallowHeapAllocation no_allocation;
    DisallowHandleAllocation no_handles;
    DisallowHandleDereference no_deref;

    OptimizingCompileDispatcher* dispatcher =
        isolate_->optimizing_compile_dispatcher();
    {
      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);

      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                   "V8.RecompileConcurrent");

      if (dispatcher->recompilation_delay_ != 0) {
        base::OS::Sleep(base::TimeDelta::FromMilliseconds(
            dispatcher->recompilation_delay_));
      }

      dispatcher->CompileNext(dispatcher->NextInput(true));
    }
    {
      base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
      if (--dispatcher->ref_count_ == 0) {
        dispatcher->ref_count_zero_.NotifyOne();
      }
    }
  }

  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(CompileTask);
};

OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    DCHECK_EQ(0, ref_count_);
  }
#endif
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
}

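// The input queue is effectively a fixed-capacity ring buffer:
// input_queue_shift_ marks the logical head, and InputQueueIndex() maps
// logical offsets to physical slots. NextInput() pops the head under the
// queue mutex; if a flush is in progress, the dequeued job is disposed
// (restoring the function's unoptimized code) instead of being returned.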
CompilationJob* OptimizingCompileDispatcher::NextInput(bool check_if_flushing) {
  base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) return NULL;
  CompilationJob* job = input_queue_[InputQueueIndex(0)];
  DCHECK_NOT_NULL(job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  if (check_if_flushing) {
    if (static_cast<ModeFlag>(base::Acquire_Load(&mode_)) == FLUSH) {
      AllowHandleDereference allow_handle_dereference;
      DisposeCompilationJob(job, true);
      return NULL;
    }
  }
  return job;
}

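// Executes one job, usually on a worker thread, then moves it to the output
// queue and requests an interrupt so that the main thread will install the
// result via InstallOptimizedFunctions().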
void OptimizingCompileDispatcher::CompileNext(CompilationJob* job) {
  if (!job) return;

  // The function may have already been optimized by OSR.  Simply continue.
  CompilationJob::Status status = job->ExecuteJob();
  USE(status);  // Prevent an unused-variable error.

  // Use a mutex to make sure that functions marked for install
  // are always also queued.
  base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
  output_queue_.push(job);
  isolate_->stack_guard()->RequestInstallCode();
}

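// Drains the output queue, disposing every job without installing its code.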
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
  for (;;) {
    CompilationJob* job = NULL;
    {
      base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }

    DisposeCompilationJob(job, restore_function_code);
  }
}

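// Flushes both queues. The non-blocking variant empties the input queue on
// the calling thread and leaves running background jobs untouched; the
// blocking variant switches mode_ to FLUSH so that workers dispose the jobs
// they dequeue, then waits for the ref count to reach zero before draining
// the output queue.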
void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
  if (FLAG_block_concurrent_recompilation) Unblock();
  if (blocking_behavior == BlockingBehavior::kDontBlock) {
    base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
    while (input_queue_length_ > 0) {
      CompilationJob* job = input_queue_[InputQueueIndex(0)];
      DCHECK_NOT_NULL(job);
      input_queue_shift_ = InputQueueIndex(1);
      input_queue_length_--;
      DisposeCompilationJob(job, true);
    }
    FlushOutputQueue(true);
    if (FLAG_trace_concurrent_recompilation) {
      PrintF("  ** Flushed concurrent recompilation queues (not blocking).\n");
    }
    return;
  }
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }
  FlushOutputQueue(true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}

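// Stops the dispatcher: waits for all background tasks to finish, then
// either compiles the remaining queued jobs synchronously (only when an
// artificial recompilation delay is configured, presumably for testing) or
// simply discards the already-finished jobs.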
void OptimizingCompileDispatcher::Stop() {
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }

  if (recompilation_delay_ != 0) {
    // At this point the optimizing compiler thread's event loop has stopped.
    // There is no need for a mutex when reading input_queue_length_.
    while (input_queue_length_ > 0) CompileNext(NextInput());
    InstallOptimizedFunctions();
  } else {
    FlushOutputQueue(false);
  }
}

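// Runs on the main thread (note the HandleScope) in response to the
// install-code interrupt: pops finished jobs off the output queue and
// finalizes them, unless the function was already optimized in the
// meantime, e.g. by on-stack replacement.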
void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);

  for (;;) {
    CompilationJob* job = NULL;
    {
      base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }
    CompilationInfo* info = job->info();
    Handle<JSFunction> function(*info->closure());
    if (function->IsOptimized()) {
      if (FLAG_trace_concurrent_recompilation) {
        PrintF("  ** Aborting compilation for ");
        function->ShortPrint();
        PrintF(" as it has already been optimized.\n");
      }
      DisposeCompilationJob(job, false);
    } else {
      Compiler::FinalizeCompilationJob(job);
    }
  }
}

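// Appends a job to the back of the input queue. Normally a background task
// is posted right away; with FLAG_block_concurrent_recompilation the job is
// held back until Unblock() is called.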
void OptimizingCompileDispatcher::QueueForOptimization(CompilationJob* job) {
  DCHECK(IsQueueAvailable());
  {
    // Add job to the back of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
  }
}

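// Posts one background task for every job that was held back while
// FLAG_block_concurrent_recompilation was in effect.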
void OptimizingCompileDispatcher::Unblock() {
  while (blocked_jobs_ > 0) {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
    blocked_jobs_--;
  }
}

}  // namespace internal
}  // namespace v8