// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/optimizing-compiler-thread.h"

#include "src/v8.h"

#include "src/base/atomicops.h"
#include "src/full-codegen.h"
#include "src/hydrogen.h"
#include "src/isolate.h"
#include "src/v8threads.h"

namespace v8 {
namespace internal {

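// By the time the destructor runs, Stop() is expected to have drained the
// queues; only the backing stores remain to be freed here.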
OptimizingCompilerThread::~OptimizingCompilerThread() {
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
  if (FLAG_concurrent_osr) {
#ifdef DEBUG
    for (int i = 0; i < osr_buffer_capacity_; i++) {
      CHECK_EQ(NULL, osr_buffer_[i]);
    }
#endif
    DeleteArray(osr_buffer_);
  }
}


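// The background thread's consumer loop.  Each semaphore signal corresponds
// to either one queued job or one control request (stop/flush), so every
// wake-up finds work to do: act on the stop flag first, then compile the
// next job from the input queue.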
void OptimizingCompilerThread::Run() {
#ifdef DEBUG
  { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
    thread_id_ = ThreadId::Current().ToInteger();
  }
#endif
  Isolate::SetIsolateThreadLocals(isolate_, NULL);
  DisallowHeapAllocation no_allocation;
  DisallowHandleAllocation no_handles;
  DisallowHandleDereference no_deref;

  base::ElapsedTimer total_timer;
  if (FLAG_trace_concurrent_recompilation) total_timer.Start();

  while (true) {
    input_queue_semaphore_.Wait();
    TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);

    if (FLAG_concurrent_recompilation_delay != 0) {
      base::OS::Sleep(FLAG_concurrent_recompilation_delay);
    }

    switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) {
      case CONTINUE:
        break;
      case STOP:
        if (FLAG_trace_concurrent_recompilation) {
          time_spent_total_ = total_timer.Elapsed();
        }
        stop_semaphore_.Signal();
        return;
      case FLUSH:
        // The main thread is blocked, waiting for the stop semaphore.
        { AllowHandleDereference allow_handle_dereference;
          FlushInputQueue(true);
        }
        base::Release_Store(&stop_thread_,
                            static_cast<base::AtomicWord>(CONTINUE));
        stop_semaphore_.Signal();
        // Return to start of consumer loop.
        continue;
    }

    base::ElapsedTimer compiling_timer;
    if (FLAG_trace_concurrent_recompilation) compiling_timer.Start();

    CompileNext();

    if (FLAG_trace_concurrent_recompilation) {
      time_spent_compiling_ += compiling_timer.Elapsed();
    }
  }
}


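// Removes and returns the job at the front of the circular input queue, or
// NULL if the queue is empty.  Called from both threads, hence the mutex.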
OptimizedCompileJob* OptimizingCompilerThread::NextInput() {
  base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) return NULL;
  OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
  DCHECK_NE(NULL, job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  return job;
}


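// Runs the optimization phase of the next queued job on this thread, then
// hands the result to the output queue and asks the main thread, via a
// stack guard interrupt, to install the generated code.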
void OptimizingCompilerThread::CompileNext() {
  OptimizedCompileJob* job = NextInput();
  DCHECK_NE(NULL, job);

  // The function may have already been optimized by OSR.  Simply continue.
  OptimizedCompileJob::Status status = job->OptimizeGraph();
  USE(status);   // Prevent an unused-variable error in release mode.
  DCHECK(status != OptimizedCompileJob::FAILED);

  // Use a mutex to make sure that functions marked for install
  // are always also queued.
  output_queue_.Enqueue(job);
  isolate_->stack_guard()->RequestInstallCode();
}


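// Deletes a job (and its CompilationInfo) and optionally undoes its side
// effects: non-OSR functions get their unoptimized code reinstated, while
// OSR jobs that never became installable have the stack check that guards
// the OSR entry removed from the original code.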
static void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
                                       bool restore_function_code) {
  // The recompile job is allocated in the CompilationInfo's zone.
  CompilationInfo* info = job->info();
  if (restore_function_code) {
    if (info->is_osr()) {
      if (!job->IsWaitingForInstall()) {
        // Remove stack check that guards OSR entry on original code.
        Handle<Code> code = info->unoptimized_code();
        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
        BackEdgeTable::RemoveStackCheck(code, offset);
      }
    } else {
      Handle<JSFunction> function = info->closure();
      function->ReplaceCode(function->shared()->code());
    }
  }
  delete info;
}


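// Disposes every job still waiting in the input queue, consuming the
// matching semaphore signals to keep the counts balanced.  OSR jobs are
// skipped here: they are also referenced from the OSR buffer and are
// disposed by FlushOsrBuffer().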
void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
  OptimizedCompileJob* job;
  while ((job = NextInput())) {
    // This should not block, since we have one signal on the input queue
    // semaphore corresponding to each element in the input queue.
    input_queue_semaphore_.Wait();
    // OSR jobs are dealt with separately.
    if (!job->info()->is_osr()) {
      DisposeOptimizedCompileJob(job, restore_function_code);
    }
  }
}


void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
  OptimizedCompileJob* job;
  while (output_queue_.Dequeue(&job)) {
    // OSR jobs are dealt with separately.
    if (!job->info()->is_osr()) {
      DisposeOptimizedCompileJob(job, restore_function_code);
    }
  }
}


void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    if (osr_buffer_[i] != NULL) {
      DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
      osr_buffer_[i] = NULL;
    }
  }
}


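// Called on the main thread.  Hands a FLUSH request to the background
// thread, wakes it up, and blocks on stop_semaphore_ until it has emptied
// the input queue; the output queue and OSR buffer are then flushed here.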
void OptimizingCompilerThread::Flush() {
  DCHECK(!IsOptimizerThread());
  base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  input_queue_semaphore_.Signal();
  stop_semaphore_.Wait();
  FlushOutputQueue(true);
  if (FLAG_concurrent_osr) FlushOsrBuffer(true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}


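// Called on the main thread to shut the background thread down.  If an
// artificial recompilation delay is configured (presumably for testing),
// the remaining jobs are compiled and installed on this thread instead of
// being discarded.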
void OptimizingCompilerThread::Stop() {
  DCHECK(!IsOptimizerThread());
  base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
  if (FLAG_block_concurrent_recompilation) Unblock();
  input_queue_semaphore_.Signal();
  stop_semaphore_.Wait();

  if (FLAG_concurrent_recompilation_delay != 0) {
    // At this point the optimizing compiler thread's event loop has stopped.
    // There is no need for a mutex when reading input_queue_length_.
    while (input_queue_length_ > 0) CompileNext();
    InstallOptimizedFunctions();
  } else {
    FlushInputQueue(false);
    FlushOutputQueue(false);
  }

  if (FLAG_concurrent_osr) FlushOsrBuffer(false);

  if (FLAG_trace_concurrent_recompilation) {
    double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
    PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
  }

  if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
      FLAG_concurrent_osr) {
    PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
  }

  Join();
}


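// Drains the output queue on the main thread.  Regular jobs have their
// optimized code installed, or are dropped if the function has been
// optimized in the meantime.  OSR jobs are only marked as installable;
// they are picked up later through FindReadyOSRCandidate().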
void OptimizingCompilerThread::InstallOptimizedFunctions() {
  DCHECK(!IsOptimizerThread());
  HandleScope handle_scope(isolate_);

  OptimizedCompileJob* job;
  while (output_queue_.Dequeue(&job)) {
    CompilationInfo* info = job->info();
    Handle<JSFunction> function(*info->closure());
    if (info->is_osr()) {
      if (FLAG_trace_osr) {
        PrintF("[COSR - ");
        function->ShortPrint();
        PrintF(" is ready for install and entry at AST id %d]\n",
               info->osr_ast_id().ToInt());
      }
      job->WaitForInstall();
      // Remove stack check that guards OSR entry on original code.
      Handle<Code> code = info->unoptimized_code();
      uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
      BackEdgeTable::RemoveStackCheck(code, offset);
    } else {
      if (function->IsOptimized()) {
        if (FLAG_trace_concurrent_recompilation) {
          PrintF("  ** Aborting compilation for ");
          function->ShortPrint();
          PrintF(" as it has already been optimized.\n");
        }
        DisposeOptimizedCompileJob(job, false);
      } else {
        Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
        function->ReplaceCode(
            code.is_null() ? function->shared()->code() : *code);
      }
    }
  }
}


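// Called on the main thread.  OSR jobs go to the front of the input queue,
// since the unoptimized code keeps polling for the result at its back edge;
// everything else is appended at the back.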
void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
  DCHECK(IsQueueAvailable());
  DCHECK(!IsOptimizerThread());
  CompilationInfo* info = job->info();
  if (info->is_osr()) {
    osr_attempts_++;
    AddToOsrBuffer(job);
    // Add job to the front of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    // Move shift_ back by one.
    input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
    input_queue_[InputQueueIndex(0)] = job;
    input_queue_length_++;
  } else {
    // Add job to the back of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else {
    input_queue_semaphore_.Signal();
  }
}


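// Releases jobs held back by FLAG_block_concurrent_recompilation: one
// semaphore signal per blocked job.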
void OptimizingCompilerThread::Unblock() {
  DCHECK(!IsOptimizerThread());
  while (blocked_jobs_ > 0) {
    input_queue_semaphore_.Signal();
    blocked_jobs_--;
  }
}


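// Scans the OSR buffer for a finished job matching the given OSR entry.
// On a hit the job is removed from the buffer and returned to the caller
// for installation.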
OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
    Handle<JSFunction> function, BailoutId osr_ast_id) {
  DCHECK(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL &&
        current->IsWaitingForInstall() &&
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
      osr_hits_++;
      osr_buffer_[i] = NULL;
      return current;
    }
  }
  return NULL;
}


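// Both IsQueuedForOSR overloads report whether a matching job is still in
// flight; a job that is already waiting for install no longer counts as
// queued.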
bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
                                              BailoutId osr_ast_id) {
  DCHECK(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL &&
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
      return !current->IsWaitingForInstall();
    }
  }
  return false;
}


bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
  DCHECK(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL && *current->info()->closure() == function) {
      return !current->IsWaitingForInstall();
    }
  }
  return false;
}


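// Inserts a job into the circular OSR buffer, scanning from the cursor for
// the next slot that is empty or holds a stale job (one that became
// installable but was never entered); a stale job is disposed before being
// replaced.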
void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) {
  DCHECK(!IsOptimizerThread());
  // Find the next slot that is empty or has a stale job.
  OptimizedCompileJob* stale = NULL;
  while (true) {
    stale = osr_buffer_[osr_buffer_cursor_];
    if (stale == NULL || stale->IsWaitingForInstall()) break;
    osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
  }

  // Add to the found slot and dispose of the evicted job.
  if (stale != NULL) {
    DCHECK(stale->IsWaitingForInstall());
    CompilationInfo* info = stale->info();
    if (FLAG_trace_osr) {
      PrintF("[COSR - Discarded ");
      info->closure()->PrintName();
      PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
    }
    DisposeOptimizedCompileJob(stale, false);
  }
  osr_buffer_[osr_buffer_cursor_] = job;
  osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
}


#ifdef DEBUG
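// Debug-only helpers: thread_id_ is assigned once at the top of Run(), so
// comparing it against the current thread identifies the optimizer thread.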
bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) {
  return isolate->concurrent_recompilation_enabled() &&
         isolate->optimizing_compiler_thread()->IsOptimizerThread();
}


bool OptimizingCompilerThread::IsOptimizerThread() {
  base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
  return ThreadId::Current().ToInteger() == thread_id_;
}
#endif


} }  // namespace v8::internal