/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit.h"

#include <dlfcn.h>

#include "art_method-inl.h"
#include "base/enums.h"
#include "base/file_utils.h"
#include "base/logging.h"  // For VLOG.
#include "base/memory_tool.h"
#include "base/runtime_debug.h"
#include "base/scoped_flock.h"
#include "base/utils.h"
#include "class_root.h"
#include "debugger.h"
#include "dex/type_lookup_table.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "interpreter/interpreter.h"
#include "jit-inl.h"
#include "jit_code_cache.h"
#include "jni/java_vm_ext.h"
#include "mirror/method_handle_impl.h"
#include "mirror/var_handle.h"
#include "oat_file.h"
#include "oat_file_manager.h"
#include "oat_quick_method_header.h"
#include "profile/profile_compilation_info.h"
#include "profile_saver.h"
#include "runtime.h"
#include "runtime_options.h"
#include "stack.h"
#include "stack_map.h"
#include "thread-inl.h"
#include "thread_list.h"

namespace art {
namespace jit {

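// Enables on-stack replacement (OSR): once a method has a compiled version, a hot
// loop still running in the interpreter can jump into the compiled code mid-method.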
static constexpr bool kEnableOnStackReplacement = true;

// Different compilation threshold constants. These can be overridden on the command line.
static constexpr size_t kJitDefaultCompileThreshold           = 10000;  // Non-debug default.
static constexpr size_t kJitStressDefaultCompileThreshold     = 100;    // Fast-debug build.
static constexpr size_t kJitSlowStressDefaultCompileThreshold = 2;      // Slow-debug build.

// JIT compiler entry points, resolved from the compiler library via dlsym().
void* Jit::jit_library_handle_ = nullptr;
void* Jit::jit_compiler_handle_ = nullptr;
void* (*Jit::jit_load_)(void) = nullptr;
void (*Jit::jit_unload_)(void*) = nullptr;
bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool, bool) = nullptr;
void (*Jit::jit_types_loaded_)(void*, mirror::Class**, size_t count) = nullptr;
bool (*Jit::jit_generate_debug_info_)(void*) = nullptr;
void (*Jit::jit_update_options_)(void*) = nullptr;

struct StressModeHelper {
  DECLARE_RUNTIME_DEBUG_FLAG(kSlowMode);
};
DEFINE_RUNTIME_DEBUG_FLAG(StressModeHelper, kSlowMode);

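// Thresholds are rounded up to a multiple of kJitSamplesBatchSize so that hotness
// checks performed once per sample batch still trigger exactly on the threshold
// (e.g. with a batch size of 32, a requested threshold of 100 becomes 128).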
uint32_t JitOptions::RoundUpThreshold(uint32_t threshold) {
  if (threshold > kJitSamplesBatchSize) {
    threshold = RoundUp(threshold, kJitSamplesBatchSize);
  }
  CHECK_LE(threshold, std::numeric_limits<uint16_t>::max());
  return threshold;
}

JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
  auto* jit_options = new JitOptions;
  jit_options->use_jit_compilation_ = options.GetOrDefault(RuntimeArgumentMap::UseJitCompilation);

  jit_options->code_cache_initial_capacity_ =
      options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheInitialCapacity);
  jit_options->code_cache_max_capacity_ =
      options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheMaxCapacity);
  jit_options->dump_info_on_shutdown_ =
      options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown);
  jit_options->profile_saver_options_ =
      options.GetOrDefault(RuntimeArgumentMap::ProfileSaverOpts);
  jit_options->thread_pool_pthread_priority_ =
      options.GetOrDefault(RuntimeArgumentMap::JITPoolThreadPthreadPriority);

  if (options.Exists(RuntimeArgumentMap::JITCompileThreshold)) {
    jit_options->compile_threshold_ = *options.Get(RuntimeArgumentMap::JITCompileThreshold);
  } else {
    jit_options->compile_threshold_ =
        kIsDebugBuild
            ? (StressModeHelper::kSlowMode
                   ? kJitSlowStressDefaultCompileThreshold
                   : kJitStressDefaultCompileThreshold)
            : kJitDefaultCompileThreshold;
  }
  jit_options->compile_threshold_ = RoundUpThreshold(jit_options->compile_threshold_);

  if (options.Exists(RuntimeArgumentMap::JITWarmupThreshold)) {
    jit_options->warmup_threshold_ = *options.Get(RuntimeArgumentMap::JITWarmupThreshold);
  } else {
    jit_options->warmup_threshold_ = jit_options->compile_threshold_ / 2;
  }
  jit_options->warmup_threshold_ = RoundUpThreshold(jit_options->warmup_threshold_);

  if (options.Exists(RuntimeArgumentMap::JITOsrThreshold)) {
    jit_options->osr_threshold_ = *options.Get(RuntimeArgumentMap::JITOsrThreshold);
  } else {
    jit_options->osr_threshold_ = jit_options->compile_threshold_ * 2;
    if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) {
      jit_options->osr_threshold_ =
          RoundDown(std::numeric_limits<uint16_t>::max(), kJitSamplesBatchSize);
    }
  }
  jit_options->osr_threshold_ = RoundUpThreshold(jit_options->osr_threshold_);

  if (options.Exists(RuntimeArgumentMap::JITPriorityThreadWeight)) {
    jit_options->priority_thread_weight_ =
        *options.Get(RuntimeArgumentMap::JITPriorityThreadWeight);
    if (jit_options->priority_thread_weight_ > jit_options->warmup_threshold_) {
      LOG(FATAL) << "Priority thread weight is above the warmup threshold.";
    } else if (jit_options->priority_thread_weight_ == 0) {
      LOG(FATAL) << "Priority thread weight cannot be 0.";
    }
  } else {
    jit_options->priority_thread_weight_ = std::max(
        jit_options->warmup_threshold_ / Jit::kDefaultPriorityThreadWeightRatio,
        static_cast<size_t>(1));
  }

  if (options.Exists(RuntimeArgumentMap::JITInvokeTransitionWeight)) {
    jit_options->invoke_transition_weight_ =
        *options.Get(RuntimeArgumentMap::JITInvokeTransitionWeight);
    if (jit_options->invoke_transition_weight_ > jit_options->warmup_threshold_) {
      LOG(FATAL) << "Invoke transition weight is above the warmup threshold.";
    } else if (jit_options->invoke_transition_weight_ == 0) {
      LOG(FATAL) << "Invoke transition weight cannot be 0.";
    }
  } else {
    jit_options->invoke_transition_weight_ = std::max(
        jit_options->warmup_threshold_ / Jit::kDefaultInvokeTransitionWeightRatio,
        static_cast<size_t>(1));
  }

  return jit_options;
}

void Jit::DumpInfo(std::ostream& os) {
  code_cache_->Dump(os);
  cumulative_timings_.Dump(os);
  MutexLock mu(Thread::Current(), lock_);
  memory_use_.PrintMemoryUse(os);
}

void Jit::DumpForSigQuit(std::ostream& os) {
  DumpInfo(os);
  ProfileSaver::DumpInstanceInfo(os);
}

void Jit::AddTimingLogger(const TimingLogger& logger) {
  cumulative_timings_.AddLogger(logger);
}

Jit::Jit(JitCodeCache* code_cache, JitOptions* options)
    : code_cache_(code_cache),
      options_(options),
      cumulative_timings_("JIT timings"),
      memory_use_("Memory used for compilation", 16),
      lock_("JIT memory use lock") {}

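// Loads the compiler (if not already loaded), instantiates the Jit, and disables
// code-cache collection when debug info generation or instrumentation exit stubs
// require code addresses to stay stable.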
Jit* Jit::Create(JitCodeCache* code_cache, JitOptions* options) {
  if (jit_load_ == nullptr) {
    LOG(WARNING) << "Not creating JIT: library not loaded";
    return nullptr;
  }
  jit_compiler_handle_ = (jit_load_)();
  if (jit_compiler_handle_ == nullptr) {
    LOG(WARNING) << "Not creating JIT: failed to allocate a compiler";
    return nullptr;
  }
  std::unique_ptr<Jit> jit(new Jit(code_cache, options));

  // If the code collector is enabled, check whether it should stay enabled:
  // with 'perf', we want a 1-1 mapping between an address and a method, and we
  // cannot keep method pointers live during the instrumentation method entry
  // trampoline, so JIT GC is disabled in either of those cases.
  if (code_cache->GetGarbageCollectCode()) {
    code_cache->SetGarbageCollectCode(!jit_generate_debug_info_(jit_compiler_handle_) &&
        !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled());
  }

  VLOG(jit) << "JIT created with initial_capacity="
      << PrettySize(options->GetCodeCacheInitialCapacity())
      << ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity())
      << ", compile_threshold=" << options->GetCompileThreshold()
      << ", profile_saver_options=" << options->GetProfileSaverOptions();

  // Notify native debugger about the classes already loaded before the creation of the jit.
  jit->DumpTypeInfoForLoadedTypes(Runtime::Current()->GetClassLinker());
  return jit.release();
}

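// Resolves one entry point from the compiler library via dlsym(); on failure,
// fills *error_msg and returns false so the caller can abort loading.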
template <typename T>
bool Jit::LoadSymbol(T* address, const char* name, std::string* error_msg) {
  *address = reinterpret_cast<T>(dlsym(jit_library_handle_, name));
  if (*address == nullptr) {
    *error_msg = std::string("JIT couldn't find ") + name + std::string(" entry point");
    return false;
  }
  return true;
}

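// Loads libart-compiler.so (libartd-compiler.so on debug builds) and resolves all
// entry points the runtime needs; the library is closed again if any symbol is missing.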
bool Jit::LoadCompilerLibrary(std::string* error_msg) {
  jit_library_handle_ = dlopen(
      kIsDebugBuild ? "libartd-compiler.so" : "libart-compiler.so", RTLD_NOW);
  if (jit_library_handle_ == nullptr) {
    std::ostringstream oss;
    oss << "JIT could not load libart-compiler.so: " << dlerror();
    *error_msg = oss.str();
    return false;
  }
  bool all_resolved = true;
  all_resolved = all_resolved && LoadSymbol(&jit_load_, "jit_load", error_msg);
  all_resolved = all_resolved && LoadSymbol(&jit_unload_, "jit_unload", error_msg);
  all_resolved = all_resolved && LoadSymbol(&jit_compile_method_, "jit_compile_method", error_msg);
  all_resolved = all_resolved && LoadSymbol(&jit_types_loaded_, "jit_types_loaded", error_msg);
  all_resolved = all_resolved && LoadSymbol(&jit_update_options_, "jit_update_options", error_msg);
  all_resolved = all_resolved &&
      LoadSymbol(&jit_generate_debug_info_, "jit_generate_debug_info", error_msg);
  if (!all_resolved) {
    dlclose(jit_library_handle_);
    return false;
  }
  return true;
}

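// Compiles a single method, unless runtime callbacks (e.g. an attached debugger
// with breakpoints) or pending deoptimization make compilation unsafe or useless.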
bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr) {
  DCHECK(Runtime::Current()->UseJitCompilation());
  DCHECK(!method->IsRuntimeMethod());

  RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
  // Don't compile the method if it has breakpoints.
  if (cb->IsMethodBeingInspected(method) && !cb->IsMethodSafeToJit(method)) {
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod()
              << " due to not being safe to jit according to runtime-callbacks. For example, there"
              << " could be breakpoints in this method.";
    return false;
  }

  // Don't compile the method if we are supposed to be deoptimized.
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
  if (instrumentation->AreAllMethodsDeoptimized() || instrumentation->IsDeoptimized(method)) {
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to deoptimization";
    return false;
  }

  // If we get a request to compile a proxy method, we pass the actual Java method
  // of that proxy method, as the compiler does not expect a proxy method.
  ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr)) {
    return false;
  }

  VLOG(jit) << "Compiling method "
            << ArtMethod::PrettyMethod(method_to_compile)
            << " osr=" << std::boolalpha << osr;
  bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, baseline, osr);
  code_cache_->DoneCompiling(method_to_compile, self, osr);
  if (!success) {
    VLOG(jit) << "Failed to compile method "
              << ArtMethod::PrettyMethod(method_to_compile)
              << " osr=" << std::boolalpha << osr;
  }
  if (kIsDebugBuild) {
    if (self->IsExceptionPending()) {
      mirror::Throwable* exception = self->GetException();
      LOG(FATAL) << "No pending exception expected after compiling "
                 << ArtMethod::PrettyMethod(method)
                 << ": "
                 << exception->Dump();
    }
  }
  return success;
}

void Jit::WaitForWorkersToBeCreated() {
  if (thread_pool_ != nullptr) {
    thread_pool_->WaitForWorkersToBeCreated();
  }
}

void Jit::DeleteThreadPool() {
  Thread* self = Thread::Current();
  DCHECK(Runtime::Current()->IsShuttingDown(self));
  if (thread_pool_ != nullptr) {
    std::unique_ptr<ThreadPool> pool;
    {
      ScopedSuspendAll ssa(__FUNCTION__);
      // Clear thread_pool_ field while the threads are suspended.
      // A mutator in the 'AddSamples' method will check against it.
      pool = std::move(thread_pool_);
    }

    // When running sanitized, let all tasks finish to not leak. Otherwise just clear the queue.
    if (!kRunningOnMemoryTool) {
      pool->StopWorkers(self);
      pool->RemoveAllTasks(self);
    }
    // We could just suspend all threads, but we know those threads
    // will finish in a short period, so it's not worth adding suspend logic
    // here. Besides, this is only done for shutdown.
    pool->Wait(self, false, false);
  }
}

void Jit::StartProfileSaver(const std::string& filename,
                            const std::vector<std::string>& code_paths) {
  if (options_->GetSaveProfilingInfo()) {
    ProfileSaver::Start(options_->GetProfileSaverOptions(), filename, code_cache_, code_paths);
  }
}

void Jit::StopProfileSaver() {
  if (options_->GetSaveProfilingInfo() && ProfileSaver::IsStarted()) {
    ProfileSaver::Stop(options_->DumpJitInfoOnShutdown());
  }
}

bool Jit::JitAtFirstUse() {
  return HotMethodThreshold() == 0;
}

bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
  return code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
}

Jit::~Jit() {
  DCHECK(!options_->GetSaveProfilingInfo() || !ProfileSaver::IsStarted());
  if (options_->DumpJitInfoOnShutdown()) {
    DumpInfo(LOG_STREAM(INFO));
    Runtime::Current()->DumpDeoptimizations(LOG_STREAM(INFO));
  }
  DeleteThreadPool();
  if (jit_compiler_handle_ != nullptr) {
    jit_unload_(jit_compiler_handle_);
    jit_compiler_handle_ = nullptr;
  }
  if (jit_library_handle_ != nullptr) {
    dlclose(jit_library_handle_);
    jit_library_handle_ = nullptr;
  }
}

void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
  if (!Runtime::Current()->UseJitCompilation()) {
    // No need to notify if we only use the JIT to save profiles.
    return;
  }
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit_generate_debug_info_(jit->jit_compiler_handle_)) {
    DCHECK(jit->jit_types_loaded_ != nullptr);
    jit->jit_types_loaded_(jit->jit_compiler_handle_, &type, 1);
  }
}

void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
  struct CollectClasses : public ClassVisitor {
    bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
      classes_.push_back(klass.Ptr());
      return true;
    }
    std::vector<mirror::Class*> classes_;
  };

  if (jit_generate_debug_info_(jit_compiler_handle_)) {
    ScopedObjectAccess so(Thread::Current());

    CollectClasses visitor;
    linker->VisitClasses(&visitor);
    jit_types_loaded_(jit_compiler_handle_, visitor.classes_.data(), visitor.classes_.size());
  }
}

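// Assembly stub that copies the pre-filled frame in 'stack' onto the native stack
// and jumps to 'native_pc' in the OSR-compiled code; the return value, interpreted
// according to 'shorty', is stored in 'result'.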
396 extern "C" void art_quick_osr_stub(void** stack,
397                                    size_t stack_size_in_bytes,
398                                    const uint8_t* native_pc,
399                                    JValue* result,
400                                    const char* shorty,
401                                    Thread* self);
402 
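// Attempts to transfer execution of 'method' from the interpreter to OSR-compiled
// code at 'dex_pc + dex_pc_offset'. Returns true if the jump was performed, in which
// case 'result' holds the method's return value.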
bool Jit::MaybeDoOnStackReplacement(Thread* thread,
                                    ArtMethod* method,
                                    uint32_t dex_pc,
                                    int32_t dex_pc_offset,
                                    JValue* result) {
  if (!kEnableOnStackReplacement) {
    return false;
  }

  Jit* jit = Runtime::Current()->GetJit();
  if (jit == nullptr) {
    return false;
  }

  if (UNLIKELY(__builtin_frame_address(0) < thread->GetStackEnd())) {
    // Don't attempt to do an OSR if we are close to the stack limit. Since
    // the interpreter frames are still on stack, OSR has the potential
    // to stack overflow even for a simple loop.
    // b/27094810.
    return false;
  }

  // Get the actual Java method if this method is from a proxy class. The compiler
  // and the JIT code cache do not expect methods from proxy classes.
  method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);

  // Cheap check if the method has been compiled already. That's an indicator that we should
  // osr into it.
  if (!jit->GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return false;
  }

  // Fetch some data before looking up the OSR method. We don't want thread
  // suspension once we hold an OSR method, as the JIT code cache could delete the OSR
  // method while we are being suspended.
  CodeItemDataAccessor accessor(method->DexInstructionData());
  const size_t number_of_vregs = accessor.RegistersSize();
  const char* shorty = method->GetShorty();
  std::string method_name(VLOG_IS_ON(jit) ? method->PrettyMethod() : "");
  void** memory = nullptr;
  size_t frame_size = 0;
  ShadowFrame* shadow_frame = nullptr;
  const uint8_t* native_pc = nullptr;

  {
    ScopedAssertNoThreadSuspension sts("Holding OSR method");
    const OatQuickMethodHeader* osr_method = jit->GetCodeCache()->LookupOsrMethodHeader(method);
    if (osr_method == nullptr) {
      // No osr method yet, just return to the interpreter.
      return false;
    }

    CodeInfo code_info(osr_method);

    // Find stack map starting at the target dex_pc.
    StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset);
    if (!stack_map.IsValid()) {
      // There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
      // hope that the next branch has one.
      return false;
    }

    // Before allowing the jump, make sure no code is actively inspecting the method to avoid
    // jumping from interpreter to OSR while e.g. single stepping. Note that we could selectively
    // disable OSR when single stepping, but that's currently hard to know at this point.
    if (Runtime::Current()->GetRuntimeCallbacks()->IsMethodBeingInspected(method)) {
      return false;
    }

    // We found a stack map, now fill the frame with dex register values from the interpreter's
    // shadow frame.
    DexRegisterMap vreg_map = code_info.GetDexRegisterMapOf(stack_map);

    frame_size = osr_method->GetFrameSizeInBytes();

    // Allocate memory to hold the shadow frame values. The OSR stub will copy that
    // memory to the stack.
    // Note that we could pass the shadow frame to the stub and let it copy the values
    // there, but that is engineering complexity not worth the effort for something
    // like OSR.
    memory = reinterpret_cast<void**>(malloc(frame_size));
    CHECK(memory != nullptr);
    memset(memory, 0, frame_size);

    // Art ABI: ArtMethod is at the bottom of the stack.
    memory[0] = method;

    shadow_frame = thread->PopShadowFrame();
    if (vreg_map.empty()) {
      // If we don't have a dex register map, then there are no live dex registers at
      // this dex pc.
    } else {
      DCHECK_EQ(vreg_map.size(), number_of_vregs);
      for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
        DexRegisterLocation::Kind location = vreg_map[vreg].GetKind();
        if (location == DexRegisterLocation::Kind::kNone) {
          // Dex register is dead or uninitialized.
          continue;
        }

        if (location == DexRegisterLocation::Kind::kConstant) {
          // We skip constants because the compiled code knows how to handle them.
          continue;
        }

        DCHECK_EQ(location, DexRegisterLocation::Kind::kInStack);

        int32_t vreg_value = shadow_frame->GetVReg(vreg);
        int32_t slot_offset = vreg_map[vreg].GetStackOffsetInBytes();
        DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
        DCHECK_GT(slot_offset, 0);
        (reinterpret_cast<int32_t*>(memory))[slot_offset / sizeof(int32_t)] = vreg_value;
      }
    }

    native_pc = stack_map.GetNativePcOffset(kRuntimeISA) +
        osr_method->GetEntryPoint();
    VLOG(jit) << "Jumping to "
              << method_name
              << "@"
              << std::hex << reinterpret_cast<uintptr_t>(native_pc);
  }

  {
    ManagedStack fragment;
    thread->PushManagedStackFragment(&fragment);
    (*art_quick_osr_stub)(memory,
                          frame_size,
                          native_pc,
                          result,
                          shorty,
                          thread);

    if (UNLIKELY(thread->GetException() == Thread::GetDeoptimizationException())) {
      thread->DeoptimizeWithDeoptimizationException(result);
    }
    thread->PopManagedStackFragment(fragment);
  }
  free(memory);
  thread->PushShadowFrame(shadow_frame);
  VLOG(jit) << "Done running OSR code for " << method_name;
  return true;
}

void Jit::AddMemoryUsage(ArtMethod* method, size_t bytes) {
  if (bytes > 4 * MB) {
    LOG(INFO) << "Compiler allocated "
              << PrettySize(bytes)
              << " to compile "
              << ArtMethod::PrettyMethod(method);
  }
  MutexLock mu(Thread::Current(), lock_);
  memory_use_.AddValue(bytes);
}

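// Thread-pool task that either compiles a method (normal, baseline, or OSR) or
// allocates its ProfilingInfo. Holds a global ref on the declaring class of
// non-bootclasspath methods so the class cannot be unloaded mid-compilation.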
class JitCompileTask final : public Task {
 public:
  enum class TaskKind {
    kAllocateProfile,
    kCompile,
    kCompileBaseline,
    kCompileOsr,
  };

  JitCompileTask(ArtMethod* method, TaskKind kind) : method_(method), kind_(kind), klass_(nullptr) {
    ScopedObjectAccess soa(Thread::Current());
    // For a non-bootclasspath class, add a global ref to the class to prevent class unloading
    // until compilation is done.
    if (method->GetDeclaringClass()->GetClassLoader() != nullptr) {
      klass_ = soa.Vm()->AddGlobalRef(soa.Self(), method_->GetDeclaringClass());
      CHECK(klass_ != nullptr);
    }
  }

  ~JitCompileTask() {
    if (klass_ != nullptr) {
      ScopedObjectAccess soa(Thread::Current());
      soa.Vm()->DeleteGlobalRef(soa.Self(), klass_);
    }
  }

  void Run(Thread* self) override {
    ScopedObjectAccess soa(self);
    switch (kind_) {
      case TaskKind::kCompile:
      case TaskKind::kCompileBaseline:
      case TaskKind::kCompileOsr: {
        Runtime::Current()->GetJit()->CompileMethod(
            method_,
            self,
            /* baseline= */ (kind_ == TaskKind::kCompileBaseline),
            /* osr= */ (kind_ == TaskKind::kCompileOsr));
        break;
      }
      case TaskKind::kAllocateProfile: {
        if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) {
          VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
        }
        break;
      }
    }
    ProfileSaver::NotifyJitActivity();
  }

  void Finalize() override {
    delete this;
  }

 private:
  ArtMethod* const method_;
  const TaskKind kind_;
  jobject klass_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
};

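// Zygote-only task that queues compilation of the boot class path methods listed
// in the boot image profile (passed via --profile-file to the image compiler).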
class ZygoteTask final : public Task {
 public:
  ZygoteTask() {}

  void Run(Thread* self) override {
    Runtime* runtime = Runtime::Current();
    std::string profile_file;
    for (const std::string& option : runtime->GetImageCompilerOptions()) {
      if (android::base::StartsWith(option, "--profile-file=")) {
        profile_file = option.substr(strlen("--profile-file="));
        break;
      }
    }

    const std::vector<const DexFile*>& boot_class_path =
        runtime->GetClassLinker()->GetBootClassPath();
    ScopedNullHandle<mirror::ClassLoader> null_handle;
    // We add to the queue for zygote so that we can fork processes in-between
    // compilations.
    runtime->GetJit()->CompileMethodsFromProfile(
        self, boot_class_path, profile_file, null_handle, /* add_to_queue= */ true);
  }

  void Finalize() override {
    delete this;
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(ZygoteTask);
};

static std::string GetProfileFile(const std::string& dex_location) {
  // Hardcoded assumption about where the profile file is.
  // TODO(ngeoffray): this is brittle and we would need to change it if we
  // wanted to do more eager JITting of methods in a profile. This is
  // currently only for system server.
  return dex_location + ".prof";
}

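// Task that compiles the methods an app profile marks as hot; currently only
// scheduled for the system server (see RegisterDexFiles).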
class JitProfileTask final : public Task {
 public:
  JitProfileTask(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
                 ObjPtr<mirror::ClassLoader> class_loader) {
    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
    for (const auto& dex_file : dex_files) {
      dex_files_.push_back(dex_file.get());
      // Register the dex file so that we can guarantee it doesn't get deleted
      // while reading it during the task.
      class_linker->RegisterDexFile(*dex_file.get(), class_loader);
    }
    ScopedObjectAccess soa(Thread::Current());
    class_loader_ = soa.Vm()->AddGlobalRef(soa.Self(), class_loader.Ptr());
  }

  void Run(Thread* self) override {
    ScopedObjectAccess soa(self);
    StackHandleScope<1> hs(self);
    Handle<mirror::ClassLoader> loader = hs.NewHandle<mirror::ClassLoader>(
        soa.Decode<mirror::ClassLoader>(class_loader_));
    Runtime::Current()->GetJit()->CompileMethodsFromProfile(
        self,
        dex_files_,
        GetProfileFile(dex_files_[0]->GetLocation()),
        loader,
        /* add_to_queue= */ false);
  }

  void Finalize() override {
    delete this;
  }

 private:
  std::vector<const DexFile*> dex_files_;
  jobject class_loader_;

  DISALLOW_COPY_AND_ASSIGN(JitProfileTask);
};

void Jit::CreateThreadPool() {
  // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
  // is not null when we instrument.

  // We need peers as we may report the JIT thread, e.g., in the debugger.
  constexpr bool kJitPoolNeedsPeers = true;
  thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));

  thread_pool_->SetPthreadPriority(options_->GetThreadPoolPthreadPriority());
  Start();

  // If we're not using the default boot image location, request a JIT task to
  // compile all methods in the boot image profile.
  Runtime* runtime = Runtime::Current();
  if (runtime->IsZygote() && runtime->IsUsingApexBootImageLocation() && UseJitCompilation()) {
    thread_pool_->AddTask(Thread::Current(), new ZygoteTask());
  }
}

void Jit::RegisterDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
                           ObjPtr<mirror::ClassLoader> class_loader) {
  if (dex_files.empty()) {
    return;
  }
  Runtime* runtime = Runtime::Current();
  if (runtime->IsSystemServer() && runtime->IsUsingApexBootImageLocation() && UseJitCompilation()) {
    thread_pool_->AddTask(Thread::Current(), new JitProfileTask(dex_files, class_loader));
  }
}

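// Reads 'profile_file' and, for every listed method of 'dex_files' that is still
// interpreted or running a stub, either compiles it synchronously or queues a
// JitCompileTask, depending on 'add_to_queue'.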
void Jit::CompileMethodsFromProfile(
    Thread* self,
    const std::vector<const DexFile*>& dex_files,
    const std::string& profile_file,
    Handle<mirror::ClassLoader> class_loader,
    bool add_to_queue) {

  if (profile_file.empty()) {
    LOG(WARNING) << "Expected a profile file in JIT zygote mode";
    return;
  }

  std::string error_msg;
  ScopedFlock profile = LockedFile::Open(
      profile_file.c_str(), O_RDONLY, /* block= */ false, &error_msg);

  // Return early if we're unable to obtain a lock on the profile.
  if (profile.get() == nullptr) {
    LOG(ERROR) << "Cannot lock profile: " << error_msg;
    return;
  }

  ProfileCompilationInfo profile_info;
  if (!profile_info.Load(profile->Fd())) {
    LOG(ERROR) << "Could not load profile file";
    return;
  }
  ScopedObjectAccess soa(self);
  StackHandleScope<1> hs(self);
  MutableHandle<mirror::DexCache> dex_cache = hs.NewHandle<mirror::DexCache>(nullptr);
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  for (const DexFile* dex_file : dex_files) {
    if (LocationIsOnRuntimeModule(dex_file->GetLocation().c_str())) {
      // The runtime module jars are already preopted.
      continue;
    }
    // To speed up class lookups, generate a type lookup table for
    // the dex file.
    if (dex_file->GetOatDexFile() == nullptr) {
      TypeLookupTable type_lookup_table = TypeLookupTable::Create(*dex_file);
      type_lookup_tables_.push_back(
            std::make_unique<art::OatDexFile>(std::move(type_lookup_table)));
      dex_file->SetOatDexFile(type_lookup_tables_.back().get());
    }

    std::set<dex::TypeIndex> class_types;
    std::set<uint16_t> all_methods;
    if (!profile_info.GetClassesAndMethods(*dex_file,
                                           &class_types,
                                           &all_methods,
                                           &all_methods,
                                           &all_methods)) {
      // This means the profile file did not reference the dex file, which is the case
      // if there are no classes or methods of that dex file in the profile.
      continue;
    }
    dex_cache.Assign(class_linker->FindDexCache(self, *dex_file));
    CHECK(dex_cache != nullptr) << "Could not find dex cache for " << dex_file->GetLocation();

    for (uint16_t method_idx : all_methods) {
      ArtMethod* method = class_linker->ResolveMethodWithoutInvokeType(
          method_idx, dex_cache, class_loader);
      if (method == nullptr) {
        self->ClearException();
        continue;
      }
      if (!method->IsCompilable() || !method->IsInvokable()) {
        continue;
      }
      const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
      if (class_linker->IsQuickToInterpreterBridge(entry_point) ||
          class_linker->IsQuickGenericJniStub(entry_point) ||
          class_linker->IsQuickResolutionStub(entry_point)) {
        if (!method->IsNative()) {
          // The compiler requires a ProfilingInfo object for non-native methods.
          ProfilingInfo::Create(self, method, /* retry_allocation= */ true);
        }
        // Special case ZygoteServer class so that it gets compiled before the
        // zygote enters it. This avoids needing to do OSR during app startup.
        // TODO: have a profile instead.
        if (!add_to_queue || method->GetDeclaringClass()->DescriptorEquals(
                "Lcom/android/internal/os/ZygoteServer;")) {
          CompileMethod(method, self, /* baseline= */ false, /* osr= */ false);
        } else {
          thread_pool_->AddTask(self,
              new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
        }
      }
    }
  }
}

static bool IgnoreSamplesForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
  if (method->IsClassInitializer() || !method->IsCompilable()) {
    // We do not want to compile such methods.
    return true;
  }
  if (method->IsNative()) {
    ObjPtr<mirror::Class> klass = method->GetDeclaringClass();
    if (klass == GetClassRoot<mirror::MethodHandle>() ||
        klass == GetClassRoot<mirror::VarHandle>()) {
      // MethodHandle and VarHandle invocation methods are required to throw an
      // UnsupportedOperationException if invoked reflectively. We achieve this by having native
      // implementations that raise the exception. We need to disable JIT compilation of these JNI
      // methods as it can lead to transitioning between JIT compiled JNI stubs and generic JNI
      // stubs. Since these stubs have different stack representations we can then crash in stack
      // walking (b/78151261).
      return true;
    }
  }
  return false;
}

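// Called when a method's sample count crosses a batch boundary. Depending on which
// threshold the count passed, allocates a ProfilingInfo (warm), queues a normal
// compilation (hot), or queues an OSR compilation.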
bool Jit::MaybeCompileMethod(Thread* self,
                             ArtMethod* method,
                             uint32_t old_count,
                             uint32_t new_count,
                             bool with_backedges) {
  if (thread_pool_ == nullptr) {
    // Should only see this when shutting down, starting up, or in safe mode.
    DCHECK(Runtime::Current()->IsShuttingDown(self) ||
           !Runtime::Current()->IsFinishedStarting() ||
           Runtime::Current()->IsSafeMode());
    return false;
  }
  if (IgnoreSamplesForMethod(method)) {
    return false;
  }
  if (HotMethodThreshold() == 0) {
    // Tests might request JIT on first use (compiled synchronously in the interpreter).
    return false;
  }
  DCHECK(thread_pool_ != nullptr);
  DCHECK_GT(WarmMethodThreshold(), 0);
  DCHECK_GT(HotMethodThreshold(), WarmMethodThreshold());
  DCHECK_GT(OSRMethodThreshold(), HotMethodThreshold());
  DCHECK_GE(PriorityThreadWeight(), 1);
  DCHECK_LE(PriorityThreadWeight(), HotMethodThreshold());

  if (old_count < WarmMethodThreshold() && new_count >= WarmMethodThreshold()) {
    // Note: Native methods have no "warm" state or profiling info.
    if (!method->IsNative() && method->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
      bool success = ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
      if (success) {
        VLOG(jit) << "Start profiling " << method->PrettyMethod();
      }

      if (thread_pool_ == nullptr) {
        // Calling ProfilingInfo::Create might put us in a suspended state, which could
        // lead to the thread pool being deleted when we are shutting down.
        DCHECK(Runtime::Current()->IsShuttingDown(self));
        return false;
      }

      if (!success) {
        // We failed allocating. Instead of doing the collection on the Java thread, we push
        // an allocation task to a compiler thread, which will do the collection.
        thread_pool_->AddTask(
            self, new JitCompileTask(method, JitCompileTask::TaskKind::kAllocateProfile));
      }
    }
  }
  if (UseJitCompilation()) {
    if (old_count == 0 &&
        method->IsNative() &&
        Runtime::Current()->IsUsingApexBootImageLocation()) {
      // jitzygote: Compile JNI stub on first use to avoid the expensive generic stub.
      CompileMethod(method, self, /* baseline= */ false, /* osr= */ false);
      return true;
    }
    if (old_count < HotMethodThreshold() && new_count >= HotMethodThreshold()) {
      if (!code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
        DCHECK(thread_pool_ != nullptr);
        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
      }
    }
    if (old_count < OSRMethodThreshold() && new_count >= OSRMethodThreshold()) {
      if (!with_backedges) {
        return false;
      }
      DCHECK(!method->IsNative());  // No back edges reported for native methods.
      if (!code_cache_->IsOsrCompiled(method)) {
        DCHECK(thread_pool_ != nullptr);
        thread_pool_->AddTask(
            self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileOsr));
      }
    }
  }
  return true;
}

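// Temporarily marks the current thread as a runtime thread so that class loading
// triggered by a synchronous compilation behaves as it would on a JIT pool thread.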
class ScopedSetRuntimeThread {
 public:
  explicit ScopedSetRuntimeThread(Thread* self)
      : self_(self), was_runtime_thread_(self_->IsRuntimeThread()) {
    self_->SetIsRuntimeThread(true);
  }

  ~ScopedSetRuntimeThread() {
    self_->SetIsRuntimeThread(was_runtime_thread_);
  }

 private:
  Thread* self_;
  bool was_runtime_thread_;
};

void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
  Runtime* runtime = Runtime::Current();
  if (UNLIKELY(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse())) {
    ArtMethod* np_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
    if (np_method->IsCompilable()) {
      if (!np_method->IsNative()) {
        // The compiler requires a ProfilingInfo object for non-native methods.
        ProfilingInfo::Create(thread, np_method, /* retry_allocation= */ true);
      }
      JitCompileTask compile_task(method, JitCompileTask::TaskKind::kCompile);
      // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
      ScopedSetRuntimeThread ssrt(thread);
      compile_task.Run(thread);
    }
    return;
  }

  ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
  // Update the entrypoint if the ProfilingInfo has one. The interpreter will call it
  // instead of interpreting the method. We don't update it for instrumentation as the entrypoint
  // must remain the instrumentation entrypoint.
  if ((profiling_info != nullptr) &&
      (profiling_info->GetSavedEntryPoint() != nullptr) &&
      (method->GetEntryPointFromQuickCompiledCode() != GetQuickInstrumentationEntryPoint())) {
    Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
        method, profiling_info->GetSavedEntryPoint());
  } else {
    AddSamples(thread, method, 1, /* with_backedges= */false);
  }
}

void Jit::InvokeVirtualOrInterface(ObjPtr<mirror::Object> this_object,
                                   ArtMethod* caller,
                                   uint32_t dex_pc,
                                   ArtMethod* callee ATTRIBUTE_UNUSED) {
  ScopedAssertNoThreadSuspension ants(__FUNCTION__);
  DCHECK(this_object != nullptr);
  ProfilingInfo* info = caller->GetProfilingInfo(kRuntimePointerSize);
  if (info != nullptr) {
    info->AddInvokeInfo(dex_pc, this_object->GetClass());
  }
}

void Jit::WaitForCompilationToFinish(Thread* self) {
  if (thread_pool_ != nullptr) {
    thread_pool_->Wait(self, false, false);
  }
}

void Jit::Stop() {
  Thread* self = Thread::Current();
  // TODO(ngeoffray): change API to not require calling WaitForCompilationToFinish twice.
  WaitForCompilationToFinish(self);
  GetThreadPool()->StopWorkers(self);
  WaitForCompilationToFinish(self);
}

void Jit::Start() {
  GetThreadPool()->StartWorkers(Thread::Current());
}

ScopedJitSuspend::ScopedJitSuspend() {
  jit::Jit* jit = Runtime::Current()->GetJit();
  was_on_ = (jit != nullptr) && (jit->GetThreadPool() != nullptr);
  if (was_on_) {
    jit->Stop();
  }
}

ScopedJitSuspend::~ScopedJitSuspend() {
  if (was_on_) {
    DCHECK(Runtime::Current()->GetJit() != nullptr);
    DCHECK(Runtime::Current()->GetJit()->GetThreadPool() != nullptr);
    Runtime::Current()->GetJit()->Start();
  }
}

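// Runs in the child process after a fork from the zygote: re-parses compiler
// options, re-evaluates code cache collection, and restarts the JIT threads that
// PreZygoteFork() tore down.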
void Jit::PostForkChildAction(bool is_system_server, bool is_zygote) {
  if (is_zygote) {
    // Remove potential tasks that have been inherited from the zygote. Child zygotes
    // (e.g. webview_zygote) currently don't need the whole boot image compiled.
    thread_pool_->RemoveAllTasks(Thread::Current());
    // Don't transition if this is for a child zygote.
    return;
  }
  if (Runtime::Current()->IsSafeMode()) {
    // Delete the thread pool, we are not going to JIT.
    thread_pool_.reset(nullptr);
    return;
  }
  // At this point, the compiler options have been adjusted to the particular configuration
  // of the forked child. Parse them again.
  jit_update_options_(jit_compiler_handle_);

  // Adjust the status of code cache collection: the status from zygote was to not collect.
  code_cache_->SetGarbageCollectCode(!jit_generate_debug_info_(jit_compiler_handle_) &&
      !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled());

  if (thread_pool_ != nullptr) {
    if (!is_system_server) {
      // Remove potential tasks that have been inherited from the zygote.
      // We keep the queue for system server, as not having those methods compiled
      // impacts app startup.
      thread_pool_->RemoveAllTasks(Thread::Current());
    } else if (Runtime::Current()->IsUsingApexBootImageLocation() && UseJitCompilation()) {
      // Disable garbage collection: we don't want it to delete methods we're compiling
      // through boot and system server profiles.
      // TODO(ngeoffray): Fix this so we still collect deoptimized and unused code.
      code_cache_->SetGarbageCollectCode(false);
    }

    // Resume JIT compilation.
    thread_pool_->CreateThreads();
  }
}

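// The zygote deletes its JIT worker threads before forking and recreates them
// afterwards, so no pool threads exist while the fork happens.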
void Jit::PreZygoteFork() {
  if (thread_pool_ == nullptr) {
    return;
  }
  thread_pool_->DeleteThreads();
}

void Jit::PostZygoteFork() {
  if (thread_pool_ == nullptr) {
    return;
  }
  thread_pool_->CreateThreads();
}

}  // namespace jit
}  // namespace art