/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit.h"

#include <dlfcn.h>

#include "art_method-inl.h"
#include "base/enums.h"
#include "base/file_utils.h"
#include "base/logging.h"  // For VLOG.
#include "base/memfd.h"
#include "base/memory_tool.h"
#include "base/runtime_debug.h"
#include "base/scoped_flock.h"
#include "base/utils.h"
#include "class_root.h"
#include "debugger.h"
#include "dex/type_lookup_table.h"
#include "gc/space/image_space.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "image-inl.h"
#include "interpreter/interpreter.h"
#include "jit-inl.h"
#include "jit_code_cache.h"
#include "jni/java_vm_ext.h"
#include "mirror/method_handle_impl.h"
#include "mirror/var_handle.h"
#include "oat_file.h"
#include "oat_file_manager.h"
#include "oat_quick_method_header.h"
#include "profile/profile_boot_info.h"
#include "profile/profile_compilation_info.h"
#include "profile_saver.h"
#include "runtime.h"
#include "runtime_options.h"
#include "stack.h"
#include "stack_map.h"
#include "thread-inl.h"
#include "thread_list.h"

using android::base::unique_fd;

namespace art {
namespace jit {

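// Compile-time kill switch for on-stack replacement (OSR): when false,
// Jit::PrepareForOsr bails out immediately and OSR is never attempted.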
static constexpr bool kEnableOnStackReplacement = true;

// Maximum permitted threshold value.
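// (Presumably the cap exists because method hotness is tracked in a 16-bit counter.)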
static constexpr uint32_t kJitMaxThreshold = std::numeric_limits<uint16_t>::max();

// Different compilation threshold constants. These can be overridden on the command line.

// Non-debug default.
static constexpr uint32_t kJitDefaultCompileThreshold = 20 * kJitSamplesBatchSize;
// Fast-debug build.
static constexpr uint32_t kJitStressDefaultCompileThreshold = 2 * kJitSamplesBatchSize;
// Slow-debug build.
static constexpr uint32_t kJitSlowStressDefaultCompileThreshold = 2;

// Different warm-up threshold constants. These default to the equivalent compile thresholds divided
// by 2, but can be overridden at the command-line.
static constexpr uint32_t kJitDefaultWarmUpThreshold = kJitDefaultCompileThreshold / 2;
static constexpr uint32_t kJitStressDefaultWarmUpThreshold = kJitStressDefaultCompileThreshold / 2;
static constexpr uint32_t kJitSlowStressDefaultWarmUpThreshold =
    kJitSlowStressDefaultCompileThreshold / 2;
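// For example, if kJitSamplesBatchSize is 32 (see its definition in jit.h), the
// release defaults work out to 640 samples to compile and 320 samples to warm up,
// while the slow-debug build compiles after only 2 samples.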

DEFINE_RUNTIME_DEBUG_FLAG(Jit, kSlowMode);

// JIT compiler
void* Jit::jit_library_handle_ = nullptr;
JitCompilerInterface* Jit::jit_compiler_ = nullptr;
JitCompilerInterface* (*Jit::jit_load_)(void) = nullptr;

JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
  auto* jit_options = new JitOptions;
  jit_options->use_jit_compilation_ = options.GetOrDefault(RuntimeArgumentMap::UseJitCompilation);
  jit_options->use_tiered_jit_compilation_ =
      options.GetOrDefault(RuntimeArgumentMap::UseTieredJitCompilation);

  jit_options->code_cache_initial_capacity_ =
      options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheInitialCapacity);
  jit_options->code_cache_max_capacity_ =
      options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheMaxCapacity);
  jit_options->dump_info_on_shutdown_ =
      options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown);
  jit_options->profile_saver_options_ =
      options.GetOrDefault(RuntimeArgumentMap::ProfileSaverOpts);
  jit_options->thread_pool_pthread_priority_ =
      options.GetOrDefault(RuntimeArgumentMap::JITPoolThreadPthreadPriority);

  // Set the default compile threshold to aid sanity checking of the defaults.
  jit_options->compile_threshold_ =
      kIsDebugBuild
      ? (Jit::kSlowMode
         ? kJitSlowStressDefaultCompileThreshold
         : kJitStressDefaultCompileThreshold)
      : kJitDefaultCompileThreshold;

  // When not running in slow-mode, thresholds are quantized to kJitSamplesBatchSize.
  const uint32_t kJitThresholdStep = Jit::kSlowMode ? 1u : kJitSamplesBatchSize;

  // Set the default warm-up threshold to aid sanity checking of the defaults.
  jit_options->warmup_threshold_ =
      kIsDebugBuild ? (Jit::kSlowMode
                       ? kJitSlowStressDefaultWarmUpThreshold
                       : kJitStressDefaultWarmUpThreshold)
      : kJitDefaultWarmUpThreshold;

  // The warm-up threshold should be less than the compile threshold (so long as the compile
  // threshold is not zero == JIT-on-first-use).
  DCHECK_LT(jit_options->warmup_threshold_, jit_options->compile_threshold_);
  DCHECK_EQ(RoundUp(jit_options->warmup_threshold_, kJitThresholdStep),
            jit_options->warmup_threshold_);

  if (options.Exists(RuntimeArgumentMap::JITCompileThreshold)) {
    jit_options->compile_threshold_ = *options.Get(RuntimeArgumentMap::JITCompileThreshold);
  }
  jit_options->compile_threshold_ = RoundUp(jit_options->compile_threshold_, kJitThresholdStep);

  if (options.Exists(RuntimeArgumentMap::JITWarmupThreshold)) {
    jit_options->warmup_threshold_ = *options.Get(RuntimeArgumentMap::JITWarmupThreshold);
  }
  jit_options->warmup_threshold_ = RoundUp(jit_options->warmup_threshold_, kJitThresholdStep);

  if (options.Exists(RuntimeArgumentMap::JITOsrThreshold)) {
    jit_options->osr_threshold_ = *options.Get(RuntimeArgumentMap::JITOsrThreshold);
  } else {
    jit_options->osr_threshold_ = jit_options->compile_threshold_ * 2;
    if (jit_options->osr_threshold_ > kJitMaxThreshold) {
      jit_options->osr_threshold_ =
          RoundDown(kJitMaxThreshold, kJitThresholdStep);
    }
  }
  jit_options->osr_threshold_ = RoundUp(jit_options->osr_threshold_, kJitThresholdStep);

  // Enforce ordering constraints between thresholds if not jit-on-first-use (when the compile
  // threshold is 0).
  if (jit_options->compile_threshold_ != 0) {
    // Clamp thresholds such that OSR > compile > warm-up (see Jit::MaybeCompileMethod).
    jit_options->osr_threshold_ = std::clamp(jit_options->osr_threshold_,
                                             2u * kJitThresholdStep,
                                             RoundDown(kJitMaxThreshold, kJitThresholdStep));
    jit_options->compile_threshold_ = std::clamp(jit_options->compile_threshold_,
                                                 kJitThresholdStep,
                                                 jit_options->osr_threshold_ - kJitThresholdStep);
    jit_options->warmup_threshold_ =
        std::clamp(jit_options->warmup_threshold_,
                   0u,
                   jit_options->compile_threshold_ - kJitThresholdStep);
  }
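  // After the clamping above (and when not JIT-on-first-use), the invariant
  //   warmup_threshold_ < compile_threshold_ < osr_threshold_ <= kJitMaxThreshold
  // holds, with each threshold a multiple of kJitThresholdStep.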

  if (options.Exists(RuntimeArgumentMap::JITPriorityThreadWeight)) {
    jit_options->priority_thread_weight_ =
        *options.Get(RuntimeArgumentMap::JITPriorityThreadWeight);
    if (jit_options->priority_thread_weight_ > jit_options->warmup_threshold_) {
      LOG(FATAL) << "Priority thread weight is above the warmup threshold.";
    } else if (jit_options->priority_thread_weight_ == 0) {
      LOG(FATAL) << "Priority thread weight cannot be 0.";
    }
  } else {
    jit_options->priority_thread_weight_ = std::max(
        jit_options->warmup_threshold_ / Jit::kDefaultPriorityThreadWeightRatio,
        static_cast<size_t>(1));
  }

  if (options.Exists(RuntimeArgumentMap::JITInvokeTransitionWeight)) {
    jit_options->invoke_transition_weight_ =
        *options.Get(RuntimeArgumentMap::JITInvokeTransitionWeight);
    if (jit_options->invoke_transition_weight_ > jit_options->warmup_threshold_) {
      LOG(FATAL) << "Invoke transition weight is above the warmup threshold.";
    } else if (jit_options->invoke_transition_weight_ == 0) {
      LOG(FATAL) << "Invoke transition weight cannot be 0.";
    }
  } else {
    jit_options->invoke_transition_weight_ = std::max(
        jit_options->warmup_threshold_ / Jit::kDefaultInvokeTransitionWeightRatio,
        static_cast<size_t>(1));
  }

  return jit_options;
}

void Jit::DumpInfo(std::ostream& os) {
  code_cache_->Dump(os);
  cumulative_timings_.Dump(os);
  MutexLock mu(Thread::Current(), lock_);
  memory_use_.PrintMemoryUse(os);
}

void Jit::DumpForSigQuit(std::ostream& os) {
  DumpInfo(os);
  ProfileSaver::DumpInstanceInfo(os);
}

void Jit::AddTimingLogger(const TimingLogger& logger) {
  cumulative_timings_.AddLogger(logger);
}

Jit::Jit(JitCodeCache* code_cache, JitOptions* options)
    : code_cache_(code_cache),
      options_(options),
      boot_completed_lock_("Jit::boot_completed_lock_"),
      cumulative_timings_("JIT timings"),
      memory_use_("Memory used for compilation", 16),
      lock_("JIT memory use lock"),
      zygote_mapping_methods_(),
      fd_methods_(-1),
      fd_methods_size_(0) {}

Jit* Jit::Create(JitCodeCache* code_cache, JitOptions* options) {
  if (jit_load_ == nullptr) {
    LOG(WARNING) << "Not creating JIT: library not loaded";
    return nullptr;
  }
  jit_compiler_ = (jit_load_)();
  if (jit_compiler_ == nullptr) {
    LOG(WARNING) << "Not creating JIT: failed to allocate a compiler";
    return nullptr;
  }
  std::unique_ptr<Jit> jit(new Jit(code_cache, options));

  // If the code collector is enabled, check if that still holds:
  // With 'perf', we want a 1-1 mapping between an address and a method.
  // We aren't able to keep method pointers live during the instrumentation method entry trampoline
  // so we will just disable jit-gc if we are doing that.
  if (code_cache->GetGarbageCollectCode()) {
    code_cache->SetGarbageCollectCode(!jit_compiler_->GenerateDebugInfo() &&
        !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled());
  }

  VLOG(jit) << "JIT created with initial_capacity="
      << PrettySize(options->GetCodeCacheInitialCapacity())
      << ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity())
      << ", compile_threshold=" << options->GetCompileThreshold()
      << ", profile_saver_options=" << options->GetProfileSaverOptions();

  // We want to know whether the compiler is compiling baseline, as this
  // affects how we GC ProfilingInfos.
  for (const std::string& option : Runtime::Current()->GetCompilerOptions()) {
    if (option == "--baseline") {
      options->SetUseBaselineCompiler();
      break;
    }
  }

  // Notify native debugger about the classes already loaded before the creation of the jit.
  jit->DumpTypeInfoForLoadedTypes(Runtime::Current()->GetClassLinker());
  return jit.release();
}

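// Helper used by LoadCompilerLibrary below to resolve entry points such as
// "jit_load" from the dlopen'ed compiler library.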
template <typename T>
bool Jit::LoadSymbol(T* address, const char* name, std::string* error_msg) {
  *address = reinterpret_cast<T>(dlsym(jit_library_handle_, name));
  if (*address == nullptr) {
    *error_msg = std::string("JIT couldn't find ") + name + std::string(" entry point");
    return false;
  }
  return true;
}

bool Jit::LoadCompilerLibrary(std::string* error_msg) {
  jit_library_handle_ = dlopen(
      kIsDebugBuild ? "libartd-compiler.so" : "libart-compiler.so", RTLD_NOW);
  if (jit_library_handle_ == nullptr) {
    std::ostringstream oss;
    oss << "JIT could not load libart-compiler.so: " << dlerror();
    *error_msg = oss.str();
    return false;
  }
  if (!LoadSymbol(&jit_load_, "jit_load", error_msg)) {
    dlclose(jit_library_handle_);
    return false;
  }
  return true;
}

bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr, bool prejit) {
  DCHECK(Runtime::Current()->UseJitCompilation());
  DCHECK(!method->IsRuntimeMethod());

  RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
  // Don't compile the method if it has breakpoints.
  if (cb->IsMethodBeingInspected(method) && !cb->IsMethodSafeToJit(method)) {
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod()
              << " due to not being safe to jit according to runtime-callbacks. For example, there"
              << " could be breakpoints in this method.";
    return false;
  }

  if (!method->IsCompilable()) {
    DCHECK(method->GetDeclaringClass()->IsObsoleteObject() ||
           method->IsProxyMethod()) << method->PrettyMethod();
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to method being made "
              << "obsolete while waiting for JIT task to run. This probably happened due to "
              << "concurrent structural class redefinition.";
    return false;
  }

  // Don't compile the method if we are supposed to be deoptimized.
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
  if (instrumentation->AreAllMethodsDeoptimized() || instrumentation->IsDeoptimized(method)) {
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to deoptimization";
    return false;
  }

  JitMemoryRegion* region = GetCodeCache()->GetCurrentRegion();
  if (osr && GetCodeCache()->IsSharedRegion(*region)) {
    VLOG(jit) << "JIT not osr compiling "
              << method->PrettyMethod()
              << " due to using shared region";
    return false;
  }

  // If we get a request to compile a proxy method, we pass the actual Java method
  // of that proxy method, as the compiler does not expect a proxy method.
  ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr, prejit, baseline, region)) {
    return false;
  }

  VLOG(jit) << "Compiling method "
            << ArtMethod::PrettyMethod(method_to_compile)
            << " osr=" << std::boolalpha << osr
            << " baseline=" << std::boolalpha << baseline;
  bool success = jit_compiler_->CompileMethod(self, region, method_to_compile, baseline, osr);
  code_cache_->DoneCompiling(method_to_compile, self, osr);
  if (!success) {
    VLOG(jit) << "Failed to compile method "
              << ArtMethod::PrettyMethod(method_to_compile)
              << " osr=" << std::boolalpha << osr;
  }
  if (kIsDebugBuild) {
    if (self->IsExceptionPending()) {
      mirror::Throwable* exception = self->GetException();
      LOG(FATAL) << "No pending exception expected after compiling "
                 << ArtMethod::PrettyMethod(method)
                 << ": "
                 << exception->Dump();
    }
  }
  return success;
}

void Jit::WaitForWorkersToBeCreated() {
  if (thread_pool_ != nullptr) {
    thread_pool_->WaitForWorkersToBeCreated();
  }
}

void Jit::DeleteThreadPool() {
  Thread* self = Thread::Current();
  if (thread_pool_ != nullptr) {
    std::unique_ptr<ThreadPool> pool;
    {
      ScopedSuspendAll ssa(__FUNCTION__);
      // Clear thread_pool_ field while the threads are suspended.
      // A mutator in the 'AddSamples' method will check against it.
      pool = std::move(thread_pool_);
    }

    // When running sanitized, let all tasks finish to not leak. Otherwise just clear the queue.
    if (!kRunningOnMemoryTool) {
      pool->StopWorkers(self);
      pool->RemoveAllTasks(self);
    }
    // We could just suspend all threads, but we know those threads
    // will finish in a short period, so it's not worth adding a suspend logic
    // here. Besides, this is only done for shutdown.
    pool->Wait(self, false, false);
  }
}

void Jit::StartProfileSaver(const std::string& filename,
                            const std::vector<std::string>& code_paths) {
  if (options_->GetSaveProfilingInfo()) {
    ProfileSaver::Start(options_->GetProfileSaverOptions(), filename, code_cache_, code_paths);
  }
}

void Jit::StopProfileSaver() {
  if (options_->GetSaveProfilingInfo() && ProfileSaver::IsStarted()) {
    ProfileSaver::Stop(options_->DumpJitInfoOnShutdown());
  }
}

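// A compile threshold of zero means every method is compiled on its first invocation.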
bool Jit::JitAtFirstUse() {
  return HotMethodThreshold() == 0;
}

bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
  return code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
}

Jit::~Jit() {
  DCHECK(!options_->GetSaveProfilingInfo() || !ProfileSaver::IsStarted());
  if (options_->DumpJitInfoOnShutdown()) {
    DumpInfo(LOG_STREAM(INFO));
    Runtime::Current()->DumpDeoptimizations(LOG_STREAM(INFO));
  }
  DeleteThreadPool();
  if (jit_compiler_ != nullptr) {
    delete jit_compiler_;
    jit_compiler_ = nullptr;
  }
  if (jit_library_handle_ != nullptr) {
    dlclose(jit_library_handle_);
    jit_library_handle_ = nullptr;
  }
}

void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
  if (!Runtime::Current()->UseJitCompilation()) {
    // No need to notify if we only use the JIT to save profiles.
    return;
  }
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit->jit_compiler_->GenerateDebugInfo()) {
    jit_compiler_->TypesLoaded(&type, 1);
  }
}

void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
  struct CollectClasses : public ClassVisitor {
    bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
      classes_.push_back(klass.Ptr());
      return true;
    }
    std::vector<mirror::Class*> classes_;
  };

  if (jit_compiler_->GenerateDebugInfo()) {
    ScopedObjectAccess so(Thread::Current());

    CollectClasses visitor;
    linker->VisitClasses(&visitor);
    jit_compiler_->TypesLoaded(visitor.classes_.data(), visitor.classes_.size());
  }
}

extern "C" void art_quick_osr_stub(void** stack,
                                   size_t stack_size_in_bytes,
                                   const uint8_t* native_pc,
                                   JValue* result,
                                   const char* shorty,
                                   Thread* self);

OsrData* Jit::PrepareForOsr(ArtMethod* method, uint32_t dex_pc, uint32_t* vregs) {
  if (!kEnableOnStackReplacement) {
    return nullptr;
  }

  // Cheap check if the method has been compiled already. That's an indicator that we should
  // osr into it.
  if (!GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return nullptr;
  }

  // Fetch some data before looking up an OSR method. We don't want thread
  // suspension once we hold an OSR method, as the JIT code cache could delete the OSR
  // method while we are being suspended.
  CodeItemDataAccessor accessor(method->DexInstructionData());
  const size_t number_of_vregs = accessor.RegistersSize();
  std::string method_name(VLOG_IS_ON(jit) ? method->PrettyMethod() : "");
  OsrData* osr_data = nullptr;

  {
    ScopedAssertNoThreadSuspension sts("Holding OSR method");
    const OatQuickMethodHeader* osr_method = GetCodeCache()->LookupOsrMethodHeader(method);
    if (osr_method == nullptr) {
      // No osr method yet, just return to the interpreter.
      return nullptr;
    }

    CodeInfo code_info(osr_method);

    // Find stack map starting at the target dex_pc.
    StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc);
    if (!stack_map.IsValid()) {
      // There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
      // hope that the next branch has one.
      return nullptr;
    }

    // We found a stack map, now fill the frame with dex register values from the interpreter's
    // shadow frame.
    DexRegisterMap vreg_map = code_info.GetDexRegisterMapOf(stack_map);
    DCHECK_EQ(vreg_map.size(), number_of_vregs);

    size_t frame_size = osr_method->GetFrameSizeInBytes();

    // Allocate memory to put shadow frame values. The osr stub will copy that memory to
    // stack.
    // Note that we could pass the shadow frame to the stub, and let it copy the values there,
    // but that is engineering complexity not worth the effort for something like OSR.
    osr_data = reinterpret_cast<OsrData*>(malloc(sizeof(OsrData) + frame_size));
    if (osr_data == nullptr) {
      return nullptr;
    }
    memset(osr_data, 0, sizeof(OsrData) + frame_size);
    osr_data->frame_size = frame_size;

    // Art ABI: ArtMethod is at the bottom of the stack.
    osr_data->memory[0] = method;

    if (vreg_map.empty()) {
      // If we don't have a dex register map, then there are no live dex registers at
      // this dex pc.
    } else {
      for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
        DexRegisterLocation::Kind location = vreg_map[vreg].GetKind();
        if (location == DexRegisterLocation::Kind::kNone) {
          // Dex register is dead or uninitialized.
          continue;
        }

        if (location == DexRegisterLocation::Kind::kConstant) {
          // We skip constants because the compiled code knows how to handle them.
          continue;
        }

        DCHECK_EQ(location, DexRegisterLocation::Kind::kInStack);

        int32_t vreg_value = vregs[vreg];
        int32_t slot_offset = vreg_map[vreg].GetStackOffsetInBytes();
        DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
        DCHECK_GT(slot_offset, 0);
        (reinterpret_cast<int32_t*>(osr_data->memory))[slot_offset / sizeof(int32_t)] = vreg_value;
      }
    }

    osr_data->native_pc = stack_map.GetNativePcOffset(kRuntimeISA) +
        osr_method->GetEntryPoint();
    VLOG(jit) << "Jumping to "
              << method_name
              << "@"
              << std::hex << reinterpret_cast<uintptr_t>(osr_data->native_pc);
  }
  return osr_data;
}

bool Jit::MaybeDoOnStackReplacement(Thread* thread,
                                    ArtMethod* method,
                                    uint32_t dex_pc,
                                    int32_t dex_pc_offset,
                                    JValue* result) {
  Jit* jit = Runtime::Current()->GetJit();
  if (jit == nullptr) {
    return false;
  }

  if (UNLIKELY(__builtin_frame_address(0) < thread->GetStackEnd())) {
    // Don't attempt to do an OSR if we are close to the stack limit. Since
    // the interpreter frames are still on stack, OSR has the potential
    // to stack overflow even for a simple loop.
    // b/27094810.
    return false;
  }

  // Get the actual Java method if this method is from a proxy class. The compiler
  // and the JIT code cache do not expect methods from proxy classes.
  method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);

  // Before allowing the jump, make sure no code is actively inspecting the method to avoid
  // jumping from interpreter to OSR while e.g. single stepping. Note that we could selectively
  // disable OSR when single stepping, but that's currently hard to know at this point.
  if (Runtime::Current()->GetRuntimeCallbacks()->IsMethodBeingInspected(method)) {
    return false;
  }

  ShadowFrame* shadow_frame = thread->GetManagedStack()->GetTopShadowFrame();
  OsrData* osr_data = jit->PrepareForOsr(method,
                                         dex_pc + dex_pc_offset,
                                         shadow_frame->GetVRegArgs(0));

  if (osr_data == nullptr) {
    return false;
  }

  {
    thread->PopShadowFrame();
    ManagedStack fragment;
    thread->PushManagedStackFragment(&fragment);
    (*art_quick_osr_stub)(osr_data->memory,
                          osr_data->frame_size,
                          osr_data->native_pc,
                          result,
                          method->GetShorty(),
                          thread);

    if (UNLIKELY(thread->GetException() == Thread::GetDeoptimizationException())) {
      thread->DeoptimizeWithDeoptimizationException(result);
    }
    thread->PopManagedStackFragment(fragment);
  }
  free(osr_data);
  thread->PushShadowFrame(shadow_frame);
  VLOG(jit) << "Done running OSR code for " << method->PrettyMethod();
  return true;
}

void Jit::AddMemoryUsage(ArtMethod* method, size_t bytes) {
  if (bytes > 4 * MB) {
    LOG(INFO) << "Compiler allocated "
              << PrettySize(bytes)
              << " to compile "
              << ArtMethod::PrettyMethod(method);
  }
  MutexLock mu(Thread::Current(), lock_);
  memory_use_.AddValue(bytes);
}

void Jit::NotifyZygoteCompilationDone() {
  if (fd_methods_ == -1) {
    return;
  }

  size_t offset = 0;
  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
    const ImageHeader& header = space->GetImageHeader();
    const ImageSection& section = header.GetMethodsSection();
    // Because mremap works at page boundaries, we can only handle methods
    // within a page range. For methods that fall above or below the range,
    // the child processes will copy their contents to their private mapping
    // in `child_mapping_methods`. See `MapBootImageMethods`.
    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
    uint8_t* page_end =
        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
    if (page_end > page_start) {
      uint64_t capacity = page_end - page_start;
      memcpy(zygote_mapping_methods_.Begin() + offset, page_start, capacity);
      offset += capacity;
    }
  }

  // Do an msync to ensure we are not affected by writes still being in caches.
  if (msync(zygote_mapping_methods_.Begin(), fd_methods_size_, MS_SYNC) != 0) {
    PLOG(WARNING) << "Failed to sync boot image methods memory";
    code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
    return;
  }

  // We don't need the shared mapping anymore, and we need to drop it in case
  // the file hasn't been sealed writable.
  zygote_mapping_methods_ = MemMap::Invalid();

  // Seal writes now. Zygote and children will map the memory private in order
  // to write to it.
  if (fcntl(fd_methods_, F_ADD_SEALS, F_SEAL_SEAL | F_SEAL_WRITE) == -1) {
    PLOG(WARNING) << "Failed to seal boot image methods file descriptor";
    code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
    return;
  }

  std::string error_str;
  MemMap child_mapping_methods = MemMap::MapFile(
      fd_methods_size_,
      PROT_READ | PROT_WRITE,
      MAP_PRIVATE,
      fd_methods_,
      /* start= */ 0,
      /* low_4gb= */ false,
      "boot-image-methods",
      &error_str);

  if (!child_mapping_methods.IsValid()) {
    LOG(WARNING) << "Failed to create child mapping of boot image methods: " << error_str;
    code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
    return;
  }

  // Ensure the contents are the same as before: there was a window between
  // the memcpy and the sealing where other processes could have changed the
  // contents.
  // Note this would not be needed if we could have used F_SEAL_FUTURE_WRITE,
  // see b/143833776.
  offset = 0;
  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
    const ImageHeader& header = space->GetImageHeader();
    const ImageSection& section = header.GetMethodsSection();
    // Because mremap works at page boundaries, we can only handle methods
    // within a page range. For methods that fall above or below the range,
    // the child processes will copy their contents to their private mapping
    // in `child_mapping_methods`. See `MapBootImageMethods`.
    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
    uint8_t* page_end =
        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
    if (page_end > page_start) {
      uint64_t capacity = page_end - page_start;
      if (memcmp(child_mapping_methods.Begin() + offset, page_start, capacity) != 0) {
        LOG(WARNING) << "Contents differ in boot image methods data";
        code_cache_->GetZygoteMap()->SetCompilationState(
            ZygoteCompilationState::kNotifiedFailure);
        return;
      }
      offset += capacity;
    }
  }

  // Future spawned processes don't need the fd anymore.
  fd_methods_.reset();

  // In order to have the zygote and children share the memory, we also remap
  // the memory into the zygote process.
  offset = 0;
  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
    const ImageHeader& header = space->GetImageHeader();
    const ImageSection& section = header.GetMethodsSection();
    // Because mremap works at page boundaries, we can only handle methods
    // within a page range. For methods that fall above or below the range,
    // the child processes will copy their contents to their private mapping
    // in `child_mapping_methods`. See `MapBootImageMethods`.
    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
    uint8_t* page_end =
        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
    if (page_end > page_start) {
      uint64_t capacity = page_end - page_start;
      if (mremap(child_mapping_methods.Begin() + offset,
                 capacity,
                 capacity,
                 MREMAP_FIXED | MREMAP_MAYMOVE,
                 page_start) == MAP_FAILED) {
        // Failing to remap is safe as the process will just use the old
        // contents.
        PLOG(WARNING) << "Failed mremap of boot image methods of " << space->GetImageFilename();
      }
      offset += capacity;
    }
  }

  LOG(INFO) << "Successfully notified child processes on sharing boot image methods";

  // Mark that compilation of boot classpath is done, and memory can now be
  // shared. Other processes will pick up this information.
  code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedOk);

  // The private mapping created for this process has been mremapped. We can
  // reset it.
  child_mapping_methods.Reset();
}

class JitCompileTask final : public Task {
 public:
  enum class TaskKind {
    kAllocateProfile,
    kCompile,
    kCompileBaseline,
    kCompileOsr,
    kPreCompile,
  };

  JitCompileTask(ArtMethod* method, TaskKind kind) : method_(method), kind_(kind), klass_(nullptr) {
    ScopedObjectAccess soa(Thread::Current());
    // For a non-bootclasspath class, add a global ref to the class to prevent class unloading
    // until compilation is done.
    // When we precompile, this is either with boot classpath methods, or main
    // class loader methods, so we don't need to keep a global reference.
    if (method->GetDeclaringClass()->GetClassLoader() != nullptr &&
        kind_ != TaskKind::kPreCompile) {
      klass_ = soa.Vm()->AddGlobalRef(soa.Self(), method_->GetDeclaringClass());
      CHECK(klass_ != nullptr);
    }
  }

  ~JitCompileTask() {
    if (klass_ != nullptr) {
      ScopedObjectAccess soa(Thread::Current());
      soa.Vm()->DeleteGlobalRef(soa.Self(), klass_);
    }
  }

  void Run(Thread* self) override {
    {
      ScopedObjectAccess soa(self);
      switch (kind_) {
        case TaskKind::kPreCompile:
        case TaskKind::kCompile:
        case TaskKind::kCompileBaseline:
        case TaskKind::kCompileOsr: {
          Runtime::Current()->GetJit()->CompileMethod(
              method_,
              self,
              /* baseline= */ (kind_ == TaskKind::kCompileBaseline),
              /* osr= */ (kind_ == TaskKind::kCompileOsr),
              /* prejit= */ (kind_ == TaskKind::kPreCompile));
          break;
        }
        case TaskKind::kAllocateProfile: {
          if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) {
            VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
          }
          break;
        }
      }
    }
    ProfileSaver::NotifyJitActivity();
  }

  void Finalize() override {
    delete this;
  }

 private:
  ArtMethod* const method_;
  const TaskKind kind_;
  jobject klass_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
};

static std::string GetProfileFile(const std::string& dex_location) {
  // Hardcoded assumption where the profile file is.
  // TODO(ngeoffray): this is brittle and we would need to change this if we
  // wanted to do more eager JITting of methods in a profile. This is
  // currently only for system server.
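  // For example, a dex location like "/system/framework/services.jar" simply
  // becomes "/system/framework/services.jar.prof".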
  return dex_location + ".prof";
}

static std::string GetBootProfileFile(const std::string& profile) {
  // The boot profile can be found next to the compilation profile, with a
  // different extension.
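  // For example, "services.jar.prof" becomes "services.jar.bprof".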
  return ReplaceFileExtension(profile, "bprof");
}

/**
 * A JIT task to run after all profile compilation is done.
 */
class JitDoneCompilingProfileTask final : public SelfDeletingTask {
 public:
  explicit JitDoneCompilingProfileTask(const std::vector<const DexFile*>& dex_files)
      : dex_files_(dex_files) {}

  void Run(Thread* self ATTRIBUTE_UNUSED) override {
    // Madvise DONTNEED dex files now that we're done compiling methods.
    for (const DexFile* dex_file : dex_files_) {
      if (IsAddressKnownBackedByFileOrShared(dex_file->Begin())) {
        int result = madvise(const_cast<uint8_t*>(AlignDown(dex_file->Begin(), kPageSize)),
                             RoundUp(dex_file->Size(), kPageSize),
                             MADV_DONTNEED);
        if (result == -1) {
          PLOG(WARNING) << "Madvise failed";
        }
      }
    }

    if (Runtime::Current()->IsZygote()) {
      // Record that we are done compiling the profile.
      Runtime::Current()->GetJit()->GetCodeCache()->GetZygoteMap()->SetCompilationState(
          ZygoteCompilationState::kDone);
    }
  }

 private:
  std::vector<const DexFile*> dex_files_;

  DISALLOW_COPY_AND_ASSIGN(JitDoneCompilingProfileTask);
};

/**
 * A JIT task to run Java verification of boot classpath classes that were not
 * verified at compile-time.
 */
class ZygoteVerificationTask final : public Task {
 public:
  ZygoteVerificationTask() {}

  void Run(Thread* self) override {
    // We are going to load classes and run verification, which may also need to load
    // classes. If the thread cannot load classes (typically when the runtime is
    // debuggable), then just return.
    if (!self->CanLoadClasses()) {
      return;
    }
    Runtime* runtime = Runtime::Current();
    ClassLinker* linker = runtime->GetClassLinker();
    const std::vector<const DexFile*>& boot_class_path =
        runtime->GetClassLinker()->GetBootClassPath();
    ScopedObjectAccess soa(self);
    StackHandleScope<1> hs(self);
    MutableHandle<mirror::Class> klass = hs.NewHandle<mirror::Class>(nullptr);
    uint64_t start_ns = ThreadCpuNanoTime();
    uint64_t number_of_classes = 0;
    for (const DexFile* dex_file : boot_class_path) {
      if (dex_file->GetOatDexFile() != nullptr &&
          dex_file->GetOatDexFile()->GetOatFile() != nullptr) {
        // If backed by an .oat file, we have already run verification at
        // compile-time. Note that some classes may still have failed
        // verification there if they reference updatable mainline module
        // classes.
        continue;
      }
      for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
        const dex::ClassDef& class_def = dex_file->GetClassDef(i);
        const char* descriptor = dex_file->GetClassDescriptor(class_def);
        ScopedNullHandle<mirror::ClassLoader> null_loader;
        klass.Assign(linker->FindClass(self, descriptor, null_loader));
        if (klass == nullptr) {
          self->ClearException();
          LOG(WARNING) << "Could not find " << descriptor;
          continue;
        }
        ++number_of_classes;
        if (linker->VerifyClass(self, klass) == verifier::FailureKind::kHardFailure) {
          DCHECK(self->IsExceptionPending());
          LOG(FATAL) << "Methods in the boot classpath failed to verify: "
                     << self->GetException()->Dump();
        }
        CHECK(!self->IsExceptionPending());
      }
    }
    LOG(INFO) << "Verified "
              << number_of_classes
              << " classes from mainline modules in "
              << PrettyDuration(ThreadCpuNanoTime() - start_ns);
  }
};

class ZygoteTask final : public Task {
 public:
  ZygoteTask() {}

  void Run(Thread* self) override {
    Runtime* runtime = Runtime::Current();
    uint32_t added_to_queue = 0;
    for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
      const std::string& profile_file = space->GetProfileFile();
      if (profile_file.empty()) {
        continue;
      }
      LOG(INFO) << "JIT Zygote looking at profile " << profile_file;

      const std::vector<const DexFile*>& boot_class_path =
          runtime->GetClassLinker()->GetBootClassPath();
      ScopedNullHandle<mirror::ClassLoader> null_handle;
      // We add to the queue for zygote so that we can fork processes in-between
      // compilations.
      if (Runtime::Current()->IsPrimaryZygote()) {
        std::string boot_profile = GetBootProfileFile(profile_file);
        // We avoid doing compilation at boot for the secondary zygote, as apps
        // forked from it are not critical for boot.
        added_to_queue += runtime->GetJit()->CompileMethodsFromBootProfile(
            self, boot_class_path, boot_profile, null_handle, /* add_to_queue= */ true);
      }
      added_to_queue += runtime->GetJit()->CompileMethodsFromProfile(
          self, boot_class_path, profile_file, null_handle, /* add_to_queue= */ true);
    }

    JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache();
    code_cache->GetZygoteMap()->Initialize(added_to_queue);
  }

  void Finalize() override {
    delete this;
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(ZygoteTask);
};

class JitProfileTask final : public Task {
 public:
  JitProfileTask(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
                 jobject class_loader) {
    ScopedObjectAccess soa(Thread::Current());
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::ClassLoader> h_loader(hs.NewHandle(
        soa.Decode<mirror::ClassLoader>(class_loader)));
    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
    for (const auto& dex_file : dex_files) {
      dex_files_.push_back(dex_file.get());
      // Register the dex file so that we can guarantee it doesn't get deleted
      // while reading it during the task.
      class_linker->RegisterDexFile(*dex_file.get(), h_loader.Get());
    }
    // We also create our own global ref to use this class loader later.
    class_loader_ = soa.Vm()->AddGlobalRef(soa.Self(), h_loader.Get());
  }

  void Run(Thread* self) override {
    ScopedObjectAccess soa(self);
    StackHandleScope<1> hs(self);
    Handle<mirror::ClassLoader> loader = hs.NewHandle<mirror::ClassLoader>(
        soa.Decode<mirror::ClassLoader>(class_loader_));

    std::string profile = GetProfileFile(dex_files_[0]->GetLocation());
    std::string boot_profile = GetBootProfileFile(profile);

    Jit* jit = Runtime::Current()->GetJit();

    jit->CompileMethodsFromBootProfile(
        self,
        dex_files_,
        boot_profile,
        loader,
        /* add_to_queue= */ false);

    jit->CompileMethodsFromProfile(
        self,
        dex_files_,
        profile,
        loader,
        /* add_to_queue= */ true);
  }

  void Finalize() override {
    delete this;
  }

  ~JitProfileTask() {
    ScopedObjectAccess soa(Thread::Current());
    soa.Vm()->DeleteGlobalRef(soa.Self(), class_loader_);
  }

 private:
  std::vector<const DexFile*> dex_files_;
  jobject class_loader_;

  DISALLOW_COPY_AND_ASSIGN(JitProfileTask);
};

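// Helper for the copy-on-write mappings below: writing unconditionally would
// dirty private pages even when the contents already match, so only copy when
// the bytes actually differ.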
static void CopyIfDifferent(void* s1, const void* s2, size_t n) {
  if (memcmp(s1, s2, n) != 0) {
    memcpy(s1, s2, n);
  }
}

void Jit::MapBootImageMethods() {
  if (Runtime::Current()->IsJavaDebuggable()) {
    LOG(INFO) << "Not mapping boot image methods due to process being debuggable";
    return;
  }
  CHECK_NE(fd_methods_.get(), -1);
  if (!code_cache_->GetZygoteMap()->CanMapBootImageMethods()) {
    LOG(WARNING) << "Not mapping boot image methods due to error from zygote";
    // We don't need the fd anymore.
    fd_methods_.reset();
    return;
  }

  std::string error_str;
  MemMap child_mapping_methods = MemMap::MapFile(
      fd_methods_size_,
      PROT_READ | PROT_WRITE,
      MAP_PRIVATE,
      fd_methods_,
      /* start= */ 0,
      /* low_4gb= */ false,
      "boot-image-methods",
      &error_str);

  // We don't need the fd anymore.
  fd_methods_.reset();

  if (!child_mapping_methods.IsValid()) {
    LOG(WARNING) << "Failed to create child mapping of boot image methods: " << error_str;
    return;
  }
  size_t offset = 0;
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
    const ImageHeader& header = space->GetImageHeader();
    const ImageSection& section = header.GetMethodsSection();
    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
    uint8_t* page_end =
        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
    if (page_end <= page_start) {
      // Section doesn't contain one aligned entire page.
      continue;
    }
    uint64_t capacity = page_end - page_start;
    // Walk over methods in the boot image, and check for ones whose class is
    // not initialized in the process, but are in the zygote process. For
    // such methods, we need their entrypoints to be stubs that do the
    // initialization check.
    header.VisitPackedArtMethods([&](ArtMethod& method) NO_THREAD_SAFETY_ANALYSIS {
      if (method.IsRuntimeMethod()) {
        return;
      }
      if (method.GetDeclaringClassUnchecked()->IsVisiblyInitialized() ||
          !method.IsStatic() ||
          method.IsConstructor()) {
        // Method does not need any stub.
        return;
      }

      //  We are going to mremap the child mapping into the image:
      //
      //                            ImageSection       ChildMappingMethods
      //
      //         section start -->  -----------
      //                            |         |
      //                            |         |
      //            page_start -->  |         |   <-----   -----------
      //                            |         |            |         |
      //                            |         |            |         |
      //                            |         |            |         |
      //                            |         |            |         |
      //                            |         |            |         |
      //                            |         |            |         |
      //                            |         |            |         |
      //             page_end  -->  |         |   <-----   -----------
      //                            |         |
      //         section end   -->  -----------


      uint8_t* pointer = reinterpret_cast<uint8_t*>(&method);
      // Note: We could refactor this to only check if the ArtMethod entrypoint is inside the
      // page region. This would remove the need for the edge case handling below.
      if (pointer >= page_start && pointer + sizeof(ArtMethod) < page_end) {
        // For all the methods in the mapping, put the entrypoint to the
        // resolution stub.
        ArtMethod* new_method = reinterpret_cast<ArtMethod*>(
            child_mapping_methods.Begin() + offset + (pointer - page_start));
        const void* code = new_method->GetEntryPointFromQuickCompiledCode();
        if (!class_linker->IsQuickGenericJniStub(code) &&
            !class_linker->IsQuickToInterpreterBridge(code) &&
            !class_linker->IsQuickResolutionStub(code)) {
          LOG(INFO) << "Putting back the resolution stub to an ArtMethod";
          new_method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
        }
      } else if (pointer < page_start && (pointer + sizeof(ArtMethod)) > page_start) {
        LOG(INFO) << "Copying parts of the contents of an ArtMethod spanning page_start";
        // If the method spans `page_start`, copy the contents of the child
        // into the pages we are going to remap into the image.
        //
        //         section start -->  -----------
        //                            |         |
        //                            |         |
        //            page_start -->  |/////////|            -----------
        //                            |/////////| -> copy -> |/////////|
        //                            |         |            |         |
        //
        CopyIfDifferent(child_mapping_methods.Begin() + offset,
                        page_start,
                        pointer + sizeof(ArtMethod) - page_start);
      } else if (pointer < page_end && (pointer + sizeof(ArtMethod)) > page_end) {
        LOG(INFO) << "Copying parts of the contents of an ArtMethod spanning page_end";
        // If the method spans `page_end`, copy the contents of the child
        // into the pages we are going to remap into the image.
        //
        //                            |         |            |         |
        //                            |/////////| -> copy -> |/////////|
        //             page_end  -->  |/////////|            -----------
        //                            |         |
        //         section end   -->  -----------
        //
        size_t bytes_to_copy = (page_end - pointer);
        CopyIfDifferent(child_mapping_methods.Begin() + offset + capacity - bytes_to_copy,
                        page_end - bytes_to_copy,
                        bytes_to_copy);
      }
    }, space->Begin(), kRuntimePointerSize);

    // Map the memory in the boot image range.
    if (mremap(child_mapping_methods.Begin() + offset,
               capacity,
               capacity,
               MREMAP_FIXED | MREMAP_MAYMOVE,
               page_start) == MAP_FAILED) {
      PLOG(WARNING) << "Failed to mremap boot image methods for " << space->GetImageFilename();
    }
    offset += capacity;
  }

  // The private mapping created for this process has been mremapped. We can
  // reset it.
  child_mapping_methods.Reset();
  LOG(INFO) << "Successfully mapped boot image methods";
}

// Return whether a boot image has a profile. This means we'll need to pre-JIT
// methods in that profile for performance.
static bool HasImageWithProfile() {
  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
    if (!space->GetProfileFile().empty()) {
      return true;
    }
  }
  return false;
}

void Jit::CreateThreadPool() {
  // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
  // is not null when we instrument.
1199 
1200   // We need peers as we may report the JIT thread, e.g., in the debugger.
1201   constexpr bool kJitPoolNeedsPeers = true;
1202   thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));
1203 
1204   thread_pool_->SetPthreadPriority(options_->GetThreadPoolPthreadPriority());
1205   Start();
1206 
1207   Runtime* runtime = Runtime::Current();
1208   if (runtime->IsZygote()) {
1209     // To speed up class lookups, generate a type lookup table for
1210     // dex files not backed by oat file.
1211     for (const DexFile* dex_file : runtime->GetClassLinker()->GetBootClassPath()) {
1212       if (dex_file->GetOatDexFile() == nullptr) {
1213         TypeLookupTable type_lookup_table = TypeLookupTable::Create(*dex_file);
1214         type_lookup_tables_.push_back(
1215             std::make_unique<art::OatDexFile>(std::move(type_lookup_table)));
1216         dex_file->SetOatDexFile(type_lookup_tables_.back().get());
1217       }
1218     }
1219 
1220     // Add a task that will verify boot classpath jars that were not
1221     // pre-compiled.
1222     thread_pool_->AddTask(Thread::Current(), new ZygoteVerificationTask());
1223   }
1224 
1225   if (runtime->IsZygote() && HasImageWithProfile() && UseJitCompilation()) {
1226     // If we have an image with a profile, request a JIT task to
1227     // compile all methods in that profile.
1228     thread_pool_->AddTask(Thread::Current(), new ZygoteTask());
1229 
1230     // And create mappings to share boot image methods memory from the zygote to
1231     // child processes.
1232 
1233     // Compute the total capacity required for the boot image methods.
1234     uint64_t total_capacity = 0;
1235     for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
1236       const ImageHeader& header = space->GetImageHeader();
1237       const ImageSection& section = header.GetMethodsSection();
1238       // Mappings need to be at the page level.
1239       uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
1240       uint8_t* page_end =
1241           AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
1242       if (page_end > page_start) {
1243         total_capacity += (page_end - page_start);
1244       }
1245     }
1246 
1247     // Create the child and zygote mappings to the boot image methods.
1248     if (total_capacity > 0) {
1249       // Start with '/boot' and end with '.art' to match the pattern recognized
1250       // by android_os_Debug.cpp for boot images.
1251       const char* name = "/boot-image-methods.art";
1252       unique_fd mem_fd = unique_fd(art::memfd_create(name, /* flags= */ MFD_ALLOW_SEALING));
1253       if (mem_fd.get() == -1) {
1254         PLOG(WARNING) << "Could not create boot image methods file descriptor";
1255         return;
1256       }
1257       if (ftruncate(mem_fd.get(), total_capacity) != 0) {
1258         PLOG(WARNING) << "Failed to truncate boot image methods file to " << total_capacity;
1259         return;
1260       }
1261       std::string error_str;
1262 
1263       // Create the shared mapping eagerly, as this prevents other processes
1264       // from adding the writable seal.
1265       zygote_mapping_methods_ = MemMap::MapFile(
1266         total_capacity,
1267         PROT_READ | PROT_WRITE,
1268         MAP_SHARED,
1269         mem_fd,
1270         /* start= */ 0,
1271         /* low_4gb= */ false,
1272         "boot-image-methods",
1273         &error_str);
1274 
1275       if (!zygote_mapping_methods_.IsValid()) {
1276         LOG(WARNING) << "Failed to create zygote mapping of boot image methods:  " << error_str;
1277         return;
1278       }
      if (zygote_mapping_methods_.MadviseDontFork() != 0) {
        LOG(WARNING) << "Failed to madvise dont fork boot image methods";
        zygote_mapping_methods_ = MemMap();
        return;
      }

      // We should use the F_SEAL_FUTURE_WRITE flag, but this has unexpected
      // behavior on private mappings after fork (the mapping becomes shared between
      // parent and children), see b/143833776.
      // We will seal the write once we are done writing to the shared mapping.
      if (fcntl(mem_fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW) == -1) {
        PLOG(WARNING) << "Failed to seal boot image methods file descriptor";
        zygote_mapping_methods_ = MemMap();
        return;
      }
      fd_methods_ = unique_fd(mem_fd.release());
      fd_methods_size_ = total_capacity;
    }
  }
}

void Jit::RegisterDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
                           jobject class_loader) {
  if (dex_files.empty()) {
    return;
  }
  Runtime* runtime = Runtime::Current();
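  // Only the system server precompiles methods here: its hot code is covered
  // by the boot image profile, and compiling it ahead of first use avoids a
  // slow warm-up right after boot.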
  // If the runtime is debuggable, there is no need to precompile methods.
  if (runtime->IsSystemServer() &&
      UseJitCompilation() && HasImageWithProfile() &&
      !runtime->IsJavaDebuggable()) {
    thread_pool_->AddTask(Thread::Current(), new JitProfileTask(dex_files, class_loader));
  }
}

bool Jit::CompileMethodFromProfile(Thread* self,
                                   ClassLinker* class_linker,
                                   uint32_t method_idx,
                                   Handle<mirror::DexCache> dex_cache,
                                   Handle<mirror::ClassLoader> class_loader,
                                   bool add_to_queue,
                                   bool compile_after_boot) {
  ArtMethod* method = class_linker->ResolveMethodWithoutInvokeType(
      method_idx, dex_cache, class_loader);
  if (method == nullptr) {
    self->ClearException();
    return false;
  }
  if (!method->IsCompilable() || !method->IsInvokable()) {
    return false;
  }
  if (method->IsPreCompiled()) {
    // Already seen by another profile.
    return false;
  }
  const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
  if (class_linker->IsQuickToInterpreterBridge(entry_point) ||
      class_linker->IsQuickGenericJniStub(entry_point) ||
      // We explicitly check for the resolution stub, not the resolution
      // trampoline: the trampoline is used for methods backed by a .oat file
      // that has a compiled version of the method.
      (entry_point == GetQuickResolutionStub())) {
    method->SetPreCompiled();
    if (!add_to_queue) {
      CompileMethod(method, self, /* baseline= */ false, /* osr= */ false, /* prejit= */ true);
    } else {
      Task* task = new JitCompileTask(method, JitCompileTask::TaskKind::kPreCompile);
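      // When asked to compile after boot, park the task in tasks_after_boot_
      // so profile compilations don't compete with app startup work;
      // BootCompleted() later flushes the queue to the thread pool.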
      if (compile_after_boot) {
        MutexLock mu(Thread::Current(), boot_completed_lock_);
        if (!boot_completed_) {
          tasks_after_boot_.push_back(task);
          return true;
        }
        DCHECK(tasks_after_boot_.empty());
      }
      thread_pool_->AddTask(self, task);
      return true;
    }
  }
  return false;
}

uint32_t Jit::CompileMethodsFromBootProfile(
    Thread* self,
    const std::vector<const DexFile*>& dex_files,
    const std::string& profile_file,
    Handle<mirror::ClassLoader> class_loader,
    bool add_to_queue) {
  unix_file::FdFile profile(profile_file.c_str(), O_RDONLY, true);

  if (profile.Fd() == -1) {
    PLOG(WARNING) << "No boot profile: " << profile_file;
    return 0u;
  }

  ProfileBootInfo profile_info;
  if (!profile_info.Load(profile.Fd(), dex_files)) {
    LOG(ERROR) << "Could not load profile file: " << profile_file;
    return 0u;
  }

  ScopedObjectAccess soa(self);
  VariableSizedHandleScope handles(self);
  std::vector<Handle<mirror::DexCache>> dex_caches;
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  for (const DexFile* dex_file : profile_info.GetDexFiles()) {
    dex_caches.push_back(handles.NewHandle(class_linker->FindDexCache(self, *dex_file)));
  }

  uint32_t added_to_queue = 0;
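  // Each profile entry is a (dex file index, method index) pair: the first
  // element indexes the dex_caches vector built above from the profile's own
  // dex file ordering, the second is the method index within that dex file.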
  for (const std::pair<uint32_t, uint32_t>& pair : profile_info.GetMethods()) {
    if (CompileMethodFromProfile(self,
                                 class_linker,
                                 pair.second,
                                 dex_caches[pair.first],
                                 class_loader,
                                 add_to_queue,
                                 /*compile_after_boot=*/false)) {
      ++added_to_queue;
    }
  }
  return added_to_queue;
}

uint32_t Jit::CompileMethodsFromProfile(
    Thread* self,
    const std::vector<const DexFile*>& dex_files,
    const std::string& profile_file,
    Handle<mirror::ClassLoader> class_loader,
    bool add_to_queue) {

  if (profile_file.empty()) {
    LOG(WARNING) << "Expected a profile file in JIT zygote mode";
    return 0u;
  }

  // We don't generate boot profiles on device, therefore we don't
  // need to lock the file.
  unix_file::FdFile profile(profile_file.c_str(), O_RDONLY, true);

  if (profile.Fd() == -1) {
    PLOG(WARNING) << "No profile: " << profile_file;
    return 0u;
  }

  ProfileCompilationInfo profile_info;
  if (!profile_info.Load(profile.Fd())) {
    LOG(ERROR) << "Could not load profile file";
    return 0u;
  }
  ScopedObjectAccess soa(self);
  StackHandleScope<1> hs(self);
  MutableHandle<mirror::DexCache> dex_cache = hs.NewHandle<mirror::DexCache>(nullptr);
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  uint32_t added_to_queue = 0u;
  for (const DexFile* dex_file : dex_files) {
    if (LocationIsOnArtModule(dex_file->GetLocation().c_str())) {
      // The ART module jars are already preopted.
      continue;
    }

    std::set<dex::TypeIndex> class_types;
    std::set<uint16_t> all_methods;
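    // Passing the same set for the hot, startup, and post-startup out-params
    // merges all profiled method categories: for precompilation, every
    // profiled method is treated alike.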
    if (!profile_info.GetClassesAndMethods(*dex_file,
                                           &class_types,
                                           &all_methods,
                                           &all_methods,
                                           &all_methods)) {
      // This means the profile file did not reference the dex file, which is
      // the case when there are no classes or methods of that dex file in the
      // profile.
      continue;
    }
    dex_cache.Assign(class_linker->FindDexCache(self, *dex_file));
    CHECK(dex_cache != nullptr) << "Could not find dex cache for " << dex_file->GetLocation();

    for (uint16_t method_idx : all_methods) {
      if (CompileMethodFromProfile(self,
                                   class_linker,
                                   method_idx,
                                   dex_cache,
                                   class_loader,
                                   add_to_queue,
                                   /*compile_after_boot=*/true)) {
        ++added_to_queue;
      }
    }
  }

  // Add a task to run when all compilation is done.
  JitDoneCompilingProfileTask* task = new JitDoneCompilingProfileTask(dex_files);
  MutexLock mu(Thread::Current(), boot_completed_lock_);
  if (!boot_completed_) {
    tasks_after_boot_.push_back(task);
  } else {
    DCHECK(tasks_after_boot_.empty());
    thread_pool_->AddTask(self, task);
  }
  return added_to_queue;
}

static bool IgnoreSamplesForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
  if (method->IsClassInitializer() || !method->IsCompilable() || method->IsPreCompiled()) {
    // We do not want to compile such methods.
    return true;
  }
  if (method->IsNative()) {
    ObjPtr<mirror::Class> klass = method->GetDeclaringClass();
    if (klass == GetClassRoot<mirror::MethodHandle>() ||
        klass == GetClassRoot<mirror::VarHandle>()) {
      // MethodHandle and VarHandle invocation methods are required to throw an
      // UnsupportedOperationException if invoked reflectively. We achieve this by having native
      // implementations that raise the exception. We need to disable JIT compilation of these JNI
      // methods as it can lead to transitioning between JIT compiled JNI stubs and generic JNI
      // stubs. Since these stubs have different stack representations we can then crash in stack
      // walking (b/78151261).
      return true;
    }
  }
  return false;
}

bool Jit::MaybeCompileMethod(Thread* self,
                             ArtMethod* method,
                             uint32_t old_count,
                             uint32_t new_count,
                             bool with_backedges) {
  if (thread_pool_ == nullptr) {
    return false;
  }
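  // Fast path for methods the zygote already pre-compiled: once the declaring
  // class is visibly initialized, installing the entry point saved in the code
  // cache is enough; no new compilation is needed.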
  if (UNLIKELY(method->IsPreCompiled()) && !with_backedges /* don't check for OSR */) {
    if (!NeedsClinitCheckBeforeCall(method) ||
        method->GetDeclaringClass()->IsVisiblyInitialized()) {
      const void* entry_point = code_cache_->GetSavedEntryPointOfPreCompiledMethod(method);
      if (entry_point != nullptr) {
        Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(method, entry_point);
        return true;
      }
    }
  }

  if (IgnoreSamplesForMethod(method)) {
    return false;
  }
  if (HotMethodThreshold() == 0) {
    // Tests might request JIT on first use (compiled synchronously in the interpreter).
    return false;
  }
  DCHECK_GT(WarmMethodThreshold(), 0);
  DCHECK_GT(HotMethodThreshold(), WarmMethodThreshold());
  DCHECK_GT(OSRMethodThreshold(), HotMethodThreshold());
  DCHECK_GE(PriorityThreadWeight(), 1);
  DCHECK_LE(PriorityThreadWeight(), HotMethodThreshold());

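  // The sample count drives a three-stage pipeline:
  //   warm -> allocate a ProfilingInfo and start collecting inline caches,
  //   hot  -> enqueue a (baseline or optimizing) compilation task,
  //   OSR  -> enqueue an on-stack-replacement compilation for methods stuck
  //           in long-running loops.
  // For example, running with "-Xjitthreshold:1000" (a standard runtime flag)
  // lowers the hot threshold; the warm and OSR thresholds are derived from it
  // by default.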
  if (old_count < WarmMethodThreshold() && new_count >= WarmMethodThreshold()) {
    // Note: Native methods have no "warm" state or profiling info.
    if (!method->IsNative() &&
        (method->GetProfilingInfo(kRuntimePointerSize) == nullptr) &&
        code_cache_->CanAllocateProfilingInfo() &&
        !options_->UseTieredJitCompilation()) {
      bool success = ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
      if (success) {
        VLOG(jit) << "Start profiling " << method->PrettyMethod();
      }

      if (thread_pool_ == nullptr) {
        // Calling ProfilingInfo::Create might put us in a suspended state, which could
        // lead to the thread pool being deleted when we are shutting down.
        return false;
      }

      if (!success) {
        // We failed allocating. Instead of doing the collection on the Java thread, we push
        // an allocation task to a compiler thread, which will do the collection.
        thread_pool_->AddTask(
            self, new JitCompileTask(method, JitCompileTask::TaskKind::kAllocateProfile));
      }
    }
  }
  if (UseJitCompilation()) {
    if (old_count < HotMethodThreshold() && new_count >= HotMethodThreshold()) {
      if (!code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
        DCHECK(thread_pool_ != nullptr);
        JitCompileTask::TaskKind kind =
            (options_->UseTieredJitCompilation() || options_->UseBaselineCompiler())
                ? JitCompileTask::TaskKind::kCompileBaseline
                : JitCompileTask::TaskKind::kCompile;
        thread_pool_->AddTask(self, new JitCompileTask(method, kind));
      }
    }
    if (old_count < OSRMethodThreshold() && new_count >= OSRMethodThreshold()) {
      if (!with_backedges) {
        return false;
      }
      DCHECK(!method->IsNative());  // No back edges reported for native methods.
      if (!code_cache_->IsOsrCompiled(method)) {
        DCHECK(thread_pool_ != nullptr);
        thread_pool_->AddTask(
            self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileOsr));
      }
    }
  }
  return true;
}

void Jit::EnqueueOptimizedCompilation(ArtMethod* method, Thread* self) {
  if (thread_pool_ == nullptr) {
    return;
  }
  // We arrive here after the baseline-compiled code of a method has reached
  // its hotness threshold. If tiered compilation is enabled, enqueue a task
  // that will compile the method with the optimizing compiler.
  if (options_->UseTieredJitCompilation()) {
    thread_pool_->AddTask(
        self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
  }
}

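// RAII helper that marks the current thread as a runtime thread for the
// duration of a scope, restoring the previous value on destruction. The JIT
// uses it so that synchronous compilation on an application thread behaves
// like compilation on a JIT worker thread (e.g. for class loading).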
class ScopedSetRuntimeThread {
 public:
  explicit ScopedSetRuntimeThread(Thread* self)
      : self_(self), was_runtime_thread_(self_->IsRuntimeThread()) {
    self_->SetIsRuntimeThread(true);
  }

  ~ScopedSetRuntimeThread() {
    self_->SetIsRuntimeThread(was_runtime_thread_);
  }

 private:
  Thread* self_;
  bool was_runtime_thread_;
};

void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
  Runtime* runtime = Runtime::Current();
  if (UNLIKELY(runtime->UseJitCompilation() && JitAtFirstUse())) {
    ArtMethod* np_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
    if (np_method->IsCompilable()) {
      if (!np_method->IsNative() && GetCodeCache()->CanAllocateProfilingInfo()) {
        // The compiler requires a ProfilingInfo object for non-native methods.
        ProfilingInfo::Create(thread, np_method, /* retry_allocation= */ true);
      }
      // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
      // conflicts with jitzygote optimizations.
      JitCompileTask compile_task(method, JitCompileTask::TaskKind::kCompile);
      // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
      ScopedSetRuntimeThread ssrt(thread);
      compile_task.Run(thread);
    }
    return;
  }

  ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
  // Update the entrypoint if the ProfilingInfo has one. The interpreter will call it
  // instead of interpreting the method. We don't update it for instrumentation as the entrypoint
  // must remain the instrumentation entrypoint.
  if ((profiling_info != nullptr) &&
      (profiling_info->GetSavedEntryPoint() != nullptr) &&
      (method->GetEntryPointFromQuickCompiledCode() != GetQuickInstrumentationEntryPoint())) {
    Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
        method, profiling_info->GetSavedEntryPoint());
  } else {
    AddSamples(thread, method, 1, /* with_backedges= */false);
  }
}

void Jit::InvokeVirtualOrInterface(ObjPtr<mirror::Object> this_object,
                                   ArtMethod* caller,
                                   uint32_t dex_pc,
                                   ArtMethod* callee ATTRIBUTE_UNUSED) {
  ScopedAssertNoThreadSuspension ants(__FUNCTION__);
  DCHECK(this_object != nullptr);
  ProfilingInfo* info = caller->GetProfilingInfo(kRuntimePointerSize);
  if (info != nullptr) {
    info->AddInvokeInfo(dex_pc, this_object->GetClass());
  }
}

void Jit::WaitForCompilationToFinish(Thread* self) {
  if (thread_pool_ != nullptr) {
    thread_pool_->Wait(self, false, false);
  }
}

void Jit::Stop() {
  Thread* self = Thread::Current();
  // TODO(ngeoffray): change API to not require calling WaitForCompilationToFinish twice.
  WaitForCompilationToFinish(self);
  GetThreadPool()->StopWorkers(self);
  WaitForCompilationToFinish(self);
}

void Jit::Start() {
  GetThreadPool()->StartWorkers(Thread::Current());
}

ScopedJitSuspend::ScopedJitSuspend() {
  jit::Jit* jit = Runtime::Current()->GetJit();
  was_on_ = (jit != nullptr) && (jit->GetThreadPool() != nullptr);
  if (was_on_) {
    jit->Stop();
  }
}

ScopedJitSuspend::~ScopedJitSuspend() {
  if (was_on_) {
    DCHECK(Runtime::Current()->GetJit() != nullptr);
    DCHECK(Runtime::Current()->GetJit()->GetThreadPool() != nullptr);
    Runtime::Current()->GetJit()->Start();
  }
}

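// Body of the detached thread created in PostForkChildAction. It polls until
// the zygote has signaled that boot image compilation finished, then attaches
// to the runtime so it can suspend all threads and remap the shared boot image
// methods over this process's private copy.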
static void* RunPollingThread(void* arg) {
  Jit* jit = reinterpret_cast<Jit*>(arg);
  do {
    sleep(10);
  } while (!jit->GetCodeCache()->GetZygoteMap()->IsCompilationNotified());

  // We will suspend other threads: we can only do that if we're attached to the
  // runtime.
  Runtime* runtime = Runtime::Current();
  bool thread_attached = runtime->AttachCurrentThread(
      "BootImagePollingThread",
      /* as_daemon= */ true,
      /* thread_group= */ nullptr,
      /* create_peer= */ false);
  CHECK(thread_attached);

  {
    // Prevent other threads from running while we are remapping the boot image
    // ArtMethods. Native threads might still be running, but they cannot
    // change the contents of ArtMethods.
    ScopedSuspendAll ssa(__FUNCTION__);
    runtime->GetJit()->MapBootImageMethods();
  }

  Runtime::Current()->DetachCurrentThread();
  return nullptr;
}

void Jit::PostForkChildAction(bool is_system_server, bool is_zygote) {
  // Clear the potential boot tasks inherited from the zygote.
  {
    MutexLock mu(Thread::Current(), boot_completed_lock_);
    tasks_after_boot_.clear();
  }

  Runtime* const runtime = Runtime::Current();
  // Check if we'll need to remap the boot image methods.
  if (!is_zygote && fd_methods_ != -1) {
    // Create a thread that will poll the status of zygote compilation, and map
    // the private mapping of boot image methods.
    // For child zygote, we instead query IsCompilationNotified() post zygote fork.
    zygote_mapping_methods_.ResetInForkedProcess();
    pthread_t polling_thread;
    pthread_attr_t attr;
    CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
    CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED),
                       "PTHREAD_CREATE_DETACHED");
    CHECK_PTHREAD_CALL(
        pthread_create,
        (&polling_thread, &attr, RunPollingThread, reinterpret_cast<void*>(this)),
        "Methods maps thread");
  }

  if (is_zygote || runtime->IsSafeMode()) {
    // Delete the thread pool: we are not going to JIT.
    thread_pool_.reset(nullptr);
    return;
  }
  // At this point, the compiler options have been adjusted to the particular configuration
  // of the forked child. Parse them again.
  jit_compiler_->ParseCompilerOptions();

  // Adjust the status of code cache collection: the status from zygote was to not collect.
  code_cache_->SetGarbageCollectCode(!jit_compiler_->GenerateDebugInfo() &&
      !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled());
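  // Rationale: collecting JIT code while native debug info is being generated,
  // or while instrumentation exit stubs are installed, could leave dangling
  // references to reclaimed code.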

  if (is_system_server && HasImageWithProfile()) {
    // Disable garbage collection: we don't want it to delete methods we're compiling
    // through boot and system server profiles.
    // TODO(ngeoffray): Fix this so we still collect deoptimized and unused code.
    code_cache_->SetGarbageCollectCode(false);
  }

  // We do this here instead of PostZygoteFork, as NativeDebugInfoPostFork only
  // applies to a child.
  NativeDebugInfoPostFork();
}

void Jit::PreZygoteFork() {
  if (thread_pool_ == nullptr) {
    return;
  }
  thread_pool_->DeleteThreads();

  NativeDebugInfoPreFork();
}

void Jit::PostZygoteFork() {
  if (thread_pool_ == nullptr) {
    // If this is a child zygote, check if we need to remap the boot image
    // methods.
    if (Runtime::Current()->IsZygote() &&
        fd_methods_ != -1 &&
        code_cache_->GetZygoteMap()->IsCompilationNotified()) {
      ScopedSuspendAll ssa(__FUNCTION__);
      MapBootImageMethods();
    }
    return;
  }
  if (Runtime::Current()->IsZygote() &&
      code_cache_->GetZygoteMap()->IsCompilationDoneButNotNotified()) {
    // Copy the boot image methods data to the mappings we created to share
    // with the children. We do this here as we are the only thread running and
    // we don't risk other threads concurrently updating the ArtMethods.
    CHECK_EQ(GetTaskCount(), 1);
    NotifyZygoteCompilationDone();
    CHECK(code_cache_->GetZygoteMap()->IsCompilationNotified());
  }
  thread_pool_->CreateThreads();
}

void Jit::BootCompleted() {
  Thread* self = Thread::Current();
  std::deque<Task*> tasks;
  {
    MutexLock mu(self, boot_completed_lock_);
    tasks = std::move(tasks_after_boot_);
    boot_completed_ = true;
  }
  for (Task* task : tasks) {
    thread_pool_->AddTask(self, task);
  }
}

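// For code destined for the shared (zygote) region, the JIT may only embed
// references to methods, classes, and strings that live in the boot image:
// those are at the same address in every process.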
bool Jit::CanEncodeMethod(ArtMethod* method, bool is_for_shared_region) const {
  return !is_for_shared_region ||
      Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(method->GetDeclaringClass());
}

bool Jit::CanEncodeClass(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const {
  return !is_for_shared_region || Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(cls);
}

bool Jit::CanEncodeString(ObjPtr<mirror::String> string, bool is_for_shared_region) const {
  return !is_for_shared_region || Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(string);
}

bool Jit::CanAssumeInitialized(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const {
  if (!is_for_shared_region) {
    return cls->IsInitialized();
  } else {
    // Look up the class status in the oat file.
    const DexFile& dex_file = *cls->GetDexCache()->GetDexFile();
    const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
    // In case we run without an image there won't be a backing oat file.
    if (oat_dex_file == nullptr || oat_dex_file->GetOatFile() == nullptr) {
      return false;
    }
    uint16_t class_def_index = cls->GetDexClassDefIndex();
    return oat_dex_file->GetOatClass(class_def_index).GetStatus() >= ClassStatus::kInitialized;
  }
}

void Jit::EnqueueCompilationFromNterp(ArtMethod* method, Thread* self) {
  if (thread_pool_ == nullptr) {
    return;
  }
  if (GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    // If we already have compiled code for it, nterp may be stuck in a loop.
    // Compile OSR.
    thread_pool_->AddTask(
        self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileOsr));
    return;
  }
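  // Otherwise pick the first compilation tier: with profiling info available,
  // start with the baseline compiler and let tiering upgrade the code later;
  // without it, go straight to the optimizing compiler.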
  if (GetCodeCache()->CanAllocateProfilingInfo()) {
    ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
    thread_pool_->AddTask(
        self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileBaseline));
  } else {
    thread_pool_->AddTask(
        self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
  }
}

}  // namespace jit
}  // namespace art