1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "runtime.h"
18
19 // sys/mount.h has to come before linux/fs.h due to redefinition of MS_RDONLY, MS_BIND, etc
20 #include <sys/mount.h>
21 #ifdef __linux__
22 #include <linux/fs.h>
23 #include <sys/prctl.h>
24 #endif
25
26 #include <signal.h>
27 #include <sys/syscall.h>
28 #include "base/memory_tool.h"
29
30 #include <cstdio>
31 #include <cstdlib>
32 #include <limits>
33 #include <memory_representation.h>
34 #include <vector>
35 #include <fcntl.h>
36
37 #include "JniConstants.h"
38 #include "ScopedLocalRef.h"
39 #include "arch/arm/quick_method_frame_info_arm.h"
40 #include "arch/arm/registers_arm.h"
41 #include "arch/arm64/quick_method_frame_info_arm64.h"
42 #include "arch/arm64/registers_arm64.h"
43 #include "arch/instruction_set_features.h"
44 #include "arch/mips/quick_method_frame_info_mips.h"
45 #include "arch/mips/registers_mips.h"
46 #include "arch/mips64/quick_method_frame_info_mips64.h"
47 #include "arch/mips64/registers_mips64.h"
48 #include "arch/x86/quick_method_frame_info_x86.h"
49 #include "arch/x86/registers_x86.h"
50 #include "arch/x86_64/quick_method_frame_info_x86_64.h"
51 #include "arch/x86_64/registers_x86_64.h"
52 #include "art_field-inl.h"
53 #include "art_method-inl.h"
54 #include "asm_support.h"
55 #include "atomic.h"
56 #include "base/arena_allocator.h"
57 #include "base/dumpable.h"
58 #include "base/stl_util.h"
59 #include "base/systrace.h"
60 #include "base/unix_file/fd_file.h"
61 #include "class_linker-inl.h"
62 #include "compiler_callbacks.h"
63 #include "compiler_filter.h"
64 #include "debugger.h"
65 #include "elf_file.h"
66 #include "entrypoints/runtime_asm_entrypoints.h"
67 #include "experimental_flags.h"
68 #include "fault_handler.h"
69 #include "gc/accounting/card_table-inl.h"
70 #include "gc/heap.h"
71 #include "gc/space/image_space.h"
72 #include "gc/space/space-inl.h"
73 #include "handle_scope-inl.h"
74 #include "image-inl.h"
75 #include "instrumentation.h"
76 #include "intern_table.h"
77 #include "interpreter/interpreter.h"
78 #include "jit/jit.h"
79 #include "jni_internal.h"
80 #include "linear_alloc.h"
81 #include "lambda/box_table.h"
82 #include "mirror/array.h"
83 #include "mirror/class-inl.h"
84 #include "mirror/class_loader.h"
85 #include "mirror/field.h"
86 #include "mirror/method.h"
87 #include "mirror/stack_trace_element.h"
88 #include "mirror/throwable.h"
89 #include "monitor.h"
90 #include "native/dalvik_system_DexFile.h"
91 #include "native/dalvik_system_VMDebug.h"
92 #include "native/dalvik_system_VMRuntime.h"
93 #include "native/dalvik_system_VMStack.h"
94 #include "native/dalvik_system_ZygoteHooks.h"
95 #include "native/java_lang_Class.h"
96 #include "native/java_lang_DexCache.h"
97 #include "native/java_lang_Object.h"
98 #include "native/java_lang_String.h"
99 #include "native/java_lang_StringFactory.h"
100 #include "native/java_lang_System.h"
101 #include "native/java_lang_Thread.h"
102 #include "native/java_lang_Throwable.h"
103 #include "native/java_lang_VMClassLoader.h"
104 #include "native/java_lang_ref_FinalizerReference.h"
105 #include "native/java_lang_ref_Reference.h"
106 #include "native/java_lang_reflect_AbstractMethod.h"
107 #include "native/java_lang_reflect_Array.h"
108 #include "native/java_lang_reflect_Constructor.h"
109 #include "native/java_lang_reflect_Field.h"
110 #include "native/java_lang_reflect_Method.h"
111 #include "native/java_lang_reflect_Proxy.h"
112 #include "native/java_util_concurrent_atomic_AtomicLong.h"
113 #include "native/libcore_util_CharsetUtils.h"
114 #include "native/org_apache_harmony_dalvik_ddmc_DdmServer.h"
115 #include "native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h"
116 #include "native/sun_misc_Unsafe.h"
117 #include "native_bridge_art_interface.h"
118 #include "oat_file.h"
119 #include "oat_file_manager.h"
120 #include "os.h"
121 #include "parsed_options.h"
122 #include "profiler.h"
123 #include "jit/profile_saver.h"
124 #include "quick/quick_method_frame_info.h"
125 #include "reflection.h"
126 #include "runtime_options.h"
127 #include "ScopedLocalRef.h"
128 #include "scoped_thread_state_change.h"
129 #include "sigchain.h"
130 #include "signal_catcher.h"
131 #include "signal_set.h"
132 #include "thread.h"
133 #include "thread_list.h"
134 #include "trace.h"
135 #include "transaction.h"
136 #include "utils.h"
137 #include "verifier/method_verifier.h"
138 #include "well_known_classes.h"
139
140 namespace art {
141
142 // If a signal isn't handled properly, enable a handler that attempts to dump the Java stack.
143 static constexpr bool kEnableJavaStackTraceHandler = false;
144 // Tuned by compiling GmsCore under perf and measuring time spent in DescriptorEquals for class
145 // linking.
146 static constexpr double kLowMemoryMinLoadFactor = 0.5;
147 static constexpr double kLowMemoryMaxLoadFactor = 0.8;
148 static constexpr double kNormalMinLoadFactor = 0.4;
149 static constexpr double kNormalMaxLoadFactor = 0.7;
150 Runtime* Runtime::instance_ = nullptr;
151
152 struct TraceConfig {
153 Trace::TraceMode trace_mode;
154 Trace::TraceOutputMode trace_output_mode;
155 std::string trace_file;
156 size_t trace_file_size;
157 };
158
Runtime()159 Runtime::Runtime()
160 : resolution_method_(nullptr),
161 imt_conflict_method_(nullptr),
162 imt_unimplemented_method_(nullptr),
163 instruction_set_(kNone),
164 compiler_callbacks_(nullptr),
165 is_zygote_(false),
166 must_relocate_(false),
167 is_concurrent_gc_enabled_(true),
168 is_explicit_gc_disabled_(false),
169 dex2oat_enabled_(true),
170 image_dex2oat_enabled_(true),
171 default_stack_size_(0),
172 heap_(nullptr),
173 max_spins_before_thin_lock_inflation_(Monitor::kDefaultMaxSpinsBeforeThinLockInflation),
174 monitor_list_(nullptr),
175 monitor_pool_(nullptr),
176 thread_list_(nullptr),
177 intern_table_(nullptr),
178 class_linker_(nullptr),
179 signal_catcher_(nullptr),
180 java_vm_(nullptr),
181 fault_message_lock_("Fault message lock"),
182 fault_message_(""),
183 threads_being_born_(0),
184 shutdown_cond_(new ConditionVariable("Runtime shutdown", *Locks::runtime_shutdown_lock_)),
185 shutting_down_(false),
186 shutting_down_started_(false),
187 started_(false),
188 finished_starting_(false),
189 vfprintf_(nullptr),
190 exit_(nullptr),
191 abort_(nullptr),
192 stats_enabled_(false),
193 is_running_on_memory_tool_(RUNNING_ON_MEMORY_TOOL),
194 instrumentation_(),
195 main_thread_group_(nullptr),
196 system_thread_group_(nullptr),
197 system_class_loader_(nullptr),
198 dump_gc_performance_on_shutdown_(false),
199 preinitialization_transaction_(nullptr),
200 verify_(verifier::VerifyMode::kNone),
201 allow_dex_file_fallback_(true),
202 target_sdk_version_(0),
203 implicit_null_checks_(false),
204 implicit_so_checks_(false),
205 implicit_suspend_checks_(false),
206 no_sig_chain_(false),
207 force_native_bridge_(false),
208 is_native_bridge_loaded_(false),
209 is_native_debuggable_(false),
210 zygote_max_failed_boots_(0),
211 experimental_flags_(ExperimentalFlags::kNone),
212 oat_file_manager_(nullptr),
213 is_low_memory_mode_(false),
214 safe_mode_(false),
215 dump_native_stack_on_sig_quit_(true),
216 pruned_dalvik_cache_(false),
217 // Initially assume we perceive jank in case the process state is never updated.
218 process_state_(kProcessStateJankPerceptible),
219 zygote_no_threads_(false) {
220 CheckAsmSupportOffsetsAndSizes();
221 std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
222 interpreter::CheckInterpreterAsmConstants();
223 }
224
~Runtime()225 Runtime::~Runtime() {
226 ScopedTrace trace("Runtime shutdown");
227 if (is_native_bridge_loaded_) {
228 UnloadNativeBridge();
229 }
230
231 if (dump_gc_performance_on_shutdown_) {
232 // This can't be called from the Heap destructor below because it
233 // could call RosAlloc::InspectAll() which needs the thread_list
234 // to be still alive.
235 heap_->DumpGcPerformanceInfo(LOG(INFO));
236 }
237
238 Thread* self = Thread::Current();
239 const bool attach_shutdown_thread = self == nullptr;
240 if (attach_shutdown_thread) {
241 CHECK(AttachCurrentThread("Shutdown thread", false, nullptr, false));
242 self = Thread::Current();
243 } else {
244 LOG(WARNING) << "Current thread not detached in Runtime shutdown";
245 }
246
247 {
248 ScopedTrace trace2("Wait for shutdown cond");
249 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
250 shutting_down_started_ = true;
251 while (threads_being_born_ > 0) {
252 shutdown_cond_->Wait(self);
253 }
254 shutting_down_ = true;
255 }
256 // Shutdown and wait for the daemons.
257 CHECK(self != nullptr);
258 if (IsFinishedStarting()) {
259 ScopedTrace trace2("Waiting for Daemons");
260 self->ClearException();
261 self->GetJniEnv()->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
262 WellKnownClasses::java_lang_Daemons_stop);
263 }
264
265 Trace::Shutdown();
266
267 if (attach_shutdown_thread) {
268 DetachCurrentThread();
269 self = nullptr;
270 }
271
272 // Make sure to let the GC complete if it is running.
273 heap_->WaitForGcToComplete(gc::kGcCauseBackground, self);
274 heap_->DeleteThreadPool();
275 if (jit_ != nullptr) {
276 ScopedTrace trace2("Delete jit");
277 VLOG(jit) << "Deleting jit thread pool";
278 // Delete thread pool before the thread list since we don't want to wait forever on the
279 // JIT compiler threads.
280 jit_->DeleteThreadPool();
281 // Similarly, stop the profile saver thread before deleting the thread list.
282 jit_->StopProfileSaver();
283 }
284
285 // Make sure our internal threads are dead before we start tearing down things they're using.
286 Dbg::StopJdwp();
287 delete signal_catcher_;
288
289 // Make sure all other non-daemon threads have terminated, and all daemon threads are suspended.
290 {
291 ScopedTrace trace2("Delete thread list");
292 delete thread_list_;
293 }
294 // Delete the JIT after thread list to ensure that there is no remaining threads which could be
295 // accessing the instrumentation when we delete it.
296 if (jit_ != nullptr) {
297 VLOG(jit) << "Deleting jit";
298 jit_.reset(nullptr);
299 }
300
301 // Shutdown the fault manager if it was initialized.
302 fault_manager.Shutdown();
303
304 ScopedTrace trace2("Delete state");
305 delete monitor_list_;
306 delete monitor_pool_;
307 delete class_linker_;
308 delete heap_;
309 delete intern_table_;
310 delete java_vm_;
311 delete oat_file_manager_;
312 Thread::Shutdown();
313 QuasiAtomic::Shutdown();
314 verifier::MethodVerifier::Shutdown();
315
316 // Destroy allocators before shutting down the MemMap because they may use it.
317 linear_alloc_.reset();
318 low_4gb_arena_pool_.reset();
319 arena_pool_.reset();
320 jit_arena_pool_.reset();
321 MemMap::Shutdown();
322
323 // TODO: acquire a static mutex on Runtime to avoid racing.
324 CHECK(instance_ == nullptr || instance_ == this);
325 instance_ = nullptr;
326 }
327
328 struct AbortState {
Dumpart::AbortState329 void Dump(std::ostream& os) const {
330 if (gAborting > 1) {
331 os << "Runtime aborting --- recursively, so no thread-specific detail!\n";
332 return;
333 }
334 gAborting++;
335 os << "Runtime aborting...\n";
336 if (Runtime::Current() == nullptr) {
337 os << "(Runtime does not yet exist!)\n";
338 DumpNativeStack(os, GetTid(), nullptr, " native: ", nullptr);
339 return;
340 }
341 Thread* self = Thread::Current();
342 if (self == nullptr) {
343 os << "(Aborting thread was not attached to runtime!)\n";
344 DumpKernelStack(os, GetTid(), " kernel: ", false);
345 DumpNativeStack(os, GetTid(), nullptr, " native: ", nullptr);
346 } else {
347 os << "Aborting thread:\n";
348 if (Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self)) {
349 DumpThread(os, self);
350 } else {
351 if (Locks::mutator_lock_->SharedTryLock(self)) {
352 DumpThread(os, self);
353 Locks::mutator_lock_->SharedUnlock(self);
354 }
355 }
356 }
357 DumpAllThreads(os, self);
358 }
359
360 // No thread-safety analysis as we do explicitly test for holding the mutator lock.
DumpThreadart::AbortState361 void DumpThread(std::ostream& os, Thread* self) const NO_THREAD_SAFETY_ANALYSIS {
362 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self));
363 self->Dump(os);
364 if (self->IsExceptionPending()) {
365 mirror::Throwable* exception = self->GetException();
366 os << "Pending exception " << exception->Dump();
367 }
368 }
369
DumpAllThreadsart::AbortState370 void DumpAllThreads(std::ostream& os, Thread* self) const {
371 Runtime* runtime = Runtime::Current();
372 if (runtime != nullptr) {
373 ThreadList* thread_list = runtime->GetThreadList();
374 if (thread_list != nullptr) {
375 bool tll_already_held = Locks::thread_list_lock_->IsExclusiveHeld(self);
376 bool ml_already_held = Locks::mutator_lock_->IsSharedHeld(self);
377 if (!tll_already_held || !ml_already_held) {
378 os << "Dumping all threads without appropriate locks held:"
379 << (!tll_already_held ? " thread list lock" : "")
380 << (!ml_already_held ? " mutator lock" : "")
381 << "\n";
382 }
383 os << "All threads:\n";
384 thread_list->Dump(os);
385 }
386 }
387 }
388 };
389
Abort()390 void Runtime::Abort() {
391 gAborting++; // set before taking any locks
392
393 // Ensure that we don't have multiple threads trying to abort at once,
394 // which would result in significantly worse diagnostics.
395 MutexLock mu(Thread::Current(), *Locks::abort_lock_);
396
397 // Get any pending output out of the way.
398 fflush(nullptr);
399
400 // Many people have difficulty distinguish aborts from crashes,
401 // so be explicit.
402 AbortState state;
403 LOG(INTERNAL_FATAL) << Dumpable<AbortState>(state);
404
405 // Call the abort hook if we have one.
406 if (Runtime::Current() != nullptr && Runtime::Current()->abort_ != nullptr) {
407 LOG(INTERNAL_FATAL) << "Calling abort hook...";
408 Runtime::Current()->abort_();
409 // notreached
410 LOG(INTERNAL_FATAL) << "Unexpectedly returned from abort hook!";
411 }
412
413 #if defined(__GLIBC__)
414 // TODO: we ought to be able to use pthread_kill(3) here (or abort(3),
415 // which POSIX defines in terms of raise(3), which POSIX defines in terms
416 // of pthread_kill(3)). On Linux, though, libcorkscrew can't unwind through
417 // libpthread, which means the stacks we dump would be useless. Calling
418 // tgkill(2) directly avoids that.
419 syscall(__NR_tgkill, getpid(), GetTid(), SIGABRT);
420 // TODO: LLVM installs it's own SIGABRT handler so exit to be safe... Can we disable that in LLVM?
421 // If not, we could use sigaction(3) before calling tgkill(2) and lose this call to exit(3).
422 exit(1);
423 #else
424 abort();
425 #endif
426 // notreached
427 }
428
PreZygoteFork()429 void Runtime::PreZygoteFork() {
430 heap_->PreZygoteFork();
431 }
432
CallExitHook(jint status)433 void Runtime::CallExitHook(jint status) {
434 if (exit_ != nullptr) {
435 ScopedThreadStateChange tsc(Thread::Current(), kNative);
436 exit_(status);
437 LOG(WARNING) << "Exit hook returned instead of exiting!";
438 }
439 }
440
SweepSystemWeaks(IsMarkedVisitor * visitor)441 void Runtime::SweepSystemWeaks(IsMarkedVisitor* visitor) {
442 GetInternTable()->SweepInternTableWeaks(visitor);
443 GetMonitorList()->SweepMonitorList(visitor);
444 GetJavaVM()->SweepJniWeakGlobals(visitor);
445 GetHeap()->SweepAllocationRecords(visitor);
446 GetLambdaBoxTable()->SweepWeakBoxedLambdas(visitor);
447 }
448
ParseOptions(const RuntimeOptions & raw_options,bool ignore_unrecognized,RuntimeArgumentMap * runtime_options)449 bool Runtime::ParseOptions(const RuntimeOptions& raw_options,
450 bool ignore_unrecognized,
451 RuntimeArgumentMap* runtime_options) {
452 InitLogging(/* argv */ nullptr); // Calls Locks::Init() as a side effect.
453 bool parsed = ParsedOptions::Parse(raw_options, ignore_unrecognized, runtime_options);
454 if (!parsed) {
455 LOG(ERROR) << "Failed to parse options";
456 return false;
457 }
458 return true;
459 }
460
Create(RuntimeArgumentMap && runtime_options)461 bool Runtime::Create(RuntimeArgumentMap&& runtime_options) {
462 // TODO: acquire a static mutex on Runtime to avoid racing.
463 if (Runtime::instance_ != nullptr) {
464 return false;
465 }
466 instance_ = new Runtime;
467 if (!instance_->Init(std::move(runtime_options))) {
468 // TODO: Currently deleting the instance will abort the runtime on destruction. Now This will
469 // leak memory, instead. Fix the destructor. b/19100793.
470 // delete instance_;
471 instance_ = nullptr;
472 return false;
473 }
474 return true;
475 }
476
Create(const RuntimeOptions & raw_options,bool ignore_unrecognized)477 bool Runtime::Create(const RuntimeOptions& raw_options, bool ignore_unrecognized) {
478 RuntimeArgumentMap runtime_options;
479 return ParseOptions(raw_options, ignore_unrecognized, &runtime_options) &&
480 Create(std::move(runtime_options));
481 }
482
CreateSystemClassLoader(Runtime * runtime)483 static jobject CreateSystemClassLoader(Runtime* runtime) {
484 if (runtime->IsAotCompiler() && !runtime->GetCompilerCallbacks()->IsBootImage()) {
485 return nullptr;
486 }
487
488 ScopedObjectAccess soa(Thread::Current());
489 ClassLinker* cl = Runtime::Current()->GetClassLinker();
490 auto pointer_size = cl->GetImagePointerSize();
491
492 StackHandleScope<2> hs(soa.Self());
493 Handle<mirror::Class> class_loader_class(
494 hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader)));
495 CHECK(cl->EnsureInitialized(soa.Self(), class_loader_class, true, true));
496
497 ArtMethod* getSystemClassLoader = class_loader_class->FindDirectMethod(
498 "getSystemClassLoader", "()Ljava/lang/ClassLoader;", pointer_size);
499 CHECK(getSystemClassLoader != nullptr);
500
501 JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr);
502 JNIEnv* env = soa.Self()->GetJniEnv();
503 ScopedLocalRef<jobject> system_class_loader(env, soa.AddLocalReference<jobject>(result.GetL()));
504 CHECK(system_class_loader.get() != nullptr);
505
506 soa.Self()->SetClassLoaderOverride(system_class_loader.get());
507
508 Handle<mirror::Class> thread_class(
509 hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread)));
510 CHECK(cl->EnsureInitialized(soa.Self(), thread_class, true, true));
511
512 ArtField* contextClassLoader =
513 thread_class->FindDeclaredInstanceField("contextClassLoader", "Ljava/lang/ClassLoader;");
514 CHECK(contextClassLoader != nullptr);
515
516 // We can't run in a transaction yet.
517 contextClassLoader->SetObject<false>(soa.Self()->GetPeer(),
518 soa.Decode<mirror::ClassLoader*>(system_class_loader.get()));
519
520 return env->NewGlobalRef(system_class_loader.get());
521 }
522
GetPatchoatExecutable() const523 std::string Runtime::GetPatchoatExecutable() const {
524 if (!patchoat_executable_.empty()) {
525 return patchoat_executable_;
526 }
527 std::string patchoat_executable(GetAndroidRoot());
528 patchoat_executable += (kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat");
529 return patchoat_executable;
530 }
531
GetCompilerExecutable() const532 std::string Runtime::GetCompilerExecutable() const {
533 if (!compiler_executable_.empty()) {
534 return compiler_executable_;
535 }
536 std::string compiler_executable(GetAndroidRoot());
537 compiler_executable += (kIsDebugBuild ? "/bin/dex2oatd" : "/bin/dex2oat");
538 return compiler_executable;
539 }
540
Start()541 bool Runtime::Start() {
542 VLOG(startup) << "Runtime::Start entering";
543
544 CHECK(!no_sig_chain_) << "A started runtime should have sig chain enabled";
545
546 // If a debug host build, disable ptrace restriction for debugging and test timeout thread dump.
547 // Only 64-bit as prctl() may fail in 32 bit userspace on a 64-bit kernel.
548 #if defined(__linux__) && !defined(__ANDROID__) && defined(__x86_64__)
549 if (kIsDebugBuild) {
550 CHECK_EQ(prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY), 0);
551 }
552 #endif
553
554 // Restore main thread state to kNative as expected by native code.
555 Thread* self = Thread::Current();
556
557 self->TransitionFromRunnableToSuspended(kNative);
558
559 started_ = true;
560
561 // Create the JIT either if we have to use JIT compilation or save profiling info.
562 // TODO(calin): We use the JIT class as a proxy for JIT compilation and for
563 // recoding profiles. Maybe we should consider changing the name to be more clear it's
564 // not only about compiling. b/28295073.
565 if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
566 std::string error_msg;
567 if (!IsZygote()) {
568 // If we are the zygote then we need to wait until after forking to create the code cache
569 // due to SELinux restrictions on r/w/x memory regions.
570 CreateJit();
571 } else if (jit_options_->UseJitCompilation()) {
572 if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
573 // Try to load compiler pre zygote to reduce PSS. b/27744947
574 LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
575 }
576 }
577 }
578
579 if (!IsImageDex2OatEnabled() || !GetHeap()->HasBootImageSpace()) {
580 ScopedObjectAccess soa(self);
581 StackHandleScope<2> hs(soa.Self());
582
583 auto class_class(hs.NewHandle<mirror::Class>(mirror::Class::GetJavaLangClass()));
584 auto field_class(hs.NewHandle<mirror::Class>(mirror::Field::StaticClass()));
585
586 class_linker_->EnsureInitialized(soa.Self(), class_class, true, true);
587 // Field class is needed for register_java_net_InetAddress in libcore, b/28153851.
588 class_linker_->EnsureInitialized(soa.Self(), field_class, true, true);
589 }
590
591 // InitNativeMethods needs to be after started_ so that the classes
592 // it touches will have methods linked to the oat file if necessary.
593 {
594 ScopedTrace trace2("InitNativeMethods");
595 InitNativeMethods();
596 }
597
598 // Initialize well known thread group values that may be accessed threads while attaching.
599 InitThreadGroups(self);
600
601 Thread::FinishStartup();
602
603 system_class_loader_ = CreateSystemClassLoader(this);
604
605 if (is_zygote_) {
606 if (!InitZygote()) {
607 return false;
608 }
609 } else {
610 if (is_native_bridge_loaded_) {
611 PreInitializeNativeBridge(".");
612 }
613 NativeBridgeAction action = force_native_bridge_
614 ? NativeBridgeAction::kInitialize
615 : NativeBridgeAction::kUnload;
616 InitNonZygoteOrPostFork(self->GetJniEnv(),
617 /* is_system_server */ false,
618 action,
619 GetInstructionSetString(kRuntimeISA));
620 }
621
622 StartDaemonThreads();
623
624 {
625 ScopedObjectAccess soa(self);
626 self->GetJniEnv()->locals.AssertEmpty();
627 }
628
629 VLOG(startup) << "Runtime::Start exiting";
630 finished_starting_ = true;
631
632 if (profiler_options_.IsEnabled() && !profile_output_filename_.empty()) {
633 // User has asked for a profile using -Xenable-profiler.
634 // Create the profile file if it doesn't exist.
635 int fd = open(profile_output_filename_.c_str(), O_RDWR|O_CREAT|O_EXCL, 0660);
636 if (fd >= 0) {
637 close(fd);
638 } else if (errno != EEXIST) {
639 LOG(WARNING) << "Failed to access the profile file. Profiler disabled.";
640 }
641 }
642
643 if (trace_config_.get() != nullptr && trace_config_->trace_file != "") {
644 ScopedThreadStateChange tsc(self, kWaitingForMethodTracingStart);
645 Trace::Start(trace_config_->trace_file.c_str(),
646 -1,
647 static_cast<int>(trace_config_->trace_file_size),
648 0,
649 trace_config_->trace_output_mode,
650 trace_config_->trace_mode,
651 0);
652 }
653
654 return true;
655 }
656
EndThreadBirth()657 void Runtime::EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
658 DCHECK_GT(threads_being_born_, 0U);
659 threads_being_born_--;
660 if (shutting_down_started_ && threads_being_born_ == 0) {
661 shutdown_cond_->Broadcast(Thread::Current());
662 }
663 }
664
665 // Do zygote-mode-only initialization.
InitZygote()666 bool Runtime::InitZygote() {
667 #ifdef __linux__
668 // zygote goes into its own process group
669 setpgid(0, 0);
670
671 // See storage config details at http://source.android.com/tech/storage/
672 // Create private mount namespace shared by all children
673 if (unshare(CLONE_NEWNS) == -1) {
674 PLOG(ERROR) << "Failed to unshare()";
675 return false;
676 }
677
678 // Mark rootfs as being a slave so that changes from default
679 // namespace only flow into our children.
680 if (mount("rootfs", "/", nullptr, (MS_SLAVE | MS_REC), nullptr) == -1) {
681 PLOG(ERROR) << "Failed to mount() rootfs as MS_SLAVE";
682 return false;
683 }
684
685 // Create a staging tmpfs that is shared by our children; they will
686 // bind mount storage into their respective private namespaces, which
687 // are isolated from each other.
688 const char* target_base = getenv("EMULATED_STORAGE_TARGET");
689 if (target_base != nullptr) {
690 if (mount("tmpfs", target_base, "tmpfs", MS_NOSUID | MS_NODEV,
691 "uid=0,gid=1028,mode=0751") == -1) {
692 PLOG(ERROR) << "Failed to mount tmpfs to " << target_base;
693 return false;
694 }
695 }
696
697 return true;
698 #else
699 UNIMPLEMENTED(FATAL);
700 return false;
701 #endif
702 }
703
InitNonZygoteOrPostFork(JNIEnv * env,bool is_system_server,NativeBridgeAction action,const char * isa)704 void Runtime::InitNonZygoteOrPostFork(
705 JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa) {
706 is_zygote_ = false;
707
708 if (is_native_bridge_loaded_) {
709 switch (action) {
710 case NativeBridgeAction::kUnload:
711 UnloadNativeBridge();
712 is_native_bridge_loaded_ = false;
713 break;
714
715 case NativeBridgeAction::kInitialize:
716 InitializeNativeBridge(env, isa);
717 break;
718 }
719 }
720
721 // Create the thread pools.
722 heap_->CreateThreadPool();
723 // Reset the gc performance data at zygote fork so that the GCs
724 // before fork aren't attributed to an app.
725 heap_->ResetGcPerformanceInfo();
726
727
728 if (!is_system_server &&
729 !safe_mode_ &&
730 (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) &&
731 jit_.get() == nullptr) {
732 // Note that when running ART standalone (not zygote, nor zygote fork),
733 // the jit may have already been created.
734 CreateJit();
735 }
736
737 StartSignalCatcher();
738
739 // Start the JDWP thread. If the command-line debugger flags specified "suspend=y",
740 // this will pause the runtime, so we probably want this to come last.
741 Dbg::StartJdwp();
742 }
743
StartSignalCatcher()744 void Runtime::StartSignalCatcher() {
745 if (!is_zygote_) {
746 signal_catcher_ = new SignalCatcher(stack_trace_file_);
747 }
748 }
749
IsShuttingDown(Thread * self)750 bool Runtime::IsShuttingDown(Thread* self) {
751 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
752 return IsShuttingDownLocked();
753 }
754
IsDebuggable() const755 bool Runtime::IsDebuggable() const {
756 const OatFile* oat_file = GetOatFileManager().GetPrimaryOatFile();
757 return oat_file != nullptr && oat_file->IsDebuggable();
758 }
759
StartDaemonThreads()760 void Runtime::StartDaemonThreads() {
761 ScopedTrace trace(__FUNCTION__);
762 VLOG(startup) << "Runtime::StartDaemonThreads entering";
763
764 Thread* self = Thread::Current();
765
766 // Must be in the kNative state for calling native methods.
767 CHECK_EQ(self->GetState(), kNative);
768
769 JNIEnv* env = self->GetJniEnv();
770 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
771 WellKnownClasses::java_lang_Daemons_start);
772 if (env->ExceptionCheck()) {
773 env->ExceptionDescribe();
774 LOG(FATAL) << "Error starting java.lang.Daemons";
775 }
776
777 VLOG(startup) << "Runtime::StartDaemonThreads exiting";
778 }
779
780 // Attempts to open dex files from image(s). Given the image location, try to find the oat file
781 // and open it to get the stored dex file. If the image is the first for a multi-image boot
782 // classpath, go on and also open the other images.
OpenDexFilesFromImage(const std::string & image_location,std::vector<std::unique_ptr<const DexFile>> * dex_files,size_t * failures)783 static bool OpenDexFilesFromImage(const std::string& image_location,
784 std::vector<std::unique_ptr<const DexFile>>* dex_files,
785 size_t* failures) {
786 DCHECK(dex_files != nullptr) << "OpenDexFilesFromImage: out-param is nullptr";
787
788 // Use a work-list approach, so that we can easily reuse the opening code.
789 std::vector<std::string> image_locations;
790 image_locations.push_back(image_location);
791
792 for (size_t index = 0; index < image_locations.size(); ++index) {
793 std::string system_filename;
794 bool has_system = false;
795 std::string cache_filename_unused;
796 bool dalvik_cache_exists_unused;
797 bool has_cache_unused;
798 bool is_global_cache_unused;
799 bool found_image = gc::space::ImageSpace::FindImageFilename(image_locations[index].c_str(),
800 kRuntimeISA,
801 &system_filename,
802 &has_system,
803 &cache_filename_unused,
804 &dalvik_cache_exists_unused,
805 &has_cache_unused,
806 &is_global_cache_unused);
807
808 if (!found_image || !has_system) {
809 return false;
810 }
811
812 // We are falling back to non-executable use of the oat file because patching failed, presumably
813 // due to lack of space.
814 std::string oat_filename =
815 ImageHeader::GetOatLocationFromImageLocation(system_filename.c_str());
816 std::string oat_location =
817 ImageHeader::GetOatLocationFromImageLocation(image_locations[index].c_str());
818 // Note: in the multi-image case, the image location may end in ".jar," and not ".art." Handle
819 // that here.
820 if (EndsWith(oat_location, ".jar")) {
821 oat_location.replace(oat_location.length() - 3, 3, "oat");
822 }
823
824 std::unique_ptr<File> file(OS::OpenFileForReading(oat_filename.c_str()));
825 if (file.get() == nullptr) {
826 return false;
827 }
828 std::string error_msg;
829 std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.release(),
830 false,
831 false,
832 /*low_4gb*/false,
833 &error_msg));
834 if (elf_file.get() == nullptr) {
835 return false;
836 }
837 std::unique_ptr<const OatFile> oat_file(
838 OatFile::OpenWithElfFile(elf_file.release(), oat_location, nullptr, &error_msg));
839 if (oat_file == nullptr) {
840 LOG(WARNING) << "Unable to use '" << oat_filename << "' because " << error_msg;
841 return false;
842 }
843
844 for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
845 if (oat_dex_file == nullptr) {
846 *failures += 1;
847 continue;
848 }
849 std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
850 if (dex_file.get() == nullptr) {
851 *failures += 1;
852 } else {
853 dex_files->push_back(std::move(dex_file));
854 }
855 }
856
857 if (index == 0) {
858 // First file. See if this is a multi-image environment, and if so, enqueue the other images.
859 const OatHeader& boot_oat_header = oat_file->GetOatHeader();
860 const char* boot_cp = boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
861 if (boot_cp != nullptr) {
862 gc::space::ImageSpace::CreateMultiImageLocations(image_locations[0],
863 boot_cp,
864 &image_locations);
865 }
866 }
867
868 Runtime::Current()->GetOatFileManager().RegisterOatFile(std::move(oat_file));
869 }
870 return true;
871 }
872
873
OpenDexFiles(const std::vector<std::string> & dex_filenames,const std::vector<std::string> & dex_locations,const std::string & image_location,std::vector<std::unique_ptr<const DexFile>> * dex_files)874 static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
875 const std::vector<std::string>& dex_locations,
876 const std::string& image_location,
877 std::vector<std::unique_ptr<const DexFile>>* dex_files) {
878 DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is nullptr";
879 size_t failure_count = 0;
880 if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
881 return failure_count;
882 }
883 failure_count = 0;
884 for (size_t i = 0; i < dex_filenames.size(); i++) {
885 const char* dex_filename = dex_filenames[i].c_str();
886 const char* dex_location = dex_locations[i].c_str();
887 std::string error_msg;
888 if (!OS::FileExists(dex_filename)) {
889 LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'";
890 continue;
891 }
892 if (!DexFile::Open(dex_filename, dex_location, &error_msg, dex_files)) {
893 LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg;
894 ++failure_count;
895 }
896 }
897 return failure_count;
898 }
899
SetSentinel(mirror::Object * sentinel)900 void Runtime::SetSentinel(mirror::Object* sentinel) {
901 CHECK(sentinel_.Read() == nullptr);
902 CHECK(sentinel != nullptr);
903 CHECK(!heap_->IsMovableObject(sentinel));
904 sentinel_ = GcRoot<mirror::Object>(sentinel);
905 }
906
Init(RuntimeArgumentMap && runtime_options_in)907 bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
908 RuntimeArgumentMap runtime_options(std::move(runtime_options_in));
909 ScopedTrace trace(__FUNCTION__);
910 CHECK_EQ(sysconf(_SC_PAGE_SIZE), kPageSize);
911
912 MemMap::Init();
913
914 using Opt = RuntimeArgumentMap;
915 VLOG(startup) << "Runtime::Init -verbose:startup enabled";
916
917 QuasiAtomic::Startup();
918
919 oat_file_manager_ = new OatFileManager;
920
921 Thread::SetSensitiveThreadHook(runtime_options.GetOrDefault(Opt::HookIsSensitiveThread));
922 Monitor::Init(runtime_options.GetOrDefault(Opt::LockProfThreshold));
923
924 boot_class_path_string_ = runtime_options.ReleaseOrDefault(Opt::BootClassPath);
925 class_path_string_ = runtime_options.ReleaseOrDefault(Opt::ClassPath);
926 properties_ = runtime_options.ReleaseOrDefault(Opt::PropertiesList);
927
928 compiler_callbacks_ = runtime_options.GetOrDefault(Opt::CompilerCallbacksPtr);
929 patchoat_executable_ = runtime_options.ReleaseOrDefault(Opt::PatchOat);
930 must_relocate_ = runtime_options.GetOrDefault(Opt::Relocate);
931 is_zygote_ = runtime_options.Exists(Opt::Zygote);
932 is_explicit_gc_disabled_ = runtime_options.Exists(Opt::DisableExplicitGC);
933 dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::Dex2Oat);
934 image_dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::ImageDex2Oat);
935 dump_native_stack_on_sig_quit_ = runtime_options.GetOrDefault(Opt::DumpNativeStackOnSigQuit);
936
937 vfprintf_ = runtime_options.GetOrDefault(Opt::HookVfprintf);
938 exit_ = runtime_options.GetOrDefault(Opt::HookExit);
939 abort_ = runtime_options.GetOrDefault(Opt::HookAbort);
940
941 default_stack_size_ = runtime_options.GetOrDefault(Opt::StackSize);
942 stack_trace_file_ = runtime_options.ReleaseOrDefault(Opt::StackTraceFile);
943
944 compiler_executable_ = runtime_options.ReleaseOrDefault(Opt::Compiler);
945 compiler_options_ = runtime_options.ReleaseOrDefault(Opt::CompilerOptions);
946 image_compiler_options_ = runtime_options.ReleaseOrDefault(Opt::ImageCompilerOptions);
947 image_location_ = runtime_options.GetOrDefault(Opt::Image);
948
949 max_spins_before_thin_lock_inflation_ =
950 runtime_options.GetOrDefault(Opt::MaxSpinsBeforeThinLockInflation);
951
952 monitor_list_ = new MonitorList;
953 monitor_pool_ = MonitorPool::Create();
954 thread_list_ = new ThreadList;
955 intern_table_ = new InternTable;
956
957 verify_ = runtime_options.GetOrDefault(Opt::Verify);
958 allow_dex_file_fallback_ = !runtime_options.Exists(Opt::NoDexFileFallback);
959
960 no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
961 force_native_bridge_ = runtime_options.Exists(Opt::ForceNativeBridge);
962
963 Split(runtime_options.GetOrDefault(Opt::CpuAbiList), ',', &cpu_abilist_);
964
965 fingerprint_ = runtime_options.ReleaseOrDefault(Opt::Fingerprint);
966
967 if (runtime_options.GetOrDefault(Opt::Interpret)) {
968 GetInstrumentation()->ForceInterpretOnly();
969 }
970
971 zygote_max_failed_boots_ = runtime_options.GetOrDefault(Opt::ZygoteMaxFailedBoots);
972 experimental_flags_ = runtime_options.GetOrDefault(Opt::Experimental);
973 is_low_memory_mode_ = runtime_options.Exists(Opt::LowMemoryMode);
974
975 {
976 CompilerFilter::Filter filter;
977 std::string filter_str = runtime_options.GetOrDefault(Opt::OatFileManagerCompilerFilter);
978 if (!CompilerFilter::ParseCompilerFilter(filter_str.c_str(), &filter)) {
979 LOG(ERROR) << "Cannot parse compiler filter " << filter_str;
980 return false;
981 }
982 OatFileManager::SetCompilerFilter(filter);
983 }
984
985 XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
986 heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize),
987 runtime_options.GetOrDefault(Opt::HeapGrowthLimit),
988 runtime_options.GetOrDefault(Opt::HeapMinFree),
989 runtime_options.GetOrDefault(Opt::HeapMaxFree),
990 runtime_options.GetOrDefault(Opt::HeapTargetUtilization),
991 runtime_options.GetOrDefault(Opt::ForegroundHeapGrowthMultiplier),
992 runtime_options.GetOrDefault(Opt::MemoryMaximumSize),
993 runtime_options.GetOrDefault(Opt::NonMovingSpaceCapacity),
994 runtime_options.GetOrDefault(Opt::Image),
995 runtime_options.GetOrDefault(Opt::ImageInstructionSet),
996 xgc_option.collector_type_,
997 runtime_options.GetOrDefault(Opt::BackgroundGc),
998 runtime_options.GetOrDefault(Opt::LargeObjectSpace),
999 runtime_options.GetOrDefault(Opt::LargeObjectThreshold),
1000 runtime_options.GetOrDefault(Opt::ParallelGCThreads),
1001 runtime_options.GetOrDefault(Opt::ConcGCThreads),
1002 runtime_options.Exists(Opt::LowMemoryMode),
1003 runtime_options.GetOrDefault(Opt::LongPauseLogThreshold),
1004 runtime_options.GetOrDefault(Opt::LongGCLogThreshold),
1005 runtime_options.Exists(Opt::IgnoreMaxFootprint),
1006 runtime_options.GetOrDefault(Opt::UseTLAB),
1007 xgc_option.verify_pre_gc_heap_,
1008 xgc_option.verify_pre_sweeping_heap_,
1009 xgc_option.verify_post_gc_heap_,
1010 xgc_option.verify_pre_gc_rosalloc_,
1011 xgc_option.verify_pre_sweeping_rosalloc_,
1012 xgc_option.verify_post_gc_rosalloc_,
1013 xgc_option.gcstress_,
1014 runtime_options.GetOrDefault(Opt::EnableHSpaceCompactForOOM),
1015 runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs));
1016
1017 if (!heap_->HasBootImageSpace() && !allow_dex_file_fallback_) {
1018 LOG(ERROR) << "Dex file fallback disabled, cannot continue without image.";
1019 return false;
1020 }
1021
1022 dump_gc_performance_on_shutdown_ = runtime_options.Exists(Opt::DumpGCPerformanceOnShutdown);
1023
1024 if (runtime_options.Exists(Opt::JdwpOptions)) {
1025 Dbg::ConfigureJdwp(runtime_options.GetOrDefault(Opt::JdwpOptions));
1026 }
1027
1028 jit_options_.reset(jit::JitOptions::CreateFromRuntimeArguments(runtime_options));
1029 if (IsAotCompiler()) {
1030 // If we are already the compiler at this point, we must be dex2oat. Don't create the jit in
1031 // this case.
1032 // If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
1033 // null and we don't create the jit.
1034 jit_options_->SetUseJitCompilation(false);
1035 jit_options_->SetSaveProfilingInfo(false);
1036 }
1037
1038 // Allocate a global table of boxed lambda objects <-> closures.
1039 lambda_box_table_ = MakeUnique<lambda::BoxTable>();
1040
1041 // Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but
1042 // can't be trimmed as easily.
1043 const bool use_malloc = IsAotCompiler();
1044 arena_pool_.reset(new ArenaPool(use_malloc, /* low_4gb */ false));
1045 jit_arena_pool_.reset(
1046 new ArenaPool(/* use_malloc */ false, /* low_4gb */ false, "CompilerMetadata"));
1047
1048 if (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
1049 // 4gb, no malloc. Explanation in header.
1050 low_4gb_arena_pool_.reset(new ArenaPool(/* use_malloc */ false, /* low_4gb */ true));
1051 }
1052 linear_alloc_.reset(CreateLinearAlloc());
1053
1054 BlockSignals();
1055 InitPlatformSignalHandlers();
1056
1057 // Change the implicit checks flags based on runtime architecture.
1058 switch (kRuntimeISA) {
1059 case kArm:
1060 case kThumb2:
1061 case kX86:
1062 case kArm64:
1063 case kX86_64:
1064 case kMips:
1065 case kMips64:
1066 implicit_null_checks_ = true;
1067 // Installing stack protection does not play well with valgrind.
1068 implicit_so_checks_ = !(RUNNING_ON_MEMORY_TOOL && kMemoryToolIsValgrind);
1069 break;
1070 default:
1071 // Keep the defaults.
1072 break;
1073 }
1074
1075 if (!no_sig_chain_) {
1076 // Dex2Oat's Runtime does not need the signal chain or the fault handler.
1077
1078 // Initialize the signal chain so that any calls to sigaction get
1079 // correctly routed to the next in the chain regardless of whether we
1080 // have claimed the signal or not.
1081 InitializeSignalChain();
1082
1083 if (implicit_null_checks_ || implicit_so_checks_ || implicit_suspend_checks_) {
1084 fault_manager.Init();
1085
1086 // These need to be in a specific order. The null point check handler must be
1087 // after the suspend check and stack overflow check handlers.
1088 //
1089 // Note: the instances attach themselves to the fault manager and are handled by it. The manager
1090 // will delete the instance on Shutdown().
1091 if (implicit_suspend_checks_) {
1092 new SuspensionHandler(&fault_manager);
1093 }
1094
1095 if (implicit_so_checks_) {
1096 new StackOverflowHandler(&fault_manager);
1097 }
1098
1099 if (implicit_null_checks_) {
1100 new NullPointerHandler(&fault_manager);
1101 }
1102
1103 if (kEnableJavaStackTraceHandler) {
1104 new JavaStackTraceHandler(&fault_manager);
1105 }
1106 }
1107 }
1108
1109 java_vm_ = new JavaVMExt(this, runtime_options);
1110
1111 Thread::Startup();
1112
1113 // ClassLinker needs an attached thread, but we can't fully attach a thread without creating
1114 // objects. We can't supply a thread group yet; it will be fixed later. Since we are the main
1115 // thread, we do not get a java peer.
1116 Thread* self = Thread::Attach("main", false, nullptr, false);
1117 CHECK_EQ(self->GetThreadId(), ThreadList::kMainThreadId);
1118 CHECK(self != nullptr);
1119
1120 // Set us to runnable so tools using a runtime can allocate and GC by default
1121 self->TransitionFromSuspendedToRunnable();
1122
1123 // Now we're attached, we can take the heap locks and validate the heap.
1124 GetHeap()->EnableObjectValidation();
1125
1126 CHECK_GE(GetHeap()->GetContinuousSpaces().size(), 1U);
1127 class_linker_ = new ClassLinker(intern_table_);
1128 if (GetHeap()->HasBootImageSpace()) {
1129 std::string error_msg;
1130 bool result = class_linker_->InitFromBootImage(&error_msg);
1131 if (!result) {
1132 LOG(ERROR) << "Could not initialize from image: " << error_msg;
1133 return false;
1134 }
1135 if (kIsDebugBuild) {
1136 for (auto image_space : GetHeap()->GetBootImageSpaces()) {
1137 image_space->VerifyImageAllocations();
1138 }
1139 }
1140 if (boot_class_path_string_.empty()) {
1141 // The bootclasspath is not explicitly specified: construct it from the loaded dex files.
1142 const std::vector<const DexFile*>& boot_class_path = GetClassLinker()->GetBootClassPath();
1143 std::vector<std::string> dex_locations;
1144 dex_locations.reserve(boot_class_path.size());
1145 for (const DexFile* dex_file : boot_class_path) {
1146 dex_locations.push_back(dex_file->GetLocation());
1147 }
1148 boot_class_path_string_ = Join(dex_locations, ':');
1149 }
1150 {
1151 ScopedTrace trace2("AddImageStringsToTable");
1152 GetInternTable()->AddImagesStringsToTable(heap_->GetBootImageSpaces());
1153 }
1154 {
1155 ScopedTrace trace2("MoveImageClassesToClassTable");
1156 GetClassLinker()->AddBootImageClassesToClassTable();
1157 }
1158 } else {
1159 std::vector<std::string> dex_filenames;
1160 Split(boot_class_path_string_, ':', &dex_filenames);
1161
1162 std::vector<std::string> dex_locations;
1163 if (!runtime_options.Exists(Opt::BootClassPathLocations)) {
1164 dex_locations = dex_filenames;
1165 } else {
1166 dex_locations = runtime_options.GetOrDefault(Opt::BootClassPathLocations);
1167 CHECK_EQ(dex_filenames.size(), dex_locations.size());
1168 }
1169
1170 std::vector<std::unique_ptr<const DexFile>> boot_class_path;
1171 if (runtime_options.Exists(Opt::BootClassPathDexList)) {
1172 boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList));
1173 } else {
1174 OpenDexFiles(dex_filenames,
1175 dex_locations,
1176 runtime_options.GetOrDefault(Opt::Image),
1177 &boot_class_path);
1178 }
1179 instruction_set_ = runtime_options.GetOrDefault(Opt::ImageInstructionSet);
1180 std::string error_msg;
1181 if (!class_linker_->InitWithoutImage(std::move(boot_class_path), &error_msg)) {
1182 LOG(ERROR) << "Could not initialize without image: " << error_msg;
1183 return false;
1184 }
1185
1186 // TODO: Should we move the following to InitWithoutImage?
1187 SetInstructionSet(instruction_set_);
1188 for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
1189 Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
1190 if (!HasCalleeSaveMethod(type)) {
1191 SetCalleeSaveMethod(CreateCalleeSaveMethod(), type);
1192 }
1193 }
1194 }
1195
1196 CHECK(class_linker_ != nullptr);
1197
1198 verifier::MethodVerifier::Init();
1199
1200 if (runtime_options.Exists(Opt::MethodTrace)) {
1201 trace_config_.reset(new TraceConfig());
1202 trace_config_->trace_file = runtime_options.ReleaseOrDefault(Opt::MethodTraceFile);
1203 trace_config_->trace_file_size = runtime_options.ReleaseOrDefault(Opt::MethodTraceFileSize);
1204 trace_config_->trace_mode = Trace::TraceMode::kMethodTracing;
1205 trace_config_->trace_output_mode = runtime_options.Exists(Opt::MethodTraceStreaming) ?
1206 Trace::TraceOutputMode::kStreaming :
1207 Trace::TraceOutputMode::kFile;
1208 }
1209
1210 {
1211 auto&& profiler_options = runtime_options.ReleaseOrDefault(Opt::ProfilerOpts);
1212 profile_output_filename_ = profiler_options.output_file_name_;
1213
1214 // TODO: Don't do this, just change ProfilerOptions to include the output file name?
1215 ProfilerOptions other_options(
1216 profiler_options.enabled_,
1217 profiler_options.period_s_,
1218 profiler_options.duration_s_,
1219 profiler_options.interval_us_,
1220 profiler_options.backoff_coefficient_,
1221 profiler_options.start_immediately_,
1222 profiler_options.top_k_threshold_,
1223 profiler_options.top_k_change_threshold_,
1224 profiler_options.profile_type_,
1225 profiler_options.max_stack_depth_);
1226
1227 profiler_options_ = other_options;
1228 }
1229
1230 // TODO: move this to just be an Trace::Start argument
1231 Trace::SetDefaultClockSource(runtime_options.GetOrDefault(Opt::ProfileClock));
1232
1233 // Pre-allocate an OutOfMemoryError for the double-OOME case.
1234 self->ThrowNewException("Ljava/lang/OutOfMemoryError;",
1235 "OutOfMemoryError thrown while trying to throw OutOfMemoryError; "
1236 "no stack trace available");
1237 pre_allocated_OutOfMemoryError_ = GcRoot<mirror::Throwable>(self->GetException());
1238 self->ClearException();
1239
1240 // Pre-allocate a NoClassDefFoundError for the common case of failing to find a system class
1241 // ahead of checking the application's class loader.
1242 self->ThrowNewException("Ljava/lang/NoClassDefFoundError;",
1243 "Class not found using the boot class loader; no stack trace available");
1244 pre_allocated_NoClassDefFoundError_ = GcRoot<mirror::Throwable>(self->GetException());
1245 self->ClearException();
1246
1247 // Look for a native bridge.
1248 //
1249 // The intended flow here is, in the case of a running system:
1250 //
1251 // Runtime::Init() (zygote):
1252 // LoadNativeBridge -> dlopen from cmd line parameter.
1253 // |
1254 // V
1255 // Runtime::Start() (zygote):
1256 // No-op wrt native bridge.
1257 // |
1258 // | start app
1259 // V
1260 // DidForkFromZygote(action)
1261 // action = kUnload -> dlclose native bridge.
1262 // action = kInitialize -> initialize library
1263 //
1264 //
1265 // The intended flow here is, in the case of a simple dalvikvm call:
1266 //
1267 // Runtime::Init():
1268 // LoadNativeBridge -> dlopen from cmd line parameter.
1269 // |
1270 // V
1271 // Runtime::Start():
1272 // DidForkFromZygote(kInitialize) -> try to initialize any native bridge given.
1273 // No-op wrt native bridge.
1274 {
1275 std::string native_bridge_file_name = runtime_options.ReleaseOrDefault(Opt::NativeBridge);
1276 is_native_bridge_loaded_ = LoadNativeBridge(native_bridge_file_name);
1277 }
1278
1279 VLOG(startup) << "Runtime::Init exiting";
1280
1281 return true;
1282 }
1283
InitNativeMethods()1284 void Runtime::InitNativeMethods() {
1285 VLOG(startup) << "Runtime::InitNativeMethods entering";
1286 Thread* self = Thread::Current();
1287 JNIEnv* env = self->GetJniEnv();
1288
1289 // Must be in the kNative state for calling native methods (JNI_OnLoad code).
1290 CHECK_EQ(self->GetState(), kNative);
1291
1292 // First set up JniConstants, which is used by both the runtime's built-in native
1293 // methods and libcore.
1294 JniConstants::init(env);
1295
1296 // Then set up the native methods provided by the runtime itself.
1297 RegisterRuntimeNativeMethods(env);
1298
1299 // Initialize classes used in JNI. The initialization requires runtime native
1300 // methods to be loaded first.
1301 WellKnownClasses::Init(env);
1302
1303 // Then set up libjavacore / libopenjdk, which are just a regular JNI libraries with
1304 // a regular JNI_OnLoad. Most JNI libraries can just use System.loadLibrary, but
1305 // libcore can't because it's the library that implements System.loadLibrary!
1306 {
1307 std::string error_msg;
1308 if (!java_vm_->LoadNativeLibrary(env, "libjavacore.so", nullptr, nullptr, &error_msg)) {
1309 LOG(FATAL) << "LoadNativeLibrary failed for \"libjavacore.so\": " << error_msg;
1310 }
1311 }
1312 {
1313 constexpr const char* kOpenJdkLibrary = kIsDebugBuild
1314 ? "libopenjdkd.so"
1315 : "libopenjdk.so";
1316 std::string error_msg;
1317 if (!java_vm_->LoadNativeLibrary(env, kOpenJdkLibrary, nullptr, nullptr, &error_msg)) {
1318 LOG(FATAL) << "LoadNativeLibrary failed for \"" << kOpenJdkLibrary << "\": " << error_msg;
1319 }
1320 }
1321
1322 // Initialize well known classes that may invoke runtime native methods.
1323 WellKnownClasses::LateInit(env);
1324
1325 VLOG(startup) << "Runtime::InitNativeMethods exiting";
1326 }
1327
ReclaimArenaPoolMemory()1328 void Runtime::ReclaimArenaPoolMemory() {
1329 arena_pool_->LockReclaimMemory();
1330 }
1331
InitThreadGroups(Thread * self)1332 void Runtime::InitThreadGroups(Thread* self) {
1333 JNIEnvExt* env = self->GetJniEnv();
1334 ScopedJniEnvLocalRefState env_state(env);
1335 main_thread_group_ =
1336 env->NewGlobalRef(env->GetStaticObjectField(
1337 WellKnownClasses::java_lang_ThreadGroup,
1338 WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup));
1339 CHECK(main_thread_group_ != nullptr || IsAotCompiler());
1340 system_thread_group_ =
1341 env->NewGlobalRef(env->GetStaticObjectField(
1342 WellKnownClasses::java_lang_ThreadGroup,
1343 WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup));
1344 CHECK(system_thread_group_ != nullptr || IsAotCompiler());
1345 }
1346
GetMainThreadGroup() const1347 jobject Runtime::GetMainThreadGroup() const {
1348 CHECK(main_thread_group_ != nullptr || IsAotCompiler());
1349 return main_thread_group_;
1350 }
1351
GetSystemThreadGroup() const1352 jobject Runtime::GetSystemThreadGroup() const {
1353 CHECK(system_thread_group_ != nullptr || IsAotCompiler());
1354 return system_thread_group_;
1355 }
1356
GetSystemClassLoader() const1357 jobject Runtime::GetSystemClassLoader() const {
1358 CHECK(system_class_loader_ != nullptr || IsAotCompiler());
1359 return system_class_loader_;
1360 }
1361
RegisterRuntimeNativeMethods(JNIEnv * env)1362 void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) {
1363 register_dalvik_system_DexFile(env);
1364 register_dalvik_system_VMDebug(env);
1365 register_dalvik_system_VMRuntime(env);
1366 register_dalvik_system_VMStack(env);
1367 register_dalvik_system_ZygoteHooks(env);
1368 register_java_lang_Class(env);
1369 register_java_lang_DexCache(env);
1370 register_java_lang_Object(env);
1371 register_java_lang_ref_FinalizerReference(env);
1372 register_java_lang_reflect_AbstractMethod(env);
1373 register_java_lang_reflect_Array(env);
1374 register_java_lang_reflect_Constructor(env);
1375 register_java_lang_reflect_Field(env);
1376 register_java_lang_reflect_Method(env);
1377 register_java_lang_reflect_Proxy(env);
1378 register_java_lang_ref_Reference(env);
1379 register_java_lang_String(env);
1380 register_java_lang_StringFactory(env);
1381 register_java_lang_System(env);
1382 register_java_lang_Thread(env);
1383 register_java_lang_Throwable(env);
1384 register_java_lang_VMClassLoader(env);
1385 register_java_util_concurrent_atomic_AtomicLong(env);
1386 register_libcore_util_CharsetUtils(env);
1387 register_org_apache_harmony_dalvik_ddmc_DdmServer(env);
1388 register_org_apache_harmony_dalvik_ddmc_DdmVmInternal(env);
1389 register_sun_misc_Unsafe(env);
1390 }
1391
DumpForSigQuit(std::ostream & os)1392 void Runtime::DumpForSigQuit(std::ostream& os) {
1393 // Dumping for SIGQIT may cause deadlocks if the the debugger is active. b/26118154
1394 if (Dbg::IsDebuggerActive()) {
1395 LOG(INFO) << "Skipping DumpForSigQuit due to active debugger";
1396 return;
1397 }
1398 GetClassLinker()->DumpForSigQuit(os);
1399 GetInternTable()->DumpForSigQuit(os);
1400 GetJavaVM()->DumpForSigQuit(os);
1401 GetHeap()->DumpForSigQuit(os);
1402 oat_file_manager_->DumpForSigQuit(os);
1403 if (GetJit() != nullptr) {
1404 GetJit()->DumpForSigQuit(os);
1405 } else {
1406 os << "Running non JIT\n";
1407 }
1408 TrackedAllocators::Dump(os);
1409 os << "\n";
1410
1411 thread_list_->DumpForSigQuit(os);
1412 BaseMutex::DumpAll(os);
1413 }
1414
DumpLockHolders(std::ostream & os)1415 void Runtime::DumpLockHolders(std::ostream& os) {
1416 uint64_t mutator_lock_owner = Locks::mutator_lock_->GetExclusiveOwnerTid();
1417 pid_t thread_list_lock_owner = GetThreadList()->GetLockOwner();
1418 pid_t classes_lock_owner = GetClassLinker()->GetClassesLockOwner();
1419 pid_t dex_lock_owner = GetClassLinker()->GetDexLockOwner();
1420 if ((thread_list_lock_owner | classes_lock_owner | dex_lock_owner) != 0) {
1421 os << "Mutator lock exclusive owner tid: " << mutator_lock_owner << "\n"
1422 << "ThreadList lock owner tid: " << thread_list_lock_owner << "\n"
1423 << "ClassLinker classes lock owner tid: " << classes_lock_owner << "\n"
1424 << "ClassLinker dex lock owner tid: " << dex_lock_owner << "\n";
1425 }
1426 }
1427
SetStatsEnabled(bool new_state)1428 void Runtime::SetStatsEnabled(bool new_state) {
1429 Thread* self = Thread::Current();
1430 MutexLock mu(self, *Locks::instrument_entrypoints_lock_);
1431 if (new_state == true) {
1432 GetStats()->Clear(~0);
1433 // TODO: wouldn't it make more sense to clear _all_ threads' stats?
1434 self->GetStats()->Clear(~0);
1435 if (stats_enabled_ != new_state) {
1436 GetInstrumentation()->InstrumentQuickAllocEntryPointsLocked();
1437 }
1438 } else if (stats_enabled_ != new_state) {
1439 GetInstrumentation()->UninstrumentQuickAllocEntryPointsLocked();
1440 }
1441 stats_enabled_ = new_state;
1442 }
1443
ResetStats(int kinds)1444 void Runtime::ResetStats(int kinds) {
1445 GetStats()->Clear(kinds & 0xffff);
1446 // TODO: wouldn't it make more sense to clear _all_ threads' stats?
1447 Thread::Current()->GetStats()->Clear(kinds >> 16);
1448 }
1449
GetStat(int kind)1450 int32_t Runtime::GetStat(int kind) {
1451 RuntimeStats* stats;
1452 if (kind < (1<<16)) {
1453 stats = GetStats();
1454 } else {
1455 stats = Thread::Current()->GetStats();
1456 kind >>= 16;
1457 }
1458 switch (kind) {
1459 case KIND_ALLOCATED_OBJECTS:
1460 return stats->allocated_objects;
1461 case KIND_ALLOCATED_BYTES:
1462 return stats->allocated_bytes;
1463 case KIND_FREED_OBJECTS:
1464 return stats->freed_objects;
1465 case KIND_FREED_BYTES:
1466 return stats->freed_bytes;
1467 case KIND_GC_INVOCATIONS:
1468 return stats->gc_for_alloc_count;
1469 case KIND_CLASS_INIT_COUNT:
1470 return stats->class_init_count;
1471 case KIND_CLASS_INIT_TIME:
1472 // Convert ns to us, reduce to 32 bits.
1473 return static_cast<int>(stats->class_init_time_ns / 1000);
1474 case KIND_EXT_ALLOCATED_OBJECTS:
1475 case KIND_EXT_ALLOCATED_BYTES:
1476 case KIND_EXT_FREED_OBJECTS:
1477 case KIND_EXT_FREED_BYTES:
1478 return 0; // backward compatibility
1479 default:
1480 LOG(FATAL) << "Unknown statistic " << kind;
1481 return -1; // unreachable
1482 }
1483 }
1484
BlockSignals()1485 void Runtime::BlockSignals() {
1486 SignalSet signals;
1487 signals.Add(SIGPIPE);
1488 // SIGQUIT is used to dump the runtime's state (including stack traces).
1489 signals.Add(SIGQUIT);
1490 // SIGUSR1 is used to initiate a GC.
1491 signals.Add(SIGUSR1);
1492 signals.Block();
1493 }
1494
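// Attaches the calling native thread to the runtime so it can run managed code; returns false on
// failure. Illustrative sketch (not an actual call site) of the expected attach/detach pairing:
//   Runtime* runtime = Runtime::Current();
//   if (runtime->AttachCurrentThread("worker", /* as_daemon */ false,
//                                    /* thread_group */ nullptr, /* create_peer */ true)) {
//     // ... execute managed code via JNI ...
//     runtime->DetachCurrentThread();
//   }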
1495 bool Runtime::AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
1496 bool create_peer) {
1497 ScopedTrace trace(__FUNCTION__);
1498 return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != nullptr;
1499 }
1500
1501 void Runtime::DetachCurrentThread() {
1502 ScopedTrace trace(__FUNCTION__);
1503 Thread* self = Thread::Current();
1504 if (self == nullptr) {
1505 LOG(FATAL) << "attempting to detach thread that is not attached";
1506 }
1507 if (self->HasManagedStack()) {
1508 LOG(FATAL) << *self << " attempting to detach while still running code";
1509 }
1510 thread_list_->Unregister(self);
1511 }
1512
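// The throwables returned below are pre-allocated during startup so that they can still be thrown
// when allocation is no longer possible (e.g. an OutOfMemoryError while the heap is exhausted).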
1513 mirror::Throwable* Runtime::GetPreAllocatedOutOfMemoryError() {
1514 mirror::Throwable* oome = pre_allocated_OutOfMemoryError_.Read();
1515 if (oome == nullptr) {
1516 LOG(ERROR) << "Failed to return pre-allocated OOME";
1517 }
1518 return oome;
1519 }
1520
1521 mirror::Throwable* Runtime::GetPreAllocatedNoClassDefFoundError() {
1522 mirror::Throwable* ncdfe = pre_allocated_NoClassDefFoundError_.Read();
1523 if (ncdfe == nullptr) {
1524 LOG(ERROR) << "Failed to return pre-allocated NoClassDefFoundError";
1525 }
1526 return ncdfe;
1527 }
1528
1529 void Runtime::VisitConstantRoots(RootVisitor* visitor) {
1530 // Visit the classes held as statics in the mirror classes; these can be visited concurrently
1531 // and only need to be visited once per GC since they never change.
1532 mirror::Class::VisitRoots(visitor);
1533 mirror::Constructor::VisitRoots(visitor);
1534 mirror::Reference::VisitRoots(visitor);
1535 mirror::Method::VisitRoots(visitor);
1536 mirror::StackTraceElement::VisitRoots(visitor);
1537 mirror::String::VisitRoots(visitor);
1538 mirror::Throwable::VisitRoots(visitor);
1539 mirror::Field::VisitRoots(visitor);
1540 // Visit the classes of all the primitive array types.
1541 mirror::PrimitiveArray<uint8_t>::VisitRoots(visitor); // BooleanArray
1542 mirror::PrimitiveArray<int8_t>::VisitRoots(visitor); // ByteArray
1543 mirror::PrimitiveArray<uint16_t>::VisitRoots(visitor); // CharArray
1544 mirror::PrimitiveArray<double>::VisitRoots(visitor); // DoubleArray
1545 mirror::PrimitiveArray<float>::VisitRoots(visitor); // FloatArray
1546 mirror::PrimitiveArray<int32_t>::VisitRoots(visitor); // IntArray
1547 mirror::PrimitiveArray<int64_t>::VisitRoots(visitor); // LongArray
1548 mirror::PrimitiveArray<int16_t>::VisitRoots(visitor); // ShortArray
1549 // Visiting the roots of these ArtMethods is not currently required since all the GcRoots are
1550 // null.
1551 BufferedRootVisitor<16> buffered_visitor(visitor, RootInfo(kRootVMInternal));
1552 const size_t pointer_size = GetClassLinker()->GetImagePointerSize();
1553 if (HasResolutionMethod()) {
1554 resolution_method_->VisitRoots(buffered_visitor, pointer_size);
1555 }
1556 if (HasImtConflictMethod()) {
1557 imt_conflict_method_->VisitRoots(buffered_visitor, pointer_size);
1558 }
1559 if (imt_unimplemented_method_ != nullptr) {
1560 imt_unimplemented_method_->VisitRoots(buffered_visitor, pointer_size);
1561 }
1562 for (size_t i = 0; i < kLastCalleeSaveType; ++i) {
1563 auto* m = reinterpret_cast<ArtMethod*>(callee_save_methods_[i]);
1564 if (m != nullptr) {
1565 m->VisitRoots(buffered_visitor, pointer_size);
1566 }
1567 }
1568 }
1569
1570 void Runtime::VisitConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags) {
1571 intern_table_->VisitRoots(visitor, flags);
1572 class_linker_->VisitRoots(visitor, flags);
1573 heap_->VisitAllocationRecords(visitor);
1574 if ((flags & kVisitRootFlagNewRoots) == 0) {
1575 // Guaranteed to have no new roots in the constant roots.
1576 VisitConstantRoots(visitor);
1577 }
1578 Dbg::VisitRoots(visitor);
1579 }
1580
1581 void Runtime::VisitTransactionRoots(RootVisitor* visitor) {
1582 if (preinitialization_transaction_ != nullptr) {
1583 preinitialization_transaction_->VisitRoots(visitor);
1584 }
1585 }
1586
1587 void Runtime::VisitNonThreadRoots(RootVisitor* visitor) {
1588 java_vm_->VisitRoots(visitor);
1589 sentinel_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
1590 pre_allocated_OutOfMemoryError_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
1591 pre_allocated_NoClassDefFoundError_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
1592 verifier::MethodVerifier::VisitStaticRoots(visitor);
1593 VisitTransactionRoots(visitor);
1594 }
1595
1596 void Runtime::VisitNonConcurrentRoots(RootVisitor* visitor) {
1597 thread_list_->VisitRoots(visitor);
1598 VisitNonThreadRoots(visitor);
1599 }
1600
1601 void Runtime::VisitThreadRoots(RootVisitor* visitor) {
1602 thread_list_->VisitRoots(visitor);
1603 }
1604
1605 size_t Runtime::FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
1606 gc::collector::GarbageCollector* collector) {
1607 return thread_list_->FlipThreadRoots(thread_flip_visitor, flip_callback, collector);
1608 }
1609
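// Visits all roots: first those that must be visited non-concurrently (thread stacks and other
// VM-internal roots), then those that can be visited concurrently (intern table, class linker,
// allocation records, and the constant roots).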
1610 void Runtime::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
1611 VisitNonConcurrentRoots(visitor);
1612 VisitConcurrentRoots(visitor, flags);
1613 }
1614
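// Image roots are expected not to move (image spaces are non-moving), so the CHECK below verifies
// that the visitor left each root in place.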
1615 void Runtime::VisitImageRoots(RootVisitor* visitor) {
1616 for (auto* space : GetHeap()->GetContinuousSpaces()) {
1617 if (space->IsImageSpace()) {
1618 auto* image_space = space->AsImageSpace();
1619 const auto& image_header = image_space->GetImageHeader();
1620 for (size_t i = 0; i < ImageHeader::kImageRootsMax; ++i) {
1621 auto* obj = image_header.GetImageRoot(static_cast<ImageHeader::ImageRoot>(i));
1622 if (obj != nullptr) {
1623 auto* after_obj = obj;
1624 visitor->VisitRoot(&after_obj, RootInfo(kRootStickyClass));
1625 CHECK_EQ(after_obj, obj);
1626 }
1627 }
1628 }
1629 }
1630 }
1631
1632 ArtMethod* Runtime::CreateImtConflictMethod(LinearAlloc* linear_alloc) {
1633 ClassLinker* const class_linker = GetClassLinker();
1634 ArtMethod* method = class_linker->CreateRuntimeMethod(linear_alloc);
1635 // When compiling, the code pointer will get set later when the image is loaded.
1636 const size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
1637 if (IsAotCompiler()) {
1638 method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
1639 } else {
1640 method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub());
1641 }
1642 // Create empty conflict table.
1643 method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count*/0u, linear_alloc),
1644 pointer_size);
1645 return method;
1646 }
1647
1648 void Runtime::SetImtConflictMethod(ArtMethod* method) {
1649 CHECK(method != nullptr);
1650 CHECK(method->IsRuntimeMethod());
1651 imt_conflict_method_ = method;
1652 }
1653
1654 ArtMethod* Runtime::CreateResolutionMethod() {
1655 auto* method = GetClassLinker()->CreateRuntimeMethod(GetLinearAlloc());
1656 // When compiling, the code pointer will get set later when the image is loaded.
1657 if (IsAotCompiler()) {
1658 size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
1659 method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
1660 } else {
1661 method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
1662 }
1663 return method;
1664 }
1665
1666 ArtMethod* Runtime::CreateCalleeSaveMethod() {
1667 auto* method = GetClassLinker()->CreateRuntimeMethod(GetLinearAlloc());
1668 size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
1669 method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
1670 DCHECK_NE(instruction_set_, kNone);
1671 DCHECK(method->IsRuntimeMethod());
1672 return method;
1673 }
1674
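// The three functions below gate access to system weaks (monitors, interned strings, JNI weak
// globals, allocation records, and boxed lambdas) around weak-root processing in the GC.
// Disallow/Allow are used only when read barriers are disabled (see the CHECKs); with read
// barriers, the Broadcast variant instead notifies threads waiting on the thread-local
// weak-ref-access flag.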
1675 void Runtime::DisallowNewSystemWeaks() {
1676 CHECK(!kUseReadBarrier);
1677 monitor_list_->DisallowNewMonitors();
1678 intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites);
1679 java_vm_->DisallowNewWeakGlobals();
1680 heap_->DisallowNewAllocationRecords();
1681 lambda_box_table_->DisallowNewWeakBoxedLambdas();
1682 }
1683
1684 void Runtime::AllowNewSystemWeaks() {
1685 CHECK(!kUseReadBarrier);
1686 monitor_list_->AllowNewMonitors();
1687 intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal);  // TODO: Do this during sweeping.
1688 java_vm_->AllowNewWeakGlobals();
1689 heap_->AllowNewAllocationRecords();
1690 lambda_box_table_->AllowNewWeakBoxedLambdas();
1691 }
1692
1693 void Runtime::BroadcastForNewSystemWeaks() {
1694 // This is used for the read barrier case that uses the thread-local
1695 // Thread::GetWeakRefAccessEnabled() flag.
1696 CHECK(kUseReadBarrier);
1697 monitor_list_->BroadcastForNewMonitors();
1698 intern_table_->BroadcastForNewInterns();
1699 java_vm_->BroadcastForNewWeakGlobals();
1700 heap_->BroadcastForNewAllocationRecords();
1701 lambda_box_table_->BroadcastForNewWeakBoxedLambdas();
1702 }
1703
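// Records the selected instruction set and precomputes the callee-save method frame layout for
// every CalleeSaveType on that architecture.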
1704 void Runtime::SetInstructionSet(InstructionSet instruction_set) {
1705 instruction_set_ = instruction_set;
1706 if ((instruction_set_ == kThumb2) || (instruction_set_ == kArm)) {
1707 for (int i = 0; i != kLastCalleeSaveType; ++i) {
1708 CalleeSaveType type = static_cast<CalleeSaveType>(i);
1709 callee_save_method_frame_infos_[i] = arm::ArmCalleeSaveMethodFrameInfo(type);
1710 }
1711 } else if (instruction_set_ == kMips) {
1712 for (int i = 0; i != kLastCalleeSaveType; ++i) {
1713 CalleeSaveType type = static_cast<CalleeSaveType>(i);
1714 callee_save_method_frame_infos_[i] = mips::MipsCalleeSaveMethodFrameInfo(type);
1715 }
1716 } else if (instruction_set_ == kMips64) {
1717 for (int i = 0; i != kLastCalleeSaveType; ++i) {
1718 CalleeSaveType type = static_cast<CalleeSaveType>(i);
1719 callee_save_method_frame_infos_[i] = mips64::Mips64CalleeSaveMethodFrameInfo(type);
1720 }
1721 } else if (instruction_set_ == kX86) {
1722 for (int i = 0; i != kLastCalleeSaveType; ++i) {
1723 CalleeSaveType type = static_cast<CalleeSaveType>(i);
1724 callee_save_method_frame_infos_[i] = x86::X86CalleeSaveMethodFrameInfo(type);
1725 }
1726 } else if (instruction_set_ == kX86_64) {
1727 for (int i = 0; i != kLastCalleeSaveType; ++i) {
1728 CalleeSaveType type = static_cast<CalleeSaveType>(i);
1729 callee_save_method_frame_infos_[i] = x86_64::X86_64CalleeSaveMethodFrameInfo(type);
1730 }
1731 } else if (instruction_set_ == kArm64) {
1732 for (int i = 0; i != kLastCalleeSaveType; ++i) {
1733 CalleeSaveType type = static_cast<CalleeSaveType>(i);
1734 callee_save_method_frame_infos_[i] = arm64::Arm64CalleeSaveMethodFrameInfo(type);
1735 }
1736 } else {
1737 UNIMPLEMENTED(FATAL) << instruction_set_;
1738 }
1739 }
1740
1741 void Runtime::SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type) {
1742 DCHECK_LT(static_cast<int>(type), static_cast<int>(kLastCalleeSaveType));
1743 CHECK(method != nullptr);
1744 callee_save_methods_[type] = reinterpret_cast<uintptr_t>(method);
1745 }
1746
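// Registers an app's code paths and profile location so the JIT's profile saver can persist
// profiling data. This is a no-op when there is no JIT, when the profile file name is empty or
// the file does not exist, or when no code paths are given.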
1747 void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths,
1748 const std::string& profile_output_filename,
1749 const std::string& foreign_dex_profile_path,
1750 const std::string& app_dir) {
1751 if (jit_.get() == nullptr) {
1752 // We are not JITing. Nothing to do.
1753 return;
1754 }
1755
1756 VLOG(profiler) << "Register app with " << profile_output_filename
1757 << " " << Join(code_paths, ':');
1758
1759 if (profile_output_filename.empty()) {
1760 LOG(WARNING) << "JIT profile information will not be recorded: profile filename is empty.";
1761 return;
1762 }
1763 if (!FileExists(profile_output_filename)) {
1764 LOG(WARNING) << "JIT profile information will not be recorded: profile file does not exits.";
1765 return;
1766 }
1767 if (code_paths.empty()) {
1768 LOG(WARNING) << "JIT profile information will not be recorded: code paths is empty.";
1769 return;
1770 }
1771
1772 profile_output_filename_ = profile_output_filename;
1773 jit_->StartProfileSaver(profile_output_filename,
1774 code_paths,
1775 foreign_dex_profile_path,
1776 app_dir);
1777 }
1778
1779 void Runtime::NotifyDexLoaded(const std::string& dex_location) {
1780 VLOG(profiler) << "Notify dex loaded: " << dex_location;
1781 // We know that if the ProfileSaver is started then we can record profile information.
1782 if (ProfileSaver::IsStarted()) {
1783 ProfileSaver::NotifyDexUse(dex_location);
1784 }
1785 }
1786
1787 // Transaction support.
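// During AOT compilation, class initializers may be executed inside a transaction so that their
// side effects can be rolled back if initialization has to be aborted. The Record* hooks below
// log each mutation performed while a transaction is active so that it can later be undone.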
1788 void Runtime::EnterTransactionMode(Transaction* transaction) {
1789 DCHECK(IsAotCompiler());
1790 DCHECK(transaction != nullptr);
1791 DCHECK(!IsActiveTransaction());
1792 preinitialization_transaction_ = transaction;
1793 }
1794
1795 void Runtime::ExitTransactionMode() {
1796 DCHECK(IsAotCompiler());
1797 DCHECK(IsActiveTransaction());
1798 preinitialization_transaction_ = nullptr;
1799 }
1800
1801 bool Runtime::IsTransactionAborted() const {
1802 if (!IsActiveTransaction()) {
1803 return false;
1804 } else {
1805 DCHECK(IsAotCompiler());
1806 return preinitialization_transaction_->IsAborted();
1807 }
1808 }
1809
1810 void Runtime::AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message) {
1811 DCHECK(IsAotCompiler());
1812 DCHECK(IsActiveTransaction());
1813 // Throwing the exception may trigger the initialization of its class. If we marked the
1814 // transaction aborted before that, this could produce a false-alarm warning. Throwing the
1815 // exception first and only then marking the transaction aborted avoids that.
1816 preinitialization_transaction_->ThrowAbortError(self, &abort_message);
1817 preinitialization_transaction_->Abort(abort_message);
1818 }
1819
1820 void Runtime::ThrowTransactionAbortError(Thread* self) {
1821 DCHECK(IsAotCompiler());
1822 DCHECK(IsActiveTransaction());
1823 // Passing nullptr means we rethrow an exception with the earlier transaction abort message.
1824 preinitialization_transaction_->ThrowAbortError(self, nullptr);
1825 }
1826
1827 void Runtime::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset,
1828 uint8_t value, bool is_volatile) const {
1829 DCHECK(IsAotCompiler());
1830 DCHECK(IsActiveTransaction());
1831 preinitialization_transaction_->RecordWriteFieldBoolean(obj, field_offset, value, is_volatile);
1832 }
1833
1834 void Runtime::RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset,
1835 int8_t value, bool is_volatile) const {
1836 DCHECK(IsAotCompiler());
1837 DCHECK(IsActiveTransaction());
1838 preinitialization_transaction_->RecordWriteFieldByte(obj, field_offset, value, is_volatile);
1839 }
1840
1841 void Runtime::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset,
1842 uint16_t value, bool is_volatile) const {
1843 DCHECK(IsAotCompiler());
1844 DCHECK(IsActiveTransaction());
1845 preinitialization_transaction_->RecordWriteFieldChar(obj, field_offset, value, is_volatile);
1846 }
1847
1848 void Runtime::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset,
1849 int16_t value, bool is_volatile) const {
1850 DCHECK(IsAotCompiler());
1851 DCHECK(IsActiveTransaction());
1852 preinitialization_transaction_->RecordWriteFieldShort(obj, field_offset, value, is_volatile);
1853 }
1854
1855 void Runtime::RecordWriteField32(mirror::Object* obj, MemberOffset field_offset,
1856 uint32_t value, bool is_volatile) const {
1857 DCHECK(IsAotCompiler());
1858 DCHECK(IsActiveTransaction());
1859 preinitialization_transaction_->RecordWriteField32(obj, field_offset, value, is_volatile);
1860 }
1861
1862 void Runtime::RecordWriteField64(mirror::Object* obj, MemberOffset field_offset,
1863 uint64_t value, bool is_volatile) const {
1864 DCHECK(IsAotCompiler());
1865 DCHECK(IsActiveTransaction());
1866 preinitialization_transaction_->RecordWriteField64(obj, field_offset, value, is_volatile);
1867 }
1868
1869 void Runtime::RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
1870 mirror::Object* value, bool is_volatile) const {
1871 DCHECK(IsAotCompiler());
1872 DCHECK(IsActiveTransaction());
1873 preinitialization_transaction_->RecordWriteFieldReference(obj, field_offset, value, is_volatile);
1874 }
1875
1876 void Runtime::RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const {
1877 DCHECK(IsAotCompiler());
1878 DCHECK(IsActiveTransaction());
1879 preinitialization_transaction_->RecordWriteArray(array, index, value);
1880 }
1881
1882 void Runtime::RecordStrongStringInsertion(mirror::String* s) const {
1883 DCHECK(IsAotCompiler());
1884 DCHECK(IsActiveTransaction());
1885 preinitialization_transaction_->RecordStrongStringInsertion(s);
1886 }
1887
1888 void Runtime::RecordWeakStringInsertion(mirror::String* s) const {
1889 DCHECK(IsAotCompiler());
1890 DCHECK(IsActiveTransaction());
1891 preinitialization_transaction_->RecordWeakStringInsertion(s);
1892 }
1893
1894 void Runtime::RecordStrongStringRemoval(mirror::String* s) const {
1895 DCHECK(IsAotCompiler());
1896 DCHECK(IsActiveTransaction());
1897 preinitialization_transaction_->RecordStrongStringRemoval(s);
1898 }
1899
1900 void Runtime::RecordWeakStringRemoval(mirror::String* s) const {
1901 DCHECK(IsAotCompiler());
1902 DCHECK(IsActiveTransaction());
1903 preinitialization_transaction_->RecordWeakStringRemoval(s);
1904 }
1905
1906 void Runtime::SetFaultMessage(const std::string& message) {
1907 MutexLock mu(Thread::Current(), fault_message_lock_);
1908 fault_message_ = message;
1909 }
1910
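// Appends arguments describing the current runtime's configuration for a dex2oat invocation.
// Illustrative output on a 64-bit ARM device (feature string abbreviated, values will differ):
//   --compiler-filter=interpret-only   (only when the runtime is forced to interpret-only)
//   --instruction-set=arm64
//   --instruction-set-features=...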
1911 void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* argv)
1912 const {
1913 if (GetInstrumentation()->InterpretOnly()) {
1914 argv->push_back("--compiler-filter=interpret-only");
1915 }
1916
1917 // Make the dex2oat instruction set match that of the launching runtime. With multi-architecture
1918 // support, dex2oat may have been compiled for a different instruction set than the one
1919 // currently executing.
1920 std::string instruction_set("--instruction-set=");
1921 instruction_set += GetInstructionSetString(kRuntimeISA);
1922 argv->push_back(instruction_set);
1923
1924 std::unique_ptr<const InstructionSetFeatures> features(InstructionSetFeatures::FromCppDefines());
1925 std::string feature_string("--instruction-set-features=");
1926 feature_string += features->GetFeatureString();
1927 argv->push_back(feature_string);
1928 }
1929
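// Creates the JIT from the parsed JIT options. Failure is not fatal: a warning is logged and the
// runtime continues with the interpreter and any AOT-compiled code.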
1930 void Runtime::CreateJit() {
1931 CHECK(!IsAotCompiler());
1932 if (kIsDebugBuild && GetInstrumentation()->IsForcedInterpretOnly()) {
1933 DCHECK(!jit_options_->UseJitCompilation());
1934 }
1935 std::string error_msg;
1936 jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg));
1937 if (jit_.get() == nullptr) {
1938 LOG(WARNING) << "Failed to create JIT " << error_msg;
1939 }
1940 }
1941
1942 bool Runtime::CanRelocate() const {
1943 return !IsAotCompiler() || compiler_callbacks_->IsRelocationPossible();
1944 }
1945
1946 bool Runtime::IsCompilingBootImage() const {
1947 return IsCompiler() && compiler_callbacks_->IsBootImage();
1948 }
1949
1950 void Runtime::SetResolutionMethod(ArtMethod* method) {
1951 CHECK(method != nullptr);
1952 CHECK(method->IsRuntimeMethod()) << method;
1953 resolution_method_ = method;
1954 }
1955
1956 void Runtime::SetImtUnimplementedMethod(ArtMethod* method) {
1957 CHECK(method != nullptr);
1958 CHECK(method->IsRuntimeMethod());
1959 imt_unimplemented_method_ = method;
1960 }
1961
1962 void Runtime::FixupConflictTables() {
1963 // We can only do this after the class linker is created.
1964 const size_t pointer_size = GetClassLinker()->GetImagePointerSize();
1965 if (imt_unimplemented_method_->GetImtConflictTable(pointer_size) == nullptr) {
1966 imt_unimplemented_method_->SetImtConflictTable(
1967 ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
1968 pointer_size);
1969 }
1970 if (imt_conflict_method_->GetImtConflictTable(pointer_size) == nullptr) {
1971 imt_conflict_method_->SetImtConflictTable(
1972 ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
1973 pointer_size);
1974 }
1975 }
1976
1977 bool Runtime::IsVerificationEnabled() const {
1978 return verify_ == verifier::VerifyMode::kEnable ||
1979 verify_ == verifier::VerifyMode::kSoftFail;
1980 }
1981
1982 bool Runtime::IsVerificationSoftFail() const {
1983 return verify_ == verifier::VerifyMode::kSoftFail;
1984 }
1985
1986 LinearAlloc* Runtime::CreateLinearAlloc() {
1987 // For 64 bit compilers, the allocation needs to be in the low 4GB when cross-compiling for a
1988 // 32 bit target: in that case the dex cache arrays hold 32 bit pointers, which cannot
1989 // represent 64 bit ArtMethod pointers.
1990 return (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA))
1991 ? new LinearAlloc(low_4gb_arena_pool_.get())
1992 : new LinearAlloc(arena_pool_.get());
1993 }
1994
1995 double Runtime::GetHashTableMinLoadFactor() const {
1996 return is_low_memory_mode_ ? kLowMemoryMinLoadFactor : kNormalMinLoadFactor;
1997 }
1998
1999 double Runtime::GetHashTableMaxLoadFactor() const {
2000 return is_low_memory_mode_ ? kLowMemoryMaxLoadFactor : kNormalMaxLoadFactor;
2001 }
2002
2003 void Runtime::UpdateProcessState(ProcessState process_state) {
2004 ProcessState old_process_state = process_state_;
2005 process_state_ = process_state;
2006 GetHeap()->UpdateProcessState(old_process_state, process_state);
2007 }
2008
2009 void Runtime::RegisterSensitiveThread() const {
2010 Thread::SetJitSensitiveThread();
2011 }
2012
2013 // Returns true if JIT compilation is enabled. GetJit() will not be null in this case.
2014 bool Runtime::UseJitCompilation() const {
2015 return (jit_ != nullptr) && jit_->UseJitCompilation();
2016 }
2017
2018 // Returns true if profile saving is enabled. GetJit() will not be null in this case.
2019 bool Runtime::SaveProfileInfo() const {
2020 return (jit_ != nullptr) && jit_->SaveProfilingInfo();
2021 }
2022
2023 } // namespace art
2024