/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_

#include <jni.h>
#include <stdio.h>

#include <atomic>
#include <iosfwd>
#include <list>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "arch/instruction_set.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "deoptimization_kind.h"
#include "dex_file_types.h"
#include "experimental_flags.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "jobject_comparator.h"
#include "method_reference.h"
#include "obj_ptr.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "process_state.h"
#include "quick/quick_method_frame_info.h"
#include "runtime_stats.h"

namespace art {

namespace gc {
  class AbstractSystemWeakHolder;
  class Heap;
  namespace collector {
    class GarbageCollector;
  }  // namespace collector
}  // namespace gc

namespace jit {
  class Jit;
  class JitOptions;
}  // namespace jit

namespace mirror {
  class Array;
  class ClassLoader;
  class DexCache;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int8_t> ByteArray;
  class String;
  class Throwable;
}  // namespace mirror
namespace ti {
  class Agent;
}  // namespace ti
namespace verifier {
  class MethodVerifier;
  enum class VerifyMode : int8_t;
}  // namespace verifier
class ArenaPool;
class ArtMethod;
class ClassHierarchyAnalysis;
class ClassLinker;
class Closure;
class CompilerCallbacks;
class DexFile;
class InternTable;
class JavaVMExt;
class LinearAlloc;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
class OatFileManager;
class Plugin;
struct RuntimeArgumentMap;
class RuntimeCallbacks;
class SignalCatcher;
class StackOverflowHandler;
class SuspensionHandler;
class ThreadList;
class Trace;
struct TraceConfig;
class Transaction;

typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;

class Runtime {
 public:
  // Parse raw runtime options.
  static bool ParseOptions(const RuntimeOptions& raw_options,
                           bool ignore_unrecognized,
                           RuntimeArgumentMap* runtime_options);

  // Creates and initializes a new runtime.
  static bool Create(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

  // Creates and initializes a new runtime.
  static bool Create(const RuntimeOptions& raw_options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
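
  // Example (a minimal, illustrative sketch of the embedder flow; the exact
  // option strings depend on the caller, e.g. JNI_CreateJavaVM):
  //
  //   RuntimeOptions options;
  //   options.push_back(std::make_pair("-Xcheck:jni", nullptr));
  //   if (!Runtime::Create(options, /* ignore_unrecognized= */ false)) {
  //     return false;
  //   }
  //   Runtime* runtime = Runtime::Current();
  //   runtime->Start();  // Starts daemon threads; releases the mutator lock.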

  // IsAotCompiler is true for compilers that do not have a running runtime. Only dex2oat currently.
  bool IsAotCompiler() const {
    return !UseJitCompilation() && IsCompiler();
  }

  // IsCompiler is true for any runtime which has a running compiler, either dex2oat or the JIT.
  bool IsCompiler() const {
    return compiler_callbacks_ != nullptr;
  }

  // If a compiler, are we compiling a boot image?
  bool IsCompilingBootImage() const;

  bool CanRelocate() const;

  bool ShouldRelocate() const {
    return must_relocate_ && CanRelocate();
  }

  bool MustRelocateIfPossible() const {
    return must_relocate_;
  }

  bool IsDex2OatEnabled() const {
    return dex2oat_enabled_ && IsImageDex2OatEnabled();
  }

  bool IsImageDex2OatEnabled() const {
    return image_dex2oat_enabled_;
  }

  CompilerCallbacks* GetCompilerCallbacks() {
    return compiler_callbacks_;
  }

  void SetCompilerCallbacks(CompilerCallbacks* callbacks) {
    CHECK(callbacks != nullptr);
    compiler_callbacks_ = callbacks;
  }

  bool IsZygote() const {
    return is_zygote_;
  }

  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }

  std::string GetCompilerExecutable() const;
  std::string GetPatchoatExecutable() const;

  const std::vector<std::string>& GetCompilerOptions() const {
    return compiler_options_;
  }

  void AddCompilerOption(const std::string& option) {
    compiler_options_.push_back(option);
  }

  const std::vector<std::string>& GetImageCompilerOptions() const {
    return image_compiler_options_;
  }

  const std::string& GetImageLocation() const {
    return image_location_;
  }

  // Starts a runtime, which may cause threads to be started and code to run.
  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);

  bool IsShuttingDown(Thread* self);
  bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);
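
  // Example (a minimal sketch of how runtime-internal code brackets thread
  // creation, assuming the usual ART MutexLock idiom; not a prescribed API):
  //
  //   {
  //     MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  //     if (runtime->IsShuttingDownLocked()) {
  //       return;  // Too late to start new threads.
  //     }
  //     runtime->StartThreadBirth();
  //   }
  //   ...  // Actually create and start the native thread.
  //   {
  //     MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  //     runtime->EndThreadBirth();
  //   }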

  bool IsStarted() const {
    return started_;
  }

  bool IsFinishedStarting() const {
    return finished_starting_;
  }

  static Runtime* Current() {
    return instance_;
  }

  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
  // callers should prefer.
  NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_);

  // Returns the "main" ThreadGroup, used when attaching user threads.
  jobject GetMainThreadGroup() const;

  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  jobject GetSystemThreadGroup() const;

  // Returns the system ClassLoader which represents the CLASSPATH.
  jobject GetSystemClassLoader() const;

  // Attaches the calling native thread to the runtime.
  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                           bool create_peer);

  void CallExitHook(jint status);

  // Detaches the current native thread from the runtime.
  void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);
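
  // Example (an illustrative sketch): a native worker thread attaches before
  // calling into managed code and detaches before it exits. The thread name
  // and flag values are arbitrary.
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->AttachCurrentThread("NativeWorker",
  //                                /* as_daemon= */ false,
  //                                runtime->GetMainThreadGroup(),
  //                                /* create_peer= */ true);
  //   ...  // Call into managed code via JNI.
  //   runtime->DetachCurrentThread();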

  void DumpDeoptimizations(std::ostream& os);
  void DumpForSigQuit(std::ostream& os);
  void DumpLockHolders(std::ostream& os);

  ~Runtime();

  const std::string& GetBootClassPathString() const {
    return boot_class_path_string_;
  }

  const std::string& GetClassPathString() const {
    return class_path_string_;
  }

  ClassLinker* GetClassLinker() const {
    return class_linker_;
  }

  size_t GetDefaultStackSize() const {
    return default_stack_size_;
  }

  gc::Heap* GetHeap() const {
    return heap_;
  }

  InternTable* GetInternTable() const {
    DCHECK(intern_table_ != nullptr);
    return intern_table_;
  }

  JavaVMExt* GetJavaVM() const {
    return java_vm_.get();
  }

  size_t GetMaxSpinsBeforeThinLockInflation() const {
    return max_spins_before_thin_lock_inflation_;
  }

  MonitorList* GetMonitorList() const {
    return monitor_list_;
  }

  MonitorPool* GetMonitorPool() const {
    return monitor_pool_;
  }

  // Is the given object the special object used to mark a cleared JNI weak global?
  bool IsClearedJniWeakGlobal(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Get the special object used to mark a cleared JNI weak global.
  mirror::Object* GetClearedJniWeakGlobal() REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedOutOfMemoryError() REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
      REQUIRES_SHARED(Locks::mutator_lock_);

  const std::vector<std::string>& GetProperties() const {
    return properties_;
  }

  ThreadList* GetThreadList() const {
    return thread_list_;
  }

  static const char* GetVersion() {
    return "2.1.0";
  }

  bool IsMethodHandlesEnabled() const {
    return true;
  }

  void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  // broadcast_for_checkpoint is true when we broadcast to make blocking threads respond to
  // checkpoint requests. It is false when we broadcast to unblock blocking threads after system
  // weak access is re-enabled.
  void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);
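
  // Example (a sketch of the GC-side pattern implied by the declarations
  // above; the surrounding reference-processing code is elided):
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->DisallowNewSystemWeaks();
  //   ...  // Process references / sweep system weaks.
  //   runtime->AllowNewSystemWeaks();
  //   runtime->BroadcastForNewSystemWeaks();  // Wake threads blocked on weak access.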

  // Visit all the roots. The 'flags' argument (see VisitRootFlags) controls which categories of
  // roots are visited.
  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit image roots, only used for hprof since the GC uses the image space mod union table
  // instead.
  void VisitImageRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the roots we can safely visit concurrently.
  void VisitConcurrentRoots(RootVisitor* visitor,
                            VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the non-thread roots; we can do this with mutators unpaused.
  void VisitNonThreadRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitTransactionRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Flip thread roots from from-space refs to to-space refs.
  size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
                         gc::collector::GarbageCollector* collector)
      REQUIRES(!Locks::mutator_lock_);

  // Sweep system weaks: a system weak is deleted if the visitor returns null; otherwise, the
  // system weak is updated to the visitor's returned value.
  void SweepSystemWeaks(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime method resolution
  ArtMethod* GetResolutionMethod();

  bool HasResolutionMethod() const {
    return resolution_method_ != nullptr;
  }

  void SetResolutionMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void ClearResolutionMethod() {
    resolution_method_ = nullptr;
  }

  ArtMethod* CreateResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime imt conflicts.
  ArtMethod* GetImtConflictMethod();
  ArtMethod* GetImtUnimplementedMethod();

  bool HasImtConflictMethod() const {
    return imt_conflict_method_ != nullptr;
  }

  void ClearImtConflictMethod() {
    imt_conflict_method_ = nullptr;
  }

  void FixupConflictTables();
  void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* CreateImtConflictMethod(LinearAlloc* linear_alloc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearImtUnimplementedMethod() {
    imt_unimplemented_method_ = nullptr;
  }

  // Returns a special method that describes all callee saves being spilled to the stack.
  enum CalleeSaveType {
    kSaveAllCalleeSaves,  // All callee-save registers.
    kSaveRefsOnly,        // Only those callee-save registers that can hold references.
    kSaveRefsAndArgs,     // References (see above) and arguments (usually caller-save registers).
    kSaveEverything,      // All registers, including both callee-save and caller-save.
    kLastCalleeSaveType   // Value used for iteration
  };

  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return callee_save_methods_[type] != 0u;
  }

  ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
    return callee_save_method_frame_infos_[type];
  }

  QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
    return OFFSETOF_MEMBER(Runtime, callee_save_methods_[type]);
  }
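
  // Example (a minimal sketch of a caller that needs the frame layout of a
  // runtime callee-save method, e.g. while walking quick frames):
  //
  //   Runtime* runtime = Runtime::Current();
  //   if (runtime->HasCalleeSaveMethod(Runtime::kSaveRefsAndArgs)) {
  //     QuickMethodFrameInfo frame_info =
  //         runtime->GetCalleeSaveMethodFrameInfo(Runtime::kSaveRefsAndArgs);
  //     ...  // Use frame_info.FrameSizeInBytes(), spill masks, etc.
  //   }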

  InstructionSet GetInstructionSet() const {
    return instruction_set_;
  }

  void SetInstructionSet(InstructionSet instruction_set);
  void ClearInstructionSet();

  void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
  void ClearCalleeSaveMethods();

  ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  int32_t GetStat(int kind);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool HasStatsEnabled() const {
    return stats_enabled_;
  }

  void ResetStats(int kinds);

  void SetStatsEnabled(bool new_state)
      REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);
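
  // Example (an illustrative sketch; the stat kind constants are assumed to
  // come from runtime_stats.h, e.g. KIND_ALLOCATED_OBJECTS and the aggregate
  // KIND_ALL_COUNTS):
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->SetStatsEnabled(true);
  //   ...  // Run the workload of interest.
  //   int32_t allocated = runtime->GetStat(KIND_ALLOCATED_OBJECTS);
  //   runtime->ResetStats(KIND_ALL_COUNTS);
  //   runtime->SetStatsEnabled(false);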

  enum class NativeBridgeAction {  // private
    kUnload,
    kInitialize
  };

  jit::Jit* GetJit() const {
    return jit_.get();
  }

  // Returns true if JIT compilation is enabled. GetJit() will not be null in this case.
  bool UseJitCompilation() const;

  void PreZygoteFork();
  void InitNonZygoteOrPostFork(
      JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa);

  const instrumentation::Instrumentation* GetInstrumentation() const {
    return &instrumentation_;
  }

  instrumentation::Instrumentation* GetInstrumentation() {
    return &instrumentation_;
  }

  void RegisterAppInfo(const std::vector<std::string>& code_paths,
                       const std::string& profile_output_filename);

  // Transaction support.
  bool IsActiveTransaction() const {
    return preinitialization_transaction_ != nullptr;
  }
  void EnterTransactionMode(Transaction* transaction);
  void ExitTransactionMode();
  bool IsTransactionAborted() const;

  void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ThrowTransactionAbortError(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_);
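
  // Example (a rough sketch of the AOT compiler's class pre-initialization
  // flow; 'transaction' is assumed to be created and owned by the caller):
  //
  //   runtime->EnterTransactionMode(transaction);
  //   ...  // Run <clinit> code; field and array writes are recorded below.
  //   if (runtime->IsTransactionAborted()) {
  //     ...  // Roll back and report the failure.
  //   }
  //   runtime->ExitTransactionMode();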

  void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
                               bool is_volatile) const;
  void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
                            bool is_volatile) const;
  void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
                            bool is_volatile) const;
  void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
                             bool is_volatile) const;
  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                          bool is_volatile) const;
  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
                          bool is_volatile) const;
  void RecordWriteFieldReference(mirror::Object* obj,
                                 MemberOffset field_offset,
                                 ObjPtr<mirror::Object> value,
                                 bool is_volatile) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordStrongStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_);
  // Only read by the signal handler, NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations
  // with the unexpected_signal_lock_.
  const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
    return fault_message_;
  }

  void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;

  bool ExplicitStackOverflowChecks() const {
    return !implicit_so_checks_;
  }

  bool IsVerificationEnabled() const;
  bool IsVerificationSoftFail() const;

  bool IsDexFileFallbackEnabled() const {
    return allow_dex_file_fallback_;
  }

  const std::vector<std::string>& GetCpuAbilist() const {
    return cpu_abilist_;
  }

  bool IsRunningOnMemoryTool() const {
    return is_running_on_memory_tool_;
  }

  void SetTargetSdkVersion(int32_t version) {
    target_sdk_version_ = version;
  }

  int32_t GetTargetSdkVersion() const {
    return target_sdk_version_;
  }

  uint32_t GetZygoteMaxFailedBoots() const {
    return zygote_max_failed_boots_;
  }

  bool AreExperimentalFlagsEnabled(ExperimentalFlags flags) {
    return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
  }

  // Create the JIT, instrumentation, and code cache.
  void CreateJit();

  ArenaPool* GetArenaPool() {
    return arena_pool_.get();
  }
  ArenaPool* GetJitArenaPool() {
    return jit_arena_pool_.get();
  }
  const ArenaPool* GetArenaPool() const {
    return arena_pool_.get();
  }

  void ReclaimArenaPoolMemory();

  LinearAlloc* GetLinearAlloc() {
    return linear_alloc_.get();
  }

  jit::JitOptions* GetJITOptions() {
    return jit_options_.get();
  }

  bool IsJavaDebuggable() const {
    return is_java_debuggable_;
  }

  void SetJavaDebuggable(bool value);

  // Deoptimize the boot image, called for Java debuggable apps.
  void DeoptimizeBootImage();

  bool IsNativeDebuggable() const {
    return is_native_debuggable_;
  }

  void SetNativeDebuggable(bool value) {
    is_native_debuggable_ = value;
  }

  // Returns the build fingerprint, if set. Otherwise an empty string is returned.
  std::string GetFingerprint() {
    return fingerprint_;
  }

  // Called from class linker.
  void SetSentinel(mirror::Object* sentinel) REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a normal LinearAlloc or a low 4gb version if we are a 64 bit AOT compiler.
  LinearAlloc* CreateLinearAlloc();

  OatFileManager& GetOatFileManager() const {
    DCHECK(oat_file_manager_ != nullptr);
    return *oat_file_manager_;
  }

  double GetHashTableMinLoadFactor() const;
  double GetHashTableMaxLoadFactor() const;

  void SetSafeMode(bool mode) {
    safe_mode_ = mode;
  }

  bool GetDumpNativeStackOnSigQuit() const {
    return dump_native_stack_on_sig_quit_;
  }

  bool GetPrunedDalvikCache() const {
    return pruned_dalvik_cache_;
  }

  void SetPrunedDalvikCache(bool pruned) {
    pruned_dalvik_cache_ = pruned;
  }

  void UpdateProcessState(ProcessState process_state);

  // Returns true if we currently care about long mutator pauses.
  bool InJankPerceptibleProcessState() const {
    return process_state_ == kProcessStateJankPerceptible;
  }

  void RegisterSensitiveThread() const;

  void SetZygoteNoThreadSection(bool val) {
    zygote_no_threads_ = val;
  }

  bool IsZygoteNoThreadSection() const {
    return zygote_no_threads_;
  }

  // Returns whether the code can be deoptimized asynchronously. Code may be compiled with some
  // optimization that makes it impossible to deoptimize.
  bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a saved copy of the environment (getenv/setenv values).
  // Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
  char** GetEnvSnapshot() const {
    return env_snapshot_.GetSnapshot();
  }

  void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
  void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
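
  // Example (a hypothetical sketch): a subsystem holding weak references to
  // managed objects registers a holder so the GC can sweep and disallow
  // access to them. 'MyWeakTable' and its base-class overrides are assumed.
  //
  //   class MyWeakTable : public gc::AbstractSystemWeakHolder { ... };
  //
  //   MyWeakTable table;
  //   Runtime::Current()->AddSystemWeakHolder(&table);
  //   ...
  //   Runtime::Current()->RemoveSystemWeakHolder(&table);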

  ClassHierarchyAnalysis* GetClassHierarchyAnalysis() {
    return cha_;
  }

  NO_RETURN
  static void Aborter(const char* abort_message);

  void AttachAgent(const std::string& agent_arg);

  const std::list<ti::Agent>& GetAgents() const {
    return agents_;
  }

  RuntimeCallbacks* GetRuntimeCallbacks();

  void InitThreadGroups(Thread* self);

  void SetDumpGCPerformanceOnShutdown(bool value) {
    dump_gc_performance_on_shutdown_ = value;
  }

  void IncrementDeoptimizationCount(DeoptimizationKind kind) {
    DCHECK_LE(kind, DeoptimizationKind::kLast);
    deoptimization_counts_[static_cast<size_t>(kind)]++;
  }

  uint32_t GetNumberOfDeoptimizations() const {
    uint32_t result = 0;
    for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
      result += deoptimization_counts_[i];
    }
    return result;
  }

 private:
  static void InitPlatformSignalHandlers();

  Runtime();

  void BlockSignals();

  bool Init(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
  void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
  void RegisterRuntimeNativeMethods(JNIEnv* env);

  void StartDaemonThreads();
  void StartSignalCatcher();

  void MaybeSaveJitProfilingInfo();

  // Visit all of the thread roots.
  void VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all other roots which must be done with mutators suspended.
  void VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Constant roots are the roots which never change after the runtime is initialized; they only
  // need to be visited once per GC cycle.
  void VisitConstantRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // A pointer to the active runtime or null.
  static Runtime* instance_;

  // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
  static constexpr int kProfileForground = 0;
  static constexpr int kProfileBackground = 1;

  // 64 bit so that we can share the same asm offsets for both 32 and 64 bits.
  uint64_t callee_save_methods_[kLastCalleeSaveType];
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_;
  GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
  ArtMethod* resolution_method_;
  ArtMethod* imt_conflict_method_;
  // The unresolved method has the same behavior as the conflict method; it is used by the class
  // linker for differentiating between unfilled imt slots and conflict slots in superclasses.
  ArtMethod* imt_unimplemented_method_;

  // Special sentinel object used to mark invalid conditions in JNI (cleared weak references) and
  // JDWP (invalid references).
  GcRoot<mirror::Object> sentinel_;

  InstructionSet instruction_set_;
  QuickMethodFrameInfo callee_save_method_frame_infos_[kLastCalleeSaveType];

  CompilerCallbacks* compiler_callbacks_;
  bool is_zygote_;
  bool must_relocate_;
  bool is_concurrent_gc_enabled_;
  bool is_explicit_gc_disabled_;
  bool dex2oat_enabled_;
  bool image_dex2oat_enabled_;

  std::string compiler_executable_;
  std::string patchoat_executable_;
  std::vector<std::string> compiler_options_;
  std::vector<std::string> image_compiler_options_;
  std::string image_location_;

  std::string boot_class_path_string_;
  std::string class_path_string_;
  std::vector<std::string> properties_;

  std::list<ti::Agent> agents_;
  std::vector<Plugin> plugins_;

  // The default stack size for managed threads created by the runtime.
  size_t default_stack_size_;

  gc::Heap* heap_;

  std::unique_ptr<ArenaPool> jit_arena_pool_;
  std::unique_ptr<ArenaPool> arena_pool_;
  // Special low 4gb pool for the compiler linear alloc. We need ArtFields to be in low 4gb if we
  // are compiling using a 32 bit image on a 64 bit compiler in case we resolve things in the
  // image, since the field arrays are int arrays in this case.
  std::unique_ptr<ArenaPool> low_4gb_arena_pool_;

  // Shared linear alloc for now.
  std::unique_ptr<LinearAlloc> linear_alloc_;

  // The number of spins that are done before thread suspension is used to forcibly inflate the
  // lock.
  size_t max_spins_before_thin_lock_inflation_;
  MonitorList* monitor_list_;
  MonitorPool* monitor_pool_;

  ThreadList* thread_list_;

  InternTable* intern_table_;

  ClassLinker* class_linker_;

  SignalCatcher* signal_catcher_;
  std::string stack_trace_file_;

  std::unique_ptr<JavaVMExt> java_vm_;

  std::unique_ptr<jit::Jit> jit_;
  std::unique_ptr<jit::JitOptions> jit_options_;

  // Fault message, printed when we get a SIGSEGV.
  Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::string fault_message_ GUARDED_BY(fault_message_lock_);

  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
  // the shutdown lock so that threads aren't born while we're shutting down.
  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Waited upon until no threads are being born.
  std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Set when runtime shutdown is past the point that new threads may attach.
  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // The runtime is starting to shut down but is blocked waiting on shutdown_cond_.
  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  bool started_;

  // Tells us if the runtime has finished starting. If this flag is set then the daemon threads
  // and the system class loader have been created. This flag is needed for knowing if it's safe
  // to request CMS.
  bool finished_starting_;

  // Hooks supported by JNI_CreateJavaVM
  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
  void (*exit_)(jint status);
  void (*abort_)();

  bool stats_enabled_;
  RuntimeStats stats_;

  const bool is_running_on_memory_tool_;

  std::unique_ptr<TraceConfig> trace_config_;

  instrumentation::Instrumentation instrumentation_;

  jobject main_thread_group_;
  jobject system_thread_group_;

  // As returned by ClassLoader.getSystemClassLoader().
  jobject system_class_loader_;

  // If true, then we dump the GC cumulative timings on shutdown.
  bool dump_gc_performance_on_shutdown_;

  // Transaction used for pre-initializing classes at compilation time.
  Transaction* preinitialization_transaction_;

  // If kNone, verification is disabled. kEnable by default.
  verifier::VerifyMode verify_;

  // If true, the runtime may use dex files directly with the interpreter if an oat file is not
  // available/usable.
  bool allow_dex_file_fallback_;

  // List of supported cpu abis.
  std::vector<std::string> cpu_abilist_;

  // Specifies target SDK version to allow workarounds for certain API levels.
  int32_t target_sdk_version_;

  // Implicit checks flags.
  bool implicit_null_checks_;       // NullPointer checks are implicit.
  bool implicit_so_checks_;         // StackOverflow checks are implicit.
  bool implicit_suspend_checks_;    // Thread suspension checks are implicit.

  // Whether or not the sig chain (and implicitly the fault handler) should be
  // disabled. Tools like dex2oat or patchoat don't need them. This enables
  // building a statically linked version of dex2oat.
  bool no_sig_chain_;

  // Force the use of native bridge even if the app ISA matches the runtime ISA.
  bool force_native_bridge_;

  // Whether or not a native bridge has been loaded.
  //
  // The native bridge allows running native code compiled for a foreign ISA. It works as follows:
  // if standard dlopen fails to load the native library associated with a native activity, the
  // runtime calls into the native bridge to load it and then gets the trampoline for the entry to
  // the native activity.
  //
  // The option 'native_bridge_library_filename' specifies the name of the native bridge.
  // When non-empty the native bridge will be loaded from the given file. An empty value means
  // that there's no native bridge.
  bool is_native_bridge_loaded_;

  // Whether we are running under native debugger.
  bool is_native_debuggable_;

  // Whether Java code needs to be debuggable.
  bool is_java_debuggable_;

  // The maximum number of failed boots we allow before pruning the dalvik cache
  // and trying again. This option is only inspected when we're running as a
  // zygote.
  uint32_t zygote_max_failed_boots_;

  // Enable experimental opcodes that aren't fully specified yet. The intent is to
  // eventually publish them as publicly usable opcodes, but they aren't ready yet.
  //
  // Experimental opcodes should not be used by other production code.
  ExperimentalFlags experimental_flags_;

  // Contains the build fingerprint, if given as a parameter.
  std::string fingerprint_;

  // Oat file manager, keeps track of what oat files are open.
  OatFileManager* oat_file_manager_;

  // Whether or not we are on a low RAM device.
  bool is_low_memory_mode_;

  // Whether the application should run in safe mode, that is, interpreter only.
  bool safe_mode_;

  // Whether threads should dump their native stack on SIGQUIT.
  bool dump_native_stack_on_sig_quit_;

  // Whether the dalvik cache was pruned when initializing the runtime.
  bool pruned_dalvik_cache_;

  // Whether or not we currently care about pause times.
  ProcessState process_state_;

  // Whether zygote code is in a section that should not start threads.
  bool zygote_no_threads_;

  // Saved environment.
  class EnvSnapshot {
   public:
    EnvSnapshot() = default;
    void TakeSnapshot();
    char** GetSnapshot() const;

   private:
    std::unique_ptr<char*[]> c_env_vector_;
    std::vector<std::unique_ptr<std::string>> name_value_pairs_;

    DISALLOW_COPY_AND_ASSIGN(EnvSnapshot);
  } env_snapshot_;

  // Generic system-weak holders.
  std::vector<gc::AbstractSystemWeakHolder*> system_weak_holders_;

  ClassHierarchyAnalysis* cha_;

  std::unique_ptr<RuntimeCallbacks> callbacks_;

  std::atomic<uint32_t> deoptimization_counts_[
      static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];

  DISALLOW_COPY_AND_ASSIGN(Runtime);
};
std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);

}  // namespace art

#endif  // ART_RUNTIME_RUNTIME_H_