/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_

#include <jni.h>
#include <stdio.h>

#include <iosfwd>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "base/allocator.h"
#include "compiler_callbacks.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "instruction_set.h"
#include "jobject_comparator.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "profiler_options.h"
#include "quick/quick_method_frame_info.h"
#include "runtime_stats.h"
#include "safe_map.h"

namespace art {

namespace gc {
  class Heap;
}  // namespace gc
namespace mirror {
  class ArtMethod;
  class ClassLoader;
  class Array;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int8_t> ByteArray;
  class String;
  class Throwable;
}  // namespace mirror
namespace verifier {
class MethodVerifier;
}
class ClassLinker;
class DexFile;
class InternTable;
class JavaVMExt;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
class SignalCatcher;
class StackOverflowHandler;
class SuspensionHandler;
class ThreadList;
class Trace;
class Transaction;

typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;

// Not all combinations of flags are valid. You may not visit all roots as well as the new roots
// (no logical reason to do this). You also may not start logging new roots and stop logging new
// roots (also no logical reason to do this).
enum VisitRootFlags : uint8_t {
  kVisitRootFlagAllRoots = 0x1,
  kVisitRootFlagNewRoots = 0x2,
  kVisitRootFlagStartLoggingNewRoots = 0x4,
  kVisitRootFlagStopLoggingNewRoots = 0x8,
  kVisitRootFlagClearRootLog = 0x10,
};

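// Illustrative lifecycle sketch (not part of this header's contract): a host, e.g. the JNI
// invocation API, builds a RuntimeOptions vector, creates the singleton, then fetches and
// starts it. The option string below is a placeholder, not a statement of which options exist.
//
//   RuntimeOptions options;
//   options.push_back(std::make_pair("-verbose:gc", nullptr));  // hypothetical option
//   if (Runtime::Create(options, /* ignore_unrecognized */ false)) {
//     Runtime* runtime = Runtime::Current();
//     runtime->Start();
//   }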
class Runtime {
 public:
  // Creates and initializes a new runtime.
  static bool Create(const RuntimeOptions& options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

  bool IsCompiler() const {
    return compiler_callbacks_ != nullptr;
  }

  bool CanRelocate() const {
    return !IsCompiler() || compiler_callbacks_->IsRelocationPossible();
  }

  bool ShouldRelocate() const {
    return must_relocate_ && CanRelocate();
  }

  bool MustRelocateIfPossible() const {
    return must_relocate_;
  }

  bool IsDex2OatEnabled() const {
    return dex2oat_enabled_ && IsImageDex2OatEnabled();
  }

  bool IsImageDex2OatEnabled() const {
    return image_dex2oat_enabled_;
  }

  CompilerCallbacks* GetCompilerCallbacks() {
    return compiler_callbacks_;
  }

  bool IsZygote() const {
    return is_zygote_;
  }

  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }

  std::string GetCompilerExecutable() const;
  std::string GetPatchoatExecutable() const;

  const std::vector<std::string>& GetCompilerOptions() const {
    return compiler_options_;
  }

  void AddCompilerOption(std::string option) {
    compiler_options_.push_back(option);
  }

  const std::vector<std::string>& GetImageCompilerOptions() const {
    return image_compiler_options_;
  }

  const std::string& GetImageLocation() const {
    return image_location_;
  }

  const ProfilerOptions& GetProfilerOptions() const {
    return profiler_options_;
  }

  // Starts a runtime, which may cause threads to be started and code to run.
  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);

  bool IsShuttingDown(Thread* self);
  bool IsShuttingDownLocked() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  size_t NumberOfThreadsBeingBorn() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  void StartThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  void EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);

  bool IsStarted() const {
    return started_;
  }

  bool IsFinishedStarting() const {
    return finished_starting_;
  }

  static Runtime* Current() {
    return instance_;
  }

  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
  // callers should prefer.
  // This isn't marked ((noreturn)) because then gcc will merge multiple calls
  // in a single function together. This reduces code size slightly, but means
  // that the native stack trace we get may point at the wrong call site.
  static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_);

  // Returns the "main" ThreadGroup, used when attaching user threads.
  jobject GetMainThreadGroup() const;

  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  jobject GetSystemThreadGroup() const;

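  // Illustrative sketch (an assumption about typical use, not guidance stated in this header):
  // a native thread attaches itself before touching managed state and detaches before it exits.
  // The thread name and flag values below are arbitrary examples; see AttachCurrentThread() and
  // DetachCurrentThread() below.
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->AttachCurrentThread("hypothetical-worker", /* as_daemon */ false,
  //                                runtime->GetMainThreadGroup(), /* create_peer */ true);
  //   ...  // call into managed code
  //   runtime->DetachCurrentThread();
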
  // Returns the system ClassLoader which represents the CLASSPATH.
  jobject GetSystemClassLoader() const;

  // Attaches the calling native thread to the runtime.
  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                           bool create_peer);

  void CallExitHook(jint status);

  // Detaches the current native thread from the runtime.
  void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_);

  void DumpForSigQuit(std::ostream& os)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DumpLockHolders(std::ostream& os);

  ~Runtime();

  const std::string& GetBootClassPathString() const {
    return boot_class_path_string_;
  }

  const std::string& GetClassPathString() const {
    return class_path_string_;
  }

  ClassLinker* GetClassLinker() const {
    return class_linker_;
  }

  size_t GetDefaultStackSize() const {
    return default_stack_size_;
  }

  gc::Heap* GetHeap() const {
    return heap_;
  }

  InternTable* GetInternTable() const {
    DCHECK(intern_table_ != NULL);
    return intern_table_;
  }

  JavaVMExt* GetJavaVM() const {
    return java_vm_;
  }

  size_t GetMaxSpinsBeforeThinkLockInflation() const {
    return max_spins_before_thin_lock_inflation_;
  }

  MonitorList* GetMonitorList() const {
    return monitor_list_;
  }

  MonitorPool* GetMonitorPool() const {
    return monitor_pool_;
  }

  mirror::Throwable* GetPreAllocatedOutOfMemoryError() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const std::vector<std::string>& GetProperties() const {
    return properties_;
  }

  ThreadList* GetThreadList() const {
    return thread_list_;
  }

  static const char* GetVersion() {
    return "2.1.0";
  }

  void DisallowNewSystemWeaks() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void AllowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all the roots. Which roots are visited, and whether new-root logging is started,
  // stopped or its log cleared, is controlled by the VisitRootFlags passed in.
  void VisitRoots(RootCallback* visitor, void* arg, VisitRootFlags flags = kVisitRootFlagAllRoots)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all of the roots we can safely visit concurrently.
  void VisitConcurrentRoots(RootCallback* visitor, void* arg,
                            VisitRootFlags flags = kVisitRootFlagAllRoots)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all of the non-thread roots; we can do this with mutators unpaused.
  void VisitNonThreadRoots(RootCallback* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all other roots which must be done with mutators suspended.
  void VisitNonConcurrentRoots(RootCallback* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

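  // Sketch of how a collector might drive the visitors above and the sweep below. This is an
  // assumption inferred from the lock annotations and comments here, not a description of a
  // specific collector; mark_callback and is_marked_callback are placeholder names.
  //
  //   runtime->VisitConcurrentRoots(mark_callback, arg);     // mutators may still be running
  //   runtime->VisitNonConcurrentRoots(mark_callback, arg);  // during a pause
  //   runtime->SweepSystemWeaks(is_marked_callback, arg);    // after marking has finished
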
  // Sweep system weaks: a system weak is deleted if the visitor returns nullptr; otherwise, the
  // system weak is updated to the visitor's returned value.
  void SweepSystemWeaks(IsMarkedCallback* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Constant roots are the roots which never change after the runtime is initialized; they only
  // need to be visited once per GC cycle.
  void VisitConstantRoots(RootCallback* callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime method resolution.
  mirror::ArtMethod* GetResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool HasResolutionMethod() const {
    return !resolution_method_.IsNull();
  }

  void SetResolutionMethod(mirror::ArtMethod* method) {
    resolution_method_ = GcRoot<mirror::ArtMethod>(method);
  }

  mirror::ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime imt conflicts.
  mirror::ArtMethod* GetImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::ArtMethod* GetImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool HasImtConflictMethod() const {
    return !imt_conflict_method_.IsNull();
  }

  void SetImtConflictMethod(mirror::ArtMethod* method) {
    imt_conflict_method_ = GcRoot<mirror::ArtMethod>(method);
  }
  void SetImtUnimplementedMethod(mirror::ArtMethod* method) {
    imt_unimplemented_method_ = GcRoot<mirror::ArtMethod>(method);
  }

  mirror::ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns an imt with every entry set to conflict, used as default imt for all classes.
  mirror::ObjectArray<mirror::ArtMethod>* GetDefaultImt()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool HasDefaultImt() const {
    return !default_imt_.IsNull();
  }

  void SetDefaultImt(mirror::ObjectArray<mirror::ArtMethod>* imt) {
    default_imt_ = GcRoot<mirror::ObjectArray<mirror::ArtMethod>>(imt);
  }

  mirror::ObjectArray<mirror::ArtMethod>* CreateDefaultImt(ClassLinker* cl)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

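  // Illustrative lookup of a callee-save runtime method, using only the declarations below.
  // This is a sketch; which CalleeSaveType a caller wants depends on the stub involved.
  //
  //   Runtime* runtime = Runtime::Current();
  //   if (runtime->HasCalleeSaveMethod(Runtime::kRefsAndArgs)) {
  //     mirror::ArtMethod* method = runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
  //     QuickMethodFrameInfo frame_info =
  //         runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
  //   }
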
  // Returns a special method that describes all callee saves being spilled to the stack.
  enum CalleeSaveType {
    kSaveAll,
    kRefsOnly,
    kRefsAndArgs,
    kLastCalleeSaveType  // Value used for iteration
  };

  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return !callee_save_methods_[type].IsNull();
  }

  mirror::ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
    return callee_save_method_frame_infos_[type];
  }

  QuickMethodFrameInfo GetRuntimeMethodFrameInfo(mirror::ArtMethod* method)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
    return OFFSETOF_MEMBER(Runtime, callee_save_methods_[type]);
  }

  InstructionSet GetInstructionSet() const {
    return instruction_set_;
  }

  void SetInstructionSet(InstructionSet instruction_set);

  void SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type);

  mirror::ArtMethod* CreateCalleeSaveMethod(CalleeSaveType type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  int32_t GetStat(int kind);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool HasStatsEnabled() const {
    return stats_enabled_;
  }

  void ResetStats(int kinds);

  void SetStatsEnabled(bool new_state) LOCKS_EXCLUDED(Locks::instrument_entrypoints_lock_,
                                                      Locks::mutator_lock_);

  enum class NativeBridgeAction {  // private
    kUnload,
    kInitialize
  };
  void PreZygoteFork();
  bool InitZygote();
  void DidForkFromZygote(JNIEnv* env, NativeBridgeAction action, const char* isa);

  const instrumentation::Instrumentation* GetInstrumentation() const {
    return &instrumentation_;
  }

  instrumentation::Instrumentation* GetInstrumentation() {
    return &instrumentation_;
  }

  bool UseCompileTimeClassPath() const {
    return use_compile_time_class_path_;
  }

  void AddMethodVerifier(verifier::MethodVerifier* verifier) LOCKS_EXCLUDED(method_verifier_lock_);
  void RemoveMethodVerifier(verifier::MethodVerifier* verifier)
      LOCKS_EXCLUDED(method_verifier_lock_);

  const std::vector<const DexFile*>& GetCompileTimeClassPath(jobject class_loader);
  void SetCompileTimeClassPath(jobject class_loader, std::vector<const DexFile*>& class_path);

  void StartProfiler(const char* profile_output_filename);
  void UpdateProfilerState(int state);

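  // Summary of intent inferred from the declarations that follow (not a full specification):
  // while a transaction is active, field, array and intern-table mutations performed during
  // compile-time class initialization are recorded so they can be undone if that initialization
  // has to be abandoned. A minimal sketch, where `transaction` is a hypothetical Transaction*:
  //
  //   runtime->EnterTransactionMode(transaction);
  //   ...  // run class initialization under the compiler
  //   runtime->ExitTransactionMode();
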
  // Transaction support.
  bool IsActiveTransaction() const {
    return preinitialization_transaction_ != nullptr;
  }
  void EnterTransactionMode(Transaction* transaction);
  void ExitTransactionMode();
  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                          bool is_volatile) const;
  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
                          bool is_volatile) const;
  void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
                                 mirror::Object* value, bool is_volatile) const;
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RecordStrongStringInsertion(mirror::String* s) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(mirror::String* s) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(mirror::String* s) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(mirror::String* s) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);

  void SetFaultMessage(const std::string& message);
  // Only read by the signal handler, NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations
  // with the unexpected_signal_lock_.
  const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
    return fault_message_;
  }

  void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;

  bool ExplicitNullChecks() const {
    return null_pointer_handler_ == nullptr;
  }

  bool ExplicitSuspendChecks() const {
    return suspend_handler_ == nullptr;
  }

  bool ExplicitStackOverflowChecks() const {
    return stack_overflow_handler_ == nullptr;
  }

  bool IsVerificationEnabled() const {
    return verify_;
  }

  bool RunningOnValgrind() const {
    return running_on_valgrind_;
  }

  void SetTargetSdkVersion(int32_t version) {
    target_sdk_version_ = version;
  }

  int32_t GetTargetSdkVersion() const {
    return target_sdk_version_;
  }

  static const char* GetDefaultInstructionSetFeatures() {
    return kDefaultInstructionSetFeatures;
  }

 private:
  static void InitPlatformSignalHandlers();

  Runtime();

  void BlockSignals();

  bool Init(const RuntimeOptions& options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
  void InitNativeMethods() LOCKS_EXCLUDED(Locks::mutator_lock_);
  void InitThreadGroups(Thread* self);
  void RegisterRuntimeNativeMethods(JNIEnv* env);

  void StartDaemonThreads();
  void StartSignalCatcher();

  // A pointer to the active runtime or NULL.
  static Runtime* instance_;

  static const char* kDefaultInstructionSetFeatures;

  // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
  static constexpr int kProfileForground = 0;
  static constexpr int kProfileBackgrouud = 1;

  GcRoot<mirror::ArtMethod> callee_save_methods_[kLastCalleeSaveType];
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_;
  GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
  GcRoot<mirror::ArtMethod> resolution_method_;
  GcRoot<mirror::ArtMethod> imt_conflict_method_;
  // The unimplemented method has the same behavior as the conflict method; it is used by the
  // class linker for differentiating between unfilled imt slots and conflict slots in
  // superclasses.
  GcRoot<mirror::ArtMethod> imt_unimplemented_method_;
  GcRoot<mirror::ObjectArray<mirror::ArtMethod>> default_imt_;

  InstructionSet instruction_set_;
  QuickMethodFrameInfo callee_save_method_frame_infos_[kLastCalleeSaveType];

  CompilerCallbacks* compiler_callbacks_;
  bool is_zygote_;
  bool must_relocate_;
  bool is_concurrent_gc_enabled_;
  bool is_explicit_gc_disabled_;
  bool dex2oat_enabled_;
  bool image_dex2oat_enabled_;

  std::string compiler_executable_;
  std::string patchoat_executable_;
  std::vector<std::string> compiler_options_;
  std::vector<std::string> image_compiler_options_;
  std::string image_location_;

  std::string boot_class_path_string_;
  std::string class_path_string_;
  std::vector<std::string> properties_;

  // The default stack size for managed threads created by the runtime.
  size_t default_stack_size_;

  gc::Heap* heap_;

  // The number of spins that are done before thread suspension is used to forcibly inflate
  // the lock.
  size_t max_spins_before_thin_lock_inflation_;
  MonitorList* monitor_list_;
  MonitorPool* monitor_pool_;

  ThreadList* thread_list_;

  InternTable* intern_table_;

  ClassLinker* class_linker_;

  SignalCatcher* signal_catcher_;
  std::string stack_trace_file_;

  JavaVMExt* java_vm_;

  // Fault message, printed when we get a SIGSEGV.
  Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::string fault_message_ GUARDED_BY(fault_message_lock_);

  // Method verifier set, used so that we can update their GC roots.
  Mutex method_verifier_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::set<verifier::MethodVerifier*> method_verifiers_;

  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
  // the shutdown lock so that threads aren't born while we're shutting down.
  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Waited upon until no threads are being born.
  std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Set when runtime shutdown is past the point that new threads may attach.
  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // The runtime is starting to shut down but is blocked waiting on shutdown_cond_.
  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  bool started_;

  // Tells us whether the runtime has finished starting. If this flag is set then the daemon
  // threads and the class loader have been created. This flag is needed for knowing if it's
  // safe to request CMS.
  bool finished_starting_;

  // Hooks supported by JNI_CreateJavaVM.
  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
  void (*exit_)(jint status);
  void (*abort_)();

  bool stats_enabled_;
  RuntimeStats stats_;

  const bool running_on_valgrind_;

  std::string profile_output_filename_;
  ProfilerOptions profiler_options_;
  bool profiler_started_;

  bool method_trace_;
  std::string method_trace_file_;
  size_t method_trace_file_size_;
  instrumentation::Instrumentation instrumentation_;

  typedef AllocationTrackingSafeMap<jobject, std::vector<const DexFile*>,
                                    kAllocatorTagCompileTimeClassPath, JobjectComparator>
      CompileTimeClassPaths;
  CompileTimeClassPaths compile_time_class_paths_;
  bool use_compile_time_class_path_;

  jobject main_thread_group_;
  jobject system_thread_group_;

  // As returned by ClassLoader.getSystemClassLoader().
  jobject system_class_loader_;

  // If true, then we dump the GC cumulative timings on shutdown.
  bool dump_gc_performance_on_shutdown_;

  // Transaction used for pre-initializing classes at compilation time.
  Transaction* preinitialization_transaction_;
  NullPointerHandler* null_pointer_handler_;
  SuspensionHandler* suspend_handler_;
  StackOverflowHandler* stack_overflow_handler_;

  // If false, verification is disabled. True by default.
  bool verify_;

  // Specifies the target SDK version to allow workarounds for certain API levels.
  int32_t target_sdk_version_;

  // Implicit checks flags.
  bool implicit_null_checks_;     // NullPointer checks are implicit.
  bool implicit_so_checks_;       // StackOverflow checks are implicit.
  bool implicit_suspend_checks_;  // Thread suspension checks are implicit.

  // Whether or not a native bridge has been loaded.
  //
  // The native bridge allows running native code compiled for a foreign ISA. The way it works is:
  // if the standard dlopen fails to load the native library associated with a native activity, it
  // calls into the native bridge to load it and then gets the trampoline for the entry to the
  // native activity.
  //
  // The option 'native_bridge_library_filename' specifies the name of the native bridge.
  // When non-empty the native bridge will be loaded from the given file. An empty value means
  // that there's no native bridge.
  bool is_native_bridge_loaded_;

  DISALLOW_COPY_AND_ASSIGN(Runtime);
};

}  // namespace art

#endif  // ART_RUNTIME_RUNTIME_H_