1 // Copyright 2012 the V8 project authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be 3 // found in the LICENSE file. 4 5 #ifndef V8_ISOLATE_H_ 6 #define V8_ISOLATE_H_ 7 8 #include <memory> 9 #include <queue> 10 11 #include "include/v8-debug.h" 12 #include "src/allocation.h" 13 #include "src/base/atomicops.h" 14 #include "src/builtins/builtins.h" 15 #include "src/contexts.h" 16 #include "src/date.h" 17 #include "src/execution.h" 18 #include "src/frames.h" 19 #include "src/futex-emulation.h" 20 #include "src/global-handles.h" 21 #include "src/handles.h" 22 #include "src/heap/heap.h" 23 #include "src/messages.h" 24 #include "src/regexp/regexp-stack.h" 25 #include "src/runtime/runtime.h" 26 #include "src/zone/zone.h" 27 28 namespace v8 { 29 30 namespace base { 31 class RandomNumberGenerator; 32 } 33 34 namespace internal { 35 36 class AccessCompilerData; 37 class AddressToIndexHashMap; 38 class BasicBlockProfiler; 39 class Bootstrapper; 40 class CancelableTaskManager; 41 class CallInterfaceDescriptorData; 42 class CodeAgingHelper; 43 class CodeEventDispatcher; 44 class CodeGenerator; 45 class CodeRange; 46 class CodeStubDescriptor; 47 class CodeTracer; 48 class CompilationCache; 49 class CompilerDispatcherTracer; 50 class CompilationStatistics; 51 class ContextSlotCache; 52 class Counters; 53 class CpuFeatures; 54 class CpuProfiler; 55 class DeoptimizerData; 56 class DescriptorLookupCache; 57 class Deserializer; 58 class EmptyStatement; 59 class ExternalCallbackScope; 60 class ExternalReferenceTable; 61 class Factory; 62 class HandleScopeImplementer; 63 class HeapObjectToIndexHashMap; 64 class HeapProfiler; 65 class HStatistics; 66 class HTracer; 67 class InlineRuntimeFunctionsTable; 68 class InnerPointerToCodeCache; 69 class Logger; 70 class MaterializedObjectStore; 71 class OptimizingCompileDispatcher; 72 class RegExpStack; 73 class RuntimeProfiler; 74 class SaveContext; 75 class StatsTable; 76 class 
StringTracker; 77 class StubCache; 78 class SweeperThread; 79 class ThreadManager; 80 class ThreadState; 81 class ThreadVisitor; // Defined in v8threads.h 82 class UnicodeCache; 83 template <StateTag Tag> class VMState; 84 85 // 'void function pointer', used to roundtrip the 86 // ExternalReference::ExternalReferenceRedirector since we can not include 87 // assembler.h, where it is defined, here. 88 typedef void* ExternalReferenceRedirectorPointer(); 89 90 91 class Debug; 92 class PromiseOnStack; 93 class Redirection; 94 class Simulator; 95 96 namespace interpreter { 97 class Interpreter; 98 } 99 100 #define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \ 101 do { \ 102 Isolate* __isolate__ = (isolate); \ 103 if (__isolate__->has_scheduled_exception()) { \ 104 return __isolate__->PromoteScheduledException(); \ 105 } \ 106 } while (false) 107 108 // Macros for MaybeHandle. 109 110 #define RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, value) \ 111 do { \ 112 Isolate* __isolate__ = (isolate); \ 113 if (__isolate__->has_scheduled_exception()) { \ 114 __isolate__->PromoteScheduledException(); \ 115 return value; \ 116 } \ 117 } while (false) 118 119 #define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \ 120 RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>()) 121 122 #define RETURN_RESULT_OR_FAILURE(isolate, call) \ 123 do { \ 124 Handle<Object> __result__; \ 125 Isolate* __isolate__ = (isolate); \ 126 if (!(call).ToHandle(&__result__)) { \ 127 DCHECK(__isolate__->has_pending_exception()); \ 128 return __isolate__->heap()->exception(); \ 129 } \ 130 return *__result__; \ 131 } while (false) 132 133 #define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \ 134 do { \ 135 if (!(call).ToHandle(&dst)) { \ 136 DCHECK((isolate)->has_pending_exception()); \ 137 return value; \ 138 } \ 139 } while (false) 140 141 #define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \ 142 do { \ 143 Isolate* __isolate__ = (isolate); \ 144 
ASSIGN_RETURN_ON_EXCEPTION_VALUE(__isolate__, dst, call, \ 145 __isolate__->heap()->exception()); \ 146 } while (false) 147 148 #define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \ 149 ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>()) 150 151 #define THROW_NEW_ERROR(isolate, call, T) \ 152 do { \ 153 Isolate* __isolate__ = (isolate); \ 154 return __isolate__->Throw<T>(__isolate__->factory()->call); \ 155 } while (false) 156 157 #define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call) \ 158 do { \ 159 Isolate* __isolate__ = (isolate); \ 160 return __isolate__->Throw(*__isolate__->factory()->call); \ 161 } while (false) 162 163 #define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \ 164 do { \ 165 if ((call).is_null()) { \ 166 DCHECK((isolate)->has_pending_exception()); \ 167 return value; \ 168 } \ 169 } while (false) 170 171 #define RETURN_FAILURE_ON_EXCEPTION(isolate, call) \ 172 do { \ 173 Isolate* __isolate__ = (isolate); \ 174 RETURN_ON_EXCEPTION_VALUE(__isolate__, call, \ 175 __isolate__->heap()->exception()); \ 176 } while (false); 177 178 #define RETURN_ON_EXCEPTION(isolate, call, T) \ 179 RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>()) 180 181 182 #define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \ 183 C(Handler, handler) \ 184 C(CEntryFP, c_entry_fp) \ 185 C(CFunction, c_function) \ 186 C(Context, context) \ 187 C(PendingException, pending_exception) \ 188 C(PendingHandlerContext, pending_handler_context) \ 189 C(PendingHandlerCode, pending_handler_code) \ 190 C(PendingHandlerOffset, pending_handler_offset) \ 191 C(PendingHandlerFP, pending_handler_fp) \ 192 C(PendingHandlerSP, pending_handler_sp) \ 193 C(ExternalCaughtException, external_caught_exception) \ 194 C(JSEntrySP, js_entry_sp) 195 196 #define FOR_WITH_HANDLE_SCOPE(isolate, loop_var_type, init, loop_var, \ 197 limit_check, increment, body) \ 198 do { \ 199 loop_var_type init; \ 200 loop_var_type for_with_handle_limit = loop_var; \ 201 Isolate* for_with_handle_isolate 
= isolate; \ 202 while (limit_check) { \ 203 for_with_handle_limit += 1024; \ 204 HandleScope loop_scope(for_with_handle_isolate); \ 205 for (; limit_check && loop_var < for_with_handle_limit; increment) { \ 206 body \ 207 } \ 208 } \ 209 } while (false) 210 211 // Platform-independent, reliable thread identifier. 212 class ThreadId { 213 public: 214 // Creates an invalid ThreadId. ThreadId()215 ThreadId() { base::NoBarrier_Store(&id_, kInvalidId); } 216 217 ThreadId& operator=(const ThreadId& other) { 218 base::NoBarrier_Store(&id_, base::NoBarrier_Load(&other.id_)); 219 return *this; 220 } 221 222 // Returns ThreadId for current thread. Current()223 static ThreadId Current() { return ThreadId(GetCurrentThreadId()); } 224 225 // Returns invalid ThreadId (guaranteed not to be equal to any thread). Invalid()226 static ThreadId Invalid() { return ThreadId(kInvalidId); } 227 228 // Compares ThreadIds for equality. INLINE(bool Equals (const ThreadId & other)const)229 INLINE(bool Equals(const ThreadId& other) const) { 230 return base::NoBarrier_Load(&id_) == base::NoBarrier_Load(&other.id_); 231 } 232 233 // Checks whether this ThreadId refers to any thread. INLINE(bool IsValid ()const)234 INLINE(bool IsValid() const) { 235 return base::NoBarrier_Load(&id_) != kInvalidId; 236 } 237 238 // Converts ThreadId to an integer representation 239 // (required for public API: V8::V8::GetCurrentThreadId). ToInteger()240 int ToInteger() const { return static_cast<int>(base::NoBarrier_Load(&id_)); } 241 242 // Converts ThreadId to an integer representation 243 // (required for public API: V8::V8::TerminateExecution). 
FromInteger(int id)244 static ThreadId FromInteger(int id) { return ThreadId(id); } 245 246 private: 247 static const int kInvalidId = -1; 248 ThreadId(int id)249 explicit ThreadId(int id) { base::NoBarrier_Store(&id_, id); } 250 251 static int AllocateThreadId(); 252 253 static int GetCurrentThreadId(); 254 255 base::Atomic32 id_; 256 257 static base::Atomic32 highest_thread_id_; 258 259 friend class Isolate; 260 }; 261 262 263 #define FIELD_ACCESSOR(type, name) \ 264 inline void set_##name(type v) { name##_ = v; } \ 265 inline type name() const { return name##_; } 266 267 268 class ThreadLocalTop BASE_EMBEDDED { 269 public: 270 // Does early low-level initialization that does not depend on the 271 // isolate being present. 272 ThreadLocalTop(); 273 274 // Initialize the thread data. 275 void Initialize(); 276 277 // Get the top C++ try catch handler or NULL if none are registered. 278 // 279 // This method is not guaranteed to return an address that can be 280 // used for comparison with addresses into the JS stack. If such an 281 // address is needed, use try_catch_handler_address. FIELD_ACCESSOR(v8::TryCatch *,try_catch_handler)282 FIELD_ACCESSOR(v8::TryCatch*, try_catch_handler) 283 284 // Get the address of the top C++ try catch handler or NULL if 285 // none are registered. 286 // 287 // This method always returns an address that can be compared to 288 // pointers into the JavaScript stack. When running on actual 289 // hardware, try_catch_handler_address and TryCatchHandler return 290 // the same pointer. When running on a simulator with a separate JS 291 // stack, try_catch_handler_address returns a JS stack address that 292 // corresponds to the place on the JS stack where the C++ handler 293 // would have been if the stack were not separate. 
294 Address try_catch_handler_address() { 295 return reinterpret_cast<Address>( 296 v8::TryCatch::JSStackComparableAddress(try_catch_handler())); 297 } 298 299 void Free(); 300 301 Isolate* isolate_; 302 // The context where the current execution method is created and for variable 303 // lookups. 304 Context* context_; 305 ThreadId thread_id_; 306 Object* pending_exception_; 307 308 // Communication channel between Isolate::FindHandler and the CEntryStub. 309 Context* pending_handler_context_; 310 Code* pending_handler_code_; 311 intptr_t pending_handler_offset_; 312 Address pending_handler_fp_; 313 Address pending_handler_sp_; 314 315 // Communication channel between Isolate::Throw and message consumers. 316 bool rethrowing_message_; 317 Object* pending_message_obj_; 318 319 // Use a separate value for scheduled exceptions to preserve the 320 // invariants that hold about pending_exception. We may want to 321 // unify them later. 322 Object* scheduled_exception_; 323 bool external_caught_exception_; 324 SaveContext* save_context_; 325 326 // Stack. 327 Address c_entry_fp_; // the frame pointer of the top c entry frame 328 Address handler_; // try-blocks are chained through the stack 329 Address c_function_; // C function that was called at c entry. 330 331 // Throwing an exception may cause a Promise rejection. For this purpose 332 // we keep track of a stack of nested promises and the corresponding 333 // try-catch handlers. 334 PromiseOnStack* promise_on_stack_; 335 336 #ifdef USE_SIMULATOR 337 Simulator* simulator_; 338 #endif 339 340 Address js_entry_sp_; // the stack pointer of the bottom JS entry frame 341 // the external callback we're currently in 342 ExternalCallbackScope* external_callback_scope_; 343 StateTag current_vm_state_; 344 345 // Call back function to report unsafe JS accesses. 
346 v8::FailedAccessCheckCallback failed_access_check_callback_; 347 348 private: 349 void InitializeInternal(); 350 351 v8::TryCatch* try_catch_handler_; 352 }; 353 354 355 #if USE_SIMULATOR 356 357 #define ISOLATE_INIT_SIMULATOR_LIST(V) \ 358 V(bool, simulator_initialized, false) \ 359 V(base::CustomMatcherHashMap*, simulator_i_cache, NULL) \ 360 V(Redirection*, simulator_redirection, NULL) 361 #else 362 363 #define ISOLATE_INIT_SIMULATOR_LIST(V) 364 365 #endif 366 367 368 #ifdef DEBUG 369 370 #define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \ 371 V(CommentStatistic, paged_space_comments_statistics, \ 372 CommentStatistic::kMaxComments + 1) \ 373 V(int, code_kind_statistics, AbstractCode::NUMBER_OF_KINDS) 374 #else 375 376 #define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) 377 378 #endif 379 380 #define ISOLATE_INIT_ARRAY_LIST(V) \ 381 /* SerializerDeserializer state. */ \ 382 V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \ 383 V(int, bad_char_shift_table, kUC16AlphabetSize) \ 384 V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \ 385 V(int, suffix_table, (kBMMaxShift + 1)) \ 386 V(uint32_t, private_random_seed, 2) \ 387 ISOLATE_INIT_DEBUG_ARRAY_LIST(V) 388 389 typedef List<HeapObject*> DebugObjectCache; 390 391 #define ISOLATE_INIT_LIST(V) \ 392 /* Assembler state. */ \ 393 V(FatalErrorCallback, exception_behavior, nullptr) \ 394 V(OOMErrorCallback, oom_behavior, nullptr) \ 395 V(LogEventCallback, event_logger, nullptr) \ 396 V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \ 397 V(ExternalReferenceRedirectorPointer*, external_reference_redirector, \ 398 nullptr) \ 399 /* State for Relocatable. 
*/ \ 400 V(Relocatable*, relocatable_top, nullptr) \ 401 V(DebugObjectCache*, string_stream_debug_object_cache, nullptr) \ 402 V(Object*, string_stream_current_security_token, nullptr) \ 403 V(ExternalReferenceTable*, external_reference_table, nullptr) \ 404 V(intptr_t*, api_external_references, nullptr) \ 405 V(AddressToIndexHashMap*, external_reference_map, nullptr) \ 406 V(HeapObjectToIndexHashMap*, root_index_map, nullptr) \ 407 V(v8::DeserializeInternalFieldsCallback, \ 408 deserialize_internal_fields_callback, nullptr) \ 409 V(int, pending_microtask_count, 0) \ 410 V(int, debug_microtask_count, 0) \ 411 V(HStatistics*, hstatistics, nullptr) \ 412 V(CompilationStatistics*, turbo_statistics, nullptr) \ 413 V(HTracer*, htracer, nullptr) \ 414 V(CodeTracer*, code_tracer, nullptr) \ 415 V(bool, fp_stubs_generated, false) \ 416 V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \ 417 V(PromiseRejectCallback, promise_reject_callback, nullptr) \ 418 V(const v8::StartupData*, snapshot_blob, nullptr) \ 419 V(int, code_and_metadata_size, 0) \ 420 V(int, bytecode_and_metadata_size, 0) \ 421 /* true if being profiled. Causes collection of extra compile info. */ \ 422 V(bool, is_profiling, false) \ 423 /* true if a trace is being formatted through Error.prepareStackTrace. */ \ 424 V(bool, formatting_stack_trace, false) \ 425 ISOLATE_INIT_SIMULATOR_LIST(V) 426 427 #define THREAD_LOCAL_TOP_ACCESSOR(type, name) \ 428 inline void set_##name(type v) { thread_local_top_.name##_ = v; } \ 429 inline type name() const { return thread_local_top_.name##_; } 430 431 #define THREAD_LOCAL_TOP_ADDRESS(type, name) \ 432 type* name##_address() { return &thread_local_top_.name##_; } 433 434 435 class Isolate { 436 // These forward declarations are required to make the friend declarations in 437 // PerIsolateThreadData work on some older versions of gcc. 
438 class ThreadDataTable; 439 class EntryStackItem; 440 public: 441 ~Isolate(); 442 443 // A thread has a PerIsolateThreadData instance for each isolate that it has 444 // entered. That instance is allocated when the isolate is initially entered 445 // and reused on subsequent entries. 446 class PerIsolateThreadData { 447 public: PerIsolateThreadData(Isolate * isolate,ThreadId thread_id)448 PerIsolateThreadData(Isolate* isolate, ThreadId thread_id) 449 : isolate_(isolate), 450 thread_id_(thread_id), 451 stack_limit_(0), 452 thread_state_(NULL), 453 #if USE_SIMULATOR 454 simulator_(NULL), 455 #endif 456 next_(NULL), 457 prev_(NULL) { } 458 ~PerIsolateThreadData(); isolate()459 Isolate* isolate() const { return isolate_; } thread_id()460 ThreadId thread_id() const { return thread_id_; } 461 FIELD_ACCESSOR(uintptr_t,stack_limit)462 FIELD_ACCESSOR(uintptr_t, stack_limit) 463 FIELD_ACCESSOR(ThreadState*, thread_state) 464 465 #if USE_SIMULATOR 466 FIELD_ACCESSOR(Simulator*, simulator) 467 #endif 468 469 bool Matches(Isolate* isolate, ThreadId thread_id) const { 470 return isolate_ == isolate && thread_id_.Equals(thread_id); 471 } 472 473 private: 474 Isolate* isolate_; 475 ThreadId thread_id_; 476 uintptr_t stack_limit_; 477 ThreadState* thread_state_; 478 479 #if USE_SIMULATOR 480 Simulator* simulator_; 481 #endif 482 483 PerIsolateThreadData* next_; 484 PerIsolateThreadData* prev_; 485 486 friend class Isolate; 487 friend class ThreadDataTable; 488 friend class EntryStackItem; 489 490 DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData); 491 }; 492 493 494 enum AddressId { 495 #define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address, 496 FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM) 497 #undef DECLARE_ENUM 498 kIsolateAddressCount 499 }; 500 501 static void InitializeOncePerProcess(); 502 503 // Returns the PerIsolateThreadData for the current thread (or NULL if one is 504 // not currently set). 
CurrentPerIsolateThreadData()505 static PerIsolateThreadData* CurrentPerIsolateThreadData() { 506 return reinterpret_cast<PerIsolateThreadData*>( 507 base::Thread::GetThreadLocal(per_isolate_thread_data_key_)); 508 } 509 510 // Returns the isolate inside which the current thread is running. INLINE(static Isolate * Current ())511 INLINE(static Isolate* Current()) { 512 DCHECK(base::NoBarrier_Load(&isolate_key_created_) == 1); 513 Isolate* isolate = reinterpret_cast<Isolate*>( 514 base::Thread::GetExistingThreadLocal(isolate_key_)); 515 DCHECK(isolate != NULL); 516 return isolate; 517 } 518 519 // Usually called by Init(), but can be called early e.g. to allow 520 // testing components that require logging but not the whole 521 // isolate. 522 // 523 // Safe to call more than once. 524 void InitializeLoggingAndCounters(); 525 526 bool Init(Deserializer* des); 527 528 // True if at least one thread Enter'ed this isolate. IsInUse()529 bool IsInUse() { return entry_stack_ != NULL; } 530 531 // Destroys the non-default isolates. 532 // Sets default isolate into "has_been_disposed" state rather then destroying, 533 // for legacy API reasons. 534 void TearDown(); 535 536 static void GlobalTearDown(); 537 538 void ClearSerializerData(); 539 540 // Find the PerThread for this particular (isolate, thread) combination 541 // If one does not yet exist, return null. 542 PerIsolateThreadData* FindPerThreadDataForThisThread(); 543 544 // Find the PerThread for given (isolate, thread) combination 545 // If one does not yet exist, return null. 546 PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id); 547 548 // Discard the PerThread for this particular (isolate, thread) combination 549 // If one does not yet exist, no-op. 550 void DiscardPerThreadDataForThisThread(); 551 552 // Returns the key used to store the pointer to the current isolate. 
553 // Used internally for V8 threads that do not execute JavaScript but still 554 // are part of the domain of an isolate (like the context switcher). isolate_key()555 static base::Thread::LocalStorageKey isolate_key() { 556 return isolate_key_; 557 } 558 559 // Returns the key used to store process-wide thread IDs. thread_id_key()560 static base::Thread::LocalStorageKey thread_id_key() { 561 return thread_id_key_; 562 } 563 564 static base::Thread::LocalStorageKey per_isolate_thread_data_key(); 565 566 // Mutex for serializing access to break control structures. break_access()567 base::RecursiveMutex* break_access() { return &break_access_; } 568 569 Address get_address_from_id(AddressId id); 570 571 // Access to top context (where the current function object was created). context()572 Context* context() { return thread_local_top_.context_; } 573 inline void set_context(Context* context); context_address()574 Context** context_address() { return &thread_local_top_.context_; } 575 576 THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context) 577 578 // Access to current thread id. 579 THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id) 580 581 // Interface to pending exception. 
582 inline Object* pending_exception(); 583 inline void set_pending_exception(Object* exception_obj); 584 inline void clear_pending_exception(); 585 586 THREAD_LOCAL_TOP_ADDRESS(Object*, pending_exception) 587 588 inline bool has_pending_exception(); 589 THREAD_LOCAL_TOP_ADDRESS(Context *,pending_handler_context)590 THREAD_LOCAL_TOP_ADDRESS(Context*, pending_handler_context) 591 THREAD_LOCAL_TOP_ADDRESS(Code*, pending_handler_code) 592 THREAD_LOCAL_TOP_ADDRESS(intptr_t, pending_handler_offset) 593 THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_fp) 594 THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_sp) 595 596 THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception) 597 598 v8::TryCatch* try_catch_handler() { 599 return thread_local_top_.try_catch_handler(); 600 } external_caught_exception_address()601 bool* external_caught_exception_address() { 602 return &thread_local_top_.external_caught_exception_; 603 } 604 605 THREAD_LOCAL_TOP_ADDRESS(Object*, scheduled_exception) 606 607 inline void clear_pending_message(); pending_message_obj_address()608 Address pending_message_obj_address() { 609 return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_); 610 } 611 612 inline Object* scheduled_exception(); 613 inline bool has_scheduled_exception(); 614 inline void clear_scheduled_exception(); 615 616 bool IsJavaScriptHandlerOnTop(Object* exception); 617 bool IsExternalHandlerOnTop(Object* exception); 618 619 inline bool is_catchable_by_javascript(Object* exception); 620 inline bool is_catchable_by_wasm(Object* exception); 621 622 // JS execution stack (see frames.h). 
c_entry_fp(ThreadLocalTop * thread)623 static Address c_entry_fp(ThreadLocalTop* thread) { 624 return thread->c_entry_fp_; 625 } handler(ThreadLocalTop * thread)626 static Address handler(ThreadLocalTop* thread) { return thread->handler_; } c_function()627 Address c_function() { return thread_local_top_.c_function_; } 628 c_entry_fp_address()629 inline Address* c_entry_fp_address() { 630 return &thread_local_top_.c_entry_fp_; 631 } handler_address()632 inline Address* handler_address() { return &thread_local_top_.handler_; } c_function_address()633 inline Address* c_function_address() { 634 return &thread_local_top_.c_function_; 635 } 636 637 // Bottom JS entry. js_entry_sp()638 Address js_entry_sp() { 639 return thread_local_top_.js_entry_sp_; 640 } js_entry_sp_address()641 inline Address* js_entry_sp_address() { 642 return &thread_local_top_.js_entry_sp_; 643 } 644 645 // Returns the global object of the current context. It could be 646 // a builtin object, or a JS global object. 647 inline Handle<JSGlobalObject> global_object(); 648 649 // Returns the global proxy object of the current context. 650 inline Handle<JSObject> global_proxy(); 651 ArchiveSpacePerThread()652 static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); } FreeThreadResources()653 void FreeThreadResources() { thread_local_top_.Free(); } 654 655 // This method is called by the api after operations that may throw 656 // exceptions. If an exception was thrown and not handled by an external 657 // handler the exception is scheduled to be rethrown when we return to running 658 // JavaScript code. If an exception is scheduled true is returned. 659 bool OptionalRescheduleException(bool is_bottom_call); 660 661 // Push and pop a promise and the current try-catch handler. 
662 void PushPromise(Handle<JSObject> promise); 663 void PopPromise(); 664 665 // Return the relevant Promise that a throw/rejection pertains to, based 666 // on the contents of the Promise stack 667 Handle<Object> GetPromiseOnStackOnThrow(); 668 669 // Heuristically guess whether a Promise is handled by user catch handler 670 bool PromiseHasUserDefinedRejectHandler(Handle<Object> promise); 671 672 class ExceptionScope { 673 public: 674 // Scope currently can only be used for regular exceptions, 675 // not termination exception. 676 inline explicit ExceptionScope(Isolate* isolate); 677 inline ~ExceptionScope(); 678 679 private: 680 Isolate* isolate_; 681 Handle<Object> pending_exception_; 682 }; 683 684 void SetCaptureStackTraceForUncaughtExceptions( 685 bool capture, 686 int frame_limit, 687 StackTrace::StackTraceOptions options); 688 689 void SetAbortOnUncaughtExceptionCallback( 690 v8::Isolate::AbortOnUncaughtExceptionCallback callback); 691 692 enum PrintStackMode { kPrintStackConcise, kPrintStackVerbose }; 693 void PrintCurrentStackTrace(FILE* out); 694 void PrintStack(StringStream* accumulator, 695 PrintStackMode mode = kPrintStackVerbose); 696 void PrintStack(FILE* out, PrintStackMode mode = kPrintStackVerbose); 697 Handle<String> StackTraceString(); 698 NO_INLINE(void PushStackTraceAndDie(unsigned int magic, void* ptr1, 699 void* ptr2, unsigned int magic2)); 700 Handle<JSArray> CaptureCurrentStackTrace( 701 int frame_limit, 702 StackTrace::StackTraceOptions options); 703 Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object, 704 FrameSkipMode mode, 705 Handle<Object> caller); 706 MaybeHandle<JSReceiver> CaptureAndSetDetailedStackTrace( 707 Handle<JSReceiver> error_object); 708 MaybeHandle<JSReceiver> CaptureAndSetSimpleStackTrace( 709 Handle<JSReceiver> error_object, FrameSkipMode mode, 710 Handle<Object> caller); 711 Handle<JSArray> GetDetailedStackTrace(Handle<JSObject> error_object); 712 713 // Returns if the given context may access the 
given global object. If 714 // the result is false, the pending exception is guaranteed to be 715 // set. 716 bool MayAccess(Handle<Context> accessing_context, Handle<JSObject> receiver); 717 718 void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback); 719 void ReportFailedAccessCheck(Handle<JSObject> receiver); 720 721 // Exception throwing support. The caller should use the result 722 // of Throw() as its return value. 723 Object* Throw(Object* exception, MessageLocation* location = NULL); 724 Object* ThrowIllegalOperation(); 725 726 template <typename T> 727 MUST_USE_RESULT MaybeHandle<T> Throw(Handle<Object> exception, 728 MessageLocation* location = NULL) { 729 Throw(*exception, location); 730 return MaybeHandle<T>(); 731 } 732 733 // Re-throw an exception. This involves no error reporting since error 734 // reporting was handled when the exception was thrown originally. 735 Object* ReThrow(Object* exception); 736 737 // Find the correct handler for the current pending exception. This also 738 // clears and returns the current pending exception. 739 Object* UnwindAndFindHandler(); 740 741 // Tries to predict whether an exception will be caught. Note that this can 742 // only produce an estimate, because it is undecidable whether a finally 743 // clause will consume or re-throw an exception. 744 enum CatchType { 745 NOT_CAUGHT, 746 CAUGHT_BY_JAVASCRIPT, 747 CAUGHT_BY_EXTERNAL, 748 CAUGHT_BY_DESUGARING, 749 CAUGHT_BY_PROMISE, 750 CAUGHT_BY_ASYNC_AWAIT 751 }; 752 CatchType PredictExceptionCatcher(); 753 754 void ScheduleThrow(Object* exception); 755 // Re-set pending message, script and positions reported to the TryCatch 756 // back to the TLS for re-use when rethrowing. 757 void RestorePendingMessageFromTryCatch(v8::TryCatch* handler); 758 // Un-schedule an exception that was caught by a TryCatch handler. 
759 void CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler); 760 void ReportPendingMessages(); 761 // Return pending location if any or unfilled structure. 762 MessageLocation GetMessageLocation(); 763 764 // Promote a scheduled exception to pending. Asserts has_scheduled_exception. 765 Object* PromoteScheduledException(); 766 767 // Attempts to compute the current source location, storing the 768 // result in the target out parameter. 769 bool ComputeLocation(MessageLocation* target); 770 bool ComputeLocationFromException(MessageLocation* target, 771 Handle<Object> exception); 772 bool ComputeLocationFromStackTrace(MessageLocation* target, 773 Handle<Object> exception); 774 775 Handle<JSMessageObject> CreateMessage(Handle<Object> exception, 776 MessageLocation* location); 777 778 // Out of resource exception helpers. 779 Object* StackOverflow(); 780 Object* TerminateExecution(); 781 void CancelTerminateExecution(); 782 783 void RequestInterrupt(InterruptCallback callback, void* data); 784 void InvokeApiInterruptCallbacks(); 785 786 // Administration 787 void Iterate(ObjectVisitor* v); 788 void Iterate(ObjectVisitor* v, ThreadLocalTop* t); 789 char* Iterate(ObjectVisitor* v, char* t); 790 void IterateThread(ThreadVisitor* v, char* t); 791 792 // Returns the current native context. 793 inline Handle<Context> native_context(); 794 inline Context* raw_native_context(); 795 796 // Returns the native context of the calling JavaScript code. That 797 // is, the native context of the top-most JavaScript frame. 798 Handle<Context> GetCallingNativeContext(); 799 800 void RegisterTryCatchHandler(v8::TryCatch* that); 801 void UnregisterTryCatchHandler(v8::TryCatch* that); 802 803 char* ArchiveThread(char* to); 804 char* RestoreThread(char* from); 805 806 static const int kUC16AlphabetSize = 256; // See StringSearchBase. 807 static const int kBMMaxShift = 250; // See StringSearchBase. 808 809 // Accessors. 
  // Accessors for the mutable global state declared in ISOLATE_INIT_LIST.
  // The DCHECKs verify (in debug builds) that each field's offset matches the
  // statically recorded name##_debug_offset_, keeping all compilation units in
  // agreement about the layout of this class.
#define GLOBAL_ACCESSOR(type, name, initialvalue)                \
  inline type name() const {                                     \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
    return name##_;                                              \
  }                                                              \
  inline void set_##name(type value) {                           \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
    name##_ = value;                                             \
  }
  ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
#undef GLOBAL_ACCESSOR

  // Accessors for the array-valued global state declared in
  // ISOLATE_INIT_ARRAY_LIST; each returns a pointer to the first element.
#define GLOBAL_ARRAY_ACCESSOR(type, name, length)                \
  inline type* name() {                                          \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
    return &(name##_)[0];                                        \
  }
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR

  // For every native-context field, declare a getter returning the field of
  // the current native context plus an is_##name() membership predicate.
#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
  inline Handle<type> name();                            \
  inline bool is_##name(type* value);
  NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR

  Bootstrapper* bootstrapper() { return bootstrapper_; }
  Counters* counters() {
    // Call InitializeLoggingAndCounters() if logging is needed before
    // the isolate is fully initialized.
    DCHECK(counters_ != NULL);
    return counters_;
  }
  RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
  CompilationCache* compilation_cache() { return compilation_cache_; }
  Logger* logger() {
    // Call InitializeLoggingAndCounters() if logging is needed before
    // the isolate is fully initialized.
    DCHECK(logger_ != NULL);
    return logger_;
  }
  StackGuard* stack_guard() { return &stack_guard_; }
  Heap* heap() { return &heap_; }
  StatsTable* stats_table();
  StubCache* load_stub_cache() { return load_stub_cache_; }
  StubCache* store_stub_cache() { return store_stub_cache_; }
  CodeAgingHelper* code_aging_helper() { return code_aging_helper_; }
  DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
  bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
  void set_deoptimizer_lazy_throw(bool value) {
    deoptimizer_lazy_throw_ = value;
  }
  ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
  MaterializedObjectStore* materialized_object_store() {
    return materialized_object_store_;
  }

  ContextSlotCache* context_slot_cache() {
    return context_slot_cache_;
  }

  DescriptorLookupCache* descriptor_lookup_cache() {
    return descriptor_lookup_cache_;
  }

  HandleScopeData* handle_scope_data() { return &handle_scope_data_; }

  HandleScopeImplementer* handle_scope_implementer() {
    DCHECK(handle_scope_implementer_);
    return handle_scope_implementer_;
  }

  UnicodeCache* unicode_cache() {
    return unicode_cache_;
  }

  InnerPointerToCodeCache* inner_pointer_to_code_cache() {
    return inner_pointer_to_code_cache_;
  }

  GlobalHandles* global_handles() { return global_handles_; }

  EternalHandles* eternal_handles() { return eternal_handles_; }

  ThreadManager* thread_manager() { return thread_manager_; }

  unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
    return &jsregexp_uncanonicalize_;
  }

  unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
    return &jsregexp_canonrange_;
  }

  RuntimeState* runtime_state() { return &runtime_state_; }

  Builtins* builtins() { return &builtins_; }

  void NotifyExtensionInstalled() {
    has_installed_extensions_ = true;
  }

  bool has_installed_extensions() { return has_installed_extensions_; }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      regexp_macro_assembler_canonicalize() {
    return &regexp_macro_assembler_canonicalize_;
  }

  RegExpStack* regexp_stack() { return regexp_stack_; }

  List<int>* regexp_indices() { return &regexp_indices_; }

  // Note: intentionally returns the same mapping table as
  // regexp_macro_assembler_canonicalize() above.
  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      interp_canonicalize_mapping() {
    return &regexp_macro_assembler_canonicalize_;
  }

  Debug* debug() { return debug_; }

  bool* is_profiling_address() { return &is_profiling_; }
  CodeEventDispatcher* code_event_dispatcher() const {
    return code_event_dispatcher_.get();
  }
  HeapProfiler* heap_profiler() const { return heap_profiler_; }

#ifdef DEBUG
  HistogramInfo* heap_histograms() { return heap_histograms_; }

  JSObject::SpillInformation* js_spill_information() {
    return &js_spill_information_;
  }
#endif

  // The Factory is a stateless view over this isolate: the cast reinterprets
  // the Isolate pointer itself as the Factory.
  Factory* factory() { return reinterpret_cast<Factory*>(this); }

  static const int kJSRegexpStaticOffsetsVectorSize = 128;

  THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)

  THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)

  // Embedder-provided data slots, exposed through the public v8::Isolate API.
  void SetData(uint32_t slot, void* data) {
    DCHECK(slot < Internals::kNumIsolateDataSlots);
    embedder_data_[slot] = data;
  }
  void* GetData(uint32_t slot) {
    DCHECK(slot < Internals::kNumIsolateDataSlots);
    return embedder_data_[slot];
  }

  bool serializer_enabled() const { return serializer_enabled_; }
  bool snapshot_available() const {
    return snapshot_blob_ != NULL && snapshot_blob_->raw_size != 0;
  }

  bool IsDead() { return has_fatal_error_; }
  void SignalFatalError() { has_fatal_error_ = true; }

  bool use_crankshaft() const;

  bool initialized_from_snapshot() { return initialized_from_snapshot_; }

  double time_millis_since_init() {
    return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
  }

  DateCache* date_cache() {
    return date_cache_;
  }

  // Takes ownership of the new cache; the previous cache is deleted unless
  // the very same object is passed in again.
  void set_date_cache(DateCache* date_cache) {
    if (date_cache != date_cache_) {
      delete date_cache_;
    }
    date_cache_ = date_cache;
  }

  Map* get_initial_js_array_map(ElementsKind kind);

  // Values stored in the protector cells below: kProtectorValid while the
  // fast-path assumption still holds, kProtectorInvalid once it is broken.
  static const int kProtectorValid = 1;
  static const int kProtectorInvalid = 0;

  bool IsFastArrayConstructorPrototypeChainIntact();
  inline bool IsArraySpeciesLookupChainIntact();
  inline bool IsHasInstanceLookupChainIntact();
  bool IsIsConcatSpreadableLookupChainIntact();
  bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
  inline bool IsStringLengthOverflowIntact();
  inline bool IsArrayIteratorLookupChainIntact();

  // Avoid deopt loops if fast Array Iterators migrate to slow Array Iterators.
  inline bool IsFastArrayIterationIntact();

  // On intent to set an element in object, make sure that appropriate
  // notifications occur if the set is on the elements of the array or
  // object prototype. Also ensure that changes to prototype chain between
  // Array and Object fire notifications.
  void UpdateArrayProtectorOnSetElement(Handle<JSObject> object);
  void UpdateArrayProtectorOnSetLength(Handle<JSObject> object) {
    UpdateArrayProtectorOnSetElement(object);
  }
  void UpdateArrayProtectorOnSetPrototype(Handle<JSObject> object) {
    UpdateArrayProtectorOnSetElement(object);
  }
  void UpdateArrayProtectorOnNormalizeElements(Handle<JSObject> object) {
    UpdateArrayProtectorOnSetElement(object);
  }
  void InvalidateArraySpeciesProtector();
  void InvalidateHasInstanceProtector();
  void InvalidateIsConcatSpreadableProtector();
  void InvalidateStringLengthOverflowProtector();
  void InvalidateArrayIteratorProtector();

  // Returns true if array is the initial array prototype in any native context.
  bool IsAnyInitialArrayPrototype(Handle<JSArray> array);

  CallInterfaceDescriptorData* call_descriptor_data(int index);

  AccessCompilerData* access_compiler_data() { return access_compiler_data_; }

  void IterateDeferredHandles(ObjectVisitor* visitor);
  void LinkDeferredHandles(DeferredHandles* deferred_handles);
  void UnlinkDeferredHandles(DeferredHandles* deferred_handles);

#ifdef DEBUG
  bool IsDeferredHandle(Object** location);
#endif  // DEBUG

  bool concurrent_recompilation_enabled() {
    // Thread is only available with flag enabled.
    DCHECK(optimizing_compile_dispatcher_ == NULL ||
           FLAG_concurrent_recompilation);
    return optimizing_compile_dispatcher_ != NULL;
  }

  OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
    return optimizing_compile_dispatcher_;
  }

  int id() const { return static_cast<int>(id_); }

  HStatistics* GetHStatistics();
  CompilationStatistics* GetTurboStatistics();
  HTracer* GetHTracer();
  CodeTracer* GetCodeTracer();

  void DumpAndResetCompilationStats();

  FunctionEntryHook function_entry_hook() { return function_entry_hook_; }
  void set_function_entry_hook(FunctionEntryHook function_entry_hook) {
    function_entry_hook_ = function_entry_hook;
  }

  void* stress_deopt_count_address() { return &stress_deopt_count_; }

  V8_EXPORT_PRIVATE base::RandomNumberGenerator* random_number_generator();

  // Generates a random number that is non-zero when masked
  // with the provided mask.
  int GenerateIdentityHash(uint32_t mask);

  // Given an address occupied by a live code object, return that object.
  Object* FindCodeObject(Address a);

  // Returns a fresh optimization id; wraps back to 0 once the counter would
  // leave Smi range.
  int NextOptimizationId() {
    int id = next_optimization_id_++;
    if (!Smi::IsValid(next_optimization_id_)) {
      next_optimization_id_ = 0;
    }
    return id;
  }

  // Get (and lazily initialize) the registry for per-isolate symbols.
  Handle<JSObject> GetSymbolRegistry();

  void AddCallCompletedCallback(CallCompletedCallback callback);
  void RemoveCallCompletedCallback(CallCompletedCallback callback);
  void FireCallCompletedCallback();

  void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  inline void FireBeforeCallEnteredCallback();

  void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
  void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
  void FireMicrotasksCompletedCallback();

  void SetPromiseRejectCallback(PromiseRejectCallback callback);
  void ReportPromiseReject(Handle<JSObject> promise, Handle<Object> value,
                           v8::PromiseRejectEvent event);

  void PromiseReactionJob(Handle<PromiseReactionJobInfo> info,
                          MaybeHandle<Object>* result,
                          MaybeHandle<Object>* maybe_exception);
  void PromiseResolveThenableJob(Handle<PromiseResolveThenableJobInfo> info,
                                 MaybeHandle<Object>* result,
                                 MaybeHandle<Object>* maybe_exception);
  void EnqueueMicrotask(Handle<Object> microtask);
  void RunMicrotasks();
  bool IsRunningMicrotasks() const { return is_running_microtasks_; }
  int GetNextDebugMicrotaskId() { return debug_microtask_count_++; }

  void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
  void CountUsage(v8::Isolate::UseCounterFeature feature);

  BasicBlockProfiler* GetOrCreateBasicBlockProfiler();
  BasicBlockProfiler* basic_block_profiler() { return basic_block_profiler_; }

  std::string GetTurboCfgFileName();

#if TRACE_MAPS
  int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
#endif

  // Support for dynamically disabling tail call elimination.
  Address is_tail_call_elimination_enabled_address() {
    return reinterpret_cast<Address>(&is_tail_call_elimination_enabled_);
  }
  bool is_tail_call_elimination_enabled() const {
    return is_tail_call_elimination_enabled_;
  }
  void SetTailCallEliminationEnabled(bool enabled);

  void AddDetachedContext(Handle<Context> context);
  void CheckDetachedContextsAfterGC();

  List<Object*>* partial_snapshot_cache() { return &partial_snapshot_cache_; }

  void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
    array_buffer_allocator_ = allocator;
  }
  v8::ArrayBuffer::Allocator* array_buffer_allocator() const {
    return array_buffer_allocator_;
  }

  FutexWaitListNode* futex_wait_list_node() { return &futex_wait_list_node_; }

  CancelableTaskManager* cancelable_task_manager() {
    return cancelable_task_manager_;
  }

  interpreter::Interpreter* interpreter() const { return interpreter_; }

  AccountingAllocator* allocator() { return allocator_; }

  CompilerDispatcherTracer* compiler_dispatcher_tracer() const {
    return compiler_dispatcher_tracer_;
  }

  bool IsInAnyContext(Object* object, uint32_t index);

  void SetRAILMode(RAILMode rail_mode);

  void IsolateInForegroundNotification();

  void IsolateInBackgroundNotification();

  bool IsIsolateInBackground() { return is_isolate_in_background_; }

  PRINTF_FORMAT(2, 3) void PrintWithTimestamp(const char* format, ...);

#ifdef USE_SIMULATOR
  base::Mutex* simulator_i_cache_mutex() { return &simulator_i_cache_mutex_; }
#endif

 protected:
  explicit Isolate(bool enable_serializer);
  bool IsArrayOrObjectPrototype(Object* object);

 private:
  friend struct GlobalState;
  friend struct InitializeGlobalState;
  Handle<JSObject> SetUpSubregistry(Handle<JSObject> registry, Handle<Map> map,
                                    const char* name);

  // These fields are accessed through the API, offsets must be kept in sync
  // with v8::internal::Internals (in include/v8.h) constants. This is also
  // verified in Isolate::Init() using runtime checks.
  void* embedder_data_[Internals::kNumIsolateDataSlots];
  Heap heap_;

  // The per-process lock should be acquired before the ThreadDataTable is
  // modified.
  class ThreadDataTable {
   public:
    ThreadDataTable();
    ~ThreadDataTable();

    PerIsolateThreadData* Lookup(Isolate* isolate, ThreadId thread_id);
    void Insert(PerIsolateThreadData* data);
    void Remove(PerIsolateThreadData* data);
    void RemoveAllThreads(Isolate* isolate);

   private:
    PerIsolateThreadData* list_;
  };

  // These items form a stack synchronously with threads Enter'ing and Exit'ing
  // the Isolate. The top of the stack points to a thread which is currently
  // running the Isolate. When the stack is empty, the Isolate is considered
  // not entered by any thread and can be Disposed.
  // If the same thread enters the Isolate more than once, the entry_count_
  // is incremented rather than a new item pushed to the stack.
  class EntryStackItem {
   public:
    EntryStackItem(PerIsolateThreadData* previous_thread_data,
                   Isolate* previous_isolate,
                   EntryStackItem* previous_item)
        : entry_count(1),
          previous_thread_data(previous_thread_data),
          previous_isolate(previous_isolate),
          previous_item(previous_item) { }

    int entry_count;
    PerIsolateThreadData* previous_thread_data;
    Isolate* previous_isolate;
    EntryStackItem* previous_item;

   private:
    DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
  };

  static base::LazyMutex thread_data_table_mutex_;

  static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
  static base::Thread::LocalStorageKey isolate_key_;
  static base::Thread::LocalStorageKey thread_id_key_;
  static ThreadDataTable* thread_data_table_;

  // A global counter for all generated Isolates, might overflow.
  static base::Atomic32 isolate_counter_;

#if DEBUG
  static base::Atomic32 isolate_key_created_;
#endif

  void Deinit();

  static void SetIsolateThreadLocals(Isolate* isolate,
                                     PerIsolateThreadData* data);

  // Find the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, allocate a new one.
  PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();

  // Initializes the current thread to run this Isolate.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time, this should be prevented using external locking.
  void Enter();

  // Exits the current thread. The previously entered Isolate is restored
  // for the thread.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time, this should be prevented using external locking.
  void Exit();

  void InitializeThreadLocal();

  void MarkCompactPrologue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);
  void MarkCompactEpilogue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);

  void FillCache();

  // Propagate pending exception message to the v8::TryCatch.
  // If there is no external try-catch or message was successfully propagated,
  // then return true.
  bool PropagatePendingExceptionToExternalTryCatch();

  // Remove per-frame stored materialized objects when we are unwinding
  // the frame.
  void RemoveMaterializedObjectsOnUnwind(StackFrame* frame);

  void RunMicrotasksInternal();

  const char* RAILModeName(RAILMode rail_mode) const {
    switch (rail_mode) {
      case PERFORMANCE_RESPONSE:
        return "RESPONSE";
      case PERFORMANCE_ANIMATION:
        return "ANIMATION";
      case PERFORMANCE_IDLE:
        return "IDLE";
      case PERFORMANCE_LOAD:
        return "LOAD";
    }
    // All enumerators are handled above; this keeps compilers that require a
    // return on every path happy.
    return "";
  }

  // TODO(alph): Remove along with the deprecated GetCpuProfiler().
  friend v8::CpuProfiler* v8::Isolate::GetCpuProfiler();
  CpuProfiler* cpu_profiler() const { return cpu_profiler_; }

  base::Atomic32 id_;
  EntryStackItem* entry_stack_;
  int stack_trace_nesting_level_;
  StringStream* incomplete_message_;
  Address isolate_addresses_[kIsolateAddressCount + 1];  // NOLINT
  Bootstrapper* bootstrapper_;
  RuntimeProfiler* runtime_profiler_;
  CompilationCache* compilation_cache_;
  Counters* counters_;
  base::RecursiveMutex break_access_;
  Logger* logger_;
  StackGuard stack_guard_;
  StatsTable* stats_table_;
  StubCache* load_stub_cache_;
  StubCache* store_stub_cache_;
  CodeAgingHelper* code_aging_helper_;
  DeoptimizerData* deoptimizer_data_;
  bool deoptimizer_lazy_throw_;
  MaterializedObjectStore* materialized_object_store_;
  ThreadLocalTop thread_local_top_;
  bool capture_stack_trace_for_uncaught_exceptions_;
  int stack_trace_for_uncaught_exceptions_frame_limit_;
  StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
  ContextSlotCache* context_slot_cache_;
  DescriptorLookupCache* descriptor_lookup_cache_;
  HandleScopeData handle_scope_data_;
  HandleScopeImplementer* handle_scope_implementer_;
  UnicodeCache* unicode_cache_;
  AccountingAllocator* allocator_;
  InnerPointerToCodeCache* inner_pointer_to_code_cache_;
  GlobalHandles* global_handles_;
  EternalHandles* eternal_handles_;
  ThreadManager* thread_manager_;
  RuntimeState runtime_state_;
  Builtins builtins_;
  bool has_installed_extensions_;
  unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
  unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
  unibrow::Mapping<unibrow::Ecma262Canonicalize>
      regexp_macro_assembler_canonicalize_;
  RegExpStack* regexp_stack_;
  List<int> regexp_indices_;
  DateCache* date_cache_;
  CallInterfaceDescriptorData* call_descriptor_data_;
  AccessCompilerData* access_compiler_data_;
  base::RandomNumberGenerator* random_number_generator_;
  base::AtomicValue<RAILMode> rail_mode_;

  // Whether the isolate has been created for snapshotting.
  bool serializer_enabled_;

  // True if fatal error has been signaled for this isolate.
  bool has_fatal_error_;

  // True if this isolate was initialized from a snapshot.
  bool initialized_from_snapshot_;

  // True if ES2015 tail call elimination feature is enabled.
  bool is_tail_call_elimination_enabled_;

  // True if the isolate is in background. This flag is used
  // to prioritize between memory usage and latency.
  bool is_isolate_in_background_;

  // Time stamp at initialization.
  double time_millis_at_init_;

#ifdef DEBUG
  // A static array of histogram info for each type.
  HistogramInfo heap_histograms_[LAST_TYPE + 1];
  JSObject::SpillInformation js_spill_information_;
#endif

  Debug* debug_;
  CpuProfiler* cpu_profiler_;
  HeapProfiler* heap_profiler_;
  std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;
  FunctionEntryHook function_entry_hook_;

  interpreter::Interpreter* interpreter_;

  CompilerDispatcherTracer* compiler_dispatcher_tracer_;

  typedef std::pair<InterruptCallback, void*> InterruptEntry;
  std::queue<InterruptEntry> api_interrupts_queue_;

  // Backing storage for the GLOBAL_ACCESSOR members defined above.
#define GLOBAL_BACKING_STORE(type, name, initialvalue) \
  type name##_;
  ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
#undef GLOBAL_BACKING_STORE

  // Backing storage for the GLOBAL_ARRAY_ACCESSOR members defined above.
#define GLOBAL_ARRAY_BACKING_STORE(type, name, length) \
  type name##_[length];
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
#undef GLOBAL_ARRAY_BACKING_STORE

#ifdef DEBUG
  // This class is huge and has a number of fields controlled by
  // preprocessor defines. Make sure the offsets of these fields agree
  // between compilation units.
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
  static const intptr_t name##_debug_offset_;
  ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
  ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif

  DeferredHandles* deferred_handles_head_;
  OptimizingCompileDispatcher* optimizing_compile_dispatcher_;

  // Counts deopt points if deopt_every_n_times is enabled.
  unsigned int stress_deopt_count_;

  int next_optimization_id_;

#if TRACE_MAPS
  int next_unique_sfi_id_;
#endif

  // List of callbacks before a Call starts execution.
  List<BeforeCallEnteredCallback> before_call_entered_callbacks_;

  // List of callbacks when a Call completes.
  List<CallCompletedCallback> call_completed_callbacks_;

  // List of callbacks after microtasks were run.
  List<MicrotasksCompletedCallback> microtasks_completed_callbacks_;
  bool is_running_microtasks_;

  v8::Isolate::UseCounterCallback use_counter_callback_;
  BasicBlockProfiler* basic_block_profiler_;

  List<Object*> partial_snapshot_cache_;

  v8::ArrayBuffer::Allocator* array_buffer_allocator_;

  FutexWaitListNode futex_wait_list_node_;

  CancelableTaskManager* cancelable_task_manager_;

  v8::Isolate::AbortOnUncaughtExceptionCallback
      abort_on_uncaught_exception_callback_;

#ifdef USE_SIMULATOR
  base::Mutex simulator_i_cache_mutex_;
#endif

  friend class ExecutionAccess;
  friend class HandleScopeImplementer;
  friend class OptimizingCompileDispatcher;
  friend class SweeperThread;
  friend class ThreadManager;
  friend class Simulator;
  friend class StackGuard;
  friend class ThreadId;
  friend class v8::Isolate;
  friend class v8::Locker;
  friend class v8::Unlocker;
  friend class v8::SnapshotCreator;
  friend v8::StartupData v8::V8::CreateSnapshotDataBlob(const char*);
  friend v8::StartupData v8::V8::WarmUpSnapshotDataBlob(v8::StartupData,
                                                        const char*);

  DISALLOW_COPY_AND_ASSIGN(Isolate);
};


#undef FIELD_ACCESSOR
#undef THREAD_LOCAL_TOP_ACCESSOR


// One link in a singly-linked chain of promises: stores a promise handle and
// a pointer to the previous top of the chain. Lifetime/ownership of the links
// is managed by the code that pushes and pops them.
class PromiseOnStack {
 public:
  PromiseOnStack(Handle<JSObject> promise, PromiseOnStack* prev)
      : promise_(promise), prev_(prev) {}
  Handle<JSObject> promise() { return promise_; }
  PromiseOnStack* prev() { return prev_; }

 private:
  Handle<JSObject> promise_;
  PromiseOnStack* prev_;
};


// Scope object that saves the isolate's current context (and C entry frame
// pointer) on construction and restores it on destruction; instances form a
// chain via prev().
// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
// class as a work around for a bug in the generated code found with these
// versions of GCC. See V8 issue 122 for details.
class SaveContext BASE_EMBEDDED {
 public:
  explicit SaveContext(Isolate* isolate);
  ~SaveContext();

  Handle<Context> context() { return context_; }
  SaveContext* prev() { return prev_; }

  // Returns true if this save context is below a given JavaScript frame.
  // A zero c_entry_fp_ means no C entry frame was recorded at save time.
  bool IsBelowFrame(StandardFrame* frame) {
    return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
  }

 private:
  Isolate* const isolate_;
  Handle<Context> context_;
  SaveContext* const prev_;
  Address c_entry_fp_;
};


// Debug-only scope asserting that the isolate's current context is unchanged
// for the scope's lifetime; compiles to an empty object in release builds.
class AssertNoContextChange BASE_EMBEDDED {
#ifdef DEBUG
 public:
  explicit AssertNoContextChange(Isolate* isolate);
  ~AssertNoContextChange() {
    DCHECK(isolate_->context() == *context_);
  }

 private:
  Isolate* isolate_;
  Handle<Context> context_;
#else
 public:
  explicit AssertNoContextChange(Isolate* isolate) { }
#endif
};


// RAII scope holding the isolate's break access lock for its lifetime.
// The static Lock/Unlock/TryLock helpers expose the same lock directly.
class ExecutionAccess BASE_EMBEDDED {
 public:
  explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
    Lock(isolate);
  }
  ~ExecutionAccess() { Unlock(isolate_); }

  static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
  static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }

  static bool TryLock(Isolate* isolate) {
    return isolate->break_access()->TryLock();
  }

 private:
  Isolate* isolate_;
};


// Support for checking for stack-overflows.
class StackLimitCheck BASE_EMBEDDED {
 public:
  explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }

  // Use this to check for stack-overflows in C++ code.
  // Compares the real stack limit, which interrupts cannot lower.
  bool HasOverflowed() const {
    StackGuard* stack_guard = isolate_->stack_guard();
    return GetCurrentStackPosition() < stack_guard->real_climit();
  }

  // Use this to check for interrupt request in C++ code.
  // climit() may be artificially lowered to signal a pending interrupt.
  bool InterruptRequested() {
    StackGuard* stack_guard = isolate_->stack_guard();
    return GetCurrentStackPosition() < stack_guard->climit();
  }

  // Use this to check for stack-overflow when entering runtime from JS code.
  bool JsHasOverflowed(uintptr_t gap = 0) const;

 private:
  Isolate* isolate_;
};

// On C++ stack overflow, throws a RangeError (kStackOverflow) on the given
// isolate and returns result_value from the enclosing function.
#define STACK_CHECK(isolate, result_value)                \
  do {                                                    \
    StackLimitCheck stack_check(isolate);                 \
    if (stack_check.HasOverflowed()) {                    \
      isolate->Throw(*isolate->factory()->NewRangeError(  \
          MessageTemplate::kStackOverflow));              \
      return result_value;                                \
    }                                                     \
  } while (false)

// Support for temporarily postponing interrupts. When the outermost
// postpone scope is left the interrupts will be re-enabled and any
// interrupts that occurred while in the scope will be taken into
// account.
class PostponeInterruptsScope BASE_EMBEDDED {
 public:
  PostponeInterruptsScope(Isolate* isolate,
                          int intercept_mask = StackGuard::ALL_INTERRUPTS)
      : stack_guard_(isolate->stack_guard()),
        intercept_mask_(intercept_mask),
        intercepted_flags_(0) {
    stack_guard_->PushPostponeInterruptsScope(this);
  }

  ~PostponeInterruptsScope() {
    stack_guard_->PopPostponeInterruptsScope();
  }

  // Find the bottom-most scope that intercepts this interrupt.
  // Return whether the interrupt has been intercepted.
  bool Intercept(StackGuard::InterruptFlag flag);

 private:
  StackGuard* stack_guard_;
  int intercept_mask_;
  int intercepted_flags_;
  PostponeInterruptsScope* prev_;

  friend class StackGuard;
};


// Sends code tracing output either to stdout or, when
// FLAG_redirect_code_traces is set, to a per-isolate file (named
// "code-<pid>-<isolate id>.asm" unless overridden by
// FLAG_redirect_code_traces_to).
class CodeTracer final : public Malloced {
 public:
  explicit CodeTracer(int isolate_id)
      : file_(NULL),
        scope_depth_(0) {
    if (!ShouldRedirect()) {
      file_ = stdout;
      return;
    }

    if (FLAG_redirect_code_traces_to == NULL) {
      SNPrintF(filename_,
               "code-%d-%d.asm",
               base::OS::GetCurrentProcessId(),
               isolate_id);
    } else {
      StrNCpy(filename_, FLAG_redirect_code_traces_to, filename_.length());
    }

    // Write zero bytes to the trace file up front — presumably to create or
    // truncate it before the first append; confirm WriteChars semantics.
    WriteChars(filename_.start(), "", 0, false);
  }

  // Keeps the trace file open for the duration of the scope; scopes nest.
  class Scope {
   public:
    explicit Scope(CodeTracer* tracer) : tracer_(tracer) { tracer->OpenFile(); }
    ~Scope() { tracer_->CloseFile(); }

    FILE* file() const { return tracer_->file(); }

   private:
    CodeTracer* tracer_;
  };

  // Opens the redirect file in append mode on first use; no-op when output
  // goes to stdout.
  void OpenFile() {
    if (!ShouldRedirect()) {
      return;
    }

    if (file_ == NULL) {
      file_ = base::OS::FOpen(filename_.start(), "ab");
    }

    scope_depth_++;
  }

  // Closes the redirect file once the outermost scope exits; no-op when
  // output goes to stdout.
  void CloseFile() {
    if (!ShouldRedirect()) {
      return;
    }

    if (--scope_depth_ == 0) {
      fclose(file_);
      file_ = NULL;
    }
  }

  FILE* file() const { return file_; }

 private:
  static bool ShouldRedirect() {
    return FLAG_redirect_code_traces;
  }

  EmbeddedVector<char, 128> filename_;
  FILE* file_;
  int scope_depth_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_ISOLATE_H_