// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_GLOBALS_H_
#define V8_GLOBALS_H_

#include <stddef.h>
#include <stdint.h>

#include <ostream>

#include "src/base/build_config.h"
#include "src/base/logging.h"
#include "src/base/macros.h"

// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
// warning flag and certain versions of GCC due to a bug:
// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
// For now, we use the more involved template-based version from <limits>, but
// only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x)
#if V8_CC_GNU && V8_GNUC_PREREQ(2, 96, 0) && !V8_GNUC_PREREQ(4, 1, 0)
# include <limits>  // NOLINT
# define V8_INFINITY std::numeric_limits<double>::infinity()
#elif V8_LIBC_MSVCRT
# define V8_INFINITY HUGE_VAL
#elif V8_OS_AIX
# define V8_INFINITY (__builtin_inff())
#else
# define V8_INFINITY INFINITY
#endif

namespace v8 {

namespace base {
class Mutex;
class RecursiveMutex;
class VirtualMemory;
}

namespace internal {

// Determine whether we are running in a simulated environment.
// Setting USE_SIMULATOR explicitly from the build script will force
// the use of a simulated environment.
#if !defined(USE_SIMULATOR)
#if (V8_TARGET_ARCH_ARM64 && !V8_HOST_ARCH_ARM64)
#define USE_SIMULATOR 1
#endif
#if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
#define USE_SIMULATOR 1
#endif
#if (V8_TARGET_ARCH_PPC && !V8_HOST_ARCH_PPC)
#define USE_SIMULATOR 1
#endif
#if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS)
#define USE_SIMULATOR 1
#endif
#if (V8_TARGET_ARCH_MIPS64 && !V8_HOST_ARCH_MIPS64)
#define USE_SIMULATOR 1
#endif
#endif

// Determine whether the architecture uses an embedded constant pool
// (a contiguous constant pool embedded in the code object).
#if V8_TARGET_ARCH_PPC
#define V8_EMBEDDED_CONSTANT_POOL 1
#else
#define V8_EMBEDDED_CONSTANT_POOL 0
#endif

#ifdef V8_TARGET_ARCH_ARM
// Set the stack limit lower for ARM than for other architectures because
// a stack-allocated MacroAssembler takes 120K bytes.
// See issue crbug.com/405338
#define V8_DEFAULT_STACK_SIZE_KB 864
#else
// Slightly less than 1MB, since Windows' default stack size for
// the main execution thread is 1MB for both 32-bit and 64-bit.
#define V8_DEFAULT_STACK_SIZE_KB 984
#endif


// Determine whether the double field unboxing feature is enabled.
#if V8_TARGET_ARCH_64_BIT
#define V8_DOUBLE_FIELDS_UNBOXING 1
#else
#define V8_DOUBLE_FIELDS_UNBOXING 0
#endif


typedef uint8_t byte;
typedef byte* Address;

// -----------------------------------------------------------------------------
// Constants

const int KB = 1024;
const int MB = KB * KB;
const int GB = KB * KB * KB;
const int kMaxInt = 0x7FFFFFFF;
const int kMinInt = -kMaxInt - 1;
const int kMaxInt8 = (1 << 7) - 1;
const int kMinInt8 = -(1 << 7);
const int kMaxUInt8 = (1 << 8) - 1;
const int kMinUInt8 = 0;
const int kMaxInt16 = (1 << 15) - 1;
const int kMinInt16 = -(1 << 15);
const int kMaxUInt16 = (1 << 16) - 1;
const int kMinUInt16 = 0;

const uint32_t kMaxUInt32 = 0xFFFFFFFFu;

const int kCharSize      = sizeof(char);      // NOLINT
const int kShortSize     = sizeof(short);     // NOLINT
const int kIntSize       = sizeof(int);       // NOLINT
const int kInt32Size     = sizeof(int32_t);   // NOLINT
const int kInt64Size     = sizeof(int64_t);   // NOLINT
const int kFloatSize     = sizeof(float);     // NOLINT
const int kDoubleSize    = sizeof(double);    // NOLINT
const int kIntptrSize    = sizeof(intptr_t);  // NOLINT
const int kPointerSize   = sizeof(void*);     // NOLINT
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
const int kRegisterSize  = kPointerSize + kPointerSize;
#else
const int kRegisterSize  = kPointerSize;
#endif
const int kPCOnStackSize = kRegisterSize;
const int kFPOnStackSize = kRegisterSize;

const int kDoubleSizeLog2 = 3;

#if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3;
const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
const uintptr_t kUintptrAllBitsSet = V8_UINT64_C(0xFFFFFFFFFFFFFFFF);
const bool kRequiresCodeRange = true;
#if V8_TARGET_ARCH_MIPS64
// To use pseudo-relative jumps such as the j/jal instructions, which have a
// 28-bit encoded immediate, the addresses have to lie within a 256 MB aligned
// region. Used only for large object space.
const size_t kMaximalCodeRangeSize = 256 * MB;
#else
const size_t kMaximalCodeRangeSize = 512 * MB;
#endif
#if V8_OS_WIN
const size_t kMinimumCodeRangeSize = 4 * MB;
const size_t kReservedCodeRangePages = 1;
#else
const size_t kMinimumCodeRangeSize = 3 * MB;
const size_t kReservedCodeRangePages = 0;
#endif
#else
const int kPointerSizeLog2 = 2;
const intptr_t kIntptrSignBit = 0x80000000;
const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
// The x32 port also requires a code range.
const bool kRequiresCodeRange = true;
const size_t kMaximalCodeRangeSize = 256 * MB;
const size_t kMinimumCodeRangeSize = 3 * MB;
const size_t kReservedCodeRangePages = 0;
#else
const bool kRequiresCodeRange = false;
const size_t kMaximalCodeRangeSize = 0 * MB;
const size_t kMinimumCodeRangeSize = 0 * MB;
const size_t kReservedCodeRangePages = 0;
#endif
#endif

STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));

const int kBitsPerByte = 8;
const int kBitsPerByteLog2 = 3;
const int kBitsPerPointer = kPointerSize * kBitsPerByte;
const int kBitsPerInt = kIntSize * kBitsPerByte;

// IEEE 754 single precision floating point number bit layout.
const uint32_t kBinary32SignMask = 0x80000000u;
const uint32_t kBinary32ExponentMask = 0x7f800000u;
const uint32_t kBinary32MantissaMask = 0x007fffffu;
const int kBinary32ExponentBias = 127;
const int kBinary32MaxExponent  = 0xFE;
const int kBinary32MinExponent  = 0x01;
const int kBinary32MantissaBits = 23;
const int kBinary32ExponentShift = 23;

// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
// other bits set.
const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
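// For reference, the mask above evaluates to 0x7ff8000000000000: the eleven
// exponent bits (62..52) plus the top mantissa bit (51).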

// Latin1/UTF-16 constants
// Code-point values in Unicode 4.0 are 21 bits wide.
// Code units in UTF-16 are 16 bits wide.
typedef uint16_t uc16;
typedef int32_t uc32;
const int kOneByteSize    = kCharSize;
const int kUC16Size       = sizeof(uc16);      // NOLINT

// 128 bit SIMD value size.
const int kSimd128Size = 16;

// Round up n to be a multiple of sz, where sz is a power of 2.
#define ROUND_UP(n, sz) (((n) + ((sz) - 1)) & ~((sz) - 1))
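// For example, ROUND_UP(37, 16) expands to ((37 + 15) & ~15), which is 48;
// values that are already multiples of sz are left unchanged.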


// FUNCTION_ADDR(f) gets the address of a C function f.
#define FUNCTION_ADDR(f)                                        \
  (reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(f)))


// FUNCTION_CAST<F>(addr) casts an address into a function
// of type F. Used to invoke generated code from within C.
template <typename F>
F FUNCTION_CAST(Address addr) {
  return reinterpret_cast<F>(reinterpret_cast<intptr_t>(addr));
}
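// Illustrative usage (a sketch, not code from this header); the function type
// and the 'entry' variable are assumptions for the example:
//
//   typedef int32_t (*SimpleFunction)(int32_t arg);
//   Address entry = ...;  // entry point of some generated code
//   SimpleFunction fun = FUNCTION_CAST<SimpleFunction>(entry);
//   int32_t result = fun(42);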


// Determine whether the architecture uses function descriptors
// which provide a level of indirection between the function pointer
// and the function entrypoint.
#if V8_HOST_ARCH_PPC && \
    (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN))
#define USES_FUNCTION_DESCRIPTORS 1
#define FUNCTION_ENTRYPOINT_ADDRESS(f)       \
  (reinterpret_cast<v8::internal::Address*>( \
      &(reinterpret_cast<intptr_t*>(f)[0])))
#else
#define USES_FUNCTION_DESCRIPTORS 0
#endif


// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
// (sorted alphabetically)

class FreeStoreAllocationPolicy;
template <typename T, class P = FreeStoreAllocationPolicy> class List;

// -----------------------------------------------------------------------------
// Declarations for use in both the preparser and the rest of V8.

// The Strict Mode (ECMA-262 5th edition, 4.2.2).

enum LanguageMode {
  // LanguageMode is expressed as a bitmask. Descriptions of the bits:
  STRICT_BIT = 1 << 0,
  STRONG_BIT = 1 << 1,
  LANGUAGE_END,

  // Shorthands for some common language modes.
  SLOPPY = 0,
  STRICT = STRICT_BIT,
  STRONG = STRICT_BIT | STRONG_BIT
};


inline std::ostream& operator<<(std::ostream& os, const LanguageMode& mode) {
  switch (mode) {
    case SLOPPY:
      return os << "sloppy";
    case STRICT:
      return os << "strict";
    case STRONG:
      return os << "strong";
    default:
      return os << "unknown";
  }
}


inline bool is_sloppy(LanguageMode language_mode) {
  return (language_mode & STRICT_BIT) == 0;
}


inline bool is_strict(LanguageMode language_mode) {
  return language_mode & STRICT_BIT;
}


inline bool is_strong(LanguageMode language_mode) {
  return language_mode & STRONG_BIT;
}


inline bool is_valid_language_mode(int language_mode) {
  return language_mode == SLOPPY || language_mode == STRICT ||
         language_mode == STRONG;
}


inline LanguageMode construct_language_mode(bool strict_bit, bool strong_bit) {
  int language_mode = 0;
  if (strict_bit) language_mode |= STRICT_BIT;
  if (strong_bit) language_mode |= STRONG_BIT;
  DCHECK(is_valid_language_mode(language_mode));
  return static_cast<LanguageMode>(language_mode);
}
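// For example:
//   construct_language_mode(false, false) == SLOPPY
//   construct_language_mode(true, false)  == STRICT
//   construct_language_mode(true, true)   == STRONG
// construct_language_mode(false, true) would trip the DCHECK above, since
// strong mode implies strict mode (STRONG sets both bits).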


// Strong mode behaviour must sometimes be signalled by a two valued enum where
// caching is involved, to prevent sloppy and strict mode from being incorrectly
// differentiated.
enum class Strength : bool {
  WEAK,   // sloppy, strict behaviour
  STRONG  // strong behaviour
};


inline bool is_strong(Strength strength) {
  return strength == Strength::STRONG;
}


inline std::ostream& operator<<(std::ostream& os, const Strength& strength) {
  return os << (is_strong(strength) ? "strong" : "weak");
}


inline Strength strength(LanguageMode language_mode) {
  return is_strong(language_mode) ? Strength::STRONG : Strength::WEAK;
}


inline size_t hash_value(Strength strength) {
  return static_cast<size_t>(strength);
}


// Mask for the sign bit in a smi.
const intptr_t kSmiSignMask = kIntptrSignBit;

const int kObjectAlignmentBits = kPointerSizeLog2;
const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;

// Desired alignment for pointers.
const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;

// Desired alignment for double values.
const intptr_t kDoubleAlignment = 8;
const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;

// Desired alignment for 128 bit SIMD values.
const intptr_t kSimd128Alignment = 16;
const intptr_t kSimd128AlignmentMask = kSimd128Alignment - 1;

// Desired alignment for generated code is 32 bytes (to improve cache line
// utilization).
const int kCodeAlignmentBits = 5;
const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;

// The owner field of a page is tagged with the page header tag. We need that
// to find out if a slot is part of a large object. If we mask out the lower
// 0xfffff bits (1M pages), go to the owner offset, and see that this field
// is tagged with the page header tag, we can just look up the owner.
// Otherwise, we know that we are somewhere (not within the first 1M) in a
// large object.
const int kPageHeaderTag = 3;
const int kPageHeaderTagSize = 2;
const intptr_t kPageHeaderTagMask = (1 << kPageHeaderTagSize) - 1;
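// Illustrative check (a sketch based on the comment above): a field value v
// refers to a tagged owner iff (v & kPageHeaderTagMask) == kPageHeaderTag.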


// Zap-value: The value used for zapping dead objects.
// Should be a recognizable hex value tagged as a failure.
#ifdef V8_HOST_ARCH_64_BIT
const Address kZapValue =
    reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeef));
const Address kHandleZapValue =
    reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddeaf));
const Address kGlobalHandleZapValue =
    reinterpret_cast<Address>(V8_UINT64_C(0x1baffed00baffedf));
const Address kFromSpaceZapValue =
    reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef);
const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
#else
const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
const Address kGlobalHandleZapValue = reinterpret_cast<Address>(0xbaffedf);
const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
const uint32_t kSlotsZapValue = 0xbeefdeef;
const uint32_t kDebugZapValue = 0xbadbaddb;
const uint32_t kFreeListZapValue = 0xfeed1eaf;
#endif

const int kCodeZapValue = 0xbadc0de;
const uint32_t kPhantomReferenceZap = 0xca11bac;

// On Intel architectures, the cache line size is 64 bytes.
// On ARM it may be less (32 bytes), but as far as this constant is
// used for aligning data, it doesn't hurt to align on a greater value.
#define PROCESSOR_CACHE_LINE_SIZE 64

// Constants relevant to double precision floating point numbers.
// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
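// For reference, this evaluates to 0x7ff80000.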


// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes

class AccessorInfo;
class Allocation;
class Arguments;
class Assembler;
class Code;
class CodeGenerator;
class CodeStub;
class Context;
class Debug;
class DebugInfo;
class Descriptor;
class DescriptorArray;
class TransitionArray;
class ExternalReference;
class FixedArray;
class FunctionTemplateInfo;
class MemoryChunk;
class SeededNumberDictionary;
class UnseededNumberDictionary;
class NameDictionary;
class GlobalDictionary;
template <typename T> class MaybeHandle;
template <typename T> class Handle;
class Heap;
class HeapObject;
class IC;
class InterceptorInfo;
class Isolate;
class JSReceiver;
class JSArray;
class JSFunction;
class JSObject;
class LargeObjectSpace;
class MacroAssembler;
class Map;
class MapSpace;
class MarkCompactCollector;
class NewSpace;
class Object;
class OldSpace;
class ParameterCount;
class Foreign;
class Scope;
class ScopeInfo;
class Script;
class Smi;
template <typename Config, class Allocator = FreeStoreAllocationPolicy>
class SplayTree;
class String;
class Symbol;
class Name;
class Struct;
class TypeFeedbackVector;
class Variable;
class RelocInfo;
class Deserializer;
class MessageLocation;

typedef bool (*WeakSlotCallback)(Object** pointer);

typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, Object** pointer);

// -----------------------------------------------------------------------------
// Miscellaneous

// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
// consecutive.
// Keep this enum in sync with the ObjectSpace enum in v8.h
enum AllocationSpace {
  NEW_SPACE,   // Semispaces collected with copying collector.
  OLD_SPACE,   // May contain pointers to new space.
  CODE_SPACE,  // No pointers to new space, marked executable.
  MAP_SPACE,   // Only and all map objects.
  LO_SPACE,    // Promoted large objects.

  FIRST_SPACE = NEW_SPACE,
  LAST_SPACE = LO_SPACE,
  FIRST_PAGED_SPACE = OLD_SPACE,
  LAST_PAGED_SPACE = MAP_SPACE
};
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;

enum AllocationAlignment {
  kWordAligned,
  kDoubleAligned,
  kDoubleUnaligned,
  kSimd128Unaligned
};

// A flag that indicates whether objects should be pretenured when
// allocated (allocated directly into the old generation) or not
// (allocated in the young generation if the object size and type
// allows).
enum PretenureFlag { NOT_TENURED, TENURED };

inline std::ostream& operator<<(std::ostream& os, const PretenureFlag& flag) {
  switch (flag) {
    case NOT_TENURED:
      return os << "NotTenured";
    case TENURED:
      return os << "Tenured";
  }
  UNREACHABLE();
  return os;
}

enum MinimumCapacity {
  USE_DEFAULT_MINIMUM_CAPACITY,
  USE_CUSTOM_MINIMUM_CAPACITY
};

enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };

enum Executability { NOT_EXECUTABLE, EXECUTABLE };

enum VisitMode {
  VISIT_ALL,
  VISIT_ALL_IN_SCAVENGE,
  VISIT_ALL_IN_SWEEP_NEWSPACE,
  VISIT_ONLY_STRONG
};

// Flag indicating whether code is built into the VM (one of the natives files).
enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };

// JavaScript defines two kinds of 'nil'.
enum NilValue { kNullValue, kUndefinedValue };

// ParseRestriction is used to restrict the set of valid statements in a
// unit of compilation.  Restriction violations cause a syntax error.
enum ParseRestriction {
  NO_PARSE_RESTRICTION,         // All expressions are allowed.
  ONLY_SINGLE_FUNCTION_LITERAL  // Only a single FunctionLiteral expression.
};

// A CodeDesc describes a buffer holding instructions and relocation
// information. The instructions start at the beginning of the buffer
// and grow forward, the relocation information starts at the end of
// the buffer and grows backward.  A constant pool may exist at the
// end of the instructions.
//
//  |<--------------- buffer_size ----------------------------------->|
//  |<------------- instr_size ---------->|        |<-- reloc_size -->|
//  |               |<- const_pool_size ->|                           |
//  +=====================================+========+==================+
//  |  instructions |        data         |  free  |    reloc info    |
//  +=====================================+========+==================+
//  ^
//  |
//  buffer

struct CodeDesc {
  byte* buffer;
  int buffer_size;
  int instr_size;
  int reloc_size;
  int constant_pool_size;
  Assembler* origin;
};
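// In other words, the diagram above implies: instr_size + reloc_size <=
// buffer_size, the relocation info occupies the last reloc_size bytes of the
// buffer, and the constant pool (if present) occupies the last
// constant_pool_size bytes of the instruction region.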


// Callback function used for checking constraints when copying/relocating
// objects. Returns true if an object can be copied/relocated from its
// old_addr to a new_addr.
typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr);


// Callback function on inline caches, used for iterating over inline caches
// in compiled code.
typedef void (*InlineCacheCallback)(Code* code, Address ic);


// State for inline cache call sites. Aliased as IC::State.
enum InlineCacheState {
  // Has never been executed.
  UNINITIALIZED,
  // Has been executed but the monomorphic state has been delayed.
  PREMONOMORPHIC,
  // Has been executed and only one receiver type has been seen.
  MONOMORPHIC,
  // Check failed due to prototype (or map deprecation).
  PROTOTYPE_FAILURE,
  // Multiple receiver types have been seen.
  POLYMORPHIC,
  // Many receiver types have been seen.
  MEGAMORPHIC,
  // A generic handler is installed and no extra typefeedback is recorded.
  GENERIC,
  // Special state for debug break or step in prepare stubs.
  DEBUG_STUB
};


enum CacheHolderFlag {
  kCacheOnPrototype,
  kCacheOnPrototypeReceiverIsDictionary,
  kCacheOnPrototypeReceiverIsPrimitive,
  kCacheOnReceiver
};


// The Store Buffer (GC).
typedef enum {
  kStoreBufferFullEvent,
  kStoreBufferStartScanningPagesEvent,
  kStoreBufferScanningPageEvent
} StoreBufferEvent;


typedef void (*StoreBufferCallback)(Heap* heap,
                                    MemoryChunk* page,
                                    StoreBufferEvent event);


// Union used for fast testing of specific double values.
union DoubleRepresentation {
  double  value;
  int64_t bits;
  DoubleRepresentation(double x) { value = x; }
  bool operator==(const DoubleRepresentation& other) const {
    return bits == other.bits;
  }
};


// Union used for customized checking of the IEEE double types
// inlined within the v8 runtime, rather than going to the underlying
// platform headers and libraries.
union IeeeDoubleLittleEndianArchType {
  double d;
  struct {
    unsigned int man_low  :32;
    unsigned int man_high :20;
    unsigned int exp      :11;
    unsigned int sign     :1;
  } bits;
};


union IeeeDoubleBigEndianArchType {
  double d;
  struct {
    unsigned int sign     :1;
    unsigned int exp      :11;
    unsigned int man_high :20;
    unsigned int man_low  :32;
  } bits;
};


// AccessorCallback
struct AccessorDescriptor {
  Object* (*getter)(Isolate* isolate, Object* object, void* data);
  Object* (*setter)(
      Isolate* isolate, JSObject* object, Object* value, void* data);
  void* data;
};


// -----------------------------------------------------------------------------
// Macros

// Testers for test.

#define HAS_SMI_TAG(value) \
  ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)
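// kSmiTag and kSmiTagMask are defined elsewhere in V8 (include/v8.h); with
// their usual values (kSmiTag == 0, kSmiTagMask == 1), HAS_SMI_TAG(value) is
// true exactly when the low bit of the tagged value is clear.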

// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer.
#define OBJECT_POINTER_ALIGN(value)                             \
  (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)

// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
#define POINTER_SIZE_ALIGN(value)                               \
  (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)

// CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
#define CODE_POINTER_ALIGN(value)                               \
  (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)

// DOUBLE_POINTER_ALIGN returns the value aligned for double pointers.
#define DOUBLE_POINTER_ALIGN(value) \
  (((value) + kDoubleAlignmentMask) & ~kDoubleAlignmentMask)
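// Worked example: when kPointerSizeLog2 is 3 (64-bit), kObjectAlignmentMask is
// 7, so OBJECT_POINTER_ALIGN(13) == 16 and OBJECT_POINTER_ALIGN(16) == 16.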


// CPU feature flags.
enum CpuFeature {
  // x86
  SSE4_1,
  SSE3,
  SAHF,
  AVX,
  FMA3,
  BMI1,
  BMI2,
  LZCNT,
  POPCNT,
  ATOM,
  // ARM
  VFP3,
  ARMv7,
  ARMv8,
  SUDIV,
  MLS,
  UNALIGNED_ACCESSES,
  MOVW_MOVT_IMMEDIATE_LOADS,
  VFP32DREGS,
  NEON,
  // MIPS, MIPS64
  FPU,
  FP64FPU,
  MIPSr1,
  MIPSr2,
  MIPSr6,
  // ARM64
  ALWAYS_ALIGN_CSP,
  COHERENT_CACHE,
  // PPC
  FPR_GPR_MOV,
  LWSYNC,
  ISELECT,
  NUMBER_OF_CPU_FEATURES
};


// Defines hints about receiver values based on structural knowledge.
enum class ConvertReceiverMode : unsigned {
  kNullOrUndefined,     // Guaranteed to be null or undefined.
  kNotNullOrUndefined,  // Guaranteed to never be null or undefined.
  kAny                  // No specific knowledge about receiver.
};

inline size_t hash_value(ConvertReceiverMode mode) {
  return bit_cast<unsigned>(mode);
}

inline std::ostream& operator<<(std::ostream& os, ConvertReceiverMode mode) {
  switch (mode) {
    case ConvertReceiverMode::kNullOrUndefined:
      return os << "NULL_OR_UNDEFINED";
    case ConvertReceiverMode::kNotNullOrUndefined:
      return os << "NOT_NULL_OR_UNDEFINED";
    case ConvertReceiverMode::kAny:
      return os << "ANY";
  }
  UNREACHABLE();
  return os;
}


// Used to specify if a macro instruction must perform a smi check on tagged
// values.
enum SmiCheckType {
  DONT_DO_SMI_CHECK,
  DO_SMI_CHECK
};


enum ScopeType {
  EVAL_SCOPE,      // The top-level scope for an eval source.
  FUNCTION_SCOPE,  // The top-level scope for a function.
  MODULE_SCOPE,    // The scope introduced by a module literal.
  SCRIPT_SCOPE,    // The top-level scope for a script or a top-level eval.
  CATCH_SCOPE,     // The scope introduced by catch.
  BLOCK_SCOPE,     // The scope introduced by a new block.
  WITH_SCOPE       // The scope introduced by with.
};

// The MIPS architecture prior to revision 5 has inverted encoding for sNaN.
#if (V8_TARGET_ARCH_MIPS && !defined(_MIPS_ARCH_MIPS32R6)) || \
    (V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6))
const uint32_t kHoleNanUpper32 = 0xFFFF7FFF;
const uint32_t kHoleNanLower32 = 0xFFFF7FFF;
#else
const uint32_t kHoleNanUpper32 = 0xFFF7FFFF;
const uint32_t kHoleNanLower32 = 0xFFF7FFFF;
#endif

const uint64_t kHoleNanInt64 =
    (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
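// For reference, this combines to 0xfff7fffffff7ffff on the non-MIPS branch
// above, and to 0xffff7fffffff7fff on the pre-r6 MIPS branch.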


// ES6 section 20.1.2.6 Number.MAX_SAFE_INTEGER
const double kMaxSafeInteger = 9007199254740991.0;  // 2^53-1


// The order of this enum has to be kept in sync with the predicates below.
enum VariableMode {
  // User declared variables:
  VAR,             // declared via 'var', and 'function' declarations

  CONST_LEGACY,    // declared via legacy 'const' declarations

  LET,             // declared via 'let' declarations (first lexical)

  CONST,           // declared via 'const' declarations

  IMPORT,          // declared via 'import' declarations (last lexical)

  // Variables introduced by the compiler:
  TEMPORARY,       // temporary variables (not user-visible), stack-allocated
                   // unless the scope as a whole has forced context allocation

  DYNAMIC,         // always require dynamic lookup (we don't know
                   // the declaration)

  DYNAMIC_GLOBAL,  // requires dynamic lookup, but we know that the
                   // variable is global unless it has been shadowed
                   // by an eval-introduced variable

  DYNAMIC_LOCAL    // requires dynamic lookup, but we know that the
                   // variable is local and where it is unless it
                   // has been shadowed by an eval-introduced
                   // variable
};


inline bool IsDynamicVariableMode(VariableMode mode) {
  return mode >= DYNAMIC && mode <= DYNAMIC_LOCAL;
}


inline bool IsDeclaredVariableMode(VariableMode mode) {
  return mode >= VAR && mode <= IMPORT;
}


inline bool IsLexicalVariableMode(VariableMode mode) {
  return mode >= LET && mode <= IMPORT;
}


inline bool IsImmutableVariableMode(VariableMode mode) {
  return mode == CONST || mode == CONST_LEGACY || mode == IMPORT;
}


enum class VariableLocation {
  // Before and during variable allocation, a variable whose location is
  // not yet determined.  After allocation, a variable looked up as a
  // property on the global object (and possibly absent).  name() is the
  // variable name, index() is invalid.
  UNALLOCATED,

  // A slot in the parameter section on the stack.  index() is the
  // parameter index, counting left-to-right.  The receiver is index -1;
  // the first parameter is index 0.
  PARAMETER,

  // A slot in the local section on the stack.  index() is the variable
  // index in the stack frame, starting at 0.
  LOCAL,

  // An indexed slot in a heap context.  index() is the variable index in
  // the context object on the heap, starting at 0.  scope() is the
  // corresponding scope.
  CONTEXT,

  // An indexed slot in a script context that contains a respective global
  // property cell.  name() is the variable name, index() is the variable
  // index in the context object on the heap, starting at 0.  scope() is the
  // corresponding script scope.
  GLOBAL,

  // A named slot in a heap context.  name() is the variable name in the
  // context object on the heap, with lookup starting at the current
  // context.  index() is invalid.
  LOOKUP
};


// ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
// and immutable bindings that can be in two states: initialized and
// uninitialized. In ES5 only immutable bindings have these two states. When
// accessing a binding, it needs to be checked for initialization. However in
// the following cases the binding is initialized immediately after creation
// so the initialization check can always be skipped:
// 1. Var declared local variables.
//      var foo;
// 2. A local variable introduced by a function declaration.
//      function foo() {}
// 3. Parameters.
//      function x(foo) {}
// 4. Catch bound variables.
//      try {} catch (foo) {}
// 5. Function variables of named function expressions.
//      var x = function foo() {}
// 6. Implicit binding of 'this'.
// 7. Implicit binding of 'arguments' in functions.
//
// ES5 specified object environment records which are introduced by ES elements
// such as Program and WithStatement that associate identifier bindings with the
// properties of some object. In the specification only mutable bindings exist
// (which may be non-writable) and have no distinct initialization step. However
// V8 allows const declarations in global code with distinct creation and
// initialization steps which are represented by non-writable properties in the
// global object. As a result, these bindings also need to be checked for
// initialization.
//
// The following enum specifies a flag that indicates if the binding needs a
// distinct initialization step (kNeedsInitialization) or if the binding is
// immediately initialized upon creation (kCreatedInitialized).
enum InitializationFlag {
  kNeedsInitialization,
  kCreatedInitialized
};


enum MaybeAssignedFlag { kNotAssigned, kMaybeAssigned };


// Serialized in PreparseData, so numeric values should not be changed.
enum ParseErrorType { kSyntaxError = 0, kReferenceError = 1 };


enum ClearExceptionFlag {
  KEEP_EXCEPTION,
  CLEAR_EXCEPTION
};


enum MinusZeroMode {
  TREAT_MINUS_ZERO_AS_ZERO,
  FAIL_ON_MINUS_ZERO
};


enum Signedness { kSigned, kUnsigned };


enum FunctionKind {
  kNormalFunction = 0,
  kArrowFunction = 1 << 0,
  kGeneratorFunction = 1 << 1,
  kConciseMethod = 1 << 2,
  kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
  kAccessorFunction = 1 << 3,
  kDefaultConstructor = 1 << 4,
  kSubclassConstructor = 1 << 5,
  kBaseConstructor = 1 << 6,
  kInObjectLiteral = 1 << 7,
  kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
  kDefaultSubclassConstructor = kDefaultConstructor | kSubclassConstructor,
  kClassConstructor =
      kBaseConstructor | kSubclassConstructor | kDefaultConstructor,
  kConciseMethodInObjectLiteral = kConciseMethod | kInObjectLiteral,
  kConciseGeneratorMethodInObjectLiteral =
      kConciseGeneratorMethod | kInObjectLiteral,
  kAccessorFunctionInObjectLiteral = kAccessorFunction | kInObjectLiteral,
};


inline bool IsValidFunctionKind(FunctionKind kind) {
  return kind == FunctionKind::kNormalFunction ||
         kind == FunctionKind::kArrowFunction ||
         kind == FunctionKind::kGeneratorFunction ||
         kind == FunctionKind::kConciseMethod ||
         kind == FunctionKind::kConciseGeneratorMethod ||
         kind == FunctionKind::kAccessorFunction ||
         kind == FunctionKind::kDefaultBaseConstructor ||
         kind == FunctionKind::kDefaultSubclassConstructor ||
         kind == FunctionKind::kBaseConstructor ||
         kind == FunctionKind::kSubclassConstructor ||
         kind == FunctionKind::kConciseMethodInObjectLiteral ||
         kind == FunctionKind::kConciseGeneratorMethodInObjectLiteral ||
         kind == FunctionKind::kAccessorFunctionInObjectLiteral;
}


inline bool IsArrowFunction(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind & FunctionKind::kArrowFunction;
}


inline bool IsGeneratorFunction(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind & FunctionKind::kGeneratorFunction;
}


inline bool IsConciseMethod(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind & FunctionKind::kConciseMethod;
}


inline bool IsAccessorFunction(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind & FunctionKind::kAccessorFunction;
}


inline bool IsDefaultConstructor(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind & FunctionKind::kDefaultConstructor;
}


inline bool IsBaseConstructor(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind & FunctionKind::kBaseConstructor;
}


inline bool IsSubclassConstructor(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind & FunctionKind::kSubclassConstructor;
}


inline bool IsClassConstructor(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind & FunctionKind::kClassConstructor;
}


inline bool IsConstructable(FunctionKind kind, LanguageMode mode) {
  if (IsAccessorFunction(kind)) return false;
  if (IsConciseMethod(kind) && !IsGeneratorFunction(kind)) return false;
  if (IsArrowFunction(kind)) return false;
  if (is_strong(mode)) return IsClassConstructor(kind);
  return true;
}


inline bool IsInObjectLiteral(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind & FunctionKind::kInObjectLiteral;
}


inline FunctionKind WithObjectLiteralBit(FunctionKind kind) {
  kind = static_cast<FunctionKind>(kind | FunctionKind::kInObjectLiteral);
  DCHECK(IsValidFunctionKind(kind));
  return kind;
}
}  // namespace internal
}  // namespace v8

namespace i = v8::internal;

#endif  // V8_GLOBALS_H_