// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
34 
35 #include "src/assembler.h"
36 
37 #include <cmath>
38 #include "src/api.h"
39 #include "src/base/cpu.h"
40 #include "src/base/functional.h"
41 #include "src/base/lazy-instance.h"
42 #include "src/base/platform/platform.h"
43 #include "src/base/utils/random-number-generator.h"
44 #include "src/builtins.h"
45 #include "src/codegen.h"
46 #include "src/counters.h"
47 #include "src/debug/debug.h"
48 #include "src/deoptimizer.h"
49 #include "src/disassembler.h"
50 #include "src/execution.h"
51 #include "src/ic/ic.h"
52 #include "src/ic/stub-cache.h"
53 #include "src/ostreams.h"
54 #include "src/parsing/token.h"
55 #include "src/profiler/cpu-profiler.h"
56 #include "src/regexp/jsregexp.h"
57 #include "src/regexp/regexp-macro-assembler.h"
58 #include "src/regexp/regexp-stack.h"
59 #include "src/register-configuration.h"
60 #include "src/runtime/runtime.h"
61 #include "src/simulator.h"  // For flushing instruction cache.
62 #include "src/snapshot/serialize.h"
63 
64 #if V8_TARGET_ARCH_IA32
65 #include "src/ia32/assembler-ia32-inl.h"  // NOLINT
66 #elif V8_TARGET_ARCH_X64
67 #include "src/x64/assembler-x64-inl.h"  // NOLINT
68 #elif V8_TARGET_ARCH_ARM64
69 #include "src/arm64/assembler-arm64-inl.h"  // NOLINT
70 #elif V8_TARGET_ARCH_ARM
71 #include "src/arm/assembler-arm-inl.h"  // NOLINT
72 #elif V8_TARGET_ARCH_PPC
73 #include "src/ppc/assembler-ppc-inl.h"  // NOLINT
74 #elif V8_TARGET_ARCH_MIPS
75 #include "src/mips/assembler-mips-inl.h"  // NOLINT
76 #elif V8_TARGET_ARCH_MIPS64
77 #include "src/mips64/assembler-mips64-inl.h"  // NOLINT
78 #elif V8_TARGET_ARCH_X87
79 #include "src/x87/assembler-x87-inl.h"  // NOLINT
80 #else
81 #error "Unknown architecture."
82 #endif
83 
84 // Include native regexp-macro-assembler.
85 #ifndef V8_INTERPRETED_REGEXP
86 #if V8_TARGET_ARCH_IA32
87 #include "src/regexp/ia32/regexp-macro-assembler-ia32.h"  // NOLINT
88 #elif V8_TARGET_ARCH_X64
89 #include "src/regexp/x64/regexp-macro-assembler-x64.h"  // NOLINT
90 #elif V8_TARGET_ARCH_ARM64
91 #include "src/regexp/arm64/regexp-macro-assembler-arm64.h"  // NOLINT
92 #elif V8_TARGET_ARCH_ARM
93 #include "src/regexp/arm/regexp-macro-assembler-arm.h"  // NOLINT
94 #elif V8_TARGET_ARCH_PPC
95 #include "src/regexp/ppc/regexp-macro-assembler-ppc.h"  // NOLINT
96 #elif V8_TARGET_ARCH_MIPS
97 #include "src/regexp/mips/regexp-macro-assembler-mips.h"  // NOLINT
98 #elif V8_TARGET_ARCH_MIPS64
99 #include "src/regexp/mips64/regexp-macro-assembler-mips64.h"  // NOLINT
100 #elif V8_TARGET_ARCH_X87
101 #include "src/regexp/x87/regexp-macro-assembler-x87.h"  // NOLINT
102 #else  // Unknown architecture.
103 #error "Unknown architecture."
104 #endif  // Target architecture.
105 #endif  // V8_INTERPRETED_REGEXP
106 
107 namespace v8 {
108 namespace internal {
109 
110 // -----------------------------------------------------------------------------
111 // Common register code.
112 
ToString()113 const char* Register::ToString() {
114   // This is the mapping of allocation indices to registers.
115   DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
116   return RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
117       ->GetGeneralRegisterName(reg_code);
118 }
119 
120 
IsAllocatable() const121 bool Register::IsAllocatable() const {
122   return ((1 << reg_code) &
123           RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
124               ->allocatable_general_codes_mask()) != 0;
125 }
126 
127 
ToString()128 const char* DoubleRegister::ToString() {
129   // This is the mapping of allocation indices to registers.
130   DCHECK(reg_code >= 0 && reg_code < kMaxNumRegisters);
131   return RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
132       ->GetDoubleRegisterName(reg_code);
133 }
134 
135 
IsAllocatable() const136 bool DoubleRegister::IsAllocatable() const {
137   return ((1 << reg_code) &
138           RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
139               ->allocatable_double_codes_mask()) != 0;
140 }
141 
142 
143 // -----------------------------------------------------------------------------
144 // Common double constants.
145 
146 struct DoubleConstant BASE_EMBEDDED {
147 double min_int;
148 double one_half;
149 double minus_one_half;
150 double negative_infinity;
151 double the_hole_nan;
152 double uint32_bias;
153 };
154 
155 static DoubleConstant double_constants;
156 
157 const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
158 
159 static bool math_exp_data_initialized = false;
160 static base::Mutex* math_exp_data_mutex = NULL;
161 static double* math_exp_constants_array = NULL;
162 static double* math_exp_log_table_array = NULL;
163 
164 // -----------------------------------------------------------------------------
165 // Implementation of AssemblerBase
166 
AssemblerBase(Isolate * isolate,void * buffer,int buffer_size)167 AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
168     : isolate_(isolate),
169       jit_cookie_(0),
170       enabled_cpu_features_(0),
171       emit_debug_code_(FLAG_debug_code),
172       predictable_code_size_(false),
173       // We may use the assembler without an isolate.
174       serializer_enabled_(isolate && isolate->serializer_enabled()),
175       constant_pool_available_(false) {
176   DCHECK_NOT_NULL(isolate);
177   if (FLAG_mask_constants_with_cookie) {
178     jit_cookie_ = isolate->random_number_generator()->NextInt();
179   }
180   own_buffer_ = buffer == NULL;
181   if (buffer_size == 0) buffer_size = kMinimalBufferSize;
182   DCHECK(buffer_size > 0);
183   if (own_buffer_) buffer = NewArray<byte>(buffer_size);
184   buffer_ = static_cast<byte*>(buffer);
185   buffer_size_ = buffer_size;
186 
187   pc_ = buffer_;
188 }
189 
190 
~AssemblerBase()191 AssemblerBase::~AssemblerBase() {
192   if (own_buffer_) DeleteArray(buffer_);
193 }
194 
195 
FlushICache(Isolate * isolate,void * start,size_t size)196 void AssemblerBase::FlushICache(Isolate* isolate, void* start, size_t size) {
197   if (size == 0) return;
198   if (CpuFeatures::IsSupported(COHERENT_CACHE)) return;
199 
200 #if defined(USE_SIMULATOR)
201   Simulator::FlushICache(isolate->simulator_i_cache(), start, size);
202 #else
203   CpuFeatures::FlushICache(start, size);
204 #endif  // USE_SIMULATOR
205 }
206 
207 
Print()208 void AssemblerBase::Print() {
209   OFStream os(stdout);
210   v8::internal::Disassembler::Decode(isolate(), &os, buffer_, pc_, nullptr);
211 }
212 
213 
214 // -----------------------------------------------------------------------------
215 // Implementation of PredictableCodeSizeScope
216 
PredictableCodeSizeScope(AssemblerBase * assembler)217 PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler)
218     : PredictableCodeSizeScope(assembler, -1) {}
219 
220 
PredictableCodeSizeScope(AssemblerBase * assembler,int expected_size)221 PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
222                                                    int expected_size)
223     : assembler_(assembler),
224       expected_size_(expected_size),
225       start_offset_(assembler->pc_offset()),
226       old_value_(assembler->predictable_code_size()) {
227   assembler_->set_predictable_code_size(true);
228 }
229 
230 
~PredictableCodeSizeScope()231 PredictableCodeSizeScope::~PredictableCodeSizeScope() {
232   // TODO(svenpanne) Remove the 'if' when everything works.
233   if (expected_size_ >= 0) {
234     CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
235   }
236   assembler_->set_predictable_code_size(old_value_);
237 }
238 
239 
240 // -----------------------------------------------------------------------------
241 // Implementation of CpuFeatureScope
242 
243 #ifdef DEBUG
CpuFeatureScope(AssemblerBase * assembler,CpuFeature f)244 CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
245     : assembler_(assembler) {
246   DCHECK(CpuFeatures::IsSupported(f));
247   old_enabled_ = assembler_->enabled_cpu_features();
248   uint64_t mask = static_cast<uint64_t>(1) << f;
249   // TODO(svenpanne) This special case below doesn't belong here!
250 #if V8_TARGET_ARCH_ARM
251   // ARMv7 is implied by VFP3.
252   if (f == VFP3) {
253     mask |= static_cast<uint64_t>(1) << ARMv7;
254   }
255 #endif
256   assembler_->set_enabled_cpu_features(old_enabled_ | mask);
257 }
258 
259 
~CpuFeatureScope()260 CpuFeatureScope::~CpuFeatureScope() {
261   assembler_->set_enabled_cpu_features(old_enabled_);
262 }
263 #endif
264 
265 
266 bool CpuFeatures::initialized_ = false;
267 unsigned CpuFeatures::supported_ = 0;
268 unsigned CpuFeatures::cache_line_size_ = 0;
269 
270 
271 // -----------------------------------------------------------------------------
272 // Implementation of Label
273 
pos() const274 int Label::pos() const {
275   if (pos_ < 0) return -pos_ - 1;
276   if (pos_ > 0) return  pos_ - 1;
277   UNREACHABLE();
278   return 0;
279 }
280 
281 
// -----------------------------------------------------------------------------
// Implementation of RelocInfoWriter and RelocIterator
//
// Relocation information is written backwards in memory, from high addresses
// towards low addresses, byte by byte.  Therefore, in the encodings listed
// below, the first byte listed is at the highest address, and successive
// bytes in the record are at progressively lower addresses.
//
// Encoding
//
// The most common modes are given single-byte encodings.  Also, it is
// easy to identify the type of reloc info and skip unwanted modes in
// an iteration.
//
// The encoding relies on the fact that there are fewer than 14
// different relocation modes using standard non-compact encoding.
//
// The first byte of a relocation record has a tag in its low 2 bits:
// Here are the record schemes, depending on the low tag and optional higher
// tags.
//
// Low tag:
//   00: embedded_object:      [6-bit pc delta] 00
//
//   01: code_target:          [6-bit pc delta] 01
//
//   10: short_data_record:    [6-bit pc delta] 10 followed by
//                             [6-bit data delta] [2-bit data type tag]
//
//   11: long_record           [6 bit reloc mode] 11
//                             followed by pc delta
//                             followed by optional data depending on type.
//
//  2-bit data type tags, used in short_data_record and data_jump long_record:
//   code_target_with_id: 00
//   position:            01
//   statement_position:  10
//   deopt_reason:        11
//
//  If a pc delta exceeds 6 bits, it is split into a remainder that fits into
//  6 bits and a part that does not. The latter is encoded as a long record
//  with PC_JUMP as pseudo reloc info mode. The former is encoded as part of
//  the following record in the usual way. The long pc jump record has variable
//  length:
//               pc-jump:        [PC_JUMP] 11
//                               [7 bits data] 0
//                                  ...
//                               [7 bits data] 1
//               (Bits 6..31 of pc delta, with leading zeroes
//                dropped, and last non-zero chunk tagged with 1.)
332 
333 const int kTagBits = 2;
334 const int kTagMask = (1 << kTagBits) - 1;
335 const int kLongTagBits = 6;
336 const int kShortDataTypeTagBits = 2;
337 const int kShortDataBits = kBitsPerByte - kShortDataTypeTagBits;
338 
339 const int kEmbeddedObjectTag = 0;
340 const int kCodeTargetTag = 1;
341 const int kLocatableTag = 2;
342 const int kDefaultTag = 3;
343 
344 const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
345 const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
346 const int RelocInfo::kMaxSmallPCDelta = kSmallPCDeltaMask;
347 
348 const int kChunkBits = 7;
349 const int kChunkMask = (1 << kChunkBits) - 1;
350 const int kLastChunkTagBits = 1;
351 const int kLastChunkTagMask = 1;
352 const int kLastChunkTag = 1;
353 
354 const int kCodeWithIdTag = 0;
355 const int kNonstatementPositionTag = 1;
356 const int kStatementPositionTag = 2;
357 const int kDeoptReasonTag = 3;
358 
359 
WriteLongPCJump(uint32_t pc_delta)360 uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
361   // Return if the pc_delta can fit in kSmallPCDeltaBits bits.
362   // Otherwise write a variable length PC jump for the bits that do
363   // not fit in the kSmallPCDeltaBits bits.
364   if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
365   WriteMode(RelocInfo::PC_JUMP);
366   uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
367   DCHECK(pc_jump > 0);
368   // Write kChunkBits size chunks of the pc_jump.
369   for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) {
370     byte b = pc_jump & kChunkMask;
371     *--pos_ = b << kLastChunkTagBits;
372   }
373   // Tag the last chunk so it can be identified.
374   *pos_ = *pos_ | kLastChunkTag;
375   // Return the remaining kSmallPCDeltaBits of the pc_delta.
376   return pc_delta & kSmallPCDeltaMask;
377 }
378 
379 
WriteShortTaggedPC(uint32_t pc_delta,int tag)380 void RelocInfoWriter::WriteShortTaggedPC(uint32_t pc_delta, int tag) {
381   // Write a byte of tagged pc-delta, possibly preceded by an explicit pc-jump.
382   pc_delta = WriteLongPCJump(pc_delta);
383   *--pos_ = pc_delta << kTagBits | tag;
384 }
385 
386 
WriteShortTaggedData(intptr_t data_delta,int tag)387 void RelocInfoWriter::WriteShortTaggedData(intptr_t data_delta, int tag) {
388   *--pos_ = static_cast<byte>(data_delta << kShortDataTypeTagBits | tag);
389 }
390 
391 
WriteMode(RelocInfo::Mode rmode)392 void RelocInfoWriter::WriteMode(RelocInfo::Mode rmode) {
393   STATIC_ASSERT(RelocInfo::NUMBER_OF_MODES <= (1 << kLongTagBits));
394   *--pos_ = static_cast<int>((rmode << kTagBits) | kDefaultTag);
395 }
396 
397 
WriteModeAndPC(uint32_t pc_delta,RelocInfo::Mode rmode)398 void RelocInfoWriter::WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode) {
399   // Write two-byte tagged pc-delta, possibly preceded by var. length pc-jump.
400   pc_delta = WriteLongPCJump(pc_delta);
401   WriteMode(rmode);
402   *--pos_ = pc_delta;
403 }
404 
405 
WriteIntData(int number)406 void RelocInfoWriter::WriteIntData(int number) {
407   for (int i = 0; i < kIntSize; i++) {
408     *--pos_ = static_cast<byte>(number);
409     // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
410     number = number >> kBitsPerByte;
411   }
412 }
413 
414 
WriteData(intptr_t data_delta)415 void RelocInfoWriter::WriteData(intptr_t data_delta) {
416   for (int i = 0; i < kIntptrSize; i++) {
417     *--pos_ = static_cast<byte>(data_delta);
418     // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
419     data_delta = data_delta >> kBitsPerByte;
420   }
421 }
422 
423 
WritePosition(int pc_delta,int pos_delta,RelocInfo::Mode rmode)424 void RelocInfoWriter::WritePosition(int pc_delta, int pos_delta,
425                                     RelocInfo::Mode rmode) {
426   int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag
427                                                     : kStatementPositionTag;
428   // Check if delta is small enough to fit in a tagged byte.
429   if (is_intn(pos_delta, kShortDataBits)) {
430     WriteShortTaggedPC(pc_delta, kLocatableTag);
431     WriteShortTaggedData(pos_delta, pos_type_tag);
432   } else {
433     // Otherwise, use costly encoding.
434     WriteModeAndPC(pc_delta, rmode);
435     WriteIntData(pos_delta);
436   }
437 }
438 
439 
FlushPosition()440 void RelocInfoWriter::FlushPosition() {
441   if (!next_position_candidate_flushed_) {
442     WritePosition(next_position_candidate_pc_delta_,
443                   next_position_candidate_pos_delta_, RelocInfo::POSITION);
444     next_position_candidate_pos_delta_ = 0;
445     next_position_candidate_pc_delta_ = 0;
446     next_position_candidate_flushed_ = true;
447   }
448 }
449 
450 
Write(const RelocInfo * rinfo)451 void RelocInfoWriter::Write(const RelocInfo* rinfo) {
452   RelocInfo::Mode rmode = rinfo->rmode();
453   if (rmode != RelocInfo::POSITION) {
454     FlushPosition();
455   }
456 #ifdef DEBUG
457   byte* begin_pos = pos_;
458 #endif
459   DCHECK(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
460   DCHECK(rinfo->pc() - last_pc_ >= 0);
461   // Use unsigned delta-encoding for pc.
462   uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
463 
464   // The two most common modes are given small tags, and usually fit in a byte.
465   if (rmode == RelocInfo::EMBEDDED_OBJECT) {
466     WriteShortTaggedPC(pc_delta, kEmbeddedObjectTag);
467   } else if (rmode == RelocInfo::CODE_TARGET) {
468     WriteShortTaggedPC(pc_delta, kCodeTargetTag);
469     DCHECK(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
470   } else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
471     // Use signed delta-encoding for id.
472     DCHECK_EQ(static_cast<int>(rinfo->data()), rinfo->data());
473     int id_delta = static_cast<int>(rinfo->data()) - last_id_;
474     // Check if delta is small enough to fit in a tagged byte.
475     if (is_intn(id_delta, kShortDataBits)) {
476       WriteShortTaggedPC(pc_delta, kLocatableTag);
477       WriteShortTaggedData(id_delta, kCodeWithIdTag);
478     } else {
479       // Otherwise, use costly encoding.
480       WriteModeAndPC(pc_delta, rmode);
481       WriteIntData(id_delta);
482     }
483     last_id_ = static_cast<int>(rinfo->data());
484   } else if (rmode == RelocInfo::DEOPT_REASON) {
485     DCHECK(rinfo->data() < (1 << kShortDataBits));
486     WriteShortTaggedPC(pc_delta, kLocatableTag);
487     WriteShortTaggedData(rinfo->data(), kDeoptReasonTag);
488   } else if (RelocInfo::IsPosition(rmode)) {
489     // Use signed delta-encoding for position.
490     DCHECK_EQ(static_cast<int>(rinfo->data()), rinfo->data());
491     int pos_delta = static_cast<int>(rinfo->data()) - last_position_;
492     if (rmode == RelocInfo::STATEMENT_POSITION) {
493       WritePosition(pc_delta, pos_delta, rmode);
494     } else {
495       DCHECK_EQ(rmode, RelocInfo::POSITION);
496       if (pc_delta != 0 || last_mode_ != RelocInfo::POSITION) {
497         FlushPosition();
498         next_position_candidate_pc_delta_ = pc_delta;
499         next_position_candidate_pos_delta_ = pos_delta;
500       } else {
501         next_position_candidate_pos_delta_ += pos_delta;
502       }
503       next_position_candidate_flushed_ = false;
504     }
505     last_position_ = static_cast<int>(rinfo->data());
506   } else {
507     WriteModeAndPC(pc_delta, rmode);
508     if (RelocInfo::IsComment(rmode)) {
509       WriteData(rinfo->data());
510     } else if (RelocInfo::IsConstPool(rmode) ||
511                RelocInfo::IsVeneerPool(rmode)) {
512       WriteIntData(static_cast<int>(rinfo->data()));
513     }
514   }
515   last_pc_ = rinfo->pc();
516   last_mode_ = rmode;
517 #ifdef DEBUG
518   DCHECK(begin_pos - pos_ <= kMaxSize);
519 #endif
520 }
521 
522 
AdvanceGetTag()523 inline int RelocIterator::AdvanceGetTag() {
524   return *--pos_ & kTagMask;
525 }
526 
527 
GetMode()528 inline RelocInfo::Mode RelocIterator::GetMode() {
529   return static_cast<RelocInfo::Mode>((*pos_ >> kTagBits) &
530                                       ((1 << kLongTagBits) - 1));
531 }
532 
533 
ReadShortTaggedPC()534 inline void RelocIterator::ReadShortTaggedPC() {
535   rinfo_.pc_ += *pos_ >> kTagBits;
536 }
537 
538 
AdvanceReadPC()539 inline void RelocIterator::AdvanceReadPC() {
540   rinfo_.pc_ += *--pos_;
541 }
542 
543 
AdvanceReadId()544 void RelocIterator::AdvanceReadId() {
545   int x = 0;
546   for (int i = 0; i < kIntSize; i++) {
547     x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
548   }
549   last_id_ += x;
550   rinfo_.data_ = last_id_;
551 }
552 
553 
AdvanceReadInt()554 void RelocIterator::AdvanceReadInt() {
555   int x = 0;
556   for (int i = 0; i < kIntSize; i++) {
557     x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
558   }
559   rinfo_.data_ = x;
560 }
561 
562 
AdvanceReadPosition()563 void RelocIterator::AdvanceReadPosition() {
564   int x = 0;
565   for (int i = 0; i < kIntSize; i++) {
566     x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
567   }
568   last_position_ += x;
569   rinfo_.data_ = last_position_;
570 }
571 
572 
AdvanceReadData()573 void RelocIterator::AdvanceReadData() {
574   intptr_t x = 0;
575   for (int i = 0; i < kIntptrSize; i++) {
576     x |= static_cast<intptr_t>(*--pos_) << i * kBitsPerByte;
577   }
578   rinfo_.data_ = x;
579 }
580 
581 
AdvanceReadLongPCJump()582 void RelocIterator::AdvanceReadLongPCJump() {
583   // Read the 32-kSmallPCDeltaBits most significant bits of the
584   // pc jump in kChunkBits bit chunks and shift them into place.
585   // Stop when the last chunk is encountered.
586   uint32_t pc_jump = 0;
587   for (int i = 0; i < kIntSize; i++) {
588     byte pc_jump_part = *--pos_;
589     pc_jump |= (pc_jump_part >> kLastChunkTagBits) << i * kChunkBits;
590     if ((pc_jump_part & kLastChunkTagMask) == 1) break;
591   }
592   // The least significant kSmallPCDeltaBits bits will be added
593   // later.
594   rinfo_.pc_ += pc_jump << kSmallPCDeltaBits;
595 }
596 
597 
GetShortDataTypeTag()598 inline int RelocIterator::GetShortDataTypeTag() {
599   return *pos_ & ((1 << kShortDataTypeTagBits) - 1);
600 }
601 
602 
ReadShortTaggedId()603 inline void RelocIterator::ReadShortTaggedId() {
604   int8_t signed_b = *pos_;
605   // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
606   last_id_ += signed_b >> kShortDataTypeTagBits;
607   rinfo_.data_ = last_id_;
608 }
609 
610 
ReadShortTaggedPosition()611 inline void RelocIterator::ReadShortTaggedPosition() {
612   int8_t signed_b = *pos_;
613   // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
614   last_position_ += signed_b >> kShortDataTypeTagBits;
615   rinfo_.data_ = last_position_;
616 }
617 
618 
ReadShortTaggedData()619 inline void RelocIterator::ReadShortTaggedData() {
620   uint8_t unsigned_b = *pos_;
621   rinfo_.data_ = unsigned_b >> kTagBits;
622 }
623 
624 
GetPositionModeFromTag(int tag)625 static inline RelocInfo::Mode GetPositionModeFromTag(int tag) {
626   DCHECK(tag == kNonstatementPositionTag ||
627          tag == kStatementPositionTag);
628   return (tag == kNonstatementPositionTag) ?
629          RelocInfo::POSITION :
630          RelocInfo::STATEMENT_POSITION;
631 }
632 
633 
next()634 void RelocIterator::next() {
635   DCHECK(!done());
636   // Basically, do the opposite of RelocInfoWriter::Write.
637   // Reading of data is as far as possible avoided for unwanted modes,
638   // but we must always update the pc.
639   //
640   // We exit this loop by returning when we find a mode we want.
641   while (pos_ > end_) {
642     int tag = AdvanceGetTag();
643     if (tag == kEmbeddedObjectTag) {
644       ReadShortTaggedPC();
645       if (SetMode(RelocInfo::EMBEDDED_OBJECT)) return;
646     } else if (tag == kCodeTargetTag) {
647       ReadShortTaggedPC();
648       if (SetMode(RelocInfo::CODE_TARGET)) return;
649     } else if (tag == kLocatableTag) {
650       ReadShortTaggedPC();
651       Advance();
652       int data_type_tag = GetShortDataTypeTag();
653       if (data_type_tag == kCodeWithIdTag) {
654         if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
655           ReadShortTaggedId();
656           return;
657         }
658       } else if (data_type_tag == kDeoptReasonTag) {
659         if (SetMode(RelocInfo::DEOPT_REASON)) {
660           ReadShortTaggedData();
661           return;
662         }
663       } else {
664         DCHECK(data_type_tag == kNonstatementPositionTag ||
665                data_type_tag == kStatementPositionTag);
666         if (mode_mask_ & RelocInfo::kPositionMask) {
667           // Always update the position if we are interested in either
668           // statement positions or non-statement positions.
669           ReadShortTaggedPosition();
670           if (SetMode(GetPositionModeFromTag(data_type_tag))) return;
671         }
672       }
673     } else {
674       DCHECK(tag == kDefaultTag);
675       RelocInfo::Mode rmode = GetMode();
676       if (rmode == RelocInfo::PC_JUMP) {
677         AdvanceReadLongPCJump();
678       } else {
679         AdvanceReadPC();
680         if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
681           if (SetMode(rmode)) {
682             AdvanceReadId();
683             return;
684           }
685           Advance(kIntSize);
686         } else if (RelocInfo::IsComment(rmode)) {
687           if (SetMode(rmode)) {
688             AdvanceReadData();
689             return;
690           }
691           Advance(kIntptrSize);
692         } else if (RelocInfo::IsPosition(rmode)) {
693           if (mode_mask_ & RelocInfo::kPositionMask) {
694             // Always update the position if we are interested in either
695             // statement positions or non-statement positions.
696             AdvanceReadPosition();
697             if (SetMode(rmode)) return;
698           } else {
699             Advance(kIntSize);
700           }
701         } else if (RelocInfo::IsConstPool(rmode) ||
702                    RelocInfo::IsVeneerPool(rmode)) {
703           if (SetMode(rmode)) {
704             AdvanceReadInt();
705             return;
706           }
707           Advance(kIntSize);
708         } else if (SetMode(static_cast<RelocInfo::Mode>(rmode))) {
709           return;
710         }
711       }
712     }
713   }
714   if (code_age_sequence_ != NULL) {
715     byte* old_code_age_sequence = code_age_sequence_;
716     code_age_sequence_ = NULL;
717     if (SetMode(RelocInfo::CODE_AGE_SEQUENCE)) {
718       rinfo_.data_ = 0;
719       rinfo_.pc_ = old_code_age_sequence;
720       return;
721     }
722   }
723   done_ = true;
724 }
725 
726 
RelocIterator(Code * code,int mode_mask)727 RelocIterator::RelocIterator(Code* code, int mode_mask)
728     : rinfo_(code->map()->GetIsolate()) {
729   rinfo_.host_ = code;
730   rinfo_.pc_ = code->instruction_start();
731   rinfo_.data_ = 0;
732   // Relocation info is read backwards.
733   pos_ = code->relocation_start() + code->relocation_size();
734   end_ = code->relocation_start();
735   done_ = false;
736   mode_mask_ = mode_mask;
737   last_id_ = 0;
738   last_position_ = 0;
739   byte* sequence = code->FindCodeAgeSequence();
740   // We get the isolate from the map, because at serialization time
741   // the code pointer has been cloned and isn't really in heap space.
742   Isolate* isolate = code->map()->GetIsolate();
743   if (sequence != NULL && !Code::IsYoungSequence(isolate, sequence)) {
744     code_age_sequence_ = sequence;
745   } else {
746     code_age_sequence_ = NULL;
747   }
748   if (mode_mask_ == 0) pos_ = end_;
749   next();
750 }
751 
752 
RelocIterator(const CodeDesc & desc,int mode_mask)753 RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
754     : rinfo_(desc.origin->isolate()) {
755   rinfo_.pc_ = desc.buffer;
756   rinfo_.data_ = 0;
757   // Relocation info is read backwards.
758   pos_ = desc.buffer + desc.buffer_size;
759   end_ = pos_ - desc.reloc_size;
760   done_ = false;
761   mode_mask_ = mode_mask;
762   last_id_ = 0;
763   last_position_ = 0;
764   code_age_sequence_ = NULL;
765   if (mode_mask_ == 0) pos_ = end_;
766   next();
767 }
768 
769 
770 // -----------------------------------------------------------------------------
771 // Implementation of RelocInfo
772 
773 
774 #ifdef DEBUG
RequiresRelocation(const CodeDesc & desc)775 bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
776   // Ensure there are no code targets or embedded objects present in the
777   // deoptimization entries, they would require relocation after code
778   // generation.
779   int mode_mask = RelocInfo::kCodeTargetMask |
780                   RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
781                   RelocInfo::ModeMask(RelocInfo::CELL) |
782                   RelocInfo::kApplyMask;
783   RelocIterator it(desc, mode_mask);
784   return !it.done();
785 }
786 #endif
787 
788 
789 #ifdef ENABLE_DISASSEMBLER
RelocModeName(RelocInfo::Mode rmode)790 const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
791   switch (rmode) {
792     case NONE32:
793       return "no reloc 32";
794     case NONE64:
795       return "no reloc 64";
796     case EMBEDDED_OBJECT:
797       return "embedded object";
798     case DEBUGGER_STATEMENT:
799       return "debugger statement";
800     case CODE_TARGET:
801       return "code target";
802     case CODE_TARGET_WITH_ID:
803       return "code target with id";
804     case CELL:
805       return "property cell";
806     case RUNTIME_ENTRY:
807       return "runtime entry";
808     case COMMENT:
809       return "comment";
810     case POSITION:
811       return "position";
812     case STATEMENT_POSITION:
813       return "statement position";
814     case EXTERNAL_REFERENCE:
815       return "external reference";
816     case INTERNAL_REFERENCE:
817       return "internal reference";
818     case INTERNAL_REFERENCE_ENCODED:
819       return "encoded internal reference";
820     case DEOPT_REASON:
821       return "deopt reason";
822     case CONST_POOL:
823       return "constant pool";
824     case VENEER_POOL:
825       return "veneer pool";
826     case DEBUG_BREAK_SLOT_AT_POSITION:
827       return "debug break slot at position";
828     case DEBUG_BREAK_SLOT_AT_RETURN:
829       return "debug break slot at return";
830     case DEBUG_BREAK_SLOT_AT_CALL:
831       return "debug break slot at call";
832     case CODE_AGE_SEQUENCE:
833       return "code age sequence";
834     case GENERATOR_CONTINUATION:
835       return "generator continuation";
836     case NUMBER_OF_MODES:
837     case PC_JUMP:
838       UNREACHABLE();
839       return "number_of_modes";
840   }
841   return "unknown relocation type";
842 }
843 
844 
// Prints one relocation entry: its pc, mode name, and mode-specific payload
// (comment text, deopt reason, target object/address, position data, ...).
void RelocInfo::Print(Isolate* isolate, std::ostream& os) {  // NOLINT
  os << static_cast<const void*>(pc_) << "  " << RelocModeName(rmode_);
  if (IsComment(rmode_)) {
    // For comments, data_ holds a pointer to the comment text.
    os << "  (" << reinterpret_cast<char*>(data_) << ")";
  } else if (rmode_ == DEOPT_REASON) {
    os << "  (" << Deoptimizer::GetDeoptReason(
                       static_cast<Deoptimizer::DeoptReason>(data_)) << ")";
  } else if (rmode_ == EMBEDDED_OBJECT) {
    os << "  (" << Brief(target_object()) << ")";
  } else if (rmode_ == EXTERNAL_REFERENCE) {
    ExternalReferenceEncoder ref_encoder(isolate);
    os << " ("
       << ref_encoder.NameOfAddress(isolate, target_external_reference())
       << ")  (" << static_cast<const void*>(target_external_reference())
       << ")";
  } else if (IsCodeTarget(rmode_)) {
    Code* code = Code::GetCodeFromTargetAddress(target_address());
    os << " (" << Code::Kind2String(code->kind()) << ")  ("
       << static_cast<const void*>(target_address()) << ")";
    if (rmode_ == CODE_TARGET_WITH_ID) {
      os << " (id=" << static_cast<int>(data_) << ")";
    }
  } else if (IsPosition(rmode_)) {
    os << "  (" << data() << ")";
  } else if (IsRuntimeEntry(rmode_) &&
             isolate->deoptimizer_data() != NULL) {
    // Deoptimization bailouts are stored as runtime entries.
    int id = Deoptimizer::GetDeoptimizationId(
        isolate, target_address(), Deoptimizer::EAGER);
    if (id != Deoptimizer::kNotDeoptimizationEntry) {
      os << "  (deoptimization bailout " << id << ")";
    }
  } else if (IsConstPool(rmode_)) {
    os << " (size " << static_cast<int>(data_) << ")";
  }

  os << "\n";
}
883 #endif  // ENABLE_DISASSEMBLER
884 
885 
886 #ifdef VERIFY_HEAP
// Heap-verification hook: checks that this relocation entry's target is
// internally consistent (valid pointer, resolvable code object, or an
// internal reference that lies inside its code object's instructions).
void RelocInfo::Verify(Isolate* isolate) {
  switch (rmode_) {
    case EMBEDDED_OBJECT:
      Object::VerifyPointer(target_object());
      break;
    case CELL:
      Object::VerifyPointer(target_cell());
      break;
    case DEBUGGER_STATEMENT:
    case CODE_TARGET_WITH_ID:
    case CODE_TARGET: {
      // convert inline target address to code object
      Address addr = target_address();
      CHECK(addr != NULL);
      // Check that we can find the right code object.
      Code* code = Code::GetCodeFromTargetAddress(addr);
      Object* found = isolate->FindCodeObject(addr);
      CHECK(found->IsCode());
      CHECK(code->address() == HeapObject::cast(found)->address());
      break;
    }
    case INTERNAL_REFERENCE:
    case INTERNAL_REFERENCE_ENCODED: {
      // The referenced address must fall within the instructions of the
      // code object that contains the reference itself.
      Address target = target_internal_reference();
      Address pc = target_internal_reference_address();
      Code* code = Code::cast(isolate->FindCodeObject(pc));
      CHECK(target >= code->instruction_start());
      CHECK(target <= code->instruction_end());
      break;
    }
    case RUNTIME_ENTRY:
    case COMMENT:
    case POSITION:
    case STATEMENT_POSITION:
    case EXTERNAL_REFERENCE:
    case DEOPT_REASON:
    case CONST_POOL:
    case VENEER_POOL:
    case DEBUG_BREAK_SLOT_AT_POSITION:
    case DEBUG_BREAK_SLOT_AT_RETURN:
    case DEBUG_BREAK_SLOT_AT_CALL:
    case GENERATOR_CONTINUATION:
    case NONE32:
    case NONE64:
      // Nothing to verify for these modes.
      break;
    case NUMBER_OF_MODES:
    case PC_JUMP:
      UNREACHABLE();
      break;
    case CODE_AGE_SEQUENCE:
      DCHECK(Code::IsYoungSequence(isolate, pc_) || code_age_stub()->IsCode());
      break;
  }
}
941 #endif  // VERIFY_HEAP
942 
943 
944 // Implementation of ExternalReference
945 
// One-time process setup: fills the shared double-constant table referenced
// from generated code and creates the mutex guarding the Math.exp tables.
void ExternalReference::SetUp() {
  double_constants.min_int = kMinInt;
  double_constants.one_half = 0.5;
  double_constants.minus_one_half = -0.5;
  double_constants.the_hole_nan = bit_cast<double>(kHoleNanInt64);
  double_constants.negative_infinity = -V8_INFINITY;
  // Smallest double strictly greater than the largest uint32 value (2^32).
  double_constants.uint32_bias =
    static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;

  math_exp_data_mutex = new base::Mutex();
}
957 
958 
// Lazily builds the constant and mantissa lookup tables consumed by the
// generated Math.exp fast path.  Safe to call repeatedly; work is done once.
void ExternalReference::InitializeMathExpData() {
  // Unsynchronized fast-path read (double-checked locking).
  // NOTE(review): this pre-lock read of math_exp_data_initialized has no
  // explicit memory ordering — confirm publication is adequate on all
  // supported platforms.
  if (math_exp_data_initialized) return;

  base::LockGuard<base::Mutex> lock_guard(math_exp_data_mutex);
  if (!math_exp_data_initialized) {
    // If this is changed, generated code must be adapted too.
    const int kTableSizeBits = 11;
    const int kTableSize = 1 << kTableSizeBits;
    const double kTableSizeDouble = static_cast<double>(kTableSize);

    math_exp_constants_array = new double[9];
    // Input values smaller than this always return 0.
    math_exp_constants_array[0] = -708.39641853226408;
    // Input values larger than this always return +Infinity.
    math_exp_constants_array[1] = 709.78271289338397;
    math_exp_constants_array[2] = V8_INFINITY;
    // The rest is black magic. Do not attempt to understand it. It is
    // loosely based on the "expd" function published at:
    // http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html
    const double constant3 = (1 << kTableSizeBits) / std::log(2.0);
    math_exp_constants_array[3] = constant3;
    math_exp_constants_array[4] =
        static_cast<double>(static_cast<int64_t>(3) << 51);
    math_exp_constants_array[5] = 1 / constant3;
    math_exp_constants_array[6] = 3.0000000027955394;
    math_exp_constants_array[7] = 0.16666666685227835;
    math_exp_constants_array[8] = 1;

    // Table of the mantissa bits of 2^(i/kTableSize), i = 0..kTableSize-1.
    math_exp_log_table_array = new double[kTableSize];
    for (int i = 0; i < kTableSize; i++) {
      double value = std::pow(2, i / kTableSizeDouble);
      uint64_t bits = bit_cast<uint64_t, double>(value);
      bits &= (static_cast<uint64_t>(1) << 52) - 1;  // keep mantissa only
      double mantissa = bit_cast<double, uint64_t>(bits);
      math_exp_log_table_array[i] = mantissa;
    }

    // Publish only after the tables are fully built.
    math_exp_data_initialized = true;
  }
}
1000 
1001 
// Releases the Math.exp tables and their guarding mutex; pointers are
// nulled so a later SetUp/Initialize cycle starts from a clean state.
void ExternalReference::TearDownMathExpData() {
  delete[] math_exp_constants_array;
  math_exp_constants_array = NULL;
  delete[] math_exp_log_table_array;
  math_exp_log_table_array = NULL;
  delete math_exp_data_mutex;
  math_exp_data_mutex = NULL;
}
1010 
1011 
// Reference to a builtin C function, routed through Redirect.
ExternalReference::ExternalReference(Builtins::CFunctionId id, Isolate* isolate)
  : address_(Redirect(isolate, Builtins::c_function_address(id))) {}
1014 
1015 
// Reference to an API callback, routed through Redirect with the given
// call type.  NOTE(review): the default arguments here apply only within
// this translation unit — confirm the declaration carries none.
ExternalReference::ExternalReference(
    ApiFunction* fun,
    Type type = ExternalReference::BUILTIN_CALL,
    Isolate* isolate = NULL)
  : address_(Redirect(isolate, fun->address(), type)) {}
1021 
1022 
// Reference to the entry address of a named builtin.
ExternalReference::ExternalReference(Builtins::Name name, Isolate* isolate)
  : address_(isolate->builtins()->builtin_address(name)) {}
1025 
1026 
// Reference to a runtime function's entry, routed through Redirect.
ExternalReference::ExternalReference(Runtime::FunctionId id, Isolate* isolate)
    : address_(Redirect(isolate, Runtime::FunctionForId(id)->entry)) {}
1029 
1030 
// Reference to the given runtime function's entry, routed through Redirect.
ExternalReference::ExternalReference(const Runtime::Function* f,
                                     Isolate* isolate)
    : address_(Redirect(isolate, f->entry)) {}
1034 
1035 
// Reference to the isolate itself (uses the Isolate* constructor).
ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
  return ExternalReference(isolate);
}
1039 
1040 
// Reference to a stats counter's internal value cell.
ExternalReference::ExternalReference(StatsCounter* counter)
  : address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
1043 
1044 
// Reference to one of the isolate's well-known addresses, selected by id.
ExternalReference::ExternalReference(Isolate::AddressId id, Isolate* isolate)
  : address_(isolate->get_address_from_id(id)) {}
1047 
1048 
// Reference to a stub-cache table slot.
ExternalReference::ExternalReference(const SCTableReference& table_ref)
  : address_(table_ref.address()) {}
1051 
1052 
// Reference to IncrementalMarking::RecordWriteFromCode, via Redirect.
ExternalReference ExternalReference::
    incremental_marking_record_write_function(Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
}
1059 
1060 
// Reference to StoreBuffer::StoreBufferOverflow, via Redirect.
ExternalReference ExternalReference::
    store_buffer_overflow_function(Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
}
1067 
1068 
// Reference to HandleScope::DeleteExtensions, via Redirect.
ExternalReference ExternalReference::delete_handle_scope_extensions(
    Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(HandleScope::DeleteExtensions)));
}
1075 
1076 
// Reference to JSDate::GetField, via Redirect.
ExternalReference ExternalReference::get_date_field_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(JSDate::GetField)));
}
1081 
1082 
// Reference to Code::MakeCodeAgeSequenceYoung, via Redirect.
ExternalReference ExternalReference::get_make_code_young_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate, FUNCTION_ADDR(Code::MakeCodeAgeSequenceYoung)));
}
1088 
1089 
// Reference to Code::MarkCodeAsExecuted, via Redirect.
ExternalReference ExternalReference::get_mark_code_as_executed_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate, FUNCTION_ADDR(Code::MarkCodeAsExecuted)));
}
1095 
1096 
// Address of the isolate's date-cache stamp.
ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
  return ExternalReference(isolate->date_cache()->stamp_address());
}
1100 
1101 
// Address of the isolate's stress-deopt counter.
ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
  return ExternalReference(isolate->stress_deopt_count_address());
}
1105 
1106 
// Reference to Deoptimizer::New, via Redirect.
ExternalReference ExternalReference::new_deoptimizer_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(Deoptimizer::New)));
}
1112 
1113 
// Reference to Deoptimizer::ComputeOutputFrames, via Redirect.
ExternalReference ExternalReference::compute_output_frames_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
}
1119 
1120 
// Reference to Logger::EnterExternal, via Redirect.
ExternalReference ExternalReference::log_enter_external_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal)));
}
1126 
1127 
// Reference to Logger::LeaveExternal, via Redirect.
ExternalReference ExternalReference::log_leave_external_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal)));
}
1133 
1134 
// Address of the keyed-lookup cache's keys table.
ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
  return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
}
1138 
1139 
// Address of the keyed-lookup cache's field-offsets table.
ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
    Isolate* isolate) {
  return ExternalReference(
      isolate->keyed_lookup_cache()->field_offsets_address());
}
1145 
1146 
// Address of the start of the heap's roots array.
ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
  return ExternalReference(isolate->heap()->roots_array_start());
}
1150 
1151 
// Address of the heap's allocation-sites list head.
ExternalReference ExternalReference::allocation_sites_list_address(
    Isolate* isolate) {
  return ExternalReference(isolate->heap()->allocation_sites_list_address());
}
1156 
1157 
// Address of the stack guard's JS stack limit.
ExternalReference ExternalReference::address_of_stack_limit(Isolate* isolate) {
  return ExternalReference(isolate->stack_guard()->address_of_jslimit());
}
1161 
1162 
// Address of the stack guard's real (uninterrupted) JS stack limit.
ExternalReference ExternalReference::address_of_real_stack_limit(
    Isolate* isolate) {
  return ExternalReference(isolate->stack_guard()->address_of_real_jslimit());
}
1167 
1168 
// Address of the regexp stack's limit.
ExternalReference ExternalReference::address_of_regexp_stack_limit(
    Isolate* isolate) {
  return ExternalReference(isolate->regexp_stack()->limit_address());
}
1173 
1174 
// Start address of the heap's new space.
ExternalReference ExternalReference::new_space_start(Isolate* isolate) {
  return ExternalReference(isolate->heap()->NewSpaceStart());
}
1178 
1179 
// Address of the heap's store-buffer top pointer.
ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
  return ExternalReference(isolate->heap()->store_buffer_top_address());
}
1183 
1184 
// The new-space address mask, smuggled through an ExternalReference as if
// it were an address.
ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
  return ExternalReference(reinterpret_cast<Address>(
      isolate->heap()->NewSpaceMask()));
}
1189 
1190 
// Address of new space's allocation-top pointer.
ExternalReference ExternalReference::new_space_allocation_top_address(
    Isolate* isolate) {
  return ExternalReference(isolate->heap()->NewSpaceAllocationTopAddress());
}
1195 
1196 
// Address of new space's allocation-limit pointer.
ExternalReference ExternalReference::new_space_allocation_limit_address(
    Isolate* isolate) {
  return ExternalReference(isolate->heap()->NewSpaceAllocationLimitAddress());
}
1201 
1202 
// Address of old space's allocation-top pointer.
ExternalReference ExternalReference::old_space_allocation_top_address(
    Isolate* isolate) {
  return ExternalReference(isolate->heap()->OldSpaceAllocationTopAddress());
}
1207 
1208 
// Address of old space's allocation-limit pointer.
ExternalReference ExternalReference::old_space_allocation_limit_address(
    Isolate* isolate) {
  return ExternalReference(isolate->heap()->OldSpaceAllocationLimitAddress());
}
1213 
1214 
// Address of the current handle-scope nesting level.
ExternalReference ExternalReference::handle_scope_level_address(
    Isolate* isolate) {
  return ExternalReference(HandleScope::current_level_address(isolate));
}
1219 
1220 
// Address of the current handle-scope "next" pointer.
ExternalReference ExternalReference::handle_scope_next_address(
    Isolate* isolate) {
  return ExternalReference(HandleScope::current_next_address(isolate));
}
1225 
1226 
// Address of the current handle-scope limit pointer.
ExternalReference ExternalReference::handle_scope_limit_address(
    Isolate* isolate) {
  return ExternalReference(HandleScope::current_limit_address(isolate));
}
1231 
1232 
// Address of the isolate's scheduled-exception slot.
ExternalReference ExternalReference::scheduled_exception_address(
    Isolate* isolate) {
  return ExternalReference(isolate->scheduled_exception_address());
}
1237 
1238 
// Address of the isolate's pending-message object slot.
ExternalReference ExternalReference::address_of_pending_message_obj(
    Isolate* isolate) {
  return ExternalReference(isolate->pending_message_obj_address());
}
1243 
1244 
// Address of the shared double constant kMinInt (see SetUp).
ExternalReference ExternalReference::address_of_min_int() {
  return ExternalReference(reinterpret_cast<void*>(&double_constants.min_int));
}
1248 
1249 
// Address of the shared double constant 0.5 (see SetUp).
ExternalReference ExternalReference::address_of_one_half() {
  return ExternalReference(reinterpret_cast<void*>(&double_constants.one_half));
}
1253 
1254 
// Address of the shared double constant -0.5 (see SetUp).
ExternalReference ExternalReference::address_of_minus_one_half() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.minus_one_half));
}
1259 
1260 
// Address of the shared double constant -Infinity (see SetUp).
ExternalReference ExternalReference::address_of_negative_infinity() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.negative_infinity));
}
1265 
1266 
// Address of the shared "hole NaN" double constant (see SetUp).
ExternalReference ExternalReference::address_of_the_hole_nan() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.the_hole_nan));
}
1271 
1272 
// Address of the shared uint32 bias constant, 2^32 (see SetUp).
ExternalReference ExternalReference::address_of_uint32_bias() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.uint32_bias));
}
1277 
1278 
// Address of the CPU profiler's is-profiling flag.
ExternalReference ExternalReference::is_profiling_address(Isolate* isolate) {
  return ExternalReference(isolate->cpu_profiler()->is_profiling_address());
}
1282 
1283 
// Reference to the InvokeFunctionCallback thunk, redirected with the
// PROFILING_API_CALL type.
ExternalReference ExternalReference::invoke_function_callback(
    Isolate* isolate) {
  Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
  ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
  ApiFunction thunk_fun(thunk_address);
  return ExternalReference(&thunk_fun, thunk_type, isolate);
}
1291 
1292 
// Reference to the InvokeAccessorGetterCallback thunk, redirected with the
// PROFILING_GETTER_CALL type.
ExternalReference ExternalReference::invoke_accessor_getter_callback(
    Isolate* isolate) {
  Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
  ExternalReference::Type thunk_type =
      ExternalReference::PROFILING_GETTER_CALL;
  ApiFunction thunk_fun(thunk_address);
  return ExternalReference(&thunk_fun, thunk_type, isolate);
}
1301 
1302 
1303 #ifndef V8_INTERPRETED_REGEXP
1304 
// Reference to the architecture-specific regexp CheckStackGuardState
// function, via Redirect.  Note that MIPS64 reuses the
// RegExpMacroAssemblerMIPS class name.
ExternalReference ExternalReference::re_check_stack_guard_state(
    Isolate* isolate) {
  Address function;
#if V8_TARGET_ARCH_X64
  function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
#elif V8_TARGET_ARCH_IA32
  function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM64
  function = FUNCTION_ADDR(RegExpMacroAssemblerARM64::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
  function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_PPC
  function = FUNCTION_ADDR(RegExpMacroAssemblerPPC::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
  function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS64
  function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#elif V8_TARGET_ARCH_X87
  function = FUNCTION_ADDR(RegExpMacroAssemblerX87::CheckStackGuardState);
#else
  UNREACHABLE();
#endif
  return ExternalReference(Redirect(isolate, function));
}
1329 
1330 
// Reference to NativeRegExpMacroAssembler::GrowStack, via Redirect.
ExternalReference ExternalReference::re_grow_stack(Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
}
1335 
// Reference to NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16,
// via Redirect.
ExternalReference ExternalReference::re_case_insensitive_compare_uc16(
    Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
}
1342 
1343 
// Address of the regexp word-character lookup table.
ExternalReference ExternalReference::re_word_character_map() {
  return ExternalReference(
      NativeRegExpMacroAssembler::word_character_map_address());
}
1348 
// Address of the isolate's static regexp offsets vector.
ExternalReference ExternalReference::address_of_static_offsets_vector(
    Isolate* isolate) {
  return ExternalReference(
      reinterpret_cast<Address>(isolate->jsregexp_static_offsets_vector()));
}
1354 
// Address of the regexp stack's backing-memory pointer.
ExternalReference ExternalReference::address_of_regexp_stack_memory_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->regexp_stack()->memory_address());
}
1360 
// Address of the regexp stack's memory-size field.
ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
    Isolate* isolate) {
  return ExternalReference(isolate->regexp_stack()->memory_size_address());
}
1365 
1366 #endif  // V8_INTERPRETED_REGEXP
1367 
1368 
// Reference to the double-precision std::log overload, via Redirect as a
// BUILTIN_FP_CALL.  The static_cast selects the double(double) overload.
ExternalReference ExternalReference::math_log_double_function(
    Isolate* isolate) {
  typedef double (*d2d)(double x);
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(static_cast<d2d>(std::log)),
                                    BUILTIN_FP_CALL));
}
1376 
1377 
// Address of entry `constant_index` in the Math.exp constants table;
// requires InitializeMathExpData() to have run.
ExternalReference ExternalReference::math_exp_constants(int constant_index) {
  DCHECK(math_exp_data_initialized);
  return ExternalReference(
      reinterpret_cast<void*>(math_exp_constants_array + constant_index));
}
1383 
1384 
// Address of the Math.exp mantissa lookup table; requires
// InitializeMathExpData() to have run.
ExternalReference ExternalReference::math_exp_log_table() {
  DCHECK(math_exp_data_initialized);
  return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array));
}
1389 
1390 
// Address of the flags word inside the given page's memory chunk header.
ExternalReference ExternalReference::page_flags(Page* page) {
  return ExternalReference(reinterpret_cast<Address>(page) +
                           MemoryChunk::kFlagsOffset);
}
1395 
1396 
// Wraps a raw deoptimization entry address.
ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
  return ExternalReference(entry);
}
1400 
1401 
// Address of the CpuFeatures supported-features word; CpuFeatures must
// already be initialized.
ExternalReference ExternalReference::cpu_features() {
  DCHECK(CpuFeatures::initialized_);
  return ExternalReference(&CpuFeatures::supported_);
}
1406 
1407 
// Address of the debugger's is-active flag.
ExternalReference ExternalReference::debug_is_active_address(
    Isolate* isolate) {
  return ExternalReference(isolate->debug()->is_active_address());
}
1412 
1413 
// Address of the debugger's after-break jump target.
ExternalReference ExternalReference::debug_after_break_target_address(
    Isolate* isolate) {
  return ExternalReference(isolate->debug()->after_break_target_address());
}
1418 
1419 
// Address of the isolate's virtual handler register slot.
ExternalReference ExternalReference::virtual_handler_register(
    Isolate* isolate) {
  return ExternalReference(isolate->virtual_handler_register_address());
}
1424 
1425 
// Address of the isolate's virtual slot register slot.
ExternalReference ExternalReference::virtual_slot_register(Isolate* isolate) {
  return ExternalReference(isolate->virtual_slot_register_address());
}
1429 
1430 
// Address of the runtime-function table for this isolate.
ExternalReference ExternalReference::runtime_function_table_address(
    Isolate* isolate) {
  return ExternalReference(
      const_cast<Runtime::Function*>(Runtime::RuntimeFunctionTable(isolate)));
}
1436 
1437 
// Computes x^y, dispatching to the fast integer-exponent routine, the
// isolate's fast sqrt for y == ±0.5, or the general double path.
double power_helper(Isolate* isolate, double x, double y) {
  int y_int = static_cast<int>(y);
  if (y == y_int) {
    return power_double_int(x, y_int);  // Returns 1 if exponent is 0.
  }
  if (y == 0.5) {
    // pow(±Inf, 0.5) is +Inf; otherwise use sqrt.
    lazily_initialize_fast_sqrt(isolate);
    return (std::isinf(x)) ? V8_INFINITY
                           : fast_sqrt(x + 0.0, isolate);  // Convert -0 to +0.
  }
  if (y == -0.5) {
    // pow(±Inf, -0.5) is +0; otherwise 1/sqrt.
    lazily_initialize_fast_sqrt(isolate);
    return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0,
                                                 isolate);  // Convert -0 to +0.
  }
  return power_double_double(x, y);
}
1455 
1456 
1457 // Helper function to compute x^y, where y is known to be an
1458 // integer. Uses binary decomposition to limit the number of
1459 // multiplications; see the discussion in "Hacker's Delight" by Henry
1460 // S. Warren, Jr., figure 11-6, page 213.
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
// S. Warren, Jr., figure 11-6, page 213.  Returns 1 if the exponent is 0.
double power_double_int(double x, int y) {
  double m = (y < 0) ? 1 / x : x;
  // Compute |y| in unsigned arithmetic: negating a signed int equal to
  // INT_MIN is undefined behavior, while unsigned wraparound is well
  // defined and yields the correct magnitude.
  unsigned n =
      (y < 0) ? 0u - static_cast<unsigned>(y) : static_cast<unsigned>(y);
  double p = 1;  // accumulated result
  // Consume two exponent bits per iteration.
  while (n != 0) {
    if ((n & 1) != 0) p *= m;
    m *= m;
    if ((n & 2) != 0) p *= m;
    m *= m;
    n >>= 2;
  }
  return p;
}
1474 
1475 
// Computes x^y for arbitrary doubles, filtering out the inputs for which
// this code must return NaN before delegating to std::pow.
double power_double_double(double x, double y) {
#if (defined(__MINGW64_VERSION_MAJOR) &&                              \
     (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)) || \
    defined(V8_OS_AIX)
  // MinGW64 and AIX have a custom implementation for pow.  This handles certain
  // special cases that are different.
  if ((x == 0.0 || std::isinf(x)) && y != 0.0 && std::isfinite(y)) {
    double f;
    double result = ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
    /* retain sign if odd integer exponent */
    return ((std::modf(y, &f) == 0.0) && (static_cast<int64_t>(y) & 1))
               ? copysign(result, x)
               : result;
  }

  if (x == 2.0) {
    int y_int = static_cast<int>(y);
    if (y == y_int) {
      return std::ldexp(1.0, y_int);
    }
  }
#endif

  // The checks for special cases can be dropped in ia32 because it has already
  // been done in generated code before bailing out here.
  const bool unit_base = (x == 1 || x == -1);
  const bool needs_nan = std::isnan(y) || (unit_base && std::isinf(y));
  if (needs_nan) return std::numeric_limits<double>::quiet_NaN();
  return std::pow(x, y);
}
1506 
1507 
// Reference to power_double_double, via Redirect as a BUILTIN_FP_FP_CALL.
ExternalReference ExternalReference::power_double_double_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(power_double_double),
                                    BUILTIN_FP_FP_CALL));
}
1514 
1515 
// Reference to power_double_int, via Redirect as a BUILTIN_FP_INT_CALL.
ExternalReference ExternalReference::power_double_int_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(power_double_int),
                                    BUILTIN_FP_INT_CALL));
}
1522 
1523 
EvalComparison(Token::Value op,double op1,double op2)1524 bool EvalComparison(Token::Value op, double op1, double op2) {
1525   DCHECK(Token::IsCompareOp(op));
1526   switch (op) {
1527     case Token::EQ:
1528     case Token::EQ_STRICT: return (op1 == op2);
1529     case Token::NE: return (op1 != op2);
1530     case Token::LT: return (op1 < op2);
1531     case Token::GT: return (op1 > op2);
1532     case Token::LTE: return (op1 <= op2);
1533     case Token::GTE: return (op1 >= op2);
1534     default:
1535       UNREACHABLE();
1536       return false;
1537   }
1538 }
1539 
1540 
// Reference to the modulo function, via Redirect as a BUILTIN_FP_FP_CALL.
ExternalReference ExternalReference::mod_two_doubles_operation(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(modulo),
                                    BUILTIN_FP_FP_CALL));
}
1547 
1548 
// Address of the debugger's step-in-enabled flag.
ExternalReference ExternalReference::debug_step_in_enabled_address(
    Isolate* isolate) {
  return ExternalReference(isolate->debug()->step_in_enabled_address());
}
1553 
1554 
// Untagged offset of a FixedTypedArrayBase's data, carried as a fake
// address (kHeapObjectTag is subtracted for tagged-pointer arithmetic).
ExternalReference ExternalReference::fixed_typed_array_base_data_offset() {
  return ExternalReference(reinterpret_cast<void*>(
      FixedTypedArrayBase::kDataOffset - kHeapObjectTag));
}
1559 
1560 
// Two ExternalReferences are equal iff they wrap the same address.
bool operator==(ExternalReference lhs, ExternalReference rhs) {
  return lhs.address() == rhs.address();
}
1564 
1565 
// Defined as the negation of operator==.
bool operator!=(ExternalReference lhs, ExternalReference rhs) {
  return !(lhs == rhs);
}
1569 
1570 
// Hashes an ExternalReference by its wrapped address.
size_t hash_value(ExternalReference reference) {
  return base::hash<Address>()(reference.address());
}
1574 
1575 
// Streams the reference's address, annotated with the runtime function
// name when the address is a known runtime entry.
std::ostream& operator<<(std::ostream& os, ExternalReference reference) {
  os << static_cast<const void*>(reference.address());
  const Runtime::Function* fn = Runtime::FunctionForEntry(reference.address());
  if (fn) os << "<" << fn->name << ".entry>";
  return os;
}
1582 
1583 
// Records a (non-statement) source position for the current pc offset and
// forwards it to the JIT line-info event listener.
void PositionsRecorder::RecordPosition(int pos) {
  DCHECK(pos != RelocInfo::kNoPosition);
  DCHECK(pos >= 0);
  state_.current_position = pos;
  LOG_CODE_EVENT(assembler_->isolate(),
                 CodeLinePosInfoAddPositionEvent(jit_handler_data_,
                                                 assembler_->pc_offset(),
                                                 pos));
}
1593 
1594 
// Records a statement-level source position for the current pc offset and
// forwards it to the JIT line-info event listener.
void PositionsRecorder::RecordStatementPosition(int pos) {
  DCHECK(pos != RelocInfo::kNoPosition);
  DCHECK(pos >= 0);
  state_.current_statement_position = pos;
  LOG_CODE_EVENT(assembler_->isolate(),
                 CodeLinePosInfoAddStatementPositionEvent(
                     jit_handler_data_,
                     assembler_->pc_offset(),
                     pos));
}
1605 
1606 
// Emits pending position reloc info into the instruction stream.  Returns
// true if anything was written.
bool PositionsRecorder::WriteRecordedPositions() {
  bool written = false;

  // Write the statement position if it is different from what was written last
  // time.
  if (state_.current_statement_position != state_.written_statement_position) {
    EnsureSpace ensure_space(assembler_);
    assembler_->RecordRelocInfo(RelocInfo::STATEMENT_POSITION,
                                state_.current_statement_position);
    // Intentional: a statement position also counts as the last written
    // plain position, so the check below can skip a duplicate.
    state_.written_position = state_.current_statement_position;
    state_.written_statement_position = state_.current_statement_position;
    written = true;
  }

  // Write the position if it is different from what was written last time and
  // also different from the statement position that was just written.
  if (state_.current_position != state_.written_position) {
    EnsureSpace ensure_space(assembler_);
    assembler_->RecordRelocInfo(RelocInfo::POSITION, state_.current_position);
    state_.written_position = state_.current_position;
    written = true;
  }

  // Return whether something was written.
  return written;
}
1633 
1634 
ConstantPoolBuilder(int ptr_reach_bits,int double_reach_bits)1635 ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
1636                                          int double_reach_bits) {
1637   info_[ConstantPoolEntry::INTPTR].entries.reserve(64);
1638   info_[ConstantPoolEntry::INTPTR].regular_reach_bits = ptr_reach_bits;
1639   info_[ConstantPoolEntry::DOUBLE].regular_reach_bits = double_reach_bits;
1640 }
1641 
1642 
NextAccess(ConstantPoolEntry::Type type) const1643 ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
1644     ConstantPoolEntry::Type type) const {
1645   const PerTypeEntryInfo& info = info_[type];
1646 
1647   if (info.overflow()) return ConstantPoolEntry::OVERFLOWED;
1648 
1649   int dbl_count = info_[ConstantPoolEntry::DOUBLE].regular_count;
1650   int dbl_offset = dbl_count * kDoubleSize;
1651   int ptr_count = info_[ConstantPoolEntry::INTPTR].regular_count;
1652   int ptr_offset = ptr_count * kPointerSize + dbl_offset;
1653 
1654   if (type == ConstantPoolEntry::DOUBLE) {
1655     // Double overflow detection must take into account the reach for both types
1656     int ptr_reach_bits = info_[ConstantPoolEntry::INTPTR].regular_reach_bits;
1657     if (!is_uintn(dbl_offset, info.regular_reach_bits) ||
1658         (ptr_count > 0 &&
1659          !is_uintn(ptr_offset + kDoubleSize - kPointerSize, ptr_reach_bits))) {
1660       return ConstantPoolEntry::OVERFLOWED;
1661     }
1662   } else {
1663     DCHECK(type == ConstantPoolEntry::INTPTR);
1664     if (!is_uintn(ptr_offset, info.regular_reach_bits)) {
1665       return ConstantPoolEntry::OVERFLOWED;
1666     }
1667   }
1668 
1669   return ConstantPoolEntry::REGULAR;
1670 }
1671 
1672 
// Adds |entry| to the pool, merging it with an existing identical shared
// entry when allowed.  Returns the access mode assigned to the entry:
// REGULAR if it fits in the limited-reach section (or was merged there),
// OVERFLOWED otherwise.
ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
    ConstantPoolEntry& entry, ConstantPoolEntry::Type type) {
  // No entries may be added after the pool has been emitted (the label is
  // bound during emission).
  DCHECK(!emitted_label_.is_bound());
  PerTypeEntryInfo& info = info_[type];
  const int entry_size = ConstantPoolEntry::size(type);
  bool merged = false;

  if (entry.sharing_ok()) {
    // Try to merge entries: linear scan of previously added sharable entries
    // for one that holds the same 32/64-bit value.
    std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin();
    int end = static_cast<int>(info.shared_entries.size());
    for (int i = 0; i < end; i++, it++) {
      if ((entry_size == kPointerSize) ? entry.value() == it->value()
                                       : entry.value64() == it->value64()) {
        // Merge with found entry: remember its index so the load can later be
        // patched with the shared entry's offset.
        entry.set_merged_index(i);
        merged = true;
        break;
      }
    }
  }

  // By definition, merged entries have regular access.
  DCHECK(!merged || entry.merged_index() < info.regular_count);
  ConstantPoolEntry::Access access =
      (merged ? ConstantPoolEntry::REGULAR : NextAccess(type));

  // Enforce an upper bound on search time by limiting the search to
  // unique sharable entries which fit in the regular section.
  if (entry.sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
    info.shared_entries.push_back(entry);
  } else {
    info.entries.push_back(entry);
  }

  // We're done if we found a match or have already triggered the
  // overflow state.
  if (merged || info.overflow()) return access;

  if (access == ConstantPoolEntry::REGULAR) {
    info.regular_count++;
  } else {
    // First overflowing entry for this type: remember where the overflow
    // section of info.entries starts (the entry was just pushed above).
    info.overflow_start = static_cast<int>(info.entries.size()) - 1;
  }

  return access;
}
1720 
1721 
EmitSharedEntries(Assembler * assm,ConstantPoolEntry::Type type)1722 void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
1723                                             ConstantPoolEntry::Type type) {
1724   PerTypeEntryInfo& info = info_[type];
1725   std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
1726   const int entry_size = ConstantPoolEntry::size(type);
1727   int base = emitted_label_.pos();
1728   DCHECK(base > 0);
1729   int shared_end = static_cast<int>(shared_entries.size());
1730   std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin();
1731   for (int i = 0; i < shared_end; i++, shared_it++) {
1732     int offset = assm->pc_offset() - base;
1733     shared_it->set_offset(offset);  // Save offset for merged entries.
1734     if (entry_size == kPointerSize) {
1735       assm->dp(shared_it->value());
1736     } else {
1737       assm->dq(shared_it->value64());
1738     }
1739     DCHECK(is_uintn(offset, info.regular_reach_bits));
1740 
1741     // Patch load sequence with correct offset.
1742     assm->PatchConstantPoolAccessInstruction(shared_it->position(), offset,
1743                                              ConstantPoolEntry::REGULAR, type);
1744   }
1745 }
1746 
1747 
// Emits one group of entries — the regular or the overflow section — for the
// given type, and patches each originating load instruction with the entry's
// offset from the pool start.  For the REGULAR group, shared entries are
// emitted first so that merged entries can reuse their offsets.
void ConstantPoolBuilder::EmitGroup(Assembler* assm,
                                    ConstantPoolEntry::Access access,
                                    ConstantPoolEntry::Type type) {
  PerTypeEntryInfo& info = info_[type];
  const bool overflow = info.overflow();
  std::vector<ConstantPoolEntry>& entries = info.entries;
  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
  const int entry_size = ConstantPoolEntry::size(type);
  int base = emitted_label_.pos();
  DCHECK(base > 0);
  int begin;
  int end;

  if (access == ConstantPoolEntry::REGULAR) {
    // Emit any shared entries first
    EmitSharedEntries(assm, type);
  }

  // Select the [begin, end) slice of info.entries to emit: the regular
  // section runs up to overflow_start (or the whole list if no overflow);
  // the overflow section starts at overflow_start.
  if (access == ConstantPoolEntry::REGULAR) {
    begin = 0;
    end = overflow ? info.overflow_start : static_cast<int>(entries.size());
  } else {
    DCHECK(access == ConstantPoolEntry::OVERFLOWED);
    if (!overflow) return;  // Nothing overflowed for this type.
    begin = info.overflow_start;
    end = static_cast<int>(entries.size());
  }

  std::vector<ConstantPoolEntry>::iterator it = entries.begin();
  if (begin > 0) std::advance(it, begin);
  for (int i = begin; i < end; i++, it++) {
    // Update constant pool if necessary and get the entry's offset.
    int offset;
    ConstantPoolEntry::Access entry_access;
    if (!it->is_merged()) {
      // Emit new entry
      offset = assm->pc_offset() - base;
      entry_access = access;
      if (entry_size == kPointerSize) {
        assm->dp(it->value());
      } else {
        assm->dq(it->value64());
      }
    } else {
      // Retrieve offset from shared entry.
      offset = shared_entries[it->merged_index()].offset();
      entry_access = ConstantPoolEntry::REGULAR;
    }

    // Regular entries (including merged ones) must lie within the regular
    // reach; overflowed entries are exempt.
    DCHECK(entry_access == ConstantPoolEntry::OVERFLOWED ||
           is_uintn(offset, info.regular_reach_bits));

    // Patch load sequence with correct offset.
    assm->PatchConstantPoolAccessInstruction(it->position(), offset,
                                             entry_access, type);
  }
}
1805 
1806 
1807 // Emit and return position of pool.  Zero implies no constant pool.
Emit(Assembler * assm)1808 int ConstantPoolBuilder::Emit(Assembler* assm) {
1809   bool emitted = emitted_label_.is_bound();
1810   bool empty = IsEmpty();
1811 
1812   if (!emitted) {
1813     // Mark start of constant pool.  Align if necessary.
1814     if (!empty) assm->DataAlign(kDoubleSize);
1815     assm->bind(&emitted_label_);
1816     if (!empty) {
1817       // Emit in groups based on access and type.
1818       // Emit doubles first for alignment purposes.
1819       EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::DOUBLE);
1820       EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::INTPTR);
1821       if (info_[ConstantPoolEntry::DOUBLE].overflow()) {
1822         assm->DataAlign(kDoubleSize);
1823         EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
1824                   ConstantPoolEntry::DOUBLE);
1825       }
1826       if (info_[ConstantPoolEntry::INTPTR].overflow()) {
1827         EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
1828                   ConstantPoolEntry::INTPTR);
1829       }
1830     }
1831   }
1832 
1833   return !empty ? emitted_label_.pos() : 0;
1834 }
1835 
1836 
1837 // Platform specific but identical code for all the platforms.
1838 
1839 
RecordDeoptReason(const int reason,const SourcePosition position)1840 void Assembler::RecordDeoptReason(const int reason,
1841                                   const SourcePosition position) {
1842   if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling()) {
1843     EnsureSpace ensure_space(this);
1844     int raw_position = position.IsUnknown() ? 0 : position.raw();
1845     RecordRelocInfo(RelocInfo::POSITION, raw_position);
1846     RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
1847   }
1848 }
1849 
1850 
RecordComment(const char * msg)1851 void Assembler::RecordComment(const char* msg) {
1852   if (FLAG_code_comments) {
1853     EnsureSpace ensure_space(this);
1854     RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
1855   }
1856 }
1857 
1858 
RecordGeneratorContinuation()1859 void Assembler::RecordGeneratorContinuation() {
1860   EnsureSpace ensure_space(this);
1861   RecordRelocInfo(RelocInfo::GENERATOR_CONTINUATION);
1862 }
1863 
1864 
RecordDebugBreakSlot(RelocInfo::Mode mode)1865 void Assembler::RecordDebugBreakSlot(RelocInfo::Mode mode) {
1866   EnsureSpace ensure_space(this);
1867   DCHECK(RelocInfo::IsDebugBreakSlot(mode));
1868   RecordRelocInfo(mode);
1869 }
1870 
1871 
DataAlign(int m)1872 void Assembler::DataAlign(int m) {
1873   DCHECK(m >= 2 && base::bits::IsPowerOfTwo32(m));
1874   while ((pc_offset() & (m - 1)) != 0) {
1875     db(0);
1876   }
1877 }
1878 }  // namespace internal
1879 }  // namespace v8
1880