// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X87

#include "src/crankshaft/x87/lithium-codegen-x87.h"

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/deoptimizer.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/x87/frames-x87.h"

namespace v8 {
namespace internal {

// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetTotalFrameSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
  }
}


#ifdef _MSC_VER
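// On Windows, stack memory is committed one page at a time, and the stack may
// not be accessed more than one page below the most recently mapped page.
// Touch each page of the newly reserved area in order so that it is all
// mapped before it is used.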
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ mov(Operand(esp, offset), eax);
  }
}
#endif


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    DCHECK(!frame_is_built_);
    frame_is_built_ = true;
    if (info()->IsStub()) {
      __ StubPrologue(StackFrame::STUB);
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
    }
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  DCHECK(slots != 0 || !info()->IsOptimizing());
  if (slots > 0) {
    __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
    MakeSureStackPagesMapped(slots * kPointerSize);
#endif
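    // In debug builds, fill the reserved slots with a recognizable sentinel
    // (kSlotsZapValue) so that reads from uninitialized spill slots are easy
    // to spot.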
    if (FLAG_debug_code) {
      __ push(eax);
      __ mov(Operand(eax), Immediate(slots));
      Label loop;
      __ bind(&loop);
      __ mov(MemOperand(esp, eax, times_4, 0), Immediate(kSlotsZapValue));
      __ dec(eax);
      __ j(not_zero, &loop);
      __ pop(eax);
    }
  }

  // Initialize FPU state.
  __ fninit();

  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info_->scope()->NeedsContext()) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is still in edi.
    int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(edi);
      __ Push(info()->scope()->scope_info());
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else {
      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
        FastNewFunctionContextStub stub(isolate());
        __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
               Immediate(slots));
        __ CallStub(&stub);
        // Result of FastNewFunctionContextStub is always in new space.
        need_write_barrier = false;
      } else {
        __ push(edi);
        __ CallRuntime(Runtime::kNewFunctionContext);
      }
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in eax.  It replaces the context passed to us.
    // It's saved in the stack and kept live in esi.
    __ mov(esi, eax);
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);

    // Copy parameters into context if necessary.
    int num_parameters = info()->scope()->num_parameters();
    int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? info()->scope()->receiver()
                                : info()->scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers eax and ebx.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(esi, context_offset, eax, ebx,
                                    kDontSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // The interpreter is now the first-tier compiler. It runs code generated by
  // the TurboFan compiler, which always leaves a "1" on the x87 FPU stack.
  // That would trip Crankshaft's x87 FPU stack depth check in debug mode, so
  // reset the FPU stack here.
  __ fninit();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ sub(esp, Immediate(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
  FlushX87StackIfNecessary(instr);
}


void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  // After returning from a function call, the FPU must be reinitialized; a
  // double result is spilled and reloaded around the fninit below.
  if (instr->IsCall() && instr->ClobbersDoubleRegisters(isolate())) {
    bool double_result = instr->HasDoubleRegisterResult();
    if (double_result) {
      __ lea(esp, Operand(esp, -kDoubleSize));
      __ fstp_d(Operand(esp, 0));
    }
    __ fninit();
    if (double_result) {
      __ fld_d(Operand(esp, 0));
      __ lea(esp, Operand(esp, kDoubleSize));
    }
  }
  if (instr->IsGoto()) {
    x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr), this);
  } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
             !instr->IsGap() && !instr->IsReturn()) {
    if (instr->ClobbersDoubleRegisters(isolate())) {
      if (instr->HasDoubleRegisterResult()) {
        DCHECK_EQ(1, x87_stack_.depth());
      } else {
        DCHECK_EQ(0, x87_stack_.depth());
      }
    }
    __ VerifyX87StackDepth(x87_stack_.depth());
  }
}


bool LCodeGen::GenerateJumpTable() {
  if (!jump_table_.length()) return !is_aborted();

  Label needs_frame;
  Comment(";;; -------------------- Jump table --------------------");

  for (int i = 0; i < jump_table_.length(); i++) {
    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
    __ bind(&table_entry->label);
    Address entry = table_entry->address;
    DeoptComment(table_entry->deopt_info);
    if (table_entry->needs_frame) {
      DCHECK(!info()->saves_caller_doubles());
      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
      __ call(&needs_frame);
    } else {
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  if (needs_frame.is_linked()) {
    __ bind(&needs_frame);
    /* stack layout
       3: entry address
       2: return address  <-- esp
       1: garbage
       0: garbage
    */
    __ push(MemOperand(esp, 0));                 // Copy return address.
    __ push(MemOperand(esp, 2 * kPointerSize));  // Copy entry address.

    /* stack layout
       3: entry address
       2: return address
       1: return address
       0: entry address  <-- esp
    */
    __ mov(MemOperand(esp, 3 * kPointerSize), ebp);  // Save ebp.
    // Fill ebp with the right stack frame address.
    __ lea(ebp, MemOperand(esp, 3 * kPointerSize));

    // This variant of deopt can only be used with stubs. Since we don't
    // have a function pointer to install in the stack frame that we're
    // building, install a special marker there instead.
    DCHECK(info()->IsStub());
    __ mov(MemOperand(esp, 2 * kPointerSize),
           Immediate(Smi::FromInt(StackFrame::STUB)));

    /* stack layout
       3: old ebp
       2: stub marker
       1: return address
       0: entry address  <-- esp
    */
    __ ret(0);  // Call the continuation without clobbering registers.
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      X87Stack copy(code->x87_stack());
      x87_stack_ = copy;

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(value->position());

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that esi isn't trashed.
        __ push(ebp);  // Caller's frame pointer.
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        __ lea(ebp, Operand(esp, TypedFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        frame_is_built_ = false;
        __ mov(esp, ebp);
        __ pop(ebp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // For lazy deoptimization we need space to patch a call after every call.
    // Ensure there is always space for such patching, even if the code ends
    // in a call.
    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
    while (masm()->pc_offset() < target_offset) {
      masm()->nop();
    }
  }
  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int code) const {
  return Register::from_code(code);
}


X87Register LCodeGen::ToX87Register(int code) const {
  return X87Register::from_code(code);
}


void LCodeGen::X87LoadForUsage(X87Register reg) {
  DCHECK(x87_stack_.Contains(reg));
  x87_stack_.Fxch(reg);
  x87_stack_.pop();
}


void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
  DCHECK(x87_stack_.Contains(reg1));
  DCHECK(x87_stack_.Contains(reg2));
  if (reg1.is(reg2) && x87_stack_.depth() == 1) {
    __ fld(x87_stack_.st(reg1));
    x87_stack_.push(reg1);
    x87_stack_.pop();
    x87_stack_.pop();
  } else {
    x87_stack_.Fxch(reg1, 1);
    x87_stack_.Fxch(reg2);
    x87_stack_.pop();
    x87_stack_.pop();
  }
}


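// Encode the virtual x87 stack as an integer: bits 0..2 hold the stack depth,
// and each following 3-bit field holds the code of one stack register, with
// st(0) in bits 3..5 and deeper slots in successively higher bits.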
int LCodeGen::X87Stack::GetLayout() {
  int layout = stack_depth_;
  for (int i = 0; i < stack_depth_; i++) {
    layout |= (stack_[stack_depth_ - 1 - i].code() << ((i + 1) * 3));
  }

  return layout;
}


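// Exchange the stack slot holding `reg` with stack slot `other_slot`. A
// single fxch can only swap with st(0), so when neither slot is st(0) the
// swap is done as three exchanges through st(0), which leaves st(0) itself
// unchanged.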
void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
  DCHECK(is_mutable_);
  DCHECK(Contains(reg) && stack_depth_ > other_slot);
  int i  = ArrayIndex(reg);
  int st = st2idx(i);
  if (st != other_slot) {
    int other_i = st2idx(other_slot);
    X87Register other = stack_[other_i];
    stack_[other_i]   = reg;
    stack_[i]         = other;
    if (st == 0) {
      __ fxch(other_slot);
    } else if (other_slot == 0) {
      __ fxch(st);
    } else {
      __ fxch(st);
      __ fxch(other_slot);
      __ fxch(st);
    }
  }
}


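// Convert between an x87 stack position (st(pos), counted from the top) and
// an index into stack_, which keeps the bottom of the stack at index 0. The
// mapping is its own inverse.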
int LCodeGen::X87Stack::st2idx(int pos) {
  return stack_depth_ - pos - 1;
}


int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return i;
  }
  UNREACHABLE();
  return -1;
}


bool LCodeGen::X87Stack::Contains(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return true;
  }
  return false;
}


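// Remove `reg` from the virtual stack. fstp(st) stores st(0) into st(st) and
// then pops, so when `reg` is not on top, the value that was on top of the
// stack ends up in the freed slot; the array bookkeeping mirrors that move.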
void LCodeGen::X87Stack::Free(X87Register reg) {
  DCHECK(is_mutable_);
  DCHECK(Contains(reg));
  int i  = ArrayIndex(reg);
  int st = st2idx(i);
  if (st > 0) {
    // keep track of how fstp(i) changes the order of elements
    int tos_i = st2idx(0);
    stack_[i] = stack_[tos_i];
  }
  pop();
  __ fstp(st);
}


void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
  if (x87_stack_.Contains(dst)) {
    x87_stack_.Fxch(dst);
    __ fstp(0);
  } else {
    x87_stack_.push(dst);
  }
  X87Fld(src, opts);
}


void LCodeGen::X87Mov(X87Register dst, X87Register src, X87OperandType opts) {
  if (x87_stack_.Contains(dst)) {
    x87_stack_.Fxch(dst);
    __ fstp(0);
    x87_stack_.pop();
    // Push ST(i) onto the FPU register stack
    __ fld(x87_stack_.st(src));
    x87_stack_.push(dst);
  } else {
    // Push ST(i) onto the FPU register stack
    __ fld(x87_stack_.st(src));
    x87_stack_.push(dst);
  }
}


void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
  DCHECK(!src.is_reg_only());
  switch (opts) {
    case kX87DoubleOperand:
      __ fld_d(src);
      break;
    case kX87FloatOperand:
      __ fld_s(src);
      break;
    case kX87IntOperand:
      __ fild_s(src);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
  DCHECK(!dst.is_reg_only());
  x87_stack_.Fxch(src);
  switch (opts) {
    case kX87DoubleOperand:
      __ fst_d(dst);
      break;
    case kX87FloatOperand:
      __ fst_s(dst);
      break;
    case kX87IntOperand:
      __ fist_s(dst);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
  DCHECK(is_mutable_);
  if (Contains(reg)) {
    Free(reg);
  }
  // Mark this register as the next register to write to
  stack_[stack_depth_] = reg;
}


void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
  DCHECK(is_mutable_);
  // Assert the reg is prepared to write, but not on the virtual stack yet
  DCHECK(!Contains(reg) && stack_[stack_depth_].is(reg) &&
         stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
  stack_depth_++;
}


void LCodeGen::X87PrepareBinaryOp(
    X87Register left, X87Register right, X87Register result) {
  // You need to use DefineSameAsFirst for x87 instructions
  DCHECK(result.is(left));
  x87_stack_.Fxch(right, 1);
  x87_stack_.Fxch(left);
}


void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
  if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) {
    bool double_inputs = instr->HasDoubleRegisterInput();

    // Flush the stack from tos down, since Free() will mess with tos.
    for (int i = stack_depth_-1; i >= 0; i--) {
      X87Register reg = stack_[i];
      // Skip registers which contain the inputs for the next instruction
      // when flushing the stack
      if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
        continue;
      }
      Free(reg);
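      // When skipped inputs remain above slot i, Free() has moved the former
      // top array element into slot i; undo the loop decrement so that slot
      // is re-examined.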
      if (i < stack_depth_-1) i++;
    }
  }
  if (instr->IsReturn()) {
    while (stack_depth_ > 0) {
      __ fstp(0);
      stack_depth_--;
    }
    if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
  }
}


void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr,
                                      LCodeGen* cgen) {
  // When jumping to a join block, an explicit LClobberDoubles is inserted
  // before the LGoto, so all used x87 registers have been spilled to stack
  // slots, and the ResolvePhis phase of the register allocator guarantees
  // that both inputs' x87 stacks have the same layout. So don't check
  // stack_depth_ <= 1 here.
  int goto_block_id = goto_instr->block_id();
  if (current_block_id + 1 != goto_block_id) {
    // If we have a value on the x87 stack on leaving a block, it must be a
    // phi input. If the next block we compile is not the join block, we have
    // to discard the stack state.
    // Before discarding the stack state, we need to save it if the last
    // predecessor of the "goto block" is unreachable and
    // FLAG_unreachable_code_elimination is on.
    if (FLAG_unreachable_code_elimination) {
      int length = goto_instr->block()->predecessors()->length();
      bool has_unreachable_last_predecessor = false;
      for (int i = 0; i < length; i++) {
        HBasicBlock* block = goto_instr->block()->predecessors()->at(i);
        if (block->IsUnreachable() &&
            (block->block_id() + 1) == goto_block_id) {
          has_unreachable_last_predecessor = true;
        }
      }
      if (has_unreachable_last_predecessor) {
        if (cgen->x87_stack_map_.find(goto_block_id) ==
            cgen->x87_stack_map_.end()) {
          X87Stack* stack = new (cgen->zone()) X87Stack(*this);
          cgen->x87_stack_map_.insert(std::make_pair(goto_block_id, stack));
        }
      }
    }

    // Discard the stack state.
    stack_depth_ = 0;
  }
}


void LCodeGen::EmitFlushX87ForDeopt() {
  // The deoptimizer does not support X87 registers. But as long as we deopt
  // from a stub it's not a problem, since we will re-materialize the
  // original stub inputs, which can't be double registers.
  // DCHECK(info()->IsStub());
  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
    __ pushfd();
    __ VerifyX87StackDepth(x87_stack_.depth());
    __ popfd();
  }

  // Flush X87 stack in the deoptimizer entry.
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


X87Register LCodeGen::ToX87Register(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToX87Register(op->index());
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  if (r.IsExternal()) {
    return reinterpret_cast<int32_t>(
        constant->ExternalReferenceValue().address());
  }
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


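// With no eager frame, parameters live directly above the return address on
// the stack: index -1 maps to the slot just above the saved PC, -2 to the
// slot above that, and so on.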
static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, FrameSlotToFPOffset(op->index()));
  } else {
    // There is no eager stack frame; retrieve the parameter relative to the
    // stack pointer.
    return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


Operand LCodeGen::HighOperand(LOperand* op) {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, FrameSlotToFPOffset(op->index()) + kPointerSize);
  } else {
    // There is no eager stack frame; retrieve the parameter relative to the
    // stack pointer.
    return Operand(
        esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


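// Emit one translation command for `op`. A materialization marker stands for
// an object that escape analysis dematerialized; its field values are stored
// past the end of the regular environment and are emitted recursively here.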
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    X87Register reg = ToX87Register(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntime(const Runtime::Function* fun, int argc,
                           LInstruction* instr, SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);
  DCHECK(instr->HasPointerMap());

  __ CallRuntime(fun, argc, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);

  DCHECK(info()->is_calling());
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(esi)) {
      __ mov(esi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ mov(esi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);

  DCHECK(info()->is_calling());
}


void LCodeGen::RegisterEnvironmentForDeoptimization(
    LEnvironment* environment, Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            DeoptimizeReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

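  // Stress mode: decrement a global counter at every potential deopt site and
  // force the deopt whenever it reaches zero, then reset it to
  // FLAG_deopt_every_n_times.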
  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ mov(eax, Operand::StaticVariable(count));
    __ sub(eax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
    DCHECK(frame_is_built_);
    // Put the x87 stack layout in TOS.
    if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();
    __ push(Immediate(x87_stack_.GetLayout()));
    __ fild_s(MemOperand(esp, 0));
    // Don't touch eflags.
    __ lea(esp, Operand(esp, kPointerSize));
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
  }

  // Put the x87 stack layout in TOS, so that we can save x87 fp registers in
  // the correct location.
  {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();

    int x87_stack_layout = x87_stack_.GetLayout();
    __ push(Immediate(x87_stack_layout));
    __ fild_s(MemOperand(esp, 0));
    // Don't touch eflags.
    __ lea(esp, Operand(esp, kPointerSize));
    __ bind(&done);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);

  DCHECK(info()->IsStub() || frame_is_built_);
  if (cc == no_condition && frame_is_built_) {
    DeoptComment(deopt_info);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}

void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            DeoptimizeReason deopt_reason) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  if (label->block()->predecessors()->length() > 1) {
    // A join block's x87 stack is that of its last visited predecessor. If
    // the last visited predecessor block is unreachable, the stack state will
    // be wrong. In that case, use the x87 stack of a reachable predecessor.
    X87StackMap::const_iterator it = x87_stack_map_.find(current_block_);
    // Restore x87 stack.
    if (it != x87_stack_map_.end()) {
      x87_stack_ = *(it->second);
    }
  }
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
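  // The mask is |divisor| - 1 in both arms; spelling it -(divisor + 1) in the
  // negative case avoids overflow when divisor is kMinInt.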
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ test(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    // Note that this is correct even for kMinInt operands.
    __ neg(dividend);
    __ and_(dividend, mask);
    __ neg(dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
    }
    __ jmp(&done, Label::kNear);
  }

  __ bind(&dividend_is_not_negative);
  __ and_(dividend, mask);
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(eax));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

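  // Compute eax = dividend - (dividend / divisor) * divisor. TruncatingDiv
  // computes the truncating quotient by magic-number multiplication and
  // leaves it in edx; since the remainder's sign follows the dividend, using
  // Abs(divisor) throughout gives the same result.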
  __ TruncatingDiv(dividend, Abs(divisor));
  __ imul(edx, edx, Abs(divisor));
  __ mov(eax, dividend);
  __ sub(eax, edx);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    __ cmp(dividend, Immediate(0));
    DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  DCHECK(left_reg.is(eax));
  Register right_reg = ToRegister(instr->right());
  DCHECK(!right_reg.is(eax));
  DCHECK(!right_reg.is(edx));
  Register result_reg = ToRegister(instr->result());
  DCHECK(result_reg.is(edx));

  Label done;
  // Check for x % 0, idiv would signal a divide error. We have to
  // deopt in this case because we can't return a NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, Operand(right_reg));
    DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for kMinInt % -1, idiv would signal a divide error. We
  // have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ cmp(left_reg, kMinInt);
    __ j(not_equal, &no_overflow_possible, Label::kNear);
    __ cmp(right_reg, -1);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(equal, instr, DeoptimizeReason::kMinusZero);
    } else {
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Move(result_reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // Sign extend dividend in eax into edx:eax.
  __ cdq();

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive_left;
    __ test(left_reg, Operand(left_reg));
    __ j(not_sign, &positive_left, Label::kNear);
    __ idiv(right_reg);
    __ test(result_reg, Operand(result_reg));
    DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
  }
  __ idiv(right_reg);
  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmp(dividend, kMinInt);
    DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ test(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
  }
  __ Move(result, dividend);
  int32_t shift = WhichPowerOf2Abs(divisor);
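  // An arithmetic right shift truncates toward negative infinity, so negative
  // dividends need a bias of 2^shift - 1 added first to get truncation toward
  // zero. The sar/shr pair below materializes that bias branch-free: it is
  // 2^shift - 1 for negative dividends and 0 otherwise.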
  if (shift > 0) {
    // The arithmetic shift is always OK, the 'if' is an optimization only.
    if (shift > 1) __ sar(result, 31);
    __ shr(result, 32 - shift);
    __ add(result, dividend);
    __ sar(result, shift);
  }
  if (divisor < 0) __ neg(result);
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(edx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ neg(edx);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ mov(eax, edx);
    __ imul(eax, eax, divisor);
    __ sub(eax, dividend);
    DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  DCHECK(dividend.is(eax));
  DCHECK(remainder.is(edx));
  DCHECK(ToRegister(instr->result()).is(eax));
  DCHECK(!divisor.is(eax));
  DCHECK(!divisor.is(edx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(divisor, divisor);
    DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ test(divisor, divisor);
    DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmp(divisor, -1);
    DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to edx (= remainder).
  __ cdq();
  __ idiv(divisor);

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ test(remainder, remainder);
    DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
  }
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  if (divisor == 1) return;
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sar(dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ neg(dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sar(dividend, shift);
    return;
  }

  Label not_kmin_int, done;
  __ j(no_overflow, &not_kmin_int, Label::kNear);
  __ mov(dividend, Immediate(kMinInt / divisor));
  __ jmp(&done, Label::kNear);
  __ bind(&not_kmin_int);
  __ sar(dividend, shift);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(edx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(dividend, Abs(divisor));
    if (divisor < 0) __ neg(edx);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp3());
  DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
  Label needs_adjustment, done;
  __ cmp(dividend, Immediate(0));
  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ neg(edx);
  __ jmp(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
  __ TruncatingDiv(temp, Abs(divisor));
  if (divisor < 0) __ neg(edx);
  __ dec(edx);
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());
  DCHECK(dividend.is(eax));
  DCHECK(remainder.is(edx));
  DCHECK(result.is(eax));
  DCHECK(!divisor.is(eax));
  DCHECK(!divisor.is(edx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(divisor, divisor);
    DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ test(divisor, divisor);
    DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmp(divisor, -1);
    DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to edx (= remainder).
  __ cdq();
  __ idiv(divisor);

1527   Label done;
1528   __ test(remainder, remainder);
1529   __ j(zero, &done, Label::kNear);
1530   __ xor_(remainder, divisor);
1531   __ sar(remainder, 31);
1532   __ add(result, remainder);
1533   __ bind(&done);
1534 }
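
// NOTE (illustrative worked example, not part of the original file): the
// tail above fixes up idiv's truncating quotient: when the remainder is
// non-zero and its sign differs from the divisor's, the flooring quotient is
// one less. 'xor remainder, divisor' makes the remainder's sign bit the XOR
// of the two signs, 'sar remainder, 31' smears it into 0 or -1, and the add
// applies the correction. E.g. -7 idiv 2 leaves eax == -3, edx == -1;
// (-1 xor 2) has the sign bit set, sar 31 gives -1, and -3 + -1 == -4 ==
// floor(-7 / 2).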
1535 
1536 
1537 void LCodeGen::DoMulI(LMulI* instr) {
1538   Register left = ToRegister(instr->left());
1539   LOperand* right = instr->right();
1540 
1541   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1542     __ mov(ToRegister(instr->temp()), left);
1543   }
1544 
1545   if (right->IsConstantOperand()) {
1546     // Try strength reductions on the multiplication.
1547     // All replacement instructions are at most as long as the imul
1548     // and have better latency.
1549     int constant = ToInteger32(LConstantOperand::cast(right));
1550     if (constant == -1) {
1551       __ neg(left);
1552     } else if (constant == 0) {
1553       __ xor_(left, Operand(left));
1554     } else if (constant == 2) {
1555       __ add(left, Operand(left));
1556     } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1557       // If we know that the multiplication can't overflow, it's safe to
1558       // use instructions that don't set the overflow flag for the
1559       // multiplication.
1560       switch (constant) {
1561         case 1:
1562           // Do nothing.
1563           break;
1564         case 3:
1565           __ lea(left, Operand(left, left, times_2, 0));
1566           break;
1567         case 4:
1568           __ shl(left, 2);
1569           break;
1570         case 5:
1571           __ lea(left, Operand(left, left, times_4, 0));
1572           break;
1573         case 8:
1574           __ shl(left, 3);
1575           break;
1576         case 9:
1577           __ lea(left, Operand(left, left, times_8, 0));
1578           break;
1579         case 16:
1580           __ shl(left, 4);
1581           break;
1582         default:
1583           __ imul(left, left, constant);
1584           break;
1585       }
1586     } else {
1587       __ imul(left, left, constant);
1588     }
1589   } else {
1590     if (instr->hydrogen()->representation().IsSmi()) {
1591       __ SmiUntag(left);
1592     }
1593     __ imul(left, ToOperand(right));
1594   }
1595 
1596   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1597     DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
1598   }
1599 
1600   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1601     // Bail out if the result is supposed to be negative zero.
1602     Label done;
1603     __ test(left, Operand(left));
1604     __ j(not_zero, &done);
1605     if (right->IsConstantOperand()) {
1606       if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1607         DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
1608       } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1609         __ cmp(ToRegister(instr->temp()), Immediate(0));
1610         DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
1611       }
1612     } else {
1613       // Test the non-zero operand for negative sign.
1614       __ or_(ToRegister(instr->temp()), ToOperand(right));
1615       DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
1616     }
1617     __ bind(&done);
1618   }
1619 }
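
// NOTE (illustrative worked example, not part of the original file): the
// strength reductions above exploit lea computing base + index * scale in a
// single instruction. E.g. for left == 7:
//   * 3 -> lea(left, left, times_2) == 7 + 7 * 2 == 21
//   * 5 -> lea(left, left, times_4) == 7 + 7 * 4 == 35
//   * 9 -> lea(left, left, times_8) == 7 + 7 * 8 == 63
// Since lea sets no flags, these forms are only usable when kCanOverflow is
// not set; otherwise imul is emitted so the overflow flag can be checked.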
1620 
1621 
1622 void LCodeGen::DoBitI(LBitI* instr) {
1623   LOperand* left = instr->left();
1624   LOperand* right = instr->right();
1625   DCHECK(left->Equals(instr->result()));
1626   DCHECK(left->IsRegister());
1627 
1628   if (right->IsConstantOperand()) {
1629     int32_t right_operand =
1630         ToRepresentation(LConstantOperand::cast(right),
1631                          instr->hydrogen()->representation());
1632     switch (instr->op()) {
1633       case Token::BIT_AND:
1634         __ and_(ToRegister(left), right_operand);
1635         break;
1636       case Token::BIT_OR:
1637         __ or_(ToRegister(left), right_operand);
1638         break;
1639       case Token::BIT_XOR:
1640         if (right_operand == int32_t(~0)) {
1641           __ not_(ToRegister(left));
1642         } else {
1643           __ xor_(ToRegister(left), right_operand);
1644         }
1645         break;
1646       default:
1647         UNREACHABLE();
1648         break;
1649     }
1650   } else {
1651     switch (instr->op()) {
1652       case Token::BIT_AND:
1653         __ and_(ToRegister(left), ToOperand(right));
1654         break;
1655       case Token::BIT_OR:
1656         __ or_(ToRegister(left), ToOperand(right));
1657         break;
1658       case Token::BIT_XOR:
1659         __ xor_(ToRegister(left), ToOperand(right));
1660         break;
1661       default:
1662         UNREACHABLE();
1663         break;
1664     }
1665   }
1666 }
1667 
1668 
1669 void LCodeGen::DoShiftI(LShiftI* instr) {
1670   LOperand* left = instr->left();
1671   LOperand* right = instr->right();
1672   DCHECK(left->Equals(instr->result()));
1673   DCHECK(left->IsRegister());
1674   if (right->IsRegister()) {
1675     DCHECK(ToRegister(right).is(ecx));
1676 
1677     switch (instr->op()) {
1678       case Token::ROR:
1679         __ ror_cl(ToRegister(left));
1680         break;
1681       case Token::SAR:
1682         __ sar_cl(ToRegister(left));
1683         break;
1684       case Token::SHR:
1685         __ shr_cl(ToRegister(left));
1686         if (instr->can_deopt()) {
1687           __ test(ToRegister(left), ToRegister(left));
1688           DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
1689         }
1690         break;
1691       case Token::SHL:
1692         __ shl_cl(ToRegister(left));
1693         break;
1694       default:
1695         UNREACHABLE();
1696         break;
1697     }
1698   } else {
1699     int value = ToInteger32(LConstantOperand::cast(right));
1700     uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1701     switch (instr->op()) {
1702       case Token::ROR:
1703         if (shift_count == 0 && instr->can_deopt()) {
1704           __ test(ToRegister(left), ToRegister(left));
1705           DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
1706         } else {
1707           __ ror(ToRegister(left), shift_count);
1708         }
1709         break;
1710       case Token::SAR:
1711         if (shift_count != 0) {
1712           __ sar(ToRegister(left), shift_count);
1713         }
1714         break;
1715       case Token::SHR:
1716         if (shift_count != 0) {
1717           __ shr(ToRegister(left), shift_count);
1718         } else if (instr->can_deopt()) {
1719           __ test(ToRegister(left), ToRegister(left));
1720           DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
1721         }
1722         break;
1723       case Token::SHL:
1724         if (shift_count != 0) {
1725           if (instr->hydrogen_value()->representation().IsSmi() &&
1726               instr->can_deopt()) {
1727             if (shift_count != 1) {
1728               __ shl(ToRegister(left), shift_count - 1);
1729             }
1730             __ SmiTag(ToRegister(left));
1731             DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
1732           } else {
1733             __ shl(ToRegister(left), shift_count);
1734           }
1735         }
1736         break;
1737       default:
1738         UNREACHABLE();
1739         break;
1740     }
1741   }
1742 }
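
// NOTE (illustrative worked example, not part of the original file): the
// deopt on SHR with shift_count == 0 covers JavaScript's 'x >>> 0', which
// reinterprets x as uint32. For x == -1 the bits are 0xFFFFFFFF ==
// 4294967295, which does not fit in a signed 32-bit result, so a negative
// input (sign flag set) must deoptimize when the result is consumed as
// int32.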
1743 
1744 
1745 void LCodeGen::DoSubI(LSubI* instr) {
1746   LOperand* left = instr->left();
1747   LOperand* right = instr->right();
1748   DCHECK(left->Equals(instr->result()));
1749 
1750   if (right->IsConstantOperand()) {
1751     __ sub(ToOperand(left),
1752            ToImmediate(right, instr->hydrogen()->representation()));
1753   } else {
1754     __ sub(ToRegister(left), ToOperand(right));
1755   }
1756   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1757     DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
1758   }
1759 }
1760 
1761 
1762 void LCodeGen::DoConstantI(LConstantI* instr) {
1763   __ Move(ToRegister(instr->result()), Immediate(instr->value()));
1764 }
1765 
1766 
1767 void LCodeGen::DoConstantS(LConstantS* instr) {
1768   __ Move(ToRegister(instr->result()), Immediate(instr->value()));
1769 }
1770 
1771 
1772 void LCodeGen::DoConstantD(LConstantD* instr) {
1773   uint64_t const bits = instr->bits();
1774   uint32_t const lower = static_cast<uint32_t>(bits);
1775   uint32_t const upper = static_cast<uint32_t>(bits >> 32);
1776   DCHECK(instr->result()->IsDoubleRegister());
1777 
1778   __ push(Immediate(upper));
1779   __ push(Immediate(lower));
1780   X87Register reg = ToX87Register(instr->result());
1781   X87Mov(reg, Operand(esp, 0));
1782   __ add(Operand(esp), Immediate(kDoubleSize));
1783 }
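
// NOTE (illustrative worked example, not part of the original file): the
// double constant is materialized by pushing its two 32-bit halves and
// loading through memory, since x87 has no FP move-immediate. E.g. 1.5 has
// the IEEE bit pattern 0x3FF8000000000000, so upper == 0x3FF80000 is pushed
// first and lower == 0x00000000 second, leaving the low word at the lower
// address as the little-endian load expects.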
1784 
1785 
1786 void LCodeGen::DoConstantE(LConstantE* instr) {
1787   __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
1788 }
1789 
1790 
1791 void LCodeGen::DoConstantT(LConstantT* instr) {
1792   Register reg = ToRegister(instr->result());
1793   Handle<Object> object = instr->value(isolate());
1794   AllowDeferredHandleDereference smi_check;
1795   __ LoadObject(reg, object);
1796 }
1797 
1798 
1799 Operand LCodeGen::BuildSeqStringOperand(Register string,
1800                                         LOperand* index,
1801                                         String::Encoding encoding) {
1802   if (index->IsConstantOperand()) {
1803     int offset = ToRepresentation(LConstantOperand::cast(index),
1804                                   Representation::Integer32());
1805     if (encoding == String::TWO_BYTE_ENCODING) {
1806       offset *= kUC16Size;
1807     }
1808     STATIC_ASSERT(kCharSize == 1);
1809     return FieldOperand(string, SeqString::kHeaderSize + offset);
1810   }
1811   return FieldOperand(
1812       string, ToRegister(index),
1813       encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
1814       SeqString::kHeaderSize);
1815 }
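
// NOTE (illustrative worked example, not part of the original file): for a
// constant index 3 into a two-byte string, the offset is 3 * kUC16Size == 6
// bytes past SeqString::kHeaderSize (FieldOperand folds in the heap object
// tag adjustment). For a register index the encoding is expressed through
// the operand scale instead: times_1 for one-byte and times_2 for two-byte
// characters.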
1816 
1817 
1818 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1819   String::Encoding encoding = instr->hydrogen()->encoding();
1820   Register result = ToRegister(instr->result());
1821   Register string = ToRegister(instr->string());
1822 
1823   if (FLAG_debug_code) {
1824     __ push(string);
1825     __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
1826     __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
1827 
1828     __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
1829     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1830     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1831     __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
1832                              ? one_byte_seq_type : two_byte_seq_type));
1833     __ Check(equal, kUnexpectedStringType);
1834     __ pop(string);
1835   }
1836 
1837   Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1838   if (encoding == String::ONE_BYTE_ENCODING) {
1839     __ movzx_b(result, operand);
1840   } else {
1841     __ movzx_w(result, operand);
1842   }
1843 }
1844 
1845 
1846 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1847   String::Encoding encoding = instr->hydrogen()->encoding();
1848   Register string = ToRegister(instr->string());
1849 
1850   if (FLAG_debug_code) {
1851     Register value = ToRegister(instr->value());
1852     Register index = ToRegister(instr->index());
1853     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1854     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1855     int encoding_mask =
1856         instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1857         ? one_byte_seq_type : two_byte_seq_type;
1858     __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1859   }
1860 
1861   Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1862   if (instr->value()->IsConstantOperand()) {
1863     int value = ToRepresentation(LConstantOperand::cast(instr->value()),
1864                                  Representation::Integer32());
1865     DCHECK_LE(0, value);
1866     if (encoding == String::ONE_BYTE_ENCODING) {
1867       DCHECK_LE(value, String::kMaxOneByteCharCode);
1868       __ mov_b(operand, static_cast<int8_t>(value));
1869     } else {
1870       DCHECK_LE(value, String::kMaxUtf16CodeUnit);
1871       __ mov_w(operand, static_cast<int16_t>(value));
1872     }
1873   } else {
1874     Register value = ToRegister(instr->value());
1875     if (encoding == String::ONE_BYTE_ENCODING) {
1876       __ mov_b(operand, value);
1877     } else {
1878       __ mov_w(operand, value);
1879     }
1880   }
1881 }
1882 
1883 
1884 void LCodeGen::DoAddI(LAddI* instr) {
1885   LOperand* left = instr->left();
1886   LOperand* right = instr->right();
1887 
1888   if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
1889     if (right->IsConstantOperand()) {
1890       int32_t offset = ToRepresentation(LConstantOperand::cast(right),
1891                                         instr->hydrogen()->representation());
1892       __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
1893     } else {
1894       Operand address(ToRegister(left), ToRegister(right), times_1, 0);
1895       __ lea(ToRegister(instr->result()), address);
1896     }
1897   } else {
1898     if (right->IsConstantOperand()) {
1899       __ add(ToOperand(left),
1900              ToImmediate(right, instr->hydrogen()->representation()));
1901     } else {
1902       __ add(ToRegister(left), ToOperand(right));
1903     }
1904     if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1905       DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
1906     }
1907   }
1908 }
1909 
1910 
1911 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1912   LOperand* left = instr->left();
1913   LOperand* right = instr->right();
1914   DCHECK(left->Equals(instr->result()));
1915   HMathMinMax::Operation operation = instr->hydrogen()->operation();
1916   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1917     Label return_left;
1918     Condition condition = (operation == HMathMinMax::kMathMin)
1919         ? less_equal
1920         : greater_equal;
1921     if (right->IsConstantOperand()) {
1922       Operand left_op = ToOperand(left);
1923       Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
1924                                         instr->hydrogen()->representation());
1925       __ cmp(left_op, immediate);
1926       __ j(condition, &return_left, Label::kNear);
1927       __ mov(left_op, immediate);
1928     } else {
1929       Register left_reg = ToRegister(left);
1930       Operand right_op = ToOperand(right);
1931       __ cmp(left_reg, right_op);
1932       __ j(condition, &return_left, Label::kNear);
1933       __ mov(left_reg, right_op);
1934     }
1935     __ bind(&return_left);
1936   } else {
1937     DCHECK(instr->hydrogen()->representation().IsDouble());
1938     Label check_nan_left, check_zero, return_left, return_right;
1939     Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
1940     X87Register left_reg = ToX87Register(left);
1941     X87Register right_reg = ToX87Register(right);
1942 
1943     X87PrepareBinaryOp(left_reg, right_reg, ToX87Register(instr->result()));
1944     __ fld(1);
1945     __ fld(1);
1946     __ FCmp();
1947     __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
1948     __ j(equal, &check_zero, Label::kNear);            // left == right.
1949     __ j(condition, &return_left, Label::kNear);
1950     __ jmp(&return_right, Label::kNear);
1951 
1952     __ bind(&check_zero);
1953     __ fld(0);
1954     __ fldz();
1955     __ FCmp();
1956     __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
1957     // At this point, both left and right are either 0 or -0.
1958     if (operation == HMathMinMax::kMathMin) {
1959       // Push st0 and st1 onto the stack, pop them into temp registers,
1960       // OR their bit patterns, and load the result back into left_reg.
1961       Register scratch_reg = ToRegister(instr->temp());
1962       __ fld(1);
1963       __ fld(1);
1964       __ sub(esp, Immediate(2 * kPointerSize));
1965       __ fstp_s(MemOperand(esp, 0));
1966       __ fstp_s(MemOperand(esp, kPointerSize));
1967       __ pop(scratch_reg);
1968       __ or_(MemOperand(esp, 0), scratch_reg);
1969       X87Mov(left_reg, MemOperand(esp, 0), kX87FloatOperand);
1970       __ pop(scratch_reg);  // restore esp
1971     } else {
1972       // Since both operands are +0 and/or -0, addition has the same
1973       // effect a bitwise AND would; the result is left in st(0).
1974       __ fadd_i(1);
1975     }
1976     __ jmp(&return_left, Label::kNear);
1977 
1978     __ bind(&check_nan_left);
1979     __ fld(0);
1980     __ fld(0);
1981     __ FCmp();                                      // NaN check.
1982     __ j(parity_even, &return_left, Label::kNear);  // left == NaN.
1983 
1984     __ bind(&return_right);
1985     X87Mov(left_reg, right_reg);
1986 
1987     __ bind(&return_left);
1988   }
1989 }
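
// NOTE (illustrative worked example, not part of the original file): the
// zero handling above distinguishes +0 from -0, which compare equal. In IEEE
// single precision +0 is 0x00000000 and -0 is 0x80000000, so OR-ing the two
// stored words yields -0 whenever either input is -0, which is exactly
// min(+0, -0). For max, fadd suffices: (+0) + (-0) == +0, while
// (-0) + (-0) == -0.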
1990 
1991 
1992 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1993   X87Register left = ToX87Register(instr->left());
1994   X87Register right = ToX87Register(instr->right());
1995   X87Register result = ToX87Register(instr->result());
1996   if (instr->op() != Token::MOD) {
1997     X87PrepareBinaryOp(left, right, result);
1998   }
1999   // Set the precision control to double-precision.
2000   __ X87SetFPUCW(0x027F);
2001   switch (instr->op()) {
2002     case Token::ADD:
2003       __ fadd_i(1);
2004       break;
2005     case Token::SUB:
2006       __ fsub_i(1);
2007       break;
2008     case Token::MUL:
2009       __ fmul_i(1);
2010       break;
2011     case Token::DIV:
2012       __ fdiv_i(1);
2013       break;
2014     case Token::MOD: {
2015       // Pass two doubles as arguments on the stack.
2016       __ PrepareCallCFunction(4, eax);
2017       X87Mov(Operand(esp, 1 * kDoubleSize), right);
2018       X87Mov(Operand(esp, 0), left);
2019       X87Free(right);
2020       DCHECK(left.is(result));
2021       X87PrepareToWrite(result);
2022       __ CallCFunction(
2023           ExternalReference::mod_two_doubles_operation(isolate()),
2024           4);
2025 
2026       // Return value is in st(0) on ia32.
2027       X87CommitWrite(result);
2028       break;
2029     }
2030     default:
2031       UNREACHABLE();
2032       break;
2033   }
2034 
2035   // Restore the default value of control word.
2036   __ X87SetFPUCW(0x037F);
2037 }
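
// NOTE (illustrative, not part of the original file): in the x87 FPU control
// word, bits 8-9 form the precision control field. 0x027F selects 10b
// (round to 53-bit double precision) so intermediate x87 results match IEEE
// doubles, while the default 0x037F selects 11b (64-bit extended precision).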
2038 
2039 
2040 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2041   DCHECK(ToRegister(instr->context()).is(esi));
2042   DCHECK(ToRegister(instr->left()).is(edx));
2043   DCHECK(ToRegister(instr->right()).is(eax));
2044   DCHECK(ToRegister(instr->result()).is(eax));
2045 
2046   Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
2047   CallCode(code, RelocInfo::CODE_TARGET, instr);
2048 }
2049 
2050 
2051 template<class InstrType>
2052 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2053   int left_block = instr->TrueDestination(chunk_);
2054   int right_block = instr->FalseDestination(chunk_);
2055 
2056   int next_block = GetNextEmittedBlock();
2057 
2058   if (right_block == left_block || cc == no_condition) {
2059     EmitGoto(left_block);
2060   } else if (left_block == next_block) {
2061     __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2062   } else if (right_block == next_block) {
2063     __ j(cc, chunk_->GetAssemblyLabel(left_block));
2064   } else {
2065     __ j(cc, chunk_->GetAssemblyLabel(left_block));
2066     __ jmp(chunk_->GetAssemblyLabel(right_block));
2067   }
2068 }
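
// NOTE (illustrative, not part of the original file): EmitBranch emits at
// most one jump when it can fall through. E.g. if the true block is the next
// block to be emitted, only 'j(NegateCondition(cc), false_label)' is
// generated and the true case falls through; only when neither target is
// next does it cost a conditional jump plus an unconditional one.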
2069 
2070 
2071 template <class InstrType>
2072 void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
2073   int true_block = instr->TrueDestination(chunk_);
2074   if (cc == no_condition) {
2075     __ jmp(chunk_->GetAssemblyLabel(true_block));
2076   } else {
2077     __ j(cc, chunk_->GetAssemblyLabel(true_block));
2078   }
2079 }
2080 
2081 
2082 template<class InstrType>
2083 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2084   int false_block = instr->FalseDestination(chunk_);
2085   if (cc == no_condition) {
2086     __ jmp(chunk_->GetAssemblyLabel(false_block));
2087   } else {
2088     __ j(cc, chunk_->GetAssemblyLabel(false_block));
2089   }
2090 }
2091 
2092 
2093 void LCodeGen::DoBranch(LBranch* instr) {
2094   Representation r = instr->hydrogen()->value()->representation();
2095   if (r.IsSmiOrInteger32()) {
2096     Register reg = ToRegister(instr->value());
2097     __ test(reg, Operand(reg));
2098     EmitBranch(instr, not_zero);
2099   } else if (r.IsDouble()) {
2100     X87Register reg = ToX87Register(instr->value());
2101     X87LoadForUsage(reg);
2102     __ fldz();
2103     __ FCmp();
2104     EmitBranch(instr, not_zero);
2105   } else {
2106     DCHECK(r.IsTagged());
2107     Register reg = ToRegister(instr->value());
2108     HType type = instr->hydrogen()->value()->type();
2109     if (type.IsBoolean()) {
2110       DCHECK(!info()->IsStub());
2111       __ cmp(reg, factory()->true_value());
2112       EmitBranch(instr, equal);
2113     } else if (type.IsSmi()) {
2114       DCHECK(!info()->IsStub());
2115       __ test(reg, Operand(reg));
2116       EmitBranch(instr, not_equal);
2117     } else if (type.IsJSArray()) {
2118       DCHECK(!info()->IsStub());
2119       EmitBranch(instr, no_condition);
2120     } else if (type.IsHeapNumber()) {
2121       UNREACHABLE();
2122     } else if (type.IsString()) {
2123       DCHECK(!info()->IsStub());
2124       __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2125       EmitBranch(instr, not_equal);
2126     } else {
2127       ToBooleanHints expected = instr->hydrogen()->expected_input_types();
2128       if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
2129 
2130       if (expected & ToBooleanHint::kUndefined) {
2131         // undefined -> false.
2132         __ cmp(reg, factory()->undefined_value());
2133         __ j(equal, instr->FalseLabel(chunk_));
2134       }
2135       if (expected & ToBooleanHint::kBoolean) {
2136         // true -> true.
2137         __ cmp(reg, factory()->true_value());
2138         __ j(equal, instr->TrueLabel(chunk_));
2139         // false -> false.
2140         __ cmp(reg, factory()->false_value());
2141         __ j(equal, instr->FalseLabel(chunk_));
2142       }
2143       if (expected & ToBooleanHint::kNull) {
2144         // 'null' -> false.
2145         __ cmp(reg, factory()->null_value());
2146         __ j(equal, instr->FalseLabel(chunk_));
2147       }
2148 
2149       if (expected & ToBooleanHint::kSmallInteger) {
2150         // Smis: 0 -> false, all others -> true.
2151         __ test(reg, Operand(reg));
2152         __ j(equal, instr->FalseLabel(chunk_));
2153         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2154       } else if (expected & ToBooleanHint::kNeedsMap) {
2155         // If we need a map later and have a Smi -> deopt.
2156         __ test(reg, Immediate(kSmiTagMask));
2157         DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
2158       }
2159 
2160       Register map = no_reg;  // Keep the compiler happy.
2161       if (expected & ToBooleanHint::kNeedsMap) {
2162         map = ToRegister(instr->temp());
2163         DCHECK(!map.is(reg));
2164         __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
2165 
2166         if (expected & ToBooleanHint::kCanBeUndetectable) {
2167           // Undetectable -> false.
2168           __ test_b(FieldOperand(map, Map::kBitFieldOffset),
2169                     Immediate(1 << Map::kIsUndetectable));
2170           __ j(not_zero, instr->FalseLabel(chunk_));
2171         }
2172       }
2173 
2174       if (expected & ToBooleanHint::kReceiver) {
2175         // spec object -> true.
2176         __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
2177         __ j(above_equal, instr->TrueLabel(chunk_));
2178       }
2179 
2180       if (expected & ToBooleanHint::kString) {
2181         // String value -> false iff empty.
2182         Label not_string;
2183         __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2184         __ j(above_equal, &not_string, Label::kNear);
2185         __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2186         __ j(not_zero, instr->TrueLabel(chunk_));
2187         __ jmp(instr->FalseLabel(chunk_));
2188         __ bind(&not_string);
2189       }
2190 
2191       if (expected & ToBooleanHint::kSymbol) {
2192         // Symbol value -> true.
2193         __ CmpInstanceType(map, SYMBOL_TYPE);
2194         __ j(equal, instr->TrueLabel(chunk_));
2195       }
2196 
2197       if (expected & ToBooleanHint::kSimdValue) {
2198         // SIMD value -> true.
2199         __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
2200         __ j(equal, instr->TrueLabel(chunk_));
2201       }
2202 
2203       if (expected & ToBooleanHint::kHeapNumber) {
2204         // heap number -> false iff +0, -0, or NaN.
2205         Label not_heap_number;
2206         __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2207                factory()->heap_number_map());
2208         __ j(not_equal, &not_heap_number, Label::kNear);
2209         __ fldz();
2210         __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
2211         __ FCmp();
2212         __ j(zero, instr->FalseLabel(chunk_));
2213         __ jmp(instr->TrueLabel(chunk_));
2214         __ bind(&not_heap_number);
2215       }
2216 
2217       if (expected != ToBooleanHint::kAny) {
2218         // We've seen something for the first time -> deopt.
2219         // This can only happen if we are not generic already.
2220         DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
2221       }
2222     }
2223   }
2224 }
2225 
2226 
2227 void LCodeGen::EmitGoto(int block) {
2228   if (!IsNextEmittedBlock(block)) {
2229     __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2230   }
2231 }
2232 
2233 
2234 void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
2235 }
2236 
2237 
2238 void LCodeGen::DoGoto(LGoto* instr) {
2239   EmitGoto(instr->block_id());
2240 }
2241 
2242 
2243 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2244   Condition cond = no_condition;
2245   switch (op) {
2246     case Token::EQ:
2247     case Token::EQ_STRICT:
2248       cond = equal;
2249       break;
2250     case Token::NE:
2251     case Token::NE_STRICT:
2252       cond = not_equal;
2253       break;
2254     case Token::LT:
2255       cond = is_unsigned ? below : less;
2256       break;
2257     case Token::GT:
2258       cond = is_unsigned ? above : greater;
2259       break;
2260     case Token::LTE:
2261       cond = is_unsigned ? below_equal : less_equal;
2262       break;
2263     case Token::GTE:
2264       cond = is_unsigned ? above_equal : greater_equal;
2265       break;
2266     case Token::IN:
2267     case Token::INSTANCEOF:
2268     default:
2269       UNREACHABLE();
2270   }
2271   return cond;
2272 }
2273 
2274 
2275 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2276   LOperand* left = instr->left();
2277   LOperand* right = instr->right();
2278   bool is_unsigned =
2279       instr->is_double() ||
2280       instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2281       instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2282   Condition cc = TokenToCondition(instr->op(), is_unsigned);
2283 
2284   if (left->IsConstantOperand() && right->IsConstantOperand()) {
2285     // We can statically evaluate the comparison.
2286     double left_val = ToDouble(LConstantOperand::cast(left));
2287     double right_val = ToDouble(LConstantOperand::cast(right));
2288     int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
2289                          ? instr->TrueDestination(chunk_)
2290                          : instr->FalseDestination(chunk_);
2291     EmitGoto(next_block);
2292   } else {
2293     if (instr->is_double()) {
2294       X87LoadForUsage(ToX87Register(right), ToX87Register(left));
2295       __ FCmp();
2296       // Don't base result on EFLAGS when a NaN is involved. Instead
2297       // jump to the false block.
2298       __ j(parity_even, instr->FalseLabel(chunk_));
2299     } else {
2300       if (right->IsConstantOperand()) {
2301         __ cmp(ToOperand(left),
2302                ToImmediate(right, instr->hydrogen()->representation()));
2303       } else if (left->IsConstantOperand()) {
2304         __ cmp(ToOperand(right),
2305                ToImmediate(left, instr->hydrogen()->representation()));
2306         // We commuted the operands, so commute the condition.
2307         cc = CommuteCondition(cc);
2308       } else {
2309         __ cmp(ToRegister(left), ToOperand(right));
2310       }
2311     }
2312     EmitBranch(instr, cc);
2313   }
2314 }
2315 
2316 
2317 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2318   Register left = ToRegister(instr->left());
2319 
2320   if (instr->right()->IsConstantOperand()) {
2321     Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2322     __ CmpObject(left, right);
2323   } else {
2324     Operand right = ToOperand(instr->right());
2325     __ cmp(left, right);
2326   }
2327   EmitBranch(instr, equal);
2328 }
2329 
2330 
2331 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2332   if (instr->hydrogen()->representation().IsTagged()) {
2333     Register input_reg = ToRegister(instr->object());
2334     __ cmp(input_reg, factory()->the_hole_value());
2335     EmitBranch(instr, equal);
2336     return;
2337   }
2338 
2339   // Load the value onto the top of the FPU stack.
2340   X87Register src = ToX87Register(instr->object());
2341   X87LoadForUsage(src);
2342   __ fld(0);
2343   __ fld(0);
2344   __ FCmp();
2345   Label ok;
2346   __ j(parity_even, &ok, Label::kNear);
2347   __ fstp(0);
2348   EmitFalseBranch(instr, no_condition);
2349   __ bind(&ok);
2350 
2351 
2352   __ sub(esp, Immediate(kDoubleSize));
2353   __ fstp_d(MemOperand(esp, 0));
2354 
2355   __ add(esp, Immediate(kDoubleSize));
2356   int offset = sizeof(kHoleNanUpper32);
2357   __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
2358   EmitBranch(instr, equal);
2359 }
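
// NOTE (illustrative, not part of the original file): the hole is encoded as
// a NaN with a distinguished bit pattern, so an ordinary FP compare cannot
// identify it (any NaN merely sets the parity flag). The code above first
// rules out non-NaN values, then spills the double to [esp-8, esp) and
// compares only its upper 32 bits, at esp - 4, against kHoleNanUpper32,
// which is what distinguishes the hole from other NaNs here.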
2360 
2361 
2362 Condition LCodeGen::EmitIsString(Register input,
2363                                  Register temp1,
2364                                  Label* is_not_string,
2365                                  SmiCheck check_needed = INLINE_SMI_CHECK) {
2366   if (check_needed == INLINE_SMI_CHECK) {
2367     __ JumpIfSmi(input, is_not_string);
2368   }
2369 
2370   Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2371 
2372   return cond;
2373 }
2374 
2375 
2376 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2377   Register reg = ToRegister(instr->value());
2378   Register temp = ToRegister(instr->temp());
2379 
2380   SmiCheck check_needed =
2381       instr->hydrogen()->value()->type().IsHeapObject()
2382           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2383 
2384   Condition true_cond = EmitIsString(
2385       reg, temp, instr->FalseLabel(chunk_), check_needed);
2386 
2387   EmitBranch(instr, true_cond);
2388 }
2389 
2390 
2391 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2392   Operand input = ToOperand(instr->value());
2393 
2394   __ test(input, Immediate(kSmiTagMask));
2395   EmitBranch(instr, zero);
2396 }
2397 
2398 
2399 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2400   Register input = ToRegister(instr->value());
2401   Register temp = ToRegister(instr->temp());
2402 
2403   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2404     STATIC_ASSERT(kSmiTag == 0);
2405     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2406   }
2407   __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2408   __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
2409             Immediate(1 << Map::kIsUndetectable));
2410   EmitBranch(instr, not_zero);
2411 }
2412 
2413 
2414 static Condition ComputeCompareCondition(Token::Value op) {
2415   switch (op) {
2416     case Token::EQ_STRICT:
2417     case Token::EQ:
2418       return equal;
2419     case Token::LT:
2420       return less;
2421     case Token::GT:
2422       return greater;
2423     case Token::LTE:
2424       return less_equal;
2425     case Token::GTE:
2426       return greater_equal;
2427     default:
2428       UNREACHABLE();
2429       return no_condition;
2430   }
2431 }
2432 
2433 
2434 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2435   DCHECK(ToRegister(instr->context()).is(esi));
2436   DCHECK(ToRegister(instr->left()).is(edx));
2437   DCHECK(ToRegister(instr->right()).is(eax));
2438 
2439   Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
2440   CallCode(code, RelocInfo::CODE_TARGET, instr);
2441   __ CompareRoot(eax, Heap::kTrueValueRootIndex);
2442   EmitBranch(instr, equal);
2443 }
2444 
2445 
2446 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2447   InstanceType from = instr->from();
2448   InstanceType to = instr->to();
2449   if (from == FIRST_TYPE) return to;
2450   DCHECK(from == to || to == LAST_TYPE);
2451   return from;
2452 }
2453 
2454 
2455 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2456   InstanceType from = instr->from();
2457   InstanceType to = instr->to();
2458   if (from == to) return equal;
2459   if (to == LAST_TYPE) return above_equal;
2460   if (from == FIRST_TYPE) return below_equal;
2461   UNREACHABLE();
2462   return equal;
2463 }
2464 
2465 
2466 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2467   Register input = ToRegister(instr->value());
2468   Register temp = ToRegister(instr->temp());
2469 
2470   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2471     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2472   }
2473 
2474   __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
2475   EmitBranch(instr, BranchCondition(instr->hydrogen()));
2476 }
2477 
2478 // Branches to a label or falls through with the answer in the z flag.  Trashes
2479 // the temp registers, but not the input.
2480 void LCodeGen::EmitClassOfTest(Label* is_true,
2481                                Label* is_false,
2482                                Handle<String> class_name,
2483                                Register input,
2484                                Register temp,
2485                                Register temp2) {
2486   DCHECK(!input.is(temp));
2487   DCHECK(!input.is(temp2));
2488   DCHECK(!temp.is(temp2));
2489   __ JumpIfSmi(input, is_false);
2490 
2491   __ CmpObjectType(input, FIRST_FUNCTION_TYPE, temp);
2492   STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
2493   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2494     __ j(above_equal, is_true);
2495   } else {
2496     __ j(above_equal, is_false);
2497   }
2498 
2499   // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2500   // Check if the constructor in the map is a function.
2501   __ GetMapConstructor(temp, temp, temp2);
2502   // Objects with a non-function constructor have class 'Object'.
2503   __ CmpInstanceType(temp2, JS_FUNCTION_TYPE);
2504   if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2505     __ j(not_equal, is_true);
2506   } else {
2507     __ j(not_equal, is_false);
2508   }
2509 
2510   // temp now contains the constructor function. Grab the
2511   // instance class name from there.
2512   __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2513   __ mov(temp, FieldOperand(temp,
2514                             SharedFunctionInfo::kInstanceClassNameOffset));
2515   // The class name we are testing against is internalized since it's a literal.
2516   // The name in the constructor is internalized because of the way the context
2517   // is booted.  This routine isn't expected to work for random API-created
2518   // classes and it doesn't have to because you can't access it with natives
2519   // syntax.  Since both sides are internalized it is sufficient to use an
2520   // identity comparison.
2521   __ cmp(temp, class_name);
2522   // End with the answer in the z flag.
2523 }
2524 
2525 
2526 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2527   Register input = ToRegister(instr->value());
2528   Register temp = ToRegister(instr->temp());
2529   Register temp2 = ToRegister(instr->temp2());
2530 
2531   Handle<String> class_name = instr->hydrogen()->class_name();
2532 
2533   EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2534       class_name, input, temp, temp2);
2535 
2536   EmitBranch(instr, equal);
2537 }
2538 
2539 
2540 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2541   Register reg = ToRegister(instr->value());
2542   __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2543   EmitBranch(instr, equal);
2544 }
2545 
2546 
2547 void LCodeGen::DoHasInPrototypeChainAndBranch(
2548     LHasInPrototypeChainAndBranch* instr) {
2549   Register const object = ToRegister(instr->object());
2550   Register const object_map = ToRegister(instr->scratch());
2551   Register const object_prototype = object_map;
2552   Register const prototype = ToRegister(instr->prototype());
2553 
2554   // The {object} must be a spec object.  It's sufficient to know that {object}
2555   // is not a smi, since all other non-spec objects have {null} prototypes and
2556   // will be ruled out below.
2557   if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2558     __ test(object, Immediate(kSmiTagMask));
2559     EmitFalseBranch(instr, zero);
2560   }
2561 
2562   // Loop through the {object}s prototype chain looking for the {prototype}.
2563   __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
2564   Label loop;
2565   __ bind(&loop);
2566 
2567   // Deoptimize if the object needs to be access checked.
2568   __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
2569             Immediate(1 << Map::kIsAccessCheckNeeded));
2570   DeoptimizeIf(not_zero, instr, DeoptimizeReason::kAccessCheck);
2571   // Deoptimize for proxies.
2572   __ CmpInstanceType(object_map, JS_PROXY_TYPE);
2573   DeoptimizeIf(equal, instr, DeoptimizeReason::kProxy);
2574 
2575   __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
2576   __ cmp(object_prototype, factory()->null_value());
2577   EmitFalseBranch(instr, equal);
2578   __ cmp(object_prototype, prototype);
2579   EmitTrueBranch(instr, equal);
2580   __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
2581   __ jmp(&loop);
2582 }
2583 
2584 
2585 void LCodeGen::DoCmpT(LCmpT* instr) {
2586   Token::Value op = instr->op();
2587 
2588   Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2589   CallCode(ic, RelocInfo::CODE_TARGET, instr);
2590 
2591   Condition condition = ComputeCompareCondition(op);
2592   Label true_value, done;
2593   __ test(eax, Operand(eax));
2594   __ j(condition, &true_value, Label::kNear);
2595   __ mov(ToRegister(instr->result()), factory()->false_value());
2596   __ jmp(&done, Label::kNear);
2597   __ bind(&true_value);
2598   __ mov(ToRegister(instr->result()), factory()->true_value());
2599   __ bind(&done);
2600 }
2601 
2602 void LCodeGen::EmitReturn(LReturn* instr) {
2603   int extra_value_count = 1;
2604 
2605   if (instr->has_constant_parameter_count()) {
2606     int parameter_count = ToInteger32(instr->constant_parameter_count());
2607     __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
2608   } else {
2609     DCHECK(info()->IsStub());  // Functions would need to drop one more value.
2610     Register reg = ToRegister(instr->parameter_count());
2611     // The argument count parameter is a smi.
2612     __ SmiUntag(reg);
2613     Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
2614 
2615     // Emit code to restore the stack based on instr->parameter_count().
2616     __ pop(return_addr_reg);  // Save the return address.
2617     __ shl(reg, kPointerSizeLog2);
2618     __ add(esp, reg);
2619     __ jmp(return_addr_reg);
2620   }
2621 }
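
// NOTE (illustrative worked example, not part of the original file): in the
// dynamic case the parameter count arrives as a smi, i.e. shifted left by
// one. For 3 parameters the register holds 6; SmiUntag yields 3 and shl by
// kPointerSizeLog2 (2 on ia32) gives 12 bytes, which is added to esp after
// the return address has been popped, so the jmp through return_addr_reg
// returns with the arguments removed.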
2622 
2623 
2624 void LCodeGen::DoReturn(LReturn* instr) {
2625   if (FLAG_trace && info()->IsOptimizing()) {
2626     // Preserve the return value on the stack and rely on the runtime call
2627     // to return the value in the same register.  We're leaving the code
2628     // managed by the register allocator and tearing down the frame, it's
2629     // safe to write to the context register.
2630     __ push(eax);
2631     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2632     __ CallRuntime(Runtime::kTraceExit);
2633   }
2634   if (NeedsEagerFrame()) {
2635     __ mov(esp, ebp);
2636     __ pop(ebp);
2637   }
2638 
2639   EmitReturn(instr);
2640 }
2641 
2642 
2643 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2644   Register context = ToRegister(instr->context());
2645   Register result = ToRegister(instr->result());
2646   __ mov(result, ContextOperand(context, instr->slot_index()));
2647 
2648   if (instr->hydrogen()->RequiresHoleCheck()) {
2649     __ cmp(result, factory()->the_hole_value());
2650     if (instr->hydrogen()->DeoptimizesOnHole()) {
2651       DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
2652     } else {
2653       Label is_not_hole;
2654       __ j(not_equal, &is_not_hole, Label::kNear);
2655       __ mov(result, factory()->undefined_value());
2656       __ bind(&is_not_hole);
2657     }
2658   }
2659 }
2660 
2661 
2662 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2663   Register context = ToRegister(instr->context());
2664   Register value = ToRegister(instr->value());
2665 
2666   Label skip_assignment;
2667 
2668   Operand target = ContextOperand(context, instr->slot_index());
2669   if (instr->hydrogen()->RequiresHoleCheck()) {
2670     __ cmp(target, factory()->the_hole_value());
2671     if (instr->hydrogen()->DeoptimizesOnHole()) {
2672       DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
2673     } else {
2674       __ j(not_equal, &skip_assignment, Label::kNear);
2675     }
2676   }
2677 
2678   __ mov(target, value);
2679   if (instr->hydrogen()->NeedsWriteBarrier()) {
2680     SmiCheck check_needed =
2681         instr->hydrogen()->value()->type().IsHeapObject()
2682             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2683     Register temp = ToRegister(instr->temp());
2684     int offset = Context::SlotOffset(instr->slot_index());
2685     __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs,
2686                               EMIT_REMEMBERED_SET, check_needed);
2687   }
2688 
2689   __ bind(&skip_assignment);
2690 }
2691 
2692 
2693 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2694   HObjectAccess access = instr->hydrogen()->access();
2695   int offset = access.offset();
2696 
2697   if (access.IsExternalMemory()) {
2698     Register result = ToRegister(instr->result());
2699     MemOperand operand = instr->object()->IsConstantOperand()
2700         ? MemOperand::StaticVariable(ToExternalReference(
2701                 LConstantOperand::cast(instr->object())))
2702         : MemOperand(ToRegister(instr->object()), offset);
2703     __ Load(result, operand, access.representation());
2704     return;
2705   }
2706 
2707   Register object = ToRegister(instr->object());
2708   if (instr->hydrogen()->representation().IsDouble()) {
2709     X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
2710     return;
2711   }
2712 
2713   Register result = ToRegister(instr->result());
2714   if (!access.IsInobject()) {
2715     __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
2716     object = result;
2717   }
2718   __ Load(result, FieldOperand(object, offset), access.representation());
2719 }
2720 
2721 
2722 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
2723   DCHECK(!operand->IsDoubleRegister());
2724   if (operand->IsConstantOperand()) {
2725     Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
2726     AllowDeferredHandleDereference smi_check;
2727     if (object->IsSmi()) {
2728       __ Push(Handle<Smi>::cast(object));
2729     } else {
2730       __ PushHeapObject(Handle<HeapObject>::cast(object));
2731     }
2732   } else if (operand->IsRegister()) {
2733     __ push(ToRegister(operand));
2734   } else {
2735     __ push(ToOperand(operand));
2736   }
2737 }
2738 
2739 
2740 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2741   Register function = ToRegister(instr->function());
2742   Register temp = ToRegister(instr->temp());
2743   Register result = ToRegister(instr->result());
2744 
2745   // Get the prototype or initial map from the function.
2746   __ mov(result,
2747          FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2748 
2749   // Check that the function has a prototype or an initial map.
2750   __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
2751   DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
2752 
2753   // If the function does not have an initial map, we're done.
2754   Label done;
2755   __ CmpObjectType(result, MAP_TYPE, temp);
2756   __ j(not_equal, &done, Label::kNear);
2757 
2758   // Get the prototype from the initial map.
2759   __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
2760 
2761   // All done.
2762   __ bind(&done);
2763 }
2764 
2765 
2766 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2767   Register result = ToRegister(instr->result());
2768   __ LoadRoot(result, instr->index());
2769 }
2770 
2771 
2772 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2773   Register arguments = ToRegister(instr->arguments());
2774   Register result = ToRegister(instr->result());
2775   if (instr->length()->IsConstantOperand() &&
2776       instr->index()->IsConstantOperand()) {
2777     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2778     int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2779     int index = (const_length - const_index) + 1;
2780     __ mov(result, Operand(arguments, index * kPointerSize));
2781   } else {
2782     Register length = ToRegister(instr->length());
2783     Operand index = ToOperand(instr->index());
2784     // There are two words between the frame pointer and the last argument.
2785     // Subtracting from length accounts for one of them; add one more.
2786     __ sub(length, index);
2787     __ mov(result, Operand(arguments, length, times_4, kPointerSize));
2788   }
2789 }
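
// NOTE (illustrative worked example, not part of the original file): with
// const_length == 3 and const_index == 0 (the first argument), the operand
// is arguments + (3 - 0 + 1) * kPointerSize, i.e. 16 bytes up; the + 1 skips
// one of the two words between the frame pointer and the last argument, and
// the subtraction from length accounts for the other.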
2790 
2791 
2792 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2793   ElementsKind elements_kind = instr->elements_kind();
2794   LOperand* key = instr->key();
2795   if (!key->IsConstantOperand() &&
2796       ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
2797                                   elements_kind)) {
2798     __ SmiUntag(ToRegister(key));
2799   }
2800   Operand operand(BuildFastArrayOperand(
2801       instr->elements(),
2802       key,
2803       instr->hydrogen()->key()->representation(),
2804       elements_kind,
2805       instr->base_offset()));
2806   if (elements_kind == FLOAT32_ELEMENTS) {
2807     X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
2808   } else if (elements_kind == FLOAT64_ELEMENTS) {
2809     X87Mov(ToX87Register(instr->result()), operand);
2810   } else {
2811     Register result(ToRegister(instr->result()));
2812     switch (elements_kind) {
2813       case INT8_ELEMENTS:
2814         __ movsx_b(result, operand);
2815         break;
2816       case UINT8_ELEMENTS:
2817       case UINT8_CLAMPED_ELEMENTS:
2818         __ movzx_b(result, operand);
2819         break;
2820       case INT16_ELEMENTS:
2821         __ movsx_w(result, operand);
2822         break;
2823       case UINT16_ELEMENTS:
2824         __ movzx_w(result, operand);
2825         break;
2826       case INT32_ELEMENTS:
2827         __ mov(result, operand);
2828         break;
2829       case UINT32_ELEMENTS:
2830         __ mov(result, operand);
2831         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
2832           __ test(result, Operand(result));
2833           DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
2834         }
2835         break;
2836       case FLOAT32_ELEMENTS:
2837       case FLOAT64_ELEMENTS:
2838       case FAST_SMI_ELEMENTS:
2839       case FAST_ELEMENTS:
2840       case FAST_DOUBLE_ELEMENTS:
2841       case FAST_HOLEY_SMI_ELEMENTS:
2842       case FAST_HOLEY_ELEMENTS:
2843       case FAST_HOLEY_DOUBLE_ELEMENTS:
2844       case DICTIONARY_ELEMENTS:
2845       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
2846       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
2847       case FAST_STRING_WRAPPER_ELEMENTS:
2848       case SLOW_STRING_WRAPPER_ELEMENTS:
2849       case NO_ELEMENTS:
2850         UNREACHABLE();
2851         break;
2852     }
2853   }
2854 }
2855 
2856 
2857 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
2858   if (instr->hydrogen()->RequiresHoleCheck()) {
2859     Operand hole_check_operand = BuildFastArrayOperand(
2860         instr->elements(), instr->key(),
2861         instr->hydrogen()->key()->representation(),
2862         FAST_DOUBLE_ELEMENTS,
2863         instr->base_offset() + sizeof(kHoleNanLower32));
2864     __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
2865     DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
2866   }
2867 
2868   Operand double_load_operand = BuildFastArrayOperand(
2869       instr->elements(),
2870       instr->key(),
2871       instr->hydrogen()->key()->representation(),
2872       FAST_DOUBLE_ELEMENTS,
2873       instr->base_offset());
2874   X87Mov(ToX87Register(instr->result()), double_load_operand);
2875 }
2876 
2877 
2878 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
2879   Register result = ToRegister(instr->result());
2880 
2881   // Load the result.
2882   __ mov(result,
2883          BuildFastArrayOperand(instr->elements(), instr->key(),
2884                                instr->hydrogen()->key()->representation(),
2885                                FAST_ELEMENTS, instr->base_offset()));
2886 
2887   // Check for the hole value.
2888   if (instr->hydrogen()->RequiresHoleCheck()) {
2889     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
2890       __ test(result, Immediate(kSmiTagMask));
2891       DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotASmi);
2892     } else {
2893       __ cmp(result, factory()->the_hole_value());
2894       DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
2895     }
2896   } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
2897     DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
2898     Label done;
2899     __ cmp(result, factory()->the_hole_value());
2900     __ j(not_equal, &done);
2901     if (info()->IsStub()) {
2902       // A stub can safely convert the hole to undefined only if the array
2903       // protector cell contains (Smi) Isolate::kProtectorValid.
2904       // Otherwise it needs to bail out.
2905       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
2906       __ cmp(FieldOperand(result, PropertyCell::kValueOffset),
2907              Immediate(Smi::FromInt(Isolate::kProtectorValid)));
2908       DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
2909     }
2910     __ mov(result, isolate()->factory()->undefined_value());
2911     __ bind(&done);
2912   }
2913 }
2914 
2915 
2916 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
2917   if (instr->is_fixed_typed_array()) {
2918     DoLoadKeyedExternalArray(instr);
2919   } else if (instr->hydrogen()->representation().IsDouble()) {
2920     DoLoadKeyedFixedDoubleArray(instr);
2921   } else {
2922     DoLoadKeyedFixedArray(instr);
2923   }
2924 }
2925 
2926 
2927 Operand LCodeGen::BuildFastArrayOperand(
2928     LOperand* elements_pointer,
2929     LOperand* key,
2930     Representation key_representation,
2931     ElementsKind elements_kind,
2932     uint32_t base_offset) {
2933   Register elements_pointer_reg = ToRegister(elements_pointer);
2934   int element_shift_size = ElementsKindToShiftSize(elements_kind);
2935   int shift_size = element_shift_size;
2936   if (key->IsConstantOperand()) {
2937     int constant_value = ToInteger32(LConstantOperand::cast(key));
    if (constant_value & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    return Operand(elements_pointer_reg,
                   ((constant_value) << shift_size)
                       + base_offset);
  } else {
    // Take the tag bit into account while computing the shift size.
    if (key_representation.IsSmi() && (shift_size >= 1)) {
      shift_size -= kSmiTagSize;
    }
    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
    return Operand(elements_pointer_reg,
                   ToRegister(key),
                   scale_factor,
                   base_offset);
  }
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ lea(result, Operand(esp, -2 * kPointerSize));
  } else if (instr->hydrogen()->arguments_adaptor()) {
    // Check for arguments adaptor frame.
    Label done, adapted;
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
    __ mov(result,
           Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset));
    __ cmp(Operand(result),
           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
    __ j(equal, &adapted, Label::kNear);

    // No arguments adaptor frame.
    __ mov(result, Operand(ebp));
    __ jmp(&done, Label::kNear);

    // Arguments adaptor frame present.
    __ bind(&adapted);
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

    // Result is the frame pointer of this frame if not adapted, or of the
    // real frame below the adaptor frame if adapted.
    __ bind(&done);
  } else {
    __ mov(result, Operand(ebp));
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Operand elem = ToOperand(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ cmp(ebp, elem);
  __ mov(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  // Arguments adaptor frame present. Get argument length from there.
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(result, Operand(result,
                         ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label receiver_ok, global_object;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  Register scratch = ToRegister(instr->temp());

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode
    // functions.
    __ mov(scratch,
           FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
              Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
    __ j(not_equal, &receiver_ok, dist);

    // Do not transform the receiver to object for builtins.
    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
              Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
    __ j(not_equal, &receiver_ok, dist);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ cmp(receiver, factory()->null_value());
  __ j(equal, &global_object, Label::kNear);
  __ cmp(receiver, factory()->undefined_value());
  __ j(equal, &global_object, Label::kNear);

  // The receiver should be a JS object.
  __ test(receiver, Immediate(kSmiTagMask));
  DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi);
  __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
  DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);

  __ jmp(&receiver_ok, Label::kNear);
  __ bind(&global_object);
  __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
  __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
  __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
  __ bind(&receiver_ok);
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  DCHECK(receiver.is(eax));  // Used for parameter count.
  DCHECK(function.is(edi));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).is(eax));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmp(length, kArgumentsLimit);
  DeoptimizeIf(above, instr, DeoptimizeReason::kTooManyArguments);

  __ push(receiver);
  __ mov(receiver, length);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ test(length, Operand(length));
  __ j(zero, &invoke, Label::kNear);
  __ bind(&loop);
  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
  __ dec(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);

  InvokeFlag flag = CALL_FUNCTION;
  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
    DCHECK(!info()->saves_caller_doubles());
    // TODO(ishell): drop current frame before pushing arguments to the stack.
    flag = JUMP_FUNCTION;
    ParameterCount actual(eax);
    // It is safe to use ebx, ecx and edx as scratch registers here given that
    // 1) we are not going to return to caller function anyway,
    // 2) ebx (expected arguments count) and edx (new.target) will be
    //    initialized below.
    PrepareForTailCall(actual, ebx, ecx, edx);
  }

  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  ParameterCount actual(eax);
  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ int3();
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  EmitPushTaggedOperand(argument);
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in esi.
    DCHECK(result.is(esi));
  }
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  __ push(Immediate(instr->hydrogen()->pairs()));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
  __ push(Immediate(instr->hydrogen()->feedback_vector()));
  CallRuntime(Runtime::kDeclareGlobals, instr);
}

void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count, int arity,
                                 bool is_tail_call, LInstruction* instr) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  Register function_reg = edi;

  if (can_invoke_directly) {
    // Change context.
    __ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));

    // Always initialize new target and number of actual arguments.
    __ mov(edx, factory()->undefined_value());
    __ mov(eax, arity);

    bool is_self_call = function.is_identical_to(info()->closure());

    // Invoke function directly.
    if (is_self_call) {
      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
      if (is_tail_call) {
        __ Jump(self, RelocInfo::CODE_TARGET);
      } else {
        __ Call(self, RelocInfo::CODE_TARGET);
      }
    } else {
      Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
      if (is_tail_call) {
        __ jmp(target);
      } else {
        __ call(target);
      }
    }

    if (!is_tail_call) {
      // Set up deoptimization.
      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
    }
  } else {
    // We need to adapt arguments.
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount actual(arity);
    ParameterCount expected(formal_parameter_count);
    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
    __ InvokeFunction(function_reg, expected, actual, flag, generator);
  }
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(eax));

  if (instr->hydrogen()->IsTailCall()) {
    if (NeedsEagerFrame()) __ leave();

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ jmp(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
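      // target holds a tagged Code object; skipping the header and the heap
      // object tag yields the entry address.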
      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
      __ jmp(target);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      __ call(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(Operand(target)));
      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
      __ call(target);
    }
    generator.AfterCall();
  }
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);

  Label slow, allocated, done;
  uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit();
  available_regs &= ~input_reg.bit();
  if (instr->context()->IsRegister()) {
    // Make sure that the context isn't overwritten in the AllocateHeapNumber
    // macro below.
    available_regs &= ~ToRegister(instr->context()).bit();
  }

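  // Pick the two lowest-numbered registers still available as temporaries.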
  Register tmp =
      Register::from_code(base::bits::CountTrailingZeros32(available_regs));
  available_regs &= ~tmp.bit();
  Register tmp2 =
      Register::from_code(base::bits::CountTrailingZeros32(available_regs));

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ test(tmp, Immediate(HeapNumber::kSignMask));
  __ j(zero, &done, Label::kNear);

  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
  __ jmp(&allocated, Label::kNear);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
                          instr, instr->context());
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(eax)) __ mov(tmp, eax);
  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  __ and_(tmp2, ~HeapNumber::kSignMask);
  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}


void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ test(input_reg, Operand(input_reg));
  Label is_positive;
  __ j(not_sign, &is_positive, Label::kNear);
  __ neg(input_reg);  // Sets flags.
  DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
  __ bind(&is_positive);
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LMathAbs* instr,
                                    const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LMathAbs* instr_;
  };

  DCHECK(instr->value()->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  if (r.IsDouble()) {
    X87Register value = ToX87Register(instr->value());
    X87Fxch(value);
    __ fabs();
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
    Register input_reg = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  Register output_reg = ToRegister(instr->result());
  X87Register input_reg = ToX87Register(instr->value());
  X87Fxch(input_reg);

  Label not_minus_zero, done;
  // Deoptimize on unordered.
  __ fldz();
  __ fld(1);
  __ FCmp();
  DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
  __ j(below, &not_minus_zero, Label::kNear);

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Check for negative zero.
    __ j(not_equal, &not_minus_zero, Label::kNear);
    // +- 0.0.
    __ fld(0);
    __ FXamSign();
    DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
    __ Move(output_reg, Immediate(0));
    __ jmp(&done, Label::kFar);
  }

  // Positive input.
  // rc=01B, round down.
  __ bind(&not_minus_zero);
  __ fnclex();
  __ X87SetRC(0x0400);
  __ sub(esp, Immediate(kPointerSize));
  __ fist_s(Operand(esp, 0));
  __ pop(output_reg);
  __ X87SetRC(0x0000);
  __ X87CheckIA();
  DeoptimizeIf(equal, instr, DeoptimizeReason::kOverflow);
  __ fnclex();
  __ X87SetRC(0x0000);
  __ bind(&done);
}


void LCodeGen::DoMathRound(LMathRound* instr) {
  X87Register input_reg = ToX87Register(instr->value());
  Register result = ToRegister(instr->result());
  X87Fxch(input_reg);
  Label below_one_half, below_minus_one_half, done;

  ExternalReference one_half = ExternalReference::address_of_one_half();
  ExternalReference minus_one_half =
      ExternalReference::address_of_minus_one_half();

  __ fld_d(Operand::StaticVariable(one_half));
  __ fld(1);
  __ FCmp();
  __ j(carry, &below_one_half);

  // Rounding towards zero after adding 0.5 gives floor(x + 0.5), which is
  // the correct result here since 0.5 <= x.
  __ fld(0);
  __ fadd_d(Operand::StaticVariable(one_half));
  // rc=11B, round toward zero.
  __ X87SetRC(0x0c00);
  __ sub(esp, Immediate(kPointerSize));
  // Clear exception bits.
  __ fnclex();
  __ fistp_s(MemOperand(esp, 0));
  // Restore round mode.
  __ X87SetRC(0x0000);
  // Check overflow.
  __ X87CheckIA();
  __ pop(result);
  DeoptimizeIf(equal, instr, DeoptimizeReason::kConversionOverflow);
  __ fnclex();
  // Restore round mode.
  __ X87SetRC(0x0000);
  __ jmp(&done);

  __ bind(&below_one_half);
  __ fld_d(Operand::StaticVariable(minus_one_half));
  __ fld(1);
  __ FCmp();
  __ j(carry, &below_minus_one_half);
  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
  // we can ignore the difference between a result of -0 and +0.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // If the sign is positive, we return +0.
    __ fld(0);
    __ FXamSign();
    DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
  }
  __ Move(result, Immediate(0));
  __ jmp(&done);

  __ bind(&below_minus_one_half);
  __ fld(0);
  __ fadd_d(Operand::StaticVariable(one_half));
  // rc=01B, round down.
  __ X87SetRC(0x0400);
  __ sub(esp, Immediate(kPointerSize));
  // Clear exception bits.
  __ fnclex();
  __ fistp_s(MemOperand(esp, 0));
  // Restore round mode.
  __ X87SetRC(0x0000);
  // Check overflow.
  __ X87CheckIA();
  __ pop(result);
  DeoptimizeIf(equal, instr, DeoptimizeReason::kConversionOverflow);
  __ fnclex();
  // Restore round mode.
  __ X87SetRC(0x0000);

  __ bind(&done);
}


void LCodeGen::DoMathFround(LMathFround* instr) {
  X87Register input_reg = ToX87Register(instr->value());
  X87Fxch(input_reg);
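  // Round to float32 precision by spilling the value through a 4-byte stack
  // slot and reloading it.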
  __ sub(esp, Immediate(kPointerSize));
  __ fstp_s(MemOperand(esp, 0));
  X87Fld(MemOperand(esp, 0), kX87FloatOperand);
  __ add(esp, Immediate(kPointerSize));
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  X87Register input_reg = ToX87Register(instr->value());
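  // 0x027F selects 53-bit (double) precision so that fsqrt rounds like an
  // IEEE double operation; 0x037F restores the default 64-bit extended
  // precision.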
  __ X87SetFPUCW(0x027F);
  X87Fxch(input_reg);
  __ fsqrt();
  __ X87SetFPUCW(0x037F);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  X87Register input_reg = ToX87Register(instr->value());
  DCHECK(ToX87Register(instr->result()).is(input_reg));
  X87Fxch(input_reg);
  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done, sqrt;
  // Check base for -Infinity. C3 == 0, C2 == 1, C1 == 1 and C0 == 1
  __ fxam();
  __ push(eax);
  __ fnstsw_ax();
  __ and_(eax, Immediate(0x4700));
  __ cmp(eax, Immediate(0x0700));
  __ j(not_equal, &sqrt, Label::kNear);
  // If input is -Infinity, return Infinity.
  __ fchs();
  __ jmp(&done, Label::kNear);

  // Square root.
  __ bind(&sqrt);
  __ fldz();
  __ faddp();  // Convert -0 to +0.
  __ fsqrt();
  __ bind(&done);
  __ pop(eax);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  X87Register result = ToX87Register(instr->result());
  // Having marked this as a call, we can use any registers.
  X87Register base = ToX87Register(instr->left());
  ExternalReference one_half = ExternalReference::address_of_one_half();

  if (exponent_type.IsSmi()) {
    Register exponent = ToRegister(instr->right());
    X87LoadForUsage(base);
    __ SmiUntag(exponent);
    __ push(exponent);
    __ fild_s(MemOperand(esp, 0));
    __ pop(exponent);
  } else if (exponent_type.IsTagged()) {
    Register exponent = ToRegister(instr->right());
    Register temp = exponent.is(ecx) ? eax : ecx;
    Label no_deopt, done;
    X87LoadForUsage(base);
    __ JumpIfSmi(exponent, &no_deopt);
    __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp);
    DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
    // Heap number (double).
    __ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset));
    __ jmp(&done);
    // Smi.
    __ bind(&no_deopt);
    __ SmiUntag(exponent);
    __ push(exponent);
    __ fild_s(MemOperand(esp, 0));
    __ pop(exponent);
    __ bind(&done);
  } else if (exponent_type.IsInteger32()) {
    Register exponent = ToRegister(instr->right());
    X87LoadForUsage(base);
    __ push(exponent);
    __ fild_s(MemOperand(esp, 0));
    __ pop(exponent);
  } else {
    DCHECK(exponent_type.IsDouble());
    X87Register exponent_double = ToX87Register(instr->right());
    X87LoadForUsage(base, exponent_double);
  }

  // FP data stack {base, exponent(TOS)}.
  // Handle (exponent==+-0.5 && base == -0).
  Label not_plus_0;
  __ fld(0);
  __ fabs();
  X87Fld(Operand::StaticVariable(one_half), kX87DoubleOperand);
  __ FCmp();
  __ j(parity_even, &not_plus_0, Label::kNear);  // NaN.
  __ j(not_equal, &not_plus_0, Label::kNear);
  __ fldz();
  // FP data stack {base, exponent(TOS), zero}.
  __ faddp(2);
  __ bind(&not_plus_0);

  {
    __ PrepareCallCFunction(4, eax);
    __ fstp_d(MemOperand(esp, kDoubleSize));  // Exponent value.
    __ fstp_d(MemOperand(esp, 0));            // Base value.
    X87PrepareToWrite(result);
    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
                     4);
    // Return value is in st(0) on ia32.
    X87CommitWrite(result);
  }
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  DCHECK(instr->value()->Equals(instr->result()));
  X87Register result = ToX87Register(instr->result());
  X87Register input_reg = ToX87Register(instr->value());
  X87Fxch(input_reg);

  // Pass one double as argument on the stack.
  __ PrepareCallCFunction(2, eax);
  __ fstp_d(MemOperand(esp, 0));
  X87PrepareToWrite(result);
  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 2);
  // Return value is in st(0) on ia32.
  X87CommitWrite(result);
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

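  // Lzcnt is a macro that may fall back to bsr with fix-ups on CPUs without
  // the LZCNT instruction.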
  __ Lzcnt(result, input);
}

void LCodeGen::DoMathCos(LMathCos* instr) {
  X87Register result = ToX87Register(instr->result());
  X87Register input_reg = ToX87Register(instr->value());
  __ fld(x87_stack_.st(input_reg));

  // Pass one double as argument on the stack.
  __ PrepareCallCFunction(2, eax);
  __ fstp_d(MemOperand(esp, 0));
  X87PrepareToWrite(result);
  __ X87SetFPUCW(0x027F);
  __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 2);
  __ X87SetFPUCW(0x037F);
  // Return value is in st(0) on ia32.
  X87CommitWrite(result);
}

void LCodeGen::DoMathSin(LMathSin* instr) {
  X87Register result = ToX87Register(instr->result());
  X87Register input_reg = ToX87Register(instr->value());
  __ fld(x87_stack_.st(input_reg));

  // Pass one double as argument on the stack.
  __ PrepareCallCFunction(2, eax);
  __ fstp_d(MemOperand(esp, 0));
  X87PrepareToWrite(result);
  __ X87SetFPUCW(0x027F);
  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 2);
  __ X87SetFPUCW(0x037F);
  // Return value is in st(0) on ia32.
  X87CommitWrite(result);
}

void LCodeGen::DoMathExp(LMathExp* instr) {
  X87Register result = ToX87Register(instr->result());
  X87Register input_reg = ToX87Register(instr->value());
  __ fld(x87_stack_.st(input_reg));

  // Pass one double as argument on the stack.
  __ PrepareCallCFunction(2, eax);
  __ fstp_d(MemOperand(esp, 0));
  X87PrepareToWrite(result);
  __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 2);
  // Return value is in st(0) on ia32.
  X87CommitWrite(result);
}

void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
                                  Register scratch1, Register scratch2,
                                  Register scratch3) {
#if DEBUG
  if (actual.is_reg()) {
    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
  } else {
    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
  }
#endif
  if (FLAG_code_comments) {
    if (actual.is_reg()) {
      Comment(";;; PrepareForTailCall, actual: %s {",
              RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
                  actual.reg().code()));
    } else {
      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
    }
  }

  // Check if next frame is an arguments adaptor frame.
  Register caller_args_count_reg = scratch1;
  Label no_arguments_adaptor, formal_parameter_count_loaded;
  __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &no_arguments_adaptor, Label::kNear);

  // Drop current frame and load arguments count from arguments adaptor frame.
  __ mov(ebp, scratch2);
  __ mov(caller_args_count_reg,
         Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);
  __ jmp(&formal_parameter_count_loaded, Label::kNear);

  __ bind(&no_arguments_adaptor);
  // Load caller's formal parameter count.
  __ mov(caller_args_count_reg,
         Immediate(info()->literal()->parameter_count()));

  __ bind(&formal_parameter_count_loaded);
  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
                        ReturnAddressState::kNotOnStack, 0);
  Comment(";;; }");
}

void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  HInvokeFunction* hinstr = instr->hydrogen();
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->function()).is(edi));
  DCHECK(instr->HasPointerMap());

  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;

  if (is_tail_call) {
    DCHECK(!info()->saves_caller_doubles());
    ParameterCount actual(instr->arity());
    // It is safe to use ebx, ecx and edx as scratch registers here given that
    // 1) we are not going to return to caller function anyway,
    // 2) ebx (expected arguments count) and edx (new.target) will be
    //    initialized below.
    PrepareForTailCall(actual, ebx, ecx, edx);
  }

  Handle<JSFunction> known_function = hinstr->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount actual(instr->arity());
    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
    __ InvokeFunction(edi, no_reg, actual, flag, generator);
  } else {
    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
                      instr->arity(), is_tail_call, instr);
  }
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->constructor()).is(edi));
  DCHECK(ToRegister(instr->result()).is(eax));

  __ Move(eax, Immediate(instr->arity()));
  __ mov(ebx, instr->hydrogen()->site());

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // The elements kind may have to change to a holey kind: inspect the
      // first argument.
      __ mov(ecx, Operand(esp, 0));
      __ test(ecx, ecx);
      __ j(zero, &packed_case, Label::kNear);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      __ jmp(&done, Label::kNear);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate());
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
  __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ lea(result, Operand(base, ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ lea(result, Operand(base, offset, times_1, 0));
  }
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->hydrogen()->field_representation();

  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    MemOperand operand = instr->object()->IsConstantOperand()
        ? MemOperand::StaticVariable(
            ToExternalReference(LConstantOperand::cast(instr->object())))
        : MemOperand(ToRegister(instr->object()), offset);
    if (instr->value()->IsConstantOperand()) {
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
      __ mov(operand, Immediate(ToInteger32(operand_value)));
    } else {
      Register value = ToRegister(instr->value());
      __ Store(value, operand, representation);
    }
    return;
  }

  Register object = ToRegister(instr->object());
  __ AssertNotSmi(object);
  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
  if (representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    X87Register value = ToX87Register(instr->value());
    X87Mov(FieldOperand(object, offset), value);
    return;
  }

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      Register temp_map = ToRegister(instr->temp_map());
      __ mov(temp_map, transition);
      __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
    }
  }

  // Do the store.
  Register write_register = object;
  if (!access.IsInobject()) {
    write_register = ToRegister(instr->temp());
    __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
  }

  MemOperand operand = FieldOperand(write_register, offset);
  if (instr->value()->IsConstantOperand()) {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (operand_value->IsRegister()) {
      Register value = ToRegister(operand_value);
      __ Store(value, operand, representation);
    } else if (representation.IsInteger32() || representation.IsExternal()) {
      Immediate immediate = ToImmediate(operand_value, representation);
      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
      __ mov(operand, immediate);
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
      __ mov(operand, handle_value);
    }
  } else {
    Register value = ToRegister(instr->value());
    __ Store(value, operand, representation);
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    Register value = ToRegister(instr->value());
    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
    // Update the write barrier for the object for in-object properties.
    __ RecordWriteField(write_register, offset, value, temp, kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        instr->hydrogen()->SmiCheckForWriteBarrier(),
                        instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
  if (instr->index()->IsConstantOperand()) {
    __ cmp(ToOperand(instr->length()),
           ToImmediate(LConstantOperand::cast(instr->index()),
                       instr->hydrogen()->length()->representation()));
    cc = CommuteCondition(cc);
  } else if (instr->length()->IsConstantOperand()) {
    __ cmp(ToOperand(instr->index()),
           ToImmediate(LConstantOperand::cast(instr->length()),
                       instr->hydrogen()->index()->representation()));
  } else {
    __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
  }
}


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      instr->base_offset()));
  if (elements_kind == FLOAT32_ELEMENTS) {
    X87Mov(operand, ToX87Register(instr->value()), kX87FloatOperand);
  } else if (elements_kind == FLOAT64_ELEMENTS) {
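    // The hole NaN is stored as two 32-bit halves; ia32 is little-endian, so
    // the low word lives at the lower address.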
    uint64_t int_val = kHoleNanInt64;
    int32_t lower = static_cast<int32_t>(int_val);
    int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
    Operand operand2 = BuildFastArrayOperand(
        instr->elements(), instr->key(),
        instr->hydrogen()->key()->representation(), elements_kind,
        instr->base_offset() + kPointerSize);

    Label no_special_nan_handling, done;
    X87Register value = ToX87Register(instr->value());
    X87Fxch(value);
    __ lea(esp, Operand(esp, -kDoubleSize));
    __ fst_d(MemOperand(esp, 0));
    __ lea(esp, Operand(esp, kDoubleSize));
    int offset = sizeof(kHoleNanUpper32);
    __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
    __ j(not_equal, &no_special_nan_handling, Label::kNear);
    __ mov(operand, Immediate(lower));
    __ mov(operand2, Immediate(upper));
    __ jmp(&done, Label::kNear);

    __ bind(&no_special_nan_handling);
    __ fst_d(operand);
    __ bind(&done);
  } else {
    Register value = ToRegister(instr->value());
    switch (elements_kind) {
      case UINT8_ELEMENTS:
      case INT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ mov_b(operand, value);
        break;
      case UINT16_ELEMENTS:
      case INT16_ELEMENTS:
        __ mov_w(operand, value);
        break;
      case UINT32_ELEMENTS:
      case INT32_ELEMENTS:
        __ mov(operand, value);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
      case FAST_STRING_WRAPPER_ELEMENTS:
      case SLOW_STRING_WRAPPER_ELEMENTS:
      case NO_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      instr->base_offset());

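  // Split the hole NaN bit pattern into two 32-bit words for the two
  // word-sized stores below.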
4003   uint64_t int_val = kHoleNanInt64;
4004   int32_t lower = static_cast<int32_t>(int_val);
4005   int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
4006   Operand double_store_operand2 = BuildFastArrayOperand(
4007       instr->elements(), instr->key(),
4008       instr->hydrogen()->key()->representation(), FAST_DOUBLE_ELEMENTS,
4009       instr->base_offset() + kPointerSize);
4010 
4011   if (instr->hydrogen()->IsConstantHoleStore()) {
4012     // This means we should store the (double) hole. No floating point
4013     // registers required.
4014     __ mov(double_store_operand, Immediate(lower));
4015     __ mov(double_store_operand2, Immediate(upper));
4016   } else {
4017     Label no_special_nan_handling, done;
4018     X87Register value = ToX87Register(instr->value());
4019     X87Fxch(value);
4020 
4021     if (instr->NeedsCanonicalization()) {
4022       __ fld(0);
4023       __ fld(0);
4024       __ FCmp();
4025       __ j(parity_odd, &no_special_nan_handling, Label::kNear);
4026       // All NaNs are Canonicalized to 0x7fffffffffffffff
4027       __ mov(double_store_operand, Immediate(0xffffffff));
4028       __ mov(double_store_operand2, Immediate(0x7fffffff));
4029       __ jmp(&done, Label::kNear);
4030     } else {
4031       __ lea(esp, Operand(esp, -kDoubleSize));
4032       __ fst_d(MemOperand(esp, 0));
4033       __ lea(esp, Operand(esp, kDoubleSize));
4034       int offset = sizeof(kHoleNanUpper32);
4035       __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
4036       __ j(not_equal, &no_special_nan_handling, Label::kNear);
4037       __ mov(double_store_operand, Immediate(lower));
4038       __ mov(double_store_operand2, Immediate(upper));
4039       __ jmp(&done, Label::kNear);
4040     }
4041     __ bind(&no_special_nan_handling);
4042     __ fst_d(double_store_operand);
4043     __ bind(&done);
4044   }
4045 }
4046 
4047 
DoStoreKeyedFixedArray(LStoreKeyed * instr)4048 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4049   Register elements = ToRegister(instr->elements());
4050   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4051 
4052   Operand operand = BuildFastArrayOperand(
4053       instr->elements(),
4054       instr->key(),
4055       instr->hydrogen()->key()->representation(),
4056       FAST_ELEMENTS,
4057       instr->base_offset());
4058   if (instr->value()->IsRegister()) {
4059     __ mov(operand, ToRegister(instr->value()));
4060   } else {
4061     LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4062     if (IsSmi(operand_value)) {
4063       Immediate immediate = ToImmediate(operand_value, Representation::Smi());
4064       __ mov(operand, immediate);
4065     } else {
4066       DCHECK(!IsInteger32(operand_value));
4067       Handle<Object> handle_value = ToHandle(operand_value);
4068       __ mov(operand, handle_value);
4069     }
4070   }
4071 
4072   if (instr->hydrogen()->NeedsWriteBarrier()) {
4073     DCHECK(instr->value()->IsRegister());
4074     Register value = ToRegister(instr->value());
4075     DCHECK(!instr->key()->IsConstantOperand());
4076     SmiCheck check_needed =
4077         instr->hydrogen()->value()->type().IsHeapObject()
4078           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4079     // Compute address of modified element and store it into key register.
4080     __ lea(key, operand);
4081     __ RecordWrite(elements, key, value, kSaveFPRegs, EMIT_REMEMBERED_SET,
4082                    check_needed,
4083                    instr->hydrogen()->PointersToHereCheckForValue());
4084   }
4085 }
4086 
4087 
DoStoreKeyed(LStoreKeyed * instr)4088 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4089   // By cases...external, fast-double, fast
4090   if (instr->is_fixed_typed_array()) {
4091     DoStoreKeyedExternalArray(instr);
4092   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4093     DoStoreKeyedFixedDoubleArray(instr);
4094   } else {
4095     DoStoreKeyedFixedArray(instr);
4096   }
4097 }
4098 
4099 
DoTrapAllocationMemento(LTrapAllocationMemento * instr)4100 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4101   Register object = ToRegister(instr->object());
4102   Register temp = ToRegister(instr->temp());
4103   Label no_memento_found;
4104   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4105   DeoptimizeIf(equal, instr, DeoptimizeReason::kMementoFound);
4106   __ bind(&no_memento_found);
4107 }
4108 
4109 
DoMaybeGrowElements(LMaybeGrowElements * instr)4110 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4111   class DeferredMaybeGrowElements final : public LDeferredCode {
4112    public:
4113     DeferredMaybeGrowElements(LCodeGen* codegen,
4114                               LMaybeGrowElements* instr,
4115                               const X87Stack& x87_stack)
4116         : LDeferredCode(codegen, x87_stack), instr_(instr) {}
4117     void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4118     LInstruction* instr() override { return instr_; }
4119 
4120    private:
4121     LMaybeGrowElements* instr_;
4122   };
4123 
4124   Register result = eax;
4125   DeferredMaybeGrowElements* deferred =
4126       new (zone()) DeferredMaybeGrowElements(this, instr, x87_stack_);
4127   LOperand* key = instr->key();
4128   LOperand* current_capacity = instr->current_capacity();
4129 
4130   DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4131   DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4132   DCHECK(key->IsConstantOperand() || key->IsRegister());
4133   DCHECK(current_capacity->IsConstantOperand() ||
4134          current_capacity->IsRegister());
4135 
4136   if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4137     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4138     int32_t constant_capacity =
4139         ToInteger32(LConstantOperand::cast(current_capacity));
4140     if (constant_key >= constant_capacity) {
4141       // Deferred case.
4142       __ jmp(deferred->entry());
4143     }
4144   } else if (key->IsConstantOperand()) {
4145     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4146     __ cmp(ToOperand(current_capacity), Immediate(constant_key));
4147     __ j(less_equal, deferred->entry());
4148   } else if (current_capacity->IsConstantOperand()) {
4149     int32_t constant_capacity =
4150         ToInteger32(LConstantOperand::cast(current_capacity));
4151     __ cmp(ToRegister(key), Immediate(constant_capacity));
4152     __ j(greater_equal, deferred->entry());
4153   } else {
4154     __ cmp(ToRegister(key), ToRegister(current_capacity));
4155     __ j(greater_equal, deferred->entry());
4156   }
4157 
4158   __ mov(result, ToOperand(instr->elements()));
4159   __ bind(deferred->exit());
4160 }
4161 
4162 
DoDeferredMaybeGrowElements(LMaybeGrowElements * instr)4163 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4164   // TODO(3095996): Get rid of this. For now, we need to make the
4165   // result register contain a valid pointer because it is already
4166   // contained in the register pointer map.
4167   Register result = eax;
4168   __ Move(result, Immediate(0));
4169 
4170   // We have to call a stub.
4171   {
4172     PushSafepointRegistersScope scope(this);
4173     if (instr->object()->IsRegister()) {
4174       __ Move(result, ToRegister(instr->object()));
4175     } else {
4176       __ mov(result, ToOperand(instr->object()));
4177     }
4178 
4179     LOperand* key = instr->key();
4180     if (key->IsConstantOperand()) {
4181       LConstantOperand* constant_key = LConstantOperand::cast(key);
4182       int32_t int_key = ToInteger32(constant_key);
4183       if (Smi::IsValid(int_key)) {
4184         __ mov(ebx, Immediate(Smi::FromInt(int_key)));
4185       } else {
4186         // We should never get here at runtime because there is a smi check on
4187         // the key before this point.
4188         __ int3();
4189       }
4190     } else {
4191       __ Move(ebx, ToRegister(key));
4192       __ SmiTag(ebx);
4193     }
4194 
4195     GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
4196     __ CallStub(&stub);
4197     RecordSafepointWithLazyDeopt(
4198         instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4199     __ StoreToSafepointRegisterSlot(result, result);
4200   }
4201 
4202   // Deopt on smi, which means the elements array changed to dictionary mode.
4203   __ test(result, Immediate(kSmiTagMask));
4204   DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi);
4205 }
4206 
4207 
DoTransitionElementsKind(LTransitionElementsKind * instr)4208 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4209   Register object_reg = ToRegister(instr->object());
4210 
4211   Handle<Map> from_map = instr->original_map();
4212   Handle<Map> to_map = instr->transitioned_map();
4213   ElementsKind from_kind = instr->from_kind();
4214   ElementsKind to_kind = instr->to_kind();
4215 
4216   Label not_applicable;
4217   bool is_simple_map_transition =
4218       IsSimpleMapChangeTransition(from_kind, to_kind);
4219   Label::Distance branch_distance =
4220       is_simple_map_transition ? Label::kNear : Label::kFar;
4221   __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4222   __ j(not_equal, &not_applicable, branch_distance);
4223   if (is_simple_map_transition) {
4224     Register new_map_reg = ToRegister(instr->new_map_temp());
4225     __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
4226            Immediate(to_map));
4227     // Write barrier.
4228     DCHECK_NOT_NULL(instr->temp());
4229     __ RecordWriteForMap(object_reg, to_map, new_map_reg,
4230                          ToRegister(instr->temp()), kDontSaveFPRegs);
4231   } else {
4232     DCHECK(ToRegister(instr->context()).is(esi));
4233     DCHECK(object_reg.is(eax));
4234     PushSafepointRegistersScope scope(this);
4235     __ mov(ebx, to_map);
4236     TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
4237     __ CallStub(&stub);
4238     RecordSafepointWithLazyDeopt(instr,
        RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt final : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen,
                             LStringCharCodeAt* instr,
                             const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);

  StringCharLoadGenerator::Generate(masm(),
                                    factory(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
                                      Representation::Smi());
    __ push(immediate);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2,
                          instr, instr->context());
  __ AssertSmi(eax);
  __ SmiUntag(eax);
  __ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode final : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen,
                               LStringCharFromCode* instr,
                               const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  DCHECK(!char_code.is(result));

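  // Fast path: one-byte char codes are looked up in the isolate's
  // single-character string cache, a fixed array indexed by char code; an
  // undefined entry means the string is not cached yet, so fall back to the
  // deferred runtime call.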
  __ cmp(char_code, String::kMaxOneByteCharCode);
  __ j(above, deferred->entry());
  __ Move(result, Immediate(factory()->single_character_string_cache()));
  __ mov(result, FieldOperand(result,
                              char_code, times_pointer_size,
                              FixedArray::kHeaderSize));
  __ cmp(result, factory()->undefined_value());
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->left()).is(edx));
  DCHECK(ToRegister(instr->right()).is(eax));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  DCHECK(output->IsDoubleRegister());
  if (input->IsRegister()) {
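    // fild only accepts memory operands, so temporarily spill the register
    // to the stack for the x87 load.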
    Register input_reg = ToRegister(input);
    __ push(input_reg);
    X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
    __ pop(input_reg);
  } else {
    X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  X87Register res = ToX87Register(output);
  X87PrepareToWrite(res);
  __ LoadUint32NoSSE2(ToRegister(input));
  X87CommitWrite(res);
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI final : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen,
                       LNumberTagI* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
                                       SIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagI* deferred =
      new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
  __ SmiTag(reg);
  __ j(overflow, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU final : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen,
                       LNumberTagU* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
                                       UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred =
      new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
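  // With 31-bit smis on ia32, Smi::kMaxValue is 2^30 - 1, so the upper three
  // quarters of the uint32 range need a heap number instead.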
  __ cmp(reg, Immediate(Smi::kMaxValue));
  __ j(above, deferred->entry());
  __ SmiTag(reg);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register reg = ToRegister(value);
  Register tmp = ToRegister(temp);

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
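    // Example: for the untaggable value 0x40000000, SmiTag produced
    // 0x80000000 (signed overflow); SmiUntag shifts back arithmetically,
    // giving 0xC0000000, and flipping bit 31 recovers 0x40000000.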
    __ SmiUntag(reg);
    __ xor_(reg, 0x80000000);
    __ push(reg);
    __ fild_s(Operand(esp, 0));
    __ pop(reg);
  } else {
    // There's no fild variant for unsigned values, so zero-extend to a 64-bit
    // int manually.
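    // fild_d reads a signed 64-bit integer; with the high word zeroed, every
    // uint32 value is non-negative and exactly representable as a double.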
    __ push(Immediate(0));
    __ push(reg);
    __ fild_d(Operand(esp, 0));
    __ pop(reg);
    __ pop(reg);
  }

  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
    __ jmp(&done, Label::kNear);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ Move(reg, Immediate(0));

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);
    // Reset the context register.
    if (!reg.is(esi)) {
      __ Move(esi, Immediate(0));
    }
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(reg, eax);
  }

  __ bind(&done);
  __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD final : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen,
                       LNumberTagD* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagD* instr_;
  };

  Register reg = ToRegister(instr->result());

  // Put the value on top of the x87 stack.
  X87Register src = ToX87Register(instr->value());
  // Don't use X87LoadForUsage here; it is only meant for instructions that
  // clobber fp registers.
  x87_stack_.Fxch(src);

  DeferredNumberTagD* deferred =
      new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
  if (FLAG_inline_new) {
    Register tmp = ToRegister(instr->temp());
    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Immediate(0));

  PushSafepointRegistersScope scope(this);
  // Reset the context register.
  if (!reg.is(esi)) {
    __ Move(esi, Immediate(0));
  }
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(reg, eax);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
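    // A uint32 fits in a 31-bit smi only if it is below 2^30, i.e. its top
    // two bits are clear; otherwise tagging would change the value.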
    __ test(input, Immediate(0xc0000000));
    DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOverflow);
  }
  __ SmiTag(input);
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
  }
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  LOperand* input = instr->value();
  Register result = ToRegister(input);
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  if (instr->needs_check()) {
    __ test(result, Immediate(kSmiTagMask));
    DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi);
  } else {
    __ AssertSmi(result);
  }
  __ SmiUntag(result);
}


void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
                                      Register temp_reg, X87Register res_reg,
                                      NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan = instr->truncating();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Label load_smi, done;

  X87PrepareToWrite(res_reg);
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ JumpIfSmi(input_reg, &load_smi);

    // Heap number map check.
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           factory()->heap_number_map());
    if (!can_convert_undefined_to_nan) {
      DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
    } else {
      Label heap_number, convert;
      __ j(equal, &heap_number);

      // Convert undefined (or hole) to NaN.
      __ cmp(input_reg, factory()->undefined_value());
      DeoptimizeIf(not_equal, instr,
                   DeoptimizeReason::kNotAHeapNumberUndefined);

      __ bind(&convert);
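      // The two words pushed below encode the quiet NaN with bit pattern
      // 0xfff80000:00000000 (all-ones exponent, top mantissa bit set).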
      __ push(Immediate(0xfff80000));
      __ push(Immediate(0x00000000));
      __ fld_d(MemOperand(esp, 0));
      __ lea(esp, Operand(esp, kDoubleSize));
      __ jmp(&done, Label::kNear);

      __ bind(&heap_number);
    }
    // Heap number to x87 conversion.
    __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
      __ fldz();
      __ FCmp();
      __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
      __ j(not_zero, &done, Label::kNear);

      // Use general purpose registers to check if we have -0.0
      __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
      __ test(temp_reg, Immediate(HeapNumber::kSignMask));
      __ j(zero, &done, Label::kNear);

      // Pop FPU stack before deoptimizing.
      __ fstp(0);
      DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
    }
    __ jmp(&done, Label::kNear);
  } else {
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  __ bind(&load_smi);
  // Clobbering a temp is faster than re-tagging the
  // input register since we avoid dependencies.
  __ mov(temp_reg, input_reg);
  __ SmiUntag(temp_reg);  // Untag smi before converting to float.
  __ push(temp_reg);
  __ fild_s(Operand(esp, 0));
  __ add(esp, Immediate(kPointerSize));
  __ bind(&done);
  X87CommitWrite(res_reg);
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
  Register input_reg = ToRegister(instr->value());

  // The input was optimistically untagged; revert it.
  STATIC_ASSERT(kSmiTagSize == 1);
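  // SmiUntag shifted the tagged value right by one; since heap object
  // pointers always carry tag bit 1, "input_reg * 2 + kHeapObjectTag"
  // restores the original pointer exactly.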
  __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));

  if (instr->truncating()) {
    Label truncate;
    Label::Distance truncate_distance =
        DeoptEveryNTimes() ? Label::kFar : Label::kNear;
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           factory()->heap_number_map());
    __ j(equal, &truncate, truncate_distance);
    __ push(input_reg);
    __ CmpObjectType(input_reg, ODDBALL_TYPE, input_reg);
    __ pop(input_reg);
    DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotANumberOrOddball);
    __ bind(&truncate);
    __ TruncateHeapNumberToI(input_reg, input_reg);
  } else {
    // TODO(olivf) Converting a number on the fpu is actually quite slow. We
    // should first try a fast conversion and then bailout to this slow case.
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           isolate()->factory()->heap_number_map());
    DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);

    __ sub(esp, Immediate(kPointerSize));
    __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));

    if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
      Label no_precision_lost, not_nan, zero_check;
      __ fld(0);

      __ fist_s(MemOperand(esp, 0));
      __ fild_s(MemOperand(esp, 0));
      __ FCmp();
      __ pop(input_reg);

      __ j(equal, &no_precision_lost, Label::kNear);
      __ fstp(0);
      DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
      __ bind(&no_precision_lost);

      __ j(parity_odd, &not_nan);
      __ fstp(0);
      DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
      __ bind(&not_nan);

      __ test(input_reg, Operand(input_reg));
      __ j(zero, &zero_check, Label::kNear);
      __ fstp(0);
      __ jmp(done);

      __ bind(&zero_check);
      // To check for minus zero, we load the value again as float, and check
      // if that is still 0.
      __ sub(esp, Immediate(kPointerSize));
      __ fstp_s(Operand(esp, 0));
      __ pop(input_reg);
      __ test(input_reg, Operand(input_reg));
      DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
    } else {
      __ fist_s(MemOperand(esp, 0));
      __ fild_s(MemOperand(esp, 0));
      __ FCmp();
      __ pop(input_reg);
      DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
      DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
    }
  }
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI final : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen,
                      LTaggedToI* instr,
                      const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register input_reg = ToRegister(input);
  DCHECK(input_reg.is(ToRegister(instr->result())));

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred =
        new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
    // Optimistically untag the input.
    // If the input is a HeapObject, SmiUntag will set the carry flag.
    STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
    __ SmiUntag(input_reg);
    // Branch to deferred code if the input was tagged.
    // The deferred code will take care of restoring the tag.
    __ j(carry, deferred->entry());
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* temp = instr->temp();
  DCHECK(temp->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  Register temp_reg = ToRegister(temp);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagDNoSSE2(instr, input_reg, temp_reg, ToX87Register(result),
                         mode);
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsDoubleRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsRegister());
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    X87Register input_reg = ToX87Register(input);
    X87Fxch(input_reg);
    __ TruncateX87TOSToI(result_reg);
  } else {
    Label lost_precision, is_nan, minus_zero, done;
    X87Register input_reg = ToX87Register(input);
    X87Fxch(input_reg);
    __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
                 &lost_precision, &is_nan, &minus_zero);
    __ jmp(&done);
    __ bind(&lost_precision);
    DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
    __ bind(&is_nan);
    DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
    __ bind(&minus_zero);
    DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
    __ bind(&done);
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsDoubleRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsRegister());
  Register result_reg = ToRegister(result);

  Label lost_precision, is_nan, minus_zero, done;
  X87Register input_reg = ToX87Register(input);
  X87Fxch(input_reg);
  __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
               &lost_precision, &is_nan, &minus_zero);
  __ jmp(&done);
  __ bind(&lost_precision);
  DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
  __ bind(&is_nan);
  DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
  __ bind(&minus_zero);
  DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
  __ bind(&done);
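  // Tagging shifts left by one, so results outside the 31-bit smi range set
  // the overflow flag and deoptimize.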
  __ SmiTag(result_reg);
  DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ test(ToOperand(input), Immediate(kSmiTagMask));
  DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi);
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ test(ToOperand(input), Immediate(kSmiTagMask));
    DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
  }
}


void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
  Register view = ToRegister(instr->view());
  Register scratch = ToRegister(instr->scratch());

  __ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
  __ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
            Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
  DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOutOfBounds);
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(first));

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
    } else {
      DeoptimizeIf(below, instr, DeoptimizeReason::kWrongInstanceType);
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last));
        DeoptimizeIf(above, instr, DeoptimizeReason::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

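    // When the mask is a single bit and the tag is zero or that same bit, a
    // single test_b suffices; otherwise the instance type has to be masked
    // and compared explicitly.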
    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask));
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
                   DeoptimizeReason::kWrongInstanceType);
    } else {
      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
      __ and_(temp, mask);
      __ cmp(temp, tag);
      DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  if (instr->hydrogen()->object_in_new_space()) {
    Register reg = ToRegister(instr->value());
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ cmp(reg, Operand::ForCell(cell));
  } else {
    Operand operand = ToOperand(instr->value());
    __ cmp(operand, object);
  }
  DeoptimizeIf(not_equal, instr, DeoptimizeReason::kValueMismatch);
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ xor_(esi, esi);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);

    __ test(eax, Immediate(kSmiTagMask));
  }
  DeoptimizeIf(zero, instr, DeoptimizeReason::kInstanceMigrationFailed);
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen,
                      LCheckMaps* instr,
                      Register object,
                      const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(reg, map);
    __ j(equal, &success, Label::kNear);
  }

  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(reg, map);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  X87Register value_reg = ToX87Register(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  X87Fxch(value_reg);
  __ ClampTOSToUint8(result_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  DCHECK(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}


void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  Register scratch = ToRegister(instr->scratch());
  Register scratch2 = ToRegister(instr->scratch2());
  Register scratch3 = ToRegister(instr->scratch3());
  Label is_smi, done, heap_number, valid_exponent,
      largest_value, zero_result, maybe_nan_or_infinity;

  __ JumpIfSmi(input_reg, &is_smi);

  // Check for heap number
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
  __ jmp(&zero_result, Label::kNear);

  // Heap number
  __ bind(&heap_number);

  // Surprisingly, all of the hand-crafted bit manipulations below are much
  // faster than the x86 FPU's built-in instruction, especially since
  // "banker's rounding" would additionally be very expensive.

  // Get exponent word.
  __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));

  // Test for negative values --> clamp to zero
  __ test(scratch, scratch);
  __ j(negative, &zero_result, Label::kNear);

  // Get exponent alone in scratch2.
  __ mov(scratch2, scratch);
  __ and_(scratch2, HeapNumber::kExponentMask);
  __ shr(scratch2, HeapNumber::kExponentShift);
  __ j(zero, &zero_result, Label::kNear);
  __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
  __ j(negative, &zero_result, Label::kNear);

  const uint32_t non_int8_exponent = 7;
  __ cmp(scratch2, Immediate(non_int8_exponent + 1));
  // If the exponent is too big, check for special values.
  __ j(greater, &maybe_nan_or_infinity, Label::kNear);

  __ bind(&valid_exponent);
  // Exponent word in scratch, exponent plus one in scratch2. We know the
  // unbiased exponent is between 0 and 7 here. The shift bias is the number
  // of bits to shift the mantissa such that, with the largest exponent (7),
  // the top-most one ends up in bit 30, allowing detection of the rounding
  // overflow from 255.5 to 256 (bit 31 goes from 0 to 1).
  int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
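  // With kExponentShift == 20 the bias evaluates to 2, so the shift count
  // becomes (exponent + 3): the implicit one sitting at bit 20 lands at bit
  // (23 + exponent), and the shr(scratch, 23) below extracts the rounded
  // 8-bit integer.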
  __ lea(result_reg, MemOperand(scratch2, shift_bias));
  // Here result_reg (ecx) is the shift, scratch is the exponent word.  Get the
  // top bits of the mantissa.
  __ and_(scratch, HeapNumber::kMantissaMask);
  // Put back the implicit 1 of the mantissa
  __ or_(scratch, 1 << HeapNumber::kExponentShift);
  // Shift up to round
  __ shl_cl(scratch);
  // Use "banker's rounding" as the spec requires: if the fractional part of
  // the number is exactly 0.5, add the bit in the "ones" place to the
  // "halves" place, which has the effect of rounding to even.
  __ mov(scratch2, scratch);
  const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
  const uint32_t one_bit_shift = one_half_bit_shift + 1;
  __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
  __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
  Label no_round;
  __ j(less, &no_round, Label::kNear);
  Label round_up;
  __ mov(scratch2, Immediate(1 << one_half_bit_shift));
  __ j(greater, &round_up, Label::kNear);
  __ test(scratch3, scratch3);
  __ j(not_zero, &round_up, Label::kNear);
  __ mov(scratch2, scratch);
  __ and_(scratch2, Immediate(1 << one_bit_shift));
  __ shr(scratch2, 1);
  __ bind(&round_up);
  __ add(scratch, scratch2);
  __ j(overflow, &largest_value, Label::kNear);
  __ bind(&no_round);
  __ shr(scratch, 23);
  __ mov(result_reg, scratch);
  __ jmp(&done, Label::kNear);

  __ bind(&maybe_nan_or_infinity);
  // Check for NaN/Infinity, all other values map to 255
  __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
  __ j(not_equal, &largest_value, Label::kNear);

  // Check for NaN, which differs from Infinity in that at least one mantissa
  // bit is set.
  __ and_(scratch, HeapNumber::kMantissaMask);
  __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ j(not_zero, &zero_result, Label::kNear);  // M!=0 --> NaN
  // Infinity -> Fall through to map to 255.

  __ bind(&largest_value);
  __ mov(result_reg, Immediate(255));
  __ jmp(&done, Label::kNear);

  __ bind(&zero_result);
  __ xor_(result_reg, result_reg);
  __ jmp(&done, Label::kNear);

  // smi
  __ bind(&is_smi);
  if (!input_reg.is(result_reg)) {
    __ mov(result_reg, input_reg);
  }
  __ SmiUntag(result_reg);
  __ ClampUint8(result_reg);
  __ bind(&done);
}


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate final : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen,
                     LAllocate* instr,
                     const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr, x87_stack_);

  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Allocate memory for the object.
  AllocationFlags flags = NO_ALLOCATION_FLAGS;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
  }
  DCHECK(!instr->hydrogen()->IsAllocationFolded());

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= kMaxRegularHeapObjectSize);
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

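  // If requested, overwrite the allocated words with one-pointer filler maps
  // (walking temp downwards) so the heap stays iterable until the real
  // object contents are written.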
  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ mov(temp, (size / kPointerSize) - 1);
    } else {
      temp = ToRegister(instr->size());
      __ shr(temp, kPointerSizeLog2);
      __ dec(temp);
    }
    Label loop;
    __ bind(&loop);
    __ mov(FieldOperand(result, temp, times_pointer_size, 0),
        isolate()->factory()->one_pointer_filler_map());
    __ dec(temp);
    __ j(not_zero, &loop);
  }
}

void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
  DCHECK(instr->hydrogen()->IsAllocationFolded());
  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  AllocationFlags flags = ALLOCATION_FOLDED;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= kMaxRegularHeapObjectSize);
    __ FastAllocate(size, result, temp, flags);
  } else {
    Register size = ToRegister(instr->size());
    __ FastAllocate(size, result, temp, flags);
  }
}

void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(Smi::kZero));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(ToRegister(instr->size()));
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ push(Immediate(Smi::FromInt(size)));
    } else {
      // We should never get here at runtime => abort
      __ int3();
      return;
    }
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ push(Immediate(Smi::FromInt(flags)));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
    if (instr->hydrogen()->IsOldSpaceAllocation()) {
      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
      allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
    }
    // If the allocation-folding dominator's allocation triggered a GC, the
    // allocation happened in the runtime. We have to reset the top pointer
    // to virtually undo the allocation.
    ExternalReference allocation_top =
        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
    __ sub(eax, Immediate(kHeapObjectTag));
    __ mov(Operand::StaticVariable(allocation_top), eax);
    __ add(eax, Immediate(kHeapObjectTag));
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->value()).is(ebx));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
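  // Smis are always numbers, so typeof of a smi can be answered inline
  // without calling the Typeof stub.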
  __ JumpIfNotSmi(value_register, &do_call);
  __ mov(eax, Immediate(isolate()->factory()->number_string()));
  __ jmp(&end);
  __ bind(&do_call);
  Callable callable = CodeFactory::Typeof(isolate());
  CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  Label::Distance true_distance = left_block == next_block ? Label::kNear
                                                           : Label::kFar;
  Label::Distance false_distance = right_block == next_block ? Label::kNear
                                                             : Label::kFar;
  Condition final_branch_condition = no_condition;
  if (String::Equals(type_name, factory()->number_string())) {
    __ JumpIfSmi(input, true_label, true_distance);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           factory()->heap_number_map());
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->string_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    final_branch_condition = below;

  } else if (String::Equals(type_name, factory()->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->boolean_string())) {
    __ cmp(input, factory()->true_value());
    __ j(equal, true_label, true_distance);
    __ cmp(input, factory()->false_value());
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->undefined_string())) {
    __ cmp(input, factory()->null_value());
    __ j(equal, false_label, false_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
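    // (Undetectable objects, e.g. document.all, masquerade as undefined.)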
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (String::Equals(type_name, factory()->function_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for callable and not undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ movzx_b(input, FieldOperand(input, Map::kBitFieldOffset));
    __ and_(input, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
    __ cmp(input, 1 << Map::kIsCallable);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ cmp(input, factory()->null_value());
    __ j(equal, true_label, true_distance);
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
    __ j(below, false_label, false_distance);
    // Check for callable or undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    final_branch_condition = zero;

// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)         \
  } else if (String::Equals(type_name, factory()->type##_string())) { \
    __ JumpIfSmi(input, false_label, false_distance);                 \
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),               \
           factory()->type##_map());                                  \
    final_branch_condition = equal;
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
    // clang-format on

  } else {
    __ jmp(false_label, false_distance);
  }
  return final_branch_condition;
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }
  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen,
                       LStackCheck* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(above_equal, &done, Label::kNear);

    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(esi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr, x87_stack_);
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));

  Label use_cache, call_runtime;
  __ CheckEnumCache(&call_runtime);

  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(eax);
  CallRuntime(Runtime::kForInEnumerate, instr);
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ cmp(result, Immediate(Smi::kZero));
  __ j(not_equal, &load_cache, Label::kNear);
  __ mov(result, isolate()->factory()->empty_fixed_array());
  __ jmp(&done, Label::kNear);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ mov(result,
         FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ mov(result,
         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  __ test(result, result);
  DeoptimizeIf(equal, instr, DeoptimizeReason::kNoCache);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmp(ToRegister(instr->map()),
         FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ push(object);
  __ push(index);
  __ xor_(esi, esi);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(object, eax);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register object,
                              Register index,
                              const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack),
          instr_(instr),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, object, index, x87_stack_);

  Label out_of_object, done;
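  // The index is a smi whose payload encodes more than the field offset: the
  // low payload bit appears to flag a mutable heap-number field (loaded via
  // the deferred path), and a negative value selects an out-of-object
  // property, which is what the checks below rely on.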
  __ test(index, Immediate(Smi::FromInt(1)));
  __ j(not_zero, deferred->entry());

  __ sar(index, 1);

  __ cmp(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ neg(index);
  // Index is now equal to the out-of-object property index plus 1.
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X87