// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X87

#include "src/crankshaft/x87/lithium-codegen-x87.h"

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/deoptimizer.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"
#include "src/x87/frames-x87.h"

namespace v8 {
namespace internal {


// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};
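
// A usage sketch (the operand setup is illustrative, not taken from this
// file): an instance is passed as the CallWrapper of an invoke sequence, so
// that AfterCall() records the safepoint at the return address of the call
// embedded in that sequence.
//
//   SafepointGenerator generator(this, instr->pointer_map(),
//                                Safepoint::kLazyDeopt);
//   ParameterCount count(arity);
//   __ InvokeFunction(edi, count, CALL_FUNCTION, generator);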


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  support_aligned_spilled_doubles_ = info()->IsOptimizing();

  dynamic_frame_alignment_ = info()->IsOptimizing() &&
      ((chunk()->num_double_slots() > 2 &&
        !chunk()->graph()->is_recursive()) ||
       !info()->osr_ast_id().IsNone());

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
  }
}


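// Windows commits stack memory lazily, one guard page at a time, so a single
// large esp adjustment could touch memory beyond the committed region and
// fault. Writing one word into every 4 KB page of the newly reserved area,
// walking toward esp, forces each page to be committed before the frame is
// used.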
#ifdef _MSC_VER
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ mov(Operand(esp, offset), eax);
  }
}
#endif


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
      // Move state of dynamic frame alignment into edx.
      __ Move(edx, Immediate(kNoAlignmentPadding));

      Label do_not_pad, align_loop;
      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
      // Align esp + 4 to a multiple of 2 * kPointerSize.
      __ test(esp, Immediate(kPointerSize));
      __ j(not_zero, &do_not_pad, Label::kNear);
      __ push(Immediate(0));
      __ mov(ebx, esp);
      __ mov(edx, Immediate(kAlignmentPaddingPushed));
      // Copy arguments, receiver, and return address.
      __ mov(ecx, Immediate(scope()->num_parameters() + 2));

      __ bind(&align_loop);
      __ mov(eax, Operand(ebx, 1 * kPointerSize));
      __ mov(Operand(ebx, 0), eax);
      __ add(Operand(ebx), Immediate(kPointerSize));
      __ dec(ecx);
      __ j(not_zero, &align_loop, Label::kNear);
      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
      __ bind(&do_not_pad);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    DCHECK(!frame_is_built_);
    frame_is_built_ = true;
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
    }
  }

  if (info()->IsOptimizing() &&
      dynamic_frame_alignment_ &&
      FLAG_debug_code) {
    __ test(esp, Immediate(kPointerSize));
    __ Assert(zero, kFrameIsExpectedToBeAligned);
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  DCHECK(slots != 0 || !info()->IsOptimizing());
  if (slots > 0) {
    if (slots == 1) {
      if (dynamic_frame_alignment_) {
        __ push(edx);
      } else {
        __ push(Immediate(kNoAlignmentPadding));
      }
    } else {
      if (FLAG_debug_code) {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
        __ push(eax);
        __ mov(Operand(eax), Immediate(slots));
        Label loop;
        __ bind(&loop);
        __ mov(MemOperand(esp, eax, times_4, 0),
               Immediate(kSlotsZapValue));
        __ dec(eax);
        __ j(not_zero, &loop);
        __ pop(eax);
      } else {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      }

      if (support_aligned_spilled_doubles_) {
        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
        // Store dynamic frame alignment state in the first local.
        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
        if (dynamic_frame_alignment_) {
          __ mov(Operand(ebp, offset), edx);
        } else {
          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
        }
      }
    }
  }

  // Initialize FPU state.
  __ fninit();

  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info_->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is still in edi.
    int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(edi);
      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(edi);
      __ CallRuntime(Runtime::kNewFunctionContext);
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in eax.  It replaces the context passed to us.
    // It's saved in the stack and kept live in esi.
    __ mov(esi, eax);
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);

    // Copy parameters into context if necessary.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers eax and ebx.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(esi, context_offset, eax, ebx,
                                    kDontSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Move state of dynamic frame alignment into edx.
  __ Move(edx, Immediate(kNoAlignmentPadding));

  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
    Label do_not_pad, align_loop;
    // Align ebp + 4 to a multiple of 2 * kPointerSize.
    __ test(ebp, Immediate(kPointerSize));
    __ j(zero, &do_not_pad, Label::kNear);
    __ push(Immediate(0));
    __ mov(ebx, esp);
    __ mov(edx, Immediate(kAlignmentPaddingPushed));

    // Move all parts of the frame over one word. The frame consists of:
    // unoptimized frame slots, alignment state, context, frame pointer, return
    // address, receiver, and the arguments.
    __ mov(ecx, Immediate(scope()->num_parameters() +
           5 + graph()->osr()->UnoptimizedFrameSlots()));

    __ bind(&align_loop);
    __ mov(eax, Operand(ebx, 1 * kPointerSize));
    __ mov(Operand(ebx, 0), eax);
    __ add(Operand(ebx), Immediate(kPointerSize));
    __ dec(ecx);
    __ j(not_zero, &align_loop, Label::kNear);
    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
    __ sub(Operand(ebp), Immediate(kPointerSize));
    __ bind(&do_not_pad);
  }

  // Save the first local, which is overwritten by the alignment state.
  Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
  __ push(alignment_loc);

  // Set the dynamic frame alignment state.
  __ mov(alignment_loc, edx);

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 1);
  __ sub(esp, Immediate((slots - 1) * kPointerSize));

  // Initialize FPU state.
  __ fninit();
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
  FlushX87StackIfNecessary(instr);
}


void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  // When returning from a function call, the FPU must be initialized again.
  if (instr->IsCall() && instr->ClobbersDoubleRegisters(isolate())) {
    bool double_result = instr->HasDoubleRegisterResult();
    if (double_result) {
      __ lea(esp, Operand(esp, -kDoubleSize));
      __ fstp_d(Operand(esp, 0));
    }
    __ fninit();
    if (double_result) {
      __ fld_d(Operand(esp, 0));
      __ lea(esp, Operand(esp, kDoubleSize));
    }
  }
  if (instr->IsGoto()) {
    x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr), this);
  } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
             !instr->IsGap() && !instr->IsReturn()) {
    if (instr->ClobbersDoubleRegisters(isolate())) {
      if (instr->HasDoubleRegisterResult()) {
        DCHECK_EQ(1, x87_stack_.depth());
      } else {
        DCHECK_EQ(0, x87_stack_.depth());
      }
    }
    __ VerifyX87StackDepth(x87_stack_.depth());
  }
}


bool LCodeGen::GenerateJumpTable() {
  if (!jump_table_.length()) return !is_aborted();

  Label needs_frame;
  Comment(";;; -------------------- Jump table --------------------");

  for (int i = 0; i < jump_table_.length(); i++) {
    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
    __ bind(&table_entry->label);
    Address entry = table_entry->address;
    DeoptComment(table_entry->deopt_info);
    if (table_entry->needs_frame) {
      DCHECK(!info()->saves_caller_doubles());
      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
      __ call(&needs_frame);
    } else {
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
    info()->LogDeoptCallPosition(masm()->pc_offset(),
                                 table_entry->deopt_info.inlining_id);
  }
  if (needs_frame.is_linked()) {
    __ bind(&needs_frame);

    /* stack layout
       4: entry address
       3: return address  <-- esp
       2: garbage
       1: garbage
       0: garbage
    */
    __ sub(esp, Immediate(kPointerSize));    // Reserve space for stub marker.
    __ push(MemOperand(esp, kPointerSize));  // Copy return address.
    __ push(MemOperand(esp, 3 * kPointerSize));  // Copy entry address.

    /* stack layout
       4: entry address
       3: return address
       2: garbage
       1: return address
       0: entry address  <-- esp
    */
    __ mov(MemOperand(esp, 4 * kPointerSize), ebp);  // Save ebp.

    // Copy context.
    __ mov(ebp, MemOperand(ebp, StandardFrameConstants::kContextOffset));
    __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
    // Fill ebp with the right stack frame address.
    __ lea(ebp, MemOperand(esp, 4 * kPointerSize));

    // This variant of deopt can only be used with stubs. Since we don't
    // have a function pointer to install in the stack frame that we're
    // building, install a special marker there instead.
    DCHECK(info()->IsStub());
    __ mov(MemOperand(esp, 2 * kPointerSize),
           Immediate(Smi::FromInt(StackFrame::STUB)));

    /* stack layout
       4: old ebp
       3: context pointer
       2: stub marker
       1: return address
       0: entry address  <-- esp
    */
    __ ret(0);  // Call the continuation without clobbering registers.
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      X87Stack copy(code->x87_stack());
      x87_stack_ = copy;

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that esi isn't trashed.
        __ push(ebp);  // Caller's frame pointer.
        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        __ lea(ebp, Operand(esp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        frame_is_built_ = false;
        __ mov(esp, ebp);
        __ pop(ebp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // For lazy deoptimization we need space to patch a call after every call.
    // Ensure there is always space for such patching, even if the code ends
    // in a call.
    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
    while (masm()->pc_offset() < target_offset) {
      masm()->nop();
    }
  }
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int code) const {
  return Register::from_code(code);
}


X87Register LCodeGen::ToX87Register(int code) const {
  return X87Register::from_code(code);
}


void LCodeGen::X87LoadForUsage(X87Register reg) {
  DCHECK(x87_stack_.Contains(reg));
  x87_stack_.Fxch(reg);
  x87_stack_.pop();
}


void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
  DCHECK(x87_stack_.Contains(reg1));
  DCHECK(x87_stack_.Contains(reg2));
  if (reg1.is(reg2) && x87_stack_.depth() == 1) {
    __ fld(x87_stack_.st(reg1));
    x87_stack_.push(reg1);
    x87_stack_.pop();
    x87_stack_.pop();
  } else {
    x87_stack_.Fxch(reg1, 1);
    x87_stack_.Fxch(reg2);
    x87_stack_.pop();
    x87_stack_.pop();
  }
}


int LCodeGen::X87Stack::GetLayout() {
  int layout = stack_depth_;
  for (int i = 0; i < stack_depth_; i++) {
    layout |= (stack_[stack_depth_ - 1 - i].code() << ((i + 1) * 3));
  }

  return layout;
}
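
// Example: with stack_depth_ == 2 and register codes {0, 5} on the virtual
// stack (code 5 on top), GetLayout() returns 2 | (5 << 3) | (0 << 6) == 0x2a:
// bits 0-2 hold the depth, and each following 3-bit group holds the code of
// the next deeper slot, starting at TOS.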


void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
  DCHECK(is_mutable_);
  DCHECK(Contains(reg) && stack_depth_ > other_slot);
  int i  = ArrayIndex(reg);
  int st = st2idx(i);
  if (st != other_slot) {
    int other_i = st2idx(other_slot);
    X87Register other = stack_[other_i];
    stack_[other_i]   = reg;
    stack_[i]         = other;
    if (st == 0) {
      __ fxch(other_slot);
    } else if (other_slot == 0) {
      __ fxch(st);
    } else {
      __ fxch(st);
      __ fxch(other_slot);
      __ fxch(st);
    }
  }
}


int LCodeGen::X87Stack::st2idx(int pos) {
  return stack_depth_ - pos - 1;
}
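
// Example: with stack_depth_ == 3, st(0) -- the top of the FPU stack -- maps
// to array index 2 and st(2) to index 0: stack_[] grows bottom-up while st()
// positions count down from the top. The mapping is its own inverse, so it
// converts in both directions.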


int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return i;
  }
  UNREACHABLE();
  return -1;
}


bool LCodeGen::X87Stack::Contains(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return true;
  }
  return false;
}


void LCodeGen::X87Stack::Free(X87Register reg) {
  DCHECK(is_mutable_);
  DCHECK(Contains(reg));
  int i  = ArrayIndex(reg);
  int st = st2idx(i);
  if (st > 0) {
    // Keep track of how fstp(i) changes the order of elements.
    int tos_i = st2idx(0);
    stack_[i] = stack_[tos_i];
  }
  pop();
  __ fstp(st);
}


void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
  if (x87_stack_.Contains(dst)) {
    x87_stack_.Fxch(dst);
    __ fstp(0);
  } else {
    x87_stack_.push(dst);
  }
  X87Fld(src, opts);
}


void LCodeGen::X87Mov(X87Register dst, X87Register src, X87OperandType opts) {
  if (x87_stack_.Contains(dst)) {
    x87_stack_.Fxch(dst);
    __ fstp(0);
    x87_stack_.pop();
    // Push ST(i) onto the FPU register stack.
    __ fld(x87_stack_.st(src));
    x87_stack_.push(dst);
  } else {
    // Push ST(i) onto the FPU register stack.
    __ fld(x87_stack_.st(src));
    x87_stack_.push(dst);
  }
}


void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
  DCHECK(!src.is_reg_only());
  switch (opts) {
    case kX87DoubleOperand:
      __ fld_d(src);
      break;
    case kX87FloatOperand:
      __ fld_s(src);
      break;
    case kX87IntOperand:
      __ fild_s(src);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
  DCHECK(!dst.is_reg_only());
  x87_stack_.Fxch(src);
  switch (opts) {
    case kX87DoubleOperand:
      __ fst_d(dst);
      break;
    case kX87FloatOperand:
      __ fst_s(dst);
      break;
    case kX87IntOperand:
      __ fist_s(dst);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
  DCHECK(is_mutable_);
  if (Contains(reg)) {
    Free(reg);
  }
  // Mark this register as the next register to write to.
  stack_[stack_depth_] = reg;
}


void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
  DCHECK(is_mutable_);
  // Assert the reg is prepared to write, but not on the virtual stack yet.
  DCHECK(!Contains(reg) && stack_[stack_depth_].is(reg) &&
         stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
  stack_depth_++;
}
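
// A sketch of the two-phase write protocol above (the fld_d is illustrative;
// any instruction that pushes one value onto the FPU stack fits):
//
//   x87_stack_.PrepareToWrite(dst);  // Free dst, reserve the next slot.
//   __ fld_d(src);                   // The actual hardware-stack push.
//   x87_stack_.CommitWrite(dst);     // dst is now live on the virtual stack.
//
// Splitting the bookkeeping keeps the virtual stack consistent with the FPU
// stack on both sides of the write.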


void LCodeGen::X87PrepareBinaryOp(
    X87Register left, X87Register right, X87Register result) {
  // You need to use DefineSameAsFirst for x87 instructions.
  DCHECK(result.is(left));
  x87_stack_.Fxch(right, 1);
  x87_stack_.Fxch(left);
}
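
// On return, left (== result) is at st(0) and right at st(1), so a
// two-operand x87 arithmetic instruction can combine them directly.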


void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
  if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) {
    bool double_inputs = instr->HasDoubleRegisterInput();

    // Flush the stack from TOS down, since Free() will mess with TOS.
    for (int i = stack_depth_ - 1; i >= 0; i--) {
      X87Register reg = stack_[i];
      // Skip registers which contain the inputs for the next instruction
      // when flushing the stack.
      if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
        continue;
      }
      Free(reg);
      if (i < stack_depth_ - 1) i++;
    }
  }
  if (instr->IsReturn()) {
    while (stack_depth_ > 0) {
      __ fstp(0);
      stack_depth_--;
    }
    if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
  }
}


void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr,
                                      LCodeGen* cgen) {
  // When going to a join block, an explicit LClobberDoubles is inserted before
  // the LGoto, because all used x87 registers are spilled to stack slots. The
  // ResolvePhis phase of the register allocator guarantees that the two
  // inputs' x87 stacks have the same layout, so don't check stack_depth_ <= 1
  // here.
  int goto_block_id = goto_instr->block_id();
  if (current_block_id + 1 != goto_block_id) {
    // If we have a value on the x87 stack on leaving a block, it must be a
    // phi input. If the next block we compile is not the join block, we have
    // to discard the stack state.
    // Before discarding the stack state, we need to save it if the "goto
    // block" has an unreachable last predecessor and
    // FLAG_unreachable_code_elimination is on.
    if (FLAG_unreachable_code_elimination) {
      int length = goto_instr->block()->predecessors()->length();
      bool has_unreachable_last_predecessor = false;
      for (int i = 0; i < length; i++) {
        HBasicBlock* block = goto_instr->block()->predecessors()->at(i);
        if (block->IsUnreachable() &&
            (block->block_id() + 1) == goto_block_id) {
          has_unreachable_last_predecessor = true;
        }
      }
      if (has_unreachable_last_predecessor) {
        if (cgen->x87_stack_map_.find(goto_block_id) ==
            cgen->x87_stack_map_.end()) {
          X87Stack* stack = new (cgen->zone()) X87Stack(*this);
          cgen->x87_stack_map_.insert(std::make_pair(goto_block_id, stack));
        }
      }
    }

    // Discard the stack state.
    stack_depth_ = 0;
  }
}


void LCodeGen::EmitFlushX87ForDeopt() {
  // The deoptimizer does not support x87 registers. But as long as we deopt
  // from a stub, it's not a problem, since we will re-materialize the
  // original stub inputs, which can't be double registers.
  // DCHECK(info()->IsStub());
  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
    __ pushfd();
    __ VerifyX87StackDepth(x87_stack_.depth());
    __ popfd();
  }

  // Flush X87 stack in the deoptimizer entry.
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


X87Register LCodeGen::ToX87Register(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToX87Register(op->index());
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  if (r.IsExternal()) {
    return reinterpret_cast<int32_t>(
        constant->ExternalReferenceValue().address());
  }
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}
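
// Example: index -1 maps to offset kPCOnStackSize, the word just above the
// return address; index -2 maps to kPCOnStackSize + kPointerSize, and so on
// up the stack.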


Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, StackSlotOffset(op->index()));
  } else {
    // Without an eager frame, retrieve the parameter relative to the
    // stack pointer.
    return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


Operand LCodeGen::HighOperand(LOperand* op) {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Without an eager frame, retrieve the parameter relative to the
    // stack pointer.
    return Operand(
        esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    X87Register reg = ToX87Register(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntime(const Runtime::Function* fun, int argc,
                           LInstruction* instr, SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);
  DCHECK(instr->HasPointerMap());

  __ CallRuntime(fun, argc, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);

  DCHECK(info()->is_calling());
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(esi)) {
      __ mov(esi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ mov(esi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);

  DCHECK(info()->is_calling());
}


void LCodeGen::RegisterEnvironmentForDeoptimization(
    LEnvironment* environment, Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ mov(eax, Operand::StaticVariable(count));
    __ sub(eax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
    DCHECK(frame_is_built_);
    // Put the x87 stack layout in TOS.
    if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();
    __ push(Immediate(x87_stack_.GetLayout()));
    __ fild_s(MemOperand(esp, 0));
    // Don't touch eflags.
    __ lea(esp, Operand(esp, kPointerSize));
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
  }

  // Put the x87 stack layout in TOS, so that we can save x87 fp registers in
  // the correct location.
  {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();

    int x87_stack_layout = x87_stack_.GetLayout();
    __ push(Immediate(x87_stack_layout));
    __ fild_s(MemOperand(esp, 0));
    // Don't touch eflags.
    __ lea(esp, Operand(esp, kPointerSize));
    __ bind(&done);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  if (cc == no_condition && frame_is_built_) {
    DeoptComment(deopt_info);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  if (label->block()->predecessors()->length() > 1) {
    // A join block's x87 stack is that of its last visited predecessor.
    // If the last visited predecessor block is unreachable, the stack state
    // will be wrong. In that case, use the x87 stack of a reachable
    // predecessor.
    X87StackMap::const_iterator it = x87_stack_map_.find(current_block_);
    // Restore x87 stack.
    if (it != x87_stack_map_.end()) {
      x87_stack_ = *(it->second);
    }
  }
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->result()).is(eax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ test(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    // Note that this is correct even for kMinInt operands.
    __ neg(dividend);
    __ and_(dividend, mask);
    __ neg(dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
    }
    __ jmp(&done, Label::kNear);
  }

  __ bind(&dividend_is_not_negative);
  __ and_(dividend, mask);
  __ bind(&done);
}
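
// Worked example for the code above (divisor == 8, so mask == 7): a dividend
// of -10 takes the negative path: neg -> 10, and 7 -> 2, neg -> -2, matching
// -10 % 8 == -2 (the result keeps the dividend's sign). A dividend of 10
// takes only the final 'and' -> 2.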


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(eax));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  __ imul(edx, edx, Abs(divisor));
  __ mov(eax, dividend);
  __ sub(eax, edx);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    __ cmp(dividend, Immediate(0));
    DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  DCHECK(left_reg.is(eax));
  Register right_reg = ToRegister(instr->right());
  DCHECK(!right_reg.is(eax));
  DCHECK(!right_reg.is(edx));
  Register result_reg = ToRegister(instr->result());
  DCHECK(result_reg.is(edx));

  Label done;
  // Check for x % 0, idiv would signal a divide error. We have to
  // deopt in this case because we can't return a NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, Operand(right_reg));
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for kMinInt % -1, idiv would signal a divide error. We
  // have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ cmp(left_reg, kMinInt);
    __ j(not_equal, &no_overflow_possible, Label::kNear);
    __ cmp(right_reg, -1);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
    } else {
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Move(result_reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // Sign extend dividend in eax into edx:eax.
  __ cdq();

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive_left;
    __ test(left_reg, Operand(left_reg));
    __ j(not_sign, &positive_left, Label::kNear);
    __ idiv(right_reg);
    __ test(result_reg, Operand(result_reg));
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
  }
  __ idiv(right_reg);
  __ bind(&done);
}
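
// Note on the register pinning above: a 32-bit idiv always divides edx:eax,
// leaving the quotient in eax and the remainder in edx, which is why the
// dividend is fixed to eax and the modulus result to edx.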


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmp(dividend, kMinInt);
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ test(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
  }
  __ Move(result, dividend);
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift > 0) {
    // The arithmetic shift is always OK, the 'if' is an optimization only.
    if (shift > 1) __ sar(result, 31);
    __ shr(result, 32 - shift);
    __ add(result, dividend);
    __ sar(result, shift);
  }
  if (divisor < 0) __ neg(result);
}
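
// Worked example for the shift sequence above (divisor == 4, shift == 2,
// dividend == -9): sar 31 yields 0xffffffff, shr 30 extracts the bias
// 2^shift - 1 == 3, add gives -6, and sar 2 gives -2 -- the truncated
// quotient of -9 / 4. For a non-negative dividend the bias is 0 and the
// sequence degenerates to a plain arithmetic shift.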


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(edx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ neg(edx);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ mov(eax, edx);
    __ imul(eax, eax, divisor);
    __ sub(eax, dividend);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  DCHECK(dividend.is(eax));
  DCHECK(remainder.is(edx));
  DCHECK(ToRegister(instr->result()).is(eax));
  DCHECK(!divisor.is(eax));
  DCHECK(!divisor.is(edx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(divisor, divisor);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ test(divisor, divisor);
    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmp(divisor, -1);
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to edx (= remainder).
  __ cdq();
  __ idiv(divisor);

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ test(remainder, remainder);
    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
  }
}
1542 
1543 
DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I * instr)1544 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1545   Register dividend = ToRegister(instr->dividend());
1546   int32_t divisor = instr->divisor();
1547   DCHECK(dividend.is(ToRegister(instr->result())));
1548 
1549   // If the divisor is positive, things are easy: There can be no deopts and we
1550   // can simply do an arithmetic right shift.
1551   if (divisor == 1) return;
1552   int32_t shift = WhichPowerOf2Abs(divisor);
1553   if (divisor > 1) {
1554     __ sar(dividend, shift);
1555     return;
1556   }
1557 
1558   // If the divisor is negative, we have to negate and handle edge cases.
1559   __ neg(dividend);
1560   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1561     DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1562   }
1563 
1564   // Dividing by -1 is basically negation, unless we overflow.
1565   if (divisor == -1) {
1566     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1567       DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1568     }
1569     return;
1570   }
1571 
1572   // If the negation could not overflow, simply shifting is OK.
1573   if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1574     __ sar(dividend, shift);
1575     return;
1576   }
1577 
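  // The neg above set the overflow flag exactly when the dividend was
  // kMinInt; in that case the flooring quotient kMinInt / divisor is a
  // constant known at code-generation time, so materialize it directly.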
  Label not_kmin_int, done;
  __ j(no_overflow, &not_kmin_int, Label::kNear);
  __ mov(dividend, Immediate(kMinInt / divisor));
  __ jmp(&done, Label::kNear);
  __ bind(&not_kmin_int);
  __ sar(dividend, shift);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(edx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(dividend, Abs(divisor));
    if (divisor < 0) __ neg(edx);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
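  // When the signs of dividend and divisor differ, floor(a / b) equals
  // trunc((a + (b > 0 ? 1 : -1)) / b) - 1, which is what the adjustment
  // path below computes.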
  Register temp = ToRegister(instr->temp3());
  DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
  Label needs_adjustment, done;
  __ cmp(dividend, Immediate(0));
  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ neg(edx);
  __ jmp(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
  __ TruncatingDiv(temp, Abs(divisor));
  if (divisor < 0) __ neg(edx);
  __ dec(edx);
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());
  DCHECK(dividend.is(eax));
  DCHECK(remainder.is(edx));
  DCHECK(result.is(eax));
  DCHECK(!divisor.is(eax));
  DCHECK(!divisor.is(edx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(divisor, divisor);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ test(divisor, divisor);
    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmp(divisor, -1);
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to edx (= remainder).
  __ cdq();
  __ idiv(divisor);

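  // idiv truncates toward zero; flooring differs only when the remainder is
  // non-zero and has the opposite sign of the divisor. The xor puts that
  // sign comparison into the remainder's sign bit, sar turns it into 0 or
  // -1, and add folds the adjustment into the quotient.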
  Label done;
  __ test(remainder, remainder);
  __ j(zero, &done, Label::kNear);
  __ xor_(remainder, divisor);
  __ sar(remainder, 31);
  __ add(result, remainder);
  __ bind(&done);
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ mov(ToRegister(instr->temp()), left);
  }

  if (right->IsConstantOperand()) {
    // Try strength reductions on the multiplication.
    // All replacement instructions are at most as long as the imul
    // and have better latency.
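    // The -1, 0 and 2 cases are safe even when an overflow check follows:
    // neg and add set the overflow flag correctly, and multiplying by 0
    // cannot overflow. The lea/shl replacements below do not reliably set
    // the flag, so they are only emitted when no overflow check is needed.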
    int constant = ToInteger32(LConstantOperand::cast(right));
    if (constant == -1) {
      __ neg(left);
    } else if (constant == 0) {
      __ xor_(left, Operand(left));
    } else if (constant == 2) {
      __ add(left, Operand(left));
    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      // If we know that the multiplication can't overflow, it's safe to
      // use instructions that don't set the overflow flag for the
      // multiplication.
      switch (constant) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ lea(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shl(left, 2);
          break;
        case 5:
          __ lea(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shl(left, 3);
          break;
        case 9:
          __ lea(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shl(left, 4);
          break;
        default:
          __ imul(left, left, constant);
          break;
      }
    } else {
      __ imul(left, left, constant);
    }
  } else {
    if (instr->hydrogen()->representation().IsSmi()) {
      __ SmiUntag(left);
    }
    __ imul(left, ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    __ test(left, Operand(left));
    __ j(not_zero, &done);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        __ cmp(ToRegister(instr->temp()), Immediate(0));
        DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
      }
    } else {
      // Test the non-zero operand for negative sign.
      __ or_(ToRegister(instr->temp()), ToOperand(right));
      DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    }
    __ bind(&done);
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand =
        ToRepresentation(LConstantOperand::cast(right),
                         instr->hydrogen()->representation());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), right_operand);
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), right_operand);
        break;
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {
          __ not_(ToRegister(left));
        } else {
          __ xor_(ToRegister(left), right_operand);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());
  if (right->IsRegister()) {
    DCHECK(ToRegister(right).is(ecx));

    switch (instr->op()) {
      case Token::ROR:
        __ ror_cl(ToRegister(left));
        break;
      case Token::SAR:
        __ sar_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shr_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case Token::SHL:
        __ shl_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
        } else {
          __ ror(ToRegister(left), shift_count);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sar(ToRegister(left), shift_count);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ shr(ToRegister(left), shift_count);
        } else if (instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ shl(ToRegister(left), shift_count - 1);
            }
            __ SmiTag(ToRegister(left));
            DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
          } else {
            __ shl(ToRegister(left), shift_count);
          }
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ sub(ToOperand(left),
           ToImmediate(right, instr->hydrogen()->representation()));
  } else {
    __ sub(ToRegister(left), ToOperand(right));
  }
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  uint64_t const bits = instr->bits();
  uint32_t const lower = static_cast<uint32_t>(bits);
  uint32_t const upper = static_cast<uint32_t>(bits >> 32);
  DCHECK(instr->result()->IsDoubleRegister());

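  // The x87 cannot load an arbitrary double immediate directly, so push the
  // two 32-bit halves (the low word ends up at the lower address) and load
  // the constant through memory.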
  __ push(Immediate(upper));
  __ push(Immediate(lower));
  X87Register reg = ToX87Register(instr->result());
  X87Mov(reg, Operand(esp, 0));
  __ add(Operand(esp), Immediate(kDoubleSize));
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Register reg = ToRegister(instr->result());
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ LoadObject(reg, object);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


Operand LCodeGen::BuildSeqStringOperand(Register string,
                                        LOperand* index,
                                        String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToRepresentation(LConstantOperand::cast(index),
                                  Representation::Integer32());
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldOperand(string, SeqString::kHeaderSize + offset);
  }
  return FieldOperand(
      string, ToRegister(index),
      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
      SeqString::kHeaderSize);
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register result = ToRegister(instr->result());
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    __ push(string);
    __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
    __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));

    __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
                             ? one_byte_seq_type : two_byte_seq_type));
    __ Check(equal, kUnexpectedStringType);
    __ pop(string);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ movzx_b(result, operand);
  } else {
    __ movzx_w(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    Register value = ToRegister(instr->value());
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (instr->value()->IsConstantOperand()) {
    int value = ToRepresentation(LConstantOperand::cast(instr->value()),
                                 Representation::Integer32());
    DCHECK_LE(0, value);
    if (encoding == String::ONE_BYTE_ENCODING) {
      DCHECK_LE(value, String::kMaxOneByteCharCode);
      __ mov_b(operand, static_cast<int8_t>(value));
    } else {
      DCHECK_LE(value, String::kMaxUtf16CodeUnit);
      __ mov_w(operand, static_cast<int16_t>(value));
    }
  } else {
    Register value = ToRegister(instr->value());
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ mov_b(operand, value);
    } else {
      __ mov_w(operand, value);
    }
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
    if (right->IsConstantOperand()) {
      int32_t offset = ToRepresentation(LConstantOperand::cast(right),
                                        instr->hydrogen()->representation());
      __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
    } else {
      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
      __ lea(ToRegister(instr->result()), address);
    }
  } else {
    if (right->IsConstantOperand()) {
      __ add(ToOperand(left),
             ToImmediate(right, instr->hydrogen()->representation()));
    } else {
      __ add(ToRegister(left), ToOperand(right));
    }
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
    }
  }
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Label return_left;
    Condition condition = (operation == HMathMinMax::kMathMin)
        ? less_equal
        : greater_equal;
    if (right->IsConstantOperand()) {
      Operand left_op = ToOperand(left);
      Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
                                        instr->hydrogen()->representation());
      __ cmp(left_op, immediate);
      __ j(condition, &return_left, Label::kNear);
      __ mov(left_op, immediate);
    } else {
      Register left_reg = ToRegister(left);
      Operand right_op = ToOperand(right);
      __ cmp(left_reg, right_op);
      __ j(condition, &return_left, Label::kNear);
      __ mov(left_reg, right_op);
    }
    __ bind(&return_left);
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    Label check_nan_left, check_zero, return_left, return_right;
    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
    X87Register left_reg = ToX87Register(left);
    X87Register right_reg = ToX87Register(right);

    X87PrepareBinaryOp(left_reg, right_reg, ToX87Register(instr->result()));
    __ fld(1);
    __ fld(1);
    __ FCmp();
    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
    __ j(equal, &check_zero, Label::kNear);            // left == right.
    __ j(condition, &return_left, Label::kNear);
    __ jmp(&return_right, Label::kNear);

    __ bind(&check_zero);
    __ fld(0);
    __ fldz();
    __ FCmp();
    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
    // At this point, both left and right are either 0 or -0.
    if (operation == HMathMinMax::kMathMin) {
      // Copy st0 and st1 to the stack as singles, OR their bit patterns
      // through the scratch register, and load the result back into left_reg.
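      // OR keeps the sign bit set if either operand is -0, which is exactly
      // the required minimum over signed zeros.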
      Register scratch_reg = ToRegister(instr->temp());
      __ fld(1);
      __ fld(1);
      __ sub(esp, Immediate(2 * kPointerSize));
      __ fstp_s(MemOperand(esp, 0));
      __ fstp_s(MemOperand(esp, kPointerSize));
      __ pop(scratch_reg);
      __ or_(MemOperand(esp, 0), scratch_reg);
      X87Mov(left_reg, MemOperand(esp, 0), kX87FloatOperand);
      __ pop(scratch_reg);  // restore esp
    } else {
      // Since both operands are +0 and/or -0, adding them has the same
      // effect as ANDing the sign bits: the result is -0 only if both
      // inputs are -0, which is the required maximum over signed zeros.
      X87Fxch(left_reg);
      __ fadd(1);
    }
    __ jmp(&return_left, Label::kNear);

    __ bind(&check_nan_left);
    __ fld(0);
    __ fld(0);
    __ FCmp();                                      // NaN check.
    __ j(parity_even, &return_left, Label::kNear);  // left == NaN.

    __ bind(&return_right);
    X87Fxch(left_reg);
    X87Mov(left_reg, right_reg);

    __ bind(&return_left);
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  X87Register left = ToX87Register(instr->left());
  X87Register right = ToX87Register(instr->right());
  X87Register result = ToX87Register(instr->result());
  if (instr->op() != Token::MOD) {
    X87PrepareBinaryOp(left, right, result);
  }
  // Set the precision control to double-precision.
  __ X87SetFPUCW(0x027F);
  switch (instr->op()) {
    case Token::ADD:
      __ fadd_i(1);
      break;
    case Token::SUB:
      __ fsub_i(1);
      break;
    case Token::MUL:
      __ fmul_i(1);
      break;
    case Token::DIV:
      __ fdiv_i(1);
      break;
    case Token::MOD: {
      // Pass two doubles as arguments on the stack.
      __ PrepareCallCFunction(4, eax);
      X87Mov(Operand(esp, 1 * kDoubleSize), right);
      X87Mov(Operand(esp, 0), left);
      X87Free(right);
      DCHECK(left.is(result));
      X87PrepareToWrite(result);
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          4);

      // Return value is in st(0) on ia32.
      X87CommitWrite(result);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }

  // Restore the default value of control word.
  __ X87SetFPUCW(0x037F);
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->left()).is(edx));
  DCHECK(ToRegister(instr->right()).is(eax));
  DCHECK(ToRegister(instr->result()).is(eax));

  Handle<Code> code =
      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
}


template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

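  // Emit at most one jump: whenever one of the targets is the next block to
  // be emitted, fall through to it rather than branching.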
  if (right_block == left_block || cc == no_condition) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ jmp(chunk_->GetAssemblyLabel(right_block));
  }
}


template <class InstrType>
void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
  int true_block = instr->TrueDestination(chunk_);
  if (cc == no_condition) {
    __ jmp(chunk_->GetAssemblyLabel(true_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(true_block));
  }
}


template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
  int false_block = instr->FalseDestination(chunk_);
  if (cc == no_condition) {
    __ jmp(chunk_->GetAssemblyLabel(false_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(false_block));
  }
}


void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsSmiOrInteger32()) {
    Register reg = ToRegister(instr->value());
    __ test(reg, Operand(reg));
    EmitBranch(instr, not_zero);
  } else if (r.IsDouble()) {
    X87Register reg = ToX87Register(instr->value());
    X87LoadForUsage(reg);
    __ fldz();
    __ FCmp();
    EmitBranch(instr, not_zero);
  } else {
    DCHECK(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      DCHECK(!info()->IsStub());
      __ cmp(reg, factory()->true_value());
      EmitBranch(instr, equal);
    } else if (type.IsSmi()) {
      DCHECK(!info()->IsStub());
      __ test(reg, Operand(reg));
      EmitBranch(instr, not_equal);
    } else if (type.IsJSArray()) {
      DCHECK(!info()->IsStub());
      EmitBranch(instr, no_condition);
    } else if (type.IsHeapNumber()) {
      UNREACHABLE();
    } else if (type.IsString()) {
      DCHECK(!info()->IsStub());
      __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
      EmitBranch(instr, not_equal);
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ cmp(reg, factory()->undefined_value());
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // true -> true.
        __ cmp(reg, factory()->true_value());
        __ j(equal, instr->TrueLabel(chunk_));
        // false -> false.
        __ cmp(reg, factory()->false_value());
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ cmp(reg, factory()->null_value());
        __ j(equal, instr->FalseLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ test(reg, Operand(reg));
        __ j(equal, instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ test(reg, Immediate(kSmiTagMask));
        DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
      }

      Register map = no_reg;  // Keep the compiler happy.
      if (expected.NeedsMap()) {
        map = ToRegister(instr->temp());
        DCHECK(!map.is(reg));
        __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ test_b(FieldOperand(map, Map::kBitFieldOffset),
                    1 << Map::kIsUndetectable);
          __ j(not_zero, instr->FalseLabel(chunk_));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
        __ j(above_equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
        __ j(above_equal, &not_string, Label::kNear);
        __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
        __ j(not_zero, instr->TrueLabel(chunk_));
        __ jmp(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        __ CmpInstanceType(map, SYMBOL_TYPE);
        __ j(equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
        // SIMD value -> true.
        __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
        __ j(equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        Label not_heap_number;
        __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
               factory()->heap_number_map());
        __ j(not_equal, &not_heap_number, Label::kNear);
        __ fldz();
        __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
        __ FCmp();
        __ j(zero, instr->FalseLabel(chunk_));
        __ jmp(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
      }
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = no_condition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = equal;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = not_equal;
      break;
    case Token::LT:
      cond = is_unsigned ? below : less;
      break;
    case Token::GT:
      cond = is_unsigned ? above : greater;
      break;
    case Token::LTE:
      cond = is_unsigned ? below_equal : less_equal;
      break;
    case Token::GTE:
      cond = is_unsigned ? above_equal : greater_equal;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  bool is_unsigned =
      instr->is_double() ||
      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
  Condition cc = TokenToCondition(instr->op(), is_unsigned);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      X87LoadForUsage(ToX87Register(right), ToX87Register(left));
      __ FCmp();
      // Don't base result on EFLAGS when a NaN is involved. Instead
      // jump to the false block.
      __ j(parity_even, instr->FalseLabel(chunk_));
    } else {
      if (right->IsConstantOperand()) {
        __ cmp(ToOperand(left),
               ToImmediate(right, instr->hydrogen()->representation()));
      } else if (left->IsConstantOperand()) {
        __ cmp(ToOperand(right),
               ToImmediate(left, instr->hydrogen()->representation()));
        // We commuted the operands, so commute the condition.
        cc = CommuteCondition(cc);
      } else {
        __ cmp(ToRegister(left), ToOperand(right));
      }
    }
    EmitBranch(instr, cc);
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());

  if (instr->right()->IsConstantOperand()) {
    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
    __ CmpObject(left, right);
  } else {
    Operand right = ToOperand(instr->right());
    __ cmp(left, right);
  }
  EmitBranch(instr, equal);
}


void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ cmp(input_reg, factory()->the_hole_value());
    EmitBranch(instr, equal);
    return;
  }

  // Put the value on top of the x87 stack.
  X87Register src = ToX87Register(instr->object());
  X87LoadForUsage(src);
  __ fld(0);
  __ fld(0);
  __ FCmp();
  Label ok;
  __ j(parity_even, &ok, Label::kNear);
  __ fstp(0);
  EmitFalseBranch(instr, no_condition);
  __ bind(&ok);

  __ sub(esp, Immediate(kDoubleSize));
  __ fstp_d(MemOperand(esp, 0));

  __ add(esp, Immediate(kDoubleSize));
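  // The double just written is still addressable below the restored esp;
  // its upper word, which carries the hole NaN payload being tested for,
  // sits at esp - sizeof(kHoleNanUpper32).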
  int offset = sizeof(kHoleNanUpper32);
  // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff),
  // so we check the upper with 0xffffffff for hole as a temporary fix.
  __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
  EmitBranch(instr, equal);
}


void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  DCHECK(!rep.IsInteger32());

  if (rep.IsDouble()) {
    X87Register input = ToX87Register(instr->value());
    X87LoadForUsage(input);
    __ FXamMinusZero();
    EmitBranch(instr, equal);
  } else {
    Register value = ToRegister(instr->value());
    Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
    __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
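    // cmp against 1 sets the overflow flag only if the upper word is
    // 0x80000000 (the sign bit alone), which is the upper half of -0.0;
    // everything else takes the false branch, and the mantissa check below
    // confirms that the lower half is zero.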
    __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
           Immediate(0x1));
    EmitFalseBranch(instr, no_overflow);
    __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
           Immediate(0x00000000));
    EmitBranch(instr, equal);
  }
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }

  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);

  return cond;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->type().IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  Condition true_cond = EmitIsString(
      reg, temp, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Operand input = ToOperand(instr->value());

  __ test(input, Immediate(kSmiTagMask));
  EmitBranch(instr, zero);
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    STATIC_ASSERT(kSmiTag == 0);
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
            1 << Map::kIsUndetectable);
  EmitBranch(instr, not_zero);
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return equal;
    case Token::LT:
      return less;
    case Token::GT:
      return greater;
    case Token::LTE:
      return less_equal;
    case Token::GTE:
      return greater_equal;
    default:
      UNREACHABLE();
      return no_condition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->left()).is(edx));
  DCHECK(ToRegister(instr->right()).is(eax));

  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
  __ test(eax, eax);

  EmitBranch(instr, ComputeCompareCondition(instr->op()));
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  DCHECK(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());

  __ test(FieldOperand(input, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, equal);
}


// Branches to a label or falls through with the answer in the z flag.  Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  DCHECK(!input.is(temp));
  DCHECK(!input.is(temp2));
  DCHECK(!temp.is(temp2));
  __ JumpIfSmi(input, is_false);

  __ CmpObjectType(input, JS_FUNCTION_TYPE, temp);
  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
    __ j(equal, is_true);
  } else {
    __ j(equal, is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ GetMapConstructor(temp, temp, temp2);
  // Objects with a non-function constructor have class 'Object'.
  __ CmpInstanceType(temp2, JS_FUNCTION_TYPE);
  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ mov(temp, FieldOperand(temp,
                            SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are internalized it is sufficient to use an
  // identity comparison.
  __ cmp(temp, class_name);
  // End with the answer in the z flag.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
      class_name, input, temp, temp2);

  EmitBranch(instr, equal);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(instr, equal);
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
  DCHECK(ToRegister(instr->result()).is(eax));
  InstanceOfStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoHasInPrototypeChainAndBranch(
    LHasInPrototypeChainAndBranch* instr) {
  Register const object = ToRegister(instr->object());
  Register const object_map = ToRegister(instr->scratch());
  Register const object_prototype = object_map;
  Register const prototype = ToRegister(instr->prototype());

  // The {object} must be a spec object.  It's sufficient to know that {object}
  // is not a smi, since all other non-spec objects have {null} prototypes and
  // will be ruled out below.
  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
    __ test(object, Immediate(kSmiTagMask));
    EmitFalseBranch(instr, zero);
  }

  // Loop through the {object}s prototype chain looking for the {prototype}.
  __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
  Label loop;
  __ bind(&loop);

  // Deoptimize if the object needs to be access checked.
  __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
            1 << Map::kIsAccessCheckNeeded);
  DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
  // Deoptimize for proxies.
  __ CmpInstanceType(object_map, JS_PROXY_TYPE);
  DeoptimizeIf(equal, instr, Deoptimizer::kProxy);

  __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
  __ cmp(object_prototype, prototype);
  EmitTrueBranch(instr, equal);
  __ cmp(object_prototype, factory()->null_value());
  EmitFalseBranch(instr, equal);
  __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
  __ jmp(&loop);
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic =
      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  Label true_value, done;
  __ test(eax, Operand(eax));
  __ j(condition, &true_value, Label::kNear);
  __ mov(ToRegister(instr->result()), factory()->false_value());
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), factory()->true_value());
  __ bind(&done);
}


void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
  int extra_value_count = dynamic_frame_alignment ? 2 : 1;
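  // On top of the parameters the stack still holds the return address and,
  // when dynamic frame alignment is in effect, one extra padding word that
  // is expected to carry kAlignmentZapValue (verified below in debug code).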

  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    if (dynamic_frame_alignment && FLAG_debug_code) {
      __ cmp(Operand(esp,
                     (parameter_count + extra_value_count) * kPointerSize),
             Immediate(kAlignmentZapValue));
      __ Assert(equal, kExpectedAlignmentMarker);
    }
    __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
  } else {
    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    __ SmiUntag(reg);
    Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
    if (dynamic_frame_alignment && FLAG_debug_code) {
      DCHECK(extra_value_count == 2);
      __ cmp(Operand(esp, reg, times_pointer_size,
                     extra_value_count * kPointerSize),
             Immediate(kAlignmentZapValue));
      __ Assert(equal, kExpectedAlignmentMarker);
    }

    // Emit code to restore the stack based on instr->parameter_count().
2845     __ pop(return_addr_reg);  // save return address
2846     if (dynamic_frame_alignment) {
2847       __ inc(reg);  // 1 more for alignment
2848     }
2849     __ shl(reg, kPointerSizeLog2);
2850     __ add(esp, reg);
2851     __ jmp(return_addr_reg);
2852   }
2853 }
2854 
2855 
DoReturn(LReturn * instr)2856 void LCodeGen::DoReturn(LReturn* instr) {
2857   if (FLAG_trace && info()->IsOptimizing()) {
2858     // Preserve the return value on the stack and rely on the runtime call
2859     // to return the value in the same register.  We're leaving the code
2860     // managed by the register allocator and tearing down the frame, it's
2861     // safe to write to the context register.
2862     __ push(eax);
2863     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2864     __ CallRuntime(Runtime::kTraceExit);
2865   }
2866   if (dynamic_frame_alignment_) {
2867     // Fetch the state of the dynamic frame alignment.
2868     __ mov(edx, Operand(ebp,
2869       JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
2870   }
2871   if (NeedsEagerFrame()) {
2872     __ mov(esp, ebp);
2873     __ pop(ebp);
2874   }
2875   if (dynamic_frame_alignment_) {
2876     Label no_padding;
2877     __ cmp(edx, Immediate(kNoAlignmentPadding));
2878     __ j(equal, &no_padding, Label::kNear);
2879 
2880     EmitReturn(instr, true);
2881     __ bind(&no_padding);
2882   }
2883 
2884   EmitReturn(instr, false);
2885 }
2886 
2887 
2888 template <class T>
EmitVectorLoadICRegisters(T * instr)2889 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2890   Register vector_register = ToRegister(instr->temp_vector());
2891   Register slot_register = LoadWithVectorDescriptor::SlotRegister();
2892   DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2893   DCHECK(slot_register.is(eax));
2894 
2895   AllowDeferredHandleDereference vector_structure_check;
2896   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2897   __ mov(vector_register, vector);
2898   // No need to allocate this register.
2899   FeedbackVectorSlot slot = instr->hydrogen()->slot();
2900   int index = vector->GetIndex(slot);
2901   __ mov(slot_register, Immediate(Smi::FromInt(index)));
2902 }
2903 
2904 
2905 template <class T>
EmitVectorStoreICRegisters(T * instr)2906 void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2907   Register vector_register = ToRegister(instr->temp_vector());
2908   Register slot_register = ToRegister(instr->temp_slot());
2909 
2910   AllowDeferredHandleDereference vector_structure_check;
2911   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2912   __ mov(vector_register, vector);
2913   FeedbackVectorSlot slot = instr->hydrogen()->slot();
2914   int index = vector->GetIndex(slot);
2915   __ mov(slot_register, Immediate(Smi::FromInt(index)));
2916 }
2917 
2918 
DoLoadGlobalGeneric(LLoadGlobalGeneric * instr)2919 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2920   DCHECK(ToRegister(instr->context()).is(esi));
2921   DCHECK(ToRegister(instr->global_object())
2922              .is(LoadDescriptor::ReceiverRegister()));
2923   DCHECK(ToRegister(instr->result()).is(eax));
2924 
2925   __ mov(LoadDescriptor::NameRegister(), instr->name());
2926   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2927   Handle<Code> ic =
2928       CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
2929                                          SLOPPY, PREMONOMORPHIC).code();
2930   CallCode(ic, RelocInfo::CODE_TARGET, instr);
2931 }
2932 
2933 
DoLoadContextSlot(LLoadContextSlot * instr)2934 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2935   Register context = ToRegister(instr->context());
2936   Register result = ToRegister(instr->result());
2937   __ mov(result, ContextOperand(context, instr->slot_index()));
2938 
2939   if (instr->hydrogen()->RequiresHoleCheck()) {
2940     __ cmp(result, factory()->the_hole_value());
2941     if (instr->hydrogen()->DeoptimizesOnHole()) {
2942       DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2943     } else {
2944       Label is_not_hole;
2945       __ j(not_equal, &is_not_hole, Label::kNear);
2946       __ mov(result, factory()->undefined_value());
2947       __ bind(&is_not_hole);
2948     }
2949   }
2950 }
2951 
2952 
DoStoreContextSlot(LStoreContextSlot * instr)2953 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2954   Register context = ToRegister(instr->context());
2955   Register value = ToRegister(instr->value());
2956 
2957   Label skip_assignment;
2958 
2959   Operand target = ContextOperand(context, instr->slot_index());
2960   if (instr->hydrogen()->RequiresHoleCheck()) {
2961     __ cmp(target, factory()->the_hole_value());
2962     if (instr->hydrogen()->DeoptimizesOnHole()) {
2963       DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2964     } else {
2965       __ j(not_equal, &skip_assignment, Label::kNear);
2966     }
2967   }
2968 
2969   __ mov(target, value);
2970   if (instr->hydrogen()->NeedsWriteBarrier()) {
2971     SmiCheck check_needed =
2972         instr->hydrogen()->value()->type().IsHeapObject()
2973             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2974     Register temp = ToRegister(instr->temp());
2975     int offset = Context::SlotOffset(instr->slot_index());
2976     __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs,
2977                               EMIT_REMEMBERED_SET, check_needed);
2978   }
2979 
2980   __ bind(&skip_assignment);
2981 }
2982 
2983 
2984 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2985   HObjectAccess access = instr->hydrogen()->access();
2986   int offset = access.offset();
2987 
2988   if (access.IsExternalMemory()) {
2989     Register result = ToRegister(instr->result());
2990     MemOperand operand = instr->object()->IsConstantOperand()
2991         ? MemOperand::StaticVariable(ToExternalReference(
2992                 LConstantOperand::cast(instr->object())))
2993         : MemOperand(ToRegister(instr->object()), offset);
2994     __ Load(result, operand, access.representation());
2995     return;
2996   }
2997 
2998   Register object = ToRegister(instr->object());
2999   if (instr->hydrogen()->representation().IsDouble()) {
3000     X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
3001     return;
3002   }
3003 
3004   Register result = ToRegister(instr->result());
3005   if (!access.IsInobject()) {
3006     __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
3007     object = result;
3008   }
3009   __ Load(result, FieldOperand(object, offset), access.representation());
3010 }
3011 
3012 
3013 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
3014   DCHECK(!operand->IsDoubleRegister());
3015   if (operand->IsConstantOperand()) {
3016     Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
3017     AllowDeferredHandleDereference smi_check;
3018     if (object->IsSmi()) {
3019       __ Push(Handle<Smi>::cast(object));
3020     } else {
3021       __ PushHeapObject(Handle<HeapObject>::cast(object));
3022     }
3023   } else if (operand->IsRegister()) {
3024     __ push(ToRegister(operand));
3025   } else {
3026     __ push(ToOperand(operand));
3027   }
3028 }
3029 
3030 
3031 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3032   DCHECK(ToRegister(instr->context()).is(esi));
3033   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3034   DCHECK(ToRegister(instr->result()).is(eax));
3035 
3036   __ mov(LoadDescriptor::NameRegister(), instr->name());
3037   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3038   Handle<Code> ic =
3039       CodeFactory::LoadICInOptimizedCode(
3040           isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
3041           instr->hydrogen()->initialization_state()).code();
3042   CallCode(ic, RelocInfo::CODE_TARGET, instr);
3043 }
3044 
3045 
3046 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3047   Register function = ToRegister(instr->function());
3048   Register temp = ToRegister(instr->temp());
3049   Register result = ToRegister(instr->result());
3050 
3051   // Get the prototype or initial map from the function.
3052   __ mov(result,
3053          FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3054 
3055   // Check that the function has a prototype or an initial map.
3056   __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
3057   DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3058 
3059   // If the function does not have an initial map, we're done.
3060   Label done;
3061   __ CmpObjectType(result, MAP_TYPE, temp);
3062   __ j(not_equal, &done, Label::kNear);
3063 
3064   // Get the prototype from the initial map.
3065   __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
3066 
3067   // All done.
3068   __ bind(&done);
3069 }
3070 
3071 
3072 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3073   Register result = ToRegister(instr->result());
3074   __ LoadRoot(result, instr->index());
3075 }
3076 
3077 
3078 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3079   Register arguments = ToRegister(instr->arguments());
3080   Register result = ToRegister(instr->result());
3081   if (instr->length()->IsConstantOperand() &&
3082       instr->index()->IsConstantOperand()) {
3083     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3084     int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3085     int index = (const_length - const_index) + 1;
3086     __ mov(result, Operand(arguments, index * kPointerSize));
3087   } else {
3088     Register length = ToRegister(instr->length());
3089     Operand index = ToOperand(instr->index());
3090     // There are two words between the frame pointer and the last argument.
3091     // Subtracting index from length accounts for one of them; the extra
         // kPointerSize displacement in the operand below accounts for the other.
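         // For example (a sketch of the stack layout): with length == 3 and
         // index == 0 the operand resolves to arguments + (3 - 0) * 4 + 4 =
         // arguments + 16, skipping the saved frame pointer, the return
         // address, and arguments 2 and 1 to land on argument 0.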
3092     __ sub(length, index);
3093     __ mov(result, Operand(arguments, length, times_4, kPointerSize));
3094   }
3095 }
3096 
3097 
3098 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3099   ElementsKind elements_kind = instr->elements_kind();
3100   LOperand* key = instr->key();
3101   if (!key->IsConstantOperand() &&
3102       ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
3103                                   elements_kind)) {
3104     __ SmiUntag(ToRegister(key));
3105   }
3106   Operand operand(BuildFastArrayOperand(
3107       instr->elements(),
3108       key,
3109       instr->hydrogen()->key()->representation(),
3110       elements_kind,
3111       instr->base_offset()));
3112   if (elements_kind == FLOAT32_ELEMENTS) {
3113     X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
3114   } else if (elements_kind == FLOAT64_ELEMENTS) {
3115     X87Mov(ToX87Register(instr->result()), operand);
3116   } else {
3117     Register result(ToRegister(instr->result()));
3118     switch (elements_kind) {
3119       case INT8_ELEMENTS:
3120         __ movsx_b(result, operand);
3121         break;
3122       case UINT8_ELEMENTS:
3123       case UINT8_CLAMPED_ELEMENTS:
3124         __ movzx_b(result, operand);
3125         break;
3126       case INT16_ELEMENTS:
3127         __ movsx_w(result, operand);
3128         break;
3129       case UINT16_ELEMENTS:
3130         __ movzx_w(result, operand);
3131         break;
3132       case INT32_ELEMENTS:
3133         __ mov(result, operand);
3134         break;
3135       case UINT32_ELEMENTS:
3136         __ mov(result, operand);
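         // Without the kUint32 flag the result is consumed as a signed int32,
         // so a value with the sign bit set (>= 2^31) is unrepresentable and
         // the check below deoptimizes on it.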
3137         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3138           __ test(result, Operand(result));
3139           DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
3140         }
3141         break;
3142       case FLOAT32_ELEMENTS:
3143       case FLOAT64_ELEMENTS:
3144       case FAST_SMI_ELEMENTS:
3145       case FAST_ELEMENTS:
3146       case FAST_DOUBLE_ELEMENTS:
3147       case FAST_HOLEY_SMI_ELEMENTS:
3148       case FAST_HOLEY_ELEMENTS:
3149       case FAST_HOLEY_DOUBLE_ELEMENTS:
3150       case DICTIONARY_ELEMENTS:
3151       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
3152       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
3153         UNREACHABLE();
3154         break;
3155     }
3156   }
3157 }
3158 
3159 
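// The hole in a holey double array is a dedicated NaN bit pattern (the sNaN
// 0xfff7fffffff7ffff mentioned in the store path below), so comparing the
// upper 32 bits of the element suffices -- hence the
// base_offset() + sizeof(kHoleNanLower32) displacement in the check.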
3160 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3161   if (instr->hydrogen()->RequiresHoleCheck()) {
3162     Operand hole_check_operand = BuildFastArrayOperand(
3163         instr->elements(), instr->key(),
3164         instr->hydrogen()->key()->representation(),
3165         FAST_DOUBLE_ELEMENTS,
3166         instr->base_offset() + sizeof(kHoleNanLower32));
3167     __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
3168     DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3169   }
3170 
3171   Operand double_load_operand = BuildFastArrayOperand(
3172       instr->elements(),
3173       instr->key(),
3174       instr->hydrogen()->key()->representation(),
3175       FAST_DOUBLE_ELEMENTS,
3176       instr->base_offset());
3177   X87Mov(ToX87Register(instr->result()), double_load_operand);
3178 }
3179 
3180 
3181 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3182   Register result = ToRegister(instr->result());
3183 
3184   // Load the result.
3185   __ mov(result,
3186          BuildFastArrayOperand(instr->elements(), instr->key(),
3187                                instr->hydrogen()->key()->representation(),
3188                                FAST_ELEMENTS, instr->base_offset()));
3189 
3190   // Check for the hole value.
3191   if (instr->hydrogen()->RequiresHoleCheck()) {
3192     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3193       __ test(result, Immediate(kSmiTagMask));
3194       DeoptimizeIf(not_equal, instr, Deoptimizer::kNotASmi);
3195     } else {
3196       __ cmp(result, factory()->the_hole_value());
3197       DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3198     }
3199   } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3200     DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
3201     Label done;
3202     __ cmp(result, factory()->the_hole_value());
3203     __ j(not_equal, &done);
3204     if (info()->IsStub()) {
3205       // A stub can safely convert the hole to undefined only if the array
3206       // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3207       // it needs to bail out.
3208       __ mov(result, isolate()->factory()->array_protector());
3209       __ cmp(FieldOperand(result, PropertyCell::kValueOffset),
3210              Immediate(Smi::FromInt(Isolate::kArrayProtectorValid)));
3211       DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
3212     }
3213     __ mov(result, isolate()->factory()->undefined_value());
3214     __ bind(&done);
3215   }
3216 }
3217 
3218 
3219 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3220   if (instr->is_fixed_typed_array()) {
3221     DoLoadKeyedExternalArray(instr);
3222   } else if (instr->hydrogen()->representation().IsDouble()) {
3223     DoLoadKeyedFixedDoubleArray(instr);
3224   } else {
3225     DoLoadKeyedFixedArray(instr);
3226   }
3227 }
3228 
3229 
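// A worked example for the Smi-key case below: on ia32 a Smi is the integer
// shifted left by kSmiTagSize (1), so a key k arrives as k << 1. For
// FAST_ELEMENTS the element shift size is 2; reducing it by kSmiTagSize
// yields scale factor times_2, and (k << 1) * 2 == k * 4, i.e. exactly
// k * kPointerSize.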
3230 Operand LCodeGen::BuildFastArrayOperand(
3231     LOperand* elements_pointer,
3232     LOperand* key,
3233     Representation key_representation,
3234     ElementsKind elements_kind,
3235     uint32_t base_offset) {
3236   Register elements_pointer_reg = ToRegister(elements_pointer);
3237   int element_shift_size = ElementsKindToShiftSize(elements_kind);
3238   int shift_size = element_shift_size;
3239   if (key->IsConstantOperand()) {
3240     int constant_value = ToInteger32(LConstantOperand::cast(key));
3241     if (constant_value & 0xF0000000) {
3242       Abort(kArrayIndexConstantValueTooBig);
3243     }
3244     return Operand(elements_pointer_reg,
3245                    ((constant_value) << shift_size)
3246                        + base_offset);
3247   } else {
3248     // Take the tag bit into account while computing the shift size.
3249     if (key_representation.IsSmi() && (shift_size >= 1)) {
3250       shift_size -= kSmiTagSize;
3251     }
3252     ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3253     return Operand(elements_pointer_reg,
3254                    ToRegister(key),
3255                    scale_factor,
3256                    base_offset);
3257   }
3258 }
3259 
3260 
3261 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3262   DCHECK(ToRegister(instr->context()).is(esi));
3263   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3264   DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3265 
3266   if (instr->hydrogen()->HasVectorAndSlot()) {
3267     EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3268   }
3269 
3270   Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
3271                         isolate(), instr->hydrogen()->language_mode(),
3272                         instr->hydrogen()->initialization_state()).code();
3273   CallCode(ic, RelocInfo::CODE_TARGET, instr);
3274 }
3275 
3276 
3277 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3278   Register result = ToRegister(instr->result());
3279 
3280   if (instr->hydrogen()->from_inlined()) {
3281     __ lea(result, Operand(esp, -2 * kPointerSize));
3282   } else {
3283     // Check for an arguments adaptor frame.
3284     Label done, adapted;
3285     __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3286     __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
3287     __ cmp(Operand(result),
3288            Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3289     __ j(equal, &adapted, Label::kNear);
3290 
3291     // No arguments adaptor frame.
3292     __ mov(result, Operand(ebp));
3293     __ jmp(&done, Label::kNear);
3294 
3295     // Arguments adaptor frame present.
3296     __ bind(&adapted);
3297     __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3298 
3299     // Result is the frame pointer for the frame if not adapted and for the real
3300     // frame below the adaptor frame if adapted.
3301     __ bind(&done);
3302   }
3303 }
3304 
3305 
3306 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3307   Operand elem = ToOperand(instr->elements());
3308   Register result = ToRegister(instr->result());
3309 
3310   Label done;
3311 
3312   // If there is no arguments adaptor frame, the number of arguments is fixed.
3313   __ cmp(ebp, elem);
3314   __ mov(result, Immediate(scope()->num_parameters()));
3315   __ j(equal, &done, Label::kNear);
3316 
3317   // Arguments adaptor frame present. Get argument length from there.
3318   __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3319   __ mov(result, Operand(result,
3320                          ArgumentsAdaptorFrameConstants::kLengthOffset));
3321   __ SmiUntag(result);
3322 
3323   // Argument length is in result register.
3324   __ bind(&done);
3325 }
3326 
3327 
3328 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3329   Register receiver = ToRegister(instr->receiver());
3330   Register function = ToRegister(instr->function());
3331 
3332   // If the receiver is null or undefined, we have to pass the global
3333   // object as a receiver to normal functions. Values have to be
3334   // passed unchanged to builtins and strict-mode functions.
3335   Label receiver_ok, global_object;
3336   Register scratch = ToRegister(instr->temp());
3337 
3338   if (!instr->hydrogen()->known_function()) {
3339     // Do not transform the receiver to object for strict mode
3340     // functions.
3341     __ mov(scratch,
3342            FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3343     __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
3344               1 << SharedFunctionInfo::kStrictModeBitWithinByte);
3345     __ j(not_equal, &receiver_ok);
3346 
3347     // Do not transform the receiver to object for builtins.
3348     __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
3349               1 << SharedFunctionInfo::kNativeBitWithinByte);
3350     __ j(not_equal, &receiver_ok);
3351   }
3352 
3353   // Normal function. Replace undefined or null with global receiver.
3354   __ cmp(receiver, factory()->null_value());
3355   __ j(equal, &global_object);
3356   __ cmp(receiver, factory()->undefined_value());
3357   __ j(equal, &global_object);
3358 
3359   // The receiver should be a JS object.
3360   __ test(receiver, Immediate(kSmiTagMask));
3361   DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
3362   __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
3363   DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
3364 
3365   __ jmp(&receiver_ok, Label::kNear);
3366   __ bind(&global_object);
3367   __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
3368   __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
3369   __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
3370   __ bind(&receiver_ok);
3371 }
3372 
3373 
3374 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3375   Register receiver = ToRegister(instr->receiver());
3376   Register function = ToRegister(instr->function());
3377   Register length = ToRegister(instr->length());
3378   Register elements = ToRegister(instr->elements());
3379   DCHECK(receiver.is(eax));  // Used for parameter count.
3380   DCHECK(function.is(edi));  // Required by InvokeFunction.
3381   DCHECK(ToRegister(instr->result()).is(eax));
3382 
3383   // Copy the arguments to this function possibly from the
3384   // adaptor frame below it.
3385   const uint32_t kArgumentsLimit = 1 * KB;
3386   __ cmp(length, kArgumentsLimit);
3387   DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
3388 
3389   __ push(receiver);
3390   __ mov(receiver, length);
3391 
3392   // Loop through the arguments pushing them onto the execution
3393   // stack.
3394   Label invoke, loop;
3395   // length is a small non-negative integer, due to the test above.
3396   __ test(length, Operand(length));
3397   __ j(zero, &invoke, Label::kNear);
3398   __ bind(&loop);
3399   __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
3400   __ dec(length);
3401   __ j(not_zero, &loop);
3402 
3403   // Invoke the function.
3404   __ bind(&invoke);
3405   DCHECK(instr->HasPointerMap());
3406   LPointerMap* pointers = instr->pointer_map();
3407   SafepointGenerator safepoint_generator(
3408       this, pointers, Safepoint::kLazyDeopt);
3409   ParameterCount actual(eax);
3410   __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
3411                     safepoint_generator);
3412 }
3413 
3414 
3415 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
3416   __ int3();
3417 }
3418 
3419 
3420 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3421   LOperand* argument = instr->value();
3422   EmitPushTaggedOperand(argument);
3423 }
3424 
3425 
3426 void LCodeGen::DoDrop(LDrop* instr) {
3427   __ Drop(instr->count());
3428 }
3429 
3430 
3431 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3432   Register result = ToRegister(instr->result());
3433   __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
3434 }
3435 
3436 
3437 void LCodeGen::DoContext(LContext* instr) {
3438   Register result = ToRegister(instr->result());
3439   if (info()->IsOptimizing()) {
3440     __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
3441   } else {
3442     // If there is no frame, the context must be in esi.
3443     DCHECK(result.is(esi));
3444   }
3445 }
3446 
3447 
3448 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3449   DCHECK(ToRegister(instr->context()).is(esi));
3450   __ push(Immediate(instr->hydrogen()->pairs()));
3451   __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
3452   CallRuntime(Runtime::kDeclareGlobals, instr);
3453 }
3454 
3455 
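// If the formal parameter count matches the arity (or the function opts out
// of adaptation via kDontAdaptArgumentsSentinel), we can set up the context,
// new.target and argument count ourselves and call the code entry directly.
// Otherwise InvokeFunction routes the call through the arguments adaptor so
// that missing parameters read as undefined.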
3456 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3457                                  int formal_parameter_count, int arity,
3458                                  LInstruction* instr) {
3459   bool dont_adapt_arguments =
3460       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3461   bool can_invoke_directly =
3462       dont_adapt_arguments || formal_parameter_count == arity;
3463 
3464   Register function_reg = edi;
3465 
3466   if (can_invoke_directly) {
3467     // Change context.
3468     __ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));
3469 
3470     // Always initialize new target and number of actual arguments.
3471     __ mov(edx, factory()->undefined_value());
3472     __ mov(eax, arity);
3473 
3474     // Invoke function directly.
3475     if (function.is_identical_to(info()->closure())) {
3476       __ CallSelf();
3477     } else {
3478       __ call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
3479     }
3480     RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3481   } else {
3482     // We need to adapt arguments.
3483     LPointerMap* pointers = instr->pointer_map();
3484     SafepointGenerator generator(
3485         this, pointers, Safepoint::kLazyDeopt);
3486     ParameterCount count(arity);
3487     ParameterCount expected(formal_parameter_count);
3488     __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
3489   }
3490 }
3491 
3492 
3493 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3494   DCHECK(ToRegister(instr->result()).is(eax));
3495 
3496   if (instr->hydrogen()->IsTailCall()) {
3497     if (NeedsEagerFrame()) __ leave();
3498 
3499     if (instr->target()->IsConstantOperand()) {
3500       LConstantOperand* target = LConstantOperand::cast(instr->target());
3501       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3502       __ jmp(code, RelocInfo::CODE_TARGET);
3503     } else {
3504       DCHECK(instr->target()->IsRegister());
3505       Register target = ToRegister(instr->target());
3506       __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3507       __ jmp(target);
3508     }
3509   } else {
3510     LPointerMap* pointers = instr->pointer_map();
3511     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3512 
3513     if (instr->target()->IsConstantOperand()) {
3514       LConstantOperand* target = LConstantOperand::cast(instr->target());
3515       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3516       generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3517       __ call(code, RelocInfo::CODE_TARGET);
3518     } else {
3519       DCHECK(instr->target()->IsRegister());
3520       Register target = ToRegister(instr->target());
3521       generator.BeforeCall(__ CallSize(Operand(target)));
3522       __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3523       __ call(target);
3524     }
3525     generator.AfterCall();
3526   }
3527 }
3528 
3529 
3530 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3531   DCHECK(ToRegister(instr->function()).is(edi));
3532   DCHECK(ToRegister(instr->result()).is(eax));
3533 
3534   // Change context.
3535   __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
3536 
3537   // Always initialize new target and number of actual arguments.
3538   __ mov(edx, factory()->undefined_value());
3539   __ mov(eax, instr->arity());
3540 
3541   bool is_self_call = false;
3542   if (instr->hydrogen()->function()->IsConstant()) {
3543     HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3544     Handle<JSFunction> jsfun =
3545       Handle<JSFunction>::cast(fun_const->handle(isolate()));
3546     is_self_call = jsfun.is_identical_to(info()->closure());
3547   }
3548 
3549   if (is_self_call) {
3550     __ CallSelf();
3551   } else {
3552     __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
3553   }
3554 
3555   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3556 }
3557 
3558 
3559 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3560   Register input_reg = ToRegister(instr->value());
3561   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
3562          factory()->heap_number_map());
3563   DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
3564 
3565   Label slow, allocated, done;
3566   Register tmp = input_reg.is(eax) ? ecx : eax;
3567   Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
3568 
3569   // Preserve the value of all registers.
3570   PushSafepointRegistersScope scope(this);
3571 
3572   __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3573   // Check the sign of the argument. If the argument is positive, just
3574   // return it. We do not need to patch the stack since |input| and
3575   // |result| are the same register and |input| will be restored
3576   // unchanged by popping safepoint registers.
3577   __ test(tmp, Immediate(HeapNumber::kSignMask));
3578   __ j(zero, &done, Label::kNear);
3579 
3580   __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
3581   __ jmp(&allocated, Label::kNear);
3582 
3583   // Slow case: Call the runtime system to do the number allocation.
3584   __ bind(&slow);
3585   CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
3586                           instr, instr->context());
3587   // Set the pointer to the new heap number in tmp.
3588   if (!tmp.is(eax)) __ mov(tmp, eax);
3589   // Restore input_reg after call to runtime.
3590   __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3591 
3592   __ bind(&allocated);
3593   __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3594   __ and_(tmp2, ~HeapNumber::kSignMask);
3595   __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
3596   __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
3597   __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
3598   __ StoreToSafepointRegisterSlot(input_reg, tmp);
3599 
3600   __ bind(&done);
3601 }
3602 
3603 
3604 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3605   Register input_reg = ToRegister(instr->value());
3606   __ test(input_reg, Operand(input_reg));
3607   Label is_positive;
3608   __ j(not_sign, &is_positive, Label::kNear);
3609   __ neg(input_reg);  // Sets flags.
3610   DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
3611   __ bind(&is_positive);
3612 }
3613 
3614 
3615 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3616   // Class for deferred case.
3617   class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3618    public:
3619     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3620                                     LMathAbs* instr,
3621                                     const X87Stack& x87_stack)
3622         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
3623     void Generate() override {
3624       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3625     }
3626     LInstruction* instr() override { return instr_; }
3627 
3628    private:
3629     LMathAbs* instr_;
3630   };
3631 
3632   DCHECK(instr->value()->Equals(instr->result()));
3633   Representation r = instr->hydrogen()->value()->representation();
3634 
3635   if (r.IsDouble()) {
3636     X87Register value = ToX87Register(instr->value());
3637     X87Fxch(value);
3638     __ fabs();
3639   } else if (r.IsSmiOrInteger32()) {
3640     EmitIntegerMathAbs(instr);
3641   } else {  // Tagged case.
3642     DeferredMathAbsTaggedHeapNumber* deferred =
3643         new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
3644     Register input_reg = ToRegister(instr->value());
3645     // Smi check.
3646     __ JumpIfNotSmi(input_reg, deferred->entry());
3647     EmitIntegerMathAbs(instr);
3648     __ bind(deferred->exit());
3649   }
3650 }
3651 
3652 
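// x87 rounding is controlled by the RC field (bits 11:10) of the FPU control
// word: X87SetRC(0x0000) selects round-to-nearest, 0x0400 round-down and
// 0x0c00 truncation, matching the "rc=..." comments below. fist/fistp set
// the invalid-operation (IA) flag on out-of-range input, which the
// X87CheckIA() / DeoptimizeIf(equal, ...) pairs turn into deopts.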
3653 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3654   Register output_reg = ToRegister(instr->result());
3655   X87Register input_reg = ToX87Register(instr->value());
3656   X87Fxch(input_reg);
3657 
3658   Label not_minus_zero, done;
3659   // Deoptimize on unordered.
3660   __ fldz();
3661   __ fld(1);
3662   __ FCmp();
3663   DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
3664   __ j(below, &not_minus_zero, Label::kNear);
3665 
3666   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3667     // Check for negative zero.
3668     __ j(not_equal, &not_minus_zero, Label::kNear);
3669     // +- 0.0.
3670     __ fld(0);
3671     __ FXamSign();
3672     DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
3673     __ Move(output_reg, Immediate(0));
3674     __ jmp(&done, Label::kFar);
3675   }
3676 
3677   // Positive input.
3678   // rc=01B, round down.
3679   __ bind(&not_minus_zero);
3680   __ fnclex();
3681   __ X87SetRC(0x0400);
3682   __ sub(esp, Immediate(kPointerSize));
3683   __ fist_s(Operand(esp, 0));
3684   __ pop(output_reg);
3685   __ X87CheckIA();
3686   DeoptimizeIf(equal, instr, Deoptimizer::kOverflow);
3687   __ fnclex();
3688   __ X87SetRC(0x0000);
3689   __ bind(&done);
3690 }
3691 
3692 
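// Math.round(x) is floor(x + 0.5), computed over three ranges: for x >= 0.5
// truncating x + 0.5 equals the floor; for x in [-0.5, 0.5[ the result is 0
// (with a minus-zero bailout when required); for x < -0.5 the sum x + 0.5 is
// rounded down explicitly.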
3693 void LCodeGen::DoMathRound(LMathRound* instr) {
3694   X87Register input_reg = ToX87Register(instr->value());
3695   Register result = ToRegister(instr->result());
3696   X87Fxch(input_reg);
3697   Label below_one_half, below_minus_one_half, done;
3698 
3699   ExternalReference one_half = ExternalReference::address_of_one_half();
3700   ExternalReference minus_one_half =
3701       ExternalReference::address_of_minus_one_half();
3702 
3703   __ fld_d(Operand::StaticVariable(one_half));
3704   __ fld(1);
3705   __ FCmp();
3706   __ j(carry, &below_one_half);
3707 
3708   // Use round towards zero: since 0.5 <= x, truncating x + 0.5 computes floor(x + 0.5).
3709   __ fld(0);
3710   __ fadd_d(Operand::StaticVariable(one_half));
3711   // rc=11B, round toward zero.
3712   __ X87SetRC(0x0c00);
3713   __ sub(esp, Immediate(kPointerSize));
3714   // Clear exception bits.
3715   __ fnclex();
3716   __ fistp_s(MemOperand(esp, 0));
3717   // Check overflow.
3718   __ X87CheckIA();
3719   __ pop(result);
3720   DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
3721   __ fnclex();
3722   // Restore round mode.
3723   __ X87SetRC(0x0000);
3724   __ jmp(&done);
3725 
3726   __ bind(&below_one_half);
3727   __ fld_d(Operand::StaticVariable(minus_one_half));
3728   __ fld(1);
3729   __ FCmp();
3730   __ j(carry, &below_minus_one_half);
3731   // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3732   // we can ignore the difference between a result of -0 and +0.
3733   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3734     // If the sign is positive, we return +0.
3735     __ fld(0);
3736     __ FXamSign();
3737     DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
3738   }
3739   __ Move(result, Immediate(0));
3740   __ jmp(&done);
3741 
3742   __ bind(&below_minus_one_half);
3743   __ fld(0);
3744   __ fadd_d(Operand::StaticVariable(one_half));
3745   // rc=01B, round down.
3746   __ X87SetRC(0x0400);
3747   __ sub(esp, Immediate(kPointerSize));
3748   // Clear exception bits.
3749   __ fnclex();
3750   __ fistp_s(MemOperand(esp, 0));
3751   // Check overflow.
3752   __ X87CheckIA();
3753   __ pop(result);
3754   DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
3755   __ fnclex();
3756   // Restore round mode.
3757   __ X87SetRC(0x0000);
3758 
3759   __ bind(&done);
3760 }
3761 
3762 
3763 void LCodeGen::DoMathFround(LMathFround* instr) {
3764   X87Register input_reg = ToX87Register(instr->value());
3765   X87Fxch(input_reg);
3766   __ sub(esp, Immediate(kPointerSize));
3767   __ fstp_s(MemOperand(esp, 0));
3768   X87Fld(MemOperand(esp, 0), kX87FloatOperand);
3769   __ add(esp, Immediate(kPointerSize));
3770 }
3771 
3772 
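// fsqrt computes in the 80-bit extended format, so the control word is
// temporarily set to 0x027F (precision control = double) to get a correctly
// rounded double result, then restored to the default 0x037F (extended).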
3773 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3774   X87Register input_reg = ToX87Register(instr->value());
3775   __ X87SetFPUCW(0x027F);
3776   X87Fxch(input_reg);
3777   __ fsqrt();
3778   __ X87SetFPUCW(0x037F);
3779 }
3780 
3781 
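// fxam classifies st(0) into condition bits C3..C0 of the status word (bits
// 14, 10, 9 and 8 -- the 0x4700 mask below after fnstsw_ax). The value
// 0x0700 is C3=0, C2=1, C1=1, C0=1: an infinity whose sign bit (C1) is set,
// i.e. -Infinity.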
3782 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3783   X87Register input_reg = ToX87Register(instr->value());
3784   DCHECK(ToX87Register(instr->result()).is(input_reg));
3785   X87Fxch(input_reg);
3786   // Note that according to ECMA-262 15.8.2.13:
3787   // Math.pow(-Infinity, 0.5) == Infinity
3788   // Math.sqrt(-Infinity) == NaN
3789   Label done, sqrt;
3790   // Check base for -Infinity. C3 == 0, C2 == 1, C1 == 1 and C0 == 1
3791   __ fxam();
3792   __ push(eax);
3793   __ fnstsw_ax();
3794   __ and_(eax, Immediate(0x4700));
3795   __ cmp(eax, Immediate(0x0700));
3796   __ j(not_equal, &sqrt, Label::kNear);
3797   // If input is -Infinity, return Infinity.
3798   __ fchs();
3799   __ jmp(&done, Label::kNear);
3800 
3801   // Square root.
3802   __ bind(&sqrt);
3803   __ fldz();
3804   __ faddp();  // Convert -0 to +0.
3805   __ fsqrt();
3806   __ bind(&done);
3807   __ pop(eax);
3808 }
3809 
3810 
3811 void LCodeGen::DoPower(LPower* instr) {
3812   Representation exponent_type = instr->hydrogen()->right()->representation();
3813   X87Register result = ToX87Register(instr->result());
3814   // Having marked this as a call, we can use any registers.
3815   X87Register base = ToX87Register(instr->left());
3816   ExternalReference one_half = ExternalReference::address_of_one_half();
3817 
3818   if (exponent_type.IsSmi()) {
3819     Register exponent = ToRegister(instr->right());
3820     X87LoadForUsage(base);
3821     __ SmiUntag(exponent);
3822     __ push(exponent);
3823     __ fild_s(MemOperand(esp, 0));
3824     __ pop(exponent);
3825   } else if (exponent_type.IsTagged()) {
3826     Register exponent = ToRegister(instr->right());
3827     Register temp = exponent.is(ecx) ? eax : ecx;
3828     Label no_deopt, done;
3829     X87LoadForUsage(base);
3830     __ JumpIfSmi(exponent, &no_deopt);
3831     __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp);
3832     DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
3833     // Heap number (double).
3834     __ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset));
3835     __ jmp(&done);
3836     // SMI
3837     __ bind(&no_deopt);
3838     __ SmiUntag(exponent);
3839     __ push(exponent);
3840     __ fild_s(MemOperand(esp, 0));
3841     __ pop(exponent);
3842     __ bind(&done);
3843   } else if (exponent_type.IsInteger32()) {
3844     Register exponent = ToRegister(instr->right());
3845     X87LoadForUsage(base);
3846     __ push(exponent);
3847     __ fild_s(MemOperand(esp, 0));
3848     __ pop(exponent);
3849   } else {
3850     DCHECK(exponent_type.IsDouble());
3851     X87Register exponent_double = ToX87Register(instr->right());
3852     X87LoadForUsage(base, exponent_double);
3853   }
3854 
3855   // FP data stack {base, exponent(TOS)}.
3856   // Handle (exponent==+-0.5 && base == -0).
3857   Label not_plus_0;
3858   __ fld(0);
3859   __ fabs();
3860   X87Fld(Operand::StaticVariable(one_half), kX87DoubleOperand);
3861   __ FCmp();
3862   __ j(parity_even, &not_plus_0, Label::kNear);  // NaN.
3863   __ j(not_equal, &not_plus_0, Label::kNear);
3864   __ fldz();
3865   // FP data stack {base, exponent(TOS), zero}.
3866   __ faddp(2);
3867   __ bind(&not_plus_0);
3868 
3869   {
3870     __ PrepareCallCFunction(4, eax);
3871     __ fstp_d(MemOperand(esp, kDoubleSize));  // Exponent value.
3872     __ fstp_d(MemOperand(esp, 0));            // Base value.
3873     X87PrepareToWrite(result);
3874     __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
3875                      4);
3876     // Return value is in st(0) on ia32.
3877     X87CommitWrite(result);
3878   }
3879 }
3880 
3881 
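// ln(x) is computed with fyl2x, which replaces {y = st(1), x = st(0)} by
// y * log2(x); seeding y with fldln2 (ln 2) yields ln(2) * log2(x) == ln(x).
// Zero and negative inputs are special-cased to -Infinity and NaN below.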
3882 void LCodeGen::DoMathLog(LMathLog* instr) {
3883   DCHECK(instr->value()->Equals(instr->result()));
3884   X87Register input_reg = ToX87Register(instr->value());
3885   X87Fxch(input_reg);
3886 
3887   Label positive, done, zero, nan_result;
3888   __ fldz();
3889   __ fld(1);
3890   __ FCmp();
3891   __ j(below, &nan_result, Label::kNear);
3892   __ j(equal, &zero, Label::kNear);
3893   // Positive input.
3894   // {input, ln2}.
3895   __ fldln2();
3896   // {ln2, input}.
3897   __ fxch();
3898   // {result}.
3899   __ fyl2x();
3900   __ jmp(&done, Label::kNear);
3901 
3902   __ bind(&nan_result);
3903   X87PrepareToWrite(input_reg);
3904   __ push(Immediate(0xffffffff));
3905   __ push(Immediate(0x7fffffff));
3906   __ fld_d(MemOperand(esp, 0));
3907   __ lea(esp, Operand(esp, kDoubleSize));
3908   X87CommitWrite(input_reg);
3909   __ jmp(&done, Label::kNear);
3910 
3911   __ bind(&zero);
3912   ExternalReference ninf = ExternalReference::address_of_negative_infinity();
3913   X87PrepareToWrite(input_reg);
3914   __ fld_d(Operand::StaticVariable(ninf));
3915   X87CommitWrite(input_reg);
3916 
3917   __ bind(&done);
3918 }
3919 
3920 
3921 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3922   Register input = ToRegister(instr->value());
3923   Register result = ToRegister(instr->result());
3924 
3925   __ Lzcnt(result, input);
3926 }
3927 
3928 
3929 void LCodeGen::DoMathExp(LMathExp* instr) {
3930   X87Register input = ToX87Register(instr->value());
3931   X87Register result_reg = ToX87Register(instr->result());
3932   Register temp_result = ToRegister(instr->temp1());
3933   Register temp = ToRegister(instr->temp2());
3934   Label slow, done, smi, finish;
3935   DCHECK(result_reg.is(input));
3936 
3937   // Store the input in a heap number and call the runtime function kMathExpRT.
3938   if (FLAG_inline_new) {
3939     __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
3940     __ jmp(&done, Label::kNear);
3941   }
3942 
3943   // Slow case: Call the runtime system to do the number allocation.
3944   __ bind(&slow);
3945   {
3946     // TODO(3095996): Put a valid pointer value in the stack slot where the
3947     // result register is stored, as this register is in the pointer map, but
3948     // contains an integer value.
3949     __ Move(temp_result, Immediate(0));
3950 
3951     // Preserve the value of all registers.
3952     PushSafepointRegistersScope scope(this);
3953 
3954     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
3955     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
3956     RecordSafepointWithRegisters(instr->pointer_map(), 0,
3957                                  Safepoint::kNoLazyDeopt);
3958     __ StoreToSafepointRegisterSlot(temp_result, eax);
3959   }
3960   __ bind(&done);
3961   X87LoadForUsage(input);
3962   __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
3963 
3964   {
3965     // Preserve the value of all registers.
3966     PushSafepointRegistersScope scope(this);
3967 
3968     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
3969     __ push(temp_result);
3970     __ CallRuntimeSaveDoubles(Runtime::kMathExpRT);
3971     RecordSafepointWithRegisters(instr->pointer_map(), 1,
3972                                  Safepoint::kNoLazyDeopt);
3973     __ StoreToSafepointRegisterSlot(temp_result, eax);
3974   }
3975   X87PrepareToWrite(result_reg);
3976   // The return value of MathExpRT is a Smi or a heap number.
3977   __ JumpIfSmi(temp_result, &smi);
3978   // Heap number (double).
3979   __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
3980   __ jmp(&finish);
3981   // SMI
3982   __ bind(&smi);
3983   __ SmiUntag(temp_result);
3984   __ push(temp_result);
3985   __ fild_s(MemOperand(esp, 0));
3986   __ pop(temp_result);
3987   __ bind(&finish);
3988   X87CommitWrite(result_reg);
3989 }
3990 
3991 
3992 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3993   DCHECK(ToRegister(instr->context()).is(esi));
3994   DCHECK(ToRegister(instr->function()).is(edi));
3995   DCHECK(instr->HasPointerMap());
3996 
3997   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3998   if (known_function.is_null()) {
3999     LPointerMap* pointers = instr->pointer_map();
4000     SafepointGenerator generator(
4001         this, pointers, Safepoint::kLazyDeopt);
4002     ParameterCount count(instr->arity());
4003     __ InvokeFunction(edi, no_reg, count, CALL_FUNCTION, generator);
4004   } else {
4005     CallKnownFunction(known_function,
4006                       instr->hydrogen()->formal_parameter_count(),
4007                       instr->arity(), instr);
4008   }
4009 }
4010 
4011 
4012 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4013   DCHECK(ToRegister(instr->context()).is(esi));
4014   DCHECK(ToRegister(instr->function()).is(edi));
4015   DCHECK(ToRegister(instr->result()).is(eax));
4016 
4017   int arity = instr->arity();
4018   ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
4019   if (instr->hydrogen()->HasVectorAndSlot()) {
4020     Register slot_register = ToRegister(instr->temp_slot());
4021     Register vector_register = ToRegister(instr->temp_vector());
4022     DCHECK(slot_register.is(edx));
4023     DCHECK(vector_register.is(ebx));
4024 
4025     AllowDeferredHandleDereference vector_structure_check;
4026     Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
4027     int index = vector->GetIndex(instr->hydrogen()->slot());
4028 
4029     __ mov(vector_register, vector);
4030     __ mov(slot_register, Immediate(Smi::FromInt(index)));
4031 
4032     Handle<Code> ic =
4033         CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
4034     CallCode(ic, RelocInfo::CODE_TARGET, instr);
4035   } else {
4036     __ Set(eax, arity);
4037     CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
4038   }
4039 }
4040 
4041 
4042 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4043   DCHECK(ToRegister(instr->context()).is(esi));
4044   DCHECK(ToRegister(instr->constructor()).is(edi));
4045   DCHECK(ToRegister(instr->result()).is(eax));
4046 
4047   __ Move(eax, Immediate(instr->arity()));
4048   if (instr->arity() == 1) {
4049     // We only need the allocation site for the case we have a length argument.
4050     // The case may bail out to the runtime, which will determine the correct
4051     // elements kind with the site.
4052     __ mov(ebx, instr->hydrogen()->site());
4053   } else {
4054     __ mov(ebx, isolate()->factory()->undefined_value());
4055   }
4056 
4057   ElementsKind kind = instr->hydrogen()->elements_kind();
4058   AllocationSiteOverrideMode override_mode =
4059       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4060           ? DISABLE_ALLOCATION_SITES
4061           : DONT_OVERRIDE;
4062 
4063   if (instr->arity() == 0) {
4064     ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4065     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4066   } else if (instr->arity() == 1) {
4067     Label done;
4068     if (IsFastPackedElementsKind(kind)) {
4069       Label packed_case;
4070       // We might need the holey kind: a nonzero length argument creates
4071       // holes, so look at the first argument.
4072       __ mov(ecx, Operand(esp, 0));
4073       __ test(ecx, ecx);
4074       __ j(zero, &packed_case, Label::kNear);
4075 
4076       ElementsKind holey_kind = GetHoleyElementsKind(kind);
4077       ArraySingleArgumentConstructorStub stub(isolate(),
4078                                               holey_kind,
4079                                               override_mode);
4080       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4081       __ jmp(&done, Label::kNear);
4082       __ bind(&packed_case);
4083     }
4084 
4085     ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4086     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4087     __ bind(&done);
4088   } else {
4089     ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4090     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4091   }
4092 }
4093 
4094 
4095 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4096   DCHECK(ToRegister(instr->context()).is(esi));
4097   CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
4098 }
4099 
4100 
4101 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4102   Register function = ToRegister(instr->function());
4103   Register code_object = ToRegister(instr->code_object());
4104   __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
4105   __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
4106 }
4107 
4108 
4109 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4110   Register result = ToRegister(instr->result());
4111   Register base = ToRegister(instr->base_object());
4112   if (instr->offset()->IsConstantOperand()) {
4113     LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4114     __ lea(result, Operand(base, ToInteger32(offset)));
4115   } else {
4116     Register offset = ToRegister(instr->offset());
4117     __ lea(result, Operand(base, offset, times_1, 0));
4118   }
4119 }
4120 
4121 
4122 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4123   Representation representation = instr->hydrogen()->field_representation();
4124 
4125   HObjectAccess access = instr->hydrogen()->access();
4126   int offset = access.offset();
4127 
4128   if (access.IsExternalMemory()) {
4129     DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4130     MemOperand operand = instr->object()->IsConstantOperand()
4131         ? MemOperand::StaticVariable(
4132             ToExternalReference(LConstantOperand::cast(instr->object())))
4133         : MemOperand(ToRegister(instr->object()), offset);
4134     if (instr->value()->IsConstantOperand()) {
4135       LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4136       __ mov(operand, Immediate(ToInteger32(operand_value)));
4137     } else {
4138       Register value = ToRegister(instr->value());
4139       __ Store(value, operand, representation);
4140     }
4141     return;
4142   }
4143 
4144   Register object = ToRegister(instr->object());
4145   __ AssertNotSmi(object);
4146   DCHECK(!representation.IsSmi() ||
4147          !instr->value()->IsConstantOperand() ||
4148          IsSmi(LConstantOperand::cast(instr->value())));
4149   if (representation.IsDouble()) {
4150     DCHECK(access.IsInobject());
4151     DCHECK(!instr->hydrogen()->has_transition());
4152     DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4153     X87Register value = ToX87Register(instr->value());
4154     X87Mov(FieldOperand(object, offset), value);
4155     return;
4156   }
4157 
4158   if (instr->hydrogen()->has_transition()) {
4159     Handle<Map> transition = instr->hydrogen()->transition_map();
4160     AddDeprecationDependency(transition);
4161     __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
4162     if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
4163       Register temp = ToRegister(instr->temp());
4164       Register temp_map = ToRegister(instr->temp_map());
4165       __ mov(temp_map, transition);
4166       __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
4167       // Update the write barrier for the map field.
4168       __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
4169     }
4170   }
4171 
4172   // Do the store.
4173   Register write_register = object;
4174   if (!access.IsInobject()) {
4175     write_register = ToRegister(instr->temp());
4176     __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
4177   }
4178 
4179   MemOperand operand = FieldOperand(write_register, offset);
4180   if (instr->value()->IsConstantOperand()) {
4181     LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4182     if (operand_value->IsRegister()) {
4183       Register value = ToRegister(operand_value);
4184       __ Store(value, operand, representation);
4185     } else if (representation.IsInteger32() || representation.IsExternal()) {
4186       Immediate immediate = ToImmediate(operand_value, representation);
4187       DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4188       __ mov(operand, immediate);
4189     } else {
4190       Handle<Object> handle_value = ToHandle(operand_value);
4191       DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4192       __ mov(operand, handle_value);
4193     }
4194   } else {
4195     Register value = ToRegister(instr->value());
4196     __ Store(value, operand, representation);
4197   }
4198 
4199   if (instr->hydrogen()->NeedsWriteBarrier()) {
4200     Register value = ToRegister(instr->value());
4201     Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4202     // Update the write barrier for the object for in-object properties.
4203     __ RecordWriteField(write_register, offset, value, temp, kSaveFPRegs,
4204                         EMIT_REMEMBERED_SET,
4205                         instr->hydrogen()->SmiCheckForWriteBarrier(),
4206                         instr->hydrogen()->PointersToHereCheckForValue());
4207   }
4208 }
4209 
4210 
4211 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4212   DCHECK(ToRegister(instr->context()).is(esi));
4213   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4214   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4215 
4216   if (instr->hydrogen()->HasVectorAndSlot()) {
4217     EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
4218   }
4219 
4220   __ mov(StoreDescriptor::NameRegister(), instr->name());
4221   Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
4222                         isolate(), instr->language_mode(),
4223                         instr->hydrogen()->initialization_state()).code();
4224   CallCode(ic, RelocInfo::CODE_TARGET, instr);
4225 }
4226 
4227 
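// The bounds check nominally compares index against length and deopts on
// above_equal (or above, when equality is allowed). When the index is a
// constant the comparison is emitted as length cmp index instead, so
// CommuteCondition flips the condition to preserve the meaning.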
4228 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4229   Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
4230   if (instr->index()->IsConstantOperand()) {
4231     __ cmp(ToOperand(instr->length()),
4232            ToImmediate(LConstantOperand::cast(instr->index()),
4233                        instr->hydrogen()->length()->representation()));
4234     cc = CommuteCondition(cc);
4235   } else if (instr->length()->IsConstantOperand()) {
4236     __ cmp(ToOperand(instr->index()),
4237            ToImmediate(LConstantOperand::cast(instr->length()),
4238                        instr->hydrogen()->index()->representation()));
4239   } else {
4240     __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
4241   }
4242   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4243     Label done;
4244     __ j(NegateCondition(cc), &done, Label::kNear);
4245     __ int3();
4246     __ bind(&done);
4247   } else {
4248     DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
4249   }
4250 }
4251 
4252 
4253 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4254   ElementsKind elements_kind = instr->elements_kind();
4255   LOperand* key = instr->key();
4256   if (!key->IsConstantOperand() &&
4257       ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
4258                                   elements_kind)) {
4259     __ SmiUntag(ToRegister(key));
4260   }
4261   Operand operand(BuildFastArrayOperand(
4262       instr->elements(),
4263       key,
4264       instr->hydrogen()->key()->representation(),
4265       elements_kind,
4266       instr->base_offset()));
4267   if (elements_kind == FLOAT32_ELEMENTS) {
4268     X87Mov(operand, ToX87Register(instr->value()), kX87FloatOperand);
4269   } else if (elements_kind == FLOAT64_ELEMENTS) {
4270     uint64_t int_val = kHoleNanInt64;
4271     int32_t lower = static_cast<int32_t>(int_val);
4272     int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
4273     Operand operand2 = BuildFastArrayOperand(
4274         instr->elements(), instr->key(),
4275         instr->hydrogen()->key()->representation(), elements_kind,
4276         instr->base_offset() + kPointerSize);
4277 
4278     Label no_special_nan_handling, done;
4279     X87Register value = ToX87Register(instr->value());
4280     X87Fxch(value);
4281     __ lea(esp, Operand(esp, -kDoubleSize));
4282     __ fst_d(MemOperand(esp, 0));
4283     __ lea(esp, Operand(esp, kDoubleSize));
4284     int offset = sizeof(kHoleNanUpper32);
4285     // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff),
4286     // so we check the upper with 0xffffffff for hole as a temporary fix.
4287     __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
4288     __ j(not_equal, &no_special_nan_handling, Label::kNear);
4289     __ mov(operand, Immediate(lower));
4290     __ mov(operand2, Immediate(upper));
4291     __ jmp(&done, Label::kNear);
4292 
4293     __ bind(&no_special_nan_handling);
4294     __ fst_d(operand);
4295     __ bind(&done);
4296   } else {
4297     Register value = ToRegister(instr->value());
4298     switch (elements_kind) {
4299       case UINT8_ELEMENTS:
4300       case INT8_ELEMENTS:
4301       case UINT8_CLAMPED_ELEMENTS:
4302         __ mov_b(operand, value);
4303         break;
4304       case UINT16_ELEMENTS:
4305       case INT16_ELEMENTS:
4306         __ mov_w(operand, value);
4307         break;
4308       case UINT32_ELEMENTS:
4309       case INT32_ELEMENTS:
4310         __ mov(operand, value);
4311         break;
4312       case FLOAT32_ELEMENTS:
4313       case FLOAT64_ELEMENTS:
4314       case FAST_SMI_ELEMENTS:
4315       case FAST_ELEMENTS:
4316       case FAST_DOUBLE_ELEMENTS:
4317       case FAST_HOLEY_SMI_ELEMENTS:
4318       case FAST_HOLEY_ELEMENTS:
4319       case FAST_HOLEY_DOUBLE_ELEMENTS:
4320       case DICTIONARY_ELEMENTS:
4321       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
4322       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
4323         UNREACHABLE();
4324         break;
4325     }
4326   }
4327 }
4328 
4329 
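// Stores into double arrays canonicalize NaNs (here to the quiet pattern
// 0x7fffffffffffffff, written as two 32-bit halves) so that no arithmetic
// result can alias the hole NaN that the load path tests for.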
4330 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4331   Operand double_store_operand = BuildFastArrayOperand(
4332       instr->elements(),
4333       instr->key(),
4334       instr->hydrogen()->key()->representation(),
4335       FAST_DOUBLE_ELEMENTS,
4336       instr->base_offset());
4337 
4338   uint64_t int_val = kHoleNanInt64;
4339   int32_t lower = static_cast<int32_t>(int_val);
4340   int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
4341   Operand double_store_operand2 = BuildFastArrayOperand(
4342       instr->elements(), instr->key(),
4343       instr->hydrogen()->key()->representation(), FAST_DOUBLE_ELEMENTS,
4344       instr->base_offset() + kPointerSize);
4345 
4346   if (instr->hydrogen()->IsConstantHoleStore()) {
4347     // This means we should store the (double) hole. No floating point
4348     // registers required.
4349     __ mov(double_store_operand, Immediate(lower));
4350     __ mov(double_store_operand2, Immediate(upper));
4351   } else {
4352     Label no_special_nan_handling, done;
4353     X87Register value = ToX87Register(instr->value());
4354     X87Fxch(value);
4355 
4356     if (instr->NeedsCanonicalization()) {
4357       __ fld(0);
4358       __ fld(0);
4359       __ FCmp();
4360       __ j(parity_odd, &no_special_nan_handling, Label::kNear);
4361       // All NaNs are canonicalized to 0x7fffffffffffffff.
4362       __ mov(double_store_operand, Immediate(0xffffffff));
4363       __ mov(double_store_operand2, Immediate(0x7fffffff));
4364       __ jmp(&done, Label::kNear);
4365     } else {
4366       __ lea(esp, Operand(esp, -kDoubleSize));
4367       __ fst_d(MemOperand(esp, 0));
4368       __ lea(esp, Operand(esp, kDoubleSize));
4369       int offset = sizeof(kHoleNanUpper32);
4370       // x87 converts the sNaN (0xfff7fffffff7ffff) to a qNaN (0xfffffffffff7ffff),
4371       // so as a temporary fix we detect the hole by comparing its upper word with 0xffffffff.
4372       __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
4373       __ j(not_equal, &no_special_nan_handling, Label::kNear);
4374       __ mov(double_store_operand, Immediate(lower));
4375       __ mov(double_store_operand2, Immediate(upper));
4376       __ jmp(&done, Label::kNear);
4377     }
4378     __ bind(&no_special_nan_handling);
4379     __ fst_d(double_store_operand);
4380     __ bind(&done);
4381   }
4382 }
4383 
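// A minimal sketch (not part of the original file) of the word split used by
// the hole stores above: the 64-bit hole-NaN pattern is written as two 32-bit
// immediates so the FPU never touches (and never quietizes) the bit pattern.
// The helper name is hypothetical and <cstdint> is assumed.
static inline void SplitHoleNanSketch(uint64_t int_val, int32_t* lower,
                                      int32_t* upper) {
  *lower = static_cast<int32_t>(int_val);        // stored at base_offset
  *upper = static_cast<int32_t>(int_val >> 32);  // at base_offset + kPointerSize
}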
4384 
4385 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4386   Register elements = ToRegister(instr->elements());
4387   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4388 
4389   Operand operand = BuildFastArrayOperand(
4390       instr->elements(),
4391       instr->key(),
4392       instr->hydrogen()->key()->representation(),
4393       FAST_ELEMENTS,
4394       instr->base_offset());
4395   if (instr->value()->IsRegister()) {
4396     __ mov(operand, ToRegister(instr->value()));
4397   } else {
4398     LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4399     if (IsSmi(operand_value)) {
4400       Immediate immediate = ToImmediate(operand_value, Representation::Smi());
4401       __ mov(operand, immediate);
4402     } else {
4403       DCHECK(!IsInteger32(operand_value));
4404       Handle<Object> handle_value = ToHandle(operand_value);
4405       __ mov(operand, handle_value);
4406     }
4407   }
4408 
4409   if (instr->hydrogen()->NeedsWriteBarrier()) {
4410     DCHECK(instr->value()->IsRegister());
4411     Register value = ToRegister(instr->value());
4412     DCHECK(!instr->key()->IsConstantOperand());
4413     SmiCheck check_needed =
4414         instr->hydrogen()->value()->type().IsHeapObject()
4415           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4416     // Compute address of modified element and store it into key register.
4417     __ lea(key, operand);
4418     __ RecordWrite(elements, key, value, kSaveFPRegs, EMIT_REMEMBERED_SET,
4419                    check_needed,
4420                    instr->hydrogen()->PointersToHereCheckForValue());
4421   }
4422 }
4423 
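// A small sketch (not part of the original file) of the smi-check choice made
// above: if hydrogen already proved the value is a heap object, the write
// barrier may omit its runtime smi test. Names are hypothetical stand-ins.
enum SmiCheckSketch { kInlineSmiCheckSketch, kOmitSmiCheckSketch };
static inline SmiCheckSketch ChooseSmiCheckSketch(bool value_is_heap_object) {
  // Smis are immediates, not pointers, so a proven heap object is the only
  // case where the barrier can skip the tag test entirely.
  return value_is_heap_object ? kOmitSmiCheckSketch : kInlineSmiCheckSketch;
}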
4424 
4425 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4426   // Dispatch by elements kind: external (typed array), fast double, or fast.
4427   if (instr->is_fixed_typed_array()) {
4428     DoStoreKeyedExternalArray(instr);
4429   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4430     DoStoreKeyedFixedDoubleArray(instr);
4431   } else {
4432     DoStoreKeyedFixedArray(instr);
4433   }
4434 }
4435 
4436 
4437 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4438   DCHECK(ToRegister(instr->context()).is(esi));
4439   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4440   DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4441   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4442 
4443   if (instr->hydrogen()->HasVectorAndSlot()) {
4444     EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
4445   }
4446 
4447   Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
4448                         isolate(), instr->language_mode(),
4449                         instr->hydrogen()->initialization_state()).code();
4450   CallCode(ic, RelocInfo::CODE_TARGET, instr);
4451 }
4452 
4453 
4454 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4455   Register object = ToRegister(instr->object());
4456   Register temp = ToRegister(instr->temp());
4457   Label no_memento_found;
4458   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4459   DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
4460   __ bind(&no_memento_found);
4461 }
4462 
4463 
4464 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4465   class DeferredMaybeGrowElements final : public LDeferredCode {
4466    public:
4467     DeferredMaybeGrowElements(LCodeGen* codegen,
4468                               LMaybeGrowElements* instr,
4469                               const X87Stack& x87_stack)
4470         : LDeferredCode(codegen, x87_stack), instr_(instr) {}
4471     void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4472     LInstruction* instr() override { return instr_; }
4473 
4474    private:
4475     LMaybeGrowElements* instr_;
4476   };
4477 
4478   Register result = eax;
4479   DeferredMaybeGrowElements* deferred =
4480       new (zone()) DeferredMaybeGrowElements(this, instr, x87_stack_);
4481   LOperand* key = instr->key();
4482   LOperand* current_capacity = instr->current_capacity();
4483 
4484   DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4485   DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4486   DCHECK(key->IsConstantOperand() || key->IsRegister());
4487   DCHECK(current_capacity->IsConstantOperand() ||
4488          current_capacity->IsRegister());
4489 
4490   if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4491     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4492     int32_t constant_capacity =
4493         ToInteger32(LConstantOperand::cast(current_capacity));
4494     if (constant_key >= constant_capacity) {
4495       // Deferred case.
4496       __ jmp(deferred->entry());
4497     }
4498   } else if (key->IsConstantOperand()) {
4499     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4500     __ cmp(ToOperand(current_capacity), Immediate(constant_key));
4501     __ j(less_equal, deferred->entry());
4502   } else if (current_capacity->IsConstantOperand()) {
4503     int32_t constant_capacity =
4504         ToInteger32(LConstantOperand::cast(current_capacity));
4505     __ cmp(ToRegister(key), Immediate(constant_capacity));
4506     __ j(greater_equal, deferred->entry());
4507   } else {
4508     __ cmp(ToRegister(key), ToRegister(current_capacity));
4509     __ j(greater_equal, deferred->entry());
4510   }
4511 
4512   __ mov(result, ToOperand(instr->elements()));
4513   __ bind(deferred->exit());
4514 }
4515 
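// The four comparison shapes above all implement one scalar predicate; a
// sketch (not part of the original file, helper name hypothetical):
static inline bool NeedsGrowthSketch(int32_t key, int32_t current_capacity) {
  // cmp/j(greater_equal) and the swapped cmp/j(less_equal) both test this.
  return key >= current_capacity;
}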
4516 
4517 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4518   // TODO(3095996): Get rid of this. For now, we need to make the
4519   // result register contain a valid pointer because it is already
4520   // contained in the register pointer map.
4521   Register result = eax;
4522   __ Move(result, Immediate(0));
4523 
4524   // We have to call a stub.
4525   {
4526     PushSafepointRegistersScope scope(this);
4527     if (instr->object()->IsRegister()) {
4528       __ Move(result, ToRegister(instr->object()));
4529     } else {
4530       __ mov(result, ToOperand(instr->object()));
4531     }
4532 
4533     LOperand* key = instr->key();
4534     if (key->IsConstantOperand()) {
4535       __ mov(ebx, ToImmediate(key, Representation::Smi()));
4536     } else {
4537       __ Move(ebx, ToRegister(key));
4538       __ SmiTag(ebx);
4539     }
4540 
4541     GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
4542                                instr->hydrogen()->kind());
4543     __ CallStub(&stub);
4544     RecordSafepointWithLazyDeopt(
4545         instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4546     __ StoreToSafepointRegisterSlot(result, result);
4547   }
4548 
4549   // Deopt on smi, which means the elements array changed to dictionary mode.
4550   __ test(result, Immediate(kSmiTagMask));
4551   DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
4552 }
4553 
4554 
4555 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4556   Register object_reg = ToRegister(instr->object());
4557 
4558   Handle<Map> from_map = instr->original_map();
4559   Handle<Map> to_map = instr->transitioned_map();
4560   ElementsKind from_kind = instr->from_kind();
4561   ElementsKind to_kind = instr->to_kind();
4562 
4563   Label not_applicable;
4564   bool is_simple_map_transition =
4565       IsSimpleMapChangeTransition(from_kind, to_kind);
4566   Label::Distance branch_distance =
4567       is_simple_map_transition ? Label::kNear : Label::kFar;
4568   __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4569   __ j(not_equal, &not_applicable, branch_distance);
4570   if (is_simple_map_transition) {
4571     Register new_map_reg = ToRegister(instr->new_map_temp());
4572     __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
4573            Immediate(to_map));
4574     // Write barrier.
4575     DCHECK_NOT_NULL(instr->temp());
4576     __ RecordWriteForMap(object_reg, to_map, new_map_reg,
4577                          ToRegister(instr->temp()), kDontSaveFPRegs);
4578   } else {
4579     DCHECK(ToRegister(instr->context()).is(esi));
4580     DCHECK(object_reg.is(eax));
4581     PushSafepointRegistersScope scope(this);
4582     __ mov(ebx, to_map);
4583     bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4584     TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4585     __ CallStub(&stub);
4586     RecordSafepointWithLazyDeopt(instr,
4587         RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4588   }
4589   __ bind(&not_applicable);
4590 }
4591 
4592 
4593 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4594   class DeferredStringCharCodeAt final : public LDeferredCode {
4595    public:
4596     DeferredStringCharCodeAt(LCodeGen* codegen,
4597                              LStringCharCodeAt* instr,
4598                              const X87Stack& x87_stack)
4599         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4600     void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4601     LInstruction* instr() override { return instr_; }
4602 
4603    private:
4604     LStringCharCodeAt* instr_;
4605   };
4606 
4607   DeferredStringCharCodeAt* deferred =
4608       new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
4609 
4610   StringCharLoadGenerator::Generate(masm(),
4611                                     factory(),
4612                                     ToRegister(instr->string()),
4613                                     ToRegister(instr->index()),
4614                                     ToRegister(instr->result()),
4615                                     deferred->entry());
4616   __ bind(deferred->exit());
4617 }
4618 
4619 
4620 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4621   Register string = ToRegister(instr->string());
4622   Register result = ToRegister(instr->result());
4623 
4624   // TODO(3095996): Get rid of this. For now, we need to make the
4625   // result register contain a valid pointer because it is already
4626   // contained in the register pointer map.
4627   __ Move(result, Immediate(0));
4628 
4629   PushSafepointRegistersScope scope(this);
4630   __ push(string);
4631   // Push the index as a smi. This is safe because of the checks in
4632   // DoStringCharCodeAt above.
4633   STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
4634   if (instr->index()->IsConstantOperand()) {
4635     Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
4636                                       Representation::Smi());
4637     __ push(immediate);
4638   } else {
4639     Register index = ToRegister(instr->index());
4640     __ SmiTag(index);
4641     __ push(index);
4642   }
4643   CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2,
4644                           instr, instr->context());
4645   __ AssertSmi(eax);
4646   __ SmiUntag(eax);
4647   __ StoreToSafepointRegisterSlot(result, eax);
4648 }
4649 
4650 
4651 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4652   class DeferredStringCharFromCode final : public LDeferredCode {
4653    public:
4654     DeferredStringCharFromCode(LCodeGen* codegen,
4655                                LStringCharFromCode* instr,
4656                                const X87Stack& x87_stack)
4657         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4658     void Generate() override {
4659       codegen()->DoDeferredStringCharFromCode(instr_);
4660     }
4661     LInstruction* instr() override { return instr_; }
4662 
4663    private:
4664     LStringCharFromCode* instr_;
4665   };
4666 
4667   DeferredStringCharFromCode* deferred =
4668       new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
4669 
4670   DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4671   Register char_code = ToRegister(instr->char_code());
4672   Register result = ToRegister(instr->result());
4673   DCHECK(!char_code.is(result));
4674 
4675   __ cmp(char_code, String::kMaxOneByteCharCode);
4676   __ j(above, deferred->entry());
4677   __ Move(result, Immediate(factory()->single_character_string_cache()));
4678   __ mov(result, FieldOperand(result,
4679                               char_code, times_pointer_size,
4680                               FixedArray::kHeaderSize));
4681   __ cmp(result, factory()->undefined_value());
4682   __ j(equal, deferred->entry());
4683   __ bind(deferred->exit());
4684 }
4685 
4686 
4687 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4688   Register char_code = ToRegister(instr->char_code());
4689   Register result = ToRegister(instr->result());
4690 
4691   // TODO(3095996): Get rid of this. For now, we need to make the
4692   // result register contain a valid pointer because it is already
4693   // contained in the register pointer map.
4694   __ Move(result, Immediate(0));
4695 
4696   PushSafepointRegistersScope scope(this);
4697   __ SmiTag(char_code);
4698   __ push(char_code);
4699   CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
4700                           instr->context());
4701   __ StoreToSafepointRegisterSlot(result, eax);
4702 }
4703 
4704 
4705 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4706   DCHECK(ToRegister(instr->context()).is(esi));
4707   DCHECK(ToRegister(instr->left()).is(edx));
4708   DCHECK(ToRegister(instr->right()).is(eax));
4709   StringAddStub stub(isolate(),
4710                      instr->hydrogen()->flags(),
4711                      instr->hydrogen()->pretenure_flag());
4712   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4713 }
4714 
4715 
4716 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4717   LOperand* input = instr->value();
4718   LOperand* output = instr->result();
4719   DCHECK(input->IsRegister() || input->IsStackSlot());
4720   DCHECK(output->IsDoubleRegister());
4721   if (input->IsRegister()) {
4722     Register input_reg = ToRegister(input);
4723     __ push(input_reg);
4724     X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
4725     __ pop(input_reg);
4726   } else {
4727     X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
4728   }
4729 }
4730 
4731 
4732 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4733   LOperand* input = instr->value();
4734   LOperand* output = instr->result();
4735   X87Register res = ToX87Register(output);
4736   X87PrepareToWrite(res);
4737   __ LoadUint32NoSSE2(ToRegister(input));
4738   X87CommitWrite(res);
4739 }
4740 
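// A sketch (not part of the original file) of why the unsigned path needs
// LoadUint32NoSSE2: fild only reads signed integers, so a uint32 must be
// widened to a signed 64-bit value first (cf. the push 0 / fild_d sequence in
// DoDeferredNumberTagIU below). Assumes <cstdint>; the name is hypothetical.
static inline double Uint32ToDoubleSketch(uint32_t u) {
  return static_cast<double>(static_cast<uint64_t>(u));  // exact, sign-free
}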
4741 
4742 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4743   class DeferredNumberTagI final : public LDeferredCode {
4744    public:
4745     DeferredNumberTagI(LCodeGen* codegen,
4746                        LNumberTagI* instr,
4747                        const X87Stack& x87_stack)
4748         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4749     void Generate() override {
4750       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
4751                                        SIGNED_INT32);
4752     }
4753     LInstruction* instr() override { return instr_; }
4754 
4755    private:
4756     LNumberTagI* instr_;
4757   };
4758 
4759   LOperand* input = instr->value();
4760   DCHECK(input->IsRegister() && input->Equals(instr->result()));
4761   Register reg = ToRegister(input);
4762 
4763   DeferredNumberTagI* deferred =
4764       new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
4765   __ SmiTag(reg);
4766   __ j(overflow, deferred->entry());
4767   __ bind(deferred->exit());
4768 }
4769 
4770 
4771 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4772   class DeferredNumberTagU final : public LDeferredCode {
4773    public:
4774     DeferredNumberTagU(LCodeGen* codegen,
4775                        LNumberTagU* instr,
4776                        const X87Stack& x87_stack)
4777         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4778     void Generate() override {
4779       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
4780                                        UNSIGNED_INT32);
4781     }
4782     LInstruction* instr() override { return instr_; }
4783 
4784    private:
4785     LNumberTagU* instr_;
4786   };
4787 
4788   LOperand* input = instr->value();
4789   DCHECK(input->IsRegister() && input->Equals(instr->result()));
4790   Register reg = ToRegister(input);
4791 
4792   DeferredNumberTagU* deferred =
4793       new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
4794   __ cmp(reg, Immediate(Smi::kMaxValue));
4795   __ j(above, deferred->entry());
4796   __ SmiTag(reg);
4797   __ bind(deferred->exit());
4798 }
4799 
4800 
4801 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4802                                      LOperand* value,
4803                                      LOperand* temp,
4804                                      IntegerSignedness signedness) {
4805   Label done, slow;
4806   Register reg = ToRegister(value);
4807   Register tmp = ToRegister(temp);
4808 
4809   if (signedness == SIGNED_INT32) {
4810     // There was overflow, so bits 30 and 31 of the original integer
4811     // disagree. Try to allocate a heap number in new space and store
4812     // the value in there. If that fails, call the runtime system.
4813     __ SmiUntag(reg);
4814     __ xor_(reg, 0x80000000);
4815     __ push(reg);
4816     __ fild_s(Operand(esp, 0));
4817     __ pop(reg);
4818   } else {
4819     // There's no fild variant for unsigned values, so zero-extend to a 64-bit
4820     // int manually.
4821     __ push(Immediate(0));
4822     __ push(reg);
4823     __ fild_d(Operand(esp, 0));
4824     __ pop(reg);
4825     __ pop(reg);
4826   }
4827 
4828   if (FLAG_inline_new) {
4829     __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
4830     __ jmp(&done, Label::kNear);
4831   }
4832 
4833   // Slow case: Call the runtime system to do the number allocation.
4834   __ bind(&slow);
4835   {
4836     // TODO(3095996): Put a valid pointer value in the stack slot where the
4837     // result register is stored, as this register is in the pointer map, but
4838     // contains an integer value.
4839     __ Move(reg, Immediate(0));
4840 
4841     // Preserve the value of all registers.
4842     PushSafepointRegistersScope scope(this);
4843 
4844     // NumberTagI and NumberTagD use the context from the frame, rather than
4845     // the environment's HContext or HInlinedContext value.
4846     // They only call Runtime::kAllocateHeapNumber.
4847     // The corresponding HChange instructions are added in a phase that does
4848     // not have easy access to the local context.
4849     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4850     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4851     RecordSafepointWithRegisters(
4852         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4853     __ StoreToSafepointRegisterSlot(reg, eax);
4854   }
4855 
4856   __ bind(&done);
4857   __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
4858 }
4859 
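// A sketch (not part of the original file) of the SIGNED_INT32 recovery above.
// After an optimistic SmiTag (shl 1) overflowed, sar 1 copies bit 30 into bit
// 31; since overflow means bits 30 and 31 of the original disagreed, one xor
// of the sign bit restores the original value. Assumes <cstdint>.
static inline int32_t RecoverAfterSmiTagOverflowSketch(int32_t tagged) {
  int32_t untagged = tagged >> 1;  // SmiUntag (arithmetic shift)
  return static_cast<int32_t>(static_cast<uint32_t>(untagged) ^ 0x80000000u);
}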
4860 
4861 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4862   class DeferredNumberTagD final : public LDeferredCode {
4863    public:
4864     DeferredNumberTagD(LCodeGen* codegen,
4865                        LNumberTagD* instr,
4866                        const X87Stack& x87_stack)
4867         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4868     void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4869     LInstruction* instr() override { return instr_; }
4870 
4871    private:
4872     LNumberTagD* instr_;
4873   };
4874 
4875   Register reg = ToRegister(instr->result());
4876 
4877   // Put the value on top of the x87 stack.
4878   X87Register src = ToX87Register(instr->value());
4879   // Don't use X87LoadForUsage here; it is meant only for instructions that
4880   // clobber fp registers.
4881   x87_stack_.Fxch(src);
4882 
4883   DeferredNumberTagD* deferred =
4884       new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
4885   if (FLAG_inline_new) {
4886     Register tmp = ToRegister(instr->temp());
4887     __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
4888   } else {
4889     __ jmp(deferred->entry());
4890   }
4891   __ bind(deferred->exit());
4892   __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
4893 }
4894 
4895 
4896 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4897   // TODO(3095996): Get rid of this. For now, we need to make the
4898   // result register contain a valid pointer because it is already
4899   // contained in the register pointer map.
4900   Register reg = ToRegister(instr->result());
4901   __ Move(reg, Immediate(0));
4902 
4903   PushSafepointRegistersScope scope(this);
4904   // NumberTagI and NumberTagD use the context from the frame, rather than
4905   // the environment's HContext or HInlinedContext value.
4906   // They only call Runtime::kAllocateHeapNumber.
4907   // The corresponding HChange instructions are added in a phase that does
4908   // not have easy access to the local context.
4909   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4910   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4911   RecordSafepointWithRegisters(
4912       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4913   __ StoreToSafepointRegisterSlot(reg, eax);
4914 }
4915 
4916 
4917 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4918   HChange* hchange = instr->hydrogen();
4919   Register input = ToRegister(instr->value());
4920   if (hchange->CheckFlag(HValue::kCanOverflow) &&
4921       hchange->value()->CheckFlag(HValue::kUint32)) {
4922     __ test(input, Immediate(0xc0000000));
4923     DeoptimizeIf(not_zero, instr, Deoptimizer::kOverflow);
4924   }
4925   __ SmiTag(input);
4926   if (hchange->CheckFlag(HValue::kCanOverflow) &&
4927       !hchange->value()->CheckFlag(HValue::kUint32)) {
4928     DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
4929   }
4930 }
4931 
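// A sketch (not part of the original file) of the two overflow checks above.
// An ia32 smi carries 31 payload bits, so a uint32 fits only if its top two
// bits are clear, which is exactly the 0xc0000000 test. Names hypothetical.
static inline bool Uint32FitsInSmiSketch(uint32_t v) {
  return (v & 0xc0000000u) == 0;  // test(input, Immediate(0xc0000000))
}
static inline bool SmiTagSketch(int32_t v, int32_t* tagged) {
  *tagged = static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
  return (*tagged >> 1) == v;  // false exactly when the shl sets overflow
}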
4932 
4933 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4934   LOperand* input = instr->value();
4935   Register result = ToRegister(input);
4936   DCHECK(input->IsRegister() && input->Equals(instr->result()));
4937   if (instr->needs_check()) {
4938     __ test(result, Immediate(kSmiTagMask));
4939     DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
4940   } else {
4941     __ AssertSmi(result);
4942   }
4943   __ SmiUntag(result);
4944 }
4945 
4946 
4947 void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
4948                                       Register temp_reg, X87Register res_reg,
4949                                       NumberUntagDMode mode) {
4950   bool can_convert_undefined_to_nan =
4951       instr->hydrogen()->can_convert_undefined_to_nan();
4952   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4953 
4954   Label load_smi, done;
4955 
4956   X87PrepareToWrite(res_reg);
4957   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4958     // Smi check.
4959     __ JumpIfSmi(input_reg, &load_smi);
4960 
4961     // Heap number map check.
4962     __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4963            factory()->heap_number_map());
4964     if (!can_convert_undefined_to_nan) {
4965       DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
4966     } else {
4967       Label heap_number, convert;
4968       __ j(equal, &heap_number);
4969 
4970       // Convert undefined (or hole) to NaN.
4971       __ cmp(input_reg, factory()->undefined_value());
4972       DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
4973 
4974       __ bind(&convert);
4975       __ push(Immediate(0xffffffff));
4976       __ push(Immediate(0x7fffffff));
4977       __ fld_d(MemOperand(esp, 0));
4978       __ lea(esp, Operand(esp, kDoubleSize));
4979       __ jmp(&done, Label::kNear);
4980 
4981       __ bind(&heap_number);
4982     }
4983     // Heap number to x87 conversion.
4984     __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
4985     if (deoptimize_on_minus_zero) {
4986       __ fldz();
4987       __ FCmp();
4988       __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
4989       __ j(not_zero, &done, Label::kNear);
4990 
4991       // Use general purpose registers to check if we have -0.0
4992       __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
4993       __ test(temp_reg, Immediate(HeapNumber::kSignMask));
4994       __ j(zero, &done, Label::kNear);
4995 
4996       // Pop FPU stack before deoptimizing.
4997       __ fstp(0);
4998       DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
4999     }
5000     __ jmp(&done, Label::kNear);
5001   } else {
5002     DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
5003   }
5004 
5005   __ bind(&load_smi);
5006   // Clobbering a temp is faster than re-tagging the
5007   // input register since we avoid dependencies.
5008   __ mov(temp_reg, input_reg);
5009   __ SmiUntag(temp_reg);  // Untag smi before converting to float.
5010   __ push(temp_reg);
5011   __ fild_s(Operand(esp, 0));
5012   __ add(esp, Immediate(kPointerSize));
5013   __ bind(&done);
5014   X87CommitWrite(res_reg);
5015 }
5016 
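// A sketch (not part of the original file) of the undefined->NaN path above:
// two raw words are pushed and reloaded with fld_d, and on little-endian ia32
// the word pushed last becomes the low half. Assumes <cstdint>/<cstring>.
static inline double NanFromWordsSketch(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double d;
  memcpy(&d, &bits, sizeof d);  // exponent all ones, mantissa != 0 => NaN
  return d;
}
// NanFromWordsSketch(0xffffffff, 0x7fffffff) matches the words pushed above.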
5017 
5018 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
5019   Register input_reg = ToRegister(instr->value());
5020 
5021   // The input was optimistically untagged; revert it.
5022   STATIC_ASSERT(kSmiTagSize == 1);
5023   __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
5024 
5025   if (instr->truncating()) {
5026     Label no_heap_number, check_bools, check_false;
5027 
5028     // Heap number map check.
5029     __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5030            factory()->heap_number_map());
5031     __ j(not_equal, &no_heap_number, Label::kNear);
5032     __ TruncateHeapNumberToI(input_reg, input_reg);
5033     __ jmp(done);
5034 
5035     __ bind(&no_heap_number);
5036     // Check for Oddballs. Undefined/False is converted to zero and True to one
5037     // for truncating conversions.
5038     __ cmp(input_reg, factory()->undefined_value());
5039     __ j(not_equal, &check_bools, Label::kNear);
5040     __ Move(input_reg, Immediate(0));
5041     __ jmp(done);
5042 
5043     __ bind(&check_bools);
5044     __ cmp(input_reg, factory()->true_value());
5045     __ j(not_equal, &check_false, Label::kNear);
5046     __ Move(input_reg, Immediate(1));
5047     __ jmp(done);
5048 
5049     __ bind(&check_false);
5050     __ cmp(input_reg, factory()->false_value());
5051     DeoptimizeIf(not_equal, instr,
5052                  Deoptimizer::kNotAHeapNumberUndefinedBoolean);
5053     __ Move(input_reg, Immediate(0));
5054   } else {
5055     // TODO(olivf) Converting a number on the fpu is actually quite slow. We
5056     // should first try a fast conversion and then bailout to this slow case.
5057     __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5058            isolate()->factory()->heap_number_map());
5059     DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
5060 
5061     __ sub(esp, Immediate(kPointerSize));
5062     __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
5063 
5064     if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
5065       Label no_precision_lost, not_nan, zero_check;
5066       __ fld(0);
5067 
5068       __ fist_s(MemOperand(esp, 0));
5069       __ fild_s(MemOperand(esp, 0));
5070       __ FCmp();
5071       __ pop(input_reg);
5072 
5073       __ j(equal, &no_precision_lost, Label::kNear);
5074       __ fstp(0);
5075       DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
5076       __ bind(&no_precision_lost);
5077 
5078       __ j(parity_odd, &not_nan);
5079       __ fstp(0);
5080       DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
5081       __ bind(&not_nan);
5082 
5083       __ test(input_reg, Operand(input_reg));
5084       __ j(zero, &zero_check, Label::kNear);
5085       __ fstp(0);
5086       __ jmp(done);
5087 
5088       __ bind(&zero_check);
5089       // To check for minus zero, we load the value again as float, and check
5090       // if that is still 0.
5091       __ sub(esp, Immediate(kPointerSize));
5092       __ fstp_s(Operand(esp, 0));
5093       __ pop(input_reg);
5094       __ test(input_reg, Operand(input_reg));
5095       DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
5096     } else {
5097       __ fist_s(MemOperand(esp, 0));
5098       __ fild_s(MemOperand(esp, 0));
5099       __ FCmp();
5100       __ pop(input_reg);
5101       DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
5102       DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
5103     }
5104   }
5105 }
5106 
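// A sketch (not part of the original file) of the oddball cases the
// truncating path accepts inline; anything else deoptimizes. Hypothetical
// helper mirroring the undefined/true/false comparisons above.
static inline bool TruncateOddballSketch(bool is_undefined, bool is_true,
                                         bool is_false, int32_t* out) {
  if (is_undefined || is_false) { *out = 0; return true; }
  if (is_true)                  { *out = 1; return true; }
  return false;  // not a heap number or known oddball -> deopt
}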
5107 
5108 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5109   class DeferredTaggedToI final : public LDeferredCode {
5110    public:
5111     DeferredTaggedToI(LCodeGen* codegen,
5112                       LTaggedToI* instr,
5113                       const X87Stack& x87_stack)
5114         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5115     void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
5116     LInstruction* instr() override { return instr_; }
5117 
5118    private:
5119     LTaggedToI* instr_;
5120   };
5121 
5122   LOperand* input = instr->value();
5123   DCHECK(input->IsRegister());
5124   Register input_reg = ToRegister(input);
5125   DCHECK(input_reg.is(ToRegister(instr->result())));
5126 
5127   if (instr->hydrogen()->value()->representation().IsSmi()) {
5128     __ SmiUntag(input_reg);
5129   } else {
5130     DeferredTaggedToI* deferred =
5131         new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
5132     // Optimistically untag the input.
5133     // If the input is a HeapObject, SmiUntag will set the carry flag.
5134     STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
5135     __ SmiUntag(input_reg);
5136     // Branch to deferred code if the input was tagged.
5137     // The deferred code will take care of restoring the tag.
5138     __ j(carry, deferred->entry());
5139     __ bind(deferred->exit());
5140   }
5141 }
5142 
5143 
5144 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5145   LOperand* input = instr->value();
5146   DCHECK(input->IsRegister());
5147   LOperand* temp = instr->temp();
5148   DCHECK(temp->IsRegister());
5149   LOperand* result = instr->result();
5150   DCHECK(result->IsDoubleRegister());
5151 
5152   Register input_reg = ToRegister(input);
5153   Register temp_reg = ToRegister(temp);
5154 
5155   HValue* value = instr->hydrogen()->value();
5156   NumberUntagDMode mode = value->representation().IsSmi()
5157       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5158 
5159   EmitNumberUntagDNoSSE2(instr, input_reg, temp_reg, ToX87Register(result),
5160                          mode);
5161 }
5162 
5163 
5164 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5165   LOperand* input = instr->value();
5166   DCHECK(input->IsDoubleRegister());
5167   LOperand* result = instr->result();
5168   DCHECK(result->IsRegister());
5169   Register result_reg = ToRegister(result);
5170 
5171   if (instr->truncating()) {
5172     X87Register input_reg = ToX87Register(input);
5173     X87Fxch(input_reg);
5174     __ TruncateX87TOSToI(result_reg);
5175   } else {
5176     Label lost_precision, is_nan, minus_zero, done;
5177     X87Register input_reg = ToX87Register(input);
5178     X87Fxch(input_reg);
5179     __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5180                  &lost_precision, &is_nan, &minus_zero);
5181     __ jmp(&done);
5182     __ bind(&lost_precision);
5183     DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
5184     __ bind(&is_nan);
5185     DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
5186     __ bind(&minus_zero);
5187     DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
5188     __ bind(&done);
5189   }
5190 }
5191 
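// A sketch (not part of the original file) of what the non-truncating
// conversion must verify; the real code reads FPU flags instead of casting,
// since casting an out-of-range double is undefined behaviour in C++.
// Assumes <cmath> for std::signbit.
static inline bool DoubleToInt32ExactSketch(double d, int32_t* out) {
  int32_t i = static_cast<int32_t>(d);            // illustration only
  if (static_cast<double>(i) != d) return false;  // NaN or lost precision
  if (i == 0 && std::signbit(d)) return false;    // -0.0 must deopt
  *out = i;
  return true;
}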
5192 
5193 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5194   LOperand* input = instr->value();
5195   DCHECK(input->IsDoubleRegister());
5196   LOperand* result = instr->result();
5197   DCHECK(result->IsRegister());
5198   Register result_reg = ToRegister(result);
5199 
5200   Label lost_precision, is_nan, minus_zero, done;
5201   X87Register input_reg = ToX87Register(input);
5202   X87Fxch(input_reg);
5203   __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5204                &lost_precision, &is_nan, &minus_zero);
5205   __ jmp(&done);
5206   __ bind(&lost_precision);
5207   DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
5208   __ bind(&is_nan);
5209   DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
5210   __ bind(&minus_zero);
5211   DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
5212   __ bind(&done);
5213   __ SmiTag(result_reg);
5214   DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
5215 }
5216 
5217 
5218 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5219   LOperand* input = instr->value();
5220   __ test(ToOperand(input), Immediate(kSmiTagMask));
5221   DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
5222 }
5223 
5224 
5225 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5226   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5227     LOperand* input = instr->value();
5228     __ test(ToOperand(input), Immediate(kSmiTagMask));
5229     DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
5230   }
5231 }
5232 
5233 
5234 void LCodeGen::DoCheckArrayBufferNotNeutered(
5235     LCheckArrayBufferNotNeutered* instr) {
5236   Register view = ToRegister(instr->view());
5237   Register scratch = ToRegister(instr->scratch());
5238 
5239   __ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
5240   __ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
5241             1 << JSArrayBuffer::WasNeutered::kShift);
5242   DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
5243 }
5244 
5245 
5246 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5247   Register input = ToRegister(instr->value());
5248   Register temp = ToRegister(instr->temp());
5249 
5250   __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
5251 
5252   if (instr->hydrogen()->is_interval_check()) {
5253     InstanceType first;
5254     InstanceType last;
5255     instr->hydrogen()->GetCheckInterval(&first, &last);
5256 
5257     __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
5258             static_cast<int8_t>(first));
5259 
5260     // If there is only one type in the interval, check for equality.
5261     if (first == last) {
5262       DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
5263     } else {
5264       DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
5265       // Omit check for the last type.
5266       if (last != LAST_TYPE) {
5267         __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
5268                 static_cast<int8_t>(last));
5269         DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
5270       }
5271     }
5272   } else {
5273     uint8_t mask;
5274     uint8_t tag;
5275     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5276 
5277     if (base::bits::IsPowerOfTwo32(mask)) {
5278       DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5279       __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
5280       DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
5281                    Deoptimizer::kWrongInstanceType);
5282     } else {
5283       __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
5284       __ and_(temp, mask);
5285       __ cmp(temp, tag);
5286       DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
5287     }
5288   }
5289 }
5290 
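// A sketch (not part of the original file) of the mask/tag trick above: for a
// single-bit mask the tag can only be 0 or the mask itself, so one test_b
// suffices and the movzx/and/cmp sequence is reserved for the general case.
static inline bool MaskTagCheckSketch(uint8_t type, uint8_t mask, uint8_t tag) {
  if ((mask & (mask - 1)) == 0) {  // power of two => single bit
    return tag == 0 ? (type & mask) == 0 : (type & mask) != 0;
  }
  return (type & mask) == tag;     // general path
}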
5291 
5292 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5293   Handle<HeapObject> object = instr->hydrogen()->object().handle();
5294   if (instr->hydrogen()->object_in_new_space()) {
5295     Register reg = ToRegister(instr->value());
5296     Handle<Cell> cell = isolate()->factory()->NewCell(object);
5297     __ cmp(reg, Operand::ForCell(cell));
5298   } else {
5299     Operand operand = ToOperand(instr->value());
5300     __ cmp(operand, object);
5301   }
5302   DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
5303 }
5304 
5305 
5306 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5307   {
5308     PushSafepointRegistersScope scope(this);
5309     __ push(object);
5310     __ xor_(esi, esi);
5311     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5312     RecordSafepointWithRegisters(
5313         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5314 
5315     __ test(eax, Immediate(kSmiTagMask));
5316   }
5317   DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
5318 }
5319 
5320 
5321 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5322   class DeferredCheckMaps final : public LDeferredCode {
5323    public:
5324     DeferredCheckMaps(LCodeGen* codegen,
5325                       LCheckMaps* instr,
5326                       Register object,
5327                       const X87Stack& x87_stack)
5328         : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
5329       SetExit(check_maps());
5330     }
5331     void Generate() override {
5332       codegen()->DoDeferredInstanceMigration(instr_, object_);
5333     }
5334     Label* check_maps() { return &check_maps_; }
5335     LInstruction* instr() override { return instr_; }
5336 
5337    private:
5338     LCheckMaps* instr_;
5339     Label check_maps_;
5340     Register object_;
5341   };
5342 
5343   if (instr->hydrogen()->IsStabilityCheck()) {
5344     const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5345     for (int i = 0; i < maps->size(); ++i) {
5346       AddStabilityDependency(maps->at(i).handle());
5347     }
5348     return;
5349   }
5350 
5351   LOperand* input = instr->value();
5352   DCHECK(input->IsRegister());
5353   Register reg = ToRegister(input);
5354 
5355   DeferredCheckMaps* deferred = NULL;
5356   if (instr->hydrogen()->HasMigrationTarget()) {
5357     deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
5358     __ bind(deferred->check_maps());
5359   }
5360 
5361   const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5362   Label success;
5363   for (int i = 0; i < maps->size() - 1; i++) {
5364     Handle<Map> map = maps->at(i).handle();
5365     __ CompareMap(reg, map);
5366     __ j(equal, &success, Label::kNear);
5367   }
5368 
5369   Handle<Map> map = maps->at(maps->size() - 1).handle();
5370   __ CompareMap(reg, map);
5371   if (instr->hydrogen()->HasMigrationTarget()) {
5372     __ j(not_equal, deferred->entry());
5373   } else {
5374     DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
5375   }
5376 
5377   __ bind(&success);
5378 }
5379 
5380 
5381 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5382   X87Register value_reg = ToX87Register(instr->unclamped());
5383   Register result_reg = ToRegister(instr->result());
5384   X87Fxch(value_reg);
5385   __ ClampTOSToUint8(result_reg);
5386 }
5387 
5388 
5389 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5390   DCHECK(instr->unclamped()->Equals(instr->result()));
5391   Register value_reg = ToRegister(instr->result());
5392   __ ClampUint8(value_reg);
5393 }
5394 
5395 
5396 void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
5397   Register input_reg = ToRegister(instr->unclamped());
5398   Register result_reg = ToRegister(instr->result());
5399   Register scratch = ToRegister(instr->scratch());
5400   Register scratch2 = ToRegister(instr->scratch2());
5401   Register scratch3 = ToRegister(instr->scratch3());
5402   Label is_smi, done, heap_number, valid_exponent,
5403       largest_value, zero_result, maybe_nan_or_infinity;
5404 
5405   __ JumpIfSmi(input_reg, &is_smi);
5406 
5407   // Check for heap number
5408   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5409          factory()->heap_number_map());
5410   __ j(equal, &heap_number, Label::kNear);
5411 
5412   // Check for undefined. Undefined is converted to zero for clamping
5413   // conversions.
5414   __ cmp(input_reg, factory()->undefined_value());
5415   DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
5416   __ jmp(&zero_result, Label::kNear);
5417 
5418   // Heap number
5419   __ bind(&heap_number);
5420 
5421   // Surprisingly, all of the hand-crafted bit manipulation below is much
5422   // faster than the x86 FPU built-in instruction, especially since "banker's
5423   // rounding" would add considerable extra cost.
5424 
5425   // Get exponent word.
5426   __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5427   __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5428 
5429   // Test for negative values --> clamp to zero
5430   __ test(scratch, scratch);
5431   __ j(negative, &zero_result, Label::kNear);
5432 
5433   // Get exponent alone in scratch2.
5434   __ mov(scratch2, scratch);
5435   __ and_(scratch2, HeapNumber::kExponentMask);
5436   __ shr(scratch2, HeapNumber::kExponentShift);
5437   __ j(zero, &zero_result, Label::kNear);
5438   __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
5439   __ j(negative, &zero_result, Label::kNear);
5440 
5441   const uint32_t non_int8_exponent = 7;
5442   __ cmp(scratch2, Immediate(non_int8_exponent + 1));
5443   // If the exponent is too big, check for special values.
5444   __ j(greater, &maybe_nan_or_infinity, Label::kNear);
5445 
5446   __ bind(&valid_exponent);
5447   // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
5448   // < 7. The shift bias is the number of bits to shift the mantissa such that,
5449   // with an exponent of 7, the top-most one ends up in bit 30. That allows us
5450   // to detect the rounding overflow of 255.5 to 256 (bit 31 goes from 0 to
5451   // 1).
5452   int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
5453   __ lea(result_reg, MemOperand(scratch2, shift_bias));
5454   // Here result_reg (ecx) is the shift, scratch is the exponent word.  Get the
5455   // top bits of the mantissa.
5456   __ and_(scratch, HeapNumber::kMantissaMask);
5457   // Put back the implicit 1 of the mantissa
5458   __ or_(scratch, 1 << HeapNumber::kExponentShift);
5459   // Shift up to round
5460   __ shl_cl(scratch);
5461   // Use "banker's rounding" to spec: If fractional part of number is 0.5, then
5462   // use the bit in the "ones" place and add it to the "halves" place, which has
5463   // the effect of rounding to even.
5464   __ mov(scratch2, scratch);
5465   const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
5466   const uint32_t one_bit_shift = one_half_bit_shift + 1;
5467   __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
5468   __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
5469   Label no_round;
5470   __ j(less, &no_round, Label::kNear);
5471   Label round_up;
5472   __ mov(scratch2, Immediate(1 << one_half_bit_shift));
5473   __ j(greater, &round_up, Label::kNear);
5474   __ test(scratch3, scratch3);
5475   __ j(not_zero, &round_up, Label::kNear);
5476   __ mov(scratch2, scratch);
5477   __ and_(scratch2, Immediate(1 << one_bit_shift));
5478   __ shr(scratch2, 1);
5479   __ bind(&round_up);
5480   __ add(scratch, scratch2);
5481   __ j(overflow, &largest_value, Label::kNear);
5482   __ bind(&no_round);
5483   __ shr(scratch, 23);
5484   __ mov(result_reg, scratch);
5485   __ jmp(&done, Label::kNear);
5486 
5487   __ bind(&maybe_nan_or_infinity);
5488   // Check for NaN/Infinity, all other values map to 255
5489   __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
5490   __ j(not_equal, &largest_value, Label::kNear);
5491 
5492   // Check for NaN, which differs from Infinity in that at least one mantissa
5493   // bit is set.
5494   __ and_(scratch, HeapNumber::kMantissaMask);
5495   __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5496   __ j(not_zero, &zero_result, Label::kNear);  // M!=0 --> NaN
5497   // Infinity -> Fall through to map to 255.
5498 
5499   __ bind(&largest_value);
5500   __ mov(result_reg, Immediate(255));
5501   __ jmp(&done, Label::kNear);
5502 
5503   __ bind(&zero_result);
5504   __ xor_(result_reg, result_reg);
5505   __ jmp(&done, Label::kNear);
5506 
5507   // smi
5508   __ bind(&is_smi);
5509   if (!input_reg.is(result_reg)) {
5510     __ mov(result_reg, input_reg);
5511   }
5512   __ SmiUntag(result_reg);
5513   __ ClampUint8(result_reg);
5514   __ bind(&done);
5515 }
5516 
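// A scalar reference sketch (not part of the original file) for the clamp
// above, including the round-half-to-even behaviour the bit tricks implement.
// Assumes <cmath>; std::nearbyint rounds to nearest-even in the default FP
// environment.
static inline uint8_t ClampDoubleToUint8Sketch(double d) {
  if (!(d > 0.0)) return 0;    // NaN and all negative values clamp to 0
  if (d >= 255.0) return 255;  // +Infinity and large values clamp to 255
  return static_cast<uint8_t>(std::nearbyint(d));
}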
5517 
5518 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5519   X87Register value_reg = ToX87Register(instr->value());
5520   Register result_reg = ToRegister(instr->result());
5521   X87Fxch(value_reg);
5522   __ sub(esp, Immediate(kDoubleSize));
5523   __ fst_d(Operand(esp, 0));
5524   if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5525     __ mov(result_reg, Operand(esp, kPointerSize));
5526   } else {
5527     __ mov(result_reg, Operand(esp, 0));
5528   }
5529   __ add(esp, Immediate(kDoubleSize));
5530 }
5531 
5532 
5533 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5534   Register hi_reg = ToRegister(instr->hi());
5535   Register lo_reg = ToRegister(instr->lo());
5536   X87Register result_reg = ToX87Register(instr->result());
5537   // Follow the pattern below to write an x87 fp register.
5538   X87PrepareToWrite(result_reg);
5539   __ sub(esp, Immediate(kDoubleSize));
5540   __ mov(Operand(esp, 0), lo_reg);
5541   __ mov(Operand(esp, kPointerSize), hi_reg);
5542   __ fld_d(Operand(esp, 0));
5543   __ add(esp, Immediate(kDoubleSize));
5544   X87CommitWrite(result_reg);
5545 }
5546 
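// A sketch (not part of the original file) of the bit-level round trip that
// DoDoubleBits and DoConstructDouble implement via the stack. Assumes
// <cstdint>/<cstring>; helper names are hypothetical.
static inline void DoubleBitsSketch(double d, uint32_t* hi, uint32_t* lo) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof bits);
  *lo = static_cast<uint32_t>(bits);        // word at (esp, 0)
  *hi = static_cast<uint32_t>(bits >> 32);  // word at (esp, kPointerSize)
}
static inline double ConstructDoubleSketch(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double d;
  memcpy(&d, &bits, sizeof d);
  return d;
}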
5547 
5548 void LCodeGen::DoAllocate(LAllocate* instr) {
5549   class DeferredAllocate final : public LDeferredCode {
5550    public:
5551     DeferredAllocate(LCodeGen* codegen,
5552                      LAllocate* instr,
5553                      const X87Stack& x87_stack)
5554         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5555     void Generate() override { codegen()->DoDeferredAllocate(instr_); }
5556     LInstruction* instr() override { return instr_; }
5557 
5558    private:
5559     LAllocate* instr_;
5560   };
5561 
5562   DeferredAllocate* deferred =
5563       new(zone()) DeferredAllocate(this, instr, x87_stack_);
5564 
5565   Register result = ToRegister(instr->result());
5566   Register temp = ToRegister(instr->temp());
5567 
5568   // Allocate memory for the object.
5569   AllocationFlags flags = TAG_OBJECT;
5570   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5571     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5572   }
5573   if (instr->hydrogen()->IsOldSpaceAllocation()) {
5574     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5575     flags = static_cast<AllocationFlags>(flags | PRETENURE);
5576   }
5577 
5578   if (instr->size()->IsConstantOperand()) {
5579     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5580     CHECK(size <= Page::kMaxRegularHeapObjectSize);
5581     __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5582   } else {
5583     Register size = ToRegister(instr->size());
5584     __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5585   }
5586 
5587   __ bind(deferred->exit());
5588 
5589   if (instr->hydrogen()->MustPrefillWithFiller()) {
5590     if (instr->size()->IsConstantOperand()) {
5591       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5592       __ mov(temp, (size / kPointerSize) - 1);
5593     } else {
5594       temp = ToRegister(instr->size());
5595       __ shr(temp, kPointerSizeLog2);
5596       __ dec(temp);
5597     }
5598     Label loop;
5599     __ bind(&loop);
5600     __ mov(FieldOperand(result, temp, times_pointer_size, 0),
5601         isolate()->factory()->one_pointer_filler_map());
5602     __ dec(temp);
5603     __ j(not_zero, &loop);
5604   }
5605 }
5606 
5607 
5608 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5609   Register result = ToRegister(instr->result());
5610 
5611   // TODO(3095996): Get rid of this. For now, we need to make the
5612   // result register contain a valid pointer because it is already
5613   // contained in the register pointer map.
5614   __ Move(result, Immediate(Smi::FromInt(0)));
5615 
5616   PushSafepointRegistersScope scope(this);
5617   if (instr->size()->IsRegister()) {
5618     Register size = ToRegister(instr->size());
5619     DCHECK(!size.is(result));
5620     __ SmiTag(ToRegister(instr->size()));
5621     __ push(size);
5622   } else {
5623     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5624     if (size >= 0 && size <= Smi::kMaxValue) {
5625       __ push(Immediate(Smi::FromInt(size)));
5626     } else {
5627       // We should never get here at runtime => abort
5628       __ int3();
5629       return;
5630     }
5631   }
5632 
5633   int flags = AllocateDoubleAlignFlag::encode(
5634       instr->hydrogen()->MustAllocateDoubleAligned());
5635   if (instr->hydrogen()->IsOldSpaceAllocation()) {
5636     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5637     flags = AllocateTargetSpace::update(flags, OLD_SPACE);
5638   } else {
5639     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5640   }
5641   __ push(Immediate(Smi::FromInt(flags)));
5642 
5643   CallRuntimeFromDeferred(
5644       Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5645   __ StoreToSafepointRegisterSlot(result, eax);
5646 }
5647 
5648 
5649 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5650   DCHECK(ToRegister(instr->value()).is(eax));
5651   __ push(eax);
5652   CallRuntime(Runtime::kToFastProperties, 1, instr);
5653 }
5654 
5655 
5656 void LCodeGen::DoTypeof(LTypeof* instr) {
5657   DCHECK(ToRegister(instr->context()).is(esi));
5658   DCHECK(ToRegister(instr->value()).is(ebx));
5659   Label end, do_call;
5660   Register value_register = ToRegister(instr->value());
5661   __ JumpIfNotSmi(value_register, &do_call);
5662   __ mov(eax, Immediate(isolate()->factory()->number_string()));
5663   __ jmp(&end);
5664   __ bind(&do_call);
5665   TypeofStub stub(isolate());
5666   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5667   __ bind(&end);
5668 }
5669 
5670 
5671 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5672   Register input = ToRegister(instr->value());
5673   Condition final_branch_condition = EmitTypeofIs(instr, input);
5674   if (final_branch_condition != no_condition) {
5675     EmitBranch(instr, final_branch_condition);
5676   }
5677 }
5678 
5679 
5680 Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
5681   Label* true_label = instr->TrueLabel(chunk_);
5682   Label* false_label = instr->FalseLabel(chunk_);
5683   Handle<String> type_name = instr->type_literal();
5684   int left_block = instr->TrueDestination(chunk_);
5685   int right_block = instr->FalseDestination(chunk_);
5686   int next_block = GetNextEmittedBlock();
5687 
5688   Label::Distance true_distance = left_block == next_block ? Label::kNear
5689                                                            : Label::kFar;
5690   Label::Distance false_distance = right_block == next_block ? Label::kNear
5691                                                              : Label::kFar;
5692   Condition final_branch_condition = no_condition;
5693   if (String::Equals(type_name, factory()->number_string())) {
5694     __ JumpIfSmi(input, true_label, true_distance);
5695     __ cmp(FieldOperand(input, HeapObject::kMapOffset),
5696            factory()->heap_number_map());
5697     final_branch_condition = equal;
5698 
5699   } else if (String::Equals(type_name, factory()->string_string())) {
5700     __ JumpIfSmi(input, false_label, false_distance);
5701     __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
5702     final_branch_condition = below;
5703 
5704   } else if (String::Equals(type_name, factory()->symbol_string())) {
5705     __ JumpIfSmi(input, false_label, false_distance);
5706     __ CmpObjectType(input, SYMBOL_TYPE, input);
5707     final_branch_condition = equal;
5708 
5709   } else if (String::Equals(type_name, factory()->boolean_string())) {
5710     __ cmp(input, factory()->true_value());
5711     __ j(equal, true_label, true_distance);
5712     __ cmp(input, factory()->false_value());
5713     final_branch_condition = equal;
5714 
5715   } else if (String::Equals(type_name, factory()->undefined_string())) {
5716     __ cmp(input, factory()->undefined_value());
5717     __ j(equal, true_label, true_distance);
5718     __ JumpIfSmi(input, false_label, false_distance);
5719     // Check for undetectable objects => true.
5720     __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
5721     __ test_b(FieldOperand(input, Map::kBitFieldOffset),
5722               1 << Map::kIsUndetectable);
5723     final_branch_condition = not_zero;
5724 
5725   } else if (String::Equals(type_name, factory()->function_string())) {
5726     __ JumpIfSmi(input, false_label, false_distance);
5727     // Check for callable and not undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ movzx_b(input, FieldOperand(input, Map::kBitFieldOffset));
    __ and_(input, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
    __ cmp(input, 1 << Map::kIsCallable);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ cmp(input, factory()->null_value());
    __ j(equal, true_label, true_distance);
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
    __ j(below, false_label, false_distance);
    // Check for callable or undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
    final_branch_condition = zero;

// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)         \
  } else if (String::Equals(type_name, factory()->type##_string())) { \
    __ JumpIfSmi(input, false_label, false_distance);                 \
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),               \
           factory()->type##_map());                                  \
    final_branch_condition = equal;
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
    // clang-format on

  } else {
    __ jmp(false_label, false_distance);
  }
  return final_branch_condition;
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
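    // E.g. (a sketch, assuming patch_size() is the 5-byte near-call length
    // on ia32): with last_lazy_deopt_pc_ == 10 and pc_offset() == 12, three
    // bytes of Nop are emitted so the patched call cannot clobber the
    // following instruction.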
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


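// A lazy bailout emits no code of its own; it only records the environment
// and the lazy deoptimization index at the current pc so the call site can
// be patched later.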
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
  // (they need the return address), even though the implementation of LAZY
  // and EAGER is now identical. When LAZY is eventually completely folded
  // into EAGER, remove the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }
  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen,
                       LStackCheck* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform the stack overflow check.
    Label done;
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(above_equal, &done, Label::kNear);

    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(esi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform the stack overflow check before jumping if this goto needs it.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr, x87_stack_);
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
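  // eax holds the object being iterated by for-in. Deopt for smis and
  // proxies; otherwise use the map's enum cache if it is valid, or fall
  // back to the runtime to gather the enumerable property names.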
  __ test(eax, Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr, Deoptimizer::kSmi);

  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
  __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
  DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);

  Label use_cache, call_runtime;
  __ CheckEnumCache(&call_runtime);

  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(eax);
  CallRuntime(Runtime::kGetPropertyNamesFast, instr);

  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
         isolate()->factory()->meta_map());
  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
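  // An EnumLength of zero means there is nothing to enumerate for this map:
  // hand back the canonical empty fixed array. Otherwise load entry idx()
  // of the enum cache from the map's instance descriptors.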
  __ EnumLength(result, map);
  __ cmp(result, Immediate(Smi::FromInt(0)));
  __ j(not_equal, &load_cache, Label::kNear);
  __ mov(result, isolate()->factory()->empty_fixed_array());
  __ jmp(&done, Label::kNear);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ mov(result,
         FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ mov(result,
         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  __ test(result, result);
  DeoptimizeIf(equal, instr, Deoptimizer::kNoCache);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmp(ToRegister(instr->map()),
         FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
}


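// Deferred path for DoLoadFieldByIndex below: boxes a field that holds a
// mutable heap number by calling Runtime::kLoadMutableDouble with the
// object and the smi-tagged index, then routes the result back through
// |object|'s safepoint register slot.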
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ push(object);
  __ push(index);
  __ xor_(esi, esi);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(object, eax);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register object,
                              Register index,
                              const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack),
          instr_(instr),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, object, index, x87_stack_);

  Label out_of_object, done;
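  // The smi-tagged index encodes two things (a sketch of the encoding; the
  // hydrogen instruction that produces it is the authoritative source):
  //   bit 0 of the smi value: set when the field is a mutable heap number
  //     that must be boxed in the deferred code above.
  //   sign of the remaining bits: negative means the property is stored
  //     out-of-object in the properties backing store.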
  __ test(index, Immediate(Smi::FromInt(1)));
  __ j(not_zero, deferred->entry());

  __ sar(index, 1);

  __ cmp(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ neg(index);
  // The index now equals the out-of-object property index plus 1.
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X87