// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_X87

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/x87/lithium-codegen-x87.h"

namespace v8 {
namespace internal {


// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const OVERRIDE {}

  virtual void AfterCall() const OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->
49

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  support_aligned_spilled_doubles_ = info()->IsOptimizing();

  dynamic_frame_alignment_ = info()->IsOptimizing() &&
      ((chunk()->num_double_slots() > 2 &&
        !chunk()->graph()->is_recursive()) ||
       !info()->osr_ast_id().IsNone());

  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateJumpTable() &&
         GenerateSafepointTable();
}

void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
  if (!info()->IsStub()) {
    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
  }
}

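// Windows commits stack pages lazily through guard pages that must be touched
// in order, so when a frame reserves more than one 4 KB page at once we write
// to each page below esp first; otherwise the allocation could skip past the
// guard page and fault.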
#ifdef _MSC_VER
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ mov(Operand(esp, offset), eax);
  }
}
#endif

bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      // +1 for return address.
      int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
      __ mov(ecx, Operand(esp, receiver_offset));

      __ cmp(ecx, isolate()->factory()->undefined_value());
      __ j(not_equal, &ok, Label::kNear);

      __ mov(ecx, GlobalObjectOperand());
      __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));

      __ mov(Operand(esp, receiver_offset), ecx);

      __ bind(&ok);
    }

    if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
      // Move state of dynamic frame alignment into edx.
      __ Move(edx, Immediate(kNoAlignmentPadding));

      Label do_not_pad, align_loop;
      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
      // Align esp + 4 to a multiple of 2 * kPointerSize.
      __ test(esp, Immediate(kPointerSize));
      __ j(not_zero, &do_not_pad, Label::kNear);
      __ push(Immediate(0));
      __ mov(ebx, esp);
      __ mov(edx, Immediate(kAlignmentPaddingPushed));
      // Copy arguments, receiver, and return address.
      __ mov(ecx, Immediate(scope()->num_parameters() + 2));

      __ bind(&align_loop);
      __ mov(eax, Operand(ebx, 1 * kPointerSize));
      __ mov(Operand(ebx, 0), eax);
      __ add(Operand(ebx), Immediate(kPointerSize));
      __ dec(ecx);
      __ j(not_zero, &align_loop, Label::kNear);
      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
      __ bind(&do_not_pad);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    DCHECK(!frame_is_built_);
    frame_is_built_ = true;
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    info()->AddNoFrameRange(0, masm_->pc_offset());
  }

  if (info()->IsOptimizing() &&
      dynamic_frame_alignment_ &&
      FLAG_debug_code) {
    __ test(esp, Immediate(kPointerSize));
    __ Assert(zero, kFrameIsExpectedToBeAligned);
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  DCHECK(slots != 0 || !info()->IsOptimizing());
  if (slots > 0) {
    if (slots == 1) {
      if (dynamic_frame_alignment_) {
        __ push(edx);
      } else {
        __ push(Immediate(kNoAlignmentPadding));
      }
    } else {
      if (FLAG_debug_code) {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
        __ push(eax);
        __ mov(Operand(eax), Immediate(slots));
        Label loop;
        __ bind(&loop);
        __ mov(MemOperand(esp, eax, times_4, 0),
               Immediate(kSlotsZapValue));
        __ dec(eax);
        __ j(not_zero, &loop);
        __ pop(eax);
      } else {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      }

      if (support_aligned_spilled_doubles_) {
        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
        // Store dynamic frame alignment state in the first local.
        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
        if (dynamic_frame_alignment_) {
          __ mov(Operand(ebp, offset), edx);
        } else {
          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
        }
      }
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is still in edi.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(edi);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in eax. It replaces the context passed to us.
    // It's saved in the stack and kept live in esi.
    __ mov(esi, eax);
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);

    // Copy parameters into context if necessary.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers eax and ebx.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(esi, context_offset, eax, ebx,
                                    kDontSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Initialize FPU state.
  __ fninit();
  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so esi still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}

void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Move state of dynamic frame alignment into edx.
  __ Move(edx, Immediate(kNoAlignmentPadding));

  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
    Label do_not_pad, align_loop;
    // Align ebp + 4 to a multiple of 2 * kPointerSize.
    __ test(ebp, Immediate(kPointerSize));
    __ j(zero, &do_not_pad, Label::kNear);
    __ push(Immediate(0));
    __ mov(ebx, esp);
    __ mov(edx, Immediate(kAlignmentPaddingPushed));

    // Move all parts of the frame over one word. The frame consists of:
    // unoptimized frame slots, alignment state, context, frame pointer, return
    // address, receiver, and the arguments.
    __ mov(ecx, Immediate(scope()->num_parameters() +
                          5 + graph()->osr()->UnoptimizedFrameSlots()));

    __ bind(&align_loop);
    __ mov(eax, Operand(ebx, 1 * kPointerSize));
    __ mov(Operand(ebx, 0), eax);
    __ add(Operand(ebx), Immediate(kPointerSize));
    __ dec(ecx);
    __ j(not_zero, &align_loop, Label::kNear);
    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
    __ sub(Operand(ebp), Immediate(kPointerSize));
    __ bind(&do_not_pad);
  }

  // Save the first local, which is overwritten by the alignment state.
  Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
  __ push(alignment_loc);

  // Set the dynamic frame alignment state.
  __ mov(alignment_loc, edx);

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 1);
  __ sub(esp, Immediate((slots - 1) * kPointerSize));

  // Initialize FPU state.
  __ fninit();
}

void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
  FlushX87StackIfNecessary(instr);
}

void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  // When returning from a function call, the FPU must be initialized again.
  if (instr->IsCall() && instr->ClobbersDoubleRegisters(isolate())) {
    bool double_result = instr->HasDoubleRegisterResult();
    if (double_result) {
      __ lea(esp, Operand(esp, -kDoubleSize));
      __ fstp_d(Operand(esp, 0));
    }
    __ fninit();
    if (double_result) {
      __ fld_d(Operand(esp, 0));
      __ lea(esp, Operand(esp, kDoubleSize));
    }
  }
  if (instr->IsGoto()) {
    x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr), this);
  } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
             !instr->IsGap() && !instr->IsReturn()) {
    if (instr->ClobbersDoubleRegisters(isolate())) {
      if (instr->HasDoubleRegisterResult()) {
        DCHECK_EQ(1, x87_stack_.depth());
      } else {
        DCHECK_EQ(0, x87_stack_.depth());
      }
    }
    __ VerifyX87StackDepth(x87_stack_.depth());
  }
}

bool LCodeGen::GenerateJumpTable() {
  Label needs_frame;
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  for (int i = 0; i < jump_table_.length(); i++) {
    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
    __ bind(&table_entry->label);
    Address entry = table_entry->address;
    DeoptComment(table_entry->reason);
    if (table_entry->needs_frame) {
      DCHECK(!info()->saves_caller_doubles());
      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        DCHECK(info()->IsStub());
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        // Push a PC inside the function so that the deopt code can find where
        // the deopt comes from. It doesn't have to be the precise return
        // address of a "calling" LAZY deopt, it only has to be somewhere
        // inside the code body.
        Label push_approx_pc;
        __ call(&push_approx_pc);
        __ bind(&push_approx_pc);
        // Push the continuation which was stashed where the ebp should
        // be. Replace it with the saved ebp.
        __ push(MemOperand(esp, 3 * kPointerSize));
        __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
        __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
        __ ret(0);  // Call the continuation without clobbering registers.
      }
    } else {
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  return !is_aborted();
}

bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      X87Stack copy(code->x87_stack());
      x87_stack_ = copy;

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that esi isn't trashed.
        __ push(ebp);  // Caller's frame pointer.
        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        __ lea(ebp, Operand(esp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        frame_is_built_ = false;
        __ mov(esp, ebp);
        __ pop(ebp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  if (!info()->IsStub()) {
    // For lazy deoptimization we need space to patch a call after every call.
    // Ensure there is always space for such patching, even if the code ends
    // in a call.
    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
    while (masm()->pc_offset() < target_offset) {
      masm()->nop();
    }
  }
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}

Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


X87Register LCodeGen::ToX87Register(int index) const {
  return X87Register::FromAllocationIndex(index);
}


void LCodeGen::X87LoadForUsage(X87Register reg) {
  DCHECK(x87_stack_.Contains(reg));
  x87_stack_.Fxch(reg);
  x87_stack_.pop();
}


void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
  DCHECK(x87_stack_.Contains(reg1));
  DCHECK(x87_stack_.Contains(reg2));
  if (reg1.is(reg2) && x87_stack_.depth() == 1) {
    __ fld(x87_stack_.st(reg1));
    x87_stack_.push(reg1);
    x87_stack_.pop();
    x87_stack_.pop();
  } else {
    x87_stack_.Fxch(reg1, 1);
    x87_stack_.Fxch(reg2);
    x87_stack_.pop();
    x87_stack_.pop();
  }
}

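// The layout packs the virtual stack into a single integer: bits 0..2 hold
// the stack depth, and each following 3-bit group holds one register code,
// topmost register first. E.g. depth 2 with registers of code 0 (top) and 2
// encodes as 2 | (0 << 3) | (2 << 6).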
int LCodeGen::X87Stack::GetLayout() {
  int layout = stack_depth_;
  for (int i = 0; i < stack_depth_; i++) {
    layout |= (stack_[stack_depth_ - 1 - i].code() << ((i + 1) * 3));
  }

  return layout;
}

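// Bring reg to physical position ST(other_slot). When neither slot is the
// top of the physical stack, the swap takes three fxch instructions: bring
// one slot to the top, swap with the other slot, then restore the top.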
void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
  DCHECK(is_mutable_);
  DCHECK(Contains(reg) && stack_depth_ > other_slot);
  int i = ArrayIndex(reg);
  int st = st2idx(i);
  if (st != other_slot) {
    int other_i = st2idx(other_slot);
    X87Register other = stack_[other_i];
    stack_[other_i] = reg;
    stack_[i] = other;
    if (st == 0) {
      __ fxch(other_slot);
    } else if (other_slot == 0) {
      __ fxch(st);
    } else {
      __ fxch(st);
      __ fxch(other_slot);
      __ fxch(st);
    }
  }
}

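// stack_[0] is the bottom of the virtual stack, so array index i and
// physical position ST(p) are related by p == stack_depth_ - 1 - i; the
// same formula converts in both directions.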
int LCodeGen::X87Stack::st2idx(int pos) {
  return stack_depth_ - pos - 1;
}


int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return i;
  }
  UNREACHABLE();
  return -1;
}


bool LCodeGen::X87Stack::Contains(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return true;
  }
  return false;
}

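// fstp(st) pops the value on top of the physical stack into slot st, so
// freeing a non-top slot moves the current TOS value into it; the virtual
// stack_ array is updated first to mirror that move.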
void LCodeGen::X87Stack::Free(X87Register reg) {
  DCHECK(is_mutable_);
  DCHECK(Contains(reg));
  int i = ArrayIndex(reg);
  int st = st2idx(i);
  if (st > 0) {
    // Keep track of how fstp(i) changes the order of elements.
    int tos_i = st2idx(0);
    stack_[i] = stack_[tos_i];
  }
  pop();
  __ fstp(st);
}


void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
  if (x87_stack_.Contains(dst)) {
    x87_stack_.Fxch(dst);
    __ fstp(0);
  } else {
    x87_stack_.push(dst);
  }
  X87Fld(src, opts);
}


void LCodeGen::X87Mov(X87Register dst, X87Register src, X87OperandType opts) {
  if (x87_stack_.Contains(dst)) {
    x87_stack_.Fxch(dst);
    __ fstp(0);
    x87_stack_.pop();
    // Push ST(i) onto the FPU register stack.
    __ fld(x87_stack_.st(src));
    x87_stack_.push(dst);
  } else {
    // Push ST(i) onto the FPU register stack.
    __ fld(x87_stack_.st(src));
    x87_stack_.push(dst);
  }
}


void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
  DCHECK(!src.is_reg_only());
  switch (opts) {
    case kX87DoubleOperand:
      __ fld_d(src);
      break;
    case kX87FloatOperand:
      __ fld_s(src);
      break;
    case kX87IntOperand:
      __ fild_s(src);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
  DCHECK(!dst.is_reg_only());
  x87_stack_.Fxch(src);
  switch (opts) {
    case kX87DoubleOperand:
      __ fst_d(dst);
      break;
    case kX87FloatOperand:
      __ fst_s(dst);
      break;
    case kX87IntOperand:
      __ fist_s(dst);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
  DCHECK(is_mutable_);
  if (Contains(reg)) {
    Free(reg);
  }
  // Mark this register as the next register to write to.
  stack_[stack_depth_] = reg;
}


void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
  DCHECK(is_mutable_);
  // Assert the reg is prepared to write, but not on the virtual stack yet.
  DCHECK(!Contains(reg) && stack_[stack_depth_].is(reg) &&
         stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
  stack_depth_++;
}


void LCodeGen::X87PrepareBinaryOp(
    X87Register left, X87Register right, X87Register result) {
  // You need to use DefineSameAsFirst for x87 instructions.
  DCHECK(result.is(left));
  x87_stack_.Fxch(right, 1);
  x87_stack_.Fxch(left);
}

void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
  if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) {
    bool double_inputs = instr->HasDoubleRegisterInput();

    // Flush the stack from TOS down, since Free() will mess with TOS.
    for (int i = stack_depth_ - 1; i >= 0; i--) {
      X87Register reg = stack_[i];
      // Skip registers which contain the inputs for the next instruction
      // when flushing the stack.
      if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
        continue;
      }
      Free(reg);
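      // Free() may have moved the old TOS element down into slot i; bump i
      // so the loop's decrement re-examines that slot.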
      if (i < stack_depth_ - 1) i++;
    }
  }
  if (instr->IsReturn()) {
    while (stack_depth_ > 0) {
      __ fstp(0);
      stack_depth_--;
    }
    if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
  }
}

void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr,
                                      LCodeGen* cgen) {
  // When going to a joined block, an explicit LClobberDoubles is inserted
  // before the LGoto, because all used x87 registers are spilled to stack
  // slots. The ResolvePhis phase of the register allocator guarantees that
  // the two inputs' x87 stacks have the same layout, so don't check
  // stack_depth_ <= 1 here.
  int goto_block_id = goto_instr->block_id();
  if (current_block_id + 1 != goto_block_id) {
    // If we have a value on the x87 stack on leaving a block, it must be a
    // phi input. If the next block we compile is not the join block, we have
    // to discard the stack state.
    // Before discarding the stack state, we need to save it if the "goto
    // block" has an unreachable last predecessor and
    // FLAG_unreachable_code_elimination is on.
    if (FLAG_unreachable_code_elimination) {
      int length = goto_instr->block()->predecessors()->length();
      bool has_unreachable_last_predecessor = false;
      for (int i = 0; i < length; i++) {
        HBasicBlock* block = goto_instr->block()->predecessors()->at(i);
        if (block->IsUnreachable() &&
            (block->block_id() + 1) == goto_block_id) {
          has_unreachable_last_predecessor = true;
        }
      }
      if (has_unreachable_last_predecessor) {
        if (cgen->x87_stack_map_.find(goto_block_id) ==
            cgen->x87_stack_map_.end()) {
          X87Stack* stack = new (cgen->zone()) X87Stack(*this);
          cgen->x87_stack_map_.insert(std::make_pair(goto_block_id, stack));
        }
      }
    }

    // Discard the stack state.
    stack_depth_ = 0;
  }
}

void LCodeGen::EmitFlushX87ForDeopt() {
  // The deoptimizer does not support X87 Registers. But as long as we
  // deopt from a stub it's not a problem, since we will re-materialize the
  // original stub inputs, which can't be double registers.
  // DCHECK(info()->IsStub());
  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
    __ pushfd();
    __ VerifyX87StackDepth(x87_stack_.depth());
    __ popfd();
  }

  // Flush X87 stack in the deoptimizer entry.
}

Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


X87Register LCodeGen::ToX87Register(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToX87Register(op->index());
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}

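// Parameter indices are negative; without an eager frame they are addressed
// off esp instead of ebp. E.g. index -1 maps to esp + kPCOnStackSize, the
// slot just above the return address.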
static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}

Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, StackSlotOffset(op->index()));
  } else {
    // Retrieve the parameter relative to the stack pointer, since there is
    // no eager frame.
    return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


Operand LCodeGen::HighOperand(LOperand* op) {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve the parameter relative to the stack pointer, since there is
    // no eager frame.
    return Operand(
        esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}

void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;
  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      DCHECK(translation_size == 1);
      DCHECK(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      DCHECK(translation_size == 2);
      DCHECK(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    default:
      UNREACHABLE();
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    X87Register reg = ToX87Register(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntime(const Runtime::Function* fun, int argc,
                           LInstruction* instr, SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);
  DCHECK(instr->HasPointerMap());

  __ CallRuntime(fun, argc, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);

  DCHECK(info()->is_calling());
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(esi)) {
      __ mov(esi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ mov(esi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);

  DCHECK(info()->is_calling());
}

void LCodeGen::RegisterEnvironmentForDeoptimization(
    LEnvironment* environment, Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            const char* detail,
                            Deoptimizer::BailoutType bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ mov(eax, Operand::StaticVariable(count));
    __ sub(eax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
    DCHECK(frame_is_built_);
    // Put the x87 stack layout in TOS.
    if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();
    __ push(Immediate(x87_stack_.GetLayout()));
    __ fild_s(MemOperand(esp, 0));
    // Don't touch eflags.
    __ lea(esp, Operand(esp, kPointerSize));
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
  }

  // Put the x87 stack layout in TOS, so that we can save x87 fp registers in
  // the correct location.
  {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();

    int x87_stack_layout = x87_stack_.GetLayout();
    __ push(Immediate(x87_stack_layout));
    __ fild_s(MemOperand(esp, 0));
    // Don't touch eflags.
    __ lea(esp, Operand(esp, kPointerSize));
    __ bind(&done);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  }

  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
                             instr->Mnemonic(), detail);
  DCHECK(info()->IsStub() || frame_is_built_);
  if (cc == no_condition && frame_is_built_) {
    DeoptComment(reason);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}

void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            const char* detail) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, instr, detail, bailout_type);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}

void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  if (label->block()->predecessors()->length() > 1) {
    // A join block's x87 stack is that of its last visited predecessor.
    // If the last visited predecessor block is unreachable, the stack state
    // will be wrong. In that case, use the x87 stack of a reachable
    // predecessor.
    X87StackMap::const_iterator it = x87_stack_map_.find(current_block_);
    // Restore x87 stack.
    if (it != x87_stack_map_.end()) {
      x87_stack_ = *(it->second);
    }
  }
  DoGap(label);
}

void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->result()).is(eax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}

void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
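  // The mask is Abs(divisor) - 1, written so that it cannot overflow even
  // for divisor == kMinInt: -(kMinInt + 1) == kMaxInt.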
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ test(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    // Note that this is correct even for kMinInt operands.
    __ neg(dividend);
    __ and_(dividend, mask);
    __ neg(dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr, "minus zero");
    }
    __ jmp(&done, Label::kNear);
  }

  __ bind(&dividend_is_not_negative);
  __ and_(dividend, mask);
  __ bind(&done);
}

void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(eax));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, "division by zero");
    return;
  }

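  // Compute dividend - (dividend / |divisor|) * |divisor|: TruncatingDiv
  // leaves the truncated quotient in edx, which is multiplied back and
  // subtracted from the dividend to give the remainder.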
  __ TruncatingDiv(dividend, Abs(divisor));
  __ imul(edx, edx, Abs(divisor));
  __ mov(eax, dividend);
  __ sub(eax, edx);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    __ cmp(dividend, Immediate(0));
    DeoptimizeIf(less, instr, "minus zero");
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  DCHECK(left_reg.is(eax));
  Register right_reg = ToRegister(instr->right());
  DCHECK(!right_reg.is(eax));
  DCHECK(!right_reg.is(edx));
  Register result_reg = ToRegister(instr->result());
  DCHECK(result_reg.is(edx));

  Label done;
  // Check for x % 0, idiv would signal a divide error. We have to
  // deopt in this case because we can't return a NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, Operand(right_reg));
    DeoptimizeIf(zero, instr, "division by zero");
  }

  // Check for kMinInt % -1, idiv would signal a divide error. We
  // have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ cmp(left_reg, kMinInt);
    __ j(not_equal, &no_overflow_possible, Label::kNear);
    __ cmp(right_reg, -1);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(equal, instr, "minus zero");
    } else {
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Move(result_reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // Sign extend dividend in eax into edx:eax.
  __ cdq();

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive_left;
    __ test(left_reg, Operand(left_reg));
    __ j(not_sign, &positive_left, Label::kNear);
    __ idiv(right_reg);
    __ test(result_reg, Operand(result_reg));
    DeoptimizeIf(zero, instr, "minus zero");
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
  }
  __ idiv(right_reg);
  __ bind(&done);
}

void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, "minus zero");
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmp(dividend, kMinInt);
    DeoptimizeIf(zero, instr, "overflow");
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ test(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr, "lost precision");
  }
  __ Move(result, dividend);
  int32_t shift = WhichPowerOf2Abs(divisor);
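  // A negative dividend needs a bias of 2^shift - 1 added before the final
  // arithmetic shift so the quotient rounds toward zero: sar(result, 31)
  // yields the sign mask, and shr(result, 32 - shift) turns it into that
  // bias (0 for non-negative dividends).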
  if (shift > 0) {
    // The arithmetic shift is always OK, the 'if' is an optimization only.
    if (shift > 1) __ sar(result, 31);
    __ shr(result, 32 - shift);
    __ add(result, dividend);
    __ sar(result, shift);
  }
  if (divisor < 0) __ neg(result);
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(edx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, "division by zero");
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, "minus zero");
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ neg(edx);

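  // If the quotient may be inexact, multiply it back and compare with the
  // dividend; a nonzero remainder means precision was lost.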
1567 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1568 __ mov(eax, edx);
1569 __ imul(eax, eax, divisor);
1570 __ sub(eax, dividend);
1571 DeoptimizeIf(not_equal, instr, "lost precision");
1572 }
1573 }
1574
1575
1576 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
DoDivI(LDivI * instr)1577 void LCodeGen::DoDivI(LDivI* instr) {
1578 HBinaryOperation* hdiv = instr->hydrogen();
1579 Register dividend = ToRegister(instr->dividend());
1580 Register divisor = ToRegister(instr->divisor());
1581 Register remainder = ToRegister(instr->temp());
1582 DCHECK(dividend.is(eax));
1583 DCHECK(remainder.is(edx));
1584 DCHECK(ToRegister(instr->result()).is(eax));
1585 DCHECK(!divisor.is(eax));
1586 DCHECK(!divisor.is(edx));
1587
1588 // Check for x / 0.
1589 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1590 __ test(divisor, divisor);
1591 DeoptimizeIf(zero, instr, "division by zero");
1592 }
1593
1594 // Check for (0 / -x) that will produce negative zero.
1595 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1596 Label dividend_not_zero;
1597 __ test(dividend, dividend);
1598 __ j(not_zero, ÷nd_not_zero, Label::kNear);
1599 __ test(divisor, divisor);
1600 DeoptimizeIf(sign, instr, "minus zero");
1601 __ bind(÷nd_not_zero);
1602 }
1603
1604 // Check for (kMinInt / -1).
1605 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1606 Label dividend_not_min_int;
1607 __ cmp(dividend, kMinInt);
1608 __ j(not_zero, ÷nd_not_min_int, Label::kNear);
1609 __ cmp(divisor, -1);
1610 DeoptimizeIf(zero, instr, "overflow");
1611 __ bind(÷nd_not_min_int);
1612 }
1613
1614 // Sign extend to edx (= remainder).
1615 __ cdq();
1616 __ idiv(divisor);
1617
1618 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1619 // Deoptimize if remainder is not 0.
1620 __ test(remainder, remainder);
1621 DeoptimizeIf(not_zero, instr, "lost precision");
1622 }
1623 }
1624
1625
DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I * instr)1626 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1627 Register dividend = ToRegister(instr->dividend());
1628 int32_t divisor = instr->divisor();
1629 DCHECK(dividend.is(ToRegister(instr->result())));
1630
1631 // If the divisor is positive, things are easy: There can be no deopts and we
1632 // can simply do an arithmetic right shift.
1633 if (divisor == 1) return;
1634 int32_t shift = WhichPowerOf2Abs(divisor);
1635 if (divisor > 1) {
1636 __ sar(dividend, shift);
1637 return;
1638 }
1639
1640 // If the divisor is negative, we have to negate and handle edge cases.
1641 __ neg(dividend);
1642 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1643 DeoptimizeIf(zero, instr, "minus zero");
1644 }
1645
1646 // Dividing by -1 is basically negation, unless we overflow.
1647 if (divisor == -1) {
1648 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1649 DeoptimizeIf(overflow, instr, "overflow");
1650 }
1651 return;
1652 }
1653
1654 // If the negation could not overflow, simply shifting is OK.
1655 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1656 __ sar(dividend, shift);
1657 return;
1658 }
1659
1660 Label not_kmin_int, done;
1661 __ j(no_overflow, &not_kmin_int, Label::kNear);
1662 __ mov(dividend, Immediate(kMinInt / divisor));
1663 __ jmp(&done, Label::kNear);
1664 __ bind(&not_kmin_int);
1665 __ sar(dividend, shift);
1666 __ bind(&done);
1667 }
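// Note on the negative-divisor path above: negate-then-sar computes a
// flooring division because sar rounds toward minus infinity. For example,
// with dividend 5 and divisor -2: neg gives -5, and sar by 1 turns
// 0xFFFFFFFB into 0xFFFFFFFD == -3 == floor(-2.5), where a truncating
// division would have produced -2.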
1668
1669
1670 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1671 Register dividend = ToRegister(instr->dividend());
1672 int32_t divisor = instr->divisor();
1673 DCHECK(ToRegister(instr->result()).is(edx));
1674
1675 if (divisor == 0) {
1676 DeoptimizeIf(no_condition, instr, "division by zero");
1677 return;
1678 }
1679
1680 // Check for (0 / -x) that will produce negative zero.
1681 HMathFloorOfDiv* hdiv = instr->hydrogen();
1682 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1683 __ test(dividend, dividend);
1684 DeoptimizeIf(zero, instr, "minus zero");
1685 }
1686
1687 // Easy case: We need no dynamic check for the dividend and the flooring
1688 // division is the same as the truncating division.
1689 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1690 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1691 __ TruncatingDiv(dividend, Abs(divisor));
1692 if (divisor < 0) __ neg(edx);
1693 return;
1694 }
1695
1696 // In the general case we may need to adjust before and after the truncating
1697 // division to get a flooring division.
1698 Register temp = ToRegister(instr->temp3());
1699 DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
1700 Label needs_adjustment, done;
1701 __ cmp(dividend, Immediate(0));
1702 __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
1703 __ TruncatingDiv(dividend, Abs(divisor));
1704 if (divisor < 0) __ neg(edx);
1705 __ jmp(&done, Label::kNear);
1706 __ bind(&needs_adjustment);
1707 __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
1708 __ TruncatingDiv(temp, Abs(divisor));
1709 if (divisor < 0) __ neg(edx);
1710 __ dec(edx);
1711 __ bind(&done);
1712 }
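// Note: the "needs adjustment" path relies on the identity
//   floor(a / b) == trunc((a + 1) / b) - 1   for a < 0 < b
// (mirrored with a - 1 when b < 0). Worked example with a = -7, b = 2:
//   trunc((-7 + 1) / 2) - 1 == trunc(-3) - 1 == -4 == floor(-3.5).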
1713
1714
1715 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1716 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1717 HBinaryOperation* hdiv = instr->hydrogen();
1718 Register dividend = ToRegister(instr->dividend());
1719 Register divisor = ToRegister(instr->divisor());
1720 Register remainder = ToRegister(instr->temp());
1721 Register result = ToRegister(instr->result());
1722 DCHECK(dividend.is(eax));
1723 DCHECK(remainder.is(edx));
1724 DCHECK(result.is(eax));
1725 DCHECK(!divisor.is(eax));
1726 DCHECK(!divisor.is(edx));
1727
1728 // Check for x / 0.
1729 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1730 __ test(divisor, divisor);
1731 DeoptimizeIf(zero, instr, "division by zero");
1732 }
1733
1734 // Check for (0 / -x) that will produce negative zero.
1735 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1736 Label dividend_not_zero;
1737 __ test(dividend, dividend);
1738 __ j(not_zero, &dividend_not_zero, Label::kNear);
1739 __ test(divisor, divisor);
1740 DeoptimizeIf(sign, instr, "minus zero");
1741 __ bind(&dividend_not_zero);
1742 }
1743
1744 // Check for (kMinInt / -1).
1745 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1746 Label dividend_not_min_int;
1747 __ cmp(dividend, kMinInt);
1748 __ j(not_zero, &dividend_not_min_int, Label::kNear);
1749 __ cmp(divisor, -1);
1750 DeoptimizeIf(zero, instr, "overflow");
1751 __ bind(&dividend_not_min_int);
1752 }
1753
1754 // Sign extend to edx (= remainder).
1755 __ cdq();
1756 __ idiv(divisor);
1757
1758 Label done;
1759 __ test(remainder, remainder);
1760 __ j(zero, &done, Label::kNear);
1761 __ xor_(remainder, divisor);
1762 __ sar(remainder, 31);
1763 __ add(result, remainder);
1764 __ bind(&done);
1765 }
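// Note: the branch-free fixup after idiv turns the truncated quotient into a
// flooring one. remainder ^ divisor has its sign bit set exactly when the
// two disagree in sign, sar by 31 smears that bit into 0 or -1, and the add
// then decrements the quotient only when needed. Example: -7 idiv 2 gives
// quotient -3, remainder -1; (-1 ^ 2) is negative, so the result becomes
// -4 == floor(-3.5).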
1766
1767
1768 void LCodeGen::DoMulI(LMulI* instr) {
1769 Register left = ToRegister(instr->left());
1770 LOperand* right = instr->right();
1771
1772 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1773 __ mov(ToRegister(instr->temp()), left);
1774 }
1775
1776 if (right->IsConstantOperand()) {
1777 // Try strength reductions on the multiplication.
1778 // All replacement instructions are at most as long as the imul
1779 // and have better latency.
1780 int constant = ToInteger32(LConstantOperand::cast(right));
1781 if (constant == -1) {
1782 __ neg(left);
1783 } else if (constant == 0) {
1784 __ xor_(left, Operand(left));
1785 } else if (constant == 2) {
1786 __ add(left, Operand(left));
1787 } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1788 // If we know that the multiplication can't overflow, it's safe to
1789 // use instructions that don't set the overflow flag for the
1790 // multiplication.
1791 switch (constant) {
1792 case 1:
1793 // Do nothing.
1794 break;
1795 case 3:
1796 __ lea(left, Operand(left, left, times_2, 0));
1797 break;
1798 case 4:
1799 __ shl(left, 2);
1800 break;
1801 case 5:
1802 __ lea(left, Operand(left, left, times_4, 0));
1803 break;
1804 case 8:
1805 __ shl(left, 3);
1806 break;
1807 case 9:
1808 __ lea(left, Operand(left, left, times_8, 0));
1809 break;
1810 case 16:
1811 __ shl(left, 4);
1812 break;
1813 default:
1814 __ imul(left, left, constant);
1815 break;
1816 }
1817 } else {
1818 __ imul(left, left, constant);
1819 }
1820 } else {
1821 if (instr->hydrogen()->representation().IsSmi()) {
1822 __ SmiUntag(left);
1823 }
1824 __ imul(left, ToOperand(right));
1825 }
1826
1827 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1828 DeoptimizeIf(overflow, instr, "overflow");
1829 }
1830
1831 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1832 // Bail out if the result is supposed to be negative zero.
1833 Label done;
1834 __ test(left, Operand(left));
1835 __ j(not_zero, &done);
1836 if (right->IsConstantOperand()) {
1837 if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1838 DeoptimizeIf(no_condition, instr, "minus zero");
1839 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1840 __ cmp(ToRegister(instr->temp()), Immediate(0));
1841 DeoptimizeIf(less, instr, "minus zero");
1842 }
1843 } else {
1844 // Test the non-zero operand for negative sign.
1845 __ or_(ToRegister(instr->temp()), ToOperand(right));
1846 DeoptimizeIf(sign, instr, "minus zero");
1847 }
1848 __ bind(&done);
1849 }
1850 }
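// Note on the strength reductions in DoMulI: lea's base + index * scale
// addressing folds small multiplications into one instruction, e.g. x * 9
// becomes lea(left, Operand(left, left, times_8, 0)), i.e. left + left * 8.
// These forms are only used when the multiplication is known not to
// overflow, because unlike imul they do not set the overflow flag that the
// DeoptimizeIf(overflow, ...) check depends on.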
1851
1852
1853 void LCodeGen::DoBitI(LBitI* instr) {
1854 LOperand* left = instr->left();
1855 LOperand* right = instr->right();
1856 DCHECK(left->Equals(instr->result()));
1857 DCHECK(left->IsRegister());
1858
1859 if (right->IsConstantOperand()) {
1860 int32_t right_operand =
1861 ToRepresentation(LConstantOperand::cast(right),
1862 instr->hydrogen()->representation());
1863 switch (instr->op()) {
1864 case Token::BIT_AND:
1865 __ and_(ToRegister(left), right_operand);
1866 break;
1867 case Token::BIT_OR:
1868 __ or_(ToRegister(left), right_operand);
1869 break;
1870 case Token::BIT_XOR:
1871 if (right_operand == int32_t(~0)) {
1872 __ not_(ToRegister(left));
1873 } else {
1874 __ xor_(ToRegister(left), right_operand);
1875 }
1876 break;
1877 default:
1878 UNREACHABLE();
1879 break;
1880 }
1881 } else {
1882 switch (instr->op()) {
1883 case Token::BIT_AND:
1884 __ and_(ToRegister(left), ToOperand(right));
1885 break;
1886 case Token::BIT_OR:
1887 __ or_(ToRegister(left), ToOperand(right));
1888 break;
1889 case Token::BIT_XOR:
1890 __ xor_(ToRegister(left), ToOperand(right));
1891 break;
1892 default:
1893 UNREACHABLE();
1894 break;
1895 }
1896 }
1897 }
1898
1899
1900 void LCodeGen::DoShiftI(LShiftI* instr) {
1901 LOperand* left = instr->left();
1902 LOperand* right = instr->right();
1903 DCHECK(left->Equals(instr->result()));
1904 DCHECK(left->IsRegister());
1905 if (right->IsRegister()) {
1906 DCHECK(ToRegister(right).is(ecx));
1907
1908 switch (instr->op()) {
1909 case Token::ROR:
1910 __ ror_cl(ToRegister(left));
1911 break;
1912 case Token::SAR:
1913 __ sar_cl(ToRegister(left));
1914 break;
1915 case Token::SHR:
1916 __ shr_cl(ToRegister(left));
1917 if (instr->can_deopt()) {
1918 __ test(ToRegister(left), ToRegister(left));
1919 DeoptimizeIf(sign, instr, "negative value");
1920 }
1921 break;
1922 case Token::SHL:
1923 __ shl_cl(ToRegister(left));
1924 break;
1925 default:
1926 UNREACHABLE();
1927 break;
1928 }
1929 } else {
1930 int value = ToInteger32(LConstantOperand::cast(right));
1931 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1932 switch (instr->op()) {
1933 case Token::ROR:
1934 if (shift_count == 0 && instr->can_deopt()) {
1935 __ test(ToRegister(left), ToRegister(left));
1936 DeoptimizeIf(sign, instr, "negative value");
1937 } else {
1938 __ ror(ToRegister(left), shift_count);
1939 }
1940 break;
1941 case Token::SAR:
1942 if (shift_count != 0) {
1943 __ sar(ToRegister(left), shift_count);
1944 }
1945 break;
1946 case Token::SHR:
1947 if (shift_count != 0) {
1948 __ shr(ToRegister(left), shift_count);
1949 } else if (instr->can_deopt()) {
1950 __ test(ToRegister(left), ToRegister(left));
1951 DeoptimizeIf(sign, instr, "negative value");
1952 }
1953 break;
1954 case Token::SHL:
1955 if (shift_count != 0) {
1956 if (instr->hydrogen_value()->representation().IsSmi() &&
1957 instr->can_deopt()) {
1958 if (shift_count != 1) {
1959 __ shl(ToRegister(left), shift_count - 1);
1960 }
1961 __ SmiTag(ToRegister(left));
1962 DeoptimizeIf(overflow, instr, "overflow");
1963 } else {
1964 __ shl(ToRegister(left), shift_count);
1965 }
1966 }
1967 break;
1968 default:
1969 UNREACHABLE();
1970 break;
1971 }
1972 }
1973 }
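// Note: the SHR deopts above cover the JavaScript x >>> 0 corner case. A
// logical shift by zero leaves the sign bit intact, so an input like
// 0x80000000 would denote the uint32 value 2147483648, which cannot be
// represented in the int32 result register; the code deopts ("negative
// value") instead of silently producing a negative number.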
1974
1975
1976 void LCodeGen::DoSubI(LSubI* instr) {
1977 LOperand* left = instr->left();
1978 LOperand* right = instr->right();
1979 DCHECK(left->Equals(instr->result()));
1980
1981 if (right->IsConstantOperand()) {
1982 __ sub(ToOperand(left),
1983 ToImmediate(right, instr->hydrogen()->representation()));
1984 } else {
1985 __ sub(ToRegister(left), ToOperand(right));
1986 }
1987 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1988 DeoptimizeIf(overflow, instr, "overflow");
1989 }
1990 }
1991
1992
1993 void LCodeGen::DoConstantI(LConstantI* instr) {
1994 __ Move(ToRegister(instr->result()), Immediate(instr->value()));
1995 }
1996
1997
1998 void LCodeGen::DoConstantS(LConstantS* instr) {
1999 __ Move(ToRegister(instr->result()), Immediate(instr->value()));
2000 }
2001
2002
2003 void LCodeGen::DoConstantD(LConstantD* instr) {
2004 double v = instr->value();
2005 uint64_t int_val = bit_cast<uint64_t, double>(v);
2006 int32_t lower = static_cast<int32_t>(int_val);
2007 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
2008 DCHECK(instr->result()->IsDoubleRegister());
2009
2010 __ push(Immediate(upper));
2011 __ push(Immediate(lower));
2012 X87Register reg = ToX87Register(instr->result());
2013 X87Mov(reg, Operand(esp, 0));
2014 __ add(Operand(esp), Immediate(kDoubleSize));
2015 }
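// Note: x87 has no way to load an arbitrary double immediate directly, so
// DoConstantD materializes the constant by pushing the two 32-bit halves of
// its IEEE-754 bit pattern and loading through memory. For instance,
// 1.5 == 0x3FF8000000000000, i.e. upper == 0x3FF80000 and lower == 0x0.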
2016
2017
2018 void LCodeGen::DoConstantE(LConstantE* instr) {
2019 __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
2020 }
2021
2022
2023 void LCodeGen::DoConstantT(LConstantT* instr) {
2024 Register reg = ToRegister(instr->result());
2025 Handle<Object> object = instr->value(isolate());
2026 AllowDeferredHandleDereference smi_check;
2027 __ LoadObject(reg, object);
2028 }
2029
2030
2031 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
2032 Register result = ToRegister(instr->result());
2033 Register map = ToRegister(instr->value());
2034 __ EnumLength(result, map);
2035 }
2036
2037
2038 void LCodeGen::DoDateField(LDateField* instr) {
2039 Register object = ToRegister(instr->date());
2040 Register result = ToRegister(instr->result());
2041 Register scratch = ToRegister(instr->temp());
2042 Smi* index = instr->index();
2043 Label runtime, done;
2044 DCHECK(object.is(result));
2045 DCHECK(object.is(eax));
2046
2047 __ test(object, Immediate(kSmiTagMask));
2048 DeoptimizeIf(zero, instr, "Smi");
2049 __ CmpObjectType(object, JS_DATE_TYPE, scratch);
2050 DeoptimizeIf(not_equal, instr, "not a date object");
2051
2052 if (index->value() == 0) {
2053 __ mov(result, FieldOperand(object, JSDate::kValueOffset));
2054 } else {
2055 if (index->value() < JSDate::kFirstUncachedField) {
2056 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
2057 __ mov(scratch, Operand::StaticVariable(stamp));
2058 __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
2059 __ j(not_equal, &runtime, Label::kNear);
2060 __ mov(result, FieldOperand(object, JSDate::kValueOffset +
2061 kPointerSize * index->value()));
2062 __ jmp(&done, Label::kNear);
2063 }
2064 __ bind(&runtime);
2065 __ PrepareCallCFunction(2, scratch);
2066 __ mov(Operand(esp, 0), object);
2067 __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
2068 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
2069 __ bind(&done);
2070 }
2071 }
2072
2073
2074 Operand LCodeGen::BuildSeqStringOperand(Register string,
2075 LOperand* index,
2076 String::Encoding encoding) {
2077 if (index->IsConstantOperand()) {
2078 int offset = ToRepresentation(LConstantOperand::cast(index),
2079 Representation::Integer32());
2080 if (encoding == String::TWO_BYTE_ENCODING) {
2081 offset *= kUC16Size;
2082 }
2083 STATIC_ASSERT(kCharSize == 1);
2084 return FieldOperand(string, SeqString::kHeaderSize + offset);
2085 }
2086 return FieldOperand(
2087 string, ToRegister(index),
2088 encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
2089 SeqString::kHeaderSize);
2090 }
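// Note: BuildSeqStringOperand scales the character index by the encoding
// width, times_1 for one-byte strings and times_2 for two-byte strings. A
// constant index of 5 in a TWO_BYTE_ENCODING string, for example, addresses
// SeqString::kHeaderSize + 10 within the string object.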
2091
2092
2093 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
2094 String::Encoding encoding = instr->hydrogen()->encoding();
2095 Register result = ToRegister(instr->result());
2096 Register string = ToRegister(instr->string());
2097
2098 if (FLAG_debug_code) {
2099 __ push(string);
2100 __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
2101 __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
2102
2103 __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
2104 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2105 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2106 __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
2107 ? one_byte_seq_type : two_byte_seq_type));
2108 __ Check(equal, kUnexpectedStringType);
2109 __ pop(string);
2110 }
2111
2112 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2113 if (encoding == String::ONE_BYTE_ENCODING) {
2114 __ movzx_b(result, operand);
2115 } else {
2116 __ movzx_w(result, operand);
2117 }
2118 }
2119
2120
2121 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
2122 String::Encoding encoding = instr->hydrogen()->encoding();
2123 Register string = ToRegister(instr->string());
2124
2125 if (FLAG_debug_code) {
2126 Register value = ToRegister(instr->value());
2127 Register index = ToRegister(instr->index());
2128 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2129 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2130 int encoding_mask =
2131 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
2132 ? one_byte_seq_type : two_byte_seq_type;
2133 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
2134 }
2135
2136 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2137 if (instr->value()->IsConstantOperand()) {
2138 int value = ToRepresentation(LConstantOperand::cast(instr->value()),
2139 Representation::Integer32());
2140 DCHECK_LE(0, value);
2141 if (encoding == String::ONE_BYTE_ENCODING) {
2142 DCHECK_LE(value, String::kMaxOneByteCharCode);
2143 __ mov_b(operand, static_cast<int8_t>(value));
2144 } else {
2145 DCHECK_LE(value, String::kMaxUtf16CodeUnit);
2146 __ mov_w(operand, static_cast<int16_t>(value));
2147 }
2148 } else {
2149 Register value = ToRegister(instr->value());
2150 if (encoding == String::ONE_BYTE_ENCODING) {
2151 __ mov_b(operand, value);
2152 } else {
2153 __ mov_w(operand, value);
2154 }
2155 }
2156 }
2157
2158
2159 void LCodeGen::DoAddI(LAddI* instr) {
2160 LOperand* left = instr->left();
2161 LOperand* right = instr->right();
2162
2163 if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
2164 if (right->IsConstantOperand()) {
2165 int32_t offset = ToRepresentation(LConstantOperand::cast(right),
2166 instr->hydrogen()->representation());
2167 __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
2168 } else {
2169 Operand address(ToRegister(left), ToRegister(right), times_1, 0);
2170 __ lea(ToRegister(instr->result()), address);
2171 }
2172 } else {
2173 if (right->IsConstantOperand()) {
2174 __ add(ToOperand(left),
2175 ToImmediate(right, instr->hydrogen()->representation()));
2176 } else {
2177 __ add(ToRegister(left), ToOperand(right));
2178 }
2179 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
2180 DeoptimizeIf(overflow, instr, "overflow");
2181 }
2182 }
2183 }
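// Note: DoAddI prefers lea when the result register differs from the left
// operand, since lea computes base + index (or base + displacement) without
// clobbering either input. Because lea does not update EFLAGS, that form is
// only usable when no overflow deopt is required; the flag-setting add is
// kept for the checked case.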
2184
2185
2186 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2187 LOperand* left = instr->left();
2188 LOperand* right = instr->right();
2189 DCHECK(left->Equals(instr->result()));
2190 HMathMinMax::Operation operation = instr->hydrogen()->operation();
2191 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2192 Label return_left;
2193 Condition condition = (operation == HMathMinMax::kMathMin)
2194 ? less_equal
2195 : greater_equal;
2196 if (right->IsConstantOperand()) {
2197 Operand left_op = ToOperand(left);
2198 Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
2199 instr->hydrogen()->representation());
2200 __ cmp(left_op, immediate);
2201 __ j(condition, &return_left, Label::kNear);
2202 __ mov(left_op, immediate);
2203 } else {
2204 Register left_reg = ToRegister(left);
2205 Operand right_op = ToOperand(right);
2206 __ cmp(left_reg, right_op);
2207 __ j(condition, &return_left, Label::kNear);
2208 __ mov(left_reg, right_op);
2209 }
2210 __ bind(&return_left);
2211 } else {
2212 DCHECK(instr->hydrogen()->representation().IsDouble());
2213 Label check_nan_left, check_zero, return_left, return_right;
2214 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
2215 X87Register left_reg = ToX87Register(left);
2216 X87Register right_reg = ToX87Register(right);
2217
2218 X87PrepareBinaryOp(left_reg, right_reg, ToX87Register(instr->result()));
2219 __ fld(1);
2220 __ fld(1);
2221 __ FCmp();
2222 __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
2223 __ j(equal, &check_zero, Label::kNear); // left == right.
2224 __ j(condition, &return_left, Label::kNear);
2225 __ jmp(&return_right, Label::kNear);
2226
2227 __ bind(&check_zero);
2228 __ fld(0);
2229 __ fldz();
2230 __ FCmp();
2231 __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
2232 // At this point, both left and right are either +0 or -0.
2233 if (operation == HMathMinMax::kMathMin) {
2234 // Push st0 and st1 to the stack, then pop them into temp registers,
2235 // combine their sign bits, and load the result back into left_reg.
2236 Register scratch_reg = ToRegister(instr->temp());
2237 __ fld(1);
2238 __ fld(1);
2239 __ sub(esp, Immediate(2 * kPointerSize));
2240 __ fstp_s(MemOperand(esp, 0));
2241 __ fstp_s(MemOperand(esp, kPointerSize));
2242 __ pop(scratch_reg);
2243 __ xor_(MemOperand(esp, 0), scratch_reg);
2244 X87Mov(left_reg, MemOperand(esp, 0), kX87FloatOperand);
2245 __ pop(scratch_reg); // restore esp
2246 } else {
2247 // Since we operate on +0 and/or -0, addition has the same effect as ANDing the sign bits.
2248 X87Fxch(left_reg);
2249 __ fadd(1);
2250 }
2251 __ jmp(&return_left, Label::kNear);
2252
2253 __ bind(&check_nan_left);
2254 __ fld(0);
2255 __ fld(0);
2256 __ FCmp(); // NaN check.
2257 __ j(parity_even, &return_left, Label::kNear); // left == NaN.
2258
2259 __ bind(&return_right);
2260 X87Fxch(left_reg);
2261 X87Mov(left_reg, right_reg);
2262
2263 __ bind(&return_left);
2264 }
2265 }
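// Note: the zero handling above exists because FCmp cannot distinguish +0
// from -0 (they compare equal), while Math.min(+0, -0) must yield -0 and
// Math.max(+0, -0) must yield +0. Min therefore combines the sign bits via
// integer operations on the stored bit patterns, and max simply adds the
// operands, since +0 + -0 == +0 under round-to-nearest.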
2266
2267
2268 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2269 X87Register left = ToX87Register(instr->left());
2270 X87Register right = ToX87Register(instr->right());
2271 X87Register result = ToX87Register(instr->result());
2272 if (instr->op() != Token::MOD) {
2273 X87PrepareBinaryOp(left, right, result);
2274 }
2275 // Set the precision control to double-precision.
2276 __ X87SetFPUCW(0x027F);
2277 switch (instr->op()) {
2278 case Token::ADD:
2279 __ fadd_i(1);
2280 break;
2281 case Token::SUB:
2282 __ fsub_i(1);
2283 break;
2284 case Token::MUL:
2285 __ fmul_i(1);
2286 break;
2287 case Token::DIV:
2288 __ fdiv_i(1);
2289 break;
2290 case Token::MOD: {
2291 // Pass two doubles as arguments on the stack.
2292 __ PrepareCallCFunction(4, eax);
2293 X87Mov(Operand(esp, 1 * kDoubleSize), right);
2294 X87Mov(Operand(esp, 0), left);
2295 X87Free(right);
2296 DCHECK(left.is(result));
2297 X87PrepareToWrite(result);
2298 __ CallCFunction(
2299 ExternalReference::mod_two_doubles_operation(isolate()),
2300 4);
2301
2302 // Return value is in st(0) on ia32.
2303 X87CommitWrite(result);
2304 break;
2305 }
2306 default:
2307 UNREACHABLE();
2308 break;
2309 }
2310
2311 // Restore the default value of control word.
2312 __ X87SetFPUCW(0x037F);
2313 }
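// Note on the control word values: the x87 FPU computes in 80-bit extended
// precision by default. 0x027F selects 53-bit (double) precision so that
// intermediate results round exactly like JavaScript numbers, and 0x037F
// restores the default 64-bit-mantissa setting once the operation is done.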
2314
2315
2316 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2317 DCHECK(ToRegister(instr->context()).is(esi));
2318 DCHECK(ToRegister(instr->left()).is(edx));
2319 DCHECK(ToRegister(instr->right()).is(eax));
2320 DCHECK(ToRegister(instr->result()).is(eax));
2321
2322 Handle<Code> code =
2323 CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
2324 CallCode(code, RelocInfo::CODE_TARGET, instr);
2325 }
2326
2327
2328 template<class InstrType>
2329 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2330 int left_block = instr->TrueDestination(chunk_);
2331 int right_block = instr->FalseDestination(chunk_);
2332
2333 int next_block = GetNextEmittedBlock();
2334
2335 if (right_block == left_block || cc == no_condition) {
2336 EmitGoto(left_block);
2337 } else if (left_block == next_block) {
2338 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2339 } else if (right_block == next_block) {
2340 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2341 } else {
2342 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2343 __ jmp(chunk_->GetAssemblyLabel(right_block));
2344 }
2345 }
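// Note: EmitBranch exploits the linear block layout. When the true target
// is the next block to be emitted it inverts the condition and jumps only to
// the false target, letting the true path fall through; two jumps are
// emitted only when neither successor is the fall-through block.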
2346
2347
2348 template<class InstrType>
2349 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2350 int false_block = instr->FalseDestination(chunk_);
2351 if (cc == no_condition) {
2352 __ jmp(chunk_->GetAssemblyLabel(false_block));
2353 } else {
2354 __ j(cc, chunk_->GetAssemblyLabel(false_block));
2355 }
2356 }
2357
2358
2359 void LCodeGen::DoBranch(LBranch* instr) {
2360 Representation r = instr->hydrogen()->value()->representation();
2361 if (r.IsSmiOrInteger32()) {
2362 Register reg = ToRegister(instr->value());
2363 __ test(reg, Operand(reg));
2364 EmitBranch(instr, not_zero);
2365 } else if (r.IsDouble()) {
2366 X87Register reg = ToX87Register(instr->value());
2367 X87LoadForUsage(reg);
2368 __ fldz();
2369 __ FCmp();
2370 EmitBranch(instr, not_zero);
2371 } else {
2372 DCHECK(r.IsTagged());
2373 Register reg = ToRegister(instr->value());
2374 HType type = instr->hydrogen()->value()->type();
2375 if (type.IsBoolean()) {
2376 DCHECK(!info()->IsStub());
2377 __ cmp(reg, factory()->true_value());
2378 EmitBranch(instr, equal);
2379 } else if (type.IsSmi()) {
2380 DCHECK(!info()->IsStub());
2381 __ test(reg, Operand(reg));
2382 EmitBranch(instr, not_equal);
2383 } else if (type.IsJSArray()) {
2384 DCHECK(!info()->IsStub());
2385 EmitBranch(instr, no_condition);
2386 } else if (type.IsHeapNumber()) {
2387 UNREACHABLE();
2388 } else if (type.IsString()) {
2389 DCHECK(!info()->IsStub());
2390 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2391 EmitBranch(instr, not_equal);
2392 } else {
2393 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2394 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2395
2396 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2397 // undefined -> false.
2398 __ cmp(reg, factory()->undefined_value());
2399 __ j(equal, instr->FalseLabel(chunk_));
2400 }
2401 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2402 // true -> true.
2403 __ cmp(reg, factory()->true_value());
2404 __ j(equal, instr->TrueLabel(chunk_));
2405 // false -> false.
2406 __ cmp(reg, factory()->false_value());
2407 __ j(equal, instr->FalseLabel(chunk_));
2408 }
2409 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2410 // 'null' -> false.
2411 __ cmp(reg, factory()->null_value());
2412 __ j(equal, instr->FalseLabel(chunk_));
2413 }
2414
2415 if (expected.Contains(ToBooleanStub::SMI)) {
2416 // Smis: 0 -> false, all other -> true.
2417 __ test(reg, Operand(reg));
2418 __ j(equal, instr->FalseLabel(chunk_));
2419 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2420 } else if (expected.NeedsMap()) {
2421 // If we need a map later and have a Smi -> deopt.
2422 __ test(reg, Immediate(kSmiTagMask));
2423 DeoptimizeIf(zero, instr, "Smi");
2424 }
2425
2426 Register map = no_reg; // Keep the compiler happy.
2427 if (expected.NeedsMap()) {
2428 map = ToRegister(instr->temp());
2429 DCHECK(!map.is(reg));
2430 __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
2431
2432 if (expected.CanBeUndetectable()) {
2433 // Undetectable -> false.
2434 __ test_b(FieldOperand(map, Map::kBitFieldOffset),
2435 1 << Map::kIsUndetectable);
2436 __ j(not_zero, instr->FalseLabel(chunk_));
2437 }
2438 }
2439
2440 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2441 // spec object -> true.
2442 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2443 __ j(above_equal, instr->TrueLabel(chunk_));
2444 }
2445
2446 if (expected.Contains(ToBooleanStub::STRING)) {
2447 // String value -> false iff empty.
2448 Label not_string;
2449 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2450 __ j(above_equal, &not_string, Label::kNear);
2451 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2452 __ j(not_zero, instr->TrueLabel(chunk_));
2453 __ jmp(instr->FalseLabel(chunk_));
2454 __ bind(&not_string);
2455 }
2456
2457 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2458 // Symbol value -> true.
2459 __ CmpInstanceType(map, SYMBOL_TYPE);
2460 __ j(equal, instr->TrueLabel(chunk_));
2461 }
2462
2463 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2464 // heap number -> false iff +0, -0, or NaN.
2465 Label not_heap_number;
2466 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2467 factory()->heap_number_map());
2468 __ j(not_equal, &not_heap_number, Label::kNear);
2469 __ fldz();
2470 __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
2471 __ FCmp();
2472 __ j(zero, instr->FalseLabel(chunk_));
2473 __ jmp(instr->TrueLabel(chunk_));
2474 __ bind(&not_heap_number);
2475 }
2476
2477 if (!expected.IsGeneric()) {
2478 // We've seen something for the first time -> deopt.
2479 // This can only happen if we are not generic already.
2480 DeoptimizeIf(no_condition, instr, "unexpected object");
2481 }
2482 }
2483 }
2484 }
2485
2486
2487 void LCodeGen::EmitGoto(int block) {
2488 if (!IsNextEmittedBlock(block)) {
2489 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2490 }
2491 }
2492
2493
2494 void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
2495 }
2496
2497
2498 void LCodeGen::DoGoto(LGoto* instr) {
2499 EmitGoto(instr->block_id());
2500 }
2501
2502
2503 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2504 Condition cond = no_condition;
2505 switch (op) {
2506 case Token::EQ:
2507 case Token::EQ_STRICT:
2508 cond = equal;
2509 break;
2510 case Token::NE:
2511 case Token::NE_STRICT:
2512 cond = not_equal;
2513 break;
2514 case Token::LT:
2515 cond = is_unsigned ? below : less;
2516 break;
2517 case Token::GT:
2518 cond = is_unsigned ? above : greater;
2519 break;
2520 case Token::LTE:
2521 cond = is_unsigned ? below_equal : less_equal;
2522 break;
2523 case Token::GTE:
2524 cond = is_unsigned ? above_equal : greater_equal;
2525 break;
2526 case Token::IN:
2527 case Token::INSTANCEOF:
2528 default:
2529 UNREACHABLE();
2530 }
2531 return cond;
2532 }
2533
2534
2535 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2536 LOperand* left = instr->left();
2537 LOperand* right = instr->right();
2538 bool is_unsigned =
2539 instr->is_double() ||
2540 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2541 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2542 Condition cc = TokenToCondition(instr->op(), is_unsigned);
2543
2544 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2545 // We can statically evaluate the comparison.
2546 double left_val = ToDouble(LConstantOperand::cast(left));
2547 double right_val = ToDouble(LConstantOperand::cast(right));
2548 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2549 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2550 EmitGoto(next_block);
2551 } else {
2552 if (instr->is_double()) {
2553 X87LoadForUsage(ToX87Register(right), ToX87Register(left));
2554 __ FCmp();
2555 // Don't base result on EFLAGS when a NaN is involved. Instead
2556 // jump to the false block.
2557 __ j(parity_even, instr->FalseLabel(chunk_));
2558 } else {
2559 if (right->IsConstantOperand()) {
2560 __ cmp(ToOperand(left),
2561 ToImmediate(right, instr->hydrogen()->representation()));
2562 } else if (left->IsConstantOperand()) {
2563 __ cmp(ToOperand(right),
2564 ToImmediate(left, instr->hydrogen()->representation()));
2565 // We commuted the operands, so commute the condition.
2566 cc = CommuteCondition(cc);
2567 } else {
2568 __ cmp(ToRegister(left), ToOperand(right));
2569 }
2570 }
2571 EmitBranch(instr, cc);
2572 }
2573 }
2574
2575
2576 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2577 Register left = ToRegister(instr->left());
2578
2579 if (instr->right()->IsConstantOperand()) {
2580 Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2581 __ CmpObject(left, right);
2582 } else {
2583 Operand right = ToOperand(instr->right());
2584 __ cmp(left, right);
2585 }
2586 EmitBranch(instr, equal);
2587 }
2588
2589
2590 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2591 if (instr->hydrogen()->representation().IsTagged()) {
2592 Register input_reg = ToRegister(instr->object());
2593 __ cmp(input_reg, factory()->the_hole_value());
2594 EmitBranch(instr, equal);
2595 return;
2596 }
2597
2598 // Put the value on top of the x87 stack.
2599 X87Register src = ToX87Register(instr->object());
2600 X87LoadForUsage(src);
2601 __ fld(0);
2602 __ fld(0);
2603 __ FCmp();
2604 Label ok;
2605 __ j(parity_even, &ok, Label::kNear);
2606 __ fstp(0);
2607 EmitFalseBranch(instr, no_condition);
2608 __ bind(&ok);
2609
2610
2611 __ sub(esp, Immediate(kDoubleSize));
2612 __ fstp_d(MemOperand(esp, 0));
2613
2614 __ add(esp, Immediate(kDoubleSize));
2615 int offset = sizeof(kHoleNanUpper32);
2616 __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
2617 EmitBranch(instr, equal);
2618 }
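// Note: "the hole" in a fast double array is encoded as a NaN with a fixed
// bit pattern. The code above first uses FCmp of the value against itself
// (parity set only for NaN) to filter out ordinary numbers, then compares
// the upper 32 bits of the stored double against kHoleNanUpper32 to
// distinguish the hole from NaNs produced by arithmetic.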
2619
2620
2621 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2622 Representation rep = instr->hydrogen()->value()->representation();
2623 DCHECK(!rep.IsInteger32());
2624
2625 if (rep.IsDouble()) {
2626 X87Register input = ToX87Register(instr->value());
2627 X87LoadForUsage(input);
2628 __ FXamMinusZero();
2629 EmitBranch(instr, equal);
2630 } else {
2631 Register value = ToRegister(instr->value());
2632 Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2633 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
2634 __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
2635 Immediate(0x1));
2636 EmitFalseBranch(instr, no_overflow);
2637 __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
2638 Immediate(0x00000000));
2639 EmitBranch(instr, equal);
2640 }
2641 }
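// Note: in the heap-number path above, -0 is recognized from its raw bits:
// the exponent word is 0x80000000 and the mantissa word is 0. The cmp
// against 1 is a trick: x - 1 sets the overflow flag only for
// x == 0x80000000 (INT32_MIN), so EmitFalseBranch(no_overflow) rejects every
// other exponent word with a single compare.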
2642
2643
2644 Condition LCodeGen::EmitIsObject(Register input,
2645 Register temp1,
2646 Label* is_not_object,
2647 Label* is_object) {
2648 __ JumpIfSmi(input, is_not_object);
2649
2650 __ cmp(input, isolate()->factory()->null_value());
2651 __ j(equal, is_object);
2652
2653 __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
2654 // Undetectable objects behave like undefined.
2655 __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
2656 1 << Map::kIsUndetectable);
2657 __ j(not_zero, is_not_object);
2658
2659 __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
2660 __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
2661 __ j(below, is_not_object);
2662 __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
2663 return below_equal;
2664 }
2665
2666
2667 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2668 Register reg = ToRegister(instr->value());
2669 Register temp = ToRegister(instr->temp());
2670
2671 Condition true_cond = EmitIsObject(
2672 reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2673
2674 EmitBranch(instr, true_cond);
2675 }
2676
2677
2678 Condition LCodeGen::EmitIsString(Register input,
2679 Register temp1,
2680 Label* is_not_string,
2681 SmiCheck check_needed = INLINE_SMI_CHECK) {
2682 if (check_needed == INLINE_SMI_CHECK) {
2683 __ JumpIfSmi(input, is_not_string);
2684 }
2685
2686 Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2687
2688 return cond;
2689 }
2690
2691
2692 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2693 Register reg = ToRegister(instr->value());
2694 Register temp = ToRegister(instr->temp());
2695
2696 SmiCheck check_needed =
2697 instr->hydrogen()->value()->type().IsHeapObject()
2698 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2699
2700 Condition true_cond = EmitIsString(
2701 reg, temp, instr->FalseLabel(chunk_), check_needed);
2702
2703 EmitBranch(instr, true_cond);
2704 }
2705
2706
2707 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2708 Operand input = ToOperand(instr->value());
2709
2710 __ test(input, Immediate(kSmiTagMask));
2711 EmitBranch(instr, zero);
2712 }
2713
2714
2715 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2716 Register input = ToRegister(instr->value());
2717 Register temp = ToRegister(instr->temp());
2718
2719 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2720 STATIC_ASSERT(kSmiTag == 0);
2721 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2722 }
2723 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2724 __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
2725 1 << Map::kIsUndetectable);
2726 EmitBranch(instr, not_zero);
2727 }
2728
2729
2730 static Condition ComputeCompareCondition(Token::Value op) {
2731 switch (op) {
2732 case Token::EQ_STRICT:
2733 case Token::EQ:
2734 return equal;
2735 case Token::LT:
2736 return less;
2737 case Token::GT:
2738 return greater;
2739 case Token::LTE:
2740 return less_equal;
2741 case Token::GTE:
2742 return greater_equal;
2743 default:
2744 UNREACHABLE();
2745 return no_condition;
2746 }
2747 }
2748
2749
2750 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2751 Token::Value op = instr->op();
2752
2753 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2754 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2755
2756 Condition condition = ComputeCompareCondition(op);
2757 __ test(eax, Operand(eax));
2758
2759 EmitBranch(instr, condition);
2760 }
2761
2762
2763 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2764 InstanceType from = instr->from();
2765 InstanceType to = instr->to();
2766 if (from == FIRST_TYPE) return to;
2767 DCHECK(from == to || to == LAST_TYPE);
2768 return from;
2769 }
2770
2771
2772 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2773 InstanceType from = instr->from();
2774 InstanceType to = instr->to();
2775 if (from == to) return equal;
2776 if (to == LAST_TYPE) return above_equal;
2777 if (from == FIRST_TYPE) return below_equal;
2778 UNREACHABLE();
2779 return equal;
2780 }
2781
2782
2783 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2784 Register input = ToRegister(instr->value());
2785 Register temp = ToRegister(instr->temp());
2786
2787 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2788 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2789 }
2790
2791 __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
2792 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2793 }
2794
2795
2796 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2797 Register input = ToRegister(instr->value());
2798 Register result = ToRegister(instr->result());
2799
2800 __ AssertString(input);
2801
2802 __ mov(result, FieldOperand(input, String::kHashFieldOffset));
2803 __ IndexFromHash(result, result);
2804 }
2805
2806
2807 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2808 LHasCachedArrayIndexAndBranch* instr) {
2809 Register input = ToRegister(instr->value());
2810
2811 __ test(FieldOperand(input, String::kHashFieldOffset),
2812 Immediate(String::kContainsCachedArrayIndexMask));
2813 EmitBranch(instr, equal);
2814 }
2815
2816
2817 // Branches to a label or falls through with the answer in the z flag. Trashes
2818 // the temp registers, but not the input.
2819 void LCodeGen::EmitClassOfTest(Label* is_true,
2820 Label* is_false,
2821 Handle<String> class_name,
2822 Register input,
2823 Register temp,
2824 Register temp2) {
2825 DCHECK(!input.is(temp));
2826 DCHECK(!input.is(temp2));
2827 DCHECK(!temp.is(temp2));
2828 __ JumpIfSmi(input, is_false);
2829
2830 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2831 // Assuming the following assertions, we can use the same compares to test
2832 // for both being a function type and being in the object type range.
2833 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2834 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2835 FIRST_SPEC_OBJECT_TYPE + 1);
2836 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2837 LAST_SPEC_OBJECT_TYPE - 1);
2838 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2839 __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2840 __ j(below, is_false);
2841 __ j(equal, is_true);
2842 __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2843 __ j(equal, is_true);
2844 } else {
2845 // Faster code path to avoid two compares: subtract lower bound from the
2846 // actual type and do a signed compare with the width of the type range.
2847 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2848 __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2849 __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2850 __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2851 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2852 __ j(above, is_false);
2853 }
2854
2855 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2856 // Check if the constructor in the map is a function.
2857 __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
2858 // Objects with a non-function constructor have class 'Object'.
2859 __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
2860 if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2861 __ j(not_equal, is_true);
2862 } else {
2863 __ j(not_equal, is_false);
2864 }
2865
2866 // temp now contains the constructor function. Grab the
2867 // instance class name from there.
2868 __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2869 __ mov(temp, FieldOperand(temp,
2870 SharedFunctionInfo::kInstanceClassNameOffset));
2871 // The class name we are testing against is internalized since it's a literal.
2872 // The name in the constructor is internalized because of the way the context
2873 // is booted. This routine isn't expected to work for random API-created
2874 // classes and it doesn't have to because you can't access it with natives
2875 // syntax. Since both sides are internalized it is sufficient to use an
2876 // identity comparison.
2877 __ cmp(temp, class_name);
2878 // End with the answer in the z flag.
2879 }
2880
2881
2882 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2883 Register input = ToRegister(instr->value());
2884 Register temp = ToRegister(instr->temp());
2885 Register temp2 = ToRegister(instr->temp2());
2886
2887 Handle<String> class_name = instr->hydrogen()->class_name();
2888
2889 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2890 class_name, input, temp, temp2);
2891
2892 EmitBranch(instr, equal);
2893 }
2894
2895
2896 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2897 Register reg = ToRegister(instr->value());
2898 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2899 EmitBranch(instr, equal);
2900 }
2901
2902
2903 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2904 // Object and function are in fixed registers defined by the stub.
2905 DCHECK(ToRegister(instr->context()).is(esi));
2906 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2907 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2908
2909 Label true_value, done;
2910 __ test(eax, Operand(eax));
2911 __ j(zero, &true_value, Label::kNear);
2912 __ mov(ToRegister(instr->result()), factory()->false_value());
2913 __ jmp(&done, Label::kNear);
2914 __ bind(&true_value);
2915 __ mov(ToRegister(instr->result()), factory()->true_value());
2916 __ bind(&done);
2917 }
2918
2919
2920 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2921 class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
2922 public:
2923 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2924 LInstanceOfKnownGlobal* instr,
2925 const X87Stack& x87_stack)
2926 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
2927 virtual void Generate() OVERRIDE {
2928 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2929 }
2930 virtual LInstruction* instr() OVERRIDE { return instr_; }
2931 Label* map_check() { return &map_check_; }
2932 private:
2933 LInstanceOfKnownGlobal* instr_;
2934 Label map_check_;
2935 };
2936
2937 DeferredInstanceOfKnownGlobal* deferred;
2938 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
2939
2940 Label done, false_result;
2941 Register object = ToRegister(instr->value());
2942 Register temp = ToRegister(instr->temp());
2943
2944 // A Smi is not an instance of anything.
2945 __ JumpIfSmi(object, &false_result, Label::kNear);
2946
2947 // This is the inlined call site instanceof cache. The two occurrences of the
2948 // hole value will be patched to the last map/result pair generated by the
2949 // instanceof stub.
2950 Label cache_miss;
2951 Register map = ToRegister(instr->temp());
2952 __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
2953 __ bind(deferred->map_check()); // Label for calculating code patching.
2954 Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2955 __ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map.
2956 __ j(not_equal, &cache_miss, Label::kNear);
2957 __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
2958 __ jmp(&done, Label::kNear);
2959
2960 // The inlined call site cache did not match. Check for null and string
2961 // before calling the deferred code.
2962 __ bind(&cache_miss);
2963 // Null is not an instance of anything.
2964 __ cmp(object, factory()->null_value());
2965 __ j(equal, &false_result, Label::kNear);
2966
2967 // String values are not instances of anything.
2968 Condition is_string = masm_->IsObjectStringType(object, temp, temp);
2969 __ j(is_string, &false_result, Label::kNear);
2970
2971 // Go to the deferred code.
2972 __ jmp(deferred->entry());
2973
2974 __ bind(&false_result);
2975 __ mov(ToRegister(instr->result()), factory()->false_value());
2976
2977 // Here result has either true or false. Deferred code also produces true or
2978 // false object.
2979 __ bind(deferred->exit());
2980 __ bind(&done);
2981 }
2982
2983
2984 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2985 Label* map_check) {
2986 PushSafepointRegistersScope scope(this);
2987
2988 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2989 flags = static_cast<InstanceofStub::Flags>(
2990 flags | InstanceofStub::kArgsInRegisters);
2991 flags = static_cast<InstanceofStub::Flags>(
2992 flags | InstanceofStub::kCallSiteInlineCheck);
2993 flags = static_cast<InstanceofStub::Flags>(
2994 flags | InstanceofStub::kReturnTrueFalseObject);
2995 InstanceofStub stub(isolate(), flags);
2996
2997 // Get the temp register reserved by the instruction. This needs to be a
2998 // register which is pushed last by PushSafepointRegisters as top of the
2999 // stack is used to pass the offset to the location of the map check to
3000 // the stub.
3001 Register temp = ToRegister(instr->temp());
3002 DCHECK(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
3003 __ LoadHeapObject(InstanceofStub::right(), instr->function());
3004 static const int kAdditionalDelta = 13;
3005 int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
3006 __ mov(temp, Immediate(delta));
3007 __ StoreToSafepointRegisterSlot(temp, temp);
3008 CallCodeGeneric(stub.GetCode(),
3009 RelocInfo::CODE_TARGET,
3010 instr,
3011 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
3012 // Get the deoptimization index of the LLazyBailout-environment that
3013 // corresponds to this instruction.
3014 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
3015 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
3016
3017 // Put the result value into the eax slot and restore all registers.
3018 __ StoreToSafepointRegisterSlot(eax, eax);
3019 }
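// Note: the deferred instanceof path communicates the distance from the
// patchable map check to the current pc through the safepoint register slot
// of the reserved temp; the stub uses that delta to find and patch the
// inlined map/result cache at the call site, which is why the temp must sit
// at safepoint register stack index 0.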
3020
3021
3022 void LCodeGen::DoCmpT(LCmpT* instr) {
3023 Token::Value op = instr->op();
3024
3025 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
3026 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3027
3028 Condition condition = ComputeCompareCondition(op);
3029 Label true_value, done;
3030 __ test(eax, Operand(eax));
3031 __ j(condition, &true_value, Label::kNear);
3032 __ mov(ToRegister(instr->result()), factory()->false_value());
3033 __ jmp(&done, Label::kNear);
3034 __ bind(&true_value);
3035 __ mov(ToRegister(instr->result()), factory()->true_value());
3036 __ bind(&done);
3037 }
3038
3039
3040 void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
3041 int extra_value_count = dynamic_frame_alignment ? 2 : 1;
3042
3043 if (instr->has_constant_parameter_count()) {
3044 int parameter_count = ToInteger32(instr->constant_parameter_count());
3045 if (dynamic_frame_alignment && FLAG_debug_code) {
3046 __ cmp(Operand(esp,
3047 (parameter_count + extra_value_count) * kPointerSize),
3048 Immediate(kAlignmentZapValue));
3049 __ Assert(equal, kExpectedAlignmentMarker);
3050 }
3051 __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
3052 } else {
3053 Register reg = ToRegister(instr->parameter_count());
3054 // The argument count parameter is a Smi.
3055 __ SmiUntag(reg);
3056 Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
3057 if (dynamic_frame_alignment && FLAG_debug_code) {
3058 DCHECK(extra_value_count == 2);
3059 __ cmp(Operand(esp, reg, times_pointer_size,
3060 extra_value_count * kPointerSize),
3061 Immediate(kAlignmentZapValue));
3062 __ Assert(equal, kExpectedAlignmentMarker);
3063 }
3064
3065 // Emit code to restore the stack based on instr->parameter_count().
3066 __ pop(return_addr_reg); // Save the return address.
3067 if (dynamic_frame_alignment) {
3068 __ inc(reg); // 1 more for alignment.
3069 }
3070 __ shl(reg, kPointerSizeLog2);
3071 __ add(esp, reg);
3072 __ jmp(return_addr_reg);
3073 }
3074 }
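// Note: when the frame was dynamically aligned, an extra padding word was
// pushed along with the arguments, so EmitReturn drops extra_value_count ==
// 2 words beyond the parameters instead of 1 (return address plus padding).
// In debug builds the padding slot is checked against kAlignmentZapValue
// before being skipped, catching frames whose alignment state and actual
// layout disagree.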
3075
3076
3077 void LCodeGen::DoReturn(LReturn* instr) {
3078 if (FLAG_trace && info()->IsOptimizing()) {
3079 // Preserve the return value on the stack and rely on the runtime call
3080 // to return the value in the same register. We're leaving the code
3081 // managed by the register allocator and tearing down the frame, so it's
3082 // safe to write to the context register.
3083 __ push(eax);
3084 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
3085 __ CallRuntime(Runtime::kTraceExit, 1);
3086 }
3087 if (dynamic_frame_alignment_) {
3088 // Fetch the state of the dynamic frame alignment.
3089 __ mov(edx, Operand(ebp,
3090 JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
3091 }
3092 int no_frame_start = -1;
3093 if (NeedsEagerFrame()) {
3094 __ mov(esp, ebp);
3095 __ pop(ebp);
3096 no_frame_start = masm_->pc_offset();
3097 }
3098 if (dynamic_frame_alignment_) {
3099 Label no_padding;
3100 __ cmp(edx, Immediate(kNoAlignmentPadding));
3101 __ j(equal, &no_padding, Label::kNear);
3102
3103 EmitReturn(instr, true);
3104 __ bind(&no_padding);
3105 }
3106
3107 EmitReturn(instr, false);
3108 if (no_frame_start != -1) {
3109 info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
3110 }
3111 }
3112
3113
3114 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
3115 Register result = ToRegister(instr->result());
3116 __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
3117 if (instr->hydrogen()->RequiresHoleCheck()) {
3118 __ cmp(result, factory()->the_hole_value());
3119 DeoptimizeIf(equal, instr, "hole");
3120 }
3121 }
3122
3123
3124 template <class T>
3125 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
3126 DCHECK(FLAG_vector_ics);
3127 Register vector = ToRegister(instr->temp_vector());
3128 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
3129 __ mov(vector, instr->hydrogen()->feedback_vector());
3130 // No need to allocate this register.
3131 DCHECK(VectorLoadICDescriptor::SlotRegister().is(eax));
3132 __ mov(VectorLoadICDescriptor::SlotRegister(),
3133 Immediate(Smi::FromInt(instr->hydrogen()->slot())));
3134 }
3135
3136
3137 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3138 DCHECK(ToRegister(instr->context()).is(esi));
3139 DCHECK(ToRegister(instr->global_object())
3140 .is(LoadDescriptor::ReceiverRegister()));
3141 DCHECK(ToRegister(instr->result()).is(eax));
3142
3143 __ mov(LoadDescriptor::NameRegister(), instr->name());
3144 if (FLAG_vector_ics) {
3145 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
3146 }
3147 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3148 Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
3149 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3150 }
3151
3152
3153 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
3154 Register value = ToRegister(instr->value());
3155 Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
3156
3157 // If the cell we are storing to contains the hole it could have
3158 // been deleted from the property dictionary. In that case, we need
3159 // to update the property details in the property dictionary to mark
3160 // it as no longer deleted. We deoptimize in that case.
3161 if (instr->hydrogen()->RequiresHoleCheck()) {
3162 __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
3163 DeoptimizeIf(equal, instr, "hole");
3164 }
3165
3166 // Store the value.
3167 __ mov(Operand::ForCell(cell_handle), value);
3168 // Cells are always rescanned, so no write barrier here.
3169 }
3170
3171
3172 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3173 Register context = ToRegister(instr->context());
3174 Register result = ToRegister(instr->result());
3175 __ mov(result, ContextOperand(context, instr->slot_index()));
3176
3177 if (instr->hydrogen()->RequiresHoleCheck()) {
3178 __ cmp(result, factory()->the_hole_value());
3179 if (instr->hydrogen()->DeoptimizesOnHole()) {
3180 DeoptimizeIf(equal, instr, "hole");
3181 } else {
3182 Label is_not_hole;
3183 __ j(not_equal, &is_not_hole, Label::kNear);
3184 __ mov(result, factory()->undefined_value());
3185 __ bind(&is_not_hole);
3186 }
3187 }
3188 }
3189
3190
3191 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3192 Register context = ToRegister(instr->context());
3193 Register value = ToRegister(instr->value());
3194
3195 Label skip_assignment;
3196
3197 Operand target = ContextOperand(context, instr->slot_index());
3198 if (instr->hydrogen()->RequiresHoleCheck()) {
3199 __ cmp(target, factory()->the_hole_value());
3200 if (instr->hydrogen()->DeoptimizesOnHole()) {
3201 DeoptimizeIf(equal, instr, "hole");
3202 } else {
3203 __ j(not_equal, &skip_assignment, Label::kNear);
3204 }
3205 }
3206
3207 __ mov(target, value);
3208 if (instr->hydrogen()->NeedsWriteBarrier()) {
3209 SmiCheck check_needed =
3210 instr->hydrogen()->value()->type().IsHeapObject()
3211 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3212 Register temp = ToRegister(instr->temp());
3213 int offset = Context::SlotOffset(instr->slot_index());
3214 __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs,
3215 EMIT_REMEMBERED_SET, check_needed);
3216 }
3217
3218 __ bind(&skip_assignment);
3219 }
3220
3221
3222 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3223 HObjectAccess access = instr->hydrogen()->access();
3224 int offset = access.offset();
3225
3226 if (access.IsExternalMemory()) {
3227 Register result = ToRegister(instr->result());
3228 MemOperand operand = instr->object()->IsConstantOperand()
3229 ? MemOperand::StaticVariable(ToExternalReference(
3230 LConstantOperand::cast(instr->object())))
3231 : MemOperand(ToRegister(instr->object()), offset);
3232 __ Load(result, operand, access.representation());
3233 return;
3234 }
3235
3236 Register object = ToRegister(instr->object());
3237 if (instr->hydrogen()->representation().IsDouble()) {
3238 X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
3239 return;
3240 }
3241
3242 Register result = ToRegister(instr->result());
3243 if (!access.IsInobject()) {
3244 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
3245 object = result;
3246 }
3247 __ Load(result, FieldOperand(object, offset), access.representation());
3248 }
3249
3250
EmitPushTaggedOperand(LOperand * operand)3251 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
3252 DCHECK(!operand->IsDoubleRegister());
3253 if (operand->IsConstantOperand()) {
3254 Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
3255 AllowDeferredHandleDereference smi_check;
3256 if (object->IsSmi()) {
3257 __ Push(Handle<Smi>::cast(object));
3258 } else {
3259 __ PushHeapObject(Handle<HeapObject>::cast(object));
3260 }
3261 } else if (operand->IsRegister()) {
3262 __ push(ToRegister(operand));
3263 } else {
3264 __ push(ToOperand(operand));
3265 }
3266 }
3267
3268
DoLoadNamedGeneric(LLoadNamedGeneric * instr)3269 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3270 DCHECK(ToRegister(instr->context()).is(esi));
3271 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3272 DCHECK(ToRegister(instr->result()).is(eax));
3273
3274 __ mov(LoadDescriptor::NameRegister(), instr->name());
3275 if (FLAG_vector_ics) {
3276 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3277 }
3278 Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
3279 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3280 }
3281
3282
DoLoadFunctionPrototype(LLoadFunctionPrototype * instr)3283 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3284 Register function = ToRegister(instr->function());
3285 Register temp = ToRegister(instr->temp());
3286 Register result = ToRegister(instr->result());
3287
3288 // Get the prototype or initial map from the function.
3289 __ mov(result,
3290 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3291
3292 // Check that the function has a prototype or an initial map.
3293 __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
3294 DeoptimizeIf(equal, instr, "hole");
3295
3296 // If the function does not have an initial map, we're done.
3297 Label done;
3298 __ CmpObjectType(result, MAP_TYPE, temp);
3299 __ j(not_equal, &done, Label::kNear);
3300
3301 // Get the prototype from the initial map.
3302 __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
3303
3304 // All done.
3305 __ bind(&done);
3306 }
3307
3308
DoLoadRoot(LLoadRoot * instr)3309 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3310 Register result = ToRegister(instr->result());
3311 __ LoadRoot(result, instr->index());
3312 }
3313
3314
DoAccessArgumentsAt(LAccessArgumentsAt * instr)3315 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3316 Register arguments = ToRegister(instr->arguments());
3317 Register result = ToRegister(instr->result());
3318 if (instr->length()->IsConstantOperand() &&
3319 instr->index()->IsConstantOperand()) {
3320 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3321 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
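    // Mirrors the register path in the else-branch below: the element lives
    // (length - index) slots above 'arguments', plus one extra word, hence
    // ((const_length - const_index) + 1) * kPointerSize.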
    int index = (const_length - const_index) + 1;
    __ mov(result, Operand(arguments, index * kPointerSize));
  } else {
    Register length = ToRegister(instr->length());
    Operand index = ToOperand(instr->index());
    // There are two words between the frame pointer and the last argument.
    // Subtracting from length accounts for one of them; add one more.
    __ sub(length, index);
    __ mov(result, Operand(arguments, length, times_4, kPointerSize));
  }
}


void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      instr->base_offset()));
  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS) {
    X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
             elements_kind == FLOAT64_ELEMENTS) {
    X87Mov(ToX87Register(instr->result()), operand);
  } else {
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
      case EXTERNAL_INT8_ELEMENTS:
      case INT8_ELEMENTS:
        __ movsx_b(result, operand);
        break;
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ movzx_b(result, operand);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case INT16_ELEMENTS:
        __ movsx_w(result, operand);
        break;
      case EXTERNAL_UINT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ movzx_w(result, operand);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case INT32_ELEMENTS:
        __ mov(result, operand);
        break;
      case EXTERNAL_UINT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ mov(result, operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          __ test(result, Operand(result));
          DeoptimizeIf(negative, instr, "negative value");
        }
        break;
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  if (instr->hydrogen()->RequiresHoleCheck()) {
    Operand hole_check_operand = BuildFastArrayOperand(
        instr->elements(), instr->key(),
        instr->hydrogen()->key()->representation(),
        FAST_DOUBLE_ELEMENTS,
        instr->base_offset() + sizeof(kHoleNanLower32));
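    // The hole is a NaN with a distinguished bit pattern; on little-endian
    // ia32 its upper 32 bits sit at offset + sizeof(kHoleNanLower32), so
    // comparing that one word against kHoleNanUpper32 is sufficient.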
    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
    DeoptimizeIf(equal, instr, "hole");
  }

  Operand double_load_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      instr->base_offset());
  X87Mov(ToX87Register(instr->result()), double_load_operand);
}


void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register result = ToRegister(instr->result());

  // Load the result.
  __ mov(result,
         BuildFastArrayOperand(instr->elements(), instr->key(),
                               instr->hydrogen()->key()->representation(),
                               FAST_ELEMENTS, instr->base_offset()));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      __ test(result, Immediate(kSmiTagMask));
      DeoptimizeIf(not_equal, instr, "not a Smi");
    } else {
      __ cmp(result, factory()->the_hole_value());
      DeoptimizeIf(equal, instr, "hole");
    }
  }
}


void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_typed_elements()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}


Operand LCodeGen::BuildFastArrayOperand(
    LOperand* elements_pointer,
    LOperand* key,
    Representation key_representation,
    ElementsKind elements_kind,
    uint32_t base_offset) {
  Register elements_pointer_reg = ToRegister(elements_pointer);
  int element_shift_size = ElementsKindToShiftSize(elements_kind);
  int shift_size = element_shift_size;
  if (key->IsConstantOperand()) {
    int constant_value = ToInteger32(LConstantOperand::cast(key));
    if (constant_value & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    return Operand(elements_pointer_reg,
                   (constant_value << shift_size) + base_offset);
  } else {
    // Take the tag bit into account while computing the shift size.
    if (key_representation.IsSmi() && (shift_size >= 1)) {
      shift_size -= kSmiTagSize;
    }
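    // A smi key on ia32 is the index shifted left by kSmiTagSize (1), so it
    // already carries one factor of two; e.g. FAST_ELEMENTS wants shift 2,
    // and scaling the smi key by times_2 addresses the same element.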
    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
    return Operand(elements_pointer_reg,
                   ToRegister(key),
                   scale_factor,
                   base_offset);
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));

  if (FLAG_vector_ics) {
    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ lea(result, Operand(esp, -2 * kPointerSize));
  } else {
    // Check for an arguments adaptor frame.
    Label done, adapted;
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
    __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
    __ cmp(Operand(result),
           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
    __ j(equal, &adapted, Label::kNear);

    // No arguments adaptor frame.
    __ mov(result, Operand(ebp));
    __ jmp(&done, Label::kNear);

    // Arguments adaptor frame present.
    __ bind(&adapted);
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

    // Result is the frame pointer for the frame if not adapted and for the
    // real frame below the adaptor frame if adapted.
    __ bind(&done);
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Operand elem = ToOperand(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ cmp(ebp, elem);
  __ mov(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  // Arguments adaptor frame present. Get the argument length from there.
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(result, Operand(result,
                         ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label receiver_ok, global_object;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  Register scratch = ToRegister(instr->temp());

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode
    // functions.
    __ mov(scratch,
           FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
              1 << SharedFunctionInfo::kStrictModeBitWithinByte);
    __ j(not_equal, &receiver_ok, dist);

    // Do not transform the receiver to object for builtins.
    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
              1 << SharedFunctionInfo::kNativeBitWithinByte);
    __ j(not_equal, &receiver_ok, dist);
  }

  // Normal function. Replace undefined or null with the global receiver.
  __ cmp(receiver, factory()->null_value());
  __ j(equal, &global_object, Label::kNear);
  __ cmp(receiver, factory()->undefined_value());
  __ j(equal, &global_object, Label::kNear);

  // The receiver should be a JS object.
  __ test(receiver, Immediate(kSmiTagMask));
  DeoptimizeIf(equal, instr, "Smi");
  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
  DeoptimizeIf(below, instr, "not a JavaScript object");

  __ jmp(&receiver_ok, Label::kNear);
  __ bind(&global_object);
  __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
  const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
  __ mov(receiver, Operand(receiver, global_offset));
  const int proxy_offset = GlobalObject::kGlobalProxyOffset;
  __ mov(receiver, FieldOperand(receiver, proxy_offset));
  __ bind(&receiver_ok);
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  DCHECK(receiver.is(eax));  // Used for parameter count.
  DCHECK(function.is(edi));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).is(eax));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
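  // Note: 1 * KB here is an argument count, not a byte size. apply() with
  // more than 1024 arguments deoptimizes below rather than pushing an
  // unbounded number of stack slots.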
  __ cmp(length, kArgumentsLimit);
  DeoptimizeIf(above, instr, "too many arguments");

  __ push(receiver);
  __ mov(receiver, length);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ test(length, Operand(length));
  __ j(zero, &invoke, Label::kNear);
  __ bind(&loop);
  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
  __ dec(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);
  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  ParameterCount actual(eax);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ int3();
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  EmitPushTaggedOperand(argument);
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in esi.
    DCHECK(result.is(esi));
  }
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  __ push(esi);  // The context is the first argument.
  __ push(Immediate(instr->hydrogen()->pairs()));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}


void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 EDIState edi_state) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;
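
  // When the declared and the passed arity match, or the callee was marked
  // with kDontAdaptArgumentsSentinel, we can call the code entry directly;
  // otherwise the call has to go through the arguments adaptor, handled in
  // the else branch below.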
  if (can_invoke_directly) {
    if (edi_state == EDI_UNINITIALIZED) {
      __ LoadHeapObject(edi, function);
    }

    // Change context.
    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

    // Set eax to the number of arguments if adaptation is not needed.
    // Assumes that eax is available to write to at this point.
    if (dont_adapt_arguments) {
      __ mov(eax, arity);
    }

    // Invoke function directly.
    if (function.is_identical_to(info()->closure())) {
      __ CallSelf();
    } else {
      __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
    }
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    // We need to adapt arguments.
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
  }
}


void LCodeGen::DoTailCallThroughMegamorphicCache(
    LTailCallThroughMegamorphicCache* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register name = ToRegister(instr->name());
  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(name.is(LoadDescriptor::NameRegister()));

  Register scratch = ebx;
  Register extra = eax;
  DCHECK(!scratch.is(receiver) && !scratch.is(name));
  DCHECK(!extra.is(receiver) && !extra.is(name));

  // Important for the tail-call: our frame, if any, must be torn down before
  // control leaves this code object, so tell the probe whether one exists.
  bool must_teardown_frame = NeedsEagerFrame();

  // The probe will tail call to a handler if found.
  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
                                         must_teardown_frame, receiver, name,
                                         scratch, extra);

  // Tail call to miss if we ended up here.
  if (must_teardown_frame) __ leave();
  LoadIC::GenerateMiss(masm());
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(eax));

  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

  if (instr->target()->IsConstantOperand()) {
    LConstantOperand* target = LConstantOperand::cast(instr->target());
    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
    __ call(code, RelocInfo::CODE_TARGET);
  } else {
    DCHECK(instr->target()->IsRegister());
    Register target = ToRegister(instr->target());
    generator.BeforeCall(__ CallSize(Operand(target)));
    __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
    __ call(target);
  }
  generator.AfterCall();
}


void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  DCHECK(ToRegister(instr->function()).is(edi));
  DCHECK(ToRegister(instr->result()).is(eax));

  if (instr->hydrogen()->pass_argument_count()) {
    __ mov(eax, instr->arity());
  }

  // Change context.
  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

  bool is_self_call = false;
  if (instr->hydrogen()->function()->IsConstant()) {
    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
    Handle<JSFunction> jsfun =
        Handle<JSFunction>::cast(fun_const->handle(isolate()));
    is_self_call = jsfun.is_identical_to(info()->closure());
  }

  if (is_self_call) {
    __ CallSelf();
  } else {
    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
  }

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  DeoptimizeIf(not_equal, instr, "not a heap number");

  Label slow, allocated, done;
  Register tmp = input_reg.is(eax) ? ecx : eax;
  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ test(tmp, Immediate(HeapNumber::kSignMask));
  __ j(zero, &done, Label::kNear);

  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
  __ jmp(&allocated, Label::kNear);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
                          instr, instr->context());
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(eax)) __ mov(tmp, eax);
  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
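  // The input heap number may be shared, so the absolute value is built in a
  // freshly allocated number: copy both 32-bit words and clear the IEEE-754
  // sign bit in the word that holds sign and exponent.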
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  __ and_(tmp2, ~HeapNumber::kSignMask);
  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}


void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ test(input_reg, Operand(input_reg));
  Label is_positive;
  __ j(not_sign, &is_positive, Label::kNear);
  __ neg(input_reg);  // Sets flags.
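  // Only kMinInt (0x80000000) negates to itself and leaves the sign flag
  // set, since its absolute value is not representable in 32 bits.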
  DeoptimizeIf(negative, instr, "overflow");
  __ bind(&is_positive);
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LMathAbs* instr,
                                    const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LMathAbs* instr_;
  };

  DCHECK(instr->value()->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  if (r.IsDouble()) {
    X87Register value = ToX87Register(instr->value());
    X87Fxch(value);
    __ fabs();
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
    Register input_reg = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  Register output_reg = ToRegister(instr->result());
  X87Register input_reg = ToX87Register(instr->value());
  X87Fxch(input_reg);

  Label not_minus_zero, done;
  // Deoptimize on unordered.
  __ fldz();
  __ fld(1);
  __ FCmp();
  DeoptimizeIf(parity_even, instr, "NaN");
  __ j(below, &not_minus_zero, Label::kNear);

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Check for negative zero.
    __ j(not_equal, &not_minus_zero, Label::kNear);
    // +- 0.0.
    __ fld(0);
    __ FXamSign();
    DeoptimizeIf(not_zero, instr, "minus zero");
    __ Move(output_reg, Immediate(0));
    __ jmp(&done, Label::kFar);
  }

  // Positive input.
  // rc=01B, round down.
  __ bind(&not_minus_zero);
  __ fnclex();
  __ X87SetRC(0x0400);
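  // RC occupies bits 10-11 of the x87 control word: 00B = round to nearest,
  // 01B = round down (0x0400), 10B = round up, 11B = truncate (0x0c00).
  // Rounding down makes fist_s compute floor() directly.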
  __ sub(esp, Immediate(kPointerSize));
  __ fist_s(Operand(esp, 0));
  __ pop(output_reg);
  __ X87CheckIA();
  DeoptimizeIf(equal, instr, "overflow");
  __ fnclex();
  __ X87SetRC(0x0000);
  __ bind(&done);
}


void LCodeGen::DoMathRound(LMathRound* instr) {
  X87Register input_reg = ToX87Register(instr->value());
  Register result = ToRegister(instr->result());
  X87Fxch(input_reg);
  Label below_one_half, below_minus_one_half, done;

  ExternalReference one_half = ExternalReference::address_of_one_half();
  ExternalReference minus_one_half =
      ExternalReference::address_of_minus_one_half();

  __ fld_d(Operand::StaticVariable(one_half));
  __ fld(1);
  __ FCmp();
  __ j(carry, &below_one_half);

  // Rounding toward zero works here: since 0.5 <= x, trunc(x + 0.5) equals
  // floor(x + 0.5), which is the round-half-up result we need.
  __ fld(0);
  __ fadd_d(Operand::StaticVariable(one_half));
  // rc=11B, round toward zero.
  __ X87SetRC(0x0c00);
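  // fnclex below clears the sticky exception flags first, so the
  // invalid-operation flag inspected by X87CheckIA can only have been raised
  // by this fistp_s (an out-of-range conversion), not by earlier code.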
  __ sub(esp, Immediate(kPointerSize));
  // Clear exception bits.
  __ fnclex();
  __ fistp_s(MemOperand(esp, 0));
  // Check overflow.
  __ X87CheckIA();
  __ pop(result);
  DeoptimizeIf(equal, instr, "conversion overflow");
  __ fnclex();
  // Restore round mode.
  __ X87SetRC(0x0000);
  __ jmp(&done);

  __ bind(&below_one_half);
  __ fld_d(Operand::StaticVariable(minus_one_half));
  __ fld(1);
  __ FCmp();
  __ j(carry, &below_minus_one_half);
  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
  // we can ignore the difference between a result of -0 and +0.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // If the sign is positive, we return +0.
    __ fld(0);
    __ FXamSign();
    DeoptimizeIf(not_zero, instr, "minus zero");
  }
  __ Move(result, Immediate(0));
  __ jmp(&done);

  __ bind(&below_minus_one_half);
  __ fld(0);
  __ fadd_d(Operand::StaticVariable(one_half));
  // rc=01B, round down.
  __ X87SetRC(0x0400);
  __ sub(esp, Immediate(kPointerSize));
  // Clear exception bits.
  __ fnclex();
  __ fistp_s(MemOperand(esp, 0));
  // Check overflow.
  __ X87CheckIA();
  __ pop(result);
  DeoptimizeIf(equal, instr, "conversion overflow");
  __ fnclex();
  // Restore round mode.
  __ X87SetRC(0x0000);

  __ bind(&done);
}


void LCodeGen::DoMathFround(LMathFround* instr) {
  X87Register input_reg = ToX87Register(instr->value());
  X87Fxch(input_reg);
  __ sub(esp, Immediate(kPointerSize));
  __ fstp_s(MemOperand(esp, 0));
  X87Fld(MemOperand(esp, 0), kX87FloatOperand);
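  // Storing to a 32-bit stack slot and reloading rounds the value to single
  // precision, which is exactly the Math.fround semantics required here.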
  __ add(esp, Immediate(kPointerSize));
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  X87Register input = ToX87Register(instr->value());
  X87Register result_reg = ToX87Register(instr->result());
  Register temp_result = ToRegister(instr->temp1());
  Register temp = ToRegister(instr->temp2());
  Label slow, done, smi, finish;
  DCHECK(result_reg.is(input));

  // Store the input into a heap number and call the runtime function
  // kMathSqrtRT.
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
    __ jmp(&done, Label::kNear);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ Move(temp_result, Immediate(0));

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(temp_result, eax);
  }
  __ bind(&done);
  X87LoadForUsage(input);
  __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset));

  {
    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
    __ push(temp_result);
    __ CallRuntimeSaveDoubles(Runtime::kMathSqrtRT);
    RecordSafepointWithRegisters(instr->pointer_map(), 1,
                                 Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(temp_result, eax);
  }
  X87PrepareToWrite(result_reg);
  // The return value of kMathSqrtRT is a Smi or a heap number.
  __ JumpIfSmi(temp_result, &smi);
  // Heap number (double).
  __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
  __ jmp(&finish);
  // Smi.
  __ bind(&smi);
  __ SmiUntag(temp_result);
  __ push(temp_result);
  __ fild_s(MemOperand(esp, 0));
  __ pop(temp_result);
  __ bind(&finish);
  X87CommitWrite(result_reg);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  X87Register input_reg = ToX87Register(instr->value());
  DCHECK(ToX87Register(instr->result()).is(input_reg));
  X87Fxch(input_reg);
  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done, sqrt;
  // Check base for -Infinity. C3 == 0, C2 == 1, C1 == 1 and C0 == 1.
  __ fxam();
  __ push(eax);
  __ fnstsw_ax();
  __ and_(eax, Immediate(0x4700));
  __ cmp(eax, Immediate(0x0700));
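  // In the FPU status word C0 is bit 8, C1 bit 9, C2 bit 10 and C3 bit 14;
  // 0x4700 masks exactly those, and 0x0700 (C3=0, C2=1, C1=1, C0=1) is the
  // fxam encoding for an infinity with the sign bit set, i.e. -Infinity.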
  __ j(not_equal, &sqrt, Label::kNear);
  // If input is -Infinity, return Infinity.
  __ fchs();
  __ jmp(&done, Label::kNear);

  // Square root.
  __ bind(&sqrt);
  __ fldz();
  __ faddp();  // Convert -0 to +0.
  __ fsqrt();
  __ bind(&done);
  __ pop(eax);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  X87Register result = ToX87Register(instr->result());
  // Having marked this as a call, we can use any registers.
  X87Register base = ToX87Register(instr->left());
  ExternalReference one_half = ExternalReference::address_of_one_half();

  if (exponent_type.IsSmi()) {
    Register exponent = ToRegister(instr->right());
    X87LoadForUsage(base);
    __ SmiUntag(exponent);
    __ push(exponent);
    __ fild_s(MemOperand(esp, 0));
    __ pop(exponent);
  } else if (exponent_type.IsTagged()) {
    Register exponent = ToRegister(instr->right());
    Register temp = exponent.is(ecx) ? eax : ecx;
    Label no_deopt, done;
    X87LoadForUsage(base);
    __ JumpIfSmi(exponent, &no_deopt);
    __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp);
    DeoptimizeIf(not_equal, instr, "not a heap number");
    // Heap number (double).
    __ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset));
    __ jmp(&done);
    // Smi.
    __ bind(&no_deopt);
    __ SmiUntag(exponent);
    __ push(exponent);
    __ fild_s(MemOperand(esp, 0));
    __ pop(exponent);
    __ bind(&done);
  } else if (exponent_type.IsInteger32()) {
    Register exponent = ToRegister(instr->right());
    X87LoadForUsage(base);
    __ push(exponent);
    __ fild_s(MemOperand(esp, 0));
    __ pop(exponent);
  } else {
    DCHECK(exponent_type.IsDouble());
    X87Register exponent_double = ToX87Register(instr->right());
    X87LoadForUsage(base, exponent_double);
  }

  // FP data stack {base, exponent(TOS)}.
  // Handle (exponent==+-0.5 && base == -0).
  Label not_plus_0;
  __ fld(0);
  __ fabs();
  X87Fld(Operand::StaticVariable(one_half), kX87DoubleOperand);
  __ FCmp();
  __ j(parity_even, &not_plus_0, Label::kNear);  // NaN.
  __ j(not_equal, &not_plus_0, Label::kNear);
  __ fldz();
  // FP data stack {base, exponent(TOS), zero}.
  __ faddp(2);
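  // IEEE-754: -0 + +0 == +0, so adding zero into the base slot rewrites a
  // -0 base to +0 and pow(-0, +-0.5) comes out as +0 / +Infinity without a
  // special case in the C fallback below.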
  __ bind(&not_plus_0);

  {
    __ PrepareCallCFunction(4, eax);
    __ fstp_d(MemOperand(esp, kDoubleSize));  // Exponent value.
    __ fstp_d(MemOperand(esp, 0));            // Base value.
    X87PrepareToWrite(result);
    __ CallCFunction(
        ExternalReference::power_double_double_function(isolate()), 4);
    // Return value is in st(0) on ia32.
    X87CommitWrite(result);
  }
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  DCHECK(instr->value()->Equals(instr->result()));
  X87Register input_reg = ToX87Register(instr->value());
  X87Fxch(input_reg);

  Label positive, done, zero, nan_result;
  __ fldz();
  __ fld(1);
  __ FCmp();
  __ j(below, &nan_result, Label::kNear);
  __ j(equal, &zero, Label::kNear);
  // Positive input.
  // {input, ln2}.
  __ fldln2();
  // {ln2, input}.
  __ fxch();
  // {result}.
  __ fyl2x();
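  // fyl2x computes st(1) * log2(st(0)) and pops, so with ln(2) beneath the
  // input it yields ln(2) * log2(x) == ln(x).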
  __ jmp(&done, Label::kNear);

  __ bind(&nan_result);
  ExternalReference nan =
      ExternalReference::address_of_canonical_non_hole_nan();
  X87PrepareToWrite(input_reg);
  __ fld_d(Operand::StaticVariable(nan));
  X87CommitWrite(input_reg);
  __ jmp(&done, Label::kNear);

  __ bind(&zero);
  ExternalReference ninf = ExternalReference::address_of_negative_infinity();
  X87PrepareToWrite(input_reg);
  __ fld_d(Operand::StaticVariable(ninf));
  X87CommitWrite(input_reg);

  __ bind(&done);
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Label not_zero_input;
  __ bsr(result, input);
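  // bsr returns the index of the most significant set bit and sets ZF for a
  // zero input (leaving the destination undefined), hence the fix-up below.
  // Example: bsr(8) == 3, and 31 ^ 3 == 28 == clz32(8).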
  __ j(not_zero, &not_zero_input);
  __ Move(result, Immediate(63));  // 63 ^ 31 == 32.

  __ bind(&not_zero_input);
  __ xor_(result, Immediate(31));  // For x in [0..31], 31 ^ x == 31 - x.
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  X87Register input = ToX87Register(instr->value());
  X87Register result_reg = ToX87Register(instr->result());
  Register temp_result = ToRegister(instr->temp1());
  Register temp = ToRegister(instr->temp2());
  Label slow, done, smi, finish;
  DCHECK(result_reg.is(input));

  // Store the input into a heap number and call the runtime function
  // kMathExpRT.
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
    __ jmp(&done, Label::kNear);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ Move(temp_result, Immediate(0));

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(temp_result, eax);
  }
  __ bind(&done);
  X87LoadForUsage(input);
  __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset));

  {
    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
    __ push(temp_result);
    __ CallRuntimeSaveDoubles(Runtime::kMathExpRT);
    RecordSafepointWithRegisters(instr->pointer_map(), 1,
                                 Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(temp_result, eax);
  }
  X87PrepareToWrite(result_reg);
  // The return value of kMathExpRT is a Smi or a heap number.
  __ JumpIfSmi(temp_result, &smi);
  // Heap number (double).
  __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
  __ jmp(&finish);
  // Smi.
  __ bind(&smi);
  __ SmiUntag(temp_result);
  __ push(temp_result);
  __ fild_s(MemOperand(esp, 0));
  __ pop(temp_result);
  __ bind(&finish);
  X87CommitWrite(result_reg);
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->function()).is(edi));
  DCHECK(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      EDI_CONTAINS_TARGET);
  }
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->function()).is(edi));
  DCHECK(ToRegister(instr->result()).is(eax));

  int arity = instr->arity();
  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->constructor()).is(edi));
  DCHECK(ToRegister(instr->result()).is(eax));

  // No cell in ebx for construct type feedback in optimized code.
  __ mov(ebx, isolate()->factory()->undefined_value());
  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  __ Move(eax, Immediate(instr->arity()));
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->constructor()).is(edi));
  DCHECK(ToRegister(instr->result()).is(eax));

  __ Move(eax, Immediate(instr->arity()));
  __ mov(ebx, isolate()->factory()->undefined_value());
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // Look at the first argument: 'new Array(0)' stays packed, while a
      // non-zero length must use the holey variant of the stub.
      __ mov(ecx, Operand(esp, 0));
      __ test(ecx, ecx);
      __ j(zero, &packed_case, Label::kNear);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done, Label::kNear);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
  __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ lea(result, Operand(base, ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ lea(result, Operand(base, offset, times_1, 0));
  }
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->hydrogen()->field_representation();

  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    MemOperand operand = instr->object()->IsConstantOperand()
        ? MemOperand::StaticVariable(
              ToExternalReference(LConstantOperand::cast(instr->object())))
        : MemOperand(ToRegister(instr->object()), offset);
    if (instr->value()->IsConstantOperand()) {
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
      __ mov(operand, Immediate(ToInteger32(operand_value)));
    } else {
      Register value = ToRegister(instr->value());
      __ Store(value, operand, representation);
    }
    return;
  }

  Register object = ToRegister(instr->object());
  __ AssertNotSmi(object);
  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
  if (representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    X87Register value = ToX87Register(instr->value());
    X87Mov(FieldOperand(object, offset), value);
    return;
  }

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      Register temp_map = ToRegister(instr->temp_map());
      __ mov(temp_map, transition);
      __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
    }
  }

  // Do the store.
  Register write_register = object;
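  // Out-of-object fields live in the separate properties backing store, so
  // the write (and the barrier below) is redirected to that array.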
  if (!access.IsInobject()) {
    write_register = ToRegister(instr->temp());
    __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
  }

  MemOperand operand = FieldOperand(write_register, offset);
  if (instr->value()->IsConstantOperand()) {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (operand_value->IsRegister()) {
      Register value = ToRegister(operand_value);
      __ Store(value, operand, representation);
    } else if (representation.IsInteger32()) {
      Immediate immediate = ToImmediate(operand_value, representation);
      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
      __ mov(operand, immediate);
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
      __ mov(operand, handle_value);
    }
  } else {
    Register value = ToRegister(instr->value());
    __ Store(value, operand, representation);
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    Register value = ToRegister(instr->value());
    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
    // Update the write barrier for the object for in-object properties.
    __ RecordWriteField(write_register, offset, value, temp, kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        instr->hydrogen()->SmiCheckForWriteBarrier(),
                        instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  __ mov(StoreDescriptor::NameRegister(), instr->name());
  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
  if (instr->index()->IsConstantOperand()) {
    __ cmp(ToOperand(instr->length()),
           ToImmediate(LConstantOperand::cast(instr->index()),
                       instr->hydrogen()->length()->representation()));
    cc = CommuteCondition(cc);
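    // The compare was emitted as (length, index) rather than (index, length),
    // so the condition must be commuted to keep the same meaning.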
  } else if (instr->length()->IsConstantOperand()) {
    __ cmp(ToOperand(instr->index()),
           ToImmediate(LConstantOperand::cast(instr->length()),
                       instr->hydrogen()->index()->representation()));
  } else {
    __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, "out of bounds");
  }
}


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      instr->base_offset()));
  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS) {
    X87Mov(operand, ToX87Register(instr->value()), kX87FloatOperand);
  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
             elements_kind == FLOAT64_ELEMENTS) {
    X87Mov(operand, ToX87Register(instr->value()));
  } else {
    Register value = ToRegister(instr->value());
    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case INT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ mov_b(operand, value);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case UINT16_ELEMENTS:
      case INT16_ELEMENTS:
        __ mov_w(operand, value);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case UINT32_ELEMENTS:
      case INT32_ELEMENTS:
        __ mov(operand, value);
        break;
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  ExternalReference canonical_nan_reference =
      ExternalReference::address_of_canonical_non_hole_nan();
  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      instr->base_offset());

  if (instr->hydrogen()->IsConstantHoleStore()) {
    // This means we should store the (double) hole. No floating point
    // registers are required.
    double nan_double = FixedDoubleArray::hole_nan_as_double();
    uint64_t int_val = bit_cast<uint64_t, double>(nan_double);
    int32_t lower = static_cast<int32_t>(int_val);
    int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
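    // ia32 has no 64-bit integer store, so the hole's bit pattern goes out as
    // two 32-bit halves: the low word at the element's offset and the high
    // word kPointerSize above it (little-endian layout).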
    __ mov(double_store_operand, Immediate(lower));
    Operand double_store_operand2 = BuildFastArrayOperand(
        instr->elements(),
        instr->key(),
        instr->hydrogen()->key()->representation(),
        FAST_DOUBLE_ELEMENTS,
        instr->base_offset() + kPointerSize);
    __ mov(double_store_operand2, Immediate(upper));
  } else {
    Label no_special_nan_handling;
    X87Register value = ToX87Register(instr->value());
    X87Fxch(value);

    if (instr->NeedsCanonicalization()) {
      __ fld(0);
      __ fld(0);
      __ FCmp();
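      // The value is compared against itself: only a NaN is unordered, which
      // FCmp reports through the parity flag; parity_odd (PF clear) means an
      // ordinary double that needs no canonicalization.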
4640
4641 __ j(parity_odd, &no_special_nan_handling, Label::kNear);
4642 __ sub(esp, Immediate(kDoubleSize));
4643 __ fst_d(MemOperand(esp, 0));
4644 __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
4645 Immediate(kHoleNanUpper32));
4646 __ add(esp, Immediate(kDoubleSize));
4647 Label canonicalize;
4648 __ j(not_equal, &canonicalize, Label::kNear);
4649 __ jmp(&no_special_nan_handling, Label::kNear);
4650 __ bind(&canonicalize);
4651 __ fstp(0);
4652 __ fld_d(Operand::StaticVariable(canonical_nan_reference));
4653 }
4654
4655 __ bind(&no_special_nan_handling);
4656 __ fst_d(double_store_operand);
4657 }
4658 }
4659
4660
DoStoreKeyedFixedArray(LStoreKeyed * instr)4661 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4662 Register elements = ToRegister(instr->elements());
4663 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4664
4665 Operand operand = BuildFastArrayOperand(
4666 instr->elements(),
4667 instr->key(),
4668 instr->hydrogen()->key()->representation(),
4669 FAST_ELEMENTS,
4670 instr->base_offset());
4671 if (instr->value()->IsRegister()) {
4672 __ mov(operand, ToRegister(instr->value()));
4673 } else {
4674 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4675 if (IsSmi(operand_value)) {
4676 Immediate immediate = ToImmediate(operand_value, Representation::Smi());
4677 __ mov(operand, immediate);
4678 } else {
4679 DCHECK(!IsInteger32(operand_value));
4680 Handle<Object> handle_value = ToHandle(operand_value);
4681 __ mov(operand, handle_value);
4682 }
4683 }
4684
4685 if (instr->hydrogen()->NeedsWriteBarrier()) {
4686 DCHECK(instr->value()->IsRegister());
4687 Register value = ToRegister(instr->value());
4688 DCHECK(!instr->key()->IsConstantOperand());
4689 SmiCheck check_needed =
4690 instr->hydrogen()->value()->type().IsHeapObject()
4691 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4692 // Compute address of modified element and store it into key register.
4693 __ lea(key, operand);
4694 __ RecordWrite(elements, key, value, kSaveFPRegs, EMIT_REMEMBERED_SET,
4695 check_needed,
4696 instr->hydrogen()->PointersToHereCheckForValue());
4697 }
4698 }
4699
4700
DoStoreKeyed(LStoreKeyed * instr)4701 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4702 // By cases...external, fast-double, fast
4703 if (instr->is_typed_elements()) {
4704 DoStoreKeyedExternalArray(instr);
4705 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4706 DoStoreKeyedFixedDoubleArray(instr);
4707 } else {
4708 DoStoreKeyedFixedArray(instr);
4709 }
4710 }
4711
4712
DoStoreKeyedGeneric(LStoreKeyedGeneric * instr)4713 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4714 DCHECK(ToRegister(instr->context()).is(esi));
4715 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4716 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4717 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4718
4719 Handle<Code> ic =
4720 CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
4721 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4722 }
4723
4724
DoTrapAllocationMemento(LTrapAllocationMemento * instr)4725 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4726 Register object = ToRegister(instr->object());
4727 Register temp = ToRegister(instr->temp());
4728 Label no_memento_found;
4729 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4730 DeoptimizeIf(equal, instr, "memento found");
4731 __ bind(&no_memento_found);
4732 }
4733
4734
DoTransitionElementsKind(LTransitionElementsKind * instr)4735 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4736 Register object_reg = ToRegister(instr->object());
4737
4738 Handle<Map> from_map = instr->original_map();
4739 Handle<Map> to_map = instr->transitioned_map();
4740 ElementsKind from_kind = instr->from_kind();
4741 ElementsKind to_kind = instr->to_kind();
4742
4743 Label not_applicable;
4744 bool is_simple_map_transition =
4745 IsSimpleMapChangeTransition(from_kind, to_kind);
  Label::Distance branch_distance =
      is_simple_map_transition ? Label::kNear : Label::kFar;
  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
  __ j(not_equal, &not_applicable, branch_distance);
  if (is_simple_map_transition) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
           Immediate(to_map));
    // Write barrier.
    DCHECK_NE(instr->temp(), NULL);
    __ RecordWriteForMap(object_reg, to_map, new_map_reg,
                         ToRegister(instr->temp()), kDontSaveFPRegs);
  } else {
    DCHECK(ToRegister(instr->context()).is(esi));
    DCHECK(object_reg.is(eax));
    PushSafepointRegistersScope scope(this);
    __ mov(ebx, to_map);
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(instr,
        RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen,
                             LStringCharCodeAt* instr,
                             const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredStringCharCodeAt(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);

  StringCharLoadGenerator::Generate(masm(),
                                    factory(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
                                      Representation::Smi());
    __ push(immediate);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2,
                          instr, instr->context());
  __ AssertSmi(eax);
  __ SmiUntag(eax);
  __ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen,
                               LStringCharFromCode* instr,
                               const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  DCHECK(!char_code.is(result));

  __ cmp(char_code, String::kMaxOneByteCharCode);
  __ j(above, deferred->entry());
  __ Move(result, Immediate(factory()->single_character_string_cache()));
  __ mov(result, FieldOperand(result,
                              char_code, times_pointer_size,
                              FixedArray::kHeaderSize));
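  // The cache is a FixedArray of one-character strings indexed by char code;
  // an undefined entry means that string has not been materialized yet, so
  // the check below sends such codes to the deferred runtime path.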
  __ cmp(result, factory()->undefined_value());
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->left()).is(edx));
  DCHECK(ToRegister(instr->right()).is(eax));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  DCHECK(output->IsDoubleRegister());
  if (input->IsRegister()) {
    Register input_reg = ToRegister(input);
    __ push(input_reg);
    X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
    __ pop(input_reg);
  } else {
    X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  X87Register res = ToX87Register(output);
  X87PrepareToWrite(res);
  __ LoadUint32NoSSE2(ToRegister(input));
  X87CommitWrite(res);
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI FINAL : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen,
                       LNumberTagI* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
                                       SIGNED_INT32);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagI* deferred =
      new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
  __ SmiTag(reg);
  __ j(overflow, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen,
                       LNumberTagU* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
                                       UNSIGNED_INT32);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred =
      new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
  __ cmp(reg, Immediate(Smi::kMaxValue));
  __ j(above, deferred->entry());
  __ SmiTag(reg);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register reg = ToRegister(value);
  Register tmp = ToRegister(temp);

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    __ SmiUntag(reg);
    __ xor_(reg, 0x80000000);
    __ push(reg);
    __ fild_s(Operand(esp, 0));
    __ pop(reg);
  } else {
    // There's no fild variant for unsigned values, so zero-extend to a 64-bit
    // int manually.
    __ push(Immediate(0));
    __ push(reg);
    __ fild_d(Operand(esp, 0));
    __ pop(reg);
    __ pop(reg);
  }
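  // Worked sketch of the signed recovery above (assuming 31-bit smis on
  // ia32): for an original value of 0x40000000, SmiTag (add reg, reg) gives
  // 0x80000000 and sets the overflow flag; SmiUntag (sar reg, 1) then yields
  // 0xC0000000, and the xor with 0x80000000 restores 0x40000000.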

  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
    __ jmp(&done, Label::kNear);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ Move(reg, Immediate(0));

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagI and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(reg, eax);
  }

  __ bind(&done);
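  // The fild result should still be on the x87 stack at this point (the slow
  // path uses CallRuntimeSaveDoubles, which presumably preserves it); fstp_d
  // pops it straight into the freshly allocated heap number's payload.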
  __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen,
                       LNumberTagD* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  Register reg = ToRegister(instr->result());

  // Put the value on top of the x87 stack.
  X87Register src = ToX87Register(instr->value());
  // Don't use X87LoadForUsage here; it is only meant for instructions that
  // clobber fp registers.
  x87_stack_.Fxch(src);

  DeferredNumberTagD* deferred =
      new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
  if (FLAG_inline_new) {
    Register tmp = ToRegister(instr->temp());
    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Immediate(0));

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(reg, eax);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
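    // A uint32 fits in a smi only if it is at most Smi::kMaxValue
    // (0x3fffffff on ia32), i.e. only if the top two bits are clear, which
    // is exactly what the 0xc0000000 mask below tests.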
    __ test(input, Immediate(0xc0000000));
    DeoptimizeIf(not_zero, instr, "overflow");
  }
  __ SmiTag(input);
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    DeoptimizeIf(overflow, instr, "overflow");
  }
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  LOperand* input = instr->value();
  Register result = ToRegister(input);
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  if (instr->needs_check()) {
    __ test(result, Immediate(kSmiTagMask));
    DeoptimizeIf(not_zero, instr, "not a Smi");
  } else {
    __ AssertSmi(result);
  }
  __ SmiUntag(result);
}


void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
                                      Register temp_reg, X87Register res_reg,
                                      NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Label load_smi, done;

  X87PrepareToWrite(res_reg);
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ JumpIfSmi(input_reg, &load_smi);

    // Heap number map check.
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           factory()->heap_number_map());
    if (!can_convert_undefined_to_nan) {
      DeoptimizeIf(not_equal, instr, "not a heap number");
    } else {
      Label heap_number, convert;
      __ j(equal, &heap_number);

      // Convert undefined (or hole) to NaN.
      __ cmp(input_reg, factory()->undefined_value());
      DeoptimizeIf(not_equal, instr, "not a heap number/undefined");

      __ bind(&convert);
      ExternalReference nan =
          ExternalReference::address_of_canonical_non_hole_nan();
      __ fld_d(Operand::StaticVariable(nan));
      __ jmp(&done, Label::kNear);

      __ bind(&heap_number);
    }
    // Heap number to x87 conversion.
    __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
      __ fldz();
      __ FCmp();
      __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
      __ j(not_zero, &done, Label::kNear);

      // Use general purpose registers to check if we have -0.0.
      __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
      __ test(temp_reg, Immediate(HeapNumber::kSignMask));
      __ j(zero, &done, Label::kNear);

      // Pop FPU stack before deoptimizing.
      __ fstp(0);
      DeoptimizeIf(not_zero, instr, "minus zero");
    }
    __ jmp(&done, Label::kNear);
  } else {
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  __ bind(&load_smi);
  // Clobbering a temp is faster than re-tagging the
  // input register since we avoid dependencies.
  __ mov(temp_reg, input_reg);
  __ SmiUntag(temp_reg);  // Untag smi before converting to float.
  __ push(temp_reg);
  __ fild_s(Operand(esp, 0));
  __ add(esp, Immediate(kPointerSize));
  __ bind(&done);
  X87CommitWrite(res_reg);
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
  Register input_reg = ToRegister(instr->value());

  // The input was optimistically untagged; revert it.
  STATIC_ASSERT(kSmiTagSize == 1);
  __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
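  // The lea computes input_reg * 2 + kHeapObjectTag. Heap object pointers
  // are odd (tag 1), so for a pointer p this exactly undoes the earlier
  // sar by one: (p >> 1) * 2 + 1 == p.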

  if (instr->truncating()) {
    Label no_heap_number, check_bools, check_false;

    // Heap number map check.
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           factory()->heap_number_map());
    __ j(not_equal, &no_heap_number, Label::kNear);
    __ TruncateHeapNumberToI(input_reg, input_reg);
    __ jmp(done);

    __ bind(&no_heap_number);
    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ cmp(input_reg, factory()->undefined_value());
    __ j(not_equal, &check_bools, Label::kNear);
    __ Move(input_reg, Immediate(0));
    __ jmp(done);

    __ bind(&check_bools);
    __ cmp(input_reg, factory()->true_value());
    __ j(not_equal, &check_false, Label::kNear);
    __ Move(input_reg, Immediate(1));
    __ jmp(done);

    __ bind(&check_false);
    __ cmp(input_reg, factory()->false_value());
    DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
    __ Move(input_reg, Immediate(0));
  } else {
    // TODO(olivf) Converting a number on the fpu is actually quite slow. We
    // should first try a fast conversion and then bailout to this slow case.
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           isolate()->factory()->heap_number_map());
    DeoptimizeIf(not_equal, instr, "not a heap number");

    __ sub(esp, Immediate(kPointerSize));
    __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));

    if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
      Label no_precision_lost, not_nan, zero_check;
      __ fld(0);

      __ fist_s(MemOperand(esp, 0));
      __ fild_s(MemOperand(esp, 0));
      __ FCmp();
      __ pop(input_reg);

      __ j(equal, &no_precision_lost, Label::kNear);
      __ fstp(0);
      DeoptimizeIf(no_condition, instr, "lost precision");
      __ bind(&no_precision_lost);

      __ j(parity_odd, &not_nan);
      __ fstp(0);
      DeoptimizeIf(no_condition, instr, "NaN");
      __ bind(&not_nan);

      __ test(input_reg, Operand(input_reg));
      __ j(zero, &zero_check, Label::kNear);
      __ fstp(0);
      __ jmp(done);

      __ bind(&zero_check);
      // To check for minus zero, we load the value again as float, and check
      // if that is still 0.
      __ sub(esp, Immediate(kPointerSize));
      __ fstp_s(Operand(esp, 0));
      __ pop(input_reg);
      __ test(input_reg, Operand(input_reg));
      DeoptimizeIf(not_zero, instr, "minus zero");
    } else {
      __ fist_s(MemOperand(esp, 0));
      __ fild_s(MemOperand(esp, 0));
      __ FCmp();
      __ pop(input_reg);
      DeoptimizeIf(not_equal, instr, "lost precision");
      DeoptimizeIf(parity_even, instr, "NaN");
    }
  }
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen,
                      LTaggedToI* instr,
                      const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_, done());
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register input_reg = ToRegister(input);
  DCHECK(input_reg.is(ToRegister(instr->result())));

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred =
        new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
    // Optimistically untag the input.
    // If the input is a HeapObject, SmiUntag will set the carry flag.
    STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
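    // SmiUntag is a sar by one, which shifts the low tag bit into the carry
    // flag: smis have tag 0 (carry clear) and heap object pointers have
    // tag 1 (carry set), so one conditional jump distinguishes the two.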
    __ SmiUntag(input_reg);
    // Branch to deferred code if the input was tagged.
    // The deferred code will take care of restoring the tag.
    __ j(carry, deferred->entry());
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* temp = instr->temp();
  DCHECK(temp->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  Register temp_reg = ToRegister(temp);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagDNoSSE2(instr, input_reg, temp_reg, ToX87Register(result),
                         mode);
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsDoubleRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsRegister());
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    X87Register input_reg = ToX87Register(input);
    X87Fxch(input_reg);
    __ TruncateX87TOSToI(result_reg);
  } else {
    Label lost_precision, is_nan, minus_zero, done;
    X87Register input_reg = ToX87Register(input);
    X87Fxch(input_reg);
    Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
    __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
                 &lost_precision, &is_nan, &minus_zero, dist);
    __ jmp(&done);
    __ bind(&lost_precision);
    DeoptimizeIf(no_condition, instr, "lost precision");
    __ bind(&is_nan);
    DeoptimizeIf(no_condition, instr, "NaN");
    __ bind(&minus_zero);
    DeoptimizeIf(no_condition, instr, "minus zero");
    __ bind(&done);
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsDoubleRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsRegister());
  Register result_reg = ToRegister(result);

  Label lost_precision, is_nan, minus_zero, done;
  X87Register input_reg = ToX87Register(input);
  X87Fxch(input_reg);
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
               &lost_precision, &is_nan, &minus_zero, dist);
  __ jmp(&done);
  __ bind(&lost_precision);
  DeoptimizeIf(no_condition, instr, "lost precision");
  __ bind(&is_nan);
  DeoptimizeIf(no_condition, instr, "NaN");
  __ bind(&minus_zero);
  DeoptimizeIf(no_condition, instr, "minus zero");
  __ bind(&done);
  __ SmiTag(result_reg);
  DeoptimizeIf(overflow, instr, "overflow");
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ test(ToOperand(input), Immediate(kSmiTagMask));
  DeoptimizeIf(not_zero, instr, "not a Smi");
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ test(ToOperand(input), Immediate(kSmiTagMask));
    DeoptimizeIf(zero, instr, "Smi");
  }
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
            static_cast<int8_t>(first));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr, "wrong instance type");
    } else {
      DeoptimizeIf(below, instr, "wrong instance type");
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
                static_cast<int8_t>(last));
        DeoptimizeIf(above, instr, "wrong instance type");
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
    } else {
      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
      __ and_(temp, mask);
      __ cmp(temp, tag);
      DeoptimizeIf(not_equal, instr, "wrong instance type");
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  if (instr->hydrogen()->object_in_new_space()) {
    Register reg = ToRegister(instr->value());
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ cmp(reg, Operand::ForCell(cell));
  } else {
    Operand operand = ToOperand(instr->value());
    __ cmp(operand, object);
  }
  DeoptimizeIf(not_equal, instr, "value mismatch");
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ xor_(esi, esi);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);

    __ test(eax, Immediate(kSmiTagMask));
  }
  DeoptimizeIf(zero, instr, "instance migration failed");
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen,
                      LCheckMaps* instr,
                      Register object,
                      const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(reg, map);
    __ j(equal, &success, Label::kNear);
  }

  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(reg, map);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr, "wrong map");
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  X87Register value_reg = ToX87Register(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  X87Fxch(value_reg);
  __ ClampTOSToUint8(result_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  DCHECK(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}


void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  Register scratch = ToRegister(instr->scratch());
  Register scratch2 = ToRegister(instr->scratch2());
  Register scratch3 = ToRegister(instr->scratch3());
  Label is_smi, done, heap_number, valid_exponent,
      largest_value, zero_result, maybe_nan_or_infinity;

  __ JumpIfSmi(input_reg, &is_smi);

  // Check for heap number.
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
  __ jmp(&zero_result, Label::kNear);

  // Heap number.
  __ bind(&heap_number);

  // Surprisingly, all of the hand-crafted bit manipulations below are much
  // faster than the x86 FPU built-in instruction, especially since "banker's
  // rounding" would additionally be very expensive.

  // Get exponent word.
  __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));

  // Test for negative values --> clamp to zero.
  __ test(scratch, scratch);
  __ j(negative, &zero_result, Label::kNear);

  // Get exponent alone in scratch2.
  __ mov(scratch2, scratch);
  __ and_(scratch2, HeapNumber::kExponentMask);
  __ shr(scratch2, HeapNumber::kExponentShift);
  __ j(zero, &zero_result, Label::kNear);
  __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
  __ j(negative, &zero_result, Label::kNear);

  const uint32_t non_int8_exponent = 7;
  __ cmp(scratch2, Immediate(non_int8_exponent + 1));
  // If the exponent is too big, check for special values.
  __ j(greater, &maybe_nan_or_infinity, Label::kNear);

  __ bind(&valid_exponent);
  // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
  // < 7. The shift bias is the number of bits to shift the mantissa such that
  // with an exponent of 7 the top-most one is in bit 30, allowing detection of
  // the rounding overflow of 255.5 to 256 (bit 31 goes from 0 to 1).
  int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
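  // With the usual IEEE layout (HeapNumber::kExponentShift == 20) this
  // evaluates to (30 - 20) - 7 - 1 == 2, so the shift amount loaded below
  // is the adjusted exponent plus 2.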
  __ lea(result_reg, MemOperand(scratch2, shift_bias));
  // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
  // top bits of the mantissa.
  __ and_(scratch, HeapNumber::kMantissaMask);
  // Put back the implicit 1 of the mantissa.
  __ or_(scratch, 1 << HeapNumber::kExponentShift);
  // Shift up to round.
  __ shl_cl(scratch);
  // Use "banker's rounding" to spec: if the fractional part of the number is
  // exactly 0.5, take the bit in the "ones" place and add it to the "halves"
  // place, which has the effect of rounding to even.
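  // E.g. 2.5 rounds to 2 while 3.5 rounds to 4: when the fraction is exactly
  // one half, the "ones" bit (0 for even, 1 for odd) is what gets added, so
  // even values stay put and odd values round up.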
  __ mov(scratch2, scratch);
  const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
  const uint32_t one_bit_shift = one_half_bit_shift + 1;
  __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
  __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
  Label no_round;
  __ j(less, &no_round, Label::kNear);
  Label round_up;
  __ mov(scratch2, Immediate(1 << one_half_bit_shift));
  __ j(greater, &round_up, Label::kNear);
  __ test(scratch3, scratch3);
  __ j(not_zero, &round_up, Label::kNear);
  __ mov(scratch2, scratch);
  __ and_(scratch2, Immediate(1 << one_bit_shift));
  __ shr(scratch2, 1);
  __ bind(&round_up);
  __ add(scratch, scratch2);
  __ j(overflow, &largest_value, Label::kNear);
  __ bind(&no_round);
  __ shr(scratch, 23);
  __ mov(result_reg, scratch);
  __ jmp(&done, Label::kNear);

  __ bind(&maybe_nan_or_infinity);
  // Check for NaN/Infinity; all other values map to 255.
  __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
  __ j(not_equal, &largest_value, Label::kNear);

  // Check for NaN, which differs from Infinity in that at least one mantissa
  // bit is set.
  __ and_(scratch, HeapNumber::kMantissaMask);
  __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ j(not_zero, &zero_result, Label::kNear);  // M != 0 --> NaN.
  // Infinity -> Fall through to map to 255.

  __ bind(&largest_value);
  __ mov(result_reg, Immediate(255));
  __ jmp(&done, Label::kNear);

  __ bind(&zero_result);
  __ xor_(result_reg, result_reg);
  __ jmp(&done, Label::kNear);

  // Smi case.
  __ bind(&is_smi);
  if (!input_reg.is(result_reg)) {
    __ mov(result_reg, input_reg);
  }
  __ SmiUntag(result_reg);
  __ ClampUint8(result_reg);
  __ bind(&done);
}


void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  X87Register value_reg = ToX87Register(instr->value());
  Register result_reg = ToRegister(instr->result());
  X87Fxch(value_reg);
  __ sub(esp, Immediate(kDoubleSize));
  __ fst_d(Operand(esp, 0));
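  // On little-endian ia32 the stored double has its low 32 bits at [esp] and
  // its high 32 bits (sign, exponent, top of the mantissa) at
  // [esp + kPointerSize], which is what the HIGH/LOW selection below reads.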
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ mov(result_reg, Operand(esp, kPointerSize));
  } else {
    __ mov(result_reg, Operand(esp, 0));
  }
  __ add(esp, Immediate(kDoubleSize));
}


void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  X87Register result_reg = ToX87Register(instr->result());
  // Follow the pattern below to write an x87 fp register.
  X87PrepareToWrite(result_reg);
  __ sub(esp, Immediate(kDoubleSize));
  __ mov(Operand(esp, 0), lo_reg);
  __ mov(Operand(esp, kPointerSize), hi_reg);
  __ fld_d(Operand(esp, 0));
  __ add(esp, Immediate(kDoubleSize));
  X87CommitWrite(result_reg);
}


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen,
                     LAllocate* instr,
                     const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr, x87_stack_);

  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
    } else {
      __ jmp(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ mov(temp, (size / kPointerSize) - 1);
    } else {
      temp = ToRegister(instr->size());
      __ shr(temp, kPointerSizeLog2);
      __ dec(temp);
    }
    Label loop;
    __ bind(&loop);
    __ mov(FieldOperand(result, temp, times_pointer_size, 0),
           isolate()->factory()->one_pointer_filler_map());
    __ dec(temp);
    __ j(not_zero, &loop);
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(Smi::FromInt(0)));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(ToRegister(instr->size()));
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ push(Immediate(Smi::FromInt(size)));
    } else {
      // We should never get here at runtime => abort.
      __ int3();
      return;
    }
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ push(Immediate(Smi::FromInt(flags)));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).is(eax));
  __ push(eax);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  Label materialized;
  // Registers will be used as follows:
  // ecx = literals array.
  // ebx = regexp literal.
  // eax = regexp literal clone.
  // esi = context.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadHeapObject(ecx, instr->hydrogen()->literals());
  __ mov(ebx, FieldOperand(ecx, literal_offset));
  __ cmp(ebx, factory()->undefined_value());
  __ j(not_equal, &materialized, Label::kNear);

  // Create regexp literal using runtime function.
  // Result will be in eax.
  __ push(ecx);
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->pattern()));
  __ push(Immediate(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(ebx, eax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated, Label::kNear);

  __ bind(&runtime_allocate);
  __ push(ebx);
  __ push(Immediate(Smi::FromInt(size)));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(ebx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput.)
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ mov(edx, FieldOperand(ebx, i));
    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
    __ mov(FieldOperand(eax, i), edx);
    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
    __ mov(FieldOperand(eax, size - kPointerSize), edx);
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  // Use the fast-case closure allocation code that allocates in new
  // space for nested functions that don't need literal cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
                            instr->hydrogen()->kind());
    __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ push(esi);
    __ push(Immediate(instr->hydrogen()->shared_info()));
    __ push(Immediate(pretenure ? factory()->true_value()
                                : factory()->false_value()));
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  LOperand* input = instr->value();
  EmitPushTaggedOperand(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  Label::Distance true_distance = left_block == next_block ? Label::kNear
                                                           : Label::kFar;
  Label::Distance false_distance = right_block == next_block ? Label::kNear
                                                             : Label::kFar;
  Condition final_branch_condition = no_condition;
  if (String::Equals(type_name, factory()->number_string())) {
    __ JumpIfSmi(input, true_label, true_distance);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           factory()->heap_number_map());
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->string_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label, false_distance);
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else if (String::Equals(type_name, factory()->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->boolean_string())) {
    __ cmp(input, factory()->true_value());
    __ j(equal, true_label, true_distance);
    __ cmp(input, factory()->false_value());
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->undefined_string())) {
    __ cmp(input, factory()->undefined_value());
    __ j(equal, true_label, true_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = not_zero;

  } else if (String::Equals(type_name, factory()->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label, true_distance);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ cmp(input, factory()->null_value());
    __ j(equal, true_label, true_distance);
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label, false_distance);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label, false_distance);
    // Check for undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else {
    __ jmp(false_label, false_distance);
  }
  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->temp());

  EmitIsConstructCall(temp);
  EmitBranch(instr, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
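    // Deoptimizer::patch_size() is the length of the call sequence that gets
    // patched in; callers pass it as space_needed (see the
    // EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()) call in DoStackCheck).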
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }
  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen,
                       LStackCheck* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(above_equal, &done, Label::kNear);

    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(esi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr, x87_stack_);
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  __ cmp(eax, isolate()->factory()->undefined_value());
  DeoptimizeIf(equal, instr, "undefined");

  __ cmp(eax, isolate()->factory()->null_value());
  DeoptimizeIf(equal, instr, "null");

  __ test(eax, Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr, "Smi");

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
  DeoptimizeIf(below_equal, instr, "wrong instance type");

  Label use_cache, call_runtime;
  __ CheckEnumCache(&call_runtime);

  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(eax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
         isolate()->factory()->meta_map());
  DeoptimizeIf(not_equal, instr, "wrong map");
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ cmp(result, Immediate(Smi::FromInt(0)));
  __ j(not_equal, &load_cache, Label::kNear);
  __ mov(result, isolate()->factory()->empty_fixed_array());
  __ jmp(&done, Label::kNear);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ mov(result,
         FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ mov(result,
         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  __ test(result, result);
  DeoptimizeIf(equal, instr, "no cache");
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmp(ToRegister(instr->map()),
         FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr, "wrong map");
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ push(object);
  __ push(index);
  __ xor_(esi, esi);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(object, eax);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register object,
                              Register index,
                              const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack),
          instr_(instr),
          object_(object),
          index_(index) {
    }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LLoadFieldByIndex* instr_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, object, index, x87_stack_);

  Label out_of_object, done;
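  // The index smi carries two extra bits of information: bit 0 of its
  // payload flags a mutable heap-number field (handled in the deferred code),
  // and once that bit is shifted out, a negative index selects the
  // out-of-object properties array instead of an in-object field.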
  __ test(index, Immediate(Smi::FromInt(1)));
  __ j(not_zero, deferred->entry());

  __ sar(index, 1);

  __ cmp(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ neg(index);
  // Index is now equal to the out-of-object property index plus 1.
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X87