1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/v8.h"
6
7 #if V8_TARGET_ARCH_X64
8
9 #include "src/base/bits.h"
10 #include "src/code-factory.h"
11 #include "src/code-stubs.h"
12 #include "src/hydrogen-osr.h"
13 #include "src/ic/ic.h"
14 #include "src/ic/stub-cache.h"
15 #include "src/x64/lithium-codegen-x64.h"
16
17 namespace v8 {
18 namespace internal {
19
20
21 // When invoking builtins, we need to record the safepoint in the middle of
22 // the invoke instruction sequence generated by the macro assembler.
23 class SafepointGenerator FINAL : public CallWrapper {
24 public:
25 SafepointGenerator(LCodeGen* codegen,
26 LPointerMap* pointers,
27 Safepoint::DeoptMode mode)
28 : codegen_(codegen),
29 pointers_(pointers),
30 deopt_mode_(mode) { }
31 virtual ~SafepointGenerator() {}
32
33 virtual void BeforeCall(int call_size) const OVERRIDE {}
34
35 virtual void AfterCall() const OVERRIDE {
36 codegen_->RecordSafepoint(pointers_, deopt_mode_);
37 }
38
39 private:
40 LCodeGen* codegen_;
41 LPointerMap* pointers_;
42 Safepoint::DeoptMode deopt_mode_;
43 };
44
45
46 #define __ masm()->
47
48 bool LCodeGen::GenerateCode() {
49 LPhase phase("Z_Code generation", chunk());
50 DCHECK(is_unused());
51 status_ = GENERATING;
52
53 // Open a frame scope to indicate that there is a frame on the stack. The
54 // MANUAL indicates that the scope shouldn't actually generate code to set up
55 // the frame (that is done in GeneratePrologue).
56 FrameScope frame_scope(masm_, StackFrame::MANUAL);
57
58 return GeneratePrologue() &&
59 GenerateBody() &&
60 GenerateDeferredCode() &&
61 GenerateJumpTable() &&
62 GenerateSafepointTable();
63 }
64
65
66 void LCodeGen::FinishCode(Handle<Code> code) {
67 DCHECK(is_done());
68 code->set_stack_slots(GetStackSlotCount());
69 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
70 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
71 PopulateDeoptimizationData(code);
72 }
73
74
75 #ifdef _MSC_VER
76 void LCodeGen::MakeSureStackPagesMapped(int offset) {
77 const int kPageSize = 4 * KB;
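// Touch one word in each page of the freshly reserved area, starting next
// to the already committed region and working towards the new stack top.
// Windows commits stack memory one guard page at a time, so the pages have
// to be accessed in order; the value written (rax) is irrelevant.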
78 for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
79 __ movp(Operand(rsp, offset), rax);
80 }
81 }
82 #endif
83
84
85 void LCodeGen::SaveCallerDoubles() {
86 DCHECK(info()->saves_caller_doubles());
87 DCHECK(NeedsEagerFrame());
88 Comment(";;; Save clobbered callee double registers");
89 int count = 0;
90 BitVector* doubles = chunk()->allocated_double_registers();
91 BitVector::Iterator save_iterator(doubles);
92 while (!save_iterator.Done()) {
93 __ movsd(MemOperand(rsp, count * kDoubleSize),
94 XMMRegister::FromAllocationIndex(save_iterator.Current()));
95 save_iterator.Advance();
96 count++;
97 }
98 }
99
100
101 void LCodeGen::RestoreCallerDoubles() {
102 DCHECK(info()->saves_caller_doubles());
103 DCHECK(NeedsEagerFrame());
104 Comment(";;; Restore clobbered callee double registers");
105 BitVector* doubles = chunk()->allocated_double_registers();
106 BitVector::Iterator save_iterator(doubles);
107 int count = 0;
108 while (!save_iterator.Done()) {
109 __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
110 MemOperand(rsp, count * kDoubleSize));
111 save_iterator.Advance();
112 count++;
113 }
114 }
115
116
117 bool LCodeGen::GeneratePrologue() {
118 DCHECK(is_generating());
119
120 if (info()->IsOptimizing()) {
121 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
122
123 #ifdef DEBUG
124 if (strlen(FLAG_stop_at) > 0 &&
125 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
126 __ int3();
127 }
128 #endif
129
130 // Sloppy mode functions need to replace the receiver with the global proxy
131 // when called as functions (without an explicit receiver object).
132 if (info_->this_has_uses() &&
133 info_->strict_mode() == SLOPPY &&
134 !info_->is_native()) {
135 Label ok;
136 StackArgumentsAccessor args(rsp, scope()->num_parameters());
137 __ movp(rcx, args.GetReceiverOperand());
138
139 __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
140 __ j(not_equal, &ok, Label::kNear);
141
142 __ movp(rcx, GlobalObjectOperand());
143 __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
144
145 __ movp(args.GetReceiverOperand(), rcx);
146
147 __ bind(&ok);
148 }
149 }
150
151 info()->set_prologue_offset(masm_->pc_offset());
152 if (NeedsEagerFrame()) {
153 DCHECK(!frame_is_built_);
154 frame_is_built_ = true;
155 if (info()->IsStub()) {
156 __ StubPrologue();
157 } else {
158 __ Prologue(info()->IsCodePreAgingActive());
159 }
160 info()->AddNoFrameRange(0, masm_->pc_offset());
161 }
162
163 // Reserve space for the stack slots needed by the code.
164 int slots = GetStackSlotCount();
165 if (slots > 0) {
166 if (FLAG_debug_code) {
167 __ subp(rsp, Immediate(slots * kPointerSize));
168 #ifdef _MSC_VER
169 MakeSureStackPagesMapped(slots * kPointerSize);
170 #endif
171 __ Push(rax);
172 __ Set(rax, slots);
173 __ Set(kScratchRegister, kSlotsZapValue);
174 Label loop;
175 __ bind(&loop);
176 __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
177 kScratchRegister);
178 __ decl(rax);
179 __ j(not_zero, &loop);
180 __ Pop(rax);
181 } else {
182 __ subp(rsp, Immediate(slots * kPointerSize));
183 #ifdef _MSC_VER
184 MakeSureStackPagesMapped(slots * kPointerSize);
185 #endif
186 }
187
188 if (info()->saves_caller_doubles()) {
189 SaveCallerDoubles();
190 }
191 }
192
193 // Possibly allocate a local context.
194 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
195 if (heap_slots > 0) {
196 Comment(";;; Allocate local context");
197 bool need_write_barrier = true;
198 // Argument to NewContext is the function, which is still in rdi.
199 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
200 FastNewContextStub stub(isolate(), heap_slots);
201 __ CallStub(&stub);
202 // Result of FastNewContextStub is always in new space.
203 need_write_barrier = false;
204 } else {
205 __ Push(rdi);
206 __ CallRuntime(Runtime::kNewFunctionContext, 1);
207 }
208 RecordSafepoint(Safepoint::kNoLazyDeopt);
209 // Context is returned in rax. It replaces the context passed to us.
210 // It's saved in the stack and kept live in rsi.
211 __ movp(rsi, rax);
212 __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);
213
214 // Copy any necessary parameters into the context.
215 int num_parameters = scope()->num_parameters();
216 for (int i = 0; i < num_parameters; i++) {
217 Variable* var = scope()->parameter(i);
218 if (var->IsContextSlot()) {
219 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
220 (num_parameters - 1 - i) * kPointerSize;
221 // Load parameter from stack.
222 __ movp(rax, Operand(rbp, parameter_offset));
223 // Store it in the context.
224 int context_offset = Context::SlotOffset(var->index());
225 __ movp(Operand(rsi, context_offset), rax);
226 // Update the write barrier. This clobbers rax and rbx.
227 if (need_write_barrier) {
228 __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
229 } else if (FLAG_debug_code) {
230 Label done;
231 __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
232 __ Abort(kExpectedNewSpaceObject);
233 __ bind(&done);
234 }
235 }
236 }
237 Comment(";;; End allocate local context");
238 }
239
240 // Trace the call.
241 if (FLAG_trace && info()->IsOptimizing()) {
242 __ CallRuntime(Runtime::kTraceEnter, 0);
243 }
244 return !is_aborted();
245 }
246
247
248 void LCodeGen::GenerateOsrPrologue() {
249 // Generate the OSR entry prologue at the first unknown OSR value, or if there
250 // are none, at the OSR entrypoint instruction.
251 if (osr_pc_offset_ >= 0) return;
252
253 osr_pc_offset_ = masm()->pc_offset();
254
255 // Adjust the frame size, subsuming the unoptimized frame into the
256 // optimized frame.
257 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
258 DCHECK(slots >= 0);
259 __ subp(rsp, Immediate(slots * kPointerSize));
260 }
261
262
263 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
264 if (instr->IsCall()) {
265 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
266 }
267 if (!instr->IsLazyBailout() && !instr->IsGap()) {
268 safepoints_.BumpLastLazySafepointIndex();
269 }
270 }
271
272
273 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
274 if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
275 instr->hydrogen_value()->representation().IsInteger32() &&
276 instr->result()->IsRegister()) {
277 __ AssertZeroExtended(ToRegister(instr->result()));
278 }
279
280 if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
281 // We sign extend the dehoisted key at the definition point when the pointer
282 // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use
283 // points and MustSignExtendResult is always false. We can't use
284 // STATIC_ASSERT here as the pointer size is 32-bit for x32.
285 DCHECK(kPointerSize == kInt64Size);
286 if (instr->result()->IsRegister()) {
287 Register result_reg = ToRegister(instr->result());
288 __ movsxlq(result_reg, result_reg);
289 } else {
290 // Sign extend the 32bit result in the stack slots.
291 DCHECK(instr->result()->IsStackSlot());
292 Operand src = ToOperand(instr->result());
293 __ movsxlq(kScratchRegister, src);
294 __ movq(src, kScratchRegister);
295 }
296 }
297 }
298
299
300 bool LCodeGen::GenerateJumpTable() {
301 Label needs_frame;
302 if (jump_table_.length() > 0) {
303 Comment(";;; -------------------- Jump table --------------------");
304 }
305 for (int i = 0; i < jump_table_.length(); i++) {
306 Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
307 __ bind(&table_entry->label);
308 Address entry = table_entry->address;
309 DeoptComment(table_entry->reason);
310 if (table_entry->needs_frame) {
311 DCHECK(!info()->saves_caller_doubles());
312 __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
313 if (needs_frame.is_bound()) {
314 __ jmp(&needs_frame);
315 } else {
316 __ bind(&needs_frame);
317 __ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
318 __ pushq(rbp);
319 __ movp(rbp, rsp);
320 __ Push(rsi);
321 // This variant of deopt can only be used with stubs. Since we don't
322 // have a function pointer to install in the stack frame that we're
323 // building, install a special marker there instead.
324 DCHECK(info()->IsStub());
325 __ Move(rsi, Smi::FromInt(StackFrame::STUB));
326 __ Push(rsi);
327 __ movp(rsi, MemOperand(rsp, kPointerSize));
328 __ call(kScratchRegister);
329 }
330 } else {
331 if (info()->saves_caller_doubles()) {
332 DCHECK(info()->IsStub());
333 RestoreCallerDoubles();
334 }
335 __ call(entry, RelocInfo::RUNTIME_ENTRY);
336 }
337 }
338 return !is_aborted();
339 }
340
341
342 bool LCodeGen::GenerateDeferredCode() {
343 DCHECK(is_generating());
344 if (deferred_.length() > 0) {
345 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
346 LDeferredCode* code = deferred_[i];
347
348 HValue* value =
349 instructions_->at(code->instruction_index())->hydrogen_value();
350 RecordAndWritePosition(
351 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
352
353 Comment(";;; <@%d,#%d> "
354 "-------------------- Deferred %s --------------------",
355 code->instruction_index(),
356 code->instr()->hydrogen_value()->id(),
357 code->instr()->Mnemonic());
358 __ bind(code->entry());
359 if (NeedsDeferredFrame()) {
360 Comment(";;; Build frame");
361 DCHECK(!frame_is_built_);
362 DCHECK(info()->IsStub());
363 frame_is_built_ = true;
364 // Build the frame in such a way that rsi isn't trashed.
365 __ pushq(rbp); // Caller's frame pointer.
366 __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
367 __ Push(Smi::FromInt(StackFrame::STUB));
368 __ leap(rbp, Operand(rsp, 2 * kPointerSize));
369 Comment(";;; Deferred code");
370 }
371 code->Generate();
372 if (NeedsDeferredFrame()) {
373 __ bind(code->done());
374 Comment(";;; Destroy frame");
375 DCHECK(frame_is_built_);
376 frame_is_built_ = false;
377 __ movp(rsp, rbp);
378 __ popq(rbp);
379 }
380 __ jmp(code->exit());
381 }
382 }
383
384 // Deferred code is the last part of the instruction sequence. Mark
385 // the generated code as done unless we bailed out.
386 if (!is_aborted()) status_ = DONE;
387 return !is_aborted();
388 }
389
390
391 bool LCodeGen::GenerateSafepointTable() {
392 DCHECK(is_done());
393 safepoints_.Emit(masm(), GetStackSlotCount());
394 return !is_aborted();
395 }
396
397
398 Register LCodeGen::ToRegister(int index) const {
399 return Register::FromAllocationIndex(index);
400 }
401
402
403 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
404 return XMMRegister::FromAllocationIndex(index);
405 }
406
407
408 Register LCodeGen::ToRegister(LOperand* op) const {
409 DCHECK(op->IsRegister());
410 return ToRegister(op->index());
411 }
412
413
414 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
415 DCHECK(op->IsDoubleRegister());
416 return ToDoubleRegister(op->index());
417 }
418
419
420 bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
421 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
422 }
423
424
425 bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
426 return op->IsConstantOperand() &&
427 chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
428 }
429
430
431 bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
432 return chunk_->LookupLiteralRepresentation(op).IsSmi();
433 }
434
435
436 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
437 return ToRepresentation(op, Representation::Integer32());
438 }
439
440
441 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
442 const Representation& r) const {
443 HConstant* constant = chunk_->LookupConstant(op);
444 int32_t value = constant->Integer32Value();
445 if (r.IsInteger32()) return value;
446 DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged());
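// With 31-bit Smis the tagged value fits in 32 bits: Smi::FromInt(value)
// is the integer shifted left past the tag bit, so the cast below returns
// value << 1.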
447 return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
448 }
449
450
451 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
452 HConstant* constant = chunk_->LookupConstant(op);
453 return Smi::FromInt(constant->Integer32Value());
454 }
455
456
457 double LCodeGen::ToDouble(LConstantOperand* op) const {
458 HConstant* constant = chunk_->LookupConstant(op);
459 DCHECK(constant->HasDoubleValue());
460 return constant->DoubleValue();
461 }
462
463
464 ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
465 HConstant* constant = chunk_->LookupConstant(op);
466 DCHECK(constant->HasExternalReferenceValue());
467 return constant->ExternalReferenceValue();
468 }
469
470
471 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
472 HConstant* constant = chunk_->LookupConstant(op);
473 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
474 return constant->handle(isolate());
475 }
476
477
478 static int ArgumentsOffsetWithoutFrame(int index) {
479 DCHECK(index < 0);
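// Without an eager frame the slots are addressed relative to rsp:
// index -1 maps to the slot just above the return address
// (rsp + kPCOnStackSize), index -2 to the one above that, and so on.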
480 return -(index + 1) * kPointerSize + kPCOnStackSize;
481 }
482
483
484 Operand LCodeGen::ToOperand(LOperand* op) const {
485 // Does not handle registers. In X64 assembler, plain registers are not
486 // representable as an Operand.
487 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
488 if (NeedsEagerFrame()) {
489 return Operand(rbp, StackSlotOffset(op->index()));
490 } else {
491 // Retrieve parameter without eager stack-frame relative to the
492 // stack-pointer.
493 return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
494 }
495 }
496
497
498 void LCodeGen::WriteTranslation(LEnvironment* environment,
499 Translation* translation) {
500 if (environment == NULL) return;
501
502 // The translation includes one command per value in the environment.
503 int translation_size = environment->translation_size();
504 // The output frame height does not include the parameters.
505 int height = translation_size - environment->parameter_count();
506
507 WriteTranslation(environment->outer(), translation);
508 bool has_closure_id = !info()->closure().is_null() &&
509 !info()->closure().is_identical_to(environment->closure());
510 int closure_id = has_closure_id
511 ? DefineDeoptimizationLiteral(environment->closure())
512 : Translation::kSelfLiteralId;
513
514 switch (environment->frame_type()) {
515 case JS_FUNCTION:
516 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
517 break;
518 case JS_CONSTRUCT:
519 translation->BeginConstructStubFrame(closure_id, translation_size);
520 break;
521 case JS_GETTER:
522 DCHECK(translation_size == 1);
523 DCHECK(height == 0);
524 translation->BeginGetterStubFrame(closure_id);
525 break;
526 case JS_SETTER:
527 DCHECK(translation_size == 2);
528 DCHECK(height == 0);
529 translation->BeginSetterStubFrame(closure_id);
530 break;
531 case ARGUMENTS_ADAPTOR:
532 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
533 break;
534 case STUB:
535 translation->BeginCompiledStubFrame();
536 break;
537 }
538
539 int object_index = 0;
540 int dematerialized_index = 0;
541 for (int i = 0; i < translation_size; ++i) {
542 LOperand* value = environment->values()->at(i);
543 AddToTranslation(environment,
544 translation,
545 value,
546 environment->HasTaggedValueAt(i),
547 environment->HasUint32ValueAt(i),
548 &object_index,
549 &dematerialized_index);
550 }
551 }
552
553
554 void LCodeGen::AddToTranslation(LEnvironment* environment,
555 Translation* translation,
556 LOperand* op,
557 bool is_tagged,
558 bool is_uint32,
559 int* object_index_pointer,
560 int* dematerialized_index_pointer) {
561 if (op == LEnvironment::materialization_marker()) {
562 int object_index = (*object_index_pointer)++;
563 if (environment->ObjectIsDuplicateAt(object_index)) {
564 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
565 translation->DuplicateObject(dupe_of);
566 return;
567 }
568 int object_length = environment->ObjectLengthAt(object_index);
569 if (environment->ObjectIsArgumentsAt(object_index)) {
570 translation->BeginArgumentsObject(object_length);
571 } else {
572 translation->BeginCapturedObject(object_length);
573 }
574 int dematerialized_index = *dematerialized_index_pointer;
575 int env_offset = environment->translation_size() + dematerialized_index;
576 *dematerialized_index_pointer += object_length;
577 for (int i = 0; i < object_length; ++i) {
578 LOperand* value = environment->values()->at(env_offset + i);
579 AddToTranslation(environment,
580 translation,
581 value,
582 environment->HasTaggedValueAt(env_offset + i),
583 environment->HasUint32ValueAt(env_offset + i),
584 object_index_pointer,
585 dematerialized_index_pointer);
586 }
587 return;
588 }
589
590 if (op->IsStackSlot()) {
591 if (is_tagged) {
592 translation->StoreStackSlot(op->index());
593 } else if (is_uint32) {
594 translation->StoreUint32StackSlot(op->index());
595 } else {
596 translation->StoreInt32StackSlot(op->index());
597 }
598 } else if (op->IsDoubleStackSlot()) {
599 translation->StoreDoubleStackSlot(op->index());
600 } else if (op->IsRegister()) {
601 Register reg = ToRegister(op);
602 if (is_tagged) {
603 translation->StoreRegister(reg);
604 } else if (is_uint32) {
605 translation->StoreUint32Register(reg);
606 } else {
607 translation->StoreInt32Register(reg);
608 }
609 } else if (op->IsDoubleRegister()) {
610 XMMRegister reg = ToDoubleRegister(op);
611 translation->StoreDoubleRegister(reg);
612 } else if (op->IsConstantOperand()) {
613 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
614 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
615 translation->StoreLiteral(src_index);
616 } else {
617 UNREACHABLE();
618 }
619 }
620
621
622 void LCodeGen::CallCodeGeneric(Handle<Code> code,
623 RelocInfo::Mode mode,
624 LInstruction* instr,
625 SafepointMode safepoint_mode,
626 int argc) {
627 DCHECK(instr != NULL);
628 __ call(code, mode);
629 RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
630
631 // Signal that we don't inline smi code before these stubs in the
632 // optimizing code generator.
633 if (code->kind() == Code::BINARY_OP_IC ||
634 code->kind() == Code::COMPARE_IC) {
635 __ nop();
636 }
637 }
638
639
640 void LCodeGen::CallCode(Handle<Code> code,
641 RelocInfo::Mode mode,
642 LInstruction* instr) {
643 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
644 }
645
646
647 void LCodeGen::CallRuntime(const Runtime::Function* function,
648 int num_arguments,
649 LInstruction* instr,
650 SaveFPRegsMode save_doubles) {
651 DCHECK(instr != NULL);
652 DCHECK(instr->HasPointerMap());
653
654 __ CallRuntime(function, num_arguments, save_doubles);
655
656 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
657 }
658
659
660 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
661 if (context->IsRegister()) {
662 if (!ToRegister(context).is(rsi)) {
663 __ movp(rsi, ToRegister(context));
664 }
665 } else if (context->IsStackSlot()) {
666 __ movp(rsi, ToOperand(context));
667 } else if (context->IsConstantOperand()) {
668 HConstant* constant =
669 chunk_->LookupConstant(LConstantOperand::cast(context));
670 __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
671 } else {
672 UNREACHABLE();
673 }
674 }
675
676
677
678 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
679 int argc,
680 LInstruction* instr,
681 LOperand* context) {
682 LoadContextFromDeferred(context);
683
684 __ CallRuntimeSaveDoubles(id);
685 RecordSafepointWithRegisters(
686 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
687 }
688
689
690 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
691 Safepoint::DeoptMode mode) {
692 environment->set_has_been_used();
693 if (!environment->HasBeenRegistered()) {
694 // Physical stack frame layout:
695 // -x ............. -4 0 ..................................... y
696 // [incoming arguments] [spill slots] [pushed outgoing arguments]
697
698 // Layout of the environment:
699 // 0 ..................................................... size-1
700 // [parameters] [locals] [expression stack including arguments]
701
702 // Layout of the translation:
703 // 0 ........................................................ size - 1 + 4
704 // [expression stack including arguments] [locals] [4 words] [parameters]
705 // |>------------ translation_size ------------<|
706
707 int frame_count = 0;
708 int jsframe_count = 0;
709 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
710 ++frame_count;
711 if (e->frame_type() == JS_FUNCTION) {
712 ++jsframe_count;
713 }
714 }
715 Translation translation(&translations_, frame_count, jsframe_count, zone());
716 WriteTranslation(environment, &translation);
717 int deoptimization_index = deoptimizations_.length();
718 int pc_offset = masm()->pc_offset();
719 environment->Register(deoptimization_index,
720 translation.index(),
721 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
722 deoptimizations_.Add(environment, environment->zone());
723 }
724 }
725
726
727 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
728 const char* detail,
729 Deoptimizer::BailoutType bailout_type) {
730 LEnvironment* environment = instr->environment();
731 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
732 DCHECK(environment->HasBeenRegistered());
733 int id = environment->deoptimization_index();
734 DCHECK(info()->IsOptimizing() || info()->IsStub());
735 Address entry =
736 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
737 if (entry == NULL) {
738 Abort(kBailoutWasNotPrepared);
739 return;
740 }
741
742 if (DeoptEveryNTimes()) {
743 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
744 Label no_deopt;
745 __ pushfq();
746 __ pushq(rax);
747 Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
748 __ movl(rax, count_operand);
749 __ subl(rax, Immediate(1));
750 __ j(not_zero, &no_deopt, Label::kNear);
751 if (FLAG_trap_on_deopt) __ int3();
752 __ movl(rax, Immediate(FLAG_deopt_every_n_times));
753 __ movl(count_operand, rax);
754 __ popq(rax);
755 __ popfq();
756 DCHECK(frame_is_built_);
757 __ call(entry, RelocInfo::RUNTIME_ENTRY);
758 __ bind(&no_deopt);
759 __ movl(count_operand, rax);
760 __ popq(rax);
761 __ popfq();
762 }
763
764 if (info()->ShouldTrapOnDeopt()) {
765 Label done;
766 if (cc != no_condition) {
767 __ j(NegateCondition(cc), &done, Label::kNear);
768 }
769 __ int3();
770 __ bind(&done);
771 }
772
773 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
774 instr->Mnemonic(), detail);
775 DCHECK(info()->IsStub() || frame_is_built_);
776 // Go through jump table if we need to handle condition, build frame, or
777 // restore caller doubles.
778 if (cc == no_condition && frame_is_built_ &&
779 !info()->saves_caller_doubles()) {
780 DeoptComment(reason);
781 __ call(entry, RelocInfo::RUNTIME_ENTRY);
782 } else {
783 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
784 !frame_is_built_);
785 // We often have several deopts to the same entry, reuse the last
786 // jump entry if this is the case.
787 if (jump_table_.is_empty() ||
788 !table_entry.IsEquivalentTo(jump_table_.last())) {
789 jump_table_.Add(table_entry, zone());
790 }
791 if (cc == no_condition) {
792 __ jmp(&jump_table_.last().label);
793 } else {
794 __ j(cc, &jump_table_.last().label);
795 }
796 }
797 }
798
799
800 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
801 const char* detail) {
802 Deoptimizer::BailoutType bailout_type = info()->IsStub()
803 ? Deoptimizer::LAZY
804 : Deoptimizer::EAGER;
805 DeoptimizeIf(cc, instr, detail, bailout_type);
806 }
807
808
809 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
810 int length = deoptimizations_.length();
811 if (length == 0) return;
812 Handle<DeoptimizationInputData> data =
813 DeoptimizationInputData::New(isolate(), length, TENURED);
814
815 Handle<ByteArray> translations =
816 translations_.CreateByteArray(isolate()->factory());
817 data->SetTranslationByteArray(*translations);
818 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
819 data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
820 if (info_->IsOptimizing()) {
821 // Reference to shared function info does not change between phases.
822 AllowDeferredHandleDereference allow_handle_dereference;
823 data->SetSharedFunctionInfo(*info_->shared_info());
824 } else {
825 data->SetSharedFunctionInfo(Smi::FromInt(0));
826 }
827
828 Handle<FixedArray> literals =
829 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
830 { AllowDeferredHandleDereference copy_handles;
831 for (int i = 0; i < deoptimization_literals_.length(); i++) {
832 literals->set(i, *deoptimization_literals_[i]);
833 }
834 data->SetLiteralArray(*literals);
835 }
836
837 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
838 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
839
840 // Populate the deoptimization entries.
841 for (int i = 0; i < length; i++) {
842 LEnvironment* env = deoptimizations_[i];
843 data->SetAstId(i, env->ast_id());
844 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
845 data->SetArgumentsStackHeight(i,
846 Smi::FromInt(env->arguments_stack_height()));
847 data->SetPc(i, Smi::FromInt(env->pc_offset()));
848 }
849 code->set_deoptimization_data(*data);
850 }
851
852
853 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
854 int result = deoptimization_literals_.length();
855 for (int i = 0; i < deoptimization_literals_.length(); ++i) {
856 if (deoptimization_literals_[i].is_identical_to(literal)) return i;
857 }
858 deoptimization_literals_.Add(literal, zone());
859 return result;
860 }
861
862
863 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
864 DCHECK(deoptimization_literals_.length() == 0);
865
866 const ZoneList<Handle<JSFunction> >* inlined_closures =
867 chunk()->inlined_closures();
868
869 for (int i = 0, length = inlined_closures->length();
870 i < length;
871 i++) {
872 DefineDeoptimizationLiteral(inlined_closures->at(i));
873 }
874
875 inlined_function_count_ = deoptimization_literals_.length();
876 }
877
878
879 void LCodeGen::RecordSafepointWithLazyDeopt(
880 LInstruction* instr, SafepointMode safepoint_mode, int argc) {
881 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
882 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
883 } else {
884 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
885 RecordSafepointWithRegisters(
886 instr->pointer_map(), argc, Safepoint::kLazyDeopt);
887 }
888 }
889
890
891 void LCodeGen::RecordSafepoint(
892 LPointerMap* pointers,
893 Safepoint::Kind kind,
894 int arguments,
895 Safepoint::DeoptMode deopt_mode) {
896 DCHECK(kind == expected_safepoint_kind_);
897
898 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
899
900 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
901 kind, arguments, deopt_mode);
902 for (int i = 0; i < operands->length(); i++) {
903 LOperand* pointer = operands->at(i);
904 if (pointer->IsStackSlot()) {
905 safepoint.DefinePointerSlot(pointer->index(), zone());
906 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
907 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
908 }
909 }
910 }
911
912
913 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
914 Safepoint::DeoptMode deopt_mode) {
915 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
916 }
917
918
919 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
920 LPointerMap empty_pointers(zone());
921 RecordSafepoint(&empty_pointers, deopt_mode);
922 }
923
924
925 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
926 int arguments,
927 Safepoint::DeoptMode deopt_mode) {
928 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
929 }
930
931
932 void LCodeGen::RecordAndWritePosition(int position) {
933 if (position == RelocInfo::kNoPosition) return;
934 masm()->positions_recorder()->RecordPosition(position);
935 masm()->positions_recorder()->WriteRecordedPositions();
936 }
937
938
939 static const char* LabelType(LLabel* label) {
940 if (label->is_loop_header()) return " (loop header)";
941 if (label->is_osr_entry()) return " (OSR entry)";
942 return "";
943 }
944
945
946 void LCodeGen::DoLabel(LLabel* label) {
947 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
948 current_instruction_,
949 label->hydrogen_value()->id(),
950 label->block_id(),
951 LabelType(label));
952 __ bind(label->label());
953 current_block_ = label->block_id();
954 DoGap(label);
955 }
956
957
958 void LCodeGen::DoParallelMove(LParallelMove* move) {
959 resolver_.Resolve(move);
960 }
961
962
963 void LCodeGen::DoGap(LGap* gap) {
964 for (int i = LGap::FIRST_INNER_POSITION;
965 i <= LGap::LAST_INNER_POSITION;
966 i++) {
967 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
968 LParallelMove* move = gap->GetParallelMove(inner_pos);
969 if (move != NULL) DoParallelMove(move);
970 }
971 }
972
973
974 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
975 DoGap(instr);
976 }
977
978
979 void LCodeGen::DoParameter(LParameter* instr) {
980 // Nothing to do.
981 }
982
983
984 void LCodeGen::DoCallStub(LCallStub* instr) {
985 DCHECK(ToRegister(instr->context()).is(rsi));
986 DCHECK(ToRegister(instr->result()).is(rax));
987 switch (instr->hydrogen()->major_key()) {
988 case CodeStub::RegExpExec: {
989 RegExpExecStub stub(isolate());
990 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
991 break;
992 }
993 case CodeStub::SubString: {
994 SubStringStub stub(isolate());
995 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
996 break;
997 }
998 case CodeStub::StringCompare: {
999 StringCompareStub stub(isolate());
1000 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1001 break;
1002 }
1003 default:
1004 UNREACHABLE();
1005 }
1006 }
1007
1008
1009 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1010 GenerateOsrPrologue();
1011 }
1012
1013
1014 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1015 Register dividend = ToRegister(instr->dividend());
1016 int32_t divisor = instr->divisor();
1017 DCHECK(dividend.is(ToRegister(instr->result())));
1018
1019 // Theoretically, a variation of the branch-free code for integer division by
1020 // a power of 2 (calculating the remainder via an additional multiplication
1021 // (which gets simplified to an 'and') and subtraction) should be faster, and
1022 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1023 // indicate that positive dividends are heavily favored, so the branching
1024 // version performs better.
1025 HMod* hmod = instr->hydrogen();
1026 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
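// |divisor| is a power of two here, so the expression above is just
// Abs(divisor) - 1 (e.g. divisor == -8 gives mask == 7). For a negative
// dividend the code below computes -((-dividend) & mask), which matches
// the sign-of-dividend semantics of the JS % operator (e.g. -5 % 8 == -5).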
1027 Label dividend_is_not_negative, done;
1028 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1029 __ testl(dividend, dividend);
1030 __ j(not_sign, &dividend_is_not_negative, Label::kNear);
1031 // Note that this is correct even for kMinInt operands.
1032 __ negl(dividend);
1033 __ andl(dividend, Immediate(mask));
1034 __ negl(dividend);
1035 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1036 DeoptimizeIf(zero, instr, "minus zero");
1037 }
1038 __ jmp(&done, Label::kNear);
1039 }
1040
1041 __ bind(&dividend_is_not_negative);
1042 __ andl(dividend, Immediate(mask));
1043 __ bind(&done);
1044 }
1045
1046
1047 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1048 Register dividend = ToRegister(instr->dividend());
1049 int32_t divisor = instr->divisor();
1050 DCHECK(ToRegister(instr->result()).is(rax));
1051
1052 if (divisor == 0) {
1053 DeoptimizeIf(no_condition, instr, "division by zero");
1054 return;
1055 }
1056
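// TruncatingDiv leaves dividend / Abs(divisor) (truncated) in rdx;
// multiplying back and subtracting from the dividend yields the remainder
// in rax. Abs(divisor) suffices because the remainder takes the sign of
// the dividend.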
1057 __ TruncatingDiv(dividend, Abs(divisor));
1058 __ imull(rdx, rdx, Immediate(Abs(divisor)));
1059 __ movl(rax, dividend);
1060 __ subl(rax, rdx);
1061
1062 // Check for negative zero.
1063 HMod* hmod = instr->hydrogen();
1064 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1065 Label remainder_not_zero;
1066 __ j(not_zero, &remainder_not_zero, Label::kNear);
1067 __ cmpl(dividend, Immediate(0));
1068 DeoptimizeIf(less, instr, "minus zero");
1069 __ bind(&remainder_not_zero);
1070 }
1071 }
1072
1073
1074 void LCodeGen::DoModI(LModI* instr) {
1075 HMod* hmod = instr->hydrogen();
1076
1077 Register left_reg = ToRegister(instr->left());
1078 DCHECK(left_reg.is(rax));
1079 Register right_reg = ToRegister(instr->right());
1080 DCHECK(!right_reg.is(rax));
1081 DCHECK(!right_reg.is(rdx));
1082 Register result_reg = ToRegister(instr->result());
1083 DCHECK(result_reg.is(rdx));
1084
1085 Label done;
1086 // Check for x % 0, idiv would signal a divide error. We have to
1087 // deopt in this case because we can't return a NaN.
1088 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1089 __ testl(right_reg, right_reg);
1090 DeoptimizeIf(zero, instr, "division by zero");
1091 }
1092
1093 // Check for kMinInt % -1, idiv would signal a divide error. We
1094 // have to deopt if we care about -0, because we can't return that.
1095 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1096 Label no_overflow_possible;
1097 __ cmpl(left_reg, Immediate(kMinInt));
1098 __ j(not_zero, &no_overflow_possible, Label::kNear);
1099 __ cmpl(right_reg, Immediate(-1));
1100 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1101 DeoptimizeIf(equal, instr, "minus zero");
1102 } else {
1103 __ j(not_equal, &no_overflow_possible, Label::kNear);
1104 __ Set(result_reg, 0);
1105 __ jmp(&done, Label::kNear);
1106 }
1107 __ bind(&no_overflow_possible);
1108 }
1109
1110 // Sign extend dividend in eax into edx:eax, since we are using only the low
1111 // 32 bits of the values.
1112 __ cdq();
1113
1114 // If we care about -0, test if the dividend is <0 and the result is 0.
1115 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1116 Label positive_left;
1117 __ testl(left_reg, left_reg);
1118 __ j(not_sign, &positive_left, Label::kNear);
1119 __ idivl(right_reg);
1120 __ testl(result_reg, result_reg);
1121 DeoptimizeIf(zero, instr, "minus zero");
1122 __ jmp(&done, Label::kNear);
1123 __ bind(&positive_left);
1124 }
1125 __ idivl(right_reg);
1126 __ bind(&done);
1127 }
1128
1129
1130 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1131 Register dividend = ToRegister(instr->dividend());
1132 int32_t divisor = instr->divisor();
1133 DCHECK(dividend.is(ToRegister(instr->result())));
1134
1135 // If the divisor is positive, things are easy: There can be no deopts and we
1136 // can simply do an arithmetic right shift.
1137 if (divisor == 1) return;
1138 int32_t shift = WhichPowerOf2Abs(divisor);
1139 if (divisor > 1) {
1140 __ sarl(dividend, Immediate(shift));
1141 return;
1142 }
1143
1144 // If the divisor is negative, we have to negate and handle edge cases.
1145 __ negl(dividend);
1146 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1147 DeoptimizeIf(zero, instr, "minus zero");
1148 }
1149
1150 // Dividing by -1 is basically negation, unless we overflow.
1151 if (divisor == -1) {
1152 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1153 DeoptimizeIf(overflow, instr, "overflow");
1154 }
1155 return;
1156 }
1157
1158 // If the negation could not overflow, simply shifting is OK.
1159 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1160 __ sarl(dividend, Immediate(shift));
1161 return;
1162 }
1163
1164 Label not_kmin_int, done;
1165 __ j(no_overflow, &not_kmin_int, Label::kNear);
1166 __ movl(dividend, Immediate(kMinInt / divisor));
1167 __ jmp(&done, Label::kNear);
1168 __ bind(&not_kmin_int);
1169 __ sarl(dividend, Immediate(shift));
1170 __ bind(&done);
1171 }
1172
1173
1174 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1175 Register dividend = ToRegister(instr->dividend());
1176 int32_t divisor = instr->divisor();
1177 DCHECK(ToRegister(instr->result()).is(rdx));
1178
1179 if (divisor == 0) {
1180 DeoptimizeIf(no_condition, instr, "division by zero");
1181 return;
1182 }
1183
1184 // Check for (0 / -x) that will produce negative zero.
1185 HMathFloorOfDiv* hdiv = instr->hydrogen();
1186 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1187 __ testl(dividend, dividend);
1188 DeoptimizeIf(zero, instr, "minus zero");
1189 }
1190
1191 // Easy case: We need no dynamic check for the dividend and the flooring
1192 // division is the same as the truncating division.
1193 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1194 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1195 __ TruncatingDiv(dividend, Abs(divisor));
1196 if (divisor < 0) __ negl(rdx);
1197 return;
1198 }
1199
1200 // In the general case we may need to adjust before and after the truncating
1201 // division to get a flooring division.
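// Concretely: when dividend and divisor have opposite signs, compute
// (dividend +/- 1) / divisor with truncation and then subtract one,
// e.g. floor(-7 / 2) = (-7 + 1) / 2 - 1 = -4.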
1202 Register temp = ToRegister(instr->temp3());
1203 DCHECK(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
1204 Label needs_adjustment, done;
1205 __ cmpl(dividend, Immediate(0));
1206 __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
1207 __ TruncatingDiv(dividend, Abs(divisor));
1208 if (divisor < 0) __ negl(rdx);
1209 __ jmp(&done, Label::kNear);
1210 __ bind(&needs_adjustment);
1211 __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
1212 __ TruncatingDiv(temp, Abs(divisor));
1213 if (divisor < 0) __ negl(rdx);
1214 __ decl(rdx);
1215 __ bind(&done);
1216 }
1217
1218
1219 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1220 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1221 HBinaryOperation* hdiv = instr->hydrogen();
1222 Register dividend = ToRegister(instr->dividend());
1223 Register divisor = ToRegister(instr->divisor());
1224 Register remainder = ToRegister(instr->temp());
1225 Register result = ToRegister(instr->result());
1226 DCHECK(dividend.is(rax));
1227 DCHECK(remainder.is(rdx));
1228 DCHECK(result.is(rax));
1229 DCHECK(!divisor.is(rax));
1230 DCHECK(!divisor.is(rdx));
1231
1232 // Check for x / 0.
1233 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1234 __ testl(divisor, divisor);
1235 DeoptimizeIf(zero, instr, "division by zero");
1236 }
1237
1238 // Check for (0 / -x) that will produce negative zero.
1239 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1240 Label dividend_not_zero;
1241 __ testl(dividend, dividend);
1242 __ j(not_zero, &dividend_not_zero, Label::kNear);
1243 __ testl(divisor, divisor);
1244 DeoptimizeIf(sign, instr, "minus zero");
1245 __ bind(&dividend_not_zero);
1246 }
1247
1248 // Check for (kMinInt / -1).
1249 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1250 Label dividend_not_min_int;
1251 __ cmpl(dividend, Immediate(kMinInt));
1252 __ j(not_zero, &dividend_not_min_int, Label::kNear);
1253 __ cmpl(divisor, Immediate(-1));
1254 DeoptimizeIf(zero, instr, "overflow");
1255 __ bind(&dividend_not_min_int);
1256 }
1257
1258 // Sign extend to rdx (= remainder).
1259 __ cdq();
1260 __ idivl(divisor);
1261
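// idivl truncates towards zero. To get flooring semantics, subtract one
// from the quotient whenever the remainder is non-zero and has the
// opposite sign of the divisor (remainder ^ divisor < 0),
// e.g. -7 / 2: quotient -3, remainder -1 -> result -4.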
1262 Label done;
1263 __ testl(remainder, remainder);
1264 __ j(zero, &done, Label::kNear);
1265 __ xorl(remainder, divisor);
1266 __ sarl(remainder, Immediate(31));
1267 __ addl(result, remainder);
1268 __ bind(&done);
1269 }
1270
1271
1272 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1273 Register dividend = ToRegister(instr->dividend());
1274 int32_t divisor = instr->divisor();
1275 Register result = ToRegister(instr->result());
1276 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1277 DCHECK(!result.is(dividend));
1278
1279 // Check for (0 / -x) that will produce negative zero.
1280 HDiv* hdiv = instr->hydrogen();
1281 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1282 __ testl(dividend, dividend);
1283 DeoptimizeIf(zero, instr, "minus zero");
1284 }
1285 // Check for (kMinInt / -1).
1286 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1287 __ cmpl(dividend, Immediate(kMinInt));
1288 DeoptimizeIf(zero, instr, "overflow");
1289 }
1290 // Deoptimize if remainder will not be 0.
1291 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1292 divisor != 1 && divisor != -1) {
1293 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1294 __ testl(dividend, Immediate(mask));
1295 DeoptimizeIf(not_zero, instr, "lost precision");
1296 }
1297 __ Move(result, dividend);
1298 int32_t shift = WhichPowerOf2Abs(divisor);
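// The shift sequence below adds a bias of (2^shift - 1) to negative
// dividends before the arithmetic shift, turning the flooring sar into a
// truncating division, e.g. -5 / 4: (-5 + 3) >> 2 == -1.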
1299 if (shift > 0) {
1300 // The arithmetic shift is always OK, the 'if' is an optimization only.
1301 if (shift > 1) __ sarl(result, Immediate(31));
1302 __ shrl(result, Immediate(32 - shift));
1303 __ addl(result, dividend);
1304 __ sarl(result, Immediate(shift));
1305 }
1306 if (divisor < 0) __ negl(result);
1307 }
1308
1309
1310 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1311 Register dividend = ToRegister(instr->dividend());
1312 int32_t divisor = instr->divisor();
1313 DCHECK(ToRegister(instr->result()).is(rdx));
1314
1315 if (divisor == 0) {
1316 DeoptimizeIf(no_condition, instr, "division by zero");
1317 return;
1318 }
1319
1320 // Check for (0 / -x) that will produce negative zero.
1321 HDiv* hdiv = instr->hydrogen();
1322 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1323 __ testl(dividend, dividend);
1324 DeoptimizeIf(zero, instr, "minus zero");
1325 }
1326
1327 __ TruncatingDiv(dividend, Abs(divisor));
1328 if (divisor < 0) __ negl(rdx);
1329
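// Unless all uses truncate, verify that the division was exact: multiply
// the quotient back and deoptimize if it does not reproduce the dividend.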
1330 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1331 __ movl(rax, rdx);
1332 __ imull(rax, rax, Immediate(divisor));
1333 __ subl(rax, dividend);
1334 DeoptimizeIf(not_equal, instr, "lost precision");
1335 }
1336 }
1337
1338
1339 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1340 void LCodeGen::DoDivI(LDivI* instr) {
1341 HBinaryOperation* hdiv = instr->hydrogen();
1342 Register dividend = ToRegister(instr->dividend());
1343 Register divisor = ToRegister(instr->divisor());
1344 Register remainder = ToRegister(instr->temp());
1345 DCHECK(dividend.is(rax));
1346 DCHECK(remainder.is(rdx));
1347 DCHECK(ToRegister(instr->result()).is(rax));
1348 DCHECK(!divisor.is(rax));
1349 DCHECK(!divisor.is(rdx));
1350
1351 // Check for x / 0.
1352 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1353 __ testl(divisor, divisor);
1354 DeoptimizeIf(zero, instr, "division by zero");
1355 }
1356
1357 // Check for (0 / -x) that will produce negative zero.
1358 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1359 Label dividend_not_zero;
1360 __ testl(dividend, dividend);
1361 __ j(not_zero, &dividend_not_zero, Label::kNear);
1362 __ testl(divisor, divisor);
1363 DeoptimizeIf(sign, instr, "minus zero");
1364 __ bind(&dividend_not_zero);
1365 }
1366
1367 // Check for (kMinInt / -1).
1368 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1369 Label dividend_not_min_int;
1370 __ cmpl(dividend, Immediate(kMinInt));
1371 __ j(not_zero, &dividend_not_min_int, Label::kNear);
1372 __ cmpl(divisor, Immediate(-1));
1373 DeoptimizeIf(zero, instr, "overflow");
1374 __ bind(&dividend_not_min_int);
1375 }
1376
1377 // Sign extend to rdx (= remainder).
1378 __ cdq();
1379 __ idivl(divisor);
1380
1381 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1382 // Deoptimize if remainder is not 0.
1383 __ testl(remainder, remainder);
1384 DeoptimizeIf(not_zero, instr, "lost precision");
1385 }
1386 }
1387
1388
1389 void LCodeGen::DoMulI(LMulI* instr) {
1390 Register left = ToRegister(instr->left());
1391 LOperand* right = instr->right();
1392
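// If a minus-zero check is needed, keep a copy of the original left
// operand in kScratchRegister: the multiplication below clobbers 'left',
// and the copy is later tested (alone or OR'ed with 'right') to decide
// whether a zero result should really have been -0.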
1393 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1394 if (instr->hydrogen_value()->representation().IsSmi()) {
1395 __ movp(kScratchRegister, left);
1396 } else {
1397 __ movl(kScratchRegister, left);
1398 }
1399 }
1400
1401 bool can_overflow =
1402 instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1403 if (right->IsConstantOperand()) {
1404 int32_t right_value = ToInteger32(LConstantOperand::cast(right));
1405 if (right_value == -1) {
1406 __ negl(left);
1407 } else if (right_value == 0) {
1408 __ xorl(left, left);
1409 } else if (right_value == 2) {
1410 __ addl(left, left);
1411 } else if (!can_overflow) {
1412 // If the multiplication is known to not overflow, we
1413 // can use operations that don't set the overflow flag
1414 // correctly.
1415 switch (right_value) {
1416 case 1:
1417 // Do nothing.
1418 break;
1419 case 3:
1420 __ leal(left, Operand(left, left, times_2, 0));
1421 break;
1422 case 4:
1423 __ shll(left, Immediate(2));
1424 break;
1425 case 5:
1426 __ leal(left, Operand(left, left, times_4, 0));
1427 break;
1428 case 8:
1429 __ shll(left, Immediate(3));
1430 break;
1431 case 9:
1432 __ leal(left, Operand(left, left, times_8, 0));
1433 break;
1434 case 16:
1435 __ shll(left, Immediate(4));
1436 break;
1437 default:
1438 __ imull(left, left, Immediate(right_value));
1439 break;
1440 }
1441 } else {
1442 __ imull(left, left, Immediate(right_value));
1443 }
1444 } else if (right->IsStackSlot()) {
1445 if (instr->hydrogen_value()->representation().IsSmi()) {
1446 __ SmiToInteger64(left, left);
1447 __ imulp(left, ToOperand(right));
1448 } else {
1449 __ imull(left, ToOperand(right));
1450 }
1451 } else {
1452 if (instr->hydrogen_value()->representation().IsSmi()) {
1453 __ SmiToInteger64(left, left);
1454 __ imulp(left, ToRegister(right));
1455 } else {
1456 __ imull(left, ToRegister(right));
1457 }
1458 }
1459
1460 if (can_overflow) {
1461 DeoptimizeIf(overflow, instr, "overflow");
1462 }
1463
1464 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1465 // Bail out if the result is supposed to be negative zero.
1466 Label done;
1467 if (instr->hydrogen_value()->representation().IsSmi()) {
1468 __ testp(left, left);
1469 } else {
1470 __ testl(left, left);
1471 }
1472 __ j(not_zero, &done, Label::kNear);
1473 if (right->IsConstantOperand()) {
1474 // Constant can't be represented as 32-bit Smi due to immediate size
1475 // limit.
1476 DCHECK(SmiValuesAre32Bits()
1477 ? !instr->hydrogen_value()->representation().IsSmi()
1478 : SmiValuesAre31Bits());
1479 if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1480 DeoptimizeIf(no_condition, instr, "minus zero");
1481 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1482 __ cmpl(kScratchRegister, Immediate(0));
1483 DeoptimizeIf(less, instr, "minus zero");
1484 }
1485 } else if (right->IsStackSlot()) {
1486 if (instr->hydrogen_value()->representation().IsSmi()) {
1487 __ orp(kScratchRegister, ToOperand(right));
1488 } else {
1489 __ orl(kScratchRegister, ToOperand(right));
1490 }
1491 DeoptimizeIf(sign, instr, "minus zero");
1492 } else {
1493 // Test the non-zero operand for negative sign.
1494 if (instr->hydrogen_value()->representation().IsSmi()) {
1495 __ orp(kScratchRegister, ToRegister(right));
1496 } else {
1497 __ orl(kScratchRegister, ToRegister(right));
1498 }
1499 DeoptimizeIf(sign, instr, "minus zero");
1500 }
1501 __ bind(&done);
1502 }
1503 }
1504
1505
1506 void LCodeGen::DoBitI(LBitI* instr) {
1507 LOperand* left = instr->left();
1508 LOperand* right = instr->right();
1509 DCHECK(left->Equals(instr->result()));
1510 DCHECK(left->IsRegister());
1511
1512 if (right->IsConstantOperand()) {
1513 int32_t right_operand =
1514 ToRepresentation(LConstantOperand::cast(right),
1515 instr->hydrogen()->right()->representation());
1516 switch (instr->op()) {
1517 case Token::BIT_AND:
1518 __ andl(ToRegister(left), Immediate(right_operand));
1519 break;
1520 case Token::BIT_OR:
1521 __ orl(ToRegister(left), Immediate(right_operand));
1522 break;
1523 case Token::BIT_XOR:
1524 if (right_operand == int32_t(~0)) {
1525 __ notl(ToRegister(left));
1526 } else {
1527 __ xorl(ToRegister(left), Immediate(right_operand));
1528 }
1529 break;
1530 default:
1531 UNREACHABLE();
1532 break;
1533 }
1534 } else if (right->IsStackSlot()) {
1535 switch (instr->op()) {
1536 case Token::BIT_AND:
1537 if (instr->IsInteger32()) {
1538 __ andl(ToRegister(left), ToOperand(right));
1539 } else {
1540 __ andp(ToRegister(left), ToOperand(right));
1541 }
1542 break;
1543 case Token::BIT_OR:
1544 if (instr->IsInteger32()) {
1545 __ orl(ToRegister(left), ToOperand(right));
1546 } else {
1547 __ orp(ToRegister(left), ToOperand(right));
1548 }
1549 break;
1550 case Token::BIT_XOR:
1551 if (instr->IsInteger32()) {
1552 __ xorl(ToRegister(left), ToOperand(right));
1553 } else {
1554 __ xorp(ToRegister(left), ToOperand(right));
1555 }
1556 break;
1557 default:
1558 UNREACHABLE();
1559 break;
1560 }
1561 } else {
1562 DCHECK(right->IsRegister());
1563 switch (instr->op()) {
1564 case Token::BIT_AND:
1565 if (instr->IsInteger32()) {
1566 __ andl(ToRegister(left), ToRegister(right));
1567 } else {
1568 __ andp(ToRegister(left), ToRegister(right));
1569 }
1570 break;
1571 case Token::BIT_OR:
1572 if (instr->IsInteger32()) {
1573 __ orl(ToRegister(left), ToRegister(right));
1574 } else {
1575 __ orp(ToRegister(left), ToRegister(right));
1576 }
1577 break;
1578 case Token::BIT_XOR:
1579 if (instr->IsInteger32()) {
1580 __ xorl(ToRegister(left), ToRegister(right));
1581 } else {
1582 __ xorp(ToRegister(left), ToRegister(right));
1583 }
1584 break;
1585 default:
1586 UNREACHABLE();
1587 break;
1588 }
1589 }
1590 }
1591
1592
1593 void LCodeGen::DoShiftI(LShiftI* instr) {
1594 LOperand* left = instr->left();
1595 LOperand* right = instr->right();
1596 DCHECK(left->Equals(instr->result()));
1597 DCHECK(left->IsRegister());
1598 if (right->IsRegister()) {
1599 DCHECK(ToRegister(right).is(rcx));
1600
1601 switch (instr->op()) {
1602 case Token::ROR:
1603 __ rorl_cl(ToRegister(left));
1604 break;
1605 case Token::SAR:
1606 __ sarl_cl(ToRegister(left));
1607 break;
1608 case Token::SHR:
1609 __ shrl_cl(ToRegister(left));
1610 if (instr->can_deopt()) {
1611 __ testl(ToRegister(left), ToRegister(left));
1612 DeoptimizeIf(negative, instr, "negative value");
1613 }
1614 break;
1615 case Token::SHL:
1616 __ shll_cl(ToRegister(left));
1617 break;
1618 default:
1619 UNREACHABLE();
1620 break;
1621 }
1622 } else {
1623 int32_t value = ToInteger32(LConstantOperand::cast(right));
1624 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1625 switch (instr->op()) {
1626 case Token::ROR:
1627 if (shift_count != 0) {
1628 __ rorl(ToRegister(left), Immediate(shift_count));
1629 }
1630 break;
1631 case Token::SAR:
1632 if (shift_count != 0) {
1633 __ sarl(ToRegister(left), Immediate(shift_count));
1634 }
1635 break;
1636 case Token::SHR:
1637 if (shift_count != 0) {
1638 __ shrl(ToRegister(left), Immediate(shift_count));
1639 } else if (instr->can_deopt()) {
1640 __ testl(ToRegister(left), ToRegister(left));
1641 DeoptimizeIf(negative, instr, "negative value");
1642 }
1643 break;
1644 case Token::SHL:
1645 if (shift_count != 0) {
1646 if (instr->hydrogen_value()->representation().IsSmi()) {
1647 if (SmiValuesAre32Bits()) {
1648 __ shlp(ToRegister(left), Immediate(shift_count));
1649 } else {
1650 DCHECK(SmiValuesAre31Bits());
1651 if (instr->can_deopt()) {
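// With 31-bit Smis a left shift can leave the Smi range. Shift by
// shift_count - 1 as an int32 first and let Integer32ToSmi perform the
// final doubling into the Smi encoding, so an out-of-range result shows
// up in the overflow flag checked below.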
1652 if (shift_count != 1) {
1653 __ shll(ToRegister(left), Immediate(shift_count - 1));
1654 }
1655 __ Integer32ToSmi(ToRegister(left), ToRegister(left));
1656 DeoptimizeIf(overflow, instr, "overflow");
1657 } else {
1658 __ shll(ToRegister(left), Immediate(shift_count));
1659 }
1660 }
1661 } else {
1662 __ shll(ToRegister(left), Immediate(shift_count));
1663 }
1664 }
1665 break;
1666 default:
1667 UNREACHABLE();
1668 break;
1669 }
1670 }
1671 }
1672
1673
1674 void LCodeGen::DoSubI(LSubI* instr) {
1675 LOperand* left = instr->left();
1676 LOperand* right = instr->right();
1677 DCHECK(left->Equals(instr->result()));
1678
1679 if (right->IsConstantOperand()) {
1680 int32_t right_operand =
1681 ToRepresentation(LConstantOperand::cast(right),
1682 instr->hydrogen()->right()->representation());
1683 __ subl(ToRegister(left), Immediate(right_operand));
1684 } else if (right->IsRegister()) {
1685 if (instr->hydrogen_value()->representation().IsSmi()) {
1686 __ subp(ToRegister(left), ToRegister(right));
1687 } else {
1688 __ subl(ToRegister(left), ToRegister(right));
1689 }
1690 } else {
1691 if (instr->hydrogen_value()->representation().IsSmi()) {
1692 __ subp(ToRegister(left), ToOperand(right));
1693 } else {
1694 __ subl(ToRegister(left), ToOperand(right));
1695 }
1696 }
1697
1698 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1699 DeoptimizeIf(overflow, instr, "overflow");
1700 }
1701 }
1702
1703
1704 void LCodeGen::DoConstantI(LConstantI* instr) {
1705 Register dst = ToRegister(instr->result());
1706 if (instr->value() == 0) {
1707 __ xorl(dst, dst);
1708 } else {
1709 __ movl(dst, Immediate(instr->value()));
1710 }
1711 }
1712
1713
1714 void LCodeGen::DoConstantS(LConstantS* instr) {
1715 __ Move(ToRegister(instr->result()), instr->value());
1716 }
1717
1718
1719 void LCodeGen::DoConstantD(LConstantD* instr) {
1720 DCHECK(instr->result()->IsDoubleRegister());
1721 XMMRegister res = ToDoubleRegister(instr->result());
1722 double v = instr->value();
1723 uint64_t int_val = bit_cast<uint64_t, double>(v);
1724 // Use xor to produce +0.0 in a fast and compact way, but avoid doing
1725 // so if the constant is -0.0.
1726 if (int_val == 0) {
1727 __ xorps(res, res);
1728 } else {
1729 Register tmp = ToRegister(instr->temp());
1730 __ Set(tmp, int_val);
1731 __ movq(res, tmp);
1732 }
1733 }
1734
1735
1736 void LCodeGen::DoConstantE(LConstantE* instr) {
1737 __ LoadAddress(ToRegister(instr->result()), instr->value());
1738 }
1739
1740
1741 void LCodeGen::DoConstantT(LConstantT* instr) {
1742 Handle<Object> object = instr->value(isolate());
1743 AllowDeferredHandleDereference smi_check;
1744 __ Move(ToRegister(instr->result()), object);
1745 }
1746
1747
1748 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1749 Register result = ToRegister(instr->result());
1750 Register map = ToRegister(instr->value());
1751 __ EnumLength(result, map);
1752 }
1753
1754
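// Date fields below JSDate::kFirstUncachedField are cached on the object and
// are only valid while the isolate-wide date cache stamp matches the stamp
// stored in the JSDate; otherwise the value is recomputed through the
// get_date_field_function C call. Field 0 (the time value) never needs the
// cache check.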
1755 void LCodeGen::DoDateField(LDateField* instr) {
1756 Register object = ToRegister(instr->date());
1757 Register result = ToRegister(instr->result());
1758 Smi* index = instr->index();
1759 Label runtime, done, not_date_object;
1760 DCHECK(object.is(result));
1761 DCHECK(object.is(rax));
1762
1763 Condition cc = masm()->CheckSmi(object);
1764 DeoptimizeIf(cc, instr, "Smi");
1765 __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
1766 DeoptimizeIf(not_equal, instr, "not a date object");
1767
1768 if (index->value() == 0) {
1769 __ movp(result, FieldOperand(object, JSDate::kValueOffset));
1770 } else {
1771 if (index->value() < JSDate::kFirstUncachedField) {
1772 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1773 Operand stamp_operand = __ ExternalOperand(stamp);
1774 __ movp(kScratchRegister, stamp_operand);
1775 __ cmpp(kScratchRegister, FieldOperand(object,
1776 JSDate::kCacheStampOffset));
1777 __ j(not_equal, &runtime, Label::kNear);
1778 __ movp(result, FieldOperand(object, JSDate::kValueOffset +
1779 kPointerSize * index->value()));
1780 __ jmp(&done, Label::kNear);
1781 }
1782 __ bind(&runtime);
1783 __ PrepareCallCFunction(2);
1784 __ movp(arg_reg_1, object);
1785 __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
1786 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1787 __ bind(&done);
1788 }
1789 }
1790
1791
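// Builds the memory operand for a character slot in a sequential string. A
// constant index is folded into the displacement (scaled by two for two-byte
// strings); a register index is applied with a scale factor of one or two
// depending on the encoding.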
1792 Operand LCodeGen::BuildSeqStringOperand(Register string,
1793 LOperand* index,
1794 String::Encoding encoding) {
1795 if (index->IsConstantOperand()) {
1796 int offset = ToInteger32(LConstantOperand::cast(index));
1797 if (encoding == String::TWO_BYTE_ENCODING) {
1798 offset *= kUC16Size;
1799 }
1800 STATIC_ASSERT(kCharSize == 1);
1801 return FieldOperand(string, SeqString::kHeaderSize + offset);
1802 }
1803 return FieldOperand(
1804 string, ToRegister(index),
1805 encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
1806 SeqString::kHeaderSize);
1807 }
1808
1809
1810 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1811 String::Encoding encoding = instr->hydrogen()->encoding();
1812 Register result = ToRegister(instr->result());
1813 Register string = ToRegister(instr->string());
1814
1815 if (FLAG_debug_code) {
1816 __ Push(string);
1817 __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
1818 __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));
1819
1820 __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
1821 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1822 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1823 __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
1824 ? one_byte_seq_type : two_byte_seq_type));
1825 __ Check(equal, kUnexpectedStringType);
1826 __ Pop(string);
1827 }
1828
1829 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1830 if (encoding == String::ONE_BYTE_ENCODING) {
1831 __ movzxbl(result, operand);
1832 } else {
1833 __ movzxwl(result, operand);
1834 }
1835 }
1836
1837
1838 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1839 String::Encoding encoding = instr->hydrogen()->encoding();
1840 Register string = ToRegister(instr->string());
1841
1842 if (FLAG_debug_code) {
1843 Register value = ToRegister(instr->value());
1844 Register index = ToRegister(instr->index());
1845 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1846 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1847 int encoding_mask =
1848 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1849 ? one_byte_seq_type : two_byte_seq_type;
1850 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1851 }
1852
1853 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1854 if (instr->value()->IsConstantOperand()) {
1855 int value = ToInteger32(LConstantOperand::cast(instr->value()));
1856 DCHECK_LE(0, value);
1857 if (encoding == String::ONE_BYTE_ENCODING) {
1858 DCHECK_LE(value, String::kMaxOneByteCharCode);
1859 __ movb(operand, Immediate(value));
1860 } else {
1861 DCHECK_LE(value, String::kMaxUtf16CodeUnit);
1862 __ movw(operand, Immediate(value));
1863 }
1864 } else {
1865 Register value = ToRegister(instr->value());
1866 if (encoding == String::ONE_BYTE_ENCODING) {
1867 __ movb(operand, value);
1868 } else {
1869 __ movw(operand, value);
1870 }
1871 }
1872 }
1873
1874
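// Additions are emitted either as lea (when the result register differs from
// the left input, leaving both inputs intact) or as an in-place add. Only the
// in-place form is followed by an overflow deopt; the lea form is presumably
// reserved for adds that cannot overflow.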
1875 void LCodeGen::DoAddI(LAddI* instr) {
1876 LOperand* left = instr->left();
1877 LOperand* right = instr->right();
1878
1879 Representation target_rep = instr->hydrogen()->representation();
1880 bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
1881
1882 if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
1883 if (right->IsConstantOperand()) {
1884 // No support for smi-immediates for 32-bit SMI.
1885 DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
1886 int32_t offset =
1887 ToRepresentation(LConstantOperand::cast(right),
1888 instr->hydrogen()->right()->representation());
1889 if (is_p) {
1890 __ leap(ToRegister(instr->result()),
1891 MemOperand(ToRegister(left), offset));
1892 } else {
1893 __ leal(ToRegister(instr->result()),
1894 MemOperand(ToRegister(left), offset));
1895 }
1896 } else {
1897 Operand address(ToRegister(left), ToRegister(right), times_1, 0);
1898 if (is_p) {
1899 __ leap(ToRegister(instr->result()), address);
1900 } else {
1901 __ leal(ToRegister(instr->result()), address);
1902 }
1903 }
1904 } else {
1905 if (right->IsConstantOperand()) {
1906 // No support for smi-immediates for 32-bit SMI.
1907 DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
1908 int32_t right_operand =
1909 ToRepresentation(LConstantOperand::cast(right),
1910 instr->hydrogen()->right()->representation());
1911 if (is_p) {
1912 __ addp(ToRegister(left), Immediate(right_operand));
1913 } else {
1914 __ addl(ToRegister(left), Immediate(right_operand));
1915 }
1916 } else if (right->IsRegister()) {
1917 if (is_p) {
1918 __ addp(ToRegister(left), ToRegister(right));
1919 } else {
1920 __ addl(ToRegister(left), ToRegister(right));
1921 }
1922 } else {
1923 if (is_p) {
1924 __ addp(ToRegister(left), ToOperand(right));
1925 } else {
1926 __ addl(ToRegister(left), ToOperand(right));
1927 }
1928 }
1929 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1930 DeoptimizeIf(overflow, instr, "overflow");
1931 }
1932 }
1933 }
1934
1935
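// For integer and smi inputs min/max is a compare followed by a conditional
// move of the right operand. The double path below handles the two cases
// ucomisd cannot decide by flags alone: a NaN operand (the NaN is returned,
// matching Math.min/max) and equal operands that are +0 and -0, where min
// must produce -0 (sign bits are OR'ed via orps) and max must produce +0
// (addsd cancels the signs).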
1936 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1937 LOperand* left = instr->left();
1938 LOperand* right = instr->right();
1939 DCHECK(left->Equals(instr->result()));
1940 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1941 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1942 Label return_left;
1943 Condition condition = (operation == HMathMinMax::kMathMin)
1944 ? less_equal
1945 : greater_equal;
1946 Register left_reg = ToRegister(left);
1947 if (right->IsConstantOperand()) {
1948 Immediate right_imm = Immediate(
1949 ToRepresentation(LConstantOperand::cast(right),
1950 instr->hydrogen()->right()->representation()));
1951 DCHECK(SmiValuesAre32Bits()
1952 ? !instr->hydrogen()->representation().IsSmi()
1953 : SmiValuesAre31Bits());
1954 __ cmpl(left_reg, right_imm);
1955 __ j(condition, &return_left, Label::kNear);
1956 __ movp(left_reg, right_imm);
1957 } else if (right->IsRegister()) {
1958 Register right_reg = ToRegister(right);
1959 if (instr->hydrogen_value()->representation().IsSmi()) {
1960 __ cmpp(left_reg, right_reg);
1961 } else {
1962 __ cmpl(left_reg, right_reg);
1963 }
1964 __ j(condition, &return_left, Label::kNear);
1965 __ movp(left_reg, right_reg);
1966 } else {
1967 Operand right_op = ToOperand(right);
1968 if (instr->hydrogen_value()->representation().IsSmi()) {
1969 __ cmpp(left_reg, right_op);
1970 } else {
1971 __ cmpl(left_reg, right_op);
1972 }
1973 __ j(condition, &return_left, Label::kNear);
1974 __ movp(left_reg, right_op);
1975 }
1976 __ bind(&return_left);
1977 } else {
1978 DCHECK(instr->hydrogen()->representation().IsDouble());
1979 Label check_nan_left, check_zero, return_left, return_right;
1980 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
1981 XMMRegister left_reg = ToDoubleRegister(left);
1982 XMMRegister right_reg = ToDoubleRegister(right);
1983 __ ucomisd(left_reg, right_reg);
1984 __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
1985 __ j(equal, &check_zero, Label::kNear); // left == right.
1986 __ j(condition, &return_left, Label::kNear);
1987 __ jmp(&return_right, Label::kNear);
1988
1989 __ bind(&check_zero);
1990 XMMRegister xmm_scratch = double_scratch0();
1991 __ xorps(xmm_scratch, xmm_scratch);
1992 __ ucomisd(left_reg, xmm_scratch);
1993 __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
1994 // At this point, both left and right are either 0 or -0.
1995 if (operation == HMathMinMax::kMathMin) {
1996 __ orps(left_reg, right_reg);
1997 } else {
1998 // Since we operate on +0 and/or -0, addsd and andpd have the same effect.
1999 __ addsd(left_reg, right_reg);
2000 }
2001 __ jmp(&return_left, Label::kNear);
2002
2003 __ bind(&check_nan_left);
2004 __ ucomisd(left_reg, left_reg); // NaN check.
2005 __ j(parity_even, &return_left, Label::kNear);
2006 __ bind(&return_right);
2007 __ movaps(left_reg, right_reg);
2008
2009 __ bind(&return_left);
2010 }
2011 }
2012
2013
2014 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2015 XMMRegister left = ToDoubleRegister(instr->left());
2016 XMMRegister right = ToDoubleRegister(instr->right());
2017 XMMRegister result = ToDoubleRegister(instr->result());
2018 // All operations except MOD are computed in-place.
2019 DCHECK(instr->op() == Token::MOD || left.is(result));
2020 switch (instr->op()) {
2021 case Token::ADD:
2022 __ addsd(left, right);
2023 break;
2024 case Token::SUB:
2025 __ subsd(left, right);
2026 break;
2027 case Token::MUL:
2028 __ mulsd(left, right);
2029 break;
2030 case Token::DIV:
2031 __ divsd(left, right);
2032 // Don't delete this mov. It may improve performance on some CPUs
2033 // when there is a mulsd depending on the result.
2034 __ movaps(left, left);
2035 break;
2036 case Token::MOD: {
2037 XMMRegister xmm_scratch = double_scratch0();
2038 __ PrepareCallCFunction(2);
2039 __ movaps(xmm_scratch, left);
2040 DCHECK(right.is(xmm1));
2041 __ CallCFunction(
2042 ExternalReference::mod_two_doubles_operation(isolate()), 2);
2043 __ movaps(result, xmm_scratch);
2044 break;
2045 }
2046 default:
2047 UNREACHABLE();
2048 break;
2049 }
2050 }
2051
2052
2053 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2054 DCHECK(ToRegister(instr->context()).is(rsi));
2055 DCHECK(ToRegister(instr->left()).is(rdx));
2056 DCHECK(ToRegister(instr->right()).is(rax));
2057 DCHECK(ToRegister(instr->result()).is(rax));
2058
2059 Handle<Code> code =
2060 CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
2061 CallCode(code, RelocInfo::CODE_TARGET, instr);
2062 }
2063
2064
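// Emits the conditional branch for a control instruction. If either successor
// is the next block to be emitted, that edge becomes a fall-through and only
// a single jump is generated; otherwise both a conditional and an
// unconditional jump are needed.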
2065 template<class InstrType>
2066 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2067 int left_block = instr->TrueDestination(chunk_);
2068 int right_block = instr->FalseDestination(chunk_);
2069
2070 int next_block = GetNextEmittedBlock();
2071
2072 if (right_block == left_block || cc == no_condition) {
2073 EmitGoto(left_block);
2074 } else if (left_block == next_block) {
2075 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2076 } else if (right_block == next_block) {
2077 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2078 } else {
2079 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2080 if (cc != always) {
2081 __ jmp(chunk_->GetAssemblyLabel(right_block));
2082 }
2083 }
2084 }
2085
2086
2087 template<class InstrType>
2088 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2089 int false_block = instr->FalseDestination(chunk_);
2090 __ j(cc, chunk_->GetAssemblyLabel(false_block));
2091 }
2092
2093
2094 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2095 __ int3();
2096 }
2097
2098
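// Branch on the truthiness of a value. Known representations (int32, smi,
// double, or a tagged value with a known type) are tested directly; a generic
// tagged value is checked against the set of types the ToBooleanStub has seen
// so far, and anything outside that set deoptimizes so the set can grow.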
2099 void LCodeGen::DoBranch(LBranch* instr) {
2100 Representation r = instr->hydrogen()->value()->representation();
2101 if (r.IsInteger32()) {
2102 DCHECK(!info()->IsStub());
2103 Register reg = ToRegister(instr->value());
2104 __ testl(reg, reg);
2105 EmitBranch(instr, not_zero);
2106 } else if (r.IsSmi()) {
2107 DCHECK(!info()->IsStub());
2108 Register reg = ToRegister(instr->value());
2109 __ testp(reg, reg);
2110 EmitBranch(instr, not_zero);
2111 } else if (r.IsDouble()) {
2112 DCHECK(!info()->IsStub());
2113 XMMRegister reg = ToDoubleRegister(instr->value());
2114 XMMRegister xmm_scratch = double_scratch0();
2115 __ xorps(xmm_scratch, xmm_scratch);
2116 __ ucomisd(reg, xmm_scratch);
2117 EmitBranch(instr, not_equal);
2118 } else {
2119 DCHECK(r.IsTagged());
2120 Register reg = ToRegister(instr->value());
2121 HType type = instr->hydrogen()->value()->type();
2122 if (type.IsBoolean()) {
2123 DCHECK(!info()->IsStub());
2124 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2125 EmitBranch(instr, equal);
2126 } else if (type.IsSmi()) {
2127 DCHECK(!info()->IsStub());
2128 __ SmiCompare(reg, Smi::FromInt(0));
2129 EmitBranch(instr, not_equal);
2130 } else if (type.IsJSArray()) {
2131 DCHECK(!info()->IsStub());
2132 EmitBranch(instr, no_condition);
2133 } else if (type.IsHeapNumber()) {
2134 DCHECK(!info()->IsStub());
2135 XMMRegister xmm_scratch = double_scratch0();
2136 __ xorps(xmm_scratch, xmm_scratch);
2137 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2138 EmitBranch(instr, not_equal);
2139 } else if (type.IsString()) {
2140 DCHECK(!info()->IsStub());
2141 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2142 EmitBranch(instr, not_equal);
2143 } else {
2144 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2145 // Avoid deopts in the case where we've never executed this path before.
2146 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2147
2148 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2149 // undefined -> false.
2150 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2151 __ j(equal, instr->FalseLabel(chunk_));
2152 }
2153 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2154 // true -> true.
2155 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2156 __ j(equal, instr->TrueLabel(chunk_));
2157 // false -> false.
2158 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2159 __ j(equal, instr->FalseLabel(chunk_));
2160 }
2161 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2162 // 'null' -> false.
2163 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2164 __ j(equal, instr->FalseLabel(chunk_));
2165 }
2166
2167 if (expected.Contains(ToBooleanStub::SMI)) {
2168 // Smis: 0 -> false, all other -> true.
2169 __ Cmp(reg, Smi::FromInt(0));
2170 __ j(equal, instr->FalseLabel(chunk_));
2171 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2172 } else if (expected.NeedsMap()) {
2173 // If we need a map later and have a Smi -> deopt.
2174 __ testb(reg, Immediate(kSmiTagMask));
2175 DeoptimizeIf(zero, instr, "Smi");
2176 }
2177
2178 const Register map = kScratchRegister;
2179 if (expected.NeedsMap()) {
2180 __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
2181
2182 if (expected.CanBeUndetectable()) {
2183 // Undetectable -> false.
2184 __ testb(FieldOperand(map, Map::kBitFieldOffset),
2185 Immediate(1 << Map::kIsUndetectable));
2186 __ j(not_zero, instr->FalseLabel(chunk_));
2187 }
2188 }
2189
2190 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2191 // spec object -> true.
2192 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2193 __ j(above_equal, instr->TrueLabel(chunk_));
2194 }
2195
2196 if (expected.Contains(ToBooleanStub::STRING)) {
2197 // String value -> false iff empty.
2198 Label not_string;
2199 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2200 __ j(above_equal, &not_string, Label::kNear);
2201 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2202 __ j(not_zero, instr->TrueLabel(chunk_));
2203 __ jmp(instr->FalseLabel(chunk_));
2204 __ bind(&not_string);
2205 }
2206
2207 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2208 // Symbol value -> true.
2209 __ CmpInstanceType(map, SYMBOL_TYPE);
2210 __ j(equal, instr->TrueLabel(chunk_));
2211 }
2212
2213 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2214 // heap number -> false iff +0, -0, or NaN.
2215 Label not_heap_number;
2216 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2217 __ j(not_equal, &not_heap_number, Label::kNear);
2218 XMMRegister xmm_scratch = double_scratch0();
2219 __ xorps(xmm_scratch, xmm_scratch);
2220 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2221 __ j(zero, instr->FalseLabel(chunk_));
2222 __ jmp(instr->TrueLabel(chunk_));
2223 __ bind(&not_heap_number);
2224 }
2225
2226 if (!expected.IsGeneric()) {
2227 // We've seen something for the first time -> deopt.
2228 // This can only happen if we are not generic already.
2229 DeoptimizeIf(no_condition, instr, "unexpected object");
2230 }
2231 }
2232 }
2233 }
2234
2235
2236 void LCodeGen::EmitGoto(int block) {
2237 if (!IsNextEmittedBlock(block)) {
2238 __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
2239 }
2240 }
2241
2242
2243 void LCodeGen::DoGoto(LGoto* instr) {
2244 EmitGoto(instr->block_id());
2245 }
2246
2247
2248 inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2249 Condition cond = no_condition;
2250 switch (op) {
2251 case Token::EQ:
2252 case Token::EQ_STRICT:
2253 cond = equal;
2254 break;
2255 case Token::NE:
2256 case Token::NE_STRICT:
2257 cond = not_equal;
2258 break;
2259 case Token::LT:
2260 cond = is_unsigned ? below : less;
2261 break;
2262 case Token::GT:
2263 cond = is_unsigned ? above : greater;
2264 break;
2265 case Token::LTE:
2266 cond = is_unsigned ? below_equal : less_equal;
2267 break;
2268 case Token::GTE:
2269 cond = is_unsigned ? above_equal : greater_equal;
2270 break;
2271 case Token::IN:
2272 case Token::INSTANCEOF:
2273 default:
2274 UNREACHABLE();
2275 }
2276 return cond;
2277 }
2278
2279
2280 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2281 LOperand* left = instr->left();
2282 LOperand* right = instr->right();
2283 bool is_unsigned =
2284 instr->is_double() ||
2285 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2286 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2287 Condition cc = TokenToCondition(instr->op(), is_unsigned);
2288
2289 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2290 // We can statically evaluate the comparison.
2291 double left_val = ToDouble(LConstantOperand::cast(left));
2292 double right_val = ToDouble(LConstantOperand::cast(right));
2293 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2294 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2295 EmitGoto(next_block);
2296 } else {
2297 if (instr->is_double()) {
2298 // Don't base result on EFLAGS when a NaN is involved. Instead
2299 // jump to the false block.
2300 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2301 __ j(parity_even, instr->FalseLabel(chunk_));
2302 } else {
2303 int32_t value;
2304 if (right->IsConstantOperand()) {
2305 value = ToInteger32(LConstantOperand::cast(right));
2306 if (instr->hydrogen_value()->representation().IsSmi()) {
2307 __ Cmp(ToRegister(left), Smi::FromInt(value));
2308 } else {
2309 __ cmpl(ToRegister(left), Immediate(value));
2310 }
2311 } else if (left->IsConstantOperand()) {
2312 value = ToInteger32(LConstantOperand::cast(left));
2313 if (instr->hydrogen_value()->representation().IsSmi()) {
2314 if (right->IsRegister()) {
2315 __ Cmp(ToRegister(right), Smi::FromInt(value));
2316 } else {
2317 __ Cmp(ToOperand(right), Smi::FromInt(value));
2318 }
2319 } else if (right->IsRegister()) {
2320 __ cmpl(ToRegister(right), Immediate(value));
2321 } else {
2322 __ cmpl(ToOperand(right), Immediate(value));
2323 }
2324 // We commuted the operands, so commute the condition.
2325 cc = CommuteCondition(cc);
2326 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2327 if (right->IsRegister()) {
2328 __ cmpp(ToRegister(left), ToRegister(right));
2329 } else {
2330 __ cmpp(ToRegister(left), ToOperand(right));
2331 }
2332 } else {
2333 if (right->IsRegister()) {
2334 __ cmpl(ToRegister(left), ToRegister(right));
2335 } else {
2336 __ cmpl(ToRegister(left), ToOperand(right));
2337 }
2338 }
2339 }
2340 EmitBranch(instr, cc);
2341 }
2342 }
2343
2344
2345 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2346 Register left = ToRegister(instr->left());
2347
2348 if (instr->right()->IsConstantOperand()) {
2349 Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2350 __ Cmp(left, right);
2351 } else {
2352 Register right = ToRegister(instr->right());
2353 __ cmpp(left, right);
2354 }
2355 EmitBranch(instr, equal);
2356 }
2357
2358
2359 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2360 if (instr->hydrogen()->representation().IsTagged()) {
2361 Register input_reg = ToRegister(instr->object());
2362 __ Cmp(input_reg, factory()->the_hole_value());
2363 EmitBranch(instr, equal);
2364 return;
2365 }
2366
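// In unboxed-double form the hole is a NaN with a specific bit pattern. A
// value that is not NaN at all cannot be the hole, which the parity check
// below filters out; otherwise the double is spilled to the stack so its
// upper 32 bits can be compared against kHoleNanUpper32 with an integer
// compare.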
2367 XMMRegister input_reg = ToDoubleRegister(instr->object());
2368 __ ucomisd(input_reg, input_reg);
2369 EmitFalseBranch(instr, parity_odd);
2370
2371 __ subp(rsp, Immediate(kDoubleSize));
2372 __ movsd(MemOperand(rsp, 0), input_reg);
2373 __ addp(rsp, Immediate(kDoubleSize));
2374
2375 int offset = sizeof(kHoleNanUpper32);
2376 __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
2377 EmitBranch(instr, equal);
2378 }
2379
2380
2381 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2382 Representation rep = instr->hydrogen()->value()->representation();
2383 DCHECK(!rep.IsInteger32());
2384
2385 if (rep.IsDouble()) {
2386 XMMRegister value = ToDoubleRegister(instr->value());
2387 XMMRegister xmm_scratch = double_scratch0();
2388 __ xorps(xmm_scratch, xmm_scratch);
2389 __ ucomisd(xmm_scratch, value);
2390 EmitFalseBranch(instr, not_equal);
2391 __ movmskpd(kScratchRegister, value);
2392 __ testl(kScratchRegister, Immediate(1));
2393 EmitBranch(instr, not_zero);
2394 } else {
2395 Register value = ToRegister(instr->value());
2396 Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2397 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
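// The upper word of -0.0's bit pattern (sign and exponent half) is
// 0x80000000. Comparing that word against 1 sets the overflow flag only when
// the word is INT32_MIN, so no_overflow rejects every value whose upper word
// differs; the remaining check requires the mantissa word to be zero.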
2398 __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
2399 Immediate(0x1));
2400 EmitFalseBranch(instr, no_overflow);
2401 __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
2402 Immediate(0x00000000));
2403 EmitBranch(instr, equal);
2404 }
2405 }
2406
2407
2408 Condition LCodeGen::EmitIsObject(Register input,
2409 Label* is_not_object,
2410 Label* is_object) {
2411 DCHECK(!input.is(kScratchRegister));
2412
2413 __ JumpIfSmi(input, is_not_object);
2414
2415 __ CompareRoot(input, Heap::kNullValueRootIndex);
2416 __ j(equal, is_object);
2417
2418 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
2419 // Undetectable objects behave like undefined.
2420 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
2421 Immediate(1 << Map::kIsUndetectable));
2422 __ j(not_zero, is_not_object);
2423
2424 __ movzxbl(kScratchRegister,
2425 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
2426 __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2427 __ j(below, is_not_object);
2428 __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2429 return below_equal;
2430 }
2431
2432
2433 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2434 Register reg = ToRegister(instr->value());
2435
2436 Condition true_cond = EmitIsObject(
2437 reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2438
2439 EmitBranch(instr, true_cond);
2440 }
2441
2442
2443 Condition LCodeGen::EmitIsString(Register input,
2444 Register temp1,
2445 Label* is_not_string,
2446 SmiCheck check_needed = INLINE_SMI_CHECK) {
2447 if (check_needed == INLINE_SMI_CHECK) {
2448 __ JumpIfSmi(input, is_not_string);
2449 }
2450
2451 Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2452
2453 return cond;
2454 }
2455
2456
2457 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2458 Register reg = ToRegister(instr->value());
2459 Register temp = ToRegister(instr->temp());
2460
2461 SmiCheck check_needed =
2462 instr->hydrogen()->value()->type().IsHeapObject()
2463 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2464
2465 Condition true_cond = EmitIsString(
2466 reg, temp, instr->FalseLabel(chunk_), check_needed);
2467
2468 EmitBranch(instr, true_cond);
2469 }
2470
2471
2472 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2473 Condition is_smi;
2474 if (instr->value()->IsRegister()) {
2475 Register input = ToRegister(instr->value());
2476 is_smi = masm()->CheckSmi(input);
2477 } else {
2478 Operand input = ToOperand(instr->value());
2479 is_smi = masm()->CheckSmi(input);
2480 }
2481 EmitBranch(instr, is_smi);
2482 }
2483
2484
2485 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2486 Register input = ToRegister(instr->value());
2487 Register temp = ToRegister(instr->temp());
2488
2489 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2490 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2491 }
2492 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2493 __ testb(FieldOperand(temp, Map::kBitFieldOffset),
2494 Immediate(1 << Map::kIsUndetectable));
2495 EmitBranch(instr, not_zero);
2496 }
2497
2498
2499 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2500 DCHECK(ToRegister(instr->context()).is(rsi));
2501 Token::Value op = instr->op();
2502
2503 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2504 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2505
2506 Condition condition = TokenToCondition(op, false);
2507 __ testp(rax, rax);
2508
2509 EmitBranch(instr, condition);
2510 }
2511
2512
2513 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2514 InstanceType from = instr->from();
2515 InstanceType to = instr->to();
2516 if (from == FIRST_TYPE) return to;
2517 DCHECK(from == to || to == LAST_TYPE);
2518 return from;
2519 }
2520
2521
2522 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2523 InstanceType from = instr->from();
2524 InstanceType to = instr->to();
2525 if (from == to) return equal;
2526 if (to == LAST_TYPE) return above_equal;
2527 if (from == FIRST_TYPE) return below_equal;
2528 UNREACHABLE();
2529 return equal;
2530 }
2531
2532
2533 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2534 Register input = ToRegister(instr->value());
2535
2536 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2537 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2538 }
2539
2540 __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
2541 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2542 }
2543
2544
2545 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2546 Register input = ToRegister(instr->value());
2547 Register result = ToRegister(instr->result());
2548
2549 __ AssertString(input);
2550
2551 __ movl(result, FieldOperand(input, String::kHashFieldOffset));
2552 DCHECK(String::kHashShift >= kSmiTagSize);
2553 __ IndexFromHash(result, result);
2554 }
2555
2556
2557 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2558 LHasCachedArrayIndexAndBranch* instr) {
2559 Register input = ToRegister(instr->value());
2560
2561 __ testl(FieldOperand(input, String::kHashFieldOffset),
2562 Immediate(String::kContainsCachedArrayIndexMask));
2563 EmitBranch(instr, equal);
2564 }
2565
2566
2567 // Branches to a label or falls through with the answer in the z flag.
2568 // Trashes the temp register.
2569 void LCodeGen::EmitClassOfTest(Label* is_true,
2570 Label* is_false,
2571 Handle<String> class_name,
2572 Register input,
2573 Register temp,
2574 Register temp2) {
2575 DCHECK(!input.is(temp));
2576 DCHECK(!input.is(temp2));
2577 DCHECK(!temp.is(temp2));
2578
2579 __ JumpIfSmi(input, is_false);
2580
2581 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2582 // Assuming the following assertions, we can use the same compares to test
2583 // for both being a function type and being in the object type range.
2584 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2585 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2586 FIRST_SPEC_OBJECT_TYPE + 1);
2587 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2588 LAST_SPEC_OBJECT_TYPE - 1);
2589 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2590 __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2591 __ j(below, is_false);
2592 __ j(equal, is_true);
2593 __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2594 __ j(equal, is_true);
2595 } else {
2596 // Faster code path to avoid two compares: subtract lower bound from the
2597 // actual type and do a signed compare with the width of the type range.
2598 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2599 __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2600 __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2601 __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2602 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2603 __ j(above, is_false);
2604 }
2605
2606 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2607 // Check if the constructor in the map is a function.
2608 __ movp(temp, FieldOperand(temp, Map::kConstructorOffset));
2609
2610 // Objects with a non-function constructor have class 'Object'.
2611 __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
2612 if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2613 __ j(not_equal, is_true);
2614 } else {
2615 __ j(not_equal, is_false);
2616 }
2617
2618 // temp now contains the constructor function. Grab the
2619 // instance class name from there.
2620 __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2621 __ movp(temp, FieldOperand(temp,
2622 SharedFunctionInfo::kInstanceClassNameOffset));
2623 // The class name we are testing against is internalized since it's a literal.
2624 // The name in the constructor is internalized because of the way the context
2625 // is booted. This routine isn't expected to work for random API-created
2626 // classes and it doesn't have to because you can't access it with natives
2627 // syntax. Since both sides are internalized it is sufficient to use an
2628 // identity comparison.
2629 DCHECK(class_name->IsInternalizedString());
2630 __ Cmp(temp, class_name);
2631 // End with the answer in the z flag.
2632 }
2633
2634
2635 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2636 Register input = ToRegister(instr->value());
2637 Register temp = ToRegister(instr->temp());
2638 Register temp2 = ToRegister(instr->temp2());
2639 Handle<String> class_name = instr->hydrogen()->class_name();
2640
2641 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2642 class_name, input, temp, temp2);
2643
2644 EmitBranch(instr, equal);
2645 }
2646
2647
2648 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2649 Register reg = ToRegister(instr->value());
2650
2651 __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2652 EmitBranch(instr, equal);
2653 }
2654
2655
2656 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2657 DCHECK(ToRegister(instr->context()).is(rsi));
2658 InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
2659 __ Push(ToRegister(instr->left()));
2660 __ Push(ToRegister(instr->right()));
2661 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2662 Label true_value, done;
2663 __ testp(rax, rax);
2664 __ j(zero, &true_value, Label::kNear);
2665 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2666 __ jmp(&done, Label::kNear);
2667 __ bind(&true_value);
2668 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2669 __ bind(&done);
2670 }
2671
2672
2673 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2674 class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
2675 public:
2676 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2677 LInstanceOfKnownGlobal* instr)
2678 : LDeferredCode(codegen), instr_(instr) { }
2679 virtual void Generate() OVERRIDE {
2680 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2681 }
2682 virtual LInstruction* instr() OVERRIDE { return instr_; }
2683 Label* map_check() { return &map_check_; }
2684 private:
2685 LInstanceOfKnownGlobal* instr_;
2686 Label map_check_;
2687 };
2688
2689 DCHECK(ToRegister(instr->context()).is(rsi));
2690 DeferredInstanceOfKnownGlobal* deferred;
2691 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2692
2693 Label done, false_result;
2694 Register object = ToRegister(instr->value());
2695
2696 // A Smi is not an instance of anything.
2697 __ JumpIfSmi(object, &false_result, Label::kNear);
2698
2699 // This is the inlined call site instanceof cache. The two occurrences of the
2700 // hole value will be patched to the last map/result pair generated by the
2701 // instanceof stub.
2702 Label cache_miss;
2703 // Use a temp register to avoid memory operands with variable lengths.
2704 Register map = ToRegister(instr->temp());
2705 __ movp(map, FieldOperand(object, HeapObject::kMapOffset));
2706 __ bind(deferred->map_check()); // Label for calculating code patching.
2707 Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2708 __ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
2709 __ cmpp(map, Operand(kScratchRegister, 0));
2710 __ j(not_equal, &cache_miss, Label::kNear);
2711 // Patched to load either true or false.
2712 __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
2713 #ifdef DEBUG
2714 // Check that the code size between patch label and patch sites is invariant.
2715 Label end_of_patched_code;
2716 __ bind(&end_of_patched_code);
2717 DCHECK(true);
2718 #endif
2719 __ jmp(&done, Label::kNear);
2720
2721 // The inlined call site cache did not match. Check for null and string
2722 // before calling the deferred code.
2723 __ bind(&cache_miss); // Null is not an instance of anything.
2724 __ CompareRoot(object, Heap::kNullValueRootIndex);
2725 __ j(equal, &false_result, Label::kNear);
2726
2727 // String values are not instances of anything.
2728 __ JumpIfNotString(object, kScratchRegister, deferred->entry());
2729
2730 __ bind(&false_result);
2731 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2732
2733 __ bind(deferred->exit());
2734 __ bind(&done);
2735 }
2736
2737
2738 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2739 Label* map_check) {
2740 {
2741 PushSafepointRegistersScope scope(this);
2742 InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
2743 InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
2744 InstanceofStub stub(isolate(), flags);
2745
2746 __ Push(ToRegister(instr->value()));
2747 __ Push(instr->function());
2748
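// The stub patches the inlined cache site, so it needs the distance from the
// map-check label to the return address of this call. kAdditionalDelta
// accounts for the code emitted between the PushImm32 below and the call
// itself; the DCHECK after the call verifies that estimate.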
2749 static const int kAdditionalDelta = kPointerSize == kInt64Size ? 10 : 16;
2750 int delta =
2751 masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
2752 DCHECK(delta >= 0);
2753 __ PushImm32(delta);
2754
2755 // We are pushing three values on the stack but recording a
2756 // safepoint with two arguments because the stub is going to
2757 // remove the third argument from the stack before jumping
2758 // to the instanceof builtin on the slow path.
2759 CallCodeGeneric(stub.GetCode(),
2760 RelocInfo::CODE_TARGET,
2761 instr,
2762 RECORD_SAFEPOINT_WITH_REGISTERS,
2763 2);
2764 DCHECK(delta == masm_->SizeOfCodeGeneratedSince(map_check));
2765 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2766 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2767 // Move result to a register that survives the end of the
2768 // PushSafepointRegistersScope.
2769 __ movp(kScratchRegister, rax);
2770 }
2771 __ testp(kScratchRegister, kScratchRegister);
2772 Label load_false;
2773 Label done;
2774 __ j(not_zero, &load_false, Label::kNear);
2775 __ LoadRoot(rax, Heap::kTrueValueRootIndex);
2776 __ jmp(&done, Label::kNear);
2777 __ bind(&load_false);
2778 __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2779 __ bind(&done);
2780 }
2781
2782
2783 void LCodeGen::DoCmpT(LCmpT* instr) {
2784 DCHECK(ToRegister(instr->context()).is(rsi));
2785 Token::Value op = instr->op();
2786
2787 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2788 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2789
2790 Condition condition = TokenToCondition(op, false);
2791 Label true_value, done;
2792 __ testp(rax, rax);
2793 __ j(condition, &true_value, Label::kNear);
2794 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2795 __ jmp(&done, Label::kNear);
2796 __ bind(&true_value);
2797 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2798 __ bind(&done);
2799 }
2800
2801
2802 void LCodeGen::DoReturn(LReturn* instr) {
2803 if (FLAG_trace && info()->IsOptimizing()) {
2804 // Preserve the return value on the stack and rely on the runtime call
2805 // to return the value in the same register. We're leaving the code
2806 // managed by the register allocator and tearing down the frame, so it's
2807 // safe to write to the context register.
2808 __ Push(rax);
2809 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2810 __ CallRuntime(Runtime::kTraceExit, 1);
2811 }
2812 if (info()->saves_caller_doubles()) {
2813 RestoreCallerDoubles();
2814 }
2815 int no_frame_start = -1;
2816 if (NeedsEagerFrame()) {
2817 __ movp(rsp, rbp);
2818 __ popq(rbp);
2819 no_frame_start = masm_->pc_offset();
2820 }
2821 if (instr->has_constant_parameter_count()) {
2822 __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
2823 rcx);
2824 } else {
2825 Register reg = ToRegister(instr->parameter_count());
2826 // The argument count parameter is a smi.
2827 __ SmiToInteger32(reg, reg);
2828 Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
2829 __ PopReturnAddressTo(return_addr_reg);
2830 __ shlp(reg, Immediate(kPointerSizeLog2));
2831 __ addp(rsp, reg);
2832 __ jmp(return_addr_reg);
2833 }
2834 if (no_frame_start != -1) {
2835 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2836 }
2837 }
2838
2839
2840 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2841 Register result = ToRegister(instr->result());
2842 __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
2843 if (instr->hydrogen()->RequiresHoleCheck()) {
2844 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2845 DeoptimizeIf(equal, instr, "hole");
2846 }
2847 }
2848
2849
2850 template <class T>
2851 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2852 DCHECK(FLAG_vector_ics);
2853 Register vector = ToRegister(instr->temp_vector());
2854 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
2855 __ Move(vector, instr->hydrogen()->feedback_vector());
2856 // No need to allocate this register.
2857 DCHECK(VectorLoadICDescriptor::SlotRegister().is(rax));
2858 __ Move(VectorLoadICDescriptor::SlotRegister(),
2859 Smi::FromInt(instr->hydrogen()->slot()));
2860 }
2861
2862
2863 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2864 DCHECK(ToRegister(instr->context()).is(rsi));
2865 DCHECK(ToRegister(instr->global_object())
2866 .is(LoadDescriptor::ReceiverRegister()));
2867 DCHECK(ToRegister(instr->result()).is(rax));
2868
2869 __ Move(LoadDescriptor::NameRegister(), instr->name());
2870 if (FLAG_vector_ics) {
2871 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2872 }
2873 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2874 Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
2875 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2876 }
2877
2878
2879 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2880 Register value = ToRegister(instr->value());
2881 Handle<Cell> cell_handle = instr->hydrogen()->cell().handle();
2882
2883 // If the cell we are storing to contains the hole it could have
2884 // been deleted from the property dictionary. In that case, we need
2885 // to update the property details in the property dictionary to mark
2886 // it as no longer deleted. We deoptimize in that case.
2887 if (instr->hydrogen()->RequiresHoleCheck()) {
2888 // We have a temp because CompareRoot might clobber kScratchRegister.
2889 Register cell = ToRegister(instr->temp());
2890 DCHECK(!value.is(cell));
2891 __ Move(cell, cell_handle, RelocInfo::CELL);
2892 __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
2893 DeoptimizeIf(equal, instr, "hole");
2894 // Store the value.
2895 __ movp(Operand(cell, 0), value);
2896 } else {
2897 // Store the value.
2898 __ Move(kScratchRegister, cell_handle, RelocInfo::CELL);
2899 __ movp(Operand(kScratchRegister, 0), value);
2900 }
2901 // Cells are always rescanned, so no write barrier here.
2902 }
2903
2904
2905 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2906 Register context = ToRegister(instr->context());
2907 Register result = ToRegister(instr->result());
2908 __ movp(result, ContextOperand(context, instr->slot_index()));
2909 if (instr->hydrogen()->RequiresHoleCheck()) {
2910 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2911 if (instr->hydrogen()->DeoptimizesOnHole()) {
2912 DeoptimizeIf(equal, instr, "hole");
2913 } else {
2914 Label is_not_hole;
2915 __ j(not_equal, &is_not_hole, Label::kNear);
2916 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2917 __ bind(&is_not_hole);
2918 }
2919 }
2920 }
2921
2922
2923 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2924 Register context = ToRegister(instr->context());
2925 Register value = ToRegister(instr->value());
2926
2927 Operand target = ContextOperand(context, instr->slot_index());
2928
2929 Label skip_assignment;
2930 if (instr->hydrogen()->RequiresHoleCheck()) {
2931 __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
2932 if (instr->hydrogen()->DeoptimizesOnHole()) {
2933 DeoptimizeIf(equal, instr, "hole");
2934 } else {
2935 __ j(not_equal, &skip_assignment);
2936 }
2937 }
2938 __ movp(target, value);
2939
2940 if (instr->hydrogen()->NeedsWriteBarrier()) {
2941 SmiCheck check_needed =
2942 instr->hydrogen()->value()->type().IsHeapObject()
2943 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2944 int offset = Context::SlotOffset(instr->slot_index());
2945 Register scratch = ToRegister(instr->temp());
2946 __ RecordWriteContextSlot(context,
2947 offset,
2948 value,
2949 scratch,
2950 kSaveFPRegs,
2951 EMIT_REMEMBERED_SET,
2952 check_needed);
2953 }
2954
2955 __ bind(&skip_assignment);
2956 }
2957
2958
2959 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2960 HObjectAccess access = instr->hydrogen()->access();
2961 int offset = access.offset();
2962
2963 if (access.IsExternalMemory()) {
2964 Register result = ToRegister(instr->result());
2965 if (instr->object()->IsConstantOperand()) {
2966 DCHECK(result.is(rax));
2967 __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
2968 } else {
2969 Register object = ToRegister(instr->object());
2970 __ Load(result, MemOperand(object, offset), access.representation());
2971 }
2972 return;
2973 }
2974
2975 Register object = ToRegister(instr->object());
2976 if (instr->hydrogen()->representation().IsDouble()) {
2977 XMMRegister result = ToDoubleRegister(instr->result());
2978 __ movsd(result, FieldOperand(object, offset));
2979 return;
2980 }
2981
2982 Register result = ToRegister(instr->result());
2983 if (!access.IsInobject()) {
2984 __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
2985 object = result;
2986 }
2987
2988 Representation representation = access.representation();
2989 if (representation.IsSmi() && SmiValuesAre32Bits() &&
2990 instr->hydrogen()->representation().IsInteger32()) {
2991 if (FLAG_debug_code) {
2992 Register scratch = kScratchRegister;
2993 __ Load(scratch, FieldOperand(object, offset), representation);
2994 __ AssertSmi(scratch);
2995 }
2996
2997 // Read int value directly from upper half of the smi.
2998 STATIC_ASSERT(kSmiTag == 0);
2999 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
3000 offset += kPointerSize / 2;
3001 representation = Representation::Integer32();
3002 }
3003 __ Load(result, FieldOperand(object, offset), representation);
3004 }
3005
3006
3007 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3008 DCHECK(ToRegister(instr->context()).is(rsi));
3009 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3010 DCHECK(ToRegister(instr->result()).is(rax));
3011
3012 __ Move(LoadDescriptor::NameRegister(), instr->name());
3013 if (FLAG_vector_ics) {
3014 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3015 }
3016 Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
3017 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3018 }
3019
3020
3021 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3022 Register function = ToRegister(instr->function());
3023 Register result = ToRegister(instr->result());
3024
3025 // Get the prototype or initial map from the function.
3026 __ movp(result,
3027 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3028
3029 // Check that the function has a prototype or an initial map.
3030 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3031 DeoptimizeIf(equal, instr, "hole");
3032
3033 // If the function does not have an initial map, we're done.
3034 Label done;
3035 __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
3036 __ j(not_equal, &done, Label::kNear);
3037
3038 // Get the prototype from the initial map.
3039 __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
3040
3041 // All done.
3042 __ bind(&done);
3043 }
3044
3045
3046 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3047 Register result = ToRegister(instr->result());
3048 __ LoadRoot(result, instr->index());
3049 }
3050
3051
3052 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3053 Register arguments = ToRegister(instr->arguments());
3054 Register result = ToRegister(instr->result());
3055
3056 if (instr->length()->IsConstantOperand() &&
3057 instr->index()->IsConstantOperand()) {
3058 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3059 int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3060 if (const_index >= 0 && const_index < const_length) {
3061 StackArgumentsAccessor args(arguments, const_length,
3062 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3063 __ movp(result, args.GetArgumentOperand(const_index));
3064 } else if (FLAG_debug_code) {
3065 __ int3();
3066 }
3067 } else {
3068 Register length = ToRegister(instr->length());
3069 // There are two words between the frame pointer and the last argument.
3070 // Subtracting from length accounts for one of them; add one more.
3071 if (instr->index()->IsRegister()) {
3072 __ subl(length, ToRegister(instr->index()));
3073 } else {
3074 __ subl(length, ToOperand(instr->index()));
3075 }
3076 StackArgumentsAccessor args(arguments, length,
3077 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3078 __ movp(result, args.GetArgumentOperand(0));
3079 }
3080 }
3081
3082
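// Loads from external/typed-array backing stores. On the configuration where
// pointers are 32 bits wide (kPointerSize == kInt32Size) a non-constant key
// may need to be untagged or sign-extended first, because the address
// computation below is done with 64-bit registers. The element kind then
// selects the load width and the sign/zero extension or float conversion.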
3083 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3084 ElementsKind elements_kind = instr->elements_kind();
3085 LOperand* key = instr->key();
3086 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
3087 Register key_reg = ToRegister(key);
3088 Representation key_representation =
3089 instr->hydrogen()->key()->representation();
3090 if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
3091 __ SmiToInteger64(key_reg, key_reg);
3092 } else if (instr->hydrogen()->IsDehoisted()) {
3093 // Sign extend key because it could be a 32 bit negative value
3094 // and the dehoisted address computation happens in 64 bits
3095 __ movsxlq(key_reg, key_reg);
3096 }
3097 }
3098 Operand operand(BuildFastArrayOperand(
3099 instr->elements(),
3100 key,
3101 instr->hydrogen()->key()->representation(),
3102 elements_kind,
3103 instr->base_offset()));
3104
3105 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3106 elements_kind == FLOAT32_ELEMENTS) {
3107 XMMRegister result(ToDoubleRegister(instr->result()));
3108 __ movss(result, operand);
3109 __ cvtss2sd(result, result);
3110 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3111 elements_kind == FLOAT64_ELEMENTS) {
3112 __ movsd(ToDoubleRegister(instr->result()), operand);
3113 } else {
3114 Register result(ToRegister(instr->result()));
3115 switch (elements_kind) {
3116 case EXTERNAL_INT8_ELEMENTS:
3117 case INT8_ELEMENTS:
3118 __ movsxbl(result, operand);
3119 break;
3120 case EXTERNAL_UINT8_ELEMENTS:
3121 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3122 case UINT8_ELEMENTS:
3123 case UINT8_CLAMPED_ELEMENTS:
3124 __ movzxbl(result, operand);
3125 break;
3126 case EXTERNAL_INT16_ELEMENTS:
3127 case INT16_ELEMENTS:
3128 __ movsxwl(result, operand);
3129 break;
3130 case EXTERNAL_UINT16_ELEMENTS:
3131 case UINT16_ELEMENTS:
3132 __ movzxwl(result, operand);
3133 break;
3134 case EXTERNAL_INT32_ELEMENTS:
3135 case INT32_ELEMENTS:
3136 __ movl(result, operand);
3137 break;
3138 case EXTERNAL_UINT32_ELEMENTS:
3139 case UINT32_ELEMENTS:
3140 __ movl(result, operand);
3141 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3142 __ testl(result, result);
3143 DeoptimizeIf(negative, instr, "negative value");
3144 }
3145 break;
3146 case EXTERNAL_FLOAT32_ELEMENTS:
3147 case EXTERNAL_FLOAT64_ELEMENTS:
3148 case FLOAT32_ELEMENTS:
3149 case FLOAT64_ELEMENTS:
3150 case FAST_ELEMENTS:
3151 case FAST_SMI_ELEMENTS:
3152 case FAST_DOUBLE_ELEMENTS:
3153 case FAST_HOLEY_ELEMENTS:
3154 case FAST_HOLEY_SMI_ELEMENTS:
3155 case FAST_HOLEY_DOUBLE_ELEMENTS:
3156 case DICTIONARY_ELEMENTS:
3157 case SLOPPY_ARGUMENTS_ELEMENTS:
3158 UNREACHABLE();
3159 break;
3160 }
3161 }
3162 }
3163
3164
3165 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3166 XMMRegister result(ToDoubleRegister(instr->result()));
3167 LOperand* key = instr->key();
3168 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
3169 instr->hydrogen()->IsDehoisted()) {
3170 // Sign extend key because it could be a 32 bit negative value
3171 // and the dehoisted address computation happens in 64 bits
3172 __ movsxlq(ToRegister(key), ToRegister(key));
3173 }
3174 if (instr->hydrogen()->RequiresHoleCheck()) {
3175 Operand hole_check_operand = BuildFastArrayOperand(
3176 instr->elements(),
3177 key,
3178 instr->hydrogen()->key()->representation(),
3179 FAST_DOUBLE_ELEMENTS,
3180 instr->base_offset() + sizeof(kHoleNanLower32));
3181 __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
3182 DeoptimizeIf(equal, instr, "hole");
3183 }
3184
3185 Operand double_load_operand = BuildFastArrayOperand(
3186 instr->elements(),
3187 key,
3188 instr->hydrogen()->key()->representation(),
3189 FAST_DOUBLE_ELEMENTS,
3190 instr->base_offset());
3191 __ movsd(result, double_load_operand);
3192 }
3193
3194
3195 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3196 HLoadKeyed* hinstr = instr->hydrogen();
3197 Register result = ToRegister(instr->result());
3198 LOperand* key = instr->key();
3199 bool requires_hole_check = hinstr->RequiresHoleCheck();
3200 Representation representation = hinstr->representation();
3201 int offset = instr->base_offset();
3202
3203 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
3204 instr->hydrogen()->IsDehoisted()) {
3205 // Sign extend key because it could be a 32 bit negative value
3206 // and the dehoisted address computation happens in 64 bits
3207 __ movsxlq(ToRegister(key), ToRegister(key));
3208 }
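  // With 32-bit smis the untagged int32 payload lives in the upper half of
  // the 64-bit element, so an Integer32-representation result can be read
  // directly without untagging.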
3209 if (representation.IsInteger32() && SmiValuesAre32Bits() &&
3210 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3211 DCHECK(!requires_hole_check);
3212 if (FLAG_debug_code) {
3213 Register scratch = kScratchRegister;
3214 __ Load(scratch,
3215 BuildFastArrayOperand(instr->elements(),
3216 key,
3217 instr->hydrogen()->key()->representation(),
3218 FAST_ELEMENTS,
3219 offset),
3220 Representation::Smi());
3221 __ AssertSmi(scratch);
3222 }
3223 // Read int value directly from upper half of the smi.
3224 STATIC_ASSERT(kSmiTag == 0);
3225 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
3226 offset += kPointerSize / 2;
3227 }
3228
3229 __ Load(result,
3230 BuildFastArrayOperand(instr->elements(), key,
3231 instr->hydrogen()->key()->representation(),
3232 FAST_ELEMENTS, offset),
3233 representation);
3234
3235 // Check for the hole value.
3236 if (requires_hole_check) {
3237 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3238 Condition smi = __ CheckSmi(result);
3239 DeoptimizeIf(NegateCondition(smi), instr, "not a Smi");
3240 } else {
3241 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3242 DeoptimizeIf(equal, instr, "hole");
3243 }
3244 }
3245 }
3246
3247
3248 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3249 if (instr->is_typed_elements()) {
3250 DoLoadKeyedExternalArray(instr);
3251 } else if (instr->hydrogen()->representation().IsDouble()) {
3252 DoLoadKeyedFixedDoubleArray(instr);
3253 } else {
3254 DoLoadKeyedFixedArray(instr);
3255 }
3256 }
3257
3258
3259 Operand LCodeGen::BuildFastArrayOperand(
3260 LOperand* elements_pointer,
3261 LOperand* key,
3262 Representation key_representation,
3263 ElementsKind elements_kind,
3264 uint32_t offset) {
3265 Register elements_pointer_reg = ToRegister(elements_pointer);
3266 int shift_size = ElementsKindToShiftSize(elements_kind);
3267 if (key->IsConstantOperand()) {
3268 int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
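    // The constant key is scaled by a shift of up to 3 below; abort if any
    // of the top four bits are set, since the scaled offset would not fit
    // into the 32-bit displacement.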
3269 if (constant_value & 0xF0000000) {
3270 Abort(kArrayIndexConstantValueTooBig);
3271 }
3272 return Operand(elements_pointer_reg,
3273 (constant_value << shift_size) + offset);
3274 } else {
3275 // Take the tag bit into account while computing the shift size.
3276 if (key_representation.IsSmi() && (shift_size >= 1)) {
3277 DCHECK(SmiValuesAre31Bits());
3278 shift_size -= kSmiTagSize;
3279 }
3280 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3281 return Operand(elements_pointer_reg,
3282 ToRegister(key),
3283 scale_factor,
3284 offset);
3285 }
3286 }
3287
3288
3289 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3290 DCHECK(ToRegister(instr->context()).is(rsi));
3291 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3292 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3293
3294 if (FLAG_vector_ics) {
3295 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3296 }
3297
3298 Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
3299 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3300 }
3301
3302
3303 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3304 Register result = ToRegister(instr->result());
3305
3306 if (instr->hydrogen()->from_inlined()) {
3307 __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
3308 } else {
3309 // Check for arguments adapter frame.
3310 Label done, adapted;
3311 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3312 __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
3313 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3314 __ j(equal, &adapted, Label::kNear);
3315
3316 // No arguments adaptor frame.
3317 __ movp(result, rbp);
3318 __ jmp(&done, Label::kNear);
3319
3320 // Arguments adaptor frame present.
3321 __ bind(&adapted);
3322 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3323
3324 // Result is the frame pointer for the frame if not adapted and for the real
3325 // frame below the adaptor frame if adapted.
3326 __ bind(&done);
3327 }
3328 }
3329
3330
3331 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3332 Register result = ToRegister(instr->result());
3333
3334 Label done;
3335
3336 // If no arguments adaptor frame the number of arguments is fixed.
3337 if (instr->elements()->IsRegister()) {
3338 __ cmpp(rbp, ToRegister(instr->elements()));
3339 } else {
3340 __ cmpp(rbp, ToOperand(instr->elements()));
3341 }
3342 __ movl(result, Immediate(scope()->num_parameters()));
3343 __ j(equal, &done, Label::kNear);
3344
3345 // Arguments adaptor frame present. Get argument length from there.
3346 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3347 __ SmiToInteger32(result,
3348 Operand(result,
3349 ArgumentsAdaptorFrameConstants::kLengthOffset));
3350
3351 // Argument length is in result register.
3352 __ bind(&done);
3353 }
3354
3355
3356 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3357 Register receiver = ToRegister(instr->receiver());
3358 Register function = ToRegister(instr->function());
3359
3360 // If the receiver is null or undefined, we have to pass the global
3361 // object as a receiver to normal functions. Values have to be
3362 // passed unchanged to builtins and strict-mode functions.
3363 Label global_object, receiver_ok;
3364 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3365
3366 if (!instr->hydrogen()->known_function()) {
3367 // Do not transform the receiver to object for strict mode
3368 // functions.
3369 __ movp(kScratchRegister,
3370 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3371 __ testb(FieldOperand(kScratchRegister,
3372 SharedFunctionInfo::kStrictModeByteOffset),
3373 Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
3374 __ j(not_equal, &receiver_ok, dist);
3375
3376 // Do not transform the receiver to object for builtins.
3377 __ testb(FieldOperand(kScratchRegister,
3378 SharedFunctionInfo::kNativeByteOffset),
3379 Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
3380 __ j(not_equal, &receiver_ok, dist);
3381 }
3382
3383 // Normal function. Replace undefined or null with global receiver.
3384 __ CompareRoot(receiver, Heap::kNullValueRootIndex);
3385 __ j(equal, &global_object, Label::kNear);
3386 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
3387 __ j(equal, &global_object, Label::kNear);
3388
3389 // The receiver should be a JS object.
3390 Condition is_smi = __ CheckSmi(receiver);
3391 DeoptimizeIf(is_smi, instr, "Smi");
3392 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
3393 DeoptimizeIf(below, instr, "not a JavaScript object");
3394
3395 __ jmp(&receiver_ok, Label::kNear);
3396 __ bind(&global_object);
3397 __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
3398 __ movp(receiver,
3399 Operand(receiver,
3400 Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
3401 __ movp(receiver, FieldOperand(receiver, GlobalObject::kGlobalProxyOffset));
3402
3403 __ bind(&receiver_ok);
3404 }
3405
3406
3407 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3408 Register receiver = ToRegister(instr->receiver());
3409 Register function = ToRegister(instr->function());
3410 Register length = ToRegister(instr->length());
3411 Register elements = ToRegister(instr->elements());
3412 DCHECK(receiver.is(rax)); // Used for parameter count.
3413 DCHECK(function.is(rdi)); // Required by InvokeFunction.
3414 DCHECK(ToRegister(instr->result()).is(rax));
3415
3416 // Copy the arguments to this function possibly from the
3417 // adaptor frame below it.
3418 const uint32_t kArgumentsLimit = 1 * KB;
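  // Deoptimize rather than push an unbounded number of arguments onto the
  // stack.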
3419 __ cmpp(length, Immediate(kArgumentsLimit));
3420 DeoptimizeIf(above, instr, "too many arguments");
3421
3422 __ Push(receiver);
3423 __ movp(receiver, length);
3424
3425 // Loop through the arguments pushing them onto the execution
3426 // stack.
3427 Label invoke, loop;
3428 // length is a small non-negative integer, due to the test above.
3429 __ testl(length, length);
3430 __ j(zero, &invoke, Label::kNear);
3431 __ bind(&loop);
3432 StackArgumentsAccessor args(elements, length,
3433 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3434 __ Push(args.GetArgumentOperand(0));
3435 __ decl(length);
3436 __ j(not_zero, &loop);
3437
3438 // Invoke the function.
3439 __ bind(&invoke);
3440 DCHECK(instr->HasPointerMap());
3441 LPointerMap* pointers = instr->pointer_map();
3442 SafepointGenerator safepoint_generator(
3443 this, pointers, Safepoint::kLazyDeopt);
3444 ParameterCount actual(rax);
3445 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3446 }
3447
3448
3449 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3450 LOperand* argument = instr->value();
3451 EmitPushTaggedOperand(argument);
3452 }
3453
3454
3455 void LCodeGen::DoDrop(LDrop* instr) {
3456 __ Drop(instr->count());
3457 }
3458
3459
3460 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3461 Register result = ToRegister(instr->result());
3462 __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
3463 }
3464
3465
3466 void LCodeGen::DoContext(LContext* instr) {
3467 Register result = ToRegister(instr->result());
3468 if (info()->IsOptimizing()) {
3469 __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
3470 } else {
3471 // If there is no frame, the context must be in rsi.
3472 DCHECK(result.is(rsi));
3473 }
3474 }
3475
3476
3477 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3478 DCHECK(ToRegister(instr->context()).is(rsi));
3479 __ Push(rsi); // The context is the first argument.
3480 __ Push(instr->hydrogen()->pairs());
3481 __ Push(Smi::FromInt(instr->hydrogen()->flags()));
3482 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3483 }
3484
3485
3486 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3487 int formal_parameter_count,
3488 int arity,
3489 LInstruction* instr,
3490 RDIState rdi_state) {
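  // A direct call bypasses the arguments adaptor frame; this is only safe
  // when the callee ignores its formal parameter count or is passed exactly
  // that many arguments.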
3491 bool dont_adapt_arguments =
3492 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3493 bool can_invoke_directly =
3494 dont_adapt_arguments || formal_parameter_count == arity;
3495
3496 LPointerMap* pointers = instr->pointer_map();
3497
3498 if (can_invoke_directly) {
3499 if (rdi_state == RDI_UNINITIALIZED) {
3500 __ Move(rdi, function);
3501 }
3502
3503 // Change context.
3504 __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3505
3506 // Set rax to arguments count if adaptation is not needed. Assumes that rax
3507 // is available to write to at this point.
3508 if (dont_adapt_arguments) {
3509 __ Set(rax, arity);
3510 }
3511
3512 // Invoke function.
3513 if (function.is_identical_to(info()->closure())) {
3514 __ CallSelf();
3515 } else {
3516 __ Call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3517 }
3518
3519 // Set up deoptimization.
3520 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
3521 } else {
3522 // We need to adapt arguments.
3523 SafepointGenerator generator(
3524 this, pointers, Safepoint::kLazyDeopt);
3525 ParameterCount count(arity);
3526 ParameterCount expected(formal_parameter_count);
3527 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3528 }
3529 }
3530
3531
3532 void LCodeGen::DoTailCallThroughMegamorphicCache(
3533 LTailCallThroughMegamorphicCache* instr) {
3534 Register receiver = ToRegister(instr->receiver());
3535 Register name = ToRegister(instr->name());
3536 DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
3537 DCHECK(name.is(LoadDescriptor::NameRegister()));
3538
3539 Register scratch = rbx;
3540 DCHECK(!scratch.is(receiver) && !scratch.is(name));
3541
3542 // Important for the tail-call.
3543 bool must_teardown_frame = NeedsEagerFrame();
3544
3545 // The probe will tail call to a handler if found.
3546 isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
3547 must_teardown_frame, receiver, name,
3548 scratch, no_reg);
3549
3550 // Tail call to miss if we ended up here.
3551 if (must_teardown_frame) __ leave();
3552 LoadIC::GenerateMiss(masm());
3553 }
3554
3555
3556 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3557 DCHECK(ToRegister(instr->result()).is(rax));
3558
3559 LPointerMap* pointers = instr->pointer_map();
3560 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3561
3562 if (instr->target()->IsConstantOperand()) {
3563 LConstantOperand* target = LConstantOperand::cast(instr->target());
3564 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3565 generator.BeforeCall(__ CallSize(code));
3566 __ call(code, RelocInfo::CODE_TARGET);
3567 } else {
3568 DCHECK(instr->target()->IsRegister());
3569 Register target = ToRegister(instr->target());
3570 generator.BeforeCall(__ CallSize(target));
3571 __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3572 __ call(target);
3573 }
3574 generator.AfterCall();
3575 }
3576
3577
3578 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3579 DCHECK(ToRegister(instr->function()).is(rdi));
3580 DCHECK(ToRegister(instr->result()).is(rax));
3581
3582 if (instr->hydrogen()->pass_argument_count()) {
3583 __ Set(rax, instr->arity());
3584 }
3585
3586 // Change context.
3587 __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3588
3589 LPointerMap* pointers = instr->pointer_map();
3590 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3591
3592 bool is_self_call = false;
3593 if (instr->hydrogen()->function()->IsConstant()) {
3594 Handle<JSFunction> jsfun = Handle<JSFunction>::null();
3595 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3596 jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
3597 is_self_call = jsfun.is_identical_to(info()->closure());
3598 }
3599
3600 if (is_self_call) {
3601 __ CallSelf();
3602 } else {
3603 Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
3604 generator.BeforeCall(__ CallSize(target));
3605 __ Call(target);
3606 }
3607 generator.AfterCall();
3608 }
3609
3610
3611 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3612 Register input_reg = ToRegister(instr->value());
3613 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3614 Heap::kHeapNumberMapRootIndex);
3615 DeoptimizeIf(not_equal, instr, "not a heap number");
3616
3617 Label slow, allocated, done;
3618 Register tmp = input_reg.is(rax) ? rcx : rax;
3619 Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
3620
3621 // Preserve the value of all registers.
3622 PushSafepointRegistersScope scope(this);
3623
3624 __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3625 // Check the sign of the argument. If the argument is positive, just
3626 // return it. We do not need to patch the stack since |input| and
3627 // |result| are the same register and |input| will be restored
3628 // unchanged by popping safepoint registers.
3629 __ testl(tmp, Immediate(HeapNumber::kSignMask));
3630 __ j(zero, &done);
3631
3632 __ AllocateHeapNumber(tmp, tmp2, &slow);
3633 __ jmp(&allocated, Label::kNear);
3634
3635 // Slow case: Call the runtime system to do the number allocation.
3636 __ bind(&slow);
3637 CallRuntimeFromDeferred(
3638 Runtime::kAllocateHeapNumber, 0, instr, instr->context());
3639 // Set the pointer to the new heap number in tmp.
3640 if (!tmp.is(rax)) __ movp(tmp, rax);
3641 // Restore input_reg after call to runtime.
3642 __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3643
3644 __ bind(&allocated);
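  // Clear the sign bit of the double value by shifting it out and back in,
  // yielding the absolute value, and store it into the allocated heap number.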
3645 __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
3646 __ shlq(tmp2, Immediate(1));
3647 __ shrq(tmp2, Immediate(1));
3648 __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
3649 __ StoreToSafepointRegisterSlot(input_reg, tmp);
3650
3651 __ bind(&done);
3652 }
3653
3654
3655 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3656 Register input_reg = ToRegister(instr->value());
3657 __ testl(input_reg, input_reg);
3658 Label is_positive;
3659 __ j(not_sign, &is_positive, Label::kNear);
3660 __ negl(input_reg); // Sets flags.
3661 DeoptimizeIf(negative, instr, "overflow");
3662 __ bind(&is_positive);
3663 }
3664
3665
3666 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
3667 Register input_reg = ToRegister(instr->value());
3668 __ testp(input_reg, input_reg);
3669 Label is_positive;
3670 __ j(not_sign, &is_positive, Label::kNear);
3671 __ negp(input_reg); // Sets flags.
3672 DeoptimizeIf(negative, instr, "overflow");
3673 __ bind(&is_positive);
3674 }
3675
3676
3677 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3678 // Class for deferred case.
3679 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
3680 public:
3681 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3682 : LDeferredCode(codegen), instr_(instr) { }
3683 virtual void Generate() OVERRIDE {
3684 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3685 }
3686 virtual LInstruction* instr() OVERRIDE { return instr_; }
3687 private:
3688 LMathAbs* instr_;
3689 };
3690
3691 DCHECK(instr->value()->Equals(instr->result()));
3692 Representation r = instr->hydrogen()->value()->representation();
3693
3694 if (r.IsDouble()) {
3695 XMMRegister scratch = double_scratch0();
3696 XMMRegister input_reg = ToDoubleRegister(instr->value());
3697 __ xorps(scratch, scratch);
3698 __ subsd(scratch, input_reg);
3699 __ andps(input_reg, scratch);
3700 } else if (r.IsInteger32()) {
3701 EmitIntegerMathAbs(instr);
3702 } else if (r.IsSmi()) {
3703 EmitSmiMathAbs(instr);
3704 } else { // Tagged case.
3705 DeferredMathAbsTaggedHeapNumber* deferred =
3706 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3707 Register input_reg = ToRegister(instr->value());
3708 // Smi check.
3709 __ JumpIfNotSmi(input_reg, deferred->entry());
3710 EmitSmiMathAbs(instr);
3711 __ bind(deferred->exit());
3712 }
3713 }
3714
3715
3716 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3717 XMMRegister xmm_scratch = double_scratch0();
3718 Register output_reg = ToRegister(instr->result());
3719 XMMRegister input_reg = ToDoubleRegister(instr->value());
3720
3721 if (CpuFeatures::IsSupported(SSE4_1)) {
3722 CpuFeatureScope scope(masm(), SSE4_1);
3723 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3724 // Deoptimize if minus zero.
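      // -0.0 is the only double whose bit pattern equals kMinInt64, so
      // subtracting 1 from the raw bits sets the overflow flag exactly in
      // that case.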
3725 __ movq(output_reg, input_reg);
3726 __ subq(output_reg, Immediate(1));
3727 DeoptimizeIf(overflow, instr, "minus zero");
3728 }
3729 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
3730 __ cvttsd2si(output_reg, xmm_scratch);
3731 __ cmpl(output_reg, Immediate(0x1));
3732 DeoptimizeIf(overflow, instr, "overflow");
3733 } else {
3734 Label negative_sign, done;
3735 // Deoptimize on unordered.
3736 __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3737 __ ucomisd(input_reg, xmm_scratch);
3738 DeoptimizeIf(parity_even, instr, "NaN");
3739 __ j(below, &negative_sign, Label::kNear);
3740
3741 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3742 // Check for negative zero.
3743 Label positive_sign;
3744 __ j(above, &positive_sign, Label::kNear);
3745 __ movmskpd(output_reg, input_reg);
3746 __ testq(output_reg, Immediate(1));
3747 DeoptimizeIf(not_zero, instr, "minus zero");
3748 __ Set(output_reg, 0);
3749 __ jmp(&done);
3750 __ bind(&positive_sign);
3751 }
3752
3753 // Use truncating instruction (OK because input is positive).
3754 __ cvttsd2si(output_reg, input_reg);
3755 // Overflow is signalled with minint.
3756 __ cmpl(output_reg, Immediate(0x1));
3757 DeoptimizeIf(overflow, instr, "overflow");
3758 __ jmp(&done, Label::kNear);
3759
3760 // Non-zero negative reaches here.
3761 __ bind(&negative_sign);
3762 // Truncate, then compare and compensate.
3763 __ cvttsd2si(output_reg, input_reg);
3764 __ Cvtlsi2sd(xmm_scratch, output_reg);
3765 __ ucomisd(input_reg, xmm_scratch);
3766 __ j(equal, &done, Label::kNear);
3767 __ subl(output_reg, Immediate(1));
3768 DeoptimizeIf(overflow, instr, "overflow");
3769
3770 __ bind(&done);
3771 }
3772 }
3773
3774
3775 void LCodeGen::DoMathRound(LMathRound* instr) {
3776 const XMMRegister xmm_scratch = double_scratch0();
3777 Register output_reg = ToRegister(instr->result());
3778 XMMRegister input_reg = ToDoubleRegister(instr->value());
3779 XMMRegister input_temp = ToDoubleRegister(instr->temp());
3780 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
3781 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
3782
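  // Math.round rounds half towards positive infinity (essentially
  // floor(x + 0.5), modulo the sign of zero). The ranges [0.5, +inf),
  // [-0.5, 0.5) and (-inf, -0.5) are handled separately below.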
3783 Label done, round_to_zero, below_one_half;
3784 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3785 __ movq(kScratchRegister, one_half);
3786 __ movq(xmm_scratch, kScratchRegister);
3787 __ ucomisd(xmm_scratch, input_reg);
3788 __ j(above, &below_one_half, Label::kNear);
3789
3790 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
3791 __ addsd(xmm_scratch, input_reg);
3792 __ cvttsd2si(output_reg, xmm_scratch);
3793 // Overflow is signalled with minint.
3794 __ cmpl(output_reg, Immediate(0x1));
3795 DeoptimizeIf(overflow, instr, "overflow");
3796 __ jmp(&done, dist);
3797
3798 __ bind(&below_one_half);
3799 __ movq(kScratchRegister, minus_one_half);
3800 __ movq(xmm_scratch, kScratchRegister);
3801 __ ucomisd(xmm_scratch, input_reg);
3802 __ j(below_equal, &round_to_zero, Label::kNear);
3803
3804 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
3805 // compare and compensate.
3806 __ movq(input_temp, input_reg); // Do not alter input_reg.
3807 __ subsd(input_temp, xmm_scratch);
3808 __ cvttsd2si(output_reg, input_temp);
3809 // Catch minint due to overflow, and to prevent overflow when compensating.
3810 __ cmpl(output_reg, Immediate(0x1));
3811 DeoptimizeIf(overflow, instr, "overflow");
3812
3813 __ Cvtlsi2sd(xmm_scratch, output_reg);
3814 __ ucomisd(xmm_scratch, input_temp);
3815 __ j(equal, &done, dist);
3816 __ subl(output_reg, Immediate(1));
3817 // No overflow because we already ruled out minint.
3818 __ jmp(&done, dist);
3819
3820 __ bind(&round_to_zero);
3821 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3822 // we can ignore the difference between a result of -0 and +0.
3823 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3824 __ movq(output_reg, input_reg);
3825 __ testq(output_reg, output_reg);
3826 DeoptimizeIf(negative, instr, "minus zero");
3827 }
3828 __ Set(output_reg, 0);
3829 __ bind(&done);
3830 }
3831
3832
3833 void LCodeGen::DoMathFround(LMathFround* instr) {
3834 XMMRegister input_reg = ToDoubleRegister(instr->value());
3835 XMMRegister output_reg = ToDoubleRegister(instr->result());
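  // Round to float32 precision and widen the result back to float64, as
  // required by Math.fround.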
3836 __ cvtsd2ss(output_reg, input_reg);
3837 __ cvtss2sd(output_reg, output_reg);
3838 }
3839
3840
3841 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3842 XMMRegister output = ToDoubleRegister(instr->result());
3843 if (instr->value()->IsDoubleRegister()) {
3844 XMMRegister input = ToDoubleRegister(instr->value());
3845 __ sqrtsd(output, input);
3846 } else {
3847 Operand input = ToOperand(instr->value());
3848 __ sqrtsd(output, input);
3849 }
3850 }
3851
3852
3853 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3854 XMMRegister xmm_scratch = double_scratch0();
3855 XMMRegister input_reg = ToDoubleRegister(instr->value());
3856 DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
3857
3858 // Note that according to ECMA-262 15.8.2.13:
3859 // Math.pow(-Infinity, 0.5) == Infinity
3860 // Math.sqrt(-Infinity) == NaN
3861 Label done, sqrt;
3862 // Check base for -Infinity. According to IEEE-754, double-precision
3863 // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
3864 __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
3865 __ movq(xmm_scratch, kScratchRegister);
3866 __ ucomisd(xmm_scratch, input_reg);
3867 // Comparing -Infinity with NaN results in "unordered", which sets the
3868 // zero flag as if both were equal. However, it also sets the carry flag.
3869 __ j(not_equal, &sqrt, Label::kNear);
3870 __ j(carry, &sqrt, Label::kNear);
3871 // If input is -Infinity, return Infinity.
3872 __ xorps(input_reg, input_reg);
3873 __ subsd(input_reg, xmm_scratch);
3874 __ jmp(&done, Label::kNear);
3875
3876 // Square root.
3877 __ bind(&sqrt);
3878 __ xorps(xmm_scratch, xmm_scratch);
3879 __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
3880 __ sqrtsd(input_reg, input_reg);
3881 __ bind(&done);
3882 }
3883
3884
3885 void LCodeGen::DoPower(LPower* instr) {
3886 Representation exponent_type = instr->hydrogen()->right()->representation();
3887 // Having marked this as a call, we can use any registers.
3888 // Just make sure that the input/output registers are the expected ones.
3889
3890 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3891 DCHECK(!instr->right()->IsRegister() ||
3892 ToRegister(instr->right()).is(tagged_exponent));
3893 DCHECK(!instr->right()->IsDoubleRegister() ||
3894 ToDoubleRegister(instr->right()).is(xmm1));
3895 DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
3896 DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
3897
3898 if (exponent_type.IsSmi()) {
3899 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3900 __ CallStub(&stub);
3901 } else if (exponent_type.IsTagged()) {
3902 Label no_deopt;
3903 __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
3904 __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
3905 DeoptimizeIf(not_equal, instr, "not a heap number");
3906 __ bind(&no_deopt);
3907 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3908 __ CallStub(&stub);
3909 } else if (exponent_type.IsInteger32()) {
3910 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3911 __ CallStub(&stub);
3912 } else {
3913 DCHECK(exponent_type.IsDouble());
3914 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3915 __ CallStub(&stub);
3916 }
3917 }
3918
3919
3920 void LCodeGen::DoMathExp(LMathExp* instr) {
3921 XMMRegister input = ToDoubleRegister(instr->value());
3922 XMMRegister result = ToDoubleRegister(instr->result());
3923 XMMRegister temp0 = double_scratch0();
3924 Register temp1 = ToRegister(instr->temp1());
3925 Register temp2 = ToRegister(instr->temp2());
3926
3927 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
3928 }
3929
3930
3931 void LCodeGen::DoMathLog(LMathLog* instr) {
3932 DCHECK(instr->value()->Equals(instr->result()));
3933 XMMRegister input_reg = ToDoubleRegister(instr->value());
3934 XMMRegister xmm_scratch = double_scratch0();
3935 Label positive, done, zero;
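  // Math.log is computed with the x87 fldln2/fyl2x sequence
  // (ln(2) * log2(x) == ln(x)); zero yields -Infinity, and negative or NaN
  // inputs yield the canonical NaN.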
3936 __ xorps(xmm_scratch, xmm_scratch);
3937 __ ucomisd(input_reg, xmm_scratch);
3938 __ j(above, &positive, Label::kNear);
3939 __ j(not_carry, &zero, Label::kNear);
3940 ExternalReference nan =
3941 ExternalReference::address_of_canonical_non_hole_nan();
3942 Operand nan_operand = masm()->ExternalOperand(nan);
3943 __ movsd(input_reg, nan_operand);
3944 __ jmp(&done, Label::kNear);
3945 __ bind(&zero);
3946 ExternalReference ninf =
3947 ExternalReference::address_of_negative_infinity();
3948 Operand ninf_operand = masm()->ExternalOperand(ninf);
3949 __ movsd(input_reg, ninf_operand);
3950 __ jmp(&done, Label::kNear);
3951 __ bind(&positive);
3952 __ fldln2();
3953 __ subp(rsp, Immediate(kDoubleSize));
3954 __ movsd(Operand(rsp, 0), input_reg);
3955 __ fld_d(Operand(rsp, 0));
3956 __ fyl2x();
3957 __ fstp_d(Operand(rsp, 0));
3958 __ movsd(input_reg, Operand(rsp, 0));
3959 __ addp(rsp, Immediate(kDoubleSize));
3960 __ bind(&done);
3961 }
3962
3963
3964 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3965 Register input = ToRegister(instr->value());
3966 Register result = ToRegister(instr->result());
3967 Label not_zero_input;
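  // bsrl returns the index of the most significant set bit but leaves the
  // destination undefined for a zero input, so that case is handled
  // separately below.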
3968 __ bsrl(result, input);
3969
3970 __ j(not_zero, &not_zero_input);
3971 __ Set(result, 63); // 63^31 == 32
3972
3973 __ bind(&not_zero_input);
3974 __ xorl(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
3975 }
3976
3977
3978 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3979 DCHECK(ToRegister(instr->context()).is(rsi));
3980 DCHECK(ToRegister(instr->function()).is(rdi));
3981 DCHECK(instr->HasPointerMap());
3982
3983 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3984 if (known_function.is_null()) {
3985 LPointerMap* pointers = instr->pointer_map();
3986 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3987 ParameterCount count(instr->arity());
3988 __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
3989 } else {
3990 CallKnownFunction(known_function,
3991 instr->hydrogen()->formal_parameter_count(),
3992 instr->arity(),
3993 instr,
3994 RDI_CONTAINS_TARGET);
3995 }
3996 }
3997
3998
3999 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4000 DCHECK(ToRegister(instr->context()).is(rsi));
4001 DCHECK(ToRegister(instr->function()).is(rdi));
4002 DCHECK(ToRegister(instr->result()).is(rax));
4003
4004 int arity = instr->arity();
4005 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
4006 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4007 }
4008
4009
4010 void LCodeGen::DoCallNew(LCallNew* instr) {
4011 DCHECK(ToRegister(instr->context()).is(rsi));
4012 DCHECK(ToRegister(instr->constructor()).is(rdi));
4013 DCHECK(ToRegister(instr->result()).is(rax));
4014
4015 __ Set(rax, instr->arity());
4016 // No cell in rbx for construct type feedback in optimized code.
4017 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
4018 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
4019 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4020 }
4021
4022
4023 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4024 DCHECK(ToRegister(instr->context()).is(rsi));
4025 DCHECK(ToRegister(instr->constructor()).is(rdi));
4026 DCHECK(ToRegister(instr->result()).is(rax));
4027
4028 __ Set(rax, instr->arity());
4029 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
4030 ElementsKind kind = instr->hydrogen()->elements_kind();
4031 AllocationSiteOverrideMode override_mode =
4032 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4033 ? DISABLE_ALLOCATION_SITES
4034 : DONT_OVERRIDE;
4035
4036 if (instr->arity() == 0) {
4037 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4038 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4039 } else if (instr->arity() == 1) {
4040 Label done;
4041 if (IsFastPackedElementsKind(kind)) {
4042 Label packed_case;
4043 // A single non-zero argument (the array length) requires a holey
4044 // elements kind, so dispatch on the value of the first argument.
4045 __ movp(rcx, Operand(rsp, 0));
4046 __ testp(rcx, rcx);
4047 __ j(zero, &packed_case, Label::kNear);
4048
4049 ElementsKind holey_kind = GetHoleyElementsKind(kind);
4050 ArraySingleArgumentConstructorStub stub(isolate(),
4051 holey_kind,
4052 override_mode);
4053 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4054 __ jmp(&done, Label::kNear);
4055 __ bind(&packed_case);
4056 }
4057
4058 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4059 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4060 __ bind(&done);
4061 } else {
4062 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4063 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4064 }
4065 }
4066
4067
4068 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4069 DCHECK(ToRegister(instr->context()).is(rsi));
4070 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
4071 }
4072
4073
4074 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4075 Register function = ToRegister(instr->function());
4076 Register code_object = ToRegister(instr->code_object());
4077 __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
4078 __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
4079 }
4080
4081
4082 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4083 Register result = ToRegister(instr->result());
4084 Register base = ToRegister(instr->base_object());
4085 if (instr->offset()->IsConstantOperand()) {
4086 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4087 __ leap(result, Operand(base, ToInteger32(offset)));
4088 } else {
4089 Register offset = ToRegister(instr->offset());
4090 __ leap(result, Operand(base, offset, times_1, 0));
4091 }
4092 }
4093
4094
4095 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4096 HStoreNamedField* hinstr = instr->hydrogen();
4097 Representation representation = instr->representation();
4098
4099 HObjectAccess access = hinstr->access();
4100 int offset = access.offset();
4101
4102 if (access.IsExternalMemory()) {
4103 DCHECK(!hinstr->NeedsWriteBarrier());
4104 Register value = ToRegister(instr->value());
4105 if (instr->object()->IsConstantOperand()) {
4106 DCHECK(value.is(rax));
4107 LConstantOperand* object = LConstantOperand::cast(instr->object());
4108 __ store_rax(ToExternalReference(object));
4109 } else {
4110 Register object = ToRegister(instr->object());
4111 __ Store(MemOperand(object, offset), value, representation);
4112 }
4113 return;
4114 }
4115
4116 Register object = ToRegister(instr->object());
4117 __ AssertNotSmi(object);
4118
4119 DCHECK(!representation.IsSmi() ||
4120 !instr->value()->IsConstantOperand() ||
4121 IsInteger32Constant(LConstantOperand::cast(instr->value())));
4122 if (representation.IsDouble()) {
4123 DCHECK(access.IsInobject());
4124 DCHECK(!hinstr->has_transition());
4125 DCHECK(!hinstr->NeedsWriteBarrier());
4126 XMMRegister value = ToDoubleRegister(instr->value());
4127 __ movsd(FieldOperand(object, offset), value);
4128 return;
4129 }
4130
4131 if (hinstr->has_transition()) {
4132 Handle<Map> transition = hinstr->transition_map();
4133 AddDeprecationDependency(transition);
4134 if (!hinstr->NeedsWriteBarrierForMap()) {
4135 __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
4136 } else {
4137 Register temp = ToRegister(instr->temp());
4138 __ Move(kScratchRegister, transition);
4139 __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
4140 // Update the write barrier for the map field.
4141 __ RecordWriteForMap(object,
4142 kScratchRegister,
4143 temp,
4144 kSaveFPRegs);
4145 }
4146 }
4147
4148 // Do the store.
4149 Register write_register = object;
4150 if (!access.IsInobject()) {
4151 write_register = ToRegister(instr->temp());
4152 __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
4153 }
4154
4155 if (representation.IsSmi() && SmiValuesAre32Bits() &&
4156 hinstr->value()->representation().IsInteger32()) {
4157 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4158 if (FLAG_debug_code) {
4159 Register scratch = kScratchRegister;
4160 __ Load(scratch, FieldOperand(write_register, offset), representation);
4161 __ AssertSmi(scratch);
4162 }
4163 // Store int value directly to upper half of the smi.
4164 STATIC_ASSERT(kSmiTag == 0);
4165 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
4166 offset += kPointerSize / 2;
4167 representation = Representation::Integer32();
4168 }
4169
4170 Operand operand = FieldOperand(write_register, offset);
4171
4172 if (instr->value()->IsRegister()) {
4173 Register value = ToRegister(instr->value());
4174 __ Store(operand, value, representation);
4175 } else {
4176 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4177 if (IsInteger32Constant(operand_value)) {
4178 DCHECK(!hinstr->NeedsWriteBarrier());
4179 int32_t value = ToInteger32(operand_value);
4180 if (representation.IsSmi()) {
4181 __ Move(operand, Smi::FromInt(value));
4182
4183 } else {
4184 __ movl(operand, Immediate(value));
4185 }
4186
4187 } else {
4188 Handle<Object> handle_value = ToHandle(operand_value);
4189 DCHECK(!hinstr->NeedsWriteBarrier());
4190 __ Move(operand, handle_value);
4191 }
4192 }
4193
4194 if (hinstr->NeedsWriteBarrier()) {
4195 Register value = ToRegister(instr->value());
4196 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4197 // Update the write barrier for the object for in-object properties.
4198 __ RecordWriteField(write_register,
4199 offset,
4200 value,
4201 temp,
4202 kSaveFPRegs,
4203 EMIT_REMEMBERED_SET,
4204 hinstr->SmiCheckForWriteBarrier(),
4205 hinstr->PointersToHereCheckForValue());
4206 }
4207 }
4208
4209
4210 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4211 DCHECK(ToRegister(instr->context()).is(rsi));
4212 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4213 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4214
4215 __ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
4216 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4217 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4218 }
4219
4220
4221 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4222 Representation representation = instr->hydrogen()->length()->representation();
4223 DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
4224 DCHECK(representation.IsSmiOrInteger32());
4225
4226 Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
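  // The check deoptimizes when (length cc index) holds; where the comparison
  // below is emitted with the operands swapped, the condition is commuted.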
4227 if (instr->length()->IsConstantOperand()) {
4228 int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
4229 Register index = ToRegister(instr->index());
4230 if (representation.IsSmi()) {
4231 __ Cmp(index, Smi::FromInt(length));
4232 } else {
4233 __ cmpl(index, Immediate(length));
4234 }
4235 cc = CommuteCondition(cc);
4236 } else if (instr->index()->IsConstantOperand()) {
4237 int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
4238 if (instr->length()->IsRegister()) {
4239 Register length = ToRegister(instr->length());
4240 if (representation.IsSmi()) {
4241 __ Cmp(length, Smi::FromInt(index));
4242 } else {
4243 __ cmpl(length, Immediate(index));
4244 }
4245 } else {
4246 Operand length = ToOperand(instr->length());
4247 if (representation.IsSmi()) {
4248 __ Cmp(length, Smi::FromInt(index));
4249 } else {
4250 __ cmpl(length, Immediate(index));
4251 }
4252 }
4253 } else {
4254 Register index = ToRegister(instr->index());
4255 if (instr->length()->IsRegister()) {
4256 Register length = ToRegister(instr->length());
4257 if (representation.IsSmi()) {
4258 __ cmpp(length, index);
4259 } else {
4260 __ cmpl(length, index);
4261 }
4262 } else {
4263 Operand length = ToOperand(instr->length());
4264 if (representation.IsSmi()) {
4265 __ cmpp(length, index);
4266 } else {
4267 __ cmpl(length, index);
4268 }
4269 }
4270 }
4271 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4272 Label done;
4273 __ j(NegateCondition(cc), &done, Label::kNear);
4274 __ int3();
4275 __ bind(&done);
4276 } else {
4277 DeoptimizeIf(cc, instr, "out of bounds");
4278 }
4279 }
4280
4281
4282 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4283 ElementsKind elements_kind = instr->elements_kind();
4284 LOperand* key = instr->key();
4285 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
4286 Register key_reg = ToRegister(key);
4287 Representation key_representation =
4288 instr->hydrogen()->key()->representation();
4289 if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
4290 __ SmiToInteger64(key_reg, key_reg);
4291 } else if (instr->hydrogen()->IsDehoisted()) {
4292 // Sign extend key because it could be a 32 bit negative value
4293 // and the dehoisted address computation happens in 64 bits
4294 __ movsxlq(key_reg, key_reg);
4295 }
4296 }
4297 Operand operand(BuildFastArrayOperand(
4298 instr->elements(),
4299 key,
4300 instr->hydrogen()->key()->representation(),
4301 elements_kind,
4302 instr->base_offset()));
4303
4304 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4305 elements_kind == FLOAT32_ELEMENTS) {
4306 XMMRegister value(ToDoubleRegister(instr->value()));
4307 __ cvtsd2ss(value, value);
4308 __ movss(operand, value);
4309 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4310 elements_kind == FLOAT64_ELEMENTS) {
4311 __ movsd(operand, ToDoubleRegister(instr->value()));
4312 } else {
4313 Register value(ToRegister(instr->value()));
4314 switch (elements_kind) {
4315 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4316 case EXTERNAL_INT8_ELEMENTS:
4317 case EXTERNAL_UINT8_ELEMENTS:
4318 case INT8_ELEMENTS:
4319 case UINT8_ELEMENTS:
4320 case UINT8_CLAMPED_ELEMENTS:
4321 __ movb(operand, value);
4322 break;
4323 case EXTERNAL_INT16_ELEMENTS:
4324 case EXTERNAL_UINT16_ELEMENTS:
4325 case INT16_ELEMENTS:
4326 case UINT16_ELEMENTS:
4327 __ movw(operand, value);
4328 break;
4329 case EXTERNAL_INT32_ELEMENTS:
4330 case EXTERNAL_UINT32_ELEMENTS:
4331 case INT32_ELEMENTS:
4332 case UINT32_ELEMENTS:
4333 __ movl(operand, value);
4334 break;
4335 case EXTERNAL_FLOAT32_ELEMENTS:
4336 case EXTERNAL_FLOAT64_ELEMENTS:
4337 case FLOAT32_ELEMENTS:
4338 case FLOAT64_ELEMENTS:
4339 case FAST_ELEMENTS:
4340 case FAST_SMI_ELEMENTS:
4341 case FAST_DOUBLE_ELEMENTS:
4342 case FAST_HOLEY_ELEMENTS:
4343 case FAST_HOLEY_SMI_ELEMENTS:
4344 case FAST_HOLEY_DOUBLE_ELEMENTS:
4345 case DICTIONARY_ELEMENTS:
4346 case SLOPPY_ARGUMENTS_ELEMENTS:
4347 UNREACHABLE();
4348 break;
4349 }
4350 }
4351 }
4352
4353
4354 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4355 XMMRegister value = ToDoubleRegister(instr->value());
4356 LOperand* key = instr->key();
4357 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
4358 instr->hydrogen()->IsDehoisted()) {
4359 // Sign extend key because it could be a 32 bit negative value
4360 // and the dehoisted address computation happens in 64 bits
4361 __ movsxlq(ToRegister(key), ToRegister(key));
4362 }
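  // NaNs stored into a FixedDoubleArray are canonicalized so that the hole
  // NaN bit pattern stays unambiguous.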
4363 if (instr->NeedsCanonicalization()) {
4364 Label have_value;
4365
4366 __ ucomisd(value, value);
4367 __ j(parity_odd, &have_value, Label::kNear);  // Jump if not NaN.
4368
4369 __ Set(kScratchRegister,
4370 bit_cast<uint64_t>(
4371 FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
4372 __ movq(value, kScratchRegister);
4373
4374 __ bind(&have_value);
4375 }
4376
4377 Operand double_store_operand = BuildFastArrayOperand(
4378 instr->elements(),
4379 key,
4380 instr->hydrogen()->key()->representation(),
4381 FAST_DOUBLE_ELEMENTS,
4382 instr->base_offset());
4383
4384 __ movsd(double_store_operand, value);
4385 }
4386
4387
4388 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4389 HStoreKeyed* hinstr = instr->hydrogen();
4390 LOperand* key = instr->key();
4391 int offset = instr->base_offset();
4392 Representation representation = hinstr->value()->representation();
4393
4394 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
4395 instr->hydrogen()->IsDehoisted()) {
4396 // Sign extend key because it could be a 32 bit negative value
4397 // and the dehoisted address computation happens in 64 bits
4398 __ movsxlq(ToRegister(key), ToRegister(key));
4399 }
4400 if (representation.IsInteger32() && SmiValuesAre32Bits()) {
4401 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4402 DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4403 if (FLAG_debug_code) {
4404 Register scratch = kScratchRegister;
4405 __ Load(scratch,
4406 BuildFastArrayOperand(instr->elements(),
4407 key,
4408 instr->hydrogen()->key()->representation(),
4409 FAST_ELEMENTS,
4410 offset),
4411 Representation::Smi());
4412 __ AssertSmi(scratch);
4413 }
4414 // Store int value directly to upper half of the smi.
4415 STATIC_ASSERT(kSmiTag == 0);
4416 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
4417 offset += kPointerSize / 2;
4418 }
4419
4420 Operand operand =
4421 BuildFastArrayOperand(instr->elements(),
4422 key,
4423 instr->hydrogen()->key()->representation(),
4424 FAST_ELEMENTS,
4425 offset);
4426 if (instr->value()->IsRegister()) {
4427 __ Store(operand, ToRegister(instr->value()), representation);
4428 } else {
4429 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4430 if (IsInteger32Constant(operand_value)) {
4431 int32_t value = ToInteger32(operand_value);
4432 if (representation.IsSmi()) {
4433 __ Move(operand, Smi::FromInt(value));
4434
4435 } else {
4436 __ movl(operand, Immediate(value));
4437 }
4438 } else {
4439 Handle<Object> handle_value = ToHandle(operand_value);
4440 __ Move(operand, handle_value);
4441 }
4442 }
4443
4444 if (hinstr->NeedsWriteBarrier()) {
4445 Register elements = ToRegister(instr->elements());
4446 DCHECK(instr->value()->IsRegister());
4447 Register value = ToRegister(instr->value());
4448 DCHECK(!key->IsConstantOperand());
4449 SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
4450 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4451 // Compute address of modified element and store it into key register.
4452 Register key_reg(ToRegister(key));
4453 __ leap(key_reg, operand);
4454 __ RecordWrite(elements,
4455 key_reg,
4456 value,
4457 kSaveFPRegs,
4458 EMIT_REMEMBERED_SET,
4459 check_needed,
4460 hinstr->PointersToHereCheckForValue());
4461 }
4462 }
4463
4464
4465 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4466 if (instr->is_typed_elements()) {
4467 DoStoreKeyedExternalArray(instr);
4468 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4469 DoStoreKeyedFixedDoubleArray(instr);
4470 } else {
4471 DoStoreKeyedFixedArray(instr);
4472 }
4473 }
4474
4475
4476 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4477 DCHECK(ToRegister(instr->context()).is(rsi));
4478 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4479 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4480 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4481
4482 Handle<Code> ic =
4483 CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
4484 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4485 }
4486
4487
4488 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4489 Register object_reg = ToRegister(instr->object());
4490
4491 Handle<Map> from_map = instr->original_map();
4492 Handle<Map> to_map = instr->transitioned_map();
4493 ElementsKind from_kind = instr->from_kind();
4494 ElementsKind to_kind = instr->to_kind();
4495
4496 Label not_applicable;
4497 __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4498 __ j(not_equal, &not_applicable);
4499 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4500 Register new_map_reg = ToRegister(instr->new_map_temp());
4501 __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
4502 __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
4503 // Write barrier.
4504 __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
4505 kDontSaveFPRegs);
4506 } else {
4507 DCHECK(object_reg.is(rax));
4508 DCHECK(ToRegister(instr->context()).is(rsi));
4509 PushSafepointRegistersScope scope(this);
4510 __ Move(rbx, to_map);
4511 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4512 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4513 __ CallStub(&stub);
4514 RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
4515 }
4516 __ bind(&not_applicable);
4517 }
4518
4519
4520 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4521 Register object = ToRegister(instr->object());
4522 Register temp = ToRegister(instr->temp());
4523 Label no_memento_found;
4524 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4525 DeoptimizeIf(equal, instr, "memento found");
4526 __ bind(&no_memento_found);
4527 }
4528
4529
4530 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4531 DCHECK(ToRegister(instr->context()).is(rsi));
4532 DCHECK(ToRegister(instr->left()).is(rdx));
4533 DCHECK(ToRegister(instr->right()).is(rax));
4534 StringAddStub stub(isolate(),
4535 instr->hydrogen()->flags(),
4536 instr->hydrogen()->pretenure_flag());
4537 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4538 }
4539
4540
4541 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4542 class DeferredStringCharCodeAt FINAL : public LDeferredCode {
4543 public:
4544 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4545 : LDeferredCode(codegen), instr_(instr) { }
4546 virtual void Generate() OVERRIDE {
4547 codegen()->DoDeferredStringCharCodeAt(instr_);
4548 }
4549 virtual LInstruction* instr() OVERRIDE { return instr_; }
4550 private:
4551 LStringCharCodeAt* instr_;
4552 };
4553
4554 DeferredStringCharCodeAt* deferred =
4555 new(zone()) DeferredStringCharCodeAt(this, instr);
4556
4557 StringCharLoadGenerator::Generate(masm(),
4558 ToRegister(instr->string()),
4559 ToRegister(instr->index()),
4560 ToRegister(instr->result()),
4561 deferred->entry());
4562 __ bind(deferred->exit());
4563 }
4564
4565
4566 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4567 Register string = ToRegister(instr->string());
4568 Register result = ToRegister(instr->result());
4569
4570 // TODO(3095996): Get rid of this. For now, we need to make the
4571 // result register contain a valid pointer because it is already
4572 // contained in the register pointer map.
4573 __ Set(result, 0);
4574
4575 PushSafepointRegistersScope scope(this);
4576 __ Push(string);
4577 // Push the index as a smi. This is safe because of the checks in
4578 // DoStringCharCodeAt above.
4579 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
4580 if (instr->index()->IsConstantOperand()) {
4581 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4582 __ Push(Smi::FromInt(const_index));
4583 } else {
4584 Register index = ToRegister(instr->index());
4585 __ Integer32ToSmi(index, index);
4586 __ Push(index);
4587 }
4588 CallRuntimeFromDeferred(
4589 Runtime::kStringCharCodeAtRT, 2, instr, instr->context());
4590 __ AssertSmi(rax);
4591 __ SmiToInteger32(rax, rax);
4592 __ StoreToSafepointRegisterSlot(result, rax);
4593 }
4594
4595
4596 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4597 class DeferredStringCharFromCode FINAL : public LDeferredCode {
4598 public:
4599 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4600 : LDeferredCode(codegen), instr_(instr) { }
4601 virtual void Generate() OVERRIDE {
4602 codegen()->DoDeferredStringCharFromCode(instr_);
4603 }
4604 virtual LInstruction* instr() OVERRIDE { return instr_; }
4605 private:
4606 LStringCharFromCode* instr_;
4607 };
4608
4609 DeferredStringCharFromCode* deferred =
4610 new(zone()) DeferredStringCharFromCode(this, instr);
4611
4612 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4613 Register char_code = ToRegister(instr->char_code());
4614 Register result = ToRegister(instr->result());
4615 DCHECK(!char_code.is(result));
4616
4617 __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
4618 __ j(above, deferred->entry());
4619 __ movsxlq(char_code, char_code);
4620 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4621 __ movp(result, FieldOperand(result,
4622 char_code, times_pointer_size,
4623 FixedArray::kHeaderSize));
4624 __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
4625 __ j(equal, deferred->entry());
4626 __ bind(deferred->exit());
4627 }
4628
4629
4630 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4631 Register char_code = ToRegister(instr->char_code());
4632 Register result = ToRegister(instr->result());
4633
4634 // TODO(3095996): Get rid of this. For now, we need to make the
4635 // result register contain a valid pointer because it is already
4636 // contained in the register pointer map.
4637 __ Set(result, 0);
4638
4639 PushSafepointRegistersScope scope(this);
4640 __ Integer32ToSmi(char_code, char_code);
4641 __ Push(char_code);
4642 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4643 __ StoreToSafepointRegisterSlot(result, rax);
4644 }
4645
4646
4647 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4648 LOperand* input = instr->value();
4649 DCHECK(input->IsRegister() || input->IsStackSlot());
4650 LOperand* output = instr->result();
4651 DCHECK(output->IsDoubleRegister());
4652 if (input->IsRegister()) {
4653 __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
4654 } else {
4655 __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
4656 }
4657 }
4658
4659
4660 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4661 LOperand* input = instr->value();
4662 LOperand* output = instr->result();
4663
4664 __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
4665 }
4666
4667
4668 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4669 class DeferredNumberTagI FINAL : public LDeferredCode {
4670 public:
4671 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4672 : LDeferredCode(codegen), instr_(instr) { }
4673 virtual void Generate() OVERRIDE {
4674 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4675 instr_->temp2(), SIGNED_INT32);
4676 }
4677 virtual LInstruction* instr() OVERRIDE { return instr_; }
4678 private:
4679 LNumberTagI* instr_;
4680 };
4681
4682 LOperand* input = instr->value();
4683 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4684 Register reg = ToRegister(input);
4685
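  // With 32-bit smi values every int32 fits into a smi; with 31-bit smis the
  // tagging can overflow, in which case the deferred code boxes the value in
  // a heap number.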
4686 if (SmiValuesAre32Bits()) {
4687 __ Integer32ToSmi(reg, reg);
4688 } else {
4689 DCHECK(SmiValuesAre31Bits());
4690 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4691 __ Integer32ToSmi(reg, reg);
4692 __ j(overflow, deferred->entry());
4693 __ bind(deferred->exit());
4694 }
4695 }
4696
4697
4698 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4699 class DeferredNumberTagU FINAL : public LDeferredCode {
4700 public:
4701 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4702 : LDeferredCode(codegen), instr_(instr) { }
4703 virtual void Generate() OVERRIDE {
4704 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4705 instr_->temp2(), UNSIGNED_INT32);
4706 }
4707 virtual LInstruction* instr() OVERRIDE { return instr_; }
4708 private:
4709 LNumberTagU* instr_;
4710 };
4711
4712 LOperand* input = instr->value();
4713 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4714 Register reg = ToRegister(input);
4715
4716 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
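  // Unsigned values above Smi::kMaxValue cannot be represented as a smi and
  // are boxed in a heap number by the deferred code.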
4717 __ cmpl(reg, Immediate(Smi::kMaxValue));
4718 __ j(above, deferred->entry());
4719 __ Integer32ToSmi(reg, reg);
4720 __ bind(deferred->exit());
4721 }
4722
4723
4724 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4725 LOperand* value,
4726 LOperand* temp1,
4727 LOperand* temp2,
4728 IntegerSignedness signedness) {
4729 Label done, slow;
4730 Register reg = ToRegister(value);
4731 Register tmp = ToRegister(temp1);
4732 XMMRegister temp_xmm = ToDoubleRegister(temp2);
4733
4734 // Load the value into temp_xmm, which will be preserved across a potential
4735 // call to the runtime (MacroAssembler::EnterExitFrameEpilogue preserves only
4736 // allocatable XMM registers on x64).
4737 if (signedness == SIGNED_INT32) {
4738 DCHECK(SmiValuesAre31Bits());
4739 // There was overflow, so bits 30 and 31 of the original integer
4740 // disagree. Try to allocate a heap number in new space and store
4741 // the value in there. If that fails, call the runtime system.
4742 __ SmiToInteger32(reg, reg);
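// The arithmetic shift in SmiToInteger32 filled bit 31 with a copy of
// bit 30; since the tagging overflowed, those bits disagreed in the original
// value, so flipping bit 31 restores the untagged integer exactly.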
4743 __ xorl(reg, Immediate(0x80000000));
4744 __ cvtlsi2sd(temp_xmm, reg);
4745 } else {
4746 DCHECK(signedness == UNSIGNED_INT32);
4747 __ LoadUint32(temp_xmm, reg);
4748 }
4749
4750 if (FLAG_inline_new) {
4751 __ AllocateHeapNumber(reg, tmp, &slow);
4752 __ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar);
4753 }
4754
4755 // Slow case: Call the runtime system to do the number allocation.
4756 __ bind(&slow);
4757 {
4758 // Put a valid pointer value in the stack slot where the result
4759 // register is stored, as this register is in the pointer map, but contains
4760 // an integer value.
4761 __ Set(reg, 0);
4762
4763 // Preserve the value of all registers.
4764 PushSafepointRegistersScope scope(this);
4765
4766 // NumberTagIU uses the context from the frame, rather than
4767 // the environment's HContext or HInlinedContext value.
4768 // It only calls Runtime::kAllocateHeapNumber.
4769 // The corresponding HChange instructions are added in a phase that does
4770 // not have easy access to the local context.
4771 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
4772 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4773 RecordSafepointWithRegisters(
4774 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4775 __ StoreToSafepointRegisterSlot(reg, rax);
4776 }
4777
4778 // Done. Put the value in temp_xmm into the value of the allocated heap
4779 // number.
4780 __ bind(&done);
4781 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
4782 }
4783
4784
4785 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4786 class DeferredNumberTagD FINAL : public LDeferredCode {
4787 public:
4788 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4789 : LDeferredCode(codegen), instr_(instr) { }
4790 virtual void Generate() OVERRIDE {
4791 codegen()->DoDeferredNumberTagD(instr_);
4792 }
4793 virtual LInstruction* instr() OVERRIDE { return instr_; }
4794 private:
4795 LNumberTagD* instr_;
4796 };
4797
4798 XMMRegister input_reg = ToDoubleRegister(instr->value());
4799 Register reg = ToRegister(instr->result());
4800 Register tmp = ToRegister(instr->temp());
4801
4802 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4803 if (FLAG_inline_new) {
4804 __ AllocateHeapNumber(reg, tmp, deferred->entry());
4805 } else {
4806 __ jmp(deferred->entry());
4807 }
4808 __ bind(deferred->exit());
4809 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
4810 }
4811
4812
4813 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4814 // TODO(3095996): Get rid of this. For now, we need to make the
4815 // result register contain a valid pointer because it is already
4816 // contained in the register pointer map.
4817 Register reg = ToRegister(instr->result());
4818 __ Move(reg, Smi::FromInt(0));
4819
4820 {
4821 PushSafepointRegistersScope scope(this);
4822 // NumberTagD uses the context from the frame, rather than
4823 // the environment's HContext or HInlinedContext value.
4824 // It only calls Runtime::kAllocateHeapNumber.
4825 // The corresponding HChange instructions are added in a phase that does
4826 // not have easy access to the local context.
4827 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
4828 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4829 RecordSafepointWithRegisters(
4830 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4831 __ movp(kScratchRegister, rax);
4832 }
4833 __ movp(reg, kScratchRegister);
4834 }
4835
4836
4837 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4838 HChange* hchange = instr->hydrogen();
4839 Register input = ToRegister(instr->value());
4840 Register output = ToRegister(instr->result());
4841 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4842 hchange->value()->CheckFlag(HValue::kUint32)) {
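// The overflow flag from the tagging shift only reflects signed overflow,
// so for uint32 inputs check explicitly that the value is in Smi range.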
4843 Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
4844 DeoptimizeIf(NegateCondition(is_smi), instr, "overflow");
4845 }
4846 __ Integer32ToSmi(output, input);
4847 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4848 !hchange->value()->CheckFlag(HValue::kUint32)) {
4849 DeoptimizeIf(overflow, instr, "overflow");
4850 }
4851 }
4852
4853
4854 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4855 DCHECK(instr->value()->Equals(instr->result()));
4856 Register input = ToRegister(instr->value());
4857 if (instr->needs_check()) {
4858 Condition is_smi = __ CheckSmi(input);
4859 DeoptimizeIf(NegateCondition(is_smi), instr, "not a Smi");
4860 } else {
4861 __ AssertSmi(input);
4862 }
4863 __ SmiToInteger32(input, input);
4864 }
4865
4866
4867 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4868 XMMRegister result_reg, NumberUntagDMode mode) {
4869 bool can_convert_undefined_to_nan =
4870 instr->hydrogen()->can_convert_undefined_to_nan();
4871 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4872
4873 Label convert, load_smi, done;
4874
4875 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4876 // Smi check.
4877 __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
4878
4879 // Heap number map check.
4880 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
4881 Heap::kHeapNumberMapRootIndex);
4882
4883 // On x64 it is safe to load at heap number offset before evaluating the map
4884 // check, since all heap objects are at least two words long.
4885 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
4886
4887 if (can_convert_undefined_to_nan) {
4888 __ j(not_equal, &convert, Label::kNear);
4889 } else {
4890 DeoptimizeIf(not_equal, instr, "not a heap number");
4891 }
4892
4893 if (deoptimize_on_minus_zero) {
4894 XMMRegister xmm_scratch = double_scratch0();
4895 __ xorps(xmm_scratch, xmm_scratch);
4896 __ ucomisd(xmm_scratch, result_reg);
4897 __ j(not_equal, &done, Label::kNear);
4898 __ movmskpd(kScratchRegister, result_reg);
4899 __ testq(kScratchRegister, Immediate(1));
4900 DeoptimizeIf(not_zero, instr, "minus zero");
4901 }
4902 __ jmp(&done, Label::kNear);
4903
4904 if (can_convert_undefined_to_nan) {
4905 __ bind(&convert);
4906
4907 // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
4908 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4909 DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
4910
4911 __ xorps(result_reg, result_reg);
4912 __ divsd(result_reg, result_reg);
4913 __ jmp(&done, Label::kNear);
4914 }
4915 } else {
4916 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4917 }
4918
4919 // Smi to XMM conversion
4920 __ bind(&load_smi);
4921 __ SmiToInteger32(kScratchRegister, input_reg);
4922 __ Cvtlsi2sd(result_reg, kScratchRegister);
4923 __ bind(&done);
4924 }
4925
4926
4927 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
4928 Register input_reg = ToRegister(instr->value());
4929
4930 if (instr->truncating()) {
4931 Label no_heap_number, check_bools, check_false;
4932
4933 // Heap number map check.
4934 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
4935 Heap::kHeapNumberMapRootIndex);
4936 __ j(not_equal, &no_heap_number, Label::kNear);
4937 __ TruncateHeapNumberToI(input_reg, input_reg);
4938 __ jmp(done);
4939
4940 __ bind(&no_heap_number);
4941 // Check for oddballs. Undefined and False are converted to zero, and True
4942 // to one, for truncating conversions.
4943 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4944 __ j(not_equal, &check_bools, Label::kNear);
4945 __ Set(input_reg, 0);
4946 __ jmp(done);
4947
4948 __ bind(&check_bools);
4949 __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
4950 __ j(not_equal, &check_false, Label::kNear);
4951 __ Set(input_reg, 1);
4952 __ jmp(done);
4953
4954 __ bind(&check_false);
4955 __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
4956 DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
4957 __ Set(input_reg, 0);
4958 } else {
4959 XMMRegister scratch = ToDoubleRegister(instr->temp());
4960 DCHECK(!scratch.is(xmm0));
4961 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
4962 Heap::kHeapNumberMapRootIndex);
4963 DeoptimizeIf(not_equal, instr, "not a heap number");
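// Convert with truncation, then convert back and compare: a mismatch means
// precision was lost (a fractional part or an out-of-range value), and an
// unordered compare (parity flag) means the input was NaN.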
4964 __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
4965 __ cvttsd2si(input_reg, xmm0);
4966 __ Cvtlsi2sd(scratch, input_reg);
4967 __ ucomisd(xmm0, scratch);
4968 DeoptimizeIf(not_equal, instr, "lost precision");
4969 DeoptimizeIf(parity_even, instr, "NaN");
4970 if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
4971 __ testl(input_reg, input_reg);
4972 __ j(not_zero, done);
4973 __ movmskpd(input_reg, xmm0);
4974 __ andl(input_reg, Immediate(1));
4975 DeoptimizeIf(not_zero, instr, "minus zero");
4976 }
4977 }
4978 }
4979
4980
4981 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4982 class DeferredTaggedToI FINAL : public LDeferredCode {
4983 public:
4984 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4985 : LDeferredCode(codegen), instr_(instr) { }
4986 virtual void Generate() OVERRIDE {
4987 codegen()->DoDeferredTaggedToI(instr_, done());
4988 }
4989 virtual LInstruction* instr() OVERRIDE { return instr_; }
4990 private:
4991 LTaggedToI* instr_;
4992 };
4993
4994 LOperand* input = instr->value();
4995 DCHECK(input->IsRegister());
4996 DCHECK(input->Equals(instr->result()));
4997 Register input_reg = ToRegister(input);
4998
4999 if (instr->hydrogen()->value()->representation().IsSmi()) {
5000 __ SmiToInteger32(input_reg, input_reg);
5001 } else {
5002 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
5003 __ JumpIfNotSmi(input_reg, deferred->entry());
5004 __ SmiToInteger32(input_reg, input_reg);
5005 __ bind(deferred->exit());
5006 }
5007 }
5008
5009
5010 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5011 LOperand* input = instr->value();
5012 DCHECK(input->IsRegister());
5013 LOperand* result = instr->result();
5014 DCHECK(result->IsDoubleRegister());
5015
5016 Register input_reg = ToRegister(input);
5017 XMMRegister result_reg = ToDoubleRegister(result);
5018
5019 HValue* value = instr->hydrogen()->value();
5020 NumberUntagDMode mode = value->representation().IsSmi()
5021 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5022
5023 EmitNumberUntagD(instr, input_reg, result_reg, mode);
5024 }
5025
5026
5027 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5028 LOperand* input = instr->value();
5029 DCHECK(input->IsDoubleRegister());
5030 LOperand* result = instr->result();
5031 DCHECK(result->IsRegister());
5032
5033 XMMRegister input_reg = ToDoubleRegister(input);
5034 Register result_reg = ToRegister(result);
5035
5036 if (instr->truncating()) {
5037 __ TruncateDoubleToI(result_reg, input_reg);
5038 } else {
5039 Label lost_precision, is_nan, minus_zero, done;
5040 XMMRegister xmm_scratch = double_scratch0();
5041 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5042 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5043 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
5044 &is_nan, &minus_zero, dist);
5045 __ jmp(&done, dist);
5046 __ bind(&lost_precision);
5047 DeoptimizeIf(no_condition, instr, "lost precision");
5048 __ bind(&is_nan);
5049 DeoptimizeIf(no_condition, instr, "NaN");
5050 __ bind(&minus_zero);
5051 DeoptimizeIf(no_condition, instr, "minus zero");
5052 __ bind(&done);
5053 }
5054 }
5055
5056
5057 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5058 LOperand* input = instr->value();
5059 DCHECK(input->IsDoubleRegister());
5060 LOperand* result = instr->result();
5061 DCHECK(result->IsRegister());
5062
5063 XMMRegister input_reg = ToDoubleRegister(input);
5064 Register result_reg = ToRegister(result);
5065
5066 Label lost_precision, is_nan, minus_zero, done;
5067 XMMRegister xmm_scratch = double_scratch0();
5068 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5069 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5070 instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
5071 &minus_zero, dist);
5072 __ jmp(&done, dist);
5073 __ bind(&lost_precision);
5074 DeoptimizeIf(no_condition, instr, "lost precision");
5075 __ bind(&is_nan);
5076 DeoptimizeIf(no_condition, instr, "NaN");
5077 __ bind(&minus_zero);
5078 DeoptimizeIf(no_condition, instr, "minus zero");
5079 __ bind(&done);
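// Tag the int32 result; with 31-bit Smis the value may still be out of
// range, so deoptimize on overflow.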
5080 __ Integer32ToSmi(result_reg, result_reg);
5081 DeoptimizeIf(overflow, instr, "overflow");
5082 }
5083
5084
5085 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5086 LOperand* input = instr->value();
5087 Condition cc = masm()->CheckSmi(ToRegister(input));
5088 DeoptimizeIf(NegateCondition(cc), instr, "not a Smi");
5089 }
5090
5091
5092 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5093 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5094 LOperand* input = instr->value();
5095 Condition cc = masm()->CheckSmi(ToRegister(input));
5096 DeoptimizeIf(cc, instr, "Smi");
5097 }
5098 }
5099
5100
5101 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5102 Register input = ToRegister(instr->value());
5103
5104 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
5105
5106 if (instr->hydrogen()->is_interval_check()) {
5107 InstanceType first;
5108 InstanceType last;
5109 instr->hydrogen()->GetCheckInterval(&first, &last);
5110
5111 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
5112 Immediate(static_cast<int8_t>(first)));
5113
5114 // If there is only one type in the interval, check for equality.
5115 if (first == last) {
5116 DeoptimizeIf(not_equal, instr, "wrong instance type");
5117 } else {
5118 DeoptimizeIf(below, instr, "wrong instance type");
5119 // Omit check for the last type.
5120 if (last != LAST_TYPE) {
5121 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
5122 Immediate(static_cast<int8_t>(last)));
5123 DeoptimizeIf(above, instr, "wrong instance type");
5124 }
5125 }
5126 } else {
5127 uint8_t mask;
5128 uint8_t tag;
5129 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5130
5131 if (base::bits::IsPowerOfTwo32(mask)) {
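// A single-bit mask can be checked with one test: a zero tag requires the
// bit to be clear, a non-zero tag requires it to be set.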
5132 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5133 __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
5134 Immediate(mask));
5135 DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
5136 } else {
5137 __ movzxbl(kScratchRegister,
5138 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
5139 __ andb(kScratchRegister, Immediate(mask));
5140 __ cmpb(kScratchRegister, Immediate(tag));
5141 DeoptimizeIf(not_equal, instr, "wrong instance type");
5142 }
5143 }
5144 }
5145
5146
5147 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5148 Register reg = ToRegister(instr->value());
5149 __ Cmp(reg, instr->hydrogen()->object().handle());
5150 DeoptimizeIf(not_equal, instr, "value mismatch");
5151 }
5152
5153
5154 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5155 {
5156 PushSafepointRegistersScope scope(this);
5157 __ Push(object);
5158 __ Set(rsi, 0);
5159 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5160 RecordSafepointWithRegisters(
5161 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5162
5163 __ testp(rax, Immediate(kSmiTagMask));
5164 }
5165 DeoptimizeIf(zero, instr, "instance migration failed");
5166 }
5167
5168
5169 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5170 class DeferredCheckMaps FINAL : public LDeferredCode {
5171 public:
5172 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5173 : LDeferredCode(codegen), instr_(instr), object_(object) {
5174 SetExit(check_maps());
5175 }
5176 virtual void Generate() OVERRIDE {
5177 codegen()->DoDeferredInstanceMigration(instr_, object_);
5178 }
5179 Label* check_maps() { return &check_maps_; }
5180 virtual LInstruction* instr() OVERRIDE { return instr_; }
5181 private:
5182 LCheckMaps* instr_;
5183 Label check_maps_;
5184 Register object_;
5185 };
5186
5187 if (instr->hydrogen()->IsStabilityCheck()) {
5188 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5189 for (int i = 0; i < maps->size(); ++i) {
5190 AddStabilityDependency(maps->at(i).handle());
5191 }
5192 return;
5193 }
5194
5195 LOperand* input = instr->value();
5196 DCHECK(input->IsRegister());
5197 Register reg = ToRegister(input);
5198
5199 DeferredCheckMaps* deferred = NULL;
5200 if (instr->hydrogen()->HasMigrationTarget()) {
5201 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5202 __ bind(deferred->check_maps());
5203 }
5204
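// Compare the object's map against each allowed map; a match jumps straight
// to success, and only a mismatch on the last map deoptimizes (or first
// attempts an instance migration when a migration target exists).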
5205 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5206 Label success;
5207 for (int i = 0; i < maps->size() - 1; i++) {
5208 Handle<Map> map = maps->at(i).handle();
5209 __ CompareMap(reg, map);
5210 __ j(equal, &success, Label::kNear);
5211 }
5212
5213 Handle<Map> map = maps->at(maps->size() - 1).handle();
5214 __ CompareMap(reg, map);
5215 if (instr->hydrogen()->HasMigrationTarget()) {
5216 __ j(not_equal, deferred->entry());
5217 } else {
5218 DeoptimizeIf(not_equal, instr, "wrong map");
5219 }
5220
5221 __ bind(&success);
5222 }
5223
5224
5225 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5226 XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
5227 XMMRegister xmm_scratch = double_scratch0();
5228 Register result_reg = ToRegister(instr->result());
5229 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
5230 }
5231
5232
5233 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5234 DCHECK(instr->unclamped()->Equals(instr->result()));
5235 Register value_reg = ToRegister(instr->result());
5236 __ ClampUint8(value_reg);
5237 }
5238
5239
5240 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5241 DCHECK(instr->unclamped()->Equals(instr->result()));
5242 Register input_reg = ToRegister(instr->unclamped());
5243 XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
5244 XMMRegister xmm_scratch = double_scratch0();
5245 Label is_smi, done, heap_number;
5246 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5247 __ JumpIfSmi(input_reg, &is_smi, dist);
5248
5249 // Check for heap number
5250 __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5251 factory()->heap_number_map());
5252 __ j(equal, &heap_number, Label::kNear);
5253
5254 // Check for undefined. Undefined is converted to zero for clamping
5255 // conversions.
5256 __ Cmp(input_reg, factory()->undefined_value());
5257 DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
5258 __ xorl(input_reg, input_reg);
5259 __ jmp(&done, Label::kNear);
5260
5261 // Heap number
5262 __ bind(&heap_number);
5263 __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
5264 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
5265 __ jmp(&done, Label::kNear);
5266
5267 // smi
5268 __ bind(&is_smi);
5269 __ SmiToInteger32(input_reg, input_reg);
5270 __ ClampUint8(input_reg);
5271
5272 __ bind(&done);
5273 }
5274
5275
5276 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5277 XMMRegister value_reg = ToDoubleRegister(instr->value());
5278 Register result_reg = ToRegister(instr->result());
5279 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
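// Copy the raw 64-bit pattern into the result register and shift the high
// word down into the low 32 bits.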
5280 __ movq(result_reg, value_reg);
5281 __ shrq(result_reg, Immediate(32));
5282 } else {
5283 __ movd(result_reg, value_reg);
5284 }
5285 }
5286
5287
5288 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5289 Register hi_reg = ToRegister(instr->hi());
5290 Register lo_reg = ToRegister(instr->lo());
5291 XMMRegister result_reg = ToDoubleRegister(instr->result());
5292 XMMRegister xmm_scratch = double_scratch0();
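// Assemble the 64-bit IEEE 754 bit pattern as (hi << 32) | lo directly in an
// XMM register.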
5293 __ movd(result_reg, hi_reg);
5294 __ psllq(result_reg, 32);
5295 __ movd(xmm_scratch, lo_reg);
5296 __ orps(result_reg, xmm_scratch);
5297 }
5298
5299
5300 void LCodeGen::DoAllocate(LAllocate* instr) {
5301 class DeferredAllocate FINAL : public LDeferredCode {
5302 public:
5303 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5304 : LDeferredCode(codegen), instr_(instr) { }
5305 virtual void Generate() OVERRIDE {
5306 codegen()->DoDeferredAllocate(instr_);
5307 }
5308 virtual LInstruction* instr() OVERRIDE { return instr_; }
5309 private:
5310 LAllocate* instr_;
5311 };
5312
5313 DeferredAllocate* deferred =
5314 new(zone()) DeferredAllocate(this, instr);
5315
5316 Register result = ToRegister(instr->result());
5317 Register temp = ToRegister(instr->temp());
5318
5319 // Allocate memory for the object.
5320 AllocationFlags flags = TAG_OBJECT;
5321 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5322 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5323 }
5324 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5325 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5326 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5327 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5328 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5329 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5330 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5331 }
5332
5333 if (instr->size()->IsConstantOperand()) {
5334 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5335 if (size <= Page::kMaxRegularHeapObjectSize) {
5336 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5337 } else {
5338 __ jmp(deferred->entry());
5339 }
5340 } else {
5341 Register size = ToRegister(instr->size());
5342 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5343 }
5344
5345 __ bind(deferred->exit());
5346
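// If requested, prefill the allocated space with one-pointer filler maps so
// that the untouched words look like valid heap objects until the real field
// values are stored.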
5347 if (instr->hydrogen()->MustPrefillWithFiller()) {
5348 if (instr->size()->IsConstantOperand()) {
5349 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5350 __ movl(temp, Immediate((size / kPointerSize) - 1));
5351 } else {
5352 temp = ToRegister(instr->size());
5353 __ sarp(temp, Immediate(kPointerSizeLog2));
5354 __ decl(temp);
5355 }
5356 Label loop;
5357 __ bind(&loop);
5358 __ Move(FieldOperand(result, temp, times_pointer_size, 0),
5359 isolate()->factory()->one_pointer_filler_map());
5360 __ decl(temp);
5361 __ j(not_zero, &loop);
5362 }
5363 }
5364
5365
5366 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5367 Register result = ToRegister(instr->result());
5368
5369 // TODO(3095996): Get rid of this. For now, we need to make the
5370 // result register contain a valid pointer because it is already
5371 // contained in the register pointer map.
5372 __ Move(result, Smi::FromInt(0));
5373
5374 PushSafepointRegistersScope scope(this);
5375 if (instr->size()->IsRegister()) {
5376 Register size = ToRegister(instr->size());
5377 DCHECK(!size.is(result));
5378 __ Integer32ToSmi(size, size);
5379 __ Push(size);
5380 } else {
5381 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5382 __ Push(Smi::FromInt(size));
5383 }
5384
5385 int flags = 0;
5386 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5387 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5388 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5389 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5390 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5391 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5392 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5393 } else {
5394 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5395 }
5396 __ Push(Smi::FromInt(flags));
5397
5398 CallRuntimeFromDeferred(
5399 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5400 __ StoreToSafepointRegisterSlot(result, rax);
5401 }
5402
5403
5404 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5405 DCHECK(ToRegister(instr->value()).is(rax));
5406 __ Push(rax);
5407 CallRuntime(Runtime::kToFastProperties, 1, instr);
5408 }
5409
5410
5411 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5412 DCHECK(ToRegister(instr->context()).is(rsi));
5413 Label materialized;
5414 // Registers will be used as follows:
5415 // rcx = literals array.
5416 // rbx = regexp literal.
5417 // rax = regexp literal clone.
5418 int literal_offset =
5419 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5420 __ Move(rcx, instr->hydrogen()->literals());
5421 __ movp(rbx, FieldOperand(rcx, literal_offset));
5422 __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
5423 __ j(not_equal, &materialized, Label::kNear);
5424
5425 // Create regexp literal using runtime function
5426 // Result will be in rax.
5427 __ Push(rcx);
5428 __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
5429 __ Push(instr->hydrogen()->pattern());
5430 __ Push(instr->hydrogen()->flags());
5431 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5432 __ movp(rbx, rax);
5433
5434 __ bind(&materialized);
5435 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5436 Label allocated, runtime_allocate;
5437 __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
5438 __ jmp(&allocated, Label::kNear);
5439
5440 __ bind(&runtime_allocate);
5441 __ Push(rbx);
5442 __ Push(Smi::FromInt(size));
5443 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5444 __ Pop(rbx);
5445
5446 __ bind(&allocated);
5447 // Copy the content into the newly allocated memory.
5448 // (Unroll copy loop once for better throughput).
5449 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5450 __ movp(rdx, FieldOperand(rbx, i));
5451 __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
5452 __ movp(FieldOperand(rax, i), rdx);
5453 __ movp(FieldOperand(rax, i + kPointerSize), rcx);
5454 }
5455 if ((size % (2 * kPointerSize)) != 0) {
5456 __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
5457 __ movp(FieldOperand(rax, size - kPointerSize), rdx);
5458 }
5459 }
5460
5461
5462 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5463 DCHECK(ToRegister(instr->context()).is(rsi));
5464 // Use the fast-case closure allocation code that allocates in new
5465 // space for nested functions that don't need their literals cloned.
5466 bool pretenure = instr->hydrogen()->pretenure();
5467 if (!pretenure && instr->hydrogen()->has_no_literals()) {
5468 FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
5469 instr->hydrogen()->kind());
5470 __ Move(rbx, instr->hydrogen()->shared_info());
5471 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5472 } else {
5473 __ Push(rsi);
5474 __ Push(instr->hydrogen()->shared_info());
5475 __ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
5476 Heap::kFalseValueRootIndex);
5477 CallRuntime(Runtime::kNewClosure, 3, instr);
5478 }
5479 }
5480
5481
5482 void LCodeGen::DoTypeof(LTypeof* instr) {
5483 DCHECK(ToRegister(instr->context()).is(rsi));
5484 LOperand* input = instr->value();
5485 EmitPushTaggedOperand(input);
5486 CallRuntime(Runtime::kTypeof, 1, instr);
5487 }
5488
5489
5490 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
5491 DCHECK(!operand->IsDoubleRegister());
5492 if (operand->IsConstantOperand()) {
5493 __ Push(ToHandle(LConstantOperand::cast(operand)));
5494 } else if (operand->IsRegister()) {
5495 __ Push(ToRegister(operand));
5496 } else {
5497 __ Push(ToOperand(operand));
5498 }
5499 }
5500
5501
5502 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5503 Register input = ToRegister(instr->value());
5504 Condition final_branch_condition = EmitTypeofIs(instr, input);
5505 if (final_branch_condition != no_condition) {
5506 EmitBranch(instr, final_branch_condition);
5507 }
5508 }
5509
5510
5511 Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
5512 Label* true_label = instr->TrueLabel(chunk_);
5513 Label* false_label = instr->FalseLabel(chunk_);
5514 Handle<String> type_name = instr->type_literal();
5515 int left_block = instr->TrueDestination(chunk_);
5516 int right_block = instr->FalseDestination(chunk_);
5517 int next_block = GetNextEmittedBlock();
5518
5519 Label::Distance true_distance = left_block == next_block ? Label::kNear
5520 : Label::kFar;
5521 Label::Distance false_distance = right_block == next_block ? Label::kNear
5522 : Label::kFar;
5523 Condition final_branch_condition = no_condition;
5524 Factory* factory = isolate()->factory();
5525 if (String::Equals(type_name, factory->number_string())) {
5526 __ JumpIfSmi(input, true_label, true_distance);
5527 __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
5528 Heap::kHeapNumberMapRootIndex);
5529
5530 final_branch_condition = equal;
5531
5532 } else if (String::Equals(type_name, factory->string_string())) {
5533 __ JumpIfSmi(input, false_label, false_distance);
5534 __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
5535 __ j(above_equal, false_label, false_distance);
5536 __ testb(FieldOperand(input, Map::kBitFieldOffset),
5537 Immediate(1 << Map::kIsUndetectable));
5538 final_branch_condition = zero;
5539
5540 } else if (String::Equals(type_name, factory->symbol_string())) {
5541 __ JumpIfSmi(input, false_label, false_distance);
5542 __ CmpObjectType(input, SYMBOL_TYPE, input);
5543 final_branch_condition = equal;
5544
5545 } else if (String::Equals(type_name, factory->boolean_string())) {
5546 __ CompareRoot(input, Heap::kTrueValueRootIndex);
5547 __ j(equal, true_label, true_distance);
5548 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5549 final_branch_condition = equal;
5550
5551 } else if (String::Equals(type_name, factory->undefined_string())) {
5552 __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5553 __ j(equal, true_label, true_distance);
5554 __ JumpIfSmi(input, false_label, false_distance);
5555 // Check for undetectable objects => true.
5556 __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
5557 __ testb(FieldOperand(input, Map::kBitFieldOffset),
5558 Immediate(1 << Map::kIsUndetectable));
5559 final_branch_condition = not_zero;
5560
5561 } else if (String::Equals(type_name, factory->function_string())) {
5562 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5563 __ JumpIfSmi(input, false_label, false_distance);
5564 __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
5565 __ j(equal, true_label, true_distance);
5566 __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
5567 final_branch_condition = equal;
5568
5569 } else if (String::Equals(type_name, factory->object_string())) {
5570 __ JumpIfSmi(input, false_label, false_distance);
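// typeof null is "object", so null goes straight to the true label.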
5571 __ CompareRoot(input, Heap::kNullValueRootIndex);
5572 __ j(equal, true_label, true_distance);
5573 __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
5574 __ j(below, false_label, false_distance);
5575 __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
5576 __ j(above, false_label, false_distance);
5577 // Check for undetectable objects => false.
5578 __ testb(FieldOperand(input, Map::kBitFieldOffset),
5579 Immediate(1 << Map::kIsUndetectable));
5580 final_branch_condition = zero;
5581
5582 } else {
5583 __ jmp(false_label, false_distance);
5584 }
5585
5586 return final_branch_condition;
5587 }
5588
5589
5590 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5591 Register temp = ToRegister(instr->temp());
5592
5593 EmitIsConstructCall(temp);
5594 EmitBranch(instr, equal);
5595 }
5596
5597
5598 void LCodeGen::EmitIsConstructCall(Register temp) {
5599 // Get the frame pointer for the calling frame.
5600 __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
5601
5602 // Skip the arguments adaptor frame if it exists.
5603 Label check_frame_marker;
5604 __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
5605 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
5606 __ j(not_equal, &check_frame_marker, Label::kNear);
5607 __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
5608
5609 // Check the marker in the calling frame.
5610 __ bind(&check_frame_marker);
5611 __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
5612 Smi::FromInt(StackFrame::CONSTRUCT));
5613 }
5614
5615
5616 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5617 if (!info()->IsStub()) {
5618 // Ensure that we have enough space after the previous lazy-bailout
5619 // instruction for patching the code here.
5620 int current_pc = masm()->pc_offset();
5621 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5622 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5623 __ Nop(padding_size);
5624 }
5625 }
5626 last_lazy_deopt_pc_ = masm()->pc_offset();
5627 }
5628
5629
5630 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5631 last_lazy_deopt_pc_ = masm()->pc_offset();
5632 DCHECK(instr->HasEnvironment());
5633 LEnvironment* env = instr->environment();
5634 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5635 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5636 }
5637
5638
5639 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5640 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5641 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5642 // needed return address), even though the implementation of LAZY and EAGER is
5643 // now identical. When LAZY is eventually completely folded into EAGER, remove
5644 // the special case below.
5645 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5646 type = Deoptimizer::LAZY;
5647 }
5648 DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
5649 }
5650
5651
5652 void LCodeGen::DoDummy(LDummy* instr) {
5653 // Nothing to see here, move on!
5654 }
5655
5656
5657 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5658 // Nothing to see here, move on!
5659 }
5660
5661
5662 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5663 PushSafepointRegistersScope scope(this);
5664 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
5665 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5666 RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
5667 DCHECK(instr->HasEnvironment());
5668 LEnvironment* env = instr->environment();
5669 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5670 }
5671
5672
5673 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5674 class DeferredStackCheck FINAL : public LDeferredCode {
5675 public:
5676 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5677 : LDeferredCode(codegen), instr_(instr) { }
5678 virtual void Generate() OVERRIDE {
5679 codegen()->DoDeferredStackCheck(instr_);
5680 }
5681 virtual LInstruction* instr() OVERRIDE { return instr_; }
5682 private:
5683 LStackCheck* instr_;
5684 };
5685
5686 DCHECK(instr->HasEnvironment());
5687 LEnvironment* env = instr->environment();
5688 // There is no LLazyBailout instruction for stack-checks. We have to
5689 // prepare for lazy deoptimization explicitly here.
5690 if (instr->hydrogen()->is_function_entry()) {
5691 // Perform stack overflow check.
5692 Label done;
5693 __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
5694 __ j(above_equal, &done, Label::kNear);
5695
5696 DCHECK(instr->context()->IsRegister());
5697 DCHECK(ToRegister(instr->context()).is(rsi));
5698 CallCode(isolate()->builtins()->StackCheck(),
5699 RelocInfo::CODE_TARGET,
5700 instr);
5701 __ bind(&done);
5702 } else {
5703 DCHECK(instr->hydrogen()->is_backwards_branch());
5704 // Perform stack overflow check if this goto needs it before jumping.
5705 DeferredStackCheck* deferred_stack_check =
5706 new(zone()) DeferredStackCheck(this, instr);
5707 __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
5708 __ j(below, deferred_stack_check->entry());
5709 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5710 __ bind(instr->done_label());
5711 deferred_stack_check->SetExit(instr->done_label());
5712 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5713 // Don't record a deoptimization index for the safepoint here.
5714 // This will be done explicitly when emitting call and the safepoint in
5715 // the deferred code.
5716 }
5717 }
5718
5719
5720 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5721 // This is a pseudo-instruction that ensures that the environment here is
5722 // properly registered for deoptimization and records the assembler's PC
5723 // offset.
5724 LEnvironment* environment = instr->environment();
5725
5726 // If the environment were already registered, we would have no way of
5727 // backpatching it with the spill slot operands.
5728 DCHECK(!environment->HasBeenRegistered());
5729 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5730
5731 GenerateOsrPrologue();
5732 }
5733
5734
5735 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5736 DCHECK(ToRegister(instr->context()).is(rsi));
5737 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
5738 DeoptimizeIf(equal, instr, "undefined");
5739
5740 Register null_value = rdi;
5741 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5742 __ cmpp(rax, null_value);
5743 DeoptimizeIf(equal, instr, "null");
5744
5745 Condition cc = masm()->CheckSmi(rax);
5746 DeoptimizeIf(cc, instr, "Smi");
5747
5748 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5749 __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
5750 DeoptimizeIf(below_equal, instr, "wrong instance type");
5751
5752 Label use_cache, call_runtime;
5753 __ CheckEnumCache(null_value, &call_runtime);
5754
5755 __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
5756 __ jmp(&use_cache, Label::kNear);
5757
5758 // Get the set of properties to enumerate.
5759 __ bind(&call_runtime);
5760 __ Push(rax);
5761 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5762
5763 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
5764 Heap::kMetaMapRootIndex);
5765 DeoptimizeIf(not_equal, instr, "wrong map");
5766 __ bind(&use_cache);
5767 }
5768
5769
5770 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5771 Register map = ToRegister(instr->map());
5772 Register result = ToRegister(instr->result());
5773 Label load_cache, done;
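// An enum cache length of zero means there is nothing to enumerate, so the
// empty fixed array is used; otherwise the enum cache is loaded from the
// map's instance descriptors. A Smi result means there was no cache, which
// deoptimizes below.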
5774 __ EnumLength(result, map);
5775 __ Cmp(result, Smi::FromInt(0));
5776 __ j(not_equal, &load_cache, Label::kNear);
5777 __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
5778 __ jmp(&done, Label::kNear);
5779 __ bind(&load_cache);
5780 __ LoadInstanceDescriptors(map, result);
5781 __ movp(result,
5782 FieldOperand(result, DescriptorArray::kEnumCacheOffset));
5783 __ movp(result,
5784 FieldOperand(result, FixedArray::SizeFor(instr->idx())));
5785 __ bind(&done);
5786 Condition cc = masm()->CheckSmi(result);
5787 DeoptimizeIf(cc, instr, "no cache");
5788 }
5789
5790
5791 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5792 Register object = ToRegister(instr->value());
5793 __ cmpp(ToRegister(instr->map()),
5794 FieldOperand(object, HeapObject::kMapOffset));
5795 DeoptimizeIf(not_equal, instr, "wrong map");
5796 }
5797
5798
5799 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5800 Register object,
5801 Register index) {
5802 PushSafepointRegistersScope scope(this);
5803 __ Push(object);
5804 __ Push(index);
5805 __ xorp(rsi, rsi);
5806 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5807 RecordSafepointWithRegisters(
5808 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5809 __ StoreToSafepointRegisterSlot(object, rax);
5810 }
5811
5812
5813 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5814 class DeferredLoadMutableDouble FINAL : public LDeferredCode {
5815 public:
5816 DeferredLoadMutableDouble(LCodeGen* codegen,
5817 LLoadFieldByIndex* instr,
5818 Register object,
5819 Register index)
5820 : LDeferredCode(codegen),
5821 instr_(instr),
5822 object_(object),
5823 index_(index) {
5824 }
5825 virtual void Generate() OVERRIDE {
5826 codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
5827 }
5828 virtual LInstruction* instr() OVERRIDE { return instr_; }
5829 private:
5830 LLoadFieldByIndex* instr_;
5831 Register object_;
5832 Register index_;
5833 };
5834
5835 Register object = ToRegister(instr->object());
5836 Register index = ToRegister(instr->index());
5837
5838 DeferredLoadMutableDouble* deferred;
5839 deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);
5840
5841 Label out_of_object, done;
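// The index is a Smi with extra encoding: the low payload bit marks a
// mutable-double field (loaded via the runtime in the deferred code), and
// after shifting that bit out a negative index selects the out-of-object
// properties backing store while a non-negative one is an in-object field.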
5842 __ Move(kScratchRegister, Smi::FromInt(1));
5843 __ testp(index, kScratchRegister);
5844 __ j(not_zero, deferred->entry());
5845
5846 __ sarp(index, Immediate(1));
5847
5848 __ SmiToInteger32(index, index);
5849 __ cmpl(index, Immediate(0));
5850 __ j(less, &out_of_object, Label::kNear);
5851 __ movp(object, FieldOperand(object,
5852 index,
5853 times_pointer_size,
5854 JSObject::kHeaderSize));
5855 __ jmp(&done, Label::kNear);
5856
5857 __ bind(&out_of_object);
5858 __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
5859 __ negl(index);
5860 // Index is now equal to the out-of-object property index plus 1.
5861 __ movp(object, FieldOperand(object,
5862 index,
5863 times_pointer_size,
5864 FixedArray::kHeaderSize - kPointerSize));
5865 __ bind(deferred->exit());
5866 __ bind(&done);
5867 }
5868
5869
5870 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5871 Register context = ToRegister(instr->context());
5872 __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), context);
5873 }
5874
5875
5876 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
5877 Handle<ScopeInfo> scope_info = instr->scope_info();
5878 __ Push(scope_info);
5879 __ Push(ToRegister(instr->function()));
5880 CallRuntime(Runtime::kPushBlockContext, 2, instr);
5881 RecordSafepoint(Safepoint::kNoLazyDeopt);
5882 }
5883
5884
5885 #undef __
5886
5887 } } // namespace v8::internal
5888
5889 #endif // V8_TARGET_ARCH_X64
5890