// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X64

#include "src/crankshaft/x64/lithium-codegen-x64.h"

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"

namespace v8 {
namespace internal {


// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateJumpTable() &&
         GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


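// Windows commits stack pages lazily, one guard page at a time, so a store
// more than one page below the committed region can fault.  The loop below is
// the usual stack-probe pattern; as an illustrative sketch in C terms (the
// pointer arithmetic is hypothetical, not part of this file's API):
//
//   for (int off = frame_size - kPageSize; off > 0; off -= kPageSize)
//     *(volatile intptr_t*)(rsp + off) = 0;  // touch every 4 KB page
//
// Touching pages from the top of the new frame downwards keeps each access
// within one page of the current guard page.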
#ifdef _MSC_VER
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ movp(Operand(rsp, offset), rax);
  }
}
#endif


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ Movsd(MemOperand(rsp, count * kDoubleSize),
             XMMRegister::from_code(save_iterator.Current()));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ Movsd(XMMRegister::from_code(save_iterator.Current()),
             MemOperand(rsp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    DCHECK(!frame_is_built_);
    frame_is_built_ = true;
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
    }
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      __ Push(rax);
      __ Set(rax, slots);
      __ Set(kScratchRegister, kSlotsZapValue);
      Label loop;
      __ bind(&loop);
      __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
              kScratchRegister);
      __ decl(rax);
      __ j(not_zero, &loop);
      __ Pop(rax);
    } else {
      __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
    }

    if (info()->saves_caller_doubles()) {
      SaveCallerDoubles();
    }
  }
  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info_->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is still in rdi.
    int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ Push(rdi);
      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ Push(rdi);
      __ CallRuntime(Runtime::kNewFunctionContext);
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in rax.  It replaces the context passed to us.
    // It's saved in the stack and kept live in rsi.
    __ movp(rsi, rax);
    __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);

    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ movp(rax, Operand(rbp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ movp(Operand(rsi, context_offset), rax);
        // Update the write barrier.  This clobbers rax and rbx.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ subp(rsp, Immediate(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
      instr->hydrogen_value()->representation().IsInteger32() &&
      instr->result()->IsRegister()) {
    __ AssertZeroExtended(ToRegister(instr->result()));
  }

  if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
    // We sign extend the dehoisted key at the definition point when the
    // pointer size is 64-bit.  For the x32 port, we sign extend the dehoisted
    // key at the use points and MustSignExtendResult is always false.  We
    // can't use STATIC_ASSERT here as the pointer size is 32-bit for x32.
    DCHECK(kPointerSize == kInt64Size);
    if (instr->result()->IsRegister()) {
      Register result_reg = ToRegister(instr->result());
      __ movsxlq(result_reg, result_reg);
    } else {
      // Sign extend the 32bit result in the stack slots.
      DCHECK(instr->result()->IsStackSlot());
      Operand src = ToOperand(instr->result());
      __ movsxlq(kScratchRegister, src);
      __ movq(src, kScratchRegister);
    }
  }
}


bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() == 0) return !is_aborted();

  Label needs_frame;
  Comment(";;; -------------------- Jump table --------------------");
  for (int i = 0; i < jump_table_.length(); i++) {
    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
    __ bind(&table_entry->label);
    Address entry = table_entry->address;
    DeoptComment(table_entry->deopt_info);
    if (table_entry->needs_frame) {
      DCHECK(!info()->saves_caller_doubles());
      __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
      __ call(&needs_frame);
    } else {
      if (info()->saves_caller_doubles()) {
        DCHECK(info()->IsStub());
        RestoreCallerDoubles();
      }
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
    info()->LogDeoptCallPosition(masm()->pc_offset(),
                                 table_entry->deopt_info.inlining_id);
  }

  if (needs_frame.is_linked()) {
    __ bind(&needs_frame);
    /* stack layout
       4: return address  <-- rsp
       3: garbage
       2: garbage
       1: garbage
       0: garbage
    */
    // Reserve space for context and stub marker.
    __ subp(rsp, Immediate(2 * kPointerSize));
    __ Push(MemOperand(rsp, 2 * kPointerSize));  // Copy return address.
    __ Push(kScratchRegister);  // Save entry address for ret(0)

    /* stack layout
       4: return address
       3: garbage
       2: garbage
       1: return address
       0: entry address  <-- rsp
    */

    // Remember context pointer.
    __ movp(kScratchRegister,
            MemOperand(rbp, StandardFrameConstants::kContextOffset));
    // Save context pointer into the stack frame.
    __ movp(MemOperand(rsp, 3 * kPointerSize), kScratchRegister);

    // Create a stack frame.
    __ movp(MemOperand(rsp, 4 * kPointerSize), rbp);
    __ leap(rbp, MemOperand(rsp, 4 * kPointerSize));

    // This variant of deopt can only be used with stubs. Since we don't
    // have a function pointer to install in the stack frame that we're
    // building, install a special marker there instead.
    DCHECK(info()->IsStub());
    __ Move(MemOperand(rsp, 2 * kPointerSize), Smi::FromInt(StackFrame::STUB));

    /* stack layout
       4: old rbp
       3: context pointer
       2: stub marker
       1: return address
       0: entry address  <-- rsp
    */
    __ ret(0);
  }

  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that rsi isn't trashed.
        __ pushq(rbp);  // Caller's frame pointer.
        __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
        __ Push(Smi::FromInt(StackFrame::STUB));
        __ leap(rbp, Operand(rsp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        frame_is_built_ = false;
        __ movp(rsp, rbp);
        __ popq(rbp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::from_code(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::from_code(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsExternalConstant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsExternal();
}


bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
         chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
}


bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged());
  return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


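// Frameless code addresses its incoming arguments relative to rsp, just above
// the return address.  As a worked example of the formula below (with
// kPointerSize == kPCOnStackSize == 8): index -1, the argument pushed last,
// lives at [rsp + 8]; index -2 at [rsp + 16]; and so on.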
static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  // Does not handle registers. In X64 assembler, plain registers are not
  // representable as an Operand.
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(rbp, StackSlotOffset(op->index()));
  } else {
    // Without an eager stack frame, retrieve the parameter relative to the
    // stack pointer.
    return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               int argc) {
  DCHECK(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);
  DCHECK(instr->HasPointerMap());

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(rsi)) {
      __ movp(rsi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ movp(rsi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, environment->zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfq();
    __ pushq(rax);
    Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
    __ movl(rax, count_operand);
    __ subl(rax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ movl(rax, Immediate(FLAG_deopt_every_n_times));
    __ movl(count_operand, rax);
    __ popq(rax);
    __ popfq();
    DCHECK(frame_is_built_);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ movl(count_operand, rax);
    __ popq(rax);
    __ popfq();
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) {
      __ j(NegateCondition(cc), &done, Label::kNear);
    }
    __ int3();
    __ bind(&done);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cc == no_condition && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode, int argc) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), argc, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(kind == expected_safepoint_kind_);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();

  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->result()).is(rax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
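  // In C terms, with mask = |divisor| - 1, the branching version below
  // computes the truncated remainder as:
  //   dividend >= 0 ? dividend & mask : -(-dividend & mask)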
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ testl(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    // Note that this is correct even for kMinInt operands.
    __ negl(dividend);
    __ andl(dividend, Immediate(mask));
    __ negl(dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
    }
    __ jmp(&done, Label::kNear);
  }

  __ bind(&dividend_is_not_negative);
  __ andl(dividend, Immediate(mask));
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(rax));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }

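  // The identity used below: n % d == n - trunc(n / d) * d, and the truncated
  // remainder only depends on |d|, so everything is computed against
  // Abs(divisor).  TruncatingDiv leaves trunc(dividend / |divisor|) in rdx.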
  __ TruncatingDiv(dividend, Abs(divisor));
  __ imull(rdx, rdx, Immediate(Abs(divisor)));
  __ movl(rax, dividend);
  __ subl(rax, rdx);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    __ cmpl(dividend, Immediate(0));
    DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  DCHECK(left_reg.is(rax));
  Register right_reg = ToRegister(instr->right());
  DCHECK(!right_reg.is(rax));
  DCHECK(!right_reg.is(rdx));
  Register result_reg = ToRegister(instr->result());
  DCHECK(result_reg.is(rdx));

  Label done;
  // Check for x % 0, idiv would signal a divide error. We have to
  // deopt in this case because we can't return a NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(right_reg, right_reg);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for kMinInt % -1, idiv would signal a divide error. We
  // have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ cmpl(left_reg, Immediate(kMinInt));
    __ j(not_zero, &no_overflow_possible, Label::kNear);
    __ cmpl(right_reg, Immediate(-1));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
    } else {
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Set(result_reg, 0);
      __ jmp(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // Sign extend dividend in eax into edx:eax, since we are using only the low
  // 32 bits of the values.
  __ cdq();

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive_left;
    __ testl(left_reg, left_reg);
    __ j(not_sign, &positive_left, Label::kNear);
    __ idivl(right_reg);
    __ testl(result_reg, result_reg);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
  }
  __ idivl(right_reg);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  if (divisor == 1) return;
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sarl(dividend, Immediate(shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ negl(dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sarl(dividend, Immediate(shift));
    return;
  }

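  // The negation above overflows exactly when the dividend was kMinInt, and
  // the overflow flag is still valid here (the intervening jumps don't touch
  // it).  For that single input the quotient is the compile-time constant
  // kMinInt / divisor.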
  Label not_kmin_int, done;
  __ j(no_overflow, &not_kmin_int, Label::kNear);
  __ movl(dividend, Immediate(kMinInt / divisor));
  __ jmp(&done, Label::kNear);
  __ bind(&not_kmin_int);
  __ sarl(dividend, Immediate(shift));
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(rdx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(dividend, Abs(divisor));
    if (divisor < 0) __ negl(rdx);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
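  // The adjustment uses the identity, valid whenever dividend and divisor
  // have opposite signs and the dividend is non-zero:
  //   floor(n / d) == trunc((n + sign(d)) / d) - 1
  // For example, floor(-5 / 2) == -3 == trunc(-4 / 2) - 1.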
  Register temp = ToRegister(instr->temp3());
  DCHECK(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
  Label needs_adjustment, done;
  __ cmpl(dividend, Immediate(0));
  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
  __ jmp(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
  __ TruncatingDiv(temp, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
  __ decl(rdx);
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());
  DCHECK(dividend.is(rax));
  DCHECK(remainder.is(rdx));
  DCHECK(result.is(rax));
  DCHECK(!divisor.is(rax));
  DCHECK(!divisor.is(rdx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(divisor, divisor);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ testl(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ testl(divisor, divisor);
    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmpl(dividend, Immediate(kMinInt));
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmpl(divisor, Immediate(-1));
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to rdx (= remainder).
  __ cdq();
  __ idivl(divisor);

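  // Turn the truncated quotient into a flooring one: subtract 1 exactly when
  // the remainder is non-zero and its sign differs from the divisor's.  The
  // xor moves the "signs differ" bit into the sign bit, sarl(31) smears it
  // into 0 or -1, and the addl applies the correction.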
  Label done;
  __ testl(remainder, remainder);
  __ j(zero, &done, Label::kNear);
  __ xorl(remainder, divisor);
  __ sarl(remainder, Immediate(31));
  __ addl(result, remainder);
  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmpl(dividend, Immediate(kMinInt));
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ testl(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
  }
  __ Move(result, dividend);
  int32_t shift = WhichPowerOf2Abs(divisor);
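  // For a truncating division the shift must round toward zero, so negative
  // dividends are biased by |divisor| - 1 first.  The two shifts below
  // compute exactly that bias: sarl(31) yields 0 or -1, and shrl(32 - shift)
  // turns -1 into 2^shift - 1.  E.g. for divisor 4: (-7 + 3) >> 2 == -1
  // == trunc(-7 / 4).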
  if (shift > 0) {
    // The arithmetic shift is always OK, the 'if' is an optimization only.
    if (shift > 1) __ sarl(result, Immediate(31));
    __ shrl(result, Immediate(32 - shift));
    __ addl(result, dividend);
    __ sarl(result, Immediate(shift));
  }
  if (divisor < 0) __ negl(result);
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(rdx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negl(rdx);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ movl(rax, rdx);
    __ imull(rax, rax, Immediate(divisor));
    __ subl(rax, dividend);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  DCHECK(dividend.is(rax));
  DCHECK(remainder.is(rdx));
  DCHECK(ToRegister(instr->result()).is(rax));
  DCHECK(!divisor.is(rax));
  DCHECK(!divisor.is(rdx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(divisor, divisor);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ testl(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ testl(divisor, divisor);
    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmpl(dividend, Immediate(kMinInt));
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmpl(divisor, Immediate(-1));
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to rdx (= remainder).
  __ cdq();
  __ idivl(divisor);

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ testl(remainder, remainder);
    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
  }
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ movp(kScratchRegister, left);
    } else {
      __ movl(kScratchRegister, left);
    }
  }

  bool can_overflow =
      instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  if (right->IsConstantOperand()) {
    int32_t right_value = ToInteger32(LConstantOperand::cast(right));
    if (right_value == -1) {
      __ negl(left);
    } else if (right_value == 0) {
      __ xorl(left, left);
    } else if (right_value == 2) {
      __ addl(left, left);
    } else if (!can_overflow) {
      // If the multiplication is known to not overflow, we
      // can use operations that don't set the overflow flag
      // correctly.
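      // For example, x * 9 becomes a single "leal left, [left + left*8]" and
      // x * 16 a single shift; both are cheaper than imull but leave the
      // overflow flag meaningless.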
      switch (right_value) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ leal(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shll(left, Immediate(2));
          break;
        case 5:
          __ leal(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shll(left, Immediate(3));
          break;
        case 9:
          __ leal(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shll(left, Immediate(4));
          break;
        default:
          __ imull(left, left, Immediate(right_value));
          break;
      }
    } else {
      __ imull(left, left, Immediate(right_value));
    }
  } else if (right->IsStackSlot()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imulp(left, ToOperand(right));
    } else {
      __ imull(left, ToOperand(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imulp(left, ToRegister(right));
    } else {
      __ imull(left, ToRegister(right));
    }
  }

  if (can_overflow) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ testp(left, left);
    } else {
      __ testl(left, left);
    }
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      // Constant can't be represented as 32-bit Smi due to immediate size
      // limit.
      DCHECK(SmiValuesAre32Bits()
             ? !instr->hydrogen_value()->representation().IsSmi()
             : SmiValuesAre31Bits());
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        __ cmpl(kScratchRegister, Immediate(0));
        DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
      }
    } else if (right->IsStackSlot()) {
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ orp(kScratchRegister, ToOperand(right));
      } else {
        __ orl(kScratchRegister, ToOperand(right));
      }
      DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    } else {
      // Test the non-zero operand for negative sign.
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ orp(kScratchRegister, ToRegister(right));
      } else {
        __ orl(kScratchRegister, ToRegister(right));
      }
      DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    }
    __ bind(&done);
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand =
        ToRepresentation(LConstantOperand::cast(right),
                         instr->hydrogen()->right()->representation());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {
          __ notl(ToRegister(left));
        } else {
          __ xorl(ToRegister(left), Immediate(right_operand));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else if (right->IsStackSlot()) {
    switch (instr->op()) {
      case Token::BIT_AND:
        if (instr->IsInteger32()) {
          __ andl(ToRegister(left), ToOperand(right));
        } else {
          __ andp(ToRegister(left), ToOperand(right));
        }
        break;
      case Token::BIT_OR:
        if (instr->IsInteger32()) {
          __ orl(ToRegister(left), ToOperand(right));
        } else {
          __ orp(ToRegister(left), ToOperand(right));
        }
        break;
      case Token::BIT_XOR:
        if (instr->IsInteger32()) {
          __ xorl(ToRegister(left), ToOperand(right));
        } else {
          __ xorp(ToRegister(left), ToOperand(right));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    DCHECK(right->IsRegister());
    switch (instr->op()) {
      case Token::BIT_AND:
        if (instr->IsInteger32()) {
          __ andl(ToRegister(left), ToRegister(right));
        } else {
          __ andp(ToRegister(left), ToRegister(right));
        }
        break;
      case Token::BIT_OR:
        if (instr->IsInteger32()) {
          __ orl(ToRegister(left), ToRegister(right));
        } else {
          __ orp(ToRegister(left), ToRegister(right));
        }
        break;
      case Token::BIT_XOR:
        if (instr->IsInteger32()) {
          __ xorl(ToRegister(left), ToRegister(right));
        } else {
          __ xorp(ToRegister(left), ToRegister(right));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());
  if (right->IsRegister()) {
    DCHECK(ToRegister(right).is(rcx));

    switch (instr->op()) {
      case Token::ROR:
        __ rorl_cl(ToRegister(left));
        break;
      case Token::SAR:
        __ sarl_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shrl_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case Token::SHL:
        __ shll_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int32_t value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ rorl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sarl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ shrl(ToRegister(left), Immediate(shift_count));
        } else if (instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi()) {
            if (SmiValuesAre32Bits()) {
              __ shlp(ToRegister(left), Immediate(shift_count));
            } else {
              DCHECK(SmiValuesAre31Bits());
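              // With 31-bit smis the shifted value must still fit in a smi.
              // Shift by shift_count - 1 here and let the smi-tagging step
              // below perform the final doubling, so its overflow flag tells
              // us whether the full shift overflowed.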
1578 if (instr->can_deopt()) {
1579 if (shift_count != 1) {
1580 __ shll(ToRegister(left), Immediate(shift_count - 1));
1581 }
1582 __ Integer32ToSmi(ToRegister(left), ToRegister(left));
1583 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1584 } else {
1585 __ shll(ToRegister(left), Immediate(shift_count));
1586 }
1587 }
1588 } else {
1589 __ shll(ToRegister(left), Immediate(shift_count));
1590 }
1591 }
1592 break;
1593 default:
1594 UNREACHABLE();
1595 break;
1596 }
1597 }
1598 }
1599
1600
DoSubI(LSubI * instr)1601 void LCodeGen::DoSubI(LSubI* instr) {
1602 LOperand* left = instr->left();
1603 LOperand* right = instr->right();
1604 DCHECK(left->Equals(instr->result()));
1605
1606 if (right->IsConstantOperand()) {
1607 int32_t right_operand =
1608 ToRepresentation(LConstantOperand::cast(right),
1609 instr->hydrogen()->right()->representation());
1610 __ subl(ToRegister(left), Immediate(right_operand));
1611 } else if (right->IsRegister()) {
1612 if (instr->hydrogen_value()->representation().IsSmi()) {
1613 __ subp(ToRegister(left), ToRegister(right));
1614 } else {
1615 __ subl(ToRegister(left), ToRegister(right));
1616 }
1617 } else {
1618 if (instr->hydrogen_value()->representation().IsSmi()) {
1619 __ subp(ToRegister(left), ToOperand(right));
1620 } else {
1621 __ subl(ToRegister(left), ToOperand(right));
1622 }
1623 }
1624
1625 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1626 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1627 }
1628 }
1629
1630
DoConstantI(LConstantI * instr)1631 void LCodeGen::DoConstantI(LConstantI* instr) {
1632 Register dst = ToRegister(instr->result());
1633 if (instr->value() == 0) {
1634 __ xorl(dst, dst);
1635 } else {
1636 __ movl(dst, Immediate(instr->value()));
1637 }
1638 }
1639
1640
DoConstantS(LConstantS * instr)1641 void LCodeGen::DoConstantS(LConstantS* instr) {
1642 __ Move(ToRegister(instr->result()), instr->value());
1643 }
1644
1645
DoConstantD(LConstantD * instr)1646 void LCodeGen::DoConstantD(LConstantD* instr) {
1647 __ Move(ToDoubleRegister(instr->result()), instr->bits());
1648 }
1649
1650
DoConstantE(LConstantE * instr)1651 void LCodeGen::DoConstantE(LConstantE* instr) {
1652 __ LoadAddress(ToRegister(instr->result()), instr->value());
1653 }
1654
1655
DoConstantT(LConstantT * instr)1656 void LCodeGen::DoConstantT(LConstantT* instr) {
1657 Handle<Object> object = instr->value(isolate());
1658 AllowDeferredHandleDereference smi_check;
1659 __ Move(ToRegister(instr->result()), object);
1660 }
1661
1662
DoMapEnumLength(LMapEnumLength * instr)1663 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1664 Register result = ToRegister(instr->result());
1665 Register map = ToRegister(instr->value());
1666 __ EnumLength(result, map);
1667 }
1668
1669
BuildSeqStringOperand(Register string,LOperand * index,String::Encoding encoding)1670 Operand LCodeGen::BuildSeqStringOperand(Register string,
1671 LOperand* index,
1672 String::Encoding encoding) {
1673 if (index->IsConstantOperand()) {
1674 int offset = ToInteger32(LConstantOperand::cast(index));
1675 if (encoding == String::TWO_BYTE_ENCODING) {
1676 offset *= kUC16Size;
1677 }
1678 STATIC_ASSERT(kCharSize == 1);
1679 return FieldOperand(string, SeqString::kHeaderSize + offset);
1680 }
1681 return FieldOperand(
1682 string, ToRegister(index),
1683 encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
1684 SeqString::kHeaderSize);
1685 }
1686
1687
1688 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1689 String::Encoding encoding = instr->hydrogen()->encoding();
1690 Register result = ToRegister(instr->result());
1691 Register string = ToRegister(instr->string());
1692
1693 if (FLAG_debug_code) {
1694 __ Push(string);
1695 __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
1696 __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));
1697
1698 __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
1699 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1700 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1701 __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
1702 ? one_byte_seq_type : two_byte_seq_type));
1703 __ Check(equal, kUnexpectedStringType);
1704 __ Pop(string);
1705 }
1706
1707 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1708 if (encoding == String::ONE_BYTE_ENCODING) {
1709 __ movzxbl(result, operand);
1710 } else {
1711 __ movzxwl(result, operand);
1712 }
1713 }
1714
1715
1716 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1717 String::Encoding encoding = instr->hydrogen()->encoding();
1718 Register string = ToRegister(instr->string());
1719
1720 if (FLAG_debug_code) {
1721 Register value = ToRegister(instr->value());
1722 Register index = ToRegister(instr->index());
1723 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1724 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1725 int encoding_mask =
1726 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1727 ? one_byte_seq_type : two_byte_seq_type;
1728 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1729 }
1730
1731 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1732 if (instr->value()->IsConstantOperand()) {
1733 int value = ToInteger32(LConstantOperand::cast(instr->value()));
1734 DCHECK_LE(0, value);
1735 if (encoding == String::ONE_BYTE_ENCODING) {
1736 DCHECK_LE(value, String::kMaxOneByteCharCode);
1737 __ movb(operand, Immediate(value));
1738 } else {
1739 DCHECK_LE(value, String::kMaxUtf16CodeUnit);
1740 __ movw(operand, Immediate(value));
1741 }
1742 } else {
1743 Register value = ToRegister(instr->value());
1744 if (encoding == String::ONE_BYTE_ENCODING) {
1745 __ movb(operand, value);
1746 } else {
1747 __ movw(operand, value);
1748 }
1749 }
1750 }
1751
1752
1753 void LCodeGen::DoAddI(LAddI* instr) {
1754 LOperand* left = instr->left();
1755 LOperand* right = instr->right();
1756
1757 Representation target_rep = instr->hydrogen()->representation();
1758 bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
1759
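  // When the result register differs from the left input, use lea so the
  // addition leaves its operands intact. lea does not set flags, so this
  // path assumes no overflow check is required.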
1760 if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
1761 if (right->IsConstantOperand()) {
1762       // Smi immediates are not supported when smis are 32 bits wide.
1763 DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
1764 int32_t offset =
1765 ToRepresentation(LConstantOperand::cast(right),
1766 instr->hydrogen()->right()->representation());
1767 if (is_p) {
1768 __ leap(ToRegister(instr->result()),
1769 MemOperand(ToRegister(left), offset));
1770 } else {
1771 __ leal(ToRegister(instr->result()),
1772 MemOperand(ToRegister(left), offset));
1773 }
1774 } else {
1775 Operand address(ToRegister(left), ToRegister(right), times_1, 0);
1776 if (is_p) {
1777 __ leap(ToRegister(instr->result()), address);
1778 } else {
1779 __ leal(ToRegister(instr->result()), address);
1780 }
1781 }
1782 } else {
1783 if (right->IsConstantOperand()) {
1784       // Smi immediates are not supported when smis are 32 bits wide.
1785 DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
1786 int32_t right_operand =
1787 ToRepresentation(LConstantOperand::cast(right),
1788 instr->hydrogen()->right()->representation());
1789 if (is_p) {
1790 __ addp(ToRegister(left), Immediate(right_operand));
1791 } else {
1792 __ addl(ToRegister(left), Immediate(right_operand));
1793 }
1794 } else if (right->IsRegister()) {
1795 if (is_p) {
1796 __ addp(ToRegister(left), ToRegister(right));
1797 } else {
1798 __ addl(ToRegister(left), ToRegister(right));
1799 }
1800 } else {
1801 if (is_p) {
1802 __ addp(ToRegister(left), ToOperand(right));
1803 } else {
1804 __ addl(ToRegister(left), ToOperand(right));
1805 }
1806 }
1807 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1808 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1809 }
1810 }
1811 }
1812
1813
1814 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1815 LOperand* left = instr->left();
1816 LOperand* right = instr->right();
1817 DCHECK(left->Equals(instr->result()));
1818 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1819 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1820 Label return_left;
1821 Condition condition = (operation == HMathMinMax::kMathMin)
1822 ? less_equal
1823 : greater_equal;
1824 Register left_reg = ToRegister(left);
1825 if (right->IsConstantOperand()) {
1826 Immediate right_imm = Immediate(
1827 ToRepresentation(LConstantOperand::cast(right),
1828 instr->hydrogen()->right()->representation()));
1829 DCHECK(SmiValuesAre32Bits()
1830 ? !instr->hydrogen()->representation().IsSmi()
1831 : SmiValuesAre31Bits());
1832 __ cmpl(left_reg, right_imm);
1833 __ j(condition, &return_left, Label::kNear);
1834 __ movp(left_reg, right_imm);
1835 } else if (right->IsRegister()) {
1836 Register right_reg = ToRegister(right);
1837 if (instr->hydrogen_value()->representation().IsSmi()) {
1838 __ cmpp(left_reg, right_reg);
1839 } else {
1840 __ cmpl(left_reg, right_reg);
1841 }
1842 __ j(condition, &return_left, Label::kNear);
1843 __ movp(left_reg, right_reg);
1844 } else {
1845 Operand right_op = ToOperand(right);
1846 if (instr->hydrogen_value()->representation().IsSmi()) {
1847 __ cmpp(left_reg, right_op);
1848 } else {
1849 __ cmpl(left_reg, right_op);
1850 }
1851 __ j(condition, &return_left, Label::kNear);
1852 __ movp(left_reg, right_op);
1853 }
1854 __ bind(&return_left);
1855 } else {
1856 DCHECK(instr->hydrogen()->representation().IsDouble());
1857 Label not_nan, distinct, return_left, return_right;
1858 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
1859 XMMRegister left_reg = ToDoubleRegister(left);
1860 XMMRegister right_reg = ToDoubleRegister(right);
1861 __ Ucomisd(left_reg, right_reg);
1862     __ j(parity_odd, &not_nan, Label::kNear);  // Both are not NaN.
1863
1864 // One of the numbers is NaN. Find which one and return it.
1865 __ Ucomisd(left_reg, left_reg);
1866 __ j(parity_even, &return_left, Label::kNear); // left is NaN.
1867 __ jmp(&return_right, Label::kNear); // right is NaN.
1868
1869     __ bind(&not_nan);
1870 __ j(not_equal, &distinct, Label::kNear); // left != right.
1871
1872 // left == right
1873 XMMRegister xmm_scratch = double_scratch0();
1874 __ Xorpd(xmm_scratch, xmm_scratch);
1875 __ Ucomisd(left_reg, xmm_scratch);
1876 __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
1877
1878 // At this point, both left and right are either +0 or -0.
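      // min(+0, -0) is -0 and max(+0, -0) is +0: OR the bit patterns for min
      // so the sign bit sticks, AND them for max so it only survives when
      // both operands are -0.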
1879 if (operation == HMathMinMax::kMathMin) {
1880 __ Orpd(left_reg, right_reg);
1881 } else {
1882 __ Andpd(left_reg, right_reg);
1883 }
1884 __ jmp(&return_left, Label::kNear);
1885
1886 __ bind(&distinct);
1887 __ j(condition, &return_left, Label::kNear);
1888
1889 __ bind(&return_right);
1890 __ Movapd(left_reg, right_reg);
1891
1892 __ bind(&return_left);
1893 }
1894 }
1895
1896
1897 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1898 XMMRegister left = ToDoubleRegister(instr->left());
1899 XMMRegister right = ToDoubleRegister(instr->right());
1900 XMMRegister result = ToDoubleRegister(instr->result());
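  // Prefer the non-destructive three-operand AVX forms when available; the
  // SSE2 fallbacks overwrite their first operand, so the register allocator
  // must have placed the result in the left register (checked below).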
1901 switch (instr->op()) {
1902 case Token::ADD:
1903 if (CpuFeatures::IsSupported(AVX)) {
1904 CpuFeatureScope scope(masm(), AVX);
1905 __ vaddsd(result, left, right);
1906 } else {
1907 DCHECK(result.is(left));
1908 __ addsd(left, right);
1909 }
1910 break;
1911 case Token::SUB:
1912 if (CpuFeatures::IsSupported(AVX)) {
1913 CpuFeatureScope scope(masm(), AVX);
1914 __ vsubsd(result, left, right);
1915 } else {
1916 DCHECK(result.is(left));
1917 __ subsd(left, right);
1918 }
1919 break;
1920 case Token::MUL:
1921 if (CpuFeatures::IsSupported(AVX)) {
1922 CpuFeatureScope scope(masm(), AVX);
1923 __ vmulsd(result, left, right);
1924 } else {
1925 DCHECK(result.is(left));
1926 __ mulsd(left, right);
1927 }
1928 break;
1929 case Token::DIV:
1930 if (CpuFeatures::IsSupported(AVX)) {
1931 CpuFeatureScope scope(masm(), AVX);
1932 __ vdivsd(result, left, right);
1933 } else {
1934 DCHECK(result.is(left));
1935 __ divsd(left, right);
1936 }
1937       // Don't delete this mov. It may improve performance on some CPUs
1938       // when there is a (v)mulsd depending on the result.
1939 __ Movapd(result, result);
1940 break;
1941 case Token::MOD: {
1942 XMMRegister xmm_scratch = double_scratch0();
1943 __ PrepareCallCFunction(2);
1944 __ Movapd(xmm_scratch, left);
1945 DCHECK(right.is(xmm1));
1946 __ CallCFunction(
1947 ExternalReference::mod_two_doubles_operation(isolate()), 2);
1948 __ Movapd(result, xmm_scratch);
1949 break;
1950 }
1951 default:
1952 UNREACHABLE();
1953 break;
1954 }
1955 }
1956
1957
1958 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1959 DCHECK(ToRegister(instr->context()).is(rsi));
1960 DCHECK(ToRegister(instr->left()).is(rdx));
1961 DCHECK(ToRegister(instr->right()).is(rax));
1962 DCHECK(ToRegister(instr->result()).is(rax));
1963
1964 Handle<Code> code =
1965 CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
1966 CallCode(code, RelocInfo::CODE_TARGET, instr);
1967 }
1968
1969
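// Emits a conditional branch to the instruction's true and false successors,
// omitting the jump to whichever block (if any) is emitted next anyway.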
1970 template<class InstrType>
1971 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
1972 int left_block = instr->TrueDestination(chunk_);
1973 int right_block = instr->FalseDestination(chunk_);
1974
1975 int next_block = GetNextEmittedBlock();
1976
1977 if (right_block == left_block || cc == no_condition) {
1978 EmitGoto(left_block);
1979 } else if (left_block == next_block) {
1980 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1981 } else if (right_block == next_block) {
1982 __ j(cc, chunk_->GetAssemblyLabel(left_block));
1983 } else {
1984 __ j(cc, chunk_->GetAssemblyLabel(left_block));
1985 if (cc != always) {
1986 __ jmp(chunk_->GetAssemblyLabel(right_block));
1987 }
1988 }
1989 }
1990
1991
1992 template <class InstrType>
1993 void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
1994 int true_block = instr->TrueDestination(chunk_);
1995 __ j(cc, chunk_->GetAssemblyLabel(true_block));
1996 }
1997
1998
1999 template <class InstrType>
2000 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2001 int false_block = instr->FalseDestination(chunk_);
2002 __ j(cc, chunk_->GetAssemblyLabel(false_block));
2003 }
2004
2005
2006 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2007 __ int3();
2008 }
2009
2010
2011 void LCodeGen::DoBranch(LBranch* instr) {
2012 Representation r = instr->hydrogen()->value()->representation();
2013 if (r.IsInteger32()) {
2014 DCHECK(!info()->IsStub());
2015 Register reg = ToRegister(instr->value());
2016 __ testl(reg, reg);
2017 EmitBranch(instr, not_zero);
2018 } else if (r.IsSmi()) {
2019 DCHECK(!info()->IsStub());
2020 Register reg = ToRegister(instr->value());
2021 __ testp(reg, reg);
2022 EmitBranch(instr, not_zero);
2023 } else if (r.IsDouble()) {
2024 DCHECK(!info()->IsStub());
2025 XMMRegister reg = ToDoubleRegister(instr->value());
2026 XMMRegister xmm_scratch = double_scratch0();
2027 __ Xorpd(xmm_scratch, xmm_scratch);
2028 __ Ucomisd(reg, xmm_scratch);
2029 EmitBranch(instr, not_equal);
2030 } else {
2031 DCHECK(r.IsTagged());
2032 Register reg = ToRegister(instr->value());
2033 HType type = instr->hydrogen()->value()->type();
2034 if (type.IsBoolean()) {
2035 DCHECK(!info()->IsStub());
2036 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2037 EmitBranch(instr, equal);
2038 } else if (type.IsSmi()) {
2039 DCHECK(!info()->IsStub());
2040 __ SmiCompare(reg, Smi::FromInt(0));
2041 EmitBranch(instr, not_equal);
2042 } else if (type.IsJSArray()) {
2043 DCHECK(!info()->IsStub());
2044 EmitBranch(instr, no_condition);
2045 } else if (type.IsHeapNumber()) {
2046 DCHECK(!info()->IsStub());
2047 XMMRegister xmm_scratch = double_scratch0();
2048 __ Xorpd(xmm_scratch, xmm_scratch);
2049 __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2050 EmitBranch(instr, not_equal);
2051 } else if (type.IsString()) {
2052 DCHECK(!info()->IsStub());
2053 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2054 EmitBranch(instr, not_equal);
2055 } else {
2056 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2057 // Avoid deopts in the case where we've never executed this path before.
2058 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2059
2060 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2061 // undefined -> false.
2062 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2063 __ j(equal, instr->FalseLabel(chunk_));
2064 }
2065 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2066 // true -> true.
2067 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2068 __ j(equal, instr->TrueLabel(chunk_));
2069 // false -> false.
2070 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2071 __ j(equal, instr->FalseLabel(chunk_));
2072 }
2073 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2074 // 'null' -> false.
2075 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2076 __ j(equal, instr->FalseLabel(chunk_));
2077 }
2078
2079 if (expected.Contains(ToBooleanStub::SMI)) {
2080         // Smis: 0 -> false, all others -> true.
2081 __ Cmp(reg, Smi::FromInt(0));
2082 __ j(equal, instr->FalseLabel(chunk_));
2083 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2084 } else if (expected.NeedsMap()) {
2085 // If we need a map later and have a Smi -> deopt.
2086 __ testb(reg, Immediate(kSmiTagMask));
2087 DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
2088 }
2089
2090 const Register map = kScratchRegister;
2091 if (expected.NeedsMap()) {
2092 __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
2093
2094 if (expected.CanBeUndetectable()) {
2095 // Undetectable -> false.
2096 __ testb(FieldOperand(map, Map::kBitFieldOffset),
2097 Immediate(1 << Map::kIsUndetectable));
2098 __ j(not_zero, instr->FalseLabel(chunk_));
2099 }
2100 }
2101
2102 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2103 // spec object -> true.
2104 __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
2105 __ j(above_equal, instr->TrueLabel(chunk_));
2106 }
2107
2108 if (expected.Contains(ToBooleanStub::STRING)) {
2109 // String value -> false iff empty.
2110 Label not_string;
2111 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2112       __ j(above_equal, &not_string, Label::kNear);
2113 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2114 __ j(not_zero, instr->TrueLabel(chunk_));
2115 __ jmp(instr->FalseLabel(chunk_));
2116       __ bind(&not_string);
2117 }
2118
2119 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2120 // Symbol value -> true.
2121 __ CmpInstanceType(map, SYMBOL_TYPE);
2122 __ j(equal, instr->TrueLabel(chunk_));
2123 }
2124
2125 if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
2126 // SIMD value -> true.
2127 __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
2128 __ j(equal, instr->TrueLabel(chunk_));
2129 }
2130
2131 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2132 // heap number -> false iff +0, -0, or NaN.
2133 Label not_heap_number;
2134 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2135       __ j(not_equal, &not_heap_number, Label::kNear);
2136 XMMRegister xmm_scratch = double_scratch0();
2137 __ Xorpd(xmm_scratch, xmm_scratch);
2138 __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2139 __ j(zero, instr->FalseLabel(chunk_));
2140 __ jmp(instr->TrueLabel(chunk_));
2141       __ bind(&not_heap_number);
2142 }
2143
2144 if (!expected.IsGeneric()) {
2145 // We've seen something for the first time -> deopt.
2146 // This can only happen if we are not generic already.
2147 DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
2148 }
2149 }
2150 }
2151 }
2152
2153
2154 void LCodeGen::EmitGoto(int block) {
2155 if (!IsNextEmittedBlock(block)) {
2156 __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
2157 }
2158 }
2159
2160
2161 void LCodeGen::DoGoto(LGoto* instr) {
2162 EmitGoto(instr->block_id());
2163 }
2164
2165
2166 inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2167 Condition cond = no_condition;
2168 switch (op) {
2169 case Token::EQ:
2170 case Token::EQ_STRICT:
2171 cond = equal;
2172 break;
2173 case Token::NE:
2174 case Token::NE_STRICT:
2175 cond = not_equal;
2176 break;
2177 case Token::LT:
2178 cond = is_unsigned ? below : less;
2179 break;
2180 case Token::GT:
2181 cond = is_unsigned ? above : greater;
2182 break;
2183 case Token::LTE:
2184 cond = is_unsigned ? below_equal : less_equal;
2185 break;
2186 case Token::GTE:
2187 cond = is_unsigned ? above_equal : greater_equal;
2188 break;
2189 case Token::IN:
2190 case Token::INSTANCEOF:
2191 default:
2192 UNREACHABLE();
2193 }
2194 return cond;
2195 }
2196
2197
2198 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2199 LOperand* left = instr->left();
2200 LOperand* right = instr->right();
2201 bool is_unsigned =
2202 instr->is_double() ||
2203 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2204 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2205 Condition cc = TokenToCondition(instr->op(), is_unsigned);
2206
2207 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2208 // We can statically evaluate the comparison.
2209 double left_val = ToDouble(LConstantOperand::cast(left));
2210 double right_val = ToDouble(LConstantOperand::cast(right));
2211 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2212 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2213 EmitGoto(next_block);
2214 } else {
2215 if (instr->is_double()) {
2216 // Don't base result on EFLAGS when a NaN is involved. Instead
2217 // jump to the false block.
2218 __ Ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2219 __ j(parity_even, instr->FalseLabel(chunk_));
2220 } else {
2221 int32_t value;
2222 if (right->IsConstantOperand()) {
2223 value = ToInteger32(LConstantOperand::cast(right));
2224 if (instr->hydrogen_value()->representation().IsSmi()) {
2225 __ Cmp(ToRegister(left), Smi::FromInt(value));
2226 } else {
2227 __ cmpl(ToRegister(left), Immediate(value));
2228 }
2229 } else if (left->IsConstantOperand()) {
2230 value = ToInteger32(LConstantOperand::cast(left));
2231 if (instr->hydrogen_value()->representation().IsSmi()) {
2232 if (right->IsRegister()) {
2233 __ Cmp(ToRegister(right), Smi::FromInt(value));
2234 } else {
2235 __ Cmp(ToOperand(right), Smi::FromInt(value));
2236 }
2237 } else if (right->IsRegister()) {
2238 __ cmpl(ToRegister(right), Immediate(value));
2239 } else {
2240 __ cmpl(ToOperand(right), Immediate(value));
2241 }
2242 // We commuted the operands, so commute the condition.
2243 cc = CommuteCondition(cc);
2244 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2245 if (right->IsRegister()) {
2246 __ cmpp(ToRegister(left), ToRegister(right));
2247 } else {
2248 __ cmpp(ToRegister(left), ToOperand(right));
2249 }
2250 } else {
2251 if (right->IsRegister()) {
2252 __ cmpl(ToRegister(left), ToRegister(right));
2253 } else {
2254 __ cmpl(ToRegister(left), ToOperand(right));
2255 }
2256 }
2257 }
2258 EmitBranch(instr, cc);
2259 }
2260 }
2261
2262
2263 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2264 Register left = ToRegister(instr->left());
2265
2266 if (instr->right()->IsConstantOperand()) {
2267 Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2268 __ Cmp(left, right);
2269 } else {
2270 Register right = ToRegister(instr->right());
2271 __ cmpp(left, right);
2272 }
2273 EmitBranch(instr, equal);
2274 }
2275
2276
2277 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2278 if (instr->hydrogen()->representation().IsTagged()) {
2279 Register input_reg = ToRegister(instr->object());
2280 __ Cmp(input_reg, factory()->the_hole_value());
2281 EmitBranch(instr, equal);
2282 return;
2283 }
2284
2285 XMMRegister input_reg = ToDoubleRegister(instr->object());
2286 __ Ucomisd(input_reg, input_reg);
2287 EmitFalseBranch(instr, parity_odd);
2288
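  // The hole is one specific NaN bit pattern, and Ucomisd cannot distinguish
  // NaNs, so spill the value and compare its upper 32 bits against
  // kHoleNanUpper32. The compare reads the word just below rsp, where the
  // value was stored before rsp was restored.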
2289 __ subp(rsp, Immediate(kDoubleSize));
2290 __ Movsd(MemOperand(rsp, 0), input_reg);
2291 __ addp(rsp, Immediate(kDoubleSize));
2292
2293 int offset = sizeof(kHoleNanUpper32);
2294 __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
2295 EmitBranch(instr, equal);
2296 }
2297
2298
2299 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2300 Representation rep = instr->hydrogen()->value()->representation();
2301 DCHECK(!rep.IsInteger32());
2302
2303 if (rep.IsDouble()) {
2304 XMMRegister value = ToDoubleRegister(instr->value());
2305 XMMRegister xmm_scratch = double_scratch0();
2306 __ Xorpd(xmm_scratch, xmm_scratch);
2307 __ Ucomisd(xmm_scratch, value);
2308 EmitFalseBranch(instr, not_equal);
2309 __ Movmskpd(kScratchRegister, value);
2310 __ testl(kScratchRegister, Immediate(1));
2311 EmitBranch(instr, not_zero);
2312 } else {
2313 Register value = ToRegister(instr->value());
2314 Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2315 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
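    // -0 is the heap number with upper word 0x80000000 and lower word 0.
    // Subtracting 1 from the upper word overflows only when it equals
    // kMinInt (0x80000000), so no_overflow rules out -0 cheaply.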
2316 __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
2317 Immediate(0x1));
2318 EmitFalseBranch(instr, no_overflow);
2319 __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
2320 Immediate(0x00000000));
2321 EmitBranch(instr, equal);
2322 }
2323 }
2324
2325
2326 Condition LCodeGen::EmitIsString(Register input,
2327 Register temp1,
2328 Label* is_not_string,
2329 SmiCheck check_needed = INLINE_SMI_CHECK) {
2330 if (check_needed == INLINE_SMI_CHECK) {
2331 __ JumpIfSmi(input, is_not_string);
2332 }
2333
2334 Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2335
2336 return cond;
2337 }
2338
2339
2340 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2341 Register reg = ToRegister(instr->value());
2342 Register temp = ToRegister(instr->temp());
2343
2344 SmiCheck check_needed =
2345 instr->hydrogen()->value()->type().IsHeapObject()
2346 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2347
2348 Condition true_cond = EmitIsString(
2349 reg, temp, instr->FalseLabel(chunk_), check_needed);
2350
2351 EmitBranch(instr, true_cond);
2352 }
2353
2354
2355 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2356 Condition is_smi;
2357 if (instr->value()->IsRegister()) {
2358 Register input = ToRegister(instr->value());
2359 is_smi = masm()->CheckSmi(input);
2360 } else {
2361 Operand input = ToOperand(instr->value());
2362 is_smi = masm()->CheckSmi(input);
2363 }
2364 EmitBranch(instr, is_smi);
2365 }
2366
2367
2368 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2369 Register input = ToRegister(instr->value());
2370 Register temp = ToRegister(instr->temp());
2371
2372 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2373 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2374 }
2375 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2376 __ testb(FieldOperand(temp, Map::kBitFieldOffset),
2377 Immediate(1 << Map::kIsUndetectable));
2378 EmitBranch(instr, not_zero);
2379 }
2380
2381
2382 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2383 DCHECK(ToRegister(instr->context()).is(rsi));
2384 DCHECK(ToRegister(instr->left()).is(rdx));
2385 DCHECK(ToRegister(instr->right()).is(rax));
2386
2387 Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
2388 CallCode(code, RelocInfo::CODE_TARGET, instr);
2389 __ testp(rax, rax);
2390
2391 EmitBranch(instr, TokenToCondition(instr->op(), false));
2392 }
2393
2394
2395 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2396 InstanceType from = instr->from();
2397 InstanceType to = instr->to();
2398 if (from == FIRST_TYPE) return to;
2399 DCHECK(from == to || to == LAST_TYPE);
2400 return from;
2401 }
2402
2403
2404 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2405 InstanceType from = instr->from();
2406 InstanceType to = instr->to();
2407 if (from == to) return equal;
2408 if (to == LAST_TYPE) return above_equal;
2409 if (from == FIRST_TYPE) return below_equal;
2410 UNREACHABLE();
2411 return equal;
2412 }
2413
2414
2415 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2416 Register input = ToRegister(instr->value());
2417
2418 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2419 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2420 }
2421
2422 __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
2423 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2424 }
2425
2426
2427 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2428 Register input = ToRegister(instr->value());
2429 Register result = ToRegister(instr->result());
2430
2431 __ AssertString(input);
2432
2433 __ movl(result, FieldOperand(input, String::kHashFieldOffset));
2434 DCHECK(String::kHashShift >= kSmiTagSize);
2435 __ IndexFromHash(result, result);
2436 }
2437
2438
2439 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2440 LHasCachedArrayIndexAndBranch* instr) {
2441 Register input = ToRegister(instr->value());
2442
2443 __ testl(FieldOperand(input, String::kHashFieldOffset),
2444 Immediate(String::kContainsCachedArrayIndexMask));
2445 EmitBranch(instr, equal);
2446 }
2447
2448
2449 // Branches to a label or falls through with the answer in the z flag.
2450 // Trashes the temp register.
2451 void LCodeGen::EmitClassOfTest(Label* is_true,
2452 Label* is_false,
2453 Handle<String> class_name,
2454 Register input,
2455 Register temp,
2456 Register temp2) {
2457 DCHECK(!input.is(temp));
2458 DCHECK(!input.is(temp2));
2459 DCHECK(!temp.is(temp2));
2460
2461 __ JumpIfSmi(input, is_false);
2462
2463 __ CmpObjectType(input, JS_FUNCTION_TYPE, temp);
2464 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2465 __ j(equal, is_true);
2466 } else {
2467 __ j(equal, is_false);
2468 }
2469
2470 // Check if the constructor in the map is a function.
2471 __ GetMapConstructor(temp, temp, kScratchRegister);
2472
2473 // Objects with a non-function constructor have class 'Object'.
2474 __ CmpInstanceType(kScratchRegister, JS_FUNCTION_TYPE);
2475 if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2476 __ j(not_equal, is_true);
2477 } else {
2478 __ j(not_equal, is_false);
2479 }
2480
2481 // temp now contains the constructor function. Grab the
2482 // instance class name from there.
2483 __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2484 __ movp(temp, FieldOperand(temp,
2485 SharedFunctionInfo::kInstanceClassNameOffset));
2486 // The class name we are testing against is internalized since it's a literal.
2487 // The name in the constructor is internalized because of the way the context
2488 // is booted. This routine isn't expected to work for random API-created
2489 // classes and it doesn't have to because you can't access it with natives
2490 // syntax. Since both sides are internalized it is sufficient to use an
2491 // identity comparison.
2492 DCHECK(class_name->IsInternalizedString());
2493 __ Cmp(temp, class_name);
2494 // End with the answer in the z flag.
2495 }
2496
2497
2498 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2499 Register input = ToRegister(instr->value());
2500 Register temp = ToRegister(instr->temp());
2501 Register temp2 = ToRegister(instr->temp2());
2502 Handle<String> class_name = instr->hydrogen()->class_name();
2503
2504 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2505 class_name, input, temp, temp2);
2506
2507 EmitBranch(instr, equal);
2508 }
2509
2510
2511 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2512 Register reg = ToRegister(instr->value());
2513
2514 __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2515 EmitBranch(instr, equal);
2516 }
2517
2518
2519 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2520 DCHECK(ToRegister(instr->context()).is(rsi));
2521 DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
2522 DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
2523 DCHECK(ToRegister(instr->result()).is(rax));
2524 InstanceOfStub stub(isolate());
2525 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2526 }
2527
2528
2529 void LCodeGen::DoHasInPrototypeChainAndBranch(
2530 LHasInPrototypeChainAndBranch* instr) {
2531 Register const object = ToRegister(instr->object());
2532 Register const object_map = kScratchRegister;
2533 Register const object_prototype = object_map;
2534 Register const prototype = ToRegister(instr->prototype());
2535
2536 // The {object} must be a spec object. It's sufficient to know that {object}
2537 // is not a smi, since all other non-spec objects have {null} prototypes and
2538 // will be ruled out below.
2539 if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2540 Condition is_smi = __ CheckSmi(object);
2541 EmitFalseBranch(instr, is_smi);
2542 }
2543
2544   // Loop through the {object}'s prototype chain looking for the {prototype}.
2545 __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
2546 Label loop;
2547 __ bind(&loop);
2548
2549
2550 // Deoptimize if the object needs to be access checked.
2551 __ testb(FieldOperand(object_map, Map::kBitFieldOffset),
2552 Immediate(1 << Map::kIsAccessCheckNeeded));
2553 DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
2554 // Deoptimize for proxies.
2555 __ CmpInstanceType(object_map, JS_PROXY_TYPE);
2556 DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
2557
2558 __ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
2559 __ cmpp(object_prototype, prototype);
2560 EmitTrueBranch(instr, equal);
2561 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
2562 EmitFalseBranch(instr, equal);
2563 __ movp(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
2564 __ jmp(&loop);
2565 }
2566
2567
2568 void LCodeGen::DoCmpT(LCmpT* instr) {
2569 DCHECK(ToRegister(instr->context()).is(rsi));
2570 Token::Value op = instr->op();
2571
2572 Handle<Code> ic =
2573 CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
2574 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2575
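  // The CompareIC leaves an integer in rax whose relation to zero encodes
  // the comparison outcome; test it and materialize true or false.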
2576 Condition condition = TokenToCondition(op, false);
2577 Label true_value, done;
2578 __ testp(rax, rax);
2579 __ j(condition, &true_value, Label::kNear);
2580 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2581 __ jmp(&done, Label::kNear);
2582 __ bind(&true_value);
2583 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2584 __ bind(&done);
2585 }
2586
2587
2588 void LCodeGen::DoReturn(LReturn* instr) {
2589 if (FLAG_trace && info()->IsOptimizing()) {
2590 // Preserve the return value on the stack and rely on the runtime call
2591 // to return the value in the same register. We're leaving the code
2592     // managed by the register allocator and tearing down the frame, so it's
2593 // safe to write to the context register.
2594 __ Push(rax);
2595 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2596 __ CallRuntime(Runtime::kTraceExit);
2597 }
2598 if (info()->saves_caller_doubles()) {
2599 RestoreCallerDoubles();
2600 }
2601 if (NeedsEagerFrame()) {
2602 __ movp(rsp, rbp);
2603 __ popq(rbp);
2604 }
2605 if (instr->has_constant_parameter_count()) {
2606 __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
2607 rcx);
2608 } else {
2609 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2610 Register reg = ToRegister(instr->parameter_count());
2611     // The argument count parameter is a smi.
2612 __ SmiToInteger32(reg, reg);
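    // Drop a variable number of arguments: pop the return address into a
    // spare register, advance rsp past the arguments, and jump back.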
2613 Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
2614 __ PopReturnAddressTo(return_addr_reg);
2615 __ shlp(reg, Immediate(kPointerSizeLog2));
2616 __ addp(rsp, reg);
2617 __ jmp(return_addr_reg);
2618 }
2619 }
2620
2621
2622 template <class T>
2623 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2624 Register vector_register = ToRegister(instr->temp_vector());
2625 Register slot_register = LoadWithVectorDescriptor::SlotRegister();
2626 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2627 DCHECK(slot_register.is(rax));
2628
2629 AllowDeferredHandleDereference vector_structure_check;
2630 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2631 __ Move(vector_register, vector);
2632   // No need to allocate this register; it is fixed by the load descriptor.
2633 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2634 int index = vector->GetIndex(slot);
2635 __ Move(slot_register, Smi::FromInt(index));
2636 }
2637
2638
2639 template <class T>
2640 void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2641 Register vector_register = ToRegister(instr->temp_vector());
2642 Register slot_register = ToRegister(instr->temp_slot());
2643
2644 AllowDeferredHandleDereference vector_structure_check;
2645 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2646 __ Move(vector_register, vector);
2647 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2648 int index = vector->GetIndex(slot);
2649 __ Move(slot_register, Smi::FromInt(index));
2650 }
2651
2652
2653 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2654 DCHECK(ToRegister(instr->context()).is(rsi));
2655 DCHECK(ToRegister(instr->global_object())
2656 .is(LoadDescriptor::ReceiverRegister()));
2657 DCHECK(ToRegister(instr->result()).is(rax));
2658
2659 __ Move(LoadDescriptor::NameRegister(), instr->name());
2660 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2661 Handle<Code> ic =
2662 CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
2663 SLOPPY, PREMONOMORPHIC).code();
2664 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2665 }
2666
2667
2668 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2669 Register context = ToRegister(instr->context());
2670 Register result = ToRegister(instr->result());
2671 __ movp(result, ContextOperand(context, instr->slot_index()));
2672 if (instr->hydrogen()->RequiresHoleCheck()) {
2673 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2674 if (instr->hydrogen()->DeoptimizesOnHole()) {
2675 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2676 } else {
2677 Label is_not_hole;
2678 __ j(not_equal, &is_not_hole, Label::kNear);
2679 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2680 __ bind(&is_not_hole);
2681 }
2682 }
2683 }
2684
2685
2686 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2687 Register context = ToRegister(instr->context());
2688 Register value = ToRegister(instr->value());
2689
2690 Operand target = ContextOperand(context, instr->slot_index());
2691
2692 Label skip_assignment;
2693 if (instr->hydrogen()->RequiresHoleCheck()) {
2694 __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
2695 if (instr->hydrogen()->DeoptimizesOnHole()) {
2696 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2697 } else {
2698 __ j(not_equal, &skip_assignment);
2699 }
2700 }
2701 __ movp(target, value);
2702
2703 if (instr->hydrogen()->NeedsWriteBarrier()) {
2704 SmiCheck check_needed =
2705 instr->hydrogen()->value()->type().IsHeapObject()
2706 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2707 int offset = Context::SlotOffset(instr->slot_index());
2708 Register scratch = ToRegister(instr->temp());
2709 __ RecordWriteContextSlot(context,
2710 offset,
2711 value,
2712 scratch,
2713 kSaveFPRegs,
2714 EMIT_REMEMBERED_SET,
2715 check_needed);
2716 }
2717
2718 __ bind(&skip_assignment);
2719 }
2720
2721
2722 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2723 HObjectAccess access = instr->hydrogen()->access();
2724 int offset = access.offset();
2725
2726 if (access.IsExternalMemory()) {
2727 Register result = ToRegister(instr->result());
2728 if (instr->object()->IsConstantOperand()) {
2729 DCHECK(result.is(rax));
2730 __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
2731 } else {
2732 Register object = ToRegister(instr->object());
2733 __ Load(result, MemOperand(object, offset), access.representation());
2734 }
2735 return;
2736 }
2737
2738 Register object = ToRegister(instr->object());
2739 if (instr->hydrogen()->representation().IsDouble()) {
2740 DCHECK(access.IsInobject());
2741 XMMRegister result = ToDoubleRegister(instr->result());
2742 __ Movsd(result, FieldOperand(object, offset));
2743 return;
2744 }
2745
2746 Register result = ToRegister(instr->result());
2747 if (!access.IsInobject()) {
2748 __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
2749 object = result;
2750 }
2751
2752 Representation representation = access.representation();
2753 if (representation.IsSmi() && SmiValuesAre32Bits() &&
2754 instr->hydrogen()->representation().IsInteger32()) {
2755 if (FLAG_debug_code) {
2756 Register scratch = kScratchRegister;
2757 __ Load(scratch, FieldOperand(object, offset), representation);
2758 __ AssertSmi(scratch);
2759 }
2760
2761 // Read int value directly from upper half of the smi.
2762 STATIC_ASSERT(kSmiTag == 0);
2763 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
2764 offset += kPointerSize / 2;
2765 representation = Representation::Integer32();
2766 }
2767 __ Load(result, FieldOperand(object, offset), representation);
2768 }
2769
2770
2771 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2772 DCHECK(ToRegister(instr->context()).is(rsi));
2773 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
2774 DCHECK(ToRegister(instr->result()).is(rax));
2775
2776 __ Move(LoadDescriptor::NameRegister(), instr->name());
2777 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
2778 Handle<Code> ic =
2779 CodeFactory::LoadICInOptimizedCode(
2780 isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
2781 instr->hydrogen()->initialization_state()).code();
2782 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2783 }
2784
2785
2786 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2787 Register function = ToRegister(instr->function());
2788 Register result = ToRegister(instr->result());
2789
2790 // Get the prototype or initial map from the function.
2791 __ movp(result,
2792 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2793
2794 // Check that the function has a prototype or an initial map.
2795 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2796 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2797
2798 // If the function does not have an initial map, we're done.
2799 Label done;
2800 __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
2801 __ j(not_equal, &done, Label::kNear);
2802
2803 // Get the prototype from the initial map.
2804 __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
2805
2806 // All done.
2807 __ bind(&done);
2808 }
2809
2810
2811 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2812 Register result = ToRegister(instr->result());
2813 __ LoadRoot(result, instr->index());
2814 }
2815
2816
2817 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2818 Register arguments = ToRegister(instr->arguments());
2819 Register result = ToRegister(instr->result());
2820
2821 if (instr->length()->IsConstantOperand() &&
2822 instr->index()->IsConstantOperand()) {
2823 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2824 int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2825 if (const_index >= 0 && const_index < const_length) {
2826 StackArgumentsAccessor args(arguments, const_length,
2827 ARGUMENTS_DONT_CONTAIN_RECEIVER);
2828 __ movp(result, args.GetArgumentOperand(const_index));
2829 } else if (FLAG_debug_code) {
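      // A constant index that is statically out of bounds is unreachable in
      // valid code; trap in debug builds and emit nothing in release builds.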
2830 __ int3();
2831 }
2832 } else {
2833 Register length = ToRegister(instr->length());
2834 // There are two words between the frame pointer and the last argument.
2835     // Subtracting from length accounts for one of them; add one more.
2836 if (instr->index()->IsRegister()) {
2837 __ subl(length, ToRegister(instr->index()));
2838 } else {
2839 __ subl(length, ToOperand(instr->index()));
2840 }
2841 StackArgumentsAccessor args(arguments, length,
2842 ARGUMENTS_DONT_CONTAIN_RECEIVER);
2843 __ movp(result, args.GetArgumentOperand(0));
2844 }
2845 }
2846
2847
2848 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2849 ElementsKind elements_kind = instr->elements_kind();
2850 LOperand* key = instr->key();
2851 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
2852 Register key_reg = ToRegister(key);
2853 Representation key_representation =
2854 instr->hydrogen()->key()->representation();
2855 if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
2856 __ SmiToInteger64(key_reg, key_reg);
2857 } else if (instr->hydrogen()->IsDehoisted()) {
2858       // Sign extend the key because it could be a 32-bit negative value
2859       // and the dehoisted address computation happens in 64 bits.
2860 __ movsxlq(key_reg, key_reg);
2861 }
2862 }
2863 Operand operand(BuildFastArrayOperand(
2864 instr->elements(),
2865 key,
2866 instr->hydrogen()->key()->representation(),
2867 elements_kind,
2868 instr->base_offset()));
2869
2870 if (elements_kind == FLOAT32_ELEMENTS) {
2871 XMMRegister result(ToDoubleRegister(instr->result()));
2872 __ Cvtss2sd(result, operand);
2873 } else if (elements_kind == FLOAT64_ELEMENTS) {
2874 __ Movsd(ToDoubleRegister(instr->result()), operand);
2875 } else {
2876 Register result(ToRegister(instr->result()));
2877 switch (elements_kind) {
2878 case INT8_ELEMENTS:
2879 __ movsxbl(result, operand);
2880 break;
2881 case UINT8_ELEMENTS:
2882 case UINT8_CLAMPED_ELEMENTS:
2883 __ movzxbl(result, operand);
2884 break;
2885 case INT16_ELEMENTS:
2886 __ movsxwl(result, operand);
2887 break;
2888 case UINT16_ELEMENTS:
2889 __ movzxwl(result, operand);
2890 break;
2891 case INT32_ELEMENTS:
2892 __ movl(result, operand);
2893 break;
2894 case UINT32_ELEMENTS:
2895 __ movl(result, operand);
2896 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
2897 __ testl(result, result);
2898 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
2899 }
2900 break;
2901 case FLOAT32_ELEMENTS:
2902 case FLOAT64_ELEMENTS:
2903 case FAST_ELEMENTS:
2904 case FAST_SMI_ELEMENTS:
2905 case FAST_DOUBLE_ELEMENTS:
2906 case FAST_HOLEY_ELEMENTS:
2907 case FAST_HOLEY_SMI_ELEMENTS:
2908 case FAST_HOLEY_DOUBLE_ELEMENTS:
2909 case DICTIONARY_ELEMENTS:
2910 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
2911 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
2912 UNREACHABLE();
2913 break;
2914 }
2915 }
2916 }
2917
2918
2919 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
2920 XMMRegister result(ToDoubleRegister(instr->result()));
2921 LOperand* key = instr->key();
2922 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
2923 instr->hydrogen()->IsDehoisted()) {
2924     // Sign extend the key because it could be a 32-bit negative value
2925     // and the dehoisted address computation happens in 64 bits.
2926 __ movsxlq(ToRegister(key), ToRegister(key));
2927 }
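  // The hole is a NaN whose upper word is kHoleNanUpper32, so check the
  // element's upper 32 bits (hence the sizeof(kHoleNanLower32) displacement)
  // before loading it as a double.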
2928 if (instr->hydrogen()->RequiresHoleCheck()) {
2929 Operand hole_check_operand = BuildFastArrayOperand(
2930 instr->elements(),
2931 key,
2932 instr->hydrogen()->key()->representation(),
2933 FAST_DOUBLE_ELEMENTS,
2934 instr->base_offset() + sizeof(kHoleNanLower32));
2935 __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
2936 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2937 }
2938
2939 Operand double_load_operand = BuildFastArrayOperand(
2940 instr->elements(),
2941 key,
2942 instr->hydrogen()->key()->representation(),
2943 FAST_DOUBLE_ELEMENTS,
2944 instr->base_offset());
2945 __ Movsd(result, double_load_operand);
2946 }
2947
2948
2949 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
2950 HLoadKeyed* hinstr = instr->hydrogen();
2951 Register result = ToRegister(instr->result());
2952 LOperand* key = instr->key();
2953 bool requires_hole_check = hinstr->RequiresHoleCheck();
2954 Representation representation = hinstr->representation();
2955 int offset = instr->base_offset();
2956
2957 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
2958 instr->hydrogen()->IsDehoisted()) {
2959     // Sign extend the key because it could be a 32-bit negative value
2960     // and the dehoisted address computation happens in 64 bits.
2961 __ movsxlq(ToRegister(key), ToRegister(key));
2962 }
2963 if (representation.IsInteger32() && SmiValuesAre32Bits() &&
2964 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
2965 DCHECK(!requires_hole_check);
2966 if (FLAG_debug_code) {
2967 Register scratch = kScratchRegister;
2968 __ Load(scratch,
2969 BuildFastArrayOperand(instr->elements(),
2970 key,
2971 instr->hydrogen()->key()->representation(),
2972 FAST_ELEMENTS,
2973 offset),
2974 Representation::Smi());
2975 __ AssertSmi(scratch);
2976 }
2977 // Read int value directly from upper half of the smi.
2978 STATIC_ASSERT(kSmiTag == 0);
2979 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
2980 offset += kPointerSize / 2;
2981 }
2982
2983 __ Load(result,
2984 BuildFastArrayOperand(instr->elements(), key,
2985 instr->hydrogen()->key()->representation(),
2986 FAST_ELEMENTS, offset),
2987 representation);
2988
2989 // Check for the hole value.
2990 if (requires_hole_check) {
2991 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
2992 Condition smi = __ CheckSmi(result);
2993 DeoptimizeIf(NegateCondition(smi), instr, Deoptimizer::kNotASmi);
2994 } else {
2995 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2996 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2997 }
2998 } else if (hinstr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
2999 DCHECK(hinstr->elements_kind() == FAST_HOLEY_ELEMENTS);
3000 Label done;
3001 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3002 __ j(not_equal, &done);
3003 if (info()->IsStub()) {
3004 // A stub can safely convert the hole to undefined only if the array
3005 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3006 // it needs to bail out.
3007 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
3008 __ Cmp(FieldOperand(result, Cell::kValueOffset),
3009 Smi::FromInt(Isolate::kArrayProtectorValid));
3010 DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
3011 }
3012 __ Move(result, isolate()->factory()->undefined_value());
3013 __ bind(&done);
3014 }
3015 }
3016
3017
3018 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3019 if (instr->is_fixed_typed_array()) {
3020 DoLoadKeyedExternalArray(instr);
3021 } else if (instr->hydrogen()->representation().IsDouble()) {
3022 DoLoadKeyedFixedDoubleArray(instr);
3023 } else {
3024 DoLoadKeyedFixedArray(instr);
3025 }
3026 }
3027
3028
3029 Operand LCodeGen::BuildFastArrayOperand(
3030 LOperand* elements_pointer,
3031 LOperand* key,
3032 Representation key_representation,
3033 ElementsKind elements_kind,
3034 uint32_t offset) {
3035 Register elements_pointer_reg = ToRegister(elements_pointer);
3036 int shift_size = ElementsKindToShiftSize(elements_kind);
3037 if (key->IsConstantOperand()) {
3038 int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
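    // Reject constant indices with any of the top four bits set (including
    // negatives): shifted by up to 3 bits, they could overflow the 32-bit
    // displacement.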
3039 if (constant_value & 0xF0000000) {
3040 Abort(kArrayIndexConstantValueTooBig);
3041 }
3042 return Operand(elements_pointer_reg,
3043 (constant_value << shift_size) + offset);
3044 } else {
3045 // Guaranteed by ArrayInstructionInterface::KeyedAccessIndexRequirement().
3046 DCHECK(key_representation.IsInteger32());
3047
3048 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3049 return Operand(elements_pointer_reg,
3050 ToRegister(key),
3051 scale_factor,
3052 offset);
3053 }
3054 }
3055
3056
3057 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3058 DCHECK(ToRegister(instr->context()).is(rsi));
3059 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3060 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3061
3062 if (instr->hydrogen()->HasVectorAndSlot()) {
3063 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3064 }
3065
3066 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
3067 isolate(), instr->hydrogen()->language_mode(),
3068 instr->hydrogen()->initialization_state()).code();
3069 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3070 }
3071
3072
3073 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3074 Register result = ToRegister(instr->result());
3075
3076 if (instr->hydrogen()->from_inlined()) {
3077 __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
3078 } else {
3079     // Check for an arguments adaptor frame.
3080 Label done, adapted;
3081 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3082 __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
3083 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3084 __ j(equal, &adapted, Label::kNear);
3085
3086 // No arguments adaptor frame.
3087 __ movp(result, rbp);
3088 __ jmp(&done, Label::kNear);
3089
3090 // Arguments adaptor frame present.
3091 __ bind(&adapted);
3092 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3093
3094     // The result is the frame pointer of this frame if not adapted, or of
3095     // the real frame below the adaptor frame if adapted.
3096 __ bind(&done);
3097 }
3098 }
3099
3100
3101 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3102 Register result = ToRegister(instr->result());
3103
3104 Label done;
3105
3106   // If there is no arguments adaptor frame, the number of arguments is fixed.
3107 if (instr->elements()->IsRegister()) {
3108 __ cmpp(rbp, ToRegister(instr->elements()));
3109 } else {
3110 __ cmpp(rbp, ToOperand(instr->elements()));
3111 }
3112 __ movl(result, Immediate(scope()->num_parameters()));
3113 __ j(equal, &done, Label::kNear);
3114
3115 // Arguments adaptor frame present. Get argument length from there.
3116 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3117 __ SmiToInteger32(result,
3118 Operand(result,
3119 ArgumentsAdaptorFrameConstants::kLengthOffset));
3120
3121 // Argument length is in result register.
3122 __ bind(&done);
3123 }
3124
3125
3126 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3127 Register receiver = ToRegister(instr->receiver());
3128 Register function = ToRegister(instr->function());
3129
3130 // If the receiver is null or undefined, we have to pass the global
3131 // object as a receiver to normal functions. Values have to be
3132 // passed unchanged to builtins and strict-mode functions.
3133 Label global_object, receiver_ok;
3134 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3135
3136 if (!instr->hydrogen()->known_function()) {
3137 // Do not transform the receiver to object for strict mode
3138 // functions.
3139 __ movp(kScratchRegister,
3140 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3141 __ testb(FieldOperand(kScratchRegister,
3142 SharedFunctionInfo::kStrictModeByteOffset),
3143 Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
3144 __ j(not_equal, &receiver_ok, dist);
3145
3146 // Do not transform the receiver to object for builtins.
3147 __ testb(FieldOperand(kScratchRegister,
3148 SharedFunctionInfo::kNativeByteOffset),
3149 Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
3150 __ j(not_equal, &receiver_ok, dist);
3151 }
3152
3153 // Normal function. Replace undefined or null with global receiver.
3154 __ CompareRoot(receiver, Heap::kNullValueRootIndex);
3155 __ j(equal, &global_object, Label::kNear);
3156 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
3157 __ j(equal, &global_object, Label::kNear);
3158
3159 // The receiver should be a JS object.
3160 Condition is_smi = __ CheckSmi(receiver);
3161 DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
3162 __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, kScratchRegister);
3163 DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
3164
3165 __ jmp(&receiver_ok, Label::kNear);
3166 __ bind(&global_object);
3167 __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
3168 __ movp(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
3169 __ movp(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
3170
3171 __ bind(&receiver_ok);
3172 }
3173
3174
3175 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3176 Register receiver = ToRegister(instr->receiver());
3177 Register function = ToRegister(instr->function());
3178 Register length = ToRegister(instr->length());
3179 Register elements = ToRegister(instr->elements());
3180 DCHECK(receiver.is(rax)); // Used for parameter count.
3181 DCHECK(function.is(rdi)); // Required by InvokeFunction.
3182 DCHECK(ToRegister(instr->result()).is(rax));
3183
3184 // Copy the arguments to this function possibly from the
3185 // adaptor frame below it.
3186 const uint32_t kArgumentsLimit = 1 * KB;
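  // Cap the number of copied arguments so that pushing them cannot
  // overflow the stack.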
3187 __ cmpp(length, Immediate(kArgumentsLimit));
3188 DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
3189
3190 __ Push(receiver);
3191 __ movp(receiver, length);
3192
3193 // Loop through the arguments pushing them onto the execution
3194 // stack.
3195 Label invoke, loop;
3196 // length is a small non-negative integer, due to the test above.
3197 __ testl(length, length);
3198 __ j(zero, &invoke, Label::kNear);
3199 __ bind(&loop);
3200 StackArgumentsAccessor args(elements, length,
3201 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3202 __ Push(args.GetArgumentOperand(0));
3203 __ decl(length);
3204 __ j(not_zero, &loop);
3205
3206 // Invoke the function.
3207 __ bind(&invoke);
3208 DCHECK(instr->HasPointerMap());
3209 LPointerMap* pointers = instr->pointer_map();
3210 SafepointGenerator safepoint_generator(
3211 this, pointers, Safepoint::kLazyDeopt);
3212 ParameterCount actual(rax);
3213 __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
3214 safepoint_generator);
3215 }
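
// Illustrative note: kArgumentsLimit caps this inlined
// Function.prototype.apply at 1024 arguments, so e.g.
// f.apply(null, new Array(2000)) deoptimizes with kTooManyArguments
// instead of pushing an unbounded argument list onto the stack here.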


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  EmitPushTaggedOperand(argument);
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in rsi.
    DCHECK(result.is(rsi));
  }
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  __ Push(instr->hydrogen()->pairs());
  __ Push(Smi::FromInt(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kDeclareGlobals, instr);
}


void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count, int arity,
                                 LInstruction* instr) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  Register function_reg = rdi;
  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    // Change context.
    __ movp(rsi, FieldOperand(function_reg, JSFunction::kContextOffset));

    // Always initialize new target and number of actual arguments.
    __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
    __ Set(rax, arity);

    // Invoke function.
    if (function.is_identical_to(info()->closure())) {
      __ CallSelf();
    } else {
      __ Call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
    }

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
  } else {
    // We need to adapt arguments.
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function_reg, no_reg, expected, count, CALL_FUNCTION,
                      generator);
  }
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(rax));

  if (instr->hydrogen()->IsTailCall()) {
    if (NeedsEagerFrame()) __ leave();

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ jmp(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
      __ jmp(target);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code));
      __ call(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(target));
      __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
      __ call(target);
    }
    generator.AfterCall();
  }
}


void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  DCHECK(ToRegister(instr->function()).is(rdi));
  DCHECK(ToRegister(instr->result()).is(rax));

  // Change context.
  __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));

  // Always initialize new target and number of actual arguments.
  __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
  __ Set(rax, instr->arity());

  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

  bool is_self_call = false;
  if (instr->hydrogen()->function()->IsConstant()) {
    Handle<JSFunction> jsfun = Handle<JSFunction>::null();
    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
    jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
    is_self_call = jsfun.is_identical_to(info()->closure());
  }

  if (is_self_call) {
    __ CallSelf();
  } else {
    Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
    generator.BeforeCall(__ CallSize(target));
    __ Call(target);
  }
  generator.AfterCall();
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);

  Label slow, allocated, done;
  Register tmp = input_reg.is(rax) ? rcx : rax;
  Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ testl(tmp, Immediate(HeapNumber::kSignMask));
  __ j(zero, &done);

  __ AllocateHeapNumber(tmp, tmp2, &slow);
  __ jmp(&allocated, Label::kNear);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  CallRuntimeFromDeferred(
      Runtime::kAllocateHeapNumber, 0, instr, instr->context());
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(rax)) __ movp(tmp, rax);
  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
  __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ shlq(tmp2, Immediate(1));
  __ shrq(tmp2, Immediate(1));
  __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}


void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ testl(input_reg, input_reg);
  Label is_positive;
  __ j(not_sign, &is_positive, Label::kNear);
  __ negl(input_reg);  // Sets flags.
  DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
  __ bind(&is_positive);
}


void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ testp(input_reg, input_reg);
  Label is_positive;
  __ j(not_sign, &is_positive, Label::kNear);
  __ negp(input_reg);  // Sets flags.
  DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
  __ bind(&is_positive);
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LMathAbs* instr_;
  };

  DCHECK(instr->value()->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  if (r.IsDouble()) {
    XMMRegister scratch = double_scratch0();
    XMMRegister input_reg = ToDoubleRegister(instr->value());
    __ Xorpd(scratch, scratch);
    __ Subsd(scratch, input_reg);
    __ Andpd(input_reg, scratch);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else if (r.IsSmi()) {
    EmitSmiMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input_reg = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    EmitSmiMathAbs(instr);
    __ bind(deferred->exit());
  }
}
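
// Illustrative note on the double case above: x and -x differ only in the
// IEEE-754 sign bit, so after scratch = 0.0 - x the bitwise Andpd clears
// exactly that bit; e.g. for x = -3.5 we get scratch = 3.5 and
// (x & scratch) = 3.5. The deferred tagged path achieves the same with
// integer shifts (shlq/shrq by 1) on the raw 64-bit heap-number payload,
// and the integer paths deopt on negl/negp overflow because
// abs(INT_MIN) is not representable.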


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  XMMRegister xmm_scratch = double_scratch0();
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());

  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope scope(masm(), SSE4_1);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Deoptimize if minus zero.
      __ Movq(output_reg, input_reg);
      __ subq(output_reg, Immediate(1));
      DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero);
    }
    __ Roundsd(xmm_scratch, input_reg, kRoundDown);
    __ Cvttsd2si(output_reg, xmm_scratch);
    __ cmpl(output_reg, Immediate(0x1));
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  } else {
    Label negative_sign, done;
    // Deoptimize on unordered.
    __ Xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
    __ Ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
    __ j(below, &negative_sign, Label::kNear);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Check for negative zero.
      Label positive_sign;
      __ j(above, &positive_sign, Label::kNear);
      __ Movmskpd(output_reg, input_reg);
      __ testl(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
      __ Set(output_reg, 0);
      __ jmp(&done);
      __ bind(&positive_sign);
    }

    // Use truncating instruction (OK because input is positive).
    __ Cvttsd2si(output_reg, input_reg);
    // Overflow is signalled with minint.
    __ cmpl(output_reg, Immediate(0x1));
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
    __ jmp(&done, Label::kNear);

    // Non-zero negative reaches here.
    __ bind(&negative_sign);
    // Truncate, then compare and compensate.
    __ Cvttsd2si(output_reg, input_reg);
    __ Cvtlsi2sd(xmm_scratch, output_reg);
    __ Ucomisd(input_reg, xmm_scratch);
    __ j(equal, &done, Label::kNear);
    __ subl(output_reg, Immediate(1));
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);

    __ bind(&done);
  }
}
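
// Illustrative note: both deopt checks above use the same trick. -0.0 has
// bit pattern 0x8000000000000000 (INT64_MIN), so subq 1 overflows for
// exactly that input; likewise CVTTSD2SI signals failure by producing
// INT_MIN (0x80000000), the only value for which cmpl against 1 sets the
// overflow flag. The non-SSE4.1 path computes e.g. floor(-2.5) by
// truncating to -2, noticing -2.0 != -2.5, and compensating to -3.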


void LCodeGen::DoMathRound(LMathRound* instr) {
  const XMMRegister xmm_scratch = double_scratch0();
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister input_temp = ToDoubleRegister(instr->temp());
  static int64_t one_half = V8_INT64_C(0x3FE0000000000000);  // 0.5
  static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000);  // -0.5

  Label done, round_to_zero, below_one_half;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  __ movq(kScratchRegister, one_half);
  __ Movq(xmm_scratch, kScratchRegister);
  __ Ucomisd(xmm_scratch, input_reg);
  __ j(above, &below_one_half, Label::kNear);

  // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x).
  __ Addsd(xmm_scratch, input_reg);
  __ Cvttsd2si(output_reg, xmm_scratch);
  // Overflow is signalled with minint.
  __ cmpl(output_reg, Immediate(0x1));
  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  __ jmp(&done, dist);

  __ bind(&below_one_half);
  __ movq(kScratchRegister, minus_one_half);
  __ Movq(xmm_scratch, kScratchRegister);
  __ Ucomisd(xmm_scratch, input_reg);
  __ j(below_equal, &round_to_zero, Label::kNear);

  // CVTTSD2SI rounds towards zero, so we use ceil(x - (-0.5)) and then
  // compare and compensate.
  __ Movapd(input_temp, input_reg);  // Do not alter input_reg.
  __ Subsd(input_temp, xmm_scratch);
  __ Cvttsd2si(output_reg, input_temp);
  // Catch minint due to overflow, and to prevent overflow when compensating.
  __ cmpl(output_reg, Immediate(0x1));
  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);

  __ Cvtlsi2sd(xmm_scratch, output_reg);
  __ Ucomisd(xmm_scratch, input_temp);
  __ j(equal, &done, dist);
  __ subl(output_reg, Immediate(1));
  // No overflow because we already ruled out minint.
  __ jmp(&done, dist);

  __ bind(&round_to_zero);
  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
  // we can ignore the difference between a result of -0 and +0.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Movq(output_reg, input_reg);
    __ testq(output_reg, output_reg);
    DeoptimizeIf(negative, instr, Deoptimizer::kMinusZero);
  }
  __ Set(output_reg, 0);
  __ bind(&done);
}
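
// Worked example (illustrative): for x = -2.5 the code takes the
// below_one_half path, computes input_temp = -2.5 - (-0.5) = -2.0,
// truncates to -2, and the compensation compare finds -2.0 == -2.0, so
// Math.round(-2.5) is -2 (ties round towards +infinity), matching JS
// semantics. For x in [-0.5, 0.5[ the round_to_zero path returns 0,
// deoptimizing first when -0 must be distinguished from +0.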


void LCodeGen::DoMathFround(LMathFround* instr) {
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister output_reg = ToDoubleRegister(instr->result());
  __ Cvtsd2ss(output_reg, input_reg);
  __ Cvtss2sd(output_reg, output_reg);
}
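
// Illustrative note: the double -> float -> double round trip is exactly
// Math.fround, e.g. 1.1 becomes the nearest float32 value,
// approximately 1.100000023841858, when widened back to a double.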


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  XMMRegister output = ToDoubleRegister(instr->result());
  if (instr->value()->IsDoubleRegister()) {
    XMMRegister input = ToDoubleRegister(instr->value());
    __ Sqrtsd(output, input);
  } else {
    Operand input = ToOperand(instr->value());
    __ Sqrtsd(output, input);
  }
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  XMMRegister xmm_scratch = double_scratch0();
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  DCHECK(ToDoubleRegister(instr->result()).is(input_reg));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done, sqrt;
  // Check base for -Infinity. According to IEEE-754, double-precision
  // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
  __ Movq(xmm_scratch, kScratchRegister);
  __ Ucomisd(xmm_scratch, input_reg);
  // Comparing -Infinity with NaN results in "unordered", which sets the
  // zero flag as if both were equal. However, it also sets the carry flag.
  __ j(not_equal, &sqrt, Label::kNear);
  __ j(carry, &sqrt, Label::kNear);
  // If input is -Infinity, return Infinity.
  __ Xorpd(input_reg, input_reg);
  __ Subsd(input_reg, xmm_scratch);
  __ jmp(&done, Label::kNear);

  // Square root.
  __ bind(&sqrt);
  __ Xorpd(xmm_scratch, xmm_scratch);
  __ Addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
  __ Sqrtsd(input_reg, input_reg);
  __ bind(&done);
}
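
// Illustrative note: the Addsd of +0 matters because IEEE-754 defines
// sqrt(-0) as -0, while Math.pow(-0, 0.5) must be +0; adding +0 first
// (-0 + +0 == +0 in round-to-nearest) makes Sqrtsd produce the correct
// sign.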


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.

  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(xmm1));
  DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
  DCHECK(ToDoubleRegister(instr->result()).is(xmm3));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
    __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  XMMRegister input = ToDoubleRegister(instr->value());
  XMMRegister result = ToDoubleRegister(instr->result());
  XMMRegister temp0 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  DCHECK(instr->value()->Equals(instr->result()));
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister xmm_scratch = double_scratch0();
  Label positive, done, zero;
  __ Xorpd(xmm_scratch, xmm_scratch);
  __ Ucomisd(input_reg, xmm_scratch);
  __ j(above, &positive, Label::kNear);
  __ j(not_carry, &zero, Label::kNear);
  __ Pcmpeqd(input_reg, input_reg);
  __ jmp(&done, Label::kNear);
  __ bind(&zero);
  ExternalReference ninf =
      ExternalReference::address_of_negative_infinity();
  Operand ninf_operand = masm()->ExternalOperand(ninf);
  __ Movsd(input_reg, ninf_operand);
  __ jmp(&done, Label::kNear);
  __ bind(&positive);
  __ fldln2();
  __ subp(rsp, Immediate(kDoubleSize));
  __ Movsd(Operand(rsp, 0), input_reg);
  __ fld_d(Operand(rsp, 0));
  __ fyl2x();
  __ fstp_d(Operand(rsp, 0));
  __ Movsd(input_reg, Operand(rsp, 0));
  __ addp(rsp, Immediate(kDoubleSize));
  __ bind(&done);
}
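
// Illustrative note: the positive path spills the value through the stack
// into the x87 unit, where fldln2 pushes ln(2) and fyl2x computes
// ST(1) * log2(ST(0)) = ln(2) * log2(x) = ln(x). Negative inputs get NaN
// (Pcmpeqd yields an all-ones pattern, a quiet NaN) and zero gets
// -Infinity, matching Math.log.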


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ Lzcntl(result, input);
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->function()).is(rdi));
  DCHECK(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(rdi, no_reg, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(), instr);
  }
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->function()).is(rdi));
  DCHECK(ToRegister(instr->result()).is(rax));

  int arity = instr->arity();
  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
  if (instr->hydrogen()->HasVectorAndSlot()) {
    Register slot_register = ToRegister(instr->temp_slot());
    Register vector_register = ToRegister(instr->temp_vector());
    DCHECK(slot_register.is(rdx));
    DCHECK(vector_register.is(rbx));

    AllowDeferredHandleDereference vector_structure_check;
    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
    int index = vector->GetIndex(instr->hydrogen()->slot());

    __ Move(vector_register, vector);
    __ Move(slot_register, Smi::FromInt(index));

    Handle<Code> ic =
        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    __ Set(rax, arity);
    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->constructor()).is(rdi));
  DCHECK(ToRegister(instr->result()).is(rax));

  __ Set(rax, instr->arity());
  if (instr->arity() == 1) {
    // We only need the allocation site for the case where we have a length
    // argument. That case may bail out to the runtime, which will determine
    // the correct elements kind with the site.
    __ Move(rbx, instr->hydrogen()->site());
  } else {
    __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
  }

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need a holey elements kind here; look at the first
      // argument (the length).
      __ movp(rcx, Operand(rsp, 0));
      __ testp(rcx, rcx);
      __ j(zero, &packed_case, Label::kNear);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      __ jmp(&done, Label::kNear);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}
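
// Illustrative note: with a single length argument, new Array(0) can stay
// on the packed-kind stub, while new Array(5) creates five holes, so the
// nonzero-length check above routes it to the holey-kind stub instead.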


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
  __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ leap(result, Operand(base, ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ leap(result, Operand(base, offset, times_1, 0));
  }
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  HStoreNamedField* hinstr = instr->hydrogen();
  Representation representation = instr->representation();

  HObjectAccess access = hinstr->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    DCHECK(!hinstr->NeedsWriteBarrier());
    Register value = ToRegister(instr->value());
    if (instr->object()->IsConstantOperand()) {
      DCHECK(value.is(rax));
      LConstantOperand* object = LConstantOperand::cast(instr->object());
      __ store_rax(ToExternalReference(object));
    } else {
      Register object = ToRegister(instr->object());
      __ Store(MemOperand(object, offset), value, representation);
    }
    return;
  }

  Register object = ToRegister(instr->object());
  __ AssertNotSmi(object);

  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsInteger32Constant(LConstantOperand::cast(instr->value())));
  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!hinstr->has_transition());
    DCHECK(!hinstr->NeedsWriteBarrier());
    XMMRegister value = ToDoubleRegister(instr->value());
    __ Movsd(FieldOperand(object, offset), value);
    return;
  }

  if (hinstr->has_transition()) {
    Handle<Map> transition = hinstr->transition_map();
    AddDeprecationDependency(transition);
    if (!hinstr->NeedsWriteBarrierForMap()) {
      __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
    } else {
      Register temp = ToRegister(instr->temp());
      __ Move(kScratchRegister, transition);
      __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           kScratchRegister,
                           temp,
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register write_register = object;
  if (!access.IsInobject()) {
    write_register = ToRegister(instr->temp());
    __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
  }

  if (representation.IsSmi() && SmiValuesAre32Bits() &&
      hinstr->value()->representation().IsInteger32()) {
    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    if (FLAG_debug_code) {
      Register scratch = kScratchRegister;
      __ Load(scratch, FieldOperand(write_register, offset), representation);
      __ AssertSmi(scratch);
    }
    // Store int value directly to upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    DCHECK(kSmiTagSize + kSmiShiftSize == 32);
    offset += kPointerSize / 2;
    representation = Representation::Integer32();
  }

  Operand operand = FieldOperand(write_register, offset);

  if (FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    XMMRegister value = ToDoubleRegister(instr->value());
    __ Movsd(operand, value);

  } else if (instr->value()->IsRegister()) {
    Register value = ToRegister(instr->value());
    __ Store(operand, value, representation);
  } else {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (IsInteger32Constant(operand_value)) {
      DCHECK(!hinstr->NeedsWriteBarrier());
      int32_t value = ToInteger32(operand_value);
      if (representation.IsSmi()) {
        __ Move(operand, Smi::FromInt(value));

      } else {
        __ movl(operand, Immediate(value));
      }

    } else if (IsExternalConstant(operand_value)) {
      DCHECK(!hinstr->NeedsWriteBarrier());
      ExternalReference ptr = ToExternalReference(operand_value);
      __ Move(kScratchRegister, ptr);
      __ movp(operand, kScratchRegister);
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      DCHECK(!hinstr->NeedsWriteBarrier());
      __ Move(operand, handle_value);
    }
  }

  if (hinstr->NeedsWriteBarrier()) {
    Register value = ToRegister(instr->value());
    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
    // Update the write barrier for the object for in-object properties.
    __ RecordWriteField(write_register,
                        offset,
                        value,
                        temp,
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        hinstr->SmiCheckForWriteBarrier(),
                        hinstr->PointersToHereCheckForValue());
  }
}
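
// Illustrative note on the upper-half smi store: with 32-bit smi values a
// smi is the payload shifted left by 32 (e.g. 7 is stored as
// 0x0000000700000000), so on little-endian x64 writing a 32-bit integer
// at offset + kPointerSize / 2 rewrites just the payload while the low
// tag word stays zero; no retagging is needed.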


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
  }

  __ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Representation representation = instr->hydrogen()->length()->representation();
  DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
  DCHECK(representation.IsSmiOrInteger32());

  Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
  if (instr->length()->IsConstantOperand()) {
    int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
    Register index = ToRegister(instr->index());
    if (representation.IsSmi()) {
      __ Cmp(index, Smi::FromInt(length));
    } else {
      __ cmpl(index, Immediate(length));
    }
    cc = CommuteCondition(cc);
  } else if (instr->index()->IsConstantOperand()) {
    int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
    if (instr->length()->IsRegister()) {
      Register length = ToRegister(instr->length());
      if (representation.IsSmi()) {
        __ Cmp(length, Smi::FromInt(index));
      } else {
        __ cmpl(length, Immediate(index));
      }
    } else {
      Operand length = ToOperand(instr->length());
      if (representation.IsSmi()) {
        __ Cmp(length, Smi::FromInt(index));
      } else {
        __ cmpl(length, Immediate(index));
      }
    }
  } else {
    Register index = ToRegister(instr->index());
    if (instr->length()->IsRegister()) {
      Register length = ToRegister(instr->length());
      if (representation.IsSmi()) {
        __ cmpp(length, index);
      } else {
        __ cmpl(length, index);
      }
    } else {
      Operand length = ToOperand(instr->length());
      if (representation.IsSmi()) {
        __ cmpp(length, index);
      } else {
        __ cmpl(length, index);
      }
    }
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
  }
}
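
// Illustrative note: the deopt condition is chosen as if comparing
// (length, index), so when the operands are emitted the other way around
// (constant length vs. register index), CommuteCondition flips e.g.
// below_equal into above_equal and the same out-of-bounds test
// (index >= length, unsigned) still holds.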


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
    Register key_reg = ToRegister(key);
    Representation key_representation =
        instr->hydrogen()->key()->representation();
    if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
      __ SmiToInteger64(key_reg, key_reg);
    } else if (instr->hydrogen()->IsDehoisted()) {
      // Sign extend key because it could be a 32 bit negative value
      // and the dehoisted address computation happens in 64 bits.
      __ movsxlq(key_reg, key_reg);
    }
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      instr->base_offset()));

  if (elements_kind == FLOAT32_ELEMENTS) {
    XMMRegister value(ToDoubleRegister(instr->value()));
    __ Cvtsd2ss(value, value);
    __ Movss(operand, value);
  } else if (elements_kind == FLOAT64_ELEMENTS) {
    __ Movsd(operand, ToDoubleRegister(instr->value()));
  } else {
    Register value(ToRegister(instr->value()));
    switch (elements_kind) {
      case INT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ movb(operand, value);
        break;
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ movw(operand, value);
        break;
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ movl(operand, value);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  XMMRegister value = ToDoubleRegister(instr->value());
  LOperand* key = instr->key();
  if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
      instr->hydrogen()->IsDehoisted()) {
    // Sign extend key because it could be a 32 bit negative value
    // and the dehoisted address computation happens in 64 bits.
    __ movsxlq(ToRegister(key), ToRegister(key));
  }
  if (instr->NeedsCanonicalization()) {
    XMMRegister xmm_scratch = double_scratch0();
    // Turn potential sNaN value into qNaN.
    __ Xorpd(xmm_scratch, xmm_scratch);
    __ Subsd(value, xmm_scratch);
  }

  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      instr->base_offset());

  __ Movsd(double_store_operand, value);
}
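
// Illustrative note: subtracting +0 is a cheap NaN canonicalization; any
// arithmetic on a signaling NaN yields a quiet NaN, while every other
// value (including -0, since -0 - +0 == -0) passes through unchanged, so
// the double array only ever holds quiet, canonical NaN bit patterns.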


void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  HStoreKeyed* hinstr = instr->hydrogen();
  LOperand* key = instr->key();
  int offset = instr->base_offset();
  Representation representation = hinstr->value()->representation();

  if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
      instr->hydrogen()->IsDehoisted()) {
    // Sign extend key because it could be a 32 bit negative value
    // and the dehoisted address computation happens in 64 bits.
    __ movsxlq(ToRegister(key), ToRegister(key));
  }
  if (representation.IsInteger32() && SmiValuesAre32Bits()) {
    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
    if (FLAG_debug_code) {
      Register scratch = kScratchRegister;
      __ Load(scratch,
              BuildFastArrayOperand(instr->elements(),
                                    key,
                                    instr->hydrogen()->key()->representation(),
                                    FAST_ELEMENTS,
                                    offset),
              Representation::Smi());
      __ AssertSmi(scratch);
    }
    // Store int value directly to upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    DCHECK(kSmiTagSize + kSmiShiftSize == 32);
    offset += kPointerSize / 2;
  }

  Operand operand =
      BuildFastArrayOperand(instr->elements(),
                            key,
                            instr->hydrogen()->key()->representation(),
                            FAST_ELEMENTS,
                            offset);
  if (instr->value()->IsRegister()) {
    __ Store(operand, ToRegister(instr->value()), representation);
  } else {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (IsInteger32Constant(operand_value)) {
      int32_t value = ToInteger32(operand_value);
      if (representation.IsSmi()) {
        __ Move(operand, Smi::FromInt(value));

      } else {
        __ movl(operand, Immediate(value));
      }
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      __ Move(operand, handle_value);
    }
  }

  if (hinstr->NeedsWriteBarrier()) {
    Register elements = ToRegister(instr->elements());
    DCHECK(instr->value()->IsRegister());
    Register value = ToRegister(instr->value());
    DCHECK(!key->IsConstantOperand());
    SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
        ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    Register key_reg(ToRegister(key));
    __ leap(key_reg, operand);
    __ RecordWrite(elements,
                   key_reg,
                   value,
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed,
                   hinstr->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  if (instr->is_fixed_typed_array()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
  class DeferredMaybeGrowElements final : public LDeferredCode {
   public:
    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };

  Register result = rax;
  DeferredMaybeGrowElements* deferred =
      new (zone()) DeferredMaybeGrowElements(this, instr);
  LOperand* key = instr->key();
  LOperand* current_capacity = instr->current_capacity();

  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
  DCHECK(key->IsConstantOperand() || key->IsRegister());
  DCHECK(current_capacity->IsConstantOperand() ||
         current_capacity->IsRegister());

  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    if (constant_key >= constant_capacity) {
      // Deferred case.
      __ jmp(deferred->entry());
    }
  } else if (key->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    __ cmpl(ToRegister(current_capacity), Immediate(constant_key));
    __ j(less_equal, deferred->entry());
  } else if (current_capacity->IsConstantOperand()) {
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    __ cmpl(ToRegister(key), Immediate(constant_capacity));
    __ j(greater_equal, deferred->entry());
  } else {
    __ cmpl(ToRegister(key), ToRegister(current_capacity));
    __ j(greater_equal, deferred->entry());
  }

  if (instr->elements()->IsRegister()) {
    __ movp(result, ToRegister(instr->elements()));
  } else {
    __ movp(result, ToOperand(instr->elements()));
  }

  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = rax;
  __ Move(result, Smi::FromInt(0));

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    if (instr->object()->IsConstantOperand()) {
      LConstantOperand* constant_object =
          LConstantOperand::cast(instr->object());
      if (IsSmiConstant(constant_object)) {
        Smi* immediate = ToSmi(constant_object);
        __ Move(result, immediate);
      } else {
        Handle<Object> handle_value = ToHandle(constant_object);
        __ Move(result, handle_value);
      }
    } else if (instr->object()->IsRegister()) {
      __ Move(result, ToRegister(instr->object()));
    } else {
      __ movp(result, ToOperand(instr->object()));
    }

    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      __ Move(rbx, ToSmi(LConstantOperand::cast(key)));
    } else {
      __ Move(rbx, ToRegister(key));
      __ Integer32ToSmi(rbx, rbx);
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                               instr->hydrogen()->kind());
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  Condition is_smi = __ CheckSmi(result);
  DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
  __ j(not_equal, &not_applicable);
  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
    __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
    // Write barrier.
    __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
                         kDontSaveFPRegs);
  } else {
    DCHECK(object_reg.is(rax));
    DCHECK(ToRegister(instr->context()).is(rsi));
    PushSafepointRegistersScope scope(this);
    __ Move(rbx, to_map);
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
  __ bind(&no_memento_found);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->left()).is(rdx));
  DCHECK(ToRegister(instr->right()).is(rax));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt final : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Push(Smi::FromInt(const_index));
  } else {
    Register index = ToRegister(instr->index());
    __ Integer32ToSmi(index, index);
    __ Push(index);
  }
  CallRuntimeFromDeferred(
      Runtime::kStringCharCodeAtRT, 2, instr, instr->context());
  __ AssertSmi(rax);
  __ SmiToInteger32(rax, rax);
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode final : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  DCHECK(!char_code.is(result));

  __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
  __ j(above, deferred->entry());
  __ movsxlq(char_code, char_code);
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ movp(result, FieldOperand(result,
                               char_code, times_pointer_size,
                               FixedArray::kHeaderSize));
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}
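
// Illustrative note: one-byte char codes (<= String::kMaxOneByteCharCode)
// are served from the single-character string cache; anything larger,
// e.g. String.fromCharCode(0x20AC), or a cache miss (an undefined cache
// entry) falls through to the deferred runtime call.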


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Integer32ToSmi(char_code, char_code);
  __ Push(char_code);
  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  DCHECK(output->IsDoubleRegister());
  if (input->IsRegister()) {
    __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
  } else {
    __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();

  __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI final : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), SIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  if (SmiValuesAre32Bits()) {
    __ Integer32ToSmi(reg, reg);
  } else {
    DCHECK(SmiValuesAre31Bits());
    DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
    __ Integer32ToSmi(reg, reg);
    __ j(overflow, deferred->entry());
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU final : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ cmpl(reg, Immediate(Smi::kMaxValue));
  __ j(above, deferred->entry());
  __ Integer32ToSmi(reg, reg);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp1,
                                     LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register reg = ToRegister(value);
  Register tmp = ToRegister(temp1);
  XMMRegister temp_xmm = ToDoubleRegister(temp2);

  // Load value into temp_xmm which will be preserved across potential call to
  // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
  // XMM registers on x64).
  if (signedness == SIGNED_INT32) {
    DCHECK(SmiValuesAre31Bits());
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    __ SmiToInteger32(reg, reg);
    __ xorl(reg, Immediate(0x80000000));
    __ Cvtlsi2sd(temp_xmm, reg);
  } else {
    DCHECK(signedness == UNSIGNED_INT32);
    __ LoadUint32(temp_xmm, reg);
  }

  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, &slow);
    __ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // Put a valid pointer value in the stack slot where the result
    // register is stored, as this register is in the pointer map, but contains
    // an integer value.
    __ Set(reg, 0);

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagIU uses the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(reg, rax);
  }

  // Done. Put the value in temp_xmm into the value of the allocated heap
  // number.
  __ bind(&done);
  __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
}
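
// Worked example (illustrative) of the signed overflow recovery above,
// with 31-bit smis: tagging v = 0x50000000 shifts left to 0xA0000000 and
// overflows; SmiToInteger32 arithmetic-shifts back to 0xD0000000, whose
// bit 31 is wrong, and the xorl with 0x80000000 flips it to recover
// 0x50000000 before the value is boxed as a heap number.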
4650
4651
DoNumberTagD(LNumberTagD * instr)4652 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4653 class DeferredNumberTagD final : public LDeferredCode {
4654 public:
4655 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4656 : LDeferredCode(codegen), instr_(instr) { }
4657 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4658 LInstruction* instr() override { return instr_; }
4659
4660 private:
4661 LNumberTagD* instr_;
4662 };
4663
4664 XMMRegister input_reg = ToDoubleRegister(instr->value());
4665 Register reg = ToRegister(instr->result());
4666 Register tmp = ToRegister(instr->temp());
4667
4668 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4669 if (FLAG_inline_new) {
4670 __ AllocateHeapNumber(reg, tmp, deferred->entry());
4671 } else {
4672 __ jmp(deferred->entry());
4673 }
4674 __ bind(deferred->exit());
4675 __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
4676 }


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Smi::FromInt(0));

  {
    PushSafepointRegistersScope scope(this);
    // NumberTagD uses the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // It only calls Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ movp(kScratchRegister, rax);
  }
  __ movp(reg, kScratchRegister);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
    DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kOverflow);
  }
  __ Integer32ToSmi(output, input);
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  }
}
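
// Illustrative sketch of the smi encoding assumed above (editor's note,
// assuming the default 32-bit smi payload on x64): a smi keeps its payload
// in the upper half of the 64-bit word, with the low 32 bits zero, so
// tagging and untagging are plain shifts:
//
//   int64_t tagged   = static_cast<int64_t>(value) << 32;   // Integer32ToSmi
//   int32_t restored = static_cast<int32_t>(tagged >> 32);  // SmiToInteger32
//
// A uint32 value therefore only fails to tag when it exceeds the maximum
// signed 32-bit payload, which is what the overflow check on the kUint32
// path above guards against.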


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  DCHECK(instr->value()->Equals(instr->result()));
  Register input = ToRegister(instr->value());
  if (instr->needs_check()) {
    Condition is_smi = __ CheckSmi(input);
    DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kNotASmi);
  } else {
    __ AssertSmi(input);
  }
  __ SmiToInteger32(input, input);
}


void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                XMMRegister result_reg, NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Label convert, load_smi, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

    // Heap number map check.
    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    // On x64 it is safe to load the value at the heap number offset before
    // evaluating the map check, since all heap objects are at least two
    // words long.
    __ Movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));

    if (can_convert_undefined_to_nan) {
      __ j(not_equal, &convert, Label::kNear);
    } else {
      DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
    }

    if (deoptimize_on_minus_zero) {
      XMMRegister xmm_scratch = double_scratch0();
      __ Xorpd(xmm_scratch, xmm_scratch);
      __ Ucomisd(xmm_scratch, result_reg);
      __ j(not_equal, &done, Label::kNear);
      __ Movmskpd(kScratchRegister, result_reg);
      __ testl(kScratchRegister, Immediate(1));
      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
    }
    __ jmp(&done, Label::kNear);

    if (can_convert_undefined_to_nan) {
      __ bind(&convert);

      // Convert undefined (and the hole) to NaN; anything else deoptimizes.
      // pcmpeqd of a register with itself sets all bits, a quiet NaN.
      __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);

      __ Pcmpeqd(result_reg, result_reg);
      __ jmp(&done, Label::kNear);
    }
  } else {
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  // Smi to XMM conversion.
  __ bind(&load_smi);
  __ SmiToInteger32(kScratchRegister, input_reg);
  __ Cvtlsi2sd(result_reg, kScratchRegister);
  __ bind(&done);
}
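
// The minus-zero test above exploits two SSE facts (editor's note):
// ucomisd reports -0.0 == +0.0, so the equality check alone cannot tell
// them apart, and movmskpd copies the sign bits into a GP register, where
// bit 0 is the sign of the low lane. The C++ equivalent is roughly:
//
//   bool is_minus_zero = (d == 0.0) && std::signbit(d);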


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
  Register input_reg = ToRegister(instr->value());

  if (instr->truncating()) {
    Label no_heap_number, check_bools, check_false;

    // Heap number map check.
    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &no_heap_number, Label::kNear);
    __ TruncateHeapNumberToI(input_reg, input_reg);
    __ jmp(done);

    __ bind(&no_heap_number);
    // Check for oddballs. Undefined and False are converted to zero, and
    // True to one, for truncating conversions.
    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
    __ j(not_equal, &check_bools, Label::kNear);
    __ Set(input_reg, 0);
    __ jmp(done);

    __ bind(&check_bools);
    __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
    __ j(not_equal, &check_false, Label::kNear);
    __ Set(input_reg, 1);
    __ jmp(done);

    __ bind(&check_false);
    __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
    DeoptimizeIf(not_equal, instr,
                 Deoptimizer::kNotAHeapNumberUndefinedBoolean);
    __ Set(input_reg, 0);
  } else {
    XMMRegister scratch = ToDoubleRegister(instr->temp());
    DCHECK(!scratch.is(xmm0));
    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
    __ Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    __ Cvttsd2si(input_reg, xmm0);
    __ Cvtlsi2sd(scratch, input_reg);
    __ Ucomisd(xmm0, scratch);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
    DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
    if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
      __ testl(input_reg, input_reg);
      __ j(not_zero, done);
      __ Movmskpd(input_reg, xmm0);
      __ andl(input_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
    }
  }
}
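
// The non-truncating path above uses a round-trip check for exactness
// (editor's sketch in C++ terms):
//
//   int32_t i = static_cast<int32_t>(d);       // cvttsd2si, truncates
//   if (static_cast<double>(i) != d) deopt;    // precision was lost
//
// NaN compares unordered, setting the parity flag, which is why
// parity_even deoptimizes separately with reason kNaN.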


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI final : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  DCHECK(input->Equals(instr->result()));
  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiToInteger32(input_reg, input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
    __ JumpIfNotSmi(input_reg, deferred->entry());
    __ SmiToInteger32(input_reg, input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(instr, input_reg, result_reg, mode);
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsDoubleRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, input_reg);
  } else {
    Label lost_precision, is_nan, minus_zero, done;
    XMMRegister xmm_scratch = double_scratch0();
    Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
    __ DoubleToI(result_reg, input_reg, xmm_scratch,
                 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
                 &is_nan, &minus_zero, dist);
    __ jmp(&done, dist);
    __ bind(&lost_precision);
    DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
    __ bind(&is_nan);
    DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
    __ bind(&minus_zero);
    DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
    __ bind(&done);
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsDoubleRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  Label lost_precision, is_nan, minus_zero, done;
  XMMRegister xmm_scratch = double_scratch0();
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  __ DoubleToI(result_reg, input_reg, xmm_scratch,
               instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
               &minus_zero, dist);
  __ jmp(&done, dist);
  __ bind(&lost_precision);
  DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
  __ bind(&is_nan);
  DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
  __ bind(&minus_zero);
  DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
  __ bind(&done);
  __ Integer32ToSmi(result_reg, result_reg);
  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  Condition cc = masm()->CheckSmi(ToRegister(input));
  DeoptimizeIf(NegateCondition(cc), instr, Deoptimizer::kNotASmi);
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    Condition cc = masm()->CheckSmi(ToRegister(input));
    DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
  }
}


void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
  Register view = ToRegister(instr->view());

  __ movp(kScratchRegister,
          FieldOperand(view, JSArrayBufferView::kBufferOffset));
  __ testb(FieldOperand(kScratchRegister, JSArrayBuffer::kBitFieldOffset),
           Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
  DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());

  __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
            Immediate(static_cast<int8_t>(first)));

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
    } else {
      DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
      // Omit the upper-bound check if the interval extends to the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
                Immediate(static_cast<int8_t>(last)));
        DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
               Immediate(mask));
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
                   Deoptimizer::kWrongInstanceType);
    } else {
      __ movzxbl(kScratchRegister,
                 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
      __ andb(kScratchRegister, Immediate(mask));
      __ cmpb(kScratchRegister, Immediate(tag));
      DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
    }
  }
}
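
// Editor's note on the mask trick above: when the mask is a single bit,
// the tag is in practice either zero or that same bit, so one testb
// decides the check:
//
//   (instance_type & mask) == 0  <=>  tag == 0
//
// Only for composite masks is the full movzx/and/cmp sequence needed.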


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  __ Cmp(reg, instr->hydrogen()->object().handle());
  DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ Push(object);
    __ Set(rsi, 0);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);

    __ testp(rax, Immediate(kSmiTagMask));
  }
  DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(reg, map);
    __ j(equal, &success, Label::kNear);
  }

  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(reg, map);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
  }

  __ bind(&success);
}
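
// Editor's note: stability checks compile to no code at all. Instead of
// comparing maps at run time, the compiler registers a dependency on each
// stable map; if any of them later transitions, the optimized code is
// deoptimized wholesale. Inline map comparisons are only emitted when the
// maps are not all stable.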


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  XMMRegister xmm_scratch = double_scratch0();
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  DCHECK(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  DCHECK(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
  XMMRegister xmm_scratch = double_scratch0();
  Label is_smi, done, heap_number;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  __ JumpIfSmi(input_reg, &is_smi, dist);

  // Check for a heap number.
  __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ Cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
  __ xorl(input_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // Heap number case.
  __ bind(&heap_number);
  __ Movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // Smi case.
  __ bind(&is_smi);
  __ SmiToInteger32(input_reg, input_reg);
  __ ClampUint8(input_reg);

  __ bind(&done);
}


void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ Movq(result_reg, value_reg);
    __ shrq(result_reg, Immediate(32));
  } else {
    __ Movd(result_reg, value_reg);
  }
}


void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  XMMRegister result_reg = ToDoubleRegister(instr->result());
  __ movl(kScratchRegister, hi_reg);
  __ shlq(kScratchRegister, Immediate(32));
  __ orq(kScratchRegister, lo_reg);
  __ Movq(result_reg, kScratchRegister);
}
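
// DoDoubleBits and DoConstructDouble above just move raw IEEE-754 bits
// between GP and XMM registers. An equivalent C++ sketch (editor's note,
// using a bit_cast helper):
//
//   uint64_t bits = bit_cast<uint64_t>(d);
//   uint32_t hi = static_cast<uint32_t>(bits >> 32);  // sign/exponent side
//   uint32_t lo = static_cast<uint32_t>(bits);        // low mantissa bits
//   double d2 = bit_cast<double>((uint64_t{hi} << 32) | lo);  // d2 == d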


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate final : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ movl(temp, Immediate((size / kPointerSize) - 1));
    } else {
      temp = ToRegister(instr->size());
      __ sarp(temp, Immediate(kPointerSizeLog2));
      __ decl(temp);
    }
    Label loop;
    __ bind(&loop);
    __ Move(FieldOperand(result, temp, times_pointer_size, 0),
            isolate()->factory()->one_pointer_filler_map());
    __ decl(temp);
    __ j(not_zero, &loop);
  }
}
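
// Editor's note on the prefill loop above: it walks the new object from the
// highest word down to word 1, storing the one-pointer filler map so the
// heap stays iterable before the caller initializes the real fields; the
// word at offset 0 is left for the object's actual map. Roughly:
//
//   for (int i = size / kPointerSize - 1; i > 0; i--)
//     object_word[i] = one_pointer_filler_map;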


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ Integer32ToSmi(size, size);
    __ Push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  int flags = 0;
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).is(rax));
  __ Push(rax);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->value()).is(rbx));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  __ Move(rax, isolate()->factory()->number_string());
  __ jmp(&end);
  __ bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}
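
// Editor's note: the smi fast path above answers typeof without a stub
// call, since every smi is a number:
//
//   if (IsSmi(value)) return "number";   // JumpIfNotSmi falls through
//   return TypeofStub(value);            // everything else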


void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  DCHECK(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    __ Push(ToHandle(LConstantOperand::cast(operand)));
  } else if (operand->IsRegister()) {
    __ Push(ToRegister(operand));
  } else {
    __ Push(ToOperand(operand));
  }
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  Label::Distance true_distance = left_block == next_block ? Label::kNear
                                                           : Label::kFar;
  Label::Distance false_distance = right_block == next_block ? Label::kNear
                                                              : Label::kFar;
  Condition final_branch_condition = no_condition;
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label, true_distance);
    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    final_branch_condition = below;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (String::Equals(type_name, factory->function_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for callable and not undetectable objects => true.
    __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
    __ movzxbl(input, FieldOperand(input, Map::kBitFieldOffset));
    __ andb(input,
            Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    __ cmpb(input, Immediate(1 << Map::kIsCallable));
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ j(equal, true_label, true_distance);
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
    __ j(below, false_label, false_distance);
    // Check for callable or undetectable objects => false.
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    final_branch_condition = zero;

// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)       \
  } else if (String::Equals(type_name, factory->type##_string())) { \
    __ JumpIfSmi(input, false_label, false_distance);               \
    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),     \
                   Heap::k##Type##MapRootIndex);                    \
    final_branch_condition = equal;
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
// clang-format on

  } else {
    __ jmp(false_label, false_distance);
  }

  return final_branch_condition;
}
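
// Editor's note on the "function" case above: a value is typeof "function"
// exactly when its map is callable and not undetectable, i.e.
//
//   (bit_field & ((1 << kIsCallable) | (1 << kIsUndetectable)))
//       == (1 << kIsCallable)
//
// which the andb/cmpb pair tests in one pass over the bit field.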


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}
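
// Editor's sketch of the padding computation above: if fewer than
// space_needed bytes were emitted since the last lazy-deopt point, nops
// are inserted so the deoptimizer can later patch a full call sequence:
//
//   padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;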


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
  // (the needed return address), even though the implementation of LAZY and
  // EAGER is now identical. When LAZY is eventually completely folded into
  // EAGER, remove the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }
  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(above_equal, &done, Label::kNear);

    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(rsi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}
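
// Editor's note: both stack-check flavors above reduce to the same
// comparison against the stack limit root,
//
//   if (rsp < stack_limit) call StackCheck;   // above_equal skips it
//
// differing only in whether the call is emitted inline (function entry) or
// in deferred code (backwards branch).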


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));

  Condition cc = masm()->CheckSmi(rax);
  DeoptimizeIf(cc, instr, Deoptimizer::kSmi);

  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
  __ CmpObjectType(rax, JS_PROXY_TYPE, rcx);
  DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);

  Label use_cache, call_runtime;
  Register null_value = rdi;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ CheckEnumCache(null_value, &call_runtime);

  __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ Push(rax);
  CallRuntime(Runtime::kGetPropertyNamesFast, instr);

  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kMetaMapRootIndex);
  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
  __ bind(&use_cache);
}
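
// Editor's note on the for-in preparation above: if the receiver's enum
// cache is valid, iteration can use the cached keys hanging off its map;
// otherwise the runtime computes the property names. The runtime result is
// only usable here if it is itself a map (checked via the meta map); a
// plain fixed array of names forces a deopt back to full codegen.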


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Cmp(result, Smi::FromInt(0));
  __ j(not_equal, &load_cache, Label::kNear);
  __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ movp(result,
          FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ movp(result,
          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  Condition cc = masm()->CheckSmi(result);
  DeoptimizeIf(cc, instr, Deoptimizer::kNoCache);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmpp(ToRegister(instr->map()),
          FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
  __ xorp(rsi, rsi);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(object, rax);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);

  Label out_of_object, done;
  __ Move(kScratchRegister, Smi::FromInt(1));
  __ testp(index, kScratchRegister);
  __ j(not_zero, deferred->entry());

  __ sarp(index, Immediate(1));

  __ SmiToInteger32(index, index);
  __ cmpl(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ negl(index);
  // Index is now equal to the out-of-object property index plus 1.
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}
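
// Editor's note: the index operand above packs three facts into one smi
// (sketch of the decoding):
//
//   payload bit 0 set  ->  mutable HeapNumber field; load via the runtime
//   payload >> 1 >= 0  ->  in-object field at JSObject::kHeaderSize
//   payload >> 1 <  0  ->  out-of-object field in the properties array
//
// After the sarp/SmiToInteger32 pair the register holds the signed field
// index, and negl turns the out-of-object case into a forward offset.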


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), context);
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ Push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X64