// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/frames.h"
#include "src/ia32/assembler-ia32.h"
#include "src/ia32/frames-ia32.h"
#include "src/ia32/macro-assembler-ia32.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


#define kScratchDoubleReg xmm0


// Adds IA-32 specific methods for decoding operands.
class IA32OperandConverter : public InstructionOperandConverter {
 public:
  IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Operand InputOperand(size_t index, int extra = 0) {
    return ToOperand(instr_->InputAt(index), extra);
  }

  Immediate InputImmediate(size_t index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    if (op->IsRegister()) {
      DCHECK(extra == 0);
      return Operand(ToRegister(op));
    } else if (op->IsFPRegister()) {
      DCHECK(extra == 0);
      return Operand(ToDoubleRegister(op));
    }
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
  }

  Operand SlotToOperand(int slot, int extra = 0) {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return Operand(offset.from_stack_pointer() ? esp : ebp,
                   offset.offset() + extra);
  }

  Operand HighOperand(InstructionOperand* op) {
    DCHECK(op->IsFPStackSlot());
    return ToOperand(op, kPointerSize);
  }

  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    if (constant.type() == Constant::kInt32 &&
        (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
         constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
         constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
      return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
                       constant.rmode());
    }
    switch (constant.type()) {
      case Constant::kInt32:
        return Immediate(constant.ToInt32());
      case Constant::kFloat32:
        return Immediate(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Immediate(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
        return Immediate(constant.ToExternalReference());
      case Constant::kHeapObject:
        return Immediate(constant.ToHeapObject());
      case Constant::kInt64:
        break;
      case Constant::kRpoNumber:
        return Immediate::CodeRelativeOffset(ToLabel(operand));
    }
    UNREACHABLE();
    return Immediate(-1);
  }

  static size_t NextOffset(size_t* offset) {
    size_t i = *offset;
    (*offset)++;
    return i;
  }

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
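    // The scaled addressing modes are declared consecutively, so the distance
    // from the times_1 mode is the scale, e.g. ScaleFor(kMode_MR1, kMode_MR4)
    // yields times_4.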
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }

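  // Decodes the addressing mode from the opcode and consumes the matching
  // instruction inputs starting at *offset: a base and/or an index register
  // (the index scaled by 1, 2, 4 or 8), optionally followed by a constant
  // displacement. For example, kMode_MR4I consumes three inputs and yields
  // the operand [base + index * 4 + disp].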
  Operand MemoryOperand(size_t* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
        return Operand(base, ctant.ToInt32(), ctant.rmode());
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
        return Operand(base, index, scale, ctant.ToInt32(), ctant.rmode());
      }
      case kMode_M1:
      case kMode_M2:
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
        return Operand(index, scale, ctant.ToInt32(), ctant.rmode());
      }
      case kMode_MI: {
        Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
        return Operand(ctant.ToInt32(), ctant.rmode());
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand(size_t first_input = 0) {
    return MemoryOperand(&first_input);
  }
};


namespace {

bool HasImmediateInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsImmediate();
}


class OutOfLineLoadInteger final : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ xor_(result_, result_); }

 private:
  Register const result_;
};

class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
 public:
  OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
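    // 0.0f / 0.0f yields a quiet NaN, which is the required result.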
    __ xorps(result_, result_);
    __ divss(result_, result_);
  }

 private:
  XMMRegister const result_;
};

class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
 public:
  OutOfLineLoadFloat64NaN(CodeGenerator* gen, XMMRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ xorpd(result_, result_);
    __ divsd(result_, result_);
  }

 private:
  XMMRegister const result_;
};

class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
 public:
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             XMMRegister input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() final {
    __ sub(esp, Immediate(kDoubleSize));
    __ movsd(MemOperand(esp, 0), input_);
    __ SlowTruncateToI(result_, esp, 0);
    __ add(esp, Immediate(kDoubleSize));
  }

 private:
  Register const result_;
  XMMRegister const input_;
};


class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        operand_(operand),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, zero,
                     exit());
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         remembered_set_action, save_fp_mode);
    __ lea(scratch1_, operand_);
    __ CallStub(&stub);
  }

 private:
  Register const object_;
  Operand const operand_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
};

}  // namespace

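// The checked load/store macros below emit an unsigned comparison of the
// offset against the length input. Out-of-bounds loads branch to out-of-line
// code that produces the default value (zero or NaN); out-of-bounds stores
// are simply skipped.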
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN) \
  do { \
    auto result = i.OutputDoubleRegister(); \
    auto offset = i.InputRegister(0); \
    if (instr->InputAt(1)->IsRegister()) { \
      __ cmp(offset, i.InputRegister(1)); \
    } else { \
      __ cmp(offset, i.InputImmediate(1)); \
    } \
    OutOfLineCode* ool = new (zone()) OutOfLineLoadNaN(this, result); \
    __ j(above_equal, ool->entry()); \
    __ asm_instr(result, i.MemoryOperand(2)); \
    __ bind(ool->exit()); \
  } while (false)

#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
  do { \
    auto result = i.OutputRegister(); \
    auto offset = i.InputRegister(0); \
    if (instr->InputAt(1)->IsRegister()) { \
      __ cmp(offset, i.InputRegister(1)); \
    } else { \
      __ cmp(offset, i.InputImmediate(1)); \
    } \
    OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
    __ j(above_equal, ool->entry()); \
    __ asm_instr(result, i.MemoryOperand(2)); \
    __ bind(ool->exit()); \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
  do { \
    auto offset = i.InputRegister(0); \
    if (instr->InputAt(1)->IsRegister()) { \
      __ cmp(offset, i.InputRegister(1)); \
    } else { \
      __ cmp(offset, i.InputImmediate(1)); \
    } \
    Label done; \
    __ j(above_equal, &done, Label::kNear); \
    __ asm_instr(i.MemoryOperand(3), i.InputDoubleRegister(2)); \
    __ bind(&done); \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
  do { \
    auto offset = i.InputRegister(0); \
    if (instr->InputAt(1)->IsRegister()) { \
      __ cmp(offset, i.InputRegister(1)); \
    } else { \
      __ cmp(offset, i.InputImmediate(1)); \
    } \
    Label done; \
    __ j(above_equal, &done, Label::kNear); \
    if (instr->InputAt(2)->IsRegister()) { \
      __ asm_instr(i.MemoryOperand(3), i.InputRegister(2)); \
    } else { \
      __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
    } \
    __ bind(&done); \
  } while (false)

#define ASSEMBLE_COMPARE(asm_instr) \
  do { \
    if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
      size_t index = 0; \
      Operand left = i.MemoryOperand(&index); \
      if (HasImmediateInput(instr, index)) { \
        __ asm_instr(left, i.InputImmediate(index)); \
      } else { \
        __ asm_instr(left, i.InputRegister(index)); \
      } \
    } else { \
      if (HasImmediateInput(instr, 1)) { \
        if (instr->InputAt(0)->IsRegister()) { \
          __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
        } else { \
          __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
        } \
      } else { \
        if (instr->InputAt(1)->IsRegister()) { \
          __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \
        } else { \
          __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \
        } \
      } \
    } \
  } while (0)

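// These macros call the C implementation of an ieee754 function. On ia32 the
// arguments are passed on the stack (cdecl) and the result is returned in
// x87 st(0), so it is spilled through memory to get it into an XMM register.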
#define ASSEMBLE_IEEE754_BINOP(name) \
  do { \
    /* Pass two doubles as arguments on the stack. */ \
    __ PrepareCallCFunction(4, eax); \
    __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
    __ movsd(Operand(esp, 1 * kDoubleSize), i.InputDoubleRegister(1)); \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
                     4); \
    /* Return value is in st(0) on ia32. */ \
    /* Store it into the result register. */ \
    __ sub(esp, Immediate(kDoubleSize)); \
    __ fstp_d(Operand(esp, 0)); \
    __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
    __ add(esp, Immediate(kDoubleSize)); \
  } while (false)

#define ASSEMBLE_IEEE754_UNOP(name) \
  do { \
    /* Pass one double as argument on the stack. */ \
    __ PrepareCallCFunction(2, eax); \
    __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
                     2); \
    /* Return value is in st(0) on ia32. */ \
    /* Store it into the result register. */ \
    __ sub(esp, Immediate(kDoubleSize)); \
    __ fstp_d(Operand(esp, 0)); \
    __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
    __ add(esp, Immediate(kDoubleSize)); \
  } while (false)

void CodeGenerator::AssembleDeconstructFrame() {
  __ mov(esp, ebp);
  __ pop(ebp);
}

void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    __ mov(ebp, MemOperand(ebp, 0));
  }
  frame_access_state()->SetFrameAccessToSP();
}

void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register, Register,
                                                     Register) {
  // There are not enough temp registers left on ia32 for a call instruction
  // so we pick some scratch registers and save/restore them manually here.
  int scratch_count = 3;
  Register scratch1 = ebx;
  Register scratch2 = ecx;
  Register scratch3 = edx;
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ cmp(Operand(ebp, StandardFrameConstants::kContextOffset),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &done, Label::kNear);

  __ push(scratch1);
  __ push(scratch2);
  __ push(scratch3);

  // Load the arguments count from the current arguments adaptor frame (note
  // that it does not include the receiver).
  Register caller_args_count_reg = scratch1;
  __ mov(caller_args_count_reg,
         Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3, ReturnAddressState::kOnStack, scratch_count);
  __ pop(scratch3);
  __ pop(scratch2);
  __ pop(scratch1);

  __ bind(&done);
}

namespace {

void AdjustStackPointerForTailCall(MacroAssembler* masm,
                                   FrameAccessState* state,
                                   int new_slot_above_sp,
                                   bool allow_shrinkage = true) {
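  // Computes how many slots the stack pointer is away from where the tail
  // call expects it; the stack is grown eagerly, but only shrunk when the
  // caller allows it.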
  int current_sp_offset = state->GetSPToFPSlotCount() +
                          StandardFrameConstants::kFixedSlotCountAboveFp;
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {
    masm->sub(esp, Immediate(stack_slot_delta * kPointerSize));
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    masm->add(esp, Immediate(-stack_slot_delta * kPointerSize));
    state->IncreaseSPDelta(stack_slot_delta);
  }
}

}  // namespace

void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_stack_slot) {
  CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
  ZoneVector<MoveOperands*> pushes(zone());
  GetPushCompatibleMoves(instr, flags, &pushes);

  if (!pushes.empty() &&
      (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
       first_unused_stack_slot)) {
    IA32OperandConverter g(this, instr);
    for (auto move : pushes) {
      LocationOperand destination_location(
          LocationOperand::cast(move->destination()));
      InstructionOperand source(move->source());
      AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                    destination_location.index());
      if (source.IsStackSlot()) {
        LocationOperand source_location(LocationOperand::cast(source));
        __ push(g.SlotToOperand(source_location.index()));
      } else if (source.IsRegister()) {
        LocationOperand source_location(LocationOperand::cast(source));
        __ push(source_location.GetRegister());
      } else if (source.IsImmediate()) {
        __ push(Immediate(ImmediateOperand::cast(source).inline_value()));
      } else {
        // Pushes of non-scalar data types are not supported.
        UNIMPLEMENTED();
      }
      frame_access_state()->IncreaseSPDelta(1);
      move->Eliminate();
    }
  }
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot, false);
}

void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot);
}

// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    Instruction* instr) {
  IA32OperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
  switch (arch_opcode) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
        __ call(reg);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObjectFromJSFunction:
    case kArchTailCallCodeObject: {
      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                         no_reg, no_reg, no_reg);
      }
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ jmp(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
        __ jmp(reg);
      }
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchTailCallAddress: {
      CHECK(!HasImmediateInput(instr, 0));
      Register reg = i.InputRegister(0);
      __ jmp(reg);
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallJSFunctionFromJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, no_reg,
                                       no_reg, no_reg);
      __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchPrepareCallCFunction: {
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, i.TempRegister(0));
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall();
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      frame_access_state()->SetFrameAccessToDefault();
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchComment: {
      Address comment_string = i.InputExternalReference(0).address();
      __ RecordComment(reinterpret_cast<const char*>(comment_string));
      break;
    }
    case kArchDebugBreak:
      __ int3();
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      Deoptimizer::BailoutType bailout_type =
          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
      CodeGenResult result = AssembleDeoptimizerCall(
          deopt_state_id, bailout_type, current_source_position_);
      if (result != kSuccess) return result;
      break;
    }
    case kArchRet:
      AssembleReturn(instr->InputAt(0));
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), esp);
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), ebp);
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->has_frame()) {
        __ mov(i.OutputRegister(), Operand(ebp, 0));
      } else {
        __ mov(i.OutputRegister(), ebp);
      }
      break;
    case kArchTruncateDoubleToI: {
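      // On overflow, cvttsd2si produces 0x80000000 (INT32_MIN). Comparing the
      // result against 1 overflows exactly for that value, so the overflow
      // flag routes only out-of-range inputs to the slow out-of-line path.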
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
      __ cvttsd2si(result, Operand(input));
      __ cmp(result, 1);
      __ j(overflow, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      Register value = i.InputRegister(index);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
                                                   scratch0, scratch1, mode);
      __ mov(operand, value);
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask,
                       not_zero, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      Register base;
      if (offset.from_stack_pointer()) {
        base = esp;
      } else {
        base = ebp;
      }
      __ lea(i.OutputRegister(), Operand(base, offset.offset()));
      break;
    }
    case kIeee754Float64Acos:
      ASSEMBLE_IEEE754_UNOP(acos);
      break;
    case kIeee754Float64Acosh:
      ASSEMBLE_IEEE754_UNOP(acosh);
      break;
    case kIeee754Float64Asin:
      ASSEMBLE_IEEE754_UNOP(asin);
      break;
    case kIeee754Float64Asinh:
      ASSEMBLE_IEEE754_UNOP(asinh);
      break;
    case kIeee754Float64Atan:
      ASSEMBLE_IEEE754_UNOP(atan);
      break;
    case kIeee754Float64Atanh:
      ASSEMBLE_IEEE754_UNOP(atanh);
      break;
    case kIeee754Float64Atan2:
      ASSEMBLE_IEEE754_BINOP(atan2);
      break;
    case kIeee754Float64Cbrt:
      ASSEMBLE_IEEE754_UNOP(cbrt);
      break;
    case kIeee754Float64Cos:
      ASSEMBLE_IEEE754_UNOP(cos);
      break;
    case kIeee754Float64Cosh:
      ASSEMBLE_IEEE754_UNOP(cosh);
      break;
    case kIeee754Float64Expm1:
      ASSEMBLE_IEEE754_UNOP(expm1);
      break;
    case kIeee754Float64Exp:
      ASSEMBLE_IEEE754_UNOP(exp);
      break;
    case kIeee754Float64Log:
      ASSEMBLE_IEEE754_UNOP(log);
      break;
    case kIeee754Float64Log1p:
      ASSEMBLE_IEEE754_UNOP(log1p);
      break;
    case kIeee754Float64Log2:
      ASSEMBLE_IEEE754_UNOP(log2);
      break;
    case kIeee754Float64Log10:
      ASSEMBLE_IEEE754_UNOP(log10);
      break;
    case kIeee754Float64Pow: {
      // TODO(bmeurer): Improve integration of the stub.
      if (!i.InputDoubleRegister(1).is(xmm2)) {
        __ movaps(xmm2, i.InputDoubleRegister(0));
        __ movaps(xmm1, i.InputDoubleRegister(1));
      } else {
        __ movaps(xmm0, i.InputDoubleRegister(0));
        __ movaps(xmm1, xmm2);
        __ movaps(xmm2, xmm0);
      }
      MathPowStub stub(isolate(), MathPowStub::DOUBLE);
      __ CallStub(&stub);
      __ movaps(i.OutputDoubleRegister(), xmm3);
      break;
    }
    case kIeee754Float64Sin:
      ASSEMBLE_IEEE754_UNOP(sin);
      break;
    case kIeee754Float64Sinh:
      ASSEMBLE_IEEE754_UNOP(sinh);
      break;
    case kIeee754Float64Tan:
      ASSEMBLE_IEEE754_UNOP(tan);
      break;
    case kIeee754Float64Tanh:
      ASSEMBLE_IEEE754_UNOP(tanh);
      break;
    case kIA32Add:
      if (HasImmediateInput(instr, 1)) {
        __ add(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ add(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32And:
      if (HasImmediateInput(instr, 1)) {
        __ and_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ and_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Cmp:
      ASSEMBLE_COMPARE(cmp);
      break;
    case kIA32Cmp16:
      ASSEMBLE_COMPARE(cmpw);
      break;
    case kIA32Cmp8:
      ASSEMBLE_COMPARE(cmpb);
      break;
    case kIA32Test:
      ASSEMBLE_COMPARE(test);
      break;
    case kIA32Test16:
      ASSEMBLE_COMPARE(test_w);
      break;
    case kIA32Test8:
      ASSEMBLE_COMPARE(test_b);
      break;
    case kIA32Imul:
      if (HasImmediateInput(instr, 1)) {
        __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
      } else {
        __ imul(i.OutputRegister(), i.InputOperand(1));
      }
      break;
    case kIA32ImulHigh:
      __ imul(i.InputRegister(1));
      break;
    case kIA32UmulHigh:
      __ mul(i.InputRegister(1));
      break;
    case kIA32Idiv:
      __ cdq();
      __ idiv(i.InputOperand(1));
      break;
    case kIA32Udiv:
      __ Move(edx, Immediate(0));
      __ div(i.InputOperand(1));
      break;
    case kIA32Not:
      __ not_(i.OutputOperand());
      break;
    case kIA32Neg:
      __ neg(i.OutputOperand());
      break;
    case kIA32Or:
      if (HasImmediateInput(instr, 1)) {
        __ or_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ or_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Xor:
      if (HasImmediateInput(instr, 1)) {
        __ xor_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ xor_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Sub:
      if (HasImmediateInput(instr, 1)) {
        __ sub(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ sub(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Shl:
      if (HasImmediateInput(instr, 1)) {
        __ shl(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ shl_cl(i.OutputOperand());
      }
      break;
    case kIA32Shr:
      if (HasImmediateInput(instr, 1)) {
        __ shr(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ shr_cl(i.OutputOperand());
      }
      break;
    case kIA32Sar:
      if (HasImmediateInput(instr, 1)) {
        __ sar(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ sar_cl(i.OutputOperand());
      }
      break;
    case kIA32AddPair: {
      // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
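      // This is a 64-bit add: the low words are added first, then the high
      // words together with the carry via adc.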
      bool use_temp = false;
      if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
          i.OutputRegister(0).code() == i.InputRegister(3).code()) {
        // We cannot write to the output register directly, because it would
        // overwrite an input for adc. We have to use the temp register.
        use_temp = true;
        __ Move(i.TempRegister(0), i.InputRegister(0));
        __ add(i.TempRegister(0), i.InputRegister(2));
      } else {
        __ add(i.OutputRegister(0), i.InputRegister(2));
      }
      __ adc(i.InputRegister(1), Operand(i.InputRegister(3)));
      if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
        __ Move(i.OutputRegister(1), i.InputRegister(1));
      }
      if (use_temp) {
        __ Move(i.OutputRegister(0), i.TempRegister(0));
      }
      break;
    }
    case kIA32SubPair: {
      // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
      bool use_temp = false;
      if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
          i.OutputRegister(0).code() == i.InputRegister(3).code()) {
        // We cannot write to the output register directly, because it would
        // overwrite an input for sbb. We have to use the temp register.
        use_temp = true;
        __ Move(i.TempRegister(0), i.InputRegister(0));
        __ sub(i.TempRegister(0), i.InputRegister(2));
      } else {
        __ sub(i.OutputRegister(0), i.InputRegister(2));
      }
      __ sbb(i.InputRegister(1), Operand(i.InputRegister(3)));
      if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
        __ Move(i.OutputRegister(1), i.InputRegister(1));
      }
      if (use_temp) {
        __ Move(i.OutputRegister(0), i.TempRegister(0));
      }
      break;
    }
    case kIA32MulPair: {
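      // 64-bit multiply: out_high = left_high * right_low +
      // left_low * right_high + high half of left_low * right_low. This
      // relies on the instruction selector constraining TempRegister(0) to
      // edx, which is where the unsigned `mul` below leaves that high half
      // (otherwise the final add would re-add the cross products).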
      __ imul(i.OutputRegister(1), i.InputOperand(0));
      __ mov(i.TempRegister(0), i.InputOperand(1));
      __ imul(i.TempRegister(0), i.InputOperand(2));
      __ add(i.OutputRegister(1), i.TempRegister(0));
      __ mov(i.OutputRegister(0), i.InputOperand(0));
      // Multiplies the low words and stores the 64-bit result in edx:eax.
      __ mul(i.InputRegister(2));
      __ add(i.OutputRegister(1), i.TempRegister(0));

      break;
    }
    case kIA32ShlPair:
      if (HasImmediateInput(instr, 2)) {
        __ ShlPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
      } else {
        // Shift has been loaded into CL by the register allocator.
        __ ShlPair_cl(i.InputRegister(1), i.InputRegister(0));
      }
      break;
    case kIA32ShrPair:
      if (HasImmediateInput(instr, 2)) {
        __ ShrPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
      } else {
        // Shift has been loaded into CL by the register allocator.
        __ ShrPair_cl(i.InputRegister(1), i.InputRegister(0));
      }
      break;
    case kIA32SarPair:
      if (HasImmediateInput(instr, 2)) {
        __ SarPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
      } else {
        // Shift has been loaded into CL by the register allocator.
        __ SarPair_cl(i.InputRegister(1), i.InputRegister(0));
      }
      break;
    case kIA32Ror:
      if (HasImmediateInput(instr, 1)) {
        __ ror(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ ror_cl(i.OutputOperand());
      }
      break;
    case kIA32Lzcnt:
      __ Lzcnt(i.OutputRegister(), i.InputOperand(0));
      break;
    case kIA32Tzcnt:
      __ Tzcnt(i.OutputRegister(), i.InputOperand(0));
      break;
    case kIA32Popcnt:
      __ Popcnt(i.OutputRegister(), i.InputOperand(0));
      break;
    case kSSEFloat32Cmp:
      __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat32Add:
      __ addss(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat32Sub:
      __ subss(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat32Mul:
      __ mulss(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat32Div:
      __ divss(i.InputDoubleRegister(0), i.InputOperand(1));
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulss depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kSSEFloat32Sqrt:
      __ sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEFloat32Abs: {
      // TODO(bmeurer): Use 128-bit constants.
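      // pcmpeqd of a register with itself sets all bits; shifting each
      // quadword right by 33 leaves 0x7fffffff in its low word, a mask that
      // clears the sign bit of the scalar float32 in the low lane.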
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 33);
      __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat32Neg: {
      // TODO(bmeurer): Use 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 31);
      __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat32Round: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      RoundingMode const mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      __ roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
      break;
    }
    case kSSEFloat64Cmp:
      __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat64Add:
      __ addsd(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat64Sub:
      __ subsd(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat64Mul:
      __ mulsd(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat64Div:
      __ divsd(i.InputDoubleRegister(0), i.InputOperand(1));
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kSSEFloat32Max: {
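      // NaN operands branch to the out-of-line code that materializes a NaN
      // result. Equal operands additionally need the sign-bit test below so
      // that max(-0.0, +0.0) returns +0.0.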
      Label compare_nan, compare_swap, done_compare;
      if (instr->InputAt(1)->IsFPRegister()) {
        __ ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      auto ool =
          new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
      __ j(parity_even, ool->entry());
      __ j(above, &done_compare, Label::kNear);
      __ j(below, &compare_swap, Label::kNear);
      __ movmskps(i.TempRegister(0), i.InputDoubleRegister(0));
      __ test(i.TempRegister(0), Immediate(1));
      __ j(zero, &done_compare, Label::kNear);
      __ bind(&compare_swap);
      if (instr->InputAt(1)->IsFPRegister()) {
        __ movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ movss(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      __ bind(&done_compare);
      __ bind(ool->exit());
      break;
    }

    case kSSEFloat64Max: {
      Label compare_nan, compare_swap, done_compare;
      if (instr->InputAt(1)->IsFPRegister()) {
        __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      auto ool =
          new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
      __ j(parity_even, ool->entry());
      __ j(above, &done_compare, Label::kNear);
      __ j(below, &compare_swap, Label::kNear);
      __ movmskpd(i.TempRegister(0), i.InputDoubleRegister(0));
      __ test(i.TempRegister(0), Immediate(1));
      __ j(zero, &done_compare, Label::kNear);
      __ bind(&compare_swap);
      if (instr->InputAt(1)->IsFPRegister()) {
        __ movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ movsd(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      __ bind(&done_compare);
      __ bind(ool->exit());
      break;
    }
    case kSSEFloat32Min: {
      Label compare_swap, done_compare;
      if (instr->InputAt(1)->IsFPRegister()) {
        __ ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      auto ool =
          new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
      __ j(parity_even, ool->entry());
      __ j(below, &done_compare, Label::kNear);
      __ j(above, &compare_swap, Label::kNear);
      if (instr->InputAt(1)->IsFPRegister()) {
        __ movmskps(i.TempRegister(0), i.InputDoubleRegister(1));
      } else {
        __ movss(kScratchDoubleReg, i.InputOperand(1));
        __ movmskps(i.TempRegister(0), kScratchDoubleReg);
      }
      __ test(i.TempRegister(0), Immediate(1));
      __ j(zero, &done_compare, Label::kNear);
      __ bind(&compare_swap);
      if (instr->InputAt(1)->IsFPRegister()) {
        __ movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ movss(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      __ bind(&done_compare);
      __ bind(ool->exit());
      break;
    }
    case kSSEFloat64Min: {
      Label compare_swap, done_compare;
      if (instr->InputAt(1)->IsFPRegister()) {
        __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      auto ool =
          new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
      __ j(parity_even, ool->entry());
      __ j(below, &done_compare, Label::kNear);
      __ j(above, &compare_swap, Label::kNear);
      if (instr->InputAt(1)->IsFPRegister()) {
        __ movmskpd(i.TempRegister(0), i.InputDoubleRegister(1));
      } else {
        __ movsd(kScratchDoubleReg, i.InputOperand(1));
        __ movmskpd(i.TempRegister(0), kScratchDoubleReg);
      }
      __ test(i.TempRegister(0), Immediate(1));
      __ j(zero, &done_compare, Label::kNear);
      __ bind(&compare_swap);
      if (instr->InputAt(1)->IsFPRegister()) {
        __ movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ movsd(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      __ bind(&done_compare);
      __ bind(ool->exit());
      break;
    }
    case kSSEFloat64Mod: {
      // TODO(dcarney): alignment is wrong.
      __ sub(esp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(esp, 0));
      __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(esp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use eax.
      __ fnstsw_ax();
      __ sahf();
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(esp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
      __ add(esp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Abs: {
      // TODO(bmeurer): Use 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 1);
      __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat64Neg: {
      // TODO(bmeurer): Use 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 63);
      __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat64Sqrt:
      __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEFloat64Round: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      RoundingMode const mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
      break;
    }
    case kSSEFloat32ToFloat64:
      __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEFloat64ToFloat32:
      __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEFloat32ToInt32:
      __ cvttss2si(i.OutputRegister(), i.InputOperand(0));
      break;
    case kSSEFloat32ToUint32: {
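      // cvttss2si only covers the signed int32 range. A negative result here
      // means the input was >= 2^31: rebase the input by -2^31, convert
      // again, and set the top bit of the result.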
      Label success;
      __ cvttss2si(i.OutputRegister(), i.InputOperand(0));
      __ test(i.OutputRegister(), i.OutputRegister());
      __ j(positive, &success);
      __ Move(kScratchDoubleReg, static_cast<float>(INT32_MIN));
      __ addss(kScratchDoubleReg, i.InputOperand(0));
      __ cvttss2si(i.OutputRegister(), kScratchDoubleReg);
      __ or_(i.OutputRegister(), Immediate(0x80000000));
      __ bind(&success);
      break;
    }
    case kSSEFloat64ToInt32:
      __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      break;
    case kSSEFloat64ToUint32: {
      __ Move(kScratchDoubleReg, -2147483648.0);
      __ addsd(kScratchDoubleReg, i.InputOperand(0));
      __ cvttsd2si(i.OutputRegister(), kScratchDoubleReg);
      __ add(i.OutputRegister(), Immediate(0x80000000));
      break;
    }
    case kSSEInt32ToFloat32:
      __ cvtsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEUint32ToFloat32: {
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      __ mov(scratch0, i.InputOperand(0));
      __ Cvtui2ss(i.OutputDoubleRegister(), scratch0, scratch1);
      break;
    }
    case kSSEInt32ToFloat64:
      __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEUint32ToFloat64:
      __ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEFloat64ExtractLowWord32:
      if (instr->InputAt(0)->IsFPStackSlot()) {
        __ mov(i.OutputRegister(), i.InputOperand(0));
      } else {
        __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
      }
      break;
    case kSSEFloat64ExtractHighWord32:
      if (instr->InputAt(0)->IsFPStackSlot()) {
        __ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
      } else {
        __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
      }
      break;
    case kSSEFloat64InsertLowWord32:
      __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
      break;
    case kSSEFloat64InsertHighWord32:
      __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
      break;
    case kSSEFloat64LoadLowWord32:
      __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kAVXFloat32Add: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vaddss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat32Sub: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vsubss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat32Mul: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vmulss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat32Div: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vdivss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulss depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    }
    case kAVXFloat64Add: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat64Sub: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vsubsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat64Mul: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vmulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat64Div: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vdivsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    }
    case kAVXFloat32Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 33);
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
      break;
    }
    case kAVXFloat32Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 31);
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
      break;
    }
    case kAVXFloat64Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 1);
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
      break;
    }
    case kAVXFloat64Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 63);
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
      break;
    }
    case kSSEFloat64SilenceNaN:
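      // Subtracting 0.0 quiets a signalling NaN and leaves all other values
      // (including quiet NaNs) unchanged.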
1362 __ xorpd(kScratchDoubleReg, kScratchDoubleReg);
1363 __ subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
1364 break;
1365 case kIA32Movsxbl:
1366 __ movsx_b(i.OutputRegister(), i.MemoryOperand());
1367 break;
1368 case kIA32Movzxbl:
1369 __ movzx_b(i.OutputRegister(), i.MemoryOperand());
1370 break;
1371 case kIA32Movb: {
1372 size_t index = 0;
1373 Operand operand = i.MemoryOperand(&index);
1374 if (HasImmediateInput(instr, index)) {
1375 __ mov_b(operand, i.InputInt8(index));
1376 } else {
1377 __ mov_b(operand, i.InputRegister(index));
1378 }
1379 break;
1380 }
1381 case kIA32Movsxwl:
1382 __ movsx_w(i.OutputRegister(), i.MemoryOperand());
1383 break;
1384 case kIA32Movzxwl:
1385 __ movzx_w(i.OutputRegister(), i.MemoryOperand());
1386 break;
1387 case kIA32Movw: {
1388 size_t index = 0;
1389 Operand operand = i.MemoryOperand(&index);
1390 if (HasImmediateInput(instr, index)) {
1391 __ mov_w(operand, i.InputInt16(index));
1392 } else {
1393 __ mov_w(operand, i.InputRegister(index));
1394 }
1395 break;
1396 }
1397 case kIA32Movl:
1398 if (instr->HasOutput()) {
1399 __ mov(i.OutputRegister(), i.MemoryOperand());
1400 } else {
1401 size_t index = 0;
1402 Operand operand = i.MemoryOperand(&index);
1403 if (HasImmediateInput(instr, index)) {
1404 __ mov(operand, i.InputImmediate(index));
1405 } else {
1406 __ mov(operand, i.InputRegister(index));
1407 }
1408 }
1409 break;
1410 case kIA32Movsd:
1411 if (instr->HasOutput()) {
1412 __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
1413 } else {
1414 size_t index = 0;
1415 Operand operand = i.MemoryOperand(&index);
1416 __ movsd(operand, i.InputDoubleRegister(index));
1417 }
1418 break;
1419 case kIA32Movss:
1420 if (instr->HasOutput()) {
1421 __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
1422 } else {
1423 size_t index = 0;
1424 Operand operand = i.MemoryOperand(&index);
1425 __ movss(operand, i.InputDoubleRegister(index));
1426 }
1427 break;
1428 case kIA32BitcastFI:
1429 if (instr->InputAt(0)->IsFPStackSlot()) {
1430 __ mov(i.OutputRegister(), i.InputOperand(0));
1431 } else {
1432 __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
1433 }
1434 break;
1435 case kIA32BitcastIF:
1436 if (instr->InputAt(0)->IsRegister()) {
1437 __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
1438 } else {
1439 __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
1440 }
1441 break;
1442 case kIA32Lea: {
1443 AddressingMode mode = AddressingModeField::decode(instr->opcode());
1444 // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
1445 // and addressing mode just happens to work out. The "addl"/"subl" forms
1446 // in these cases are faster based on measurements.
1447 if (mode == kMode_MI) {
1448 __ Move(i.OutputRegister(), Immediate(i.InputInt32(0)));
1449 } else if (i.InputRegister(0).is(i.OutputRegister())) {
1450 if (mode == kMode_MRI) {
1451 int32_t constant_summand = i.InputInt32(1);
1452 if (constant_summand > 0) {
1453 __ add(i.OutputRegister(), Immediate(constant_summand));
1454 } else if (constant_summand < 0) {
1455 __ sub(i.OutputRegister(), Immediate(-constant_summand));
1456 }
1457 } else if (mode == kMode_MR1) {
1458 if (i.InputRegister(1).is(i.OutputRegister())) {
1459 __ shl(i.OutputRegister(), 1);
1460 } else {
1461 __ add(i.OutputRegister(), i.InputRegister(1));
1462 }
1463 } else if (mode == kMode_M2) {
1464 __ shl(i.OutputRegister(), 1);
1465 } else if (mode == kMode_M4) {
1466 __ shl(i.OutputRegister(), 2);
1467 } else if (mode == kMode_M8) {
1468 __ shl(i.OutputRegister(), 3);
1469 } else {
1470 __ lea(i.OutputRegister(), i.MemoryOperand());
1471 }
1472 } else if (mode == kMode_MR1 &&
1473 i.InputRegister(1).is(i.OutputRegister())) {
1474 __ add(i.OutputRegister(), i.InputRegister(0));
1475 } else {
1476 __ lea(i.OutputRegister(), i.MemoryOperand());
1477 }
1478 break;
1479 }
1480 case kIA32PushFloat32:
1481 if (instr->InputAt(0)->IsFPRegister()) {
1482 __ sub(esp, Immediate(kFloatSize));
1483 __ movss(Operand(esp, 0), i.InputDoubleRegister(0));
1484 frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
1485 } else if (HasImmediateInput(instr, 0)) {
1486 __ Move(kScratchDoubleReg, i.InputDouble(0));
1487 __ sub(esp, Immediate(kDoubleSize));
1488 __ movss(Operand(esp, 0), kScratchDoubleReg);
1489 frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
1490 } else {
1491 __ movsd(kScratchDoubleReg, i.InputOperand(0));
1492 __ sub(esp, Immediate(kDoubleSize));
1493 __ movss(Operand(esp, 0), kScratchDoubleReg);
1494 frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
1495 }
1496 break;
1497 case kIA32PushFloat64:
1498 if (instr->InputAt(0)->IsFPRegister()) {
1499 __ sub(esp, Immediate(kDoubleSize));
1500 __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
1501 frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
1502 } else if (HasImmediateInput(instr, 0)) {
1503 __ Move(kScratchDoubleReg, i.InputDouble(0));
1504 __ sub(esp, Immediate(kDoubleSize));
1505 __ movsd(Operand(esp, 0), kScratchDoubleReg);
1506 frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
1507 } else {
1508 __ movsd(kScratchDoubleReg, i.InputOperand(0));
1509 __ sub(esp, Immediate(kDoubleSize));
1510 __ movsd(Operand(esp, 0), kScratchDoubleReg);
1511 frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
1512 }
1513 break;
1514 case kIA32Push:
1515 if (instr->InputAt(0)->IsFPRegister()) {
1516 __ sub(esp, Immediate(kFloatSize));
1517 __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
1518 frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
1519 } else if (HasImmediateInput(instr, 0)) {
1520 __ push(i.InputImmediate(0));
1521 frame_access_state()->IncreaseSPDelta(1);
1522 } else {
1523 __ push(i.InputOperand(0));
1524 frame_access_state()->IncreaseSPDelta(1);
1525 }
1526 break;
1527 case kIA32Poke: {
1528 int const slot = MiscField::decode(instr->opcode());
1529 if (HasImmediateInput(instr, 0)) {
1530 __ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0));
1531 } else {
1532 __ mov(Operand(esp, slot * kPointerSize), i.InputRegister(0));
1533 }
1534 break;
1535 }
1536 case kIA32Xchgb: {
1537 size_t index = 0;
1538 Operand operand = i.MemoryOperand(&index);
1539 __ xchg_b(i.InputRegister(index), operand);
1540 break;
1541 }
1542 case kIA32Xchgw: {
1543 size_t index = 0;
1544 Operand operand = i.MemoryOperand(&index);
1545 __ xchg_w(i.InputRegister(index), operand);
1546 break;
1547 }
1548 case kIA32Xchgl: {
1549 size_t index = 0;
1550 Operand operand = i.MemoryOperand(&index);
1551 __ xchg(i.InputRegister(index), operand);
1552 break;
1553 }
1554 case kCheckedLoadInt8:
1555 ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
1556 break;
1557 case kCheckedLoadUint8:
1558 ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_b);
1559 break;
1560 case kCheckedLoadInt16:
1561 ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_w);
1562 break;
1563 case kCheckedLoadUint16:
1564 ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_w);
1565 break;
1566 case kCheckedLoadWord32:
1567 ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
1568 break;
1569 case kCheckedLoadFloat32:
1570 ASSEMBLE_CHECKED_LOAD_FLOAT(movss, OutOfLineLoadFloat32NaN);
1571 break;
1572 case kCheckedLoadFloat64:
1573 ASSEMBLE_CHECKED_LOAD_FLOAT(movsd, OutOfLineLoadFloat64NaN);
1574 break;
1575 case kCheckedStoreWord8:
1576 ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
1577 break;
1578 case kCheckedStoreWord16:
1579 ASSEMBLE_CHECKED_STORE_INTEGER(mov_w);
1580 break;
1581 case kCheckedStoreWord32:
1582 ASSEMBLE_CHECKED_STORE_INTEGER(mov);
1583 break;
1584 case kCheckedStoreFloat32:
1585 ASSEMBLE_CHECKED_STORE_FLOAT(movss);
1586 break;
1587 case kCheckedStoreFloat64:
1588 ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
1589 break;
1590 case kIA32StackCheck: {
1591 ExternalReference const stack_limit =
1592 ExternalReference::address_of_stack_limit(isolate());
1593 __ cmp(esp, Operand::StaticVariable(stack_limit));
1594 break;
1595 }
1596 case kCheckedLoadWord64:
1597 case kCheckedStoreWord64:
1598 UNREACHABLE(); // currently unsupported checked int64 load/store.
1599 break;
1600 case kAtomicLoadInt8:
1601 case kAtomicLoadUint8:
1602 case kAtomicLoadInt16:
1603 case kAtomicLoadUint16:
1604 case kAtomicLoadWord32:
1605 case kAtomicStoreWord8:
1606 case kAtomicStoreWord16:
1607 case kAtomicStoreWord32:
1608 UNREACHABLE(); // Won't be generated by instruction selector.
1609 break;
1610 }
1611 return kSuccess;
1612 } // NOLINT(readability/fn_size)
1613
1614
1615 // Assembles a branch after an instruction.
AssembleArchBranch(Instruction * instr,BranchInfo * branch)1616 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
1617 IA32OperandConverter i(this, instr);
1618 Label::Distance flabel_distance =
1619 branch->fallthru ? Label::kNear : Label::kFar;
1620 Label* tlabel = branch->true_label;
1621 Label* flabel = branch->false_label;
1622 switch (branch->condition) {
1623 case kUnorderedEqual:
1624 __ j(parity_even, flabel, flabel_distance);
1625 // Fall through.
1626 case kEqual:
1627 __ j(equal, tlabel);
1628 break;
1629 case kUnorderedNotEqual:
1630 __ j(parity_even, tlabel);
1631 // Fall through.
1632 case kNotEqual:
1633 __ j(not_equal, tlabel);
1634 break;
1635 case kSignedLessThan:
1636 __ j(less, tlabel);
1637 break;
1638 case kSignedGreaterThanOrEqual:
1639 __ j(greater_equal, tlabel);
1640 break;
1641 case kSignedLessThanOrEqual:
1642 __ j(less_equal, tlabel);
1643 break;
1644 case kSignedGreaterThan:
1645 __ j(greater, tlabel);
1646 break;
1647 case kUnsignedLessThan:
1648 __ j(below, tlabel);
1649 break;
1650 case kUnsignedGreaterThanOrEqual:
1651 __ j(above_equal, tlabel);
1652 break;
1653 case kUnsignedLessThanOrEqual:
1654 __ j(below_equal, tlabel);
1655 break;
1656 case kUnsignedGreaterThan:
1657 __ j(above, tlabel);
1658 break;
1659 case kOverflow:
1660 __ j(overflow, tlabel);
1661 break;
1662 case kNotOverflow:
1663 __ j(no_overflow, tlabel);
1664 break;
1665 default:
1666 UNREACHABLE();
1667 break;
1668 }
1669 // Add a jump if not falling through to the next block.
1670 if (!branch->fallthru) __ jmp(flabel);
1671 }
1672
1673
AssembleArchJump(RpoNumber target)1674 void CodeGenerator::AssembleArchJump(RpoNumber target) {
1675 if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
1676 }


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  IA32OperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = no_condition;
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ Move(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
      // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
      // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
    default:
      UNREACHABLE();
      break;
  }
  __ bind(&check);
  if (reg.is_byte_register()) {
    // setcc for byte registers (al, bl, cl, dl).
    __ setcc(cc, reg);
    __ movzx_b(reg, reg);
  } else {
    // Emit a branch to set a register to either 1 or 0.
    Label set;
    __ j(cc, &set, Label::kNear);
    __ Move(reg, Immediate(0));
    __ jmp(&done, Label::kNear);
    __ bind(&set);
    __ mov(reg, Immediate(1));
  }
  __ bind(&done);
}
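
// For example, materializing kUnsignedGreaterThan into eax uses the setcc
// path, since eax has a byte form:
//
//   seta  al          ; al = (above) ? 1 : 0
//   movzx eax, al     ; zero-extend to the full 32-bit register
//
// A register without a byte form (e.g. edi) takes the branch-based
// mov-0/mov-1 path instead, as setcc cannot target it on IA-32.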


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  IA32OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ cmp(input, Immediate(i.InputInt32(index + 0)));
    __ j(equal, GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}
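
// Sketch of the compare chain for a hypothetical lookup switch with cases
// 3 -> B1 and 7 -> B2 and default block B0:
//
//   cmp input, 3
//   je  B1
//   cmp input, 7
//   je  B2
//   jmp B0        ; omitted by AssembleArchJump when B0 is the next block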


void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  IA32OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  size_t const case_count = instr->InputCount() - 2;
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (size_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  }
  Label* const table = AddJumpTable(cases, case_count);
  __ cmp(input, Immediate(case_count));
  __ j(above_equal, GetLabel(i.InputRpo(1)));
  __ jmp(Operand::JumpTable(input, times_4, table));
}
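
// Sketch for a table switch over N consecutive cases with default block B0;
// `table` is the label array emitted by AssembleJumpTable below as 4-byte
// entries (hence the times_4 scale):
//
//   cmp input, N
//   jae B0                       ; unsigned, so this also catches input < 0
//   jmp [table + input * 4]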

CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
    SourcePosition pos) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
  DeoptimizeReason deoptimization_reason =
      GetDeoptimizationReason(deoptimization_id);
  __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
  return kSuccess;
}


// The calling convention for JSFunctions on IA32 passes arguments on the
// stack and the JSFunction and context in EDI and ESI, respectively, thus
// the steps of the call look as follows:

// --{ before the call instruction }--------------------------------------------
//                                                         | caller frame |
//                                                         ^ esp         ^ ebp

// --{ push arguments and setup ESI, EDI }--------------------------------------
//                                       | args + receiver | caller frame |
//                                       ^ esp                           ^ ebp
//                [edi = JSFunction, esi = context]

// --{ call [edi + kCodeEntryOffset] }------------------------------------------
//                                 | RET | args + receiver | caller frame |
//                                 ^ esp                                 ^ ebp

// =={ prologue of called function }============================================
// --{ push ebp }---------------------------------------------------------------
//                            | FP | RET | args + receiver | caller frame |
//                            ^ esp                                      ^ ebp

// --{ mov ebp, esp }-----------------------------------------------------------
//                            | FP | RET | args + receiver | caller frame |
//                            ^ ebp,esp

// --{ push esi }---------------------------------------------------------------
//                      | CTX | FP | RET | args + receiver | caller frame |
//                      ^esp  ^ ebp

// --{ push edi }---------------------------------------------------------------
//                | FNC | CTX | FP | RET | args + receiver | caller frame |
//                ^esp        ^ ebp

// --{ subi esp, #N }-----------------------------------------------------------
// | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame |
// ^esp                       ^ ebp

// =={ body of called function }================================================

// =={ epilogue of called function }============================================
// --{ mov esp, ebp }-----------------------------------------------------------
//                            | FP | RET | args + receiver | caller frame |
//                            ^ esp,ebp

// --{ pop ebp }----------------------------------------------------------------
//                            |    | RET | args + receiver | caller frame |
//                                 ^ esp                                 ^ ebp

// --{ ret #A+1 }---------------------------------------------------------------
//                                                    |    | caller frame |
//                                                         ^ esp         ^ ebp
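
// To make "ret #A+1" concrete: A is the argument count and the extra slot is
// the receiver, so a call with two arguments returns with
//
//   ret 12    ; (2 args + receiver) * kPointerSize == 12 bytes on IA-32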


// Runtime function calls are accomplished by doing a stub call to the
// CEntryStub (a real code object). On IA32 it passes arguments on the
// stack, the number of arguments in EAX, the address of the runtime function
// in EBX, and the context in ESI.

// --{ before the call instruction }--------------------------------------------
//                                                         | caller frame |
//                                                         ^ esp         ^ ebp

// --{ push arguments and setup EAX, EBX, and ESI }-----------------------------
//                                       | args + receiver | caller frame |
//                                       ^ esp                           ^ ebp
//                [eax = #args, ebx = runtime function, esi = context]

// --{ call #CEntryStub }-------------------------------------------------------
//                                 | RET | args + receiver | caller frame |
//                                 ^ esp                                 ^ ebp

// =={ body of runtime function }===============================================

// --{ runtime returns }--------------------------------------------------------
//                                                         | caller frame |
//                                                         ^ esp         ^ ebp

// Other custom linkages (e.g. for calling directly into and out of C++) may
// need to save callee-saved registers on the stack, which is done in the
// function prologue of generated code.

// --{ before the call instruction }--------------------------------------------
//                                                         | caller frame |
//                                                         ^ esp         ^ ebp

// --{ set up arguments in registers and on the stack }--------------------------
//                                                  | args | caller frame |
//                                                  ^ esp                ^ ebp
//                [r0 = arg0, r1 = arg1, ...]

// --{ call code }--------------------------------------------------------------
//                                            | RET | args | caller frame |
//                                            ^ esp                      ^ ebp

// =={ prologue of called function }============================================
// --{ push ebp }---------------------------------------------------------------
//                                       | FP | RET | args | caller frame |
//                                       ^ esp                           ^ ebp

// --{ mov ebp, esp }-----------------------------------------------------------
//                                       | FP | RET | args | caller frame |
//                                       ^ ebp,esp

// --{ save registers }---------------------------------------------------------
//                                | regs | FP | RET | args | caller frame |
//                                ^ esp  ^ ebp

// --{ subi esp, #N }-----------------------------------------------------------
//                 | callee frame | regs | FP | RET | args | caller frame |
//                 ^esp                  ^ ebp

// =={ body of called function }================================================

// =={ epilogue of called function }============================================
// --{ restore registers }------------------------------------------------------
//                                | regs | FP | RET | args | caller frame |
//                                ^ esp  ^ ebp

// --{ mov esp, ebp }-----------------------------------------------------------
//                                       | FP | RET | args | caller frame |
//                                       ^ esp,ebp

// --{ pop ebp }----------------------------------------------------------------
//                                            | RET | args | caller frame |
//                                            ^ esp                      ^ ebp

void CodeGenerator::FinishFrame(Frame* frame) {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {  // Save callee-saved registers.
    DCHECK(!info()->is_osr());
    int pushed = 0;
    for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
      if (!((1 << i) & saves)) continue;
      ++pushed;
    }
    frame->AllocateSavedCalleeRegisterSlots(pushed);
  }
}
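
// The loop above is simply a population count over the callee-saved RegList
// bitmask. For a hypothetical descriptor with
// saves == ebx.bit() | esi.bit() | edi.bit(), three bits are set, so
// pushed == 3 and three saved-register slots are allocated in the frame.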

void CodeGenerator::AssembleConstructFrame() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (frame_access_state()->has_frame()) {
    if (descriptor->IsCFunctionCall()) {
      __ push(ebp);
      __ mov(ebp, esp);
    } else if (descriptor->IsJSFunctionCall()) {
      __ Prologue(this->info()->GeneratePreagedPrologue());
      if (descriptor->PushArgumentCount()) {
        __ push(kJavaScriptCallArgCountRegister);
      }
    } else {
      __ StubPrologue(info()->GetOutputStackFrameType());
    }
  }

  int shrink_slots =
      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the
    // unoptimized frame is still on the stack. Optimized code uses OSR values
    // directly from the unoptimized frame. Thus, all that needs to be done is
    // to allocate the remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  const RegList saves = descriptor->CalleeSavedRegisters();
  if (shrink_slots > 0) {
    __ sub(esp, Immediate(shrink_slots * kPointerSize));
  }

  if (saves != 0) {  // Save callee-saved registers.
    DCHECK(!info()->is_osr());
    int pushed = 0;
    for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
      if (!((1 << i) & saves)) continue;
      __ push(Register::from_code(i));
      ++pushed;
    }
  }
}
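
// Sketch of the emitted prologue for a hypothetical C function call frame
// with shrink_slots == 2 and saves == esi.bit() | edi.bit(). Note that the
// callee-saved pushes land below the already-reserved slot area, because the
// sub happens first:
//
//   push ebp
//   mov  ebp, esp
//   sub  esp, 8    ; 2 slots * kPointerSize
//   push edi       ; higher register codes are pushed first
//   push esi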

void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  const RegList saves = descriptor->CalleeSavedRegisters();
  // Restore registers.
  if (saves != 0) {
    for (int i = 0; i < Register::kNumRegisters; i++) {
      if (!((1 << i) & saves)) continue;
      __ pop(Register::from_code(i));
    }
  }

  // Might need ecx for scratch if pop_size is too big or if there is a
  // variable pop count.
  DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
  size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
  IA32OperandConverter g(this, nullptr);
  if (descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now, as long as they always
    // have the same number of return args.
    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
      if (return_label_.is_bound()) {
        __ jmp(&return_label_);
        return;
      } else {
        __ bind(&return_label_);
        AssembleDeconstructFrame();
      }
    } else {
      AssembleDeconstructFrame();
    }
  }
  DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & edx.bit());
  DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
  if (pop->IsImmediate()) {
    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
    pop_size += g.ToConstant(pop).ToInt32() * kPointerSize;
    __ Ret(static_cast<int>(pop_size), ecx);
  } else {
    Register pop_reg = g.ToRegister(pop);
    Register scratch_reg = pop_reg.is(ecx) ? edx : ecx;
    __ pop(scratch_reg);
    __ lea(esp, Operand(esp, pop_reg, times_4, static_cast<int>(pop_size)));
    __ jmp(scratch_reg);
  }
}
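
// Sketch of the variable-pop-count epilogue from the else branch above, for
// pop_reg == eax (so scratch_reg == ecx):
//
//   pop ecx                                 ; return address -> scratch
//   lea esp, [esp + eax * 4 + pop_size]     ; drop the dynamic argument area
//   jmp ecx                                 ; return to the caller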


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  IA32OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ mov(dst, src);
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ mov(dst, src);
    } else {
      Operand dst = g.ToOperand(destination);
      __ push(src);
      __ pop(dst);
    }
  } else if (source->IsConstant()) {
    Constant src_constant = g.ToConstant(source);
    if (src_constant.type() == Constant::kHeapObject) {
      Handle<HeapObject> src = src_constant.ToHeapObject();
      if (destination->IsRegister()) {
        Register dst = g.ToRegister(destination);
        __ LoadHeapObject(dst, src);
      } else {
        DCHECK(destination->IsStackSlot());
        Operand dst = g.ToOperand(destination);
        AllowDeferredHandleDereference embedding_raw_address;
        if (isolate()->heap()->InNewSpace(*src)) {
          __ PushHeapObject(src);
          __ pop(dst);
        } else {
          __ mov(dst, src);
        }
      }
    } else if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(dst, g.ToImmediate(source));
    } else if (destination->IsStackSlot()) {
      Operand dst = g.ToOperand(destination);
      __ Move(dst, g.ToImmediate(source));
    } else if (src_constant.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
      if (destination->IsFPRegister()) {
        XMMRegister dst = g.ToDoubleRegister(destination);
        __ Move(dst, src);
      } else {
        DCHECK(destination->IsFPStackSlot());
        Operand dst = g.ToOperand(destination);
        __ Move(dst, Immediate(src));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src_constant.type());
      uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
      uint32_t lower = static_cast<uint32_t>(src);
      uint32_t upper = static_cast<uint32_t>(src >> 32);
      if (destination->IsFPRegister()) {
        XMMRegister dst = g.ToDoubleRegister(destination);
        __ Move(dst, src);
      } else {
        DCHECK(destination->IsFPStackSlot());
        Operand dst0 = g.ToOperand(destination);
        Operand dst1 = g.HighOperand(destination);
        __ Move(dst0, Immediate(lower));
        __ Move(dst1, Immediate(upper));
      }
    }
  } else if (source->IsFPRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movaps(dst, src);
    } else {
      DCHECK(destination->IsFPStackSlot());
      Operand dst = g.ToOperand(destination);
      MachineRepresentation rep =
          LocationOperand::cast(source)->representation();
      if (rep == MachineRepresentation::kFloat64) {
        __ movsd(dst, src);
      } else if (rep == MachineRepresentation::kFloat32) {
        __ movss(dst, src);
      } else {
        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
        __ movups(dst, src);
      }
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
    Operand src = g.ToOperand(source);
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (destination->IsFPRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      if (rep == MachineRepresentation::kFloat64) {
        __ movsd(dst, src);
      } else if (rep == MachineRepresentation::kFloat32) {
        __ movss(dst, src);
      } else {
        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
        __ movups(dst, src);
      }
    } else {
      Operand dst = g.ToOperand(destination);
      if (rep == MachineRepresentation::kFloat64) {
        __ movsd(kScratchDoubleReg, src);
        __ movsd(dst, kScratchDoubleReg);
      } else if (rep == MachineRepresentation::kFloat32) {
        __ movss(kScratchDoubleReg, src);
        __ movss(dst, kScratchDoubleReg);
      } else {
        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
        __ movups(kScratchDoubleReg, src);
        __ movups(dst, kScratchDoubleReg);
      }
    }
  } else {
    UNREACHABLE();
  }
}
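
// As a concrete instance of the kFloat64 constant-to-stack-slot path above:
// moving the constant 1.0 (bit_cast<uint64_t>(1.0) == 0x3FF0000000000000)
// stores the two halves of the IEEE-754 bit pattern separately:
//
//   mov [slot], 0x00000000       ; lower word
//   mov [slot + 4], 0x3FF00000   ; upper word, via HighOperand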


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  IA32OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = g.ToRegister(source);
    Register dst = g.ToRegister(destination);
    __ push(src);
    __ mov(src, dst);
    __ pop(dst);
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    // Register-memory.
    Register src = g.ToRegister(source);
    __ push(src);
    frame_access_state()->IncreaseSPDelta(1);
    Operand dst = g.ToOperand(destination);
    __ mov(src, dst);
    frame_access_state()->IncreaseSPDelta(-1);
    dst = g.ToOperand(destination);
    __ pop(dst);
  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory.
    Operand dst1 = g.ToOperand(destination);
    __ push(dst1);
    frame_access_state()->IncreaseSPDelta(1);
    Operand src1 = g.ToOperand(source);
    __ push(src1);
    Operand dst2 = g.ToOperand(destination);
    __ pop(dst2);
    frame_access_state()->IncreaseSPDelta(-1);
    Operand src2 = g.ToOperand(source);
    __ pop(src2);
  } else if (source->IsFPRegister() && destination->IsFPRegister()) {
    // XMM register-register swap.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movaps(kScratchDoubleReg, src);
    __ movaps(src, dst);
    __ movaps(dst, kScratchDoubleReg);
  } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
    // XMM register-memory swap.
    XMMRegister reg = g.ToDoubleRegister(source);
    Operand other = g.ToOperand(destination);
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (rep == MachineRepresentation::kFloat64) {
      __ movsd(kScratchDoubleReg, other);
      __ movsd(other, reg);
      __ movaps(reg, kScratchDoubleReg);
    } else if (rep == MachineRepresentation::kFloat32) {
      __ movss(kScratchDoubleReg, other);
      __ movss(other, reg);
      __ movaps(reg, kScratchDoubleReg);
    } else {
      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
      __ movups(kScratchDoubleReg, other);
      __ movups(other, reg);
      __ movups(reg, kScratchDoubleReg);
    }
  } else if (source->IsFPStackSlot() && destination->IsFPStackSlot()) {
    // Double-width memory-to-memory.
    Operand src0 = g.ToOperand(source);
    Operand dst0 = g.ToOperand(destination);
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (rep == MachineRepresentation::kFloat64) {
      Operand src1 = g.HighOperand(source);
      Operand dst1 = g.HighOperand(destination);
      __ movsd(kScratchDoubleReg, dst0);  // Save dst in scratch register.
      __ push(src0);  // Then use stack to copy src to destination.
      __ pop(dst0);
      __ push(src1);
      __ pop(dst1);
      __ movsd(src0, kScratchDoubleReg);
    } else if (rep == MachineRepresentation::kFloat32) {
      __ movss(kScratchDoubleReg, dst0);  // Save dst in scratch register.
      __ push(src0);  // Then use stack to copy src to destination.
      __ pop(dst0);
      __ movss(src0, kScratchDoubleReg);
    } else {
      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
      // Use the XOR trick to swap without a temporary.
      __ movups(kScratchDoubleReg, src0);
      __ xorps(kScratchDoubleReg, dst0);  // scratch contains src ^ dst.
      __ movups(src0, kScratchDoubleReg);
      __ xorps(kScratchDoubleReg, dst0);  // scratch contains src.
      __ movups(dst0, kScratchDoubleReg);
      __ xorps(kScratchDoubleReg, src0);  // scratch contains dst.
      __ movups(src0, kScratchDoubleReg);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
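
// A trace of the xorps-based SIMD swap above, writing S and D for the
// original contents of src0 and dst0:
//
//   movups scratch, src0    ; scratch = S
//   xorps  scratch, dst0    ; scratch = S ^ D
//   movups src0, scratch    ; src0    = S ^ D
//   xorps  scratch, dst0    ; scratch = (S ^ D) ^ D = S
//   movups dst0, scratch    ; dst0    = S
//   xorps  scratch, src0    ; scratch = S ^ (S ^ D) = D
//   movups src0, scratch    ; src0    = D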


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  for (size_t index = 0; index < target_count; ++index) {
    __ dd(targets[index]);
  }
}


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    __ Nop(padding_size);
  }
}
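
// Worked example of the padding computation, assuming (hypothetically) that
// Deoptimizer::patch_size() is 5 -- the length of a near call on IA-32: with
// last_lazy_deopt_pc_ == 100 and current_pc == 102, padding_size is
// 100 + 5 - 102 == 3, so three one-byte nops are emitted.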

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8