// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm/macro-assembler-arm.h"
#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


#define kScratchReg r9


// Adds Arm-specific methods to convert InstructionOperands.
class ArmOperandConverter final : public InstructionOperandConverter {
 public:
  ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  SBit OutputSBit() const {
    switch (instr_->flags_mode()) {
      case kFlags_branch:
      case kFlags_deoptimize:
      case kFlags_set:
        return SetCC;
      case kFlags_none:
        return LeaveCC;
    }
    UNREACHABLE();
    return LeaveCC;
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
      case Constant::kExternalReference:
      case Constant::kHeapObject:
      case Constant::kRpoNumber:
        break;
    }
    UNREACHABLE();
    return Operand::Zero();
  }

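  // Decodes the flexible second operand ("Operand2") of an ARM
  // data-processing instruction from the instruction's inputs: either an
  // immediate, or a register optionally shifted/rotated by an immediate or by
  // another register. For example, kMode_Operand2_R_LSL_I corresponds roughly
  // to an assembler operand like "r1, LSL #4".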
  Operand InputOperand2(size_t first_index) {
    const size_t index = first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Offset_RI:
      case kMode_Offset_RR:
        break;
      case kMode_Operand2_I:
        return InputImmediate(index + 0);
      case kMode_Operand2_R:
        return Operand(InputRegister(index + 0));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1));
      case kMode_Operand2_R_ASR_R:
        return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1));
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1));
      case kMode_Operand2_R_LSL_R:
        return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1));
      case kMode_Operand2_R_LSR_R:
        return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister(index + 0), ROR, InputInt5(index + 1));
      case kMode_Operand2_R_ROR_R:
        return Operand(InputRegister(index + 0), ROR, InputRegister(index + 1));
    }
    UNREACHABLE();
    return Operand::Zero();
  }

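  // Decodes a memory operand starting at input *first_index, advancing
  // *first_index past the inputs consumed: a base register plus either an
  // immediate offset, an offset register, or an offset register with an
  // immediate shift.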
  MemOperand InputOffset(size_t* first_index) {
    const size_t index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Operand2_I:
      case kMode_Operand2_R:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ASR_R:
      case kMode_Operand2_R_LSL_R:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_LSR_R:
      case kMode_Operand2_R_ROR_I:
      case kMode_Operand2_R_ROR_R:
        break;
      case kMode_Operand2_R_LSL_I:
        *first_index += 3;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
                          LSL, InputInt32(index + 2));
      case kMode_Offset_RI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_Offset_RR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(r0);
  }

  MemOperand InputOffset(size_t first_index = 0) {
    return InputOffset(&first_index);
  }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand SlotToMemOperand(int slot) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};

namespace {

class OutOfLineLoadFloat final : public OutOfLineCode {
 public:
  OutOfLineLoadFloat(CodeGenerator* gen, SwVfpRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    // Compute sqrtf(-1.0f), which results in a quiet single-precision NaN.
    __ vmov(result_, -1.0f);
    __ vsqrt(result_, result_);
  }

 private:
  SwVfpRegister const result_;
};

class OutOfLineLoadDouble final : public OutOfLineCode {
 public:
  OutOfLineLoadDouble(CodeGenerator* gen, DwVfpRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    // Compute sqrt(-1.0), which results in a quiet double-precision NaN.
    __ vmov(result_, -1.0);
    __ vsqrt(result_, result_);
  }

 private:
  DwVfpRegister const result_;
};


class OutOfLineLoadInteger final : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ mov(result_, Operand::Zero()); }

 private:
  Register const result_;
};


class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode,
                       UnwindingInfoWriter* unwinding_info_writer)
      : OutOfLineCode(gen),
        object_(object),
        index_(index),
        index_immediate_(0),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        unwinding_info_writer_(unwinding_info_writer) {}

  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t index,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode,
                       UnwindingInfoWriter* unwinding_info_writer)
      : OutOfLineCode(gen),
        object_(object),
        index_(no_reg),
        index_immediate_(index),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        unwinding_info_writer_(unwinding_info_writer) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore lr if the frame was elided.
      __ Push(lr);
      unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset());
    }
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         remembered_set_action, save_fp_mode);
    if (index_.is(no_reg)) {
      __ add(scratch1_, object_, Operand(index_immediate_));
    } else {
      DCHECK_EQ(0, index_immediate_);
      __ add(scratch1_, object_, Operand(index_));
    }
    __ CallStub(&stub);
    if (must_save_lr_) {
      __ Pop(lr);
      unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
    }
  }

 private:
  Register const object_;
  Register const index_;
  int32_t const index_immediate_;  // Valid if index_.is(no_reg).
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  bool must_save_lr_;
  UnwindingInfoWriter* const unwinding_info_writer_;
};

template <typename T>
class OutOfLineFloatMin final : public OutOfLineCode {
 public:
  OutOfLineFloatMin(CodeGenerator* gen, T result, T left, T right)
      : OutOfLineCode(gen), result_(result), left_(left), right_(right) {}

  void Generate() final { __ FloatMinOutOfLine(result_, left_, right_); }

 private:
  T const result_;
  T const left_;
  T const right_;
};
typedef OutOfLineFloatMin<SwVfpRegister> OutOfLineFloat32Min;
typedef OutOfLineFloatMin<DwVfpRegister> OutOfLineFloat64Min;

template <typename T>
class OutOfLineFloatMax final : public OutOfLineCode {
 public:
  OutOfLineFloatMax(CodeGenerator* gen, T result, T left, T right)
      : OutOfLineCode(gen), result_(result), left_(left), right_(right) {}

  void Generate() final { __ FloatMaxOutOfLine(result_, left_, right_); }

 private:
  T const result_;
  T const left_;
  T const right_;
};
typedef OutOfLineFloatMax<SwVfpRegister> OutOfLineFloat32Max;
typedef OutOfLineFloatMax<DwVfpRegister> OutOfLineFloat64Max;

Condition FlagsConditionToCondition(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kFloatLessThanOrUnordered:
      return lt;
    case kFloatGreaterThanOrEqual:
      return ge;
    case kFloatLessThanOrEqual:
      return ls;
    case kFloatGreaterThanOrUnordered:
      return hi;
    case kFloatLessThan:
      return lo;
    case kFloatGreaterThanOrEqualOrUnordered:
      return hs;
    case kFloatLessThanOrEqualOrUnordered:
      return le;
    case kFloatGreaterThan:
      return gt;
    case kOverflow:
      return vs;
    case kNotOverflow:
      return vc;
    case kPositiveOrZero:
      return pl;
    case kNegative:
      return mi;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}

}  // namespace

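// The ASSEMBLE_CHECKED_* macros below implement bounds-checked accesses: the
// offset (input 0) is compared against the limit (input 1), and an
// out-of-bounds load branches to out-of-line code that materializes a default
// value instead of trapping (a quiet NaN for FP loads, zero for integer
// loads). Out-of-bounds stores are simply predicated on "lo" (unsigned less
// than), so they become no-ops.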
#define ASSEMBLE_CHECKED_LOAD_FP(Type)                         \
  do {                                                         \
    auto result = i.Output##Type##Register();                  \
    auto offset = i.InputRegister(0);                          \
    if (instr->InputAt(1)->IsRegister()) {                     \
      __ cmp(offset, i.InputRegister(1));                      \
    } else {                                                   \
      __ cmp(offset, i.InputImmediate(1));                     \
    }                                                          \
    auto ool = new (zone()) OutOfLineLoad##Type(this, result); \
    __ b(hs, ool->entry());                                    \
    __ vldr(result, i.InputOffset(2));                         \
    __ bind(ool->exit());                                      \
    DCHECK_EQ(LeaveCC, i.OutputSBit());                        \
  } while (0)

#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                \
  do {                                                          \
    auto result = i.OutputRegister();                           \
    auto offset = i.InputRegister(0);                           \
    if (instr->InputAt(1)->IsRegister()) {                      \
      __ cmp(offset, i.InputRegister(1));                       \
    } else {                                                    \
      __ cmp(offset, i.InputImmediate(1));                      \
    }                                                           \
    auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
    __ b(hs, ool->entry());                                     \
    __ asm_instr(result, i.InputOffset(2));                     \
    __ bind(ool->exit());                                       \
    DCHECK_EQ(LeaveCC, i.OutputSBit());                         \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_FP(Type)      \
  do {                                       \
    auto offset = i.InputRegister(0);        \
    if (instr->InputAt(1)->IsRegister()) {   \
      __ cmp(offset, i.InputRegister(1));    \
    } else {                                 \
      __ cmp(offset, i.InputImmediate(1));   \
    }                                        \
    auto value = i.Input##Type##Register(2); \
    __ vstr(value, i.InputOffset(3), lo);    \
    DCHECK_EQ(LeaveCC, i.OutputSBit());      \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
  do {                                            \
    auto offset = i.InputRegister(0);             \
    if (instr->InputAt(1)->IsRegister()) {        \
      __ cmp(offset, i.InputRegister(1));         \
    } else {                                      \
      __ cmp(offset, i.InputImmediate(1));        \
    }                                             \
    auto value = i.InputRegister(2);              \
    __ asm_instr(value, i.InputOffset(3), lo);    \
    DCHECK_EQ(LeaveCC, i.OutputSBit());           \
  } while (0)

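// The atomic load/store macros below bracket plain ldr/str with dmb(ISH)
// fences: one after the load, and one on each side of the store. This is a
// sketch of the usual ARMv7 mapping for sequentially consistent loads and
// stores; no ldrex/strex exclusives are needed for plain accesses.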
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)                       \
  do {                                                                \
    __ asm_instr(i.OutputRegister(),                                  \
                 MemOperand(i.InputRegister(0), i.InputRegister(1))); \
    __ dmb(ISH);                                                      \
  } while (0)

#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)                      \
  do {                                                                \
    __ dmb(ISH);                                                      \
    __ asm_instr(i.InputRegister(2),                                  \
                 MemOperand(i.InputRegister(0), i.InputRegister(1))); \
    __ dmb(ISH);                                                      \
  } while (0)

#define ASSEMBLE_IEEE754_BINOP(name)                                           \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 2, kScratchReg);                                \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                          \
                            i.InputDoubleRegister(1));                         \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
                     0, 2);                                                    \
    /* Move the result into the double result register. */                     \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
    DCHECK_EQ(LeaveCC, i.OutputSBit());                                        \
  } while (0)

#define ASSEMBLE_IEEE754_UNOP(name)                                            \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 1, kScratchReg);                                \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                          \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
                     0, 1);                                                    \
    /* Move the result into the double result register. */                     \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
    DCHECK_EQ(LeaveCC, i.OutputSBit());                                        \
  } while (0)

void CodeGenerator::AssembleDeconstructFrame() {
  __ LeaveFrame(StackFrame::MANUAL);
  unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
}

void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    if (FLAG_enable_embedded_constant_pool) {
      __ ldr(cp, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
    }
    __ ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    __ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  }
  frame_access_state()->SetFrameAccessToSP();
}

void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if the current frame is an arguments adaptor frame.
  __ ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(ne, &done);

  // Load the arguments count from the current arguments adaptor frame (note
  // that it does not include the receiver).
  Register caller_args_count_reg = scratch1;
  __ ldr(caller_args_count_reg,
         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3);
  __ bind(&done);
}

namespace {

void FlushPendingPushRegisters(MacroAssembler* masm,
                               FrameAccessState* frame_access_state,
                               ZoneVector<Register>* pending_pushes) {
  switch (pending_pushes->size()) {
    case 0:
      break;
    case 1:
      masm->push((*pending_pushes)[0]);
      break;
    case 2:
      masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
      break;
    case 3:
      masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
                 (*pending_pushes)[2]);
      break;
    default:
      UNREACHABLE();
      break;
  }
  frame_access_state->IncreaseSPDelta(pending_pushes->size());
  pending_pushes->resize(0);
}

void AddPendingPushRegister(MacroAssembler* masm,
                            FrameAccessState* frame_access_state,
                            ZoneVector<Register>* pending_pushes,
                            Register reg) {
  pending_pushes->push_back(reg);
  if (pending_pushes->size() == 3 || reg.is(ip)) {
    FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
  }
}

void AdjustStackPointerForTailCall(
    MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
    ZoneVector<Register>* pending_pushes = nullptr,
    bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +
                          StandardFrameConstants::kFixedSlotCountAboveFp;
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {
    if (pending_pushes != nullptr) {
      FlushPendingPushRegisters(masm, state, pending_pushes);
    }
    masm->sub(sp, sp, Operand(stack_slot_delta * kPointerSize));
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    if (pending_pushes != nullptr) {
      FlushPendingPushRegisters(masm, state, pending_pushes);
    }
    masm->add(sp, sp, Operand(-stack_slot_delta * kPointerSize));
    state->IncreaseSPDelta(stack_slot_delta);
  }
}

}  // namespace

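// Before a tail call, parallel moves whose destinations are the outgoing
// stack slots can be turned into pushes. The helpers above batch up to three
// registers per Push to keep the number of sp adjustments down; ip serves as
// the temporary for stack-to-stack and immediate moves, so pending pushes are
// flushed as soon as ip is added to avoid it being clobbered by the next
// move.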
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_stack_slot) {
  CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
  ZoneVector<MoveOperands*> pushes(zone());
  GetPushCompatibleMoves(instr, flags, &pushes);

  if (!pushes.empty() &&
      (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
       first_unused_stack_slot)) {
    ArmOperandConverter g(this, instr);
    ZoneVector<Register> pending_pushes(zone());
    for (auto move : pushes) {
      LocationOperand destination_location(
          LocationOperand::cast(move->destination()));
      InstructionOperand source(move->source());
      AdjustStackPointerForTailCall(
          masm(), frame_access_state(),
          destination_location.index() - pending_pushes.size(),
          &pending_pushes);
      if (source.IsStackSlot()) {
        LocationOperand source_location(LocationOperand::cast(source));
        __ ldr(ip, g.SlotToMemOperand(source_location.index()));
        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
                               ip);
      } else if (source.IsRegister()) {
        LocationOperand source_location(LocationOperand::cast(source));
        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
                               source_location.GetRegister());
      } else if (source.IsImmediate()) {
        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
                               ip);
      } else {
        // Pushes of non-scalar data types are not supported.
        UNIMPLEMENTED();
      }
      move->Eliminate();
    }
    FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
  }
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot, nullptr, false);
}

void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot);
}

// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    Instruction* instr) {
  ArmOperandConverter i(this, instr);

  __ MaybeCheckConstPool();
  InstructionCode opcode = instr->opcode();
  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
  switch (arch_opcode) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ add(ip, i.InputRegister(0),
               Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Call(ip);
      }
      RecordCallPosition(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObjectFromJSFunction:
    case kArchTailCallCodeObject: {
      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                         i.TempRegister(0), i.TempRegister(1),
                                         i.TempRegister(2));
      }
      if (instr->InputAt(0)->IsImmediate()) {
        __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ add(ip, i.InputRegister(0),
               Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Jump(ip);
      }
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      unwinding_info_writer_.MarkBlockWillExit();
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchTailCallAddress: {
      CHECK(!instr->InputAt(0)->IsImmediate());
      __ Jump(i.InputRegister(0));
      unwinding_info_writer_.MarkBlockWillExit();
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, kScratchReg);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(ip);
      RecordCallPosition(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallJSFunctionFromJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, kScratchReg);
        __ Assert(eq, kWrongFunctionContext);
      }
      AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                       i.TempRegister(0), i.TempRegister(1),
                                       i.TempRegister(2));
      __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Jump(ip);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchPrepareCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, kScratchReg);
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall();
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      frame_access_state()->SetFrameAccessToDefault();
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchDebugBreak:
      __ stop("kArchDebugBreak");
      break;
    case kArchComment: {
      Address comment_string = i.InputExternalReference(0).address();
      __ RecordComment(reinterpret_cast<const char*>(comment_string));
      break;
    }
    case kArchNop:
    case kArchThrowTerminator:
      // Don't emit code for nops.
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      Deoptimizer::BailoutType bailout_type =
          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
      CodeGenResult result = AssembleDeoptimizerCall(
          deopt_state_id, bailout_type, current_source_position_);
      if (result != kSuccess) return result;
      break;
    }
    case kArchRet:
      AssembleReturn(instr->InputAt(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), sp);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), fp);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->has_frame()) {
        __ ldr(i.OutputRegister(), MemOperand(fp, 0));
      } else {
        __ mov(i.OutputRegister(), fp);
      }
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      OutOfLineRecordWrite* ool;

      AddressingMode addressing_mode =
          AddressingModeField::decode(instr->opcode());
      if (addressing_mode == kMode_Offset_RI) {
        int32_t index = i.InputInt32(1);
        ool = new (zone())
            OutOfLineRecordWrite(this, object, index, value, scratch0,
                                 scratch1, mode, &unwinding_info_writer_);
        __ str(value, MemOperand(object, index));
      } else {
        DCHECK_EQ(kMode_Offset_RR, addressing_mode);
        Register index(i.InputRegister(1));
        ool = new (zone())
            OutOfLineRecordWrite(this, object, index, value, scratch0,
                                 scratch1, mode, &unwinding_info_writer_);
        __ str(value, MemOperand(object, index));
      }
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                       ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      Register base;
      if (offset.from_stack_pointer()) {
        base = sp;
      } else {
        base = fp;
      }
      __ add(i.OutputRegister(0), base, Operand(offset.offset()));
      break;
    }
    case kIeee754Float64Acos:
      ASSEMBLE_IEEE754_UNOP(acos);
      break;
    case kIeee754Float64Acosh:
      ASSEMBLE_IEEE754_UNOP(acosh);
      break;
    case kIeee754Float64Asin:
      ASSEMBLE_IEEE754_UNOP(asin);
      break;
    case kIeee754Float64Asinh:
      ASSEMBLE_IEEE754_UNOP(asinh);
      break;
    case kIeee754Float64Atan:
      ASSEMBLE_IEEE754_UNOP(atan);
      break;
    case kIeee754Float64Atanh:
      ASSEMBLE_IEEE754_UNOP(atanh);
      break;
    case kIeee754Float64Atan2:
      ASSEMBLE_IEEE754_BINOP(atan2);
      break;
    case kIeee754Float64Cbrt:
      ASSEMBLE_IEEE754_UNOP(cbrt);
      break;
    case kIeee754Float64Cos:
      ASSEMBLE_IEEE754_UNOP(cos);
      break;
    case kIeee754Float64Cosh:
      ASSEMBLE_IEEE754_UNOP(cosh);
      break;
    case kIeee754Float64Exp:
      ASSEMBLE_IEEE754_UNOP(exp);
      break;
    case kIeee754Float64Expm1:
      ASSEMBLE_IEEE754_UNOP(expm1);
      break;
    case kIeee754Float64Log:
      ASSEMBLE_IEEE754_UNOP(log);
      break;
    case kIeee754Float64Log1p:
      ASSEMBLE_IEEE754_UNOP(log1p);
      break;
    case kIeee754Float64Log2:
      ASSEMBLE_IEEE754_UNOP(log2);
      break;
    case kIeee754Float64Log10:
      ASSEMBLE_IEEE754_UNOP(log10);
      break;
    case kIeee754Float64Pow: {
      MathPowStub stub(isolate(), MathPowStub::DOUBLE);
      __ CallStub(&stub);
      __ vmov(d0, d2);
      break;
    }
    case kIeee754Float64Sin:
      ASSEMBLE_IEEE754_UNOP(sin);
      break;
    case kIeee754Float64Sinh:
      ASSEMBLE_IEEE754_UNOP(sinh);
      break;
    case kIeee754Float64Tan:
      ASSEMBLE_IEEE754_UNOP(tan);
      break;
    case kIeee754Float64Tanh:
      ASSEMBLE_IEEE754_UNOP(tanh);
      break;
    case kArmAdd:
      __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmAnd:
      __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
              i.OutputSBit());
      break;
    case kArmBic:
      __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmMul:
      __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.OutputSBit());
      break;
    case kArmMla:
      __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2), i.OutputSBit());
      break;
    case kArmMls: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmSmull:
      __ smull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
               i.InputRegister(1));
      break;
    case kArmSmmul:
      __ smmul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmSmmla:
      __ smmla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputRegister(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUmull:
      __ umull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
               i.InputRegister(1), i.OutputSBit());
      break;
    case kArmSdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmUdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmMov:
      __ Move(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
      break;
    case kArmMvn:
      __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
      break;
    case kArmOrr:
      __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmEor:
      __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmSub:
      __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmRsb:
      __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmBfc: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmUbfx: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmSbfx: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmSxtb:
      __ sxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmSxth:
      __ sxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmSxtab:
      __ sxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputInt32(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmSxtah:
      __ sxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputInt32(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUxtb:
      __ uxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUxth:
      __ uxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUxtab:
      __ uxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputInt32(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUxtah:
      __ uxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputInt32(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmRbit: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ rbit(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmClz:
      __ clz(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmCmp:
      __ cmp(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmCmn:
      __ cmn(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmTst:
      __ tst(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmTeq:
      __ teq(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmAddPair:
      // i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
      __ add(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2),
             SBit::SetCC);
      __ adc(i.OutputRegister(1), i.InputRegister(1),
             Operand(i.InputRegister(3)));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmSubPair:
      // i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
      __ sub(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2),
             SBit::SetCC);
      __ sbc(i.OutputRegister(1), i.InputRegister(1),
             Operand(i.InputRegister(3)));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
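    // For 64-bit multiplication with the operands split into (low, high)
    // words, the low 64 bits of the product are
    //   out = a_lo * b_lo                    (umull)
    //   out_hi += a_lo * b_hi + a_hi * b_lo  (the two mla instructions)
    // The high words of the cross products only affect bits >= 64 and are
    // dropped.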
    case kArmMulPair:
      // i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
      __ umull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
               i.InputRegister(2));
      __ mla(i.OutputRegister(1), i.InputRegister(0), i.InputRegister(3),
             i.OutputRegister(1));
      __ mla(i.OutputRegister(1), i.InputRegister(2), i.InputRegister(1),
             i.OutputRegister(1));
      break;
    case kArmLslPair: {
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsImmediate()) {
        __ LslPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), i.InputInt32(2));
      } else {
        __ LslPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), kScratchReg, i.InputRegister(2));
      }
      break;
    }
    case kArmLsrPair: {
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsImmediate()) {
        __ LsrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), i.InputInt32(2));
      } else {
        __ LsrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), kScratchReg, i.InputRegister(2));
      }
      break;
    }
    case kArmAsrPair: {
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsImmediate()) {
        __ AsrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), i.InputInt32(2));
      } else {
        __ AsrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), kScratchReg, i.InputRegister(2));
      }
      break;
    }
    case kArmVcmpF32:
      if (instr->InputAt(1)->IsFPRegister()) {
        __ VFPCompareAndSetFlags(i.InputFloatRegister(0),
                                 i.InputFloatRegister(1));
      } else {
        DCHECK(instr->InputAt(1)->IsImmediate());
        // 0.0 is the only immediate supported by vcmp instructions.
        DCHECK(i.InputFloat32(1) == 0.0f);
        __ VFPCompareAndSetFlags(i.InputFloatRegister(0), i.InputFloat32(1));
      }
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmVaddF32:
      __ vadd(i.OutputFloatRegister(), i.InputFloatRegister(0),
              i.InputFloatRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVsubF32:
      __ vsub(i.OutputFloatRegister(), i.InputFloatRegister(0),
              i.InputFloatRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmulF32:
      __ vmul(i.OutputFloatRegister(), i.InputFloatRegister(0),
              i.InputFloatRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlaF32:
      __ vmla(i.OutputFloatRegister(), i.InputFloatRegister(1),
              i.InputFloatRegister(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlsF32:
      __ vmls(i.OutputFloatRegister(), i.InputFloatRegister(1),
              i.InputFloatRegister(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVdivF32:
      __ vdiv(i.OutputFloatRegister(), i.InputFloatRegister(0),
              i.InputFloatRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVsqrtF32:
      __ vsqrt(i.OutputFloatRegister(), i.InputFloatRegister(0));
      break;
    case kArmVabsF32:
      __ vabs(i.OutputFloatRegister(), i.InputFloatRegister(0));
      break;
    case kArmVnegF32:
      __ vneg(i.OutputFloatRegister(), i.InputFloatRegister(0));
      break;
    case kArmVcmpF64:
      if (instr->InputAt(1)->IsFPRegister()) {
        __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
                                 i.InputDoubleRegister(1));
      } else {
        DCHECK(instr->InputAt(1)->IsImmediate());
        // 0.0 is the only immediate supported by vcmp instructions.
        DCHECK(i.InputDouble(1) == 0.0);
        __ VFPCompareAndSetFlags(i.InputDoubleRegister(0), i.InputDouble(1));
      }
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmVaddF64:
      __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVsubF64:
      __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmulF64:
      __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlaF64:
      __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
              i.InputDoubleRegister(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlsF64:
      __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
              i.InputDoubleRegister(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVdivF64:
      __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmodF64: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(i.OutputDoubleRegister());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVsqrtF64:
      __ vsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArmVabsF64:
      __ vabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArmVnegF64:
      __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArmVrintmF32: {
      CpuFeatureScope scope(masm(), ARMv8);
      __ vrintm(i.OutputFloatRegister(), i.InputFloatRegister(0));
      break;
    }
    case kArmVrintmF64: {
      CpuFeatureScope scope(masm(), ARMv8);
      __ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kArmVrintpF32: {
      CpuFeatureScope scope(masm(), ARMv8);
      __ vrintp(i.OutputFloatRegister(), i.InputFloatRegister(0));
      break;
    }
    case kArmVrintpF64: {
      CpuFeatureScope scope(masm(), ARMv8);
      __ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kArmVrintzF32: {
      CpuFeatureScope scope(masm(), ARMv8);
      __ vrintz(i.OutputFloatRegister(), i.InputFloatRegister(0));
      break;
    }
    case kArmVrintzF64: {
      CpuFeatureScope scope(masm(), ARMv8);
      __ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kArmVrintaF64: {
      CpuFeatureScope scope(masm(), ARMv8);
      __ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kArmVrintnF32: {
      CpuFeatureScope scope(masm(), ARMv8);
      __ vrintn(i.OutputFloatRegister(), i.InputFloatRegister(0));
      break;
    }
    case kArmVrintnF64: {
      CpuFeatureScope scope(masm(), ARMv8);
      __ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kArmVcvtF32F64: {
      __ vcvt_f32_f64(i.OutputFloatRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64F32: {
      __ vcvt_f64_f32(i.OutputDoubleRegister(), i.InputFloatRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF32S32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f32_s32(i.OutputFloatRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF32U32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f32_u32(i.OutputFloatRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64S32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64U32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtS32F32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_s32_f32(scratch, i.InputFloatRegister(0));
      __ vmov(i.OutputRegister(), scratch);
      // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
      // because INT32_MIN allows easier out-of-bounds detection.
      __ cmn(i.OutputRegister(), Operand(1));
      __ mov(i.OutputRegister(), Operand(INT32_MIN), SBit::LeaveCC, vs);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtU32F32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_u32_f32(scratch, i.InputFloatRegister(0));
      __ vmov(i.OutputRegister(), scratch);
      // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
      // because 0 allows easier out-of-bounds detection.
      __ cmn(i.OutputRegister(), Operand(1));
      __ adc(i.OutputRegister(), i.OutputRegister(), Operand::Zero());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtS32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
      __ vmov(i.OutputRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtU32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
      __ vmov(i.OutputRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVmovU32F32:
      __ vmov(i.OutputRegister(), i.InputFloatRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmovF32U32:
      __ vmov(i.OutputFloatRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmovLowU32F64:
      __ VmovLow(i.OutputRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmovLowF64U32:
      __ VmovLow(i.OutputDoubleRegister(), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmovHighU32F64:
      __ VmovHigh(i.OutputRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmovHighF64U32:
      __ VmovHigh(i.OutputDoubleRegister(), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmovF64U32U32:
      __ vmov(i.OutputDoubleRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmovU32U32F64:
      __ vmov(i.OutputRegister(0), i.OutputRegister(1),
              i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmLdrb:
      __ ldrb(i.OutputRegister(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmLdrsb:
      __ ldrsb(i.OutputRegister(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmStrb:
      __ strb(i.InputRegister(0), i.InputOffset(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmLdrh:
      __ ldrh(i.OutputRegister(), i.InputOffset());
      break;
    case kArmLdrsh:
      __ ldrsh(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStrh:
      __ strh(i.InputRegister(0), i.InputOffset(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmLdr:
      __ ldr(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStr:
      __ str(i.InputRegister(0), i.InputOffset(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVldrF32: {
      __ vldr(i.OutputFloatRegister(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVstrF32:
      __ vstr(i.InputFloatRegister(0), i.InputOffset(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVldrF64:
      __ vldr(i.OutputDoubleRegister(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVstrF64:
      __ vstr(i.InputDoubleRegister(0), i.InputOffset(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmFloat32Max: {
      SwVfpRegister result = i.OutputFloatRegister();
      SwVfpRegister left = i.InputFloatRegister(0);
      SwVfpRegister right = i.InputFloatRegister(1);
      if (left.is(right)) {
        __ Move(result, left);
      } else {
        auto ool = new (zone()) OutOfLineFloat32Max(this, result, left, right);
        __ FloatMax(result, left, right, ool->entry());
        __ bind(ool->exit());
      }
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmFloat64Max: {
      DwVfpRegister result = i.OutputDoubleRegister();
      DwVfpRegister left = i.InputDoubleRegister(0);
      DwVfpRegister right = i.InputDoubleRegister(1);
      if (left.is(right)) {
        __ Move(result, left);
      } else {
        auto ool = new (zone()) OutOfLineFloat64Max(this, result, left, right);
        __ FloatMax(result, left, right, ool->entry());
        __ bind(ool->exit());
      }
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmFloat32Min: {
      SwVfpRegister result = i.OutputFloatRegister();
      SwVfpRegister left = i.InputFloatRegister(0);
      SwVfpRegister right = i.InputFloatRegister(1);
      if (left.is(right)) {
        __ Move(result, left);
      } else {
        auto ool = new (zone()) OutOfLineFloat32Min(this, result, left, right);
        __ FloatMin(result, left, right, ool->entry());
        __ bind(ool->exit());
      }
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmFloat64Min: {
      DwVfpRegister result = i.OutputDoubleRegister();
      DwVfpRegister left = i.InputDoubleRegister(0);
      DwVfpRegister right = i.InputDoubleRegister(1);
      if (left.is(right)) {
        __ Move(result, left);
      } else {
        auto ool = new (zone()) OutOfLineFloat64Min(this, result, left, right);
        __ FloatMin(result, left, right, ool->entry());
        __ bind(ool->exit());
      }
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmFloat64SilenceNaN: {
      DwVfpRegister value = i.InputDoubleRegister(0);
      DwVfpRegister result = i.OutputDoubleRegister();
      __ VFPCanonicalizeNaN(result, value);
      break;
    }
    case kArmPush:
      if (instr->InputAt(0)->IsFPRegister()) {
        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
        if (op->representation() == MachineRepresentation::kFloat64) {
          __ vpush(i.InputDoubleRegister(0));
          frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
        } else {
          DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
          __ vpush(i.InputFloatRegister(0));
          frame_access_state()->IncreaseSPDelta(1);
        }
      } else {
        __ push(i.InputRegister(0));
        frame_access_state()->IncreaseSPDelta(1);
      }
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmPoke: {
      int const slot = MiscField::decode(instr->opcode());
      __ str(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrb);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsh);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrh);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FP(Float);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FP(Double);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(strb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(strh);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(str);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FP(Float);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FP(Double);
      break;
    case kCheckedLoadWord64:
    case kCheckedStoreWord64:
      UNREACHABLE();  // currently unsupported checked int64 load/store.
      break;

    case kAtomicLoadInt8:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsb);
      break;
    case kAtomicLoadUint8:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrb);
      break;
    case kAtomicLoadInt16:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsh);
      break;
    case kAtomicLoadUint16:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrh);
      break;
    case kAtomicLoadWord32:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
      break;

    case kAtomicStoreWord8:
      ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
      break;
    case kAtomicStoreWord16:
      ASSEMBLE_ATOMIC_STORE_INTEGER(strh);
      break;
    case kAtomicStoreWord32:
      ASSEMBLE_ATOMIC_STORE_INTEGER(str);
      break;
  }
  return kSuccess;
}  // NOLINT(readability/fn_size)


// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  ArmOperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  Condition cc = FlagsConditionToCondition(branch->condition);
  __ b(cc, tlabel);
  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
}


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  ArmOperandConverter i(this, instr);

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = FlagsConditionToCondition(condition);
  __ mov(reg, Operand(0));
  __ mov(reg, Operand(1), LeaveCC, cc);
}


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  ArmOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ cmp(input, Operand(i.InputInt32(index + 0)));
    __ b(eq, GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}

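// The table switch below emits an inline jump table made of branch
// instructions. The "add pc, pc, input, LSL #2" relies on reading pc as the
// address of the current instruction plus 8, which lands exactly on the table
// of branches following the default branch; the constant pool is blocked so
// that nothing gets interleaved with the table.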
AssembleArchTableSwitch(Instruction * instr)1620 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
1621 ArmOperandConverter i(this, instr);
1622 Register input = i.InputRegister(0);
1623 size_t const case_count = instr->InputCount() - 2;
1624 // Ensure to emit the constant pool first if necessary.
1625 __ CheckConstPool(true, true);
1626 __ cmp(input, Operand(case_count));
1627 __ BlockConstPoolFor(case_count + 2);
1628 __ add(pc, pc, Operand(input, LSL, 2), LeaveCC, lo);
1629 __ b(GetLabel(i.InputRpo(1)));
1630 for (size_t index = 0; index < case_count; ++index) {
1631 __ b(GetLabel(i.InputRpo(index + 2)));
1632 }
1633 }

CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
    SourcePosition pos) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  // TODO(turbofan): We should be able to generate better code by sharing the
  // actual final call site and just bl'ing to it here, similar to what we do
  // in the lithium backend.
  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
  DeoptimizeReason deoptimization_reason =
      GetDeoptimizationReason(deoptimization_id);
  __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
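  // With (force_emit, require_jump) both false, this emits the constant pool
  // only if it is already due; nothing forces emission here.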
  __ CheckConstPool(false, false);
  return kSuccess;
}

void CodeGenerator::FinishFrame(Frame* frame) {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
  if (saves_fp != 0) {
    frame->AlignSavedCalleeRegisterSlots();
  }

  if (saves_fp != 0) {
    // Save callee-saved FP registers.
    STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
    uint32_t last = 31 - base::bits::CountLeadingZeros32(saves_fp);
    uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
    DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
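    // The mask must be a contiguous range of D registers, because
    // AssembleConstructFrame saves it with a single vstm; each D register
    // occupies two pointer-sized slots.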
    frame->AllocateSavedCalleeRegisterSlots((last - first + 1) *
                                            (kDoubleSize / kPointerSize));
  }
  const RegList saves = FLAG_enable_embedded_constant_pool
                            ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
                            : descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    // Save callee-saved registers.
    frame->AllocateSavedCalleeRegisterSlots(
        base::bits::CountPopulation32(saves));
  }
}

void CodeGenerator::AssembleConstructFrame() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (frame_access_state()->has_frame()) {
    if (descriptor->IsCFunctionCall()) {
      if (FLAG_enable_embedded_constant_pool) {
        __ Push(lr, fp, pp);
        // Adjust FP to point to saved FP.
        __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
      } else {
        __ Push(lr, fp);
        __ mov(fp, sp);
      }
    } else if (descriptor->IsJSFunctionCall()) {
      __ Prologue(this->info()->GeneratePreagedPrologue());
      if (descriptor->PushArgumentCount()) {
        __ Push(kJavaScriptCallArgCountRegister);
      }
    } else {
      __ StubPrologue(info()->GetOutputStackFrameType());
    }

    if (!info()->GeneratePreagedPrologue()) {
      unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
    }
  }

  int shrink_slots =
      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the
    // unoptimized frame is still on the stack. Optimized code uses OSR values
    // directly from the unoptimized frame. Thus, all that needs to be done is
    // to allocate the remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
  if (shrink_slots > 0) {
    __ sub(sp, sp, Operand(shrink_slots * kPointerSize));
  }

  if (saves_fp != 0) {
    // Save callee-saved FP registers.
    STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
    uint32_t last = 31 - base::bits::CountLeadingZeros32(saves_fp);
    uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
    DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
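    // vstm with db_w decrements sp before each store and writes the final
    // value back, i.e. it pushes the whole contiguous D register range with a
    // single instruction.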
    __ vstm(db_w, sp, DwVfpRegister::from_code(first),
            DwVfpRegister::from_code(last));
  }
  const RegList saves = FLAG_enable_embedded_constant_pool
                            ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
                            : descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    // Save callee-saved registers.
    __ stm(db_w, sp, saves);
  }
}

void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int pop_count = static_cast<int>(descriptor->StackParameterCount());

  // Restore registers.
  const RegList saves = FLAG_enable_embedded_constant_pool
                            ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
                            : descriptor->CalleeSavedRegisters();
  if (saves != 0) {
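    // ldm with ia_w increments sp after each load and writes the final value
    // back, i.e. it pops the saved general-purpose registers with a single
    // instruction.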
    __ ldm(ia_w, sp, saves);
  }

  // Restore FP registers.
  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
  if (saves_fp != 0) {
    STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
    uint32_t last = 31 - base::bits::CountLeadingZeros32(saves_fp);
    uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
    __ vldm(ia_w, sp, DwVfpRegister::from_code(first),
            DwVfpRegister::from_code(last));
  }

  unwinding_info_writer_.MarkBlockWillExit();

  ArmOperandConverter g(this, nullptr);
  if (descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now, unless they have a
    // variable number of stack slot pops.
    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
      if (return_label_.is_bound()) {
        __ b(&return_label_);
        return;
      } else {
        __ bind(&return_label_);
        AssembleDeconstructFrame();
      }
    } else {
      AssembleDeconstructFrame();
    }
  }

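  // Drop the statically known stack-parameter count plus, if present, an
  // extra pop count supplied at run time in a register.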
  if (pop->IsImmediate()) {
    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
    pop_count += g.ToConstant(pop).ToInt32();
  } else {
    __ Drop(g.ToRegister(pop));
  }
  __ Drop(pop_count);
  __ Ret();
}

void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ str(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ldr(g.ToRegister(destination), src);
    } else {
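      // ARM has no memory-to-memory move, so bounce the value through the
      // scratch register.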
      Register temp = kScratchReg;
      __ ldr(temp, src);
      __ str(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
              src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
            __ mov(dst, Operand(src.ToInt32(), src.rmode()));
          } else {
            __ mov(dst, Operand(src.ToInt32()));
          }
          break;
        case Constant::kInt64:
          UNREACHABLE();
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ mov(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ Move(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): loading RPO constants on arm.
          break;
      }
      if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsFloatStackSlot()) {
        MemOperand dst = g.ToMemOperand(destination);
        __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
        __ str(ip, dst);
      } else {
        SwVfpRegister dst = g.ToFloatRegister(destination);
        __ vmov(dst, src.ToFloat32());
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      DwVfpRegister dst = destination->IsFPRegister()
                              ? g.ToDoubleRegister(destination)
                              : kScratchDoubleReg;
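      // The GP scratch register is used when the double cannot be encoded as
      // a VFP immediate and has to be materialized via integer moves.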
      __ vmov(dst, src.ToFloat64(), kScratchReg);
      if (destination->IsDoubleStackSlot()) {
        __ vstr(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsFPRegister()) {
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (rep == MachineRepresentation::kFloat64) {
      DwVfpRegister src = g.ToDoubleRegister(source);
      if (destination->IsDoubleRegister()) {
        DwVfpRegister dst = g.ToDoubleRegister(destination);
        __ Move(dst, src);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ vstr(src, g.ToMemOperand(destination));
      }
    } else {
      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
      // GapResolver may give us reg codes that don't map to actual
      // s-registers. Generate code to work around those cases.
      int src_code = LocationOperand::cast(source)->register_code();
      if (destination->IsFloatRegister()) {
        int dst_code = LocationOperand::cast(destination)->register_code();
        __ VmovExtended(dst_code, src_code, kScratchReg);
      } else {
        DCHECK(destination->IsFloatStackSlot());
        __ VmovExtended(g.ToMemOperand(destination), src_code, kScratchReg);
      }
    }
  } else if (source->IsFPStackSlot()) {
    MemOperand src = g.ToMemOperand(source);
    MachineRepresentation rep =
        LocationOperand::cast(destination)->representation();
    if (destination->IsFPRegister()) {
      if (rep == MachineRepresentation::kFloat64) {
        __ vldr(g.ToDoubleRegister(destination), src);
      } else {
        DCHECK_EQ(MachineRepresentation::kFloat32, rep);
        // GapResolver may give us reg codes that don't map to actual
        // s-registers. Generate code to work around those cases.
        int dst_code = LocationOperand::cast(destination)->register_code();
        __ VmovExtended(dst_code, src, kScratchReg);
      }
    } else {
      DCHECK(destination->IsFPStackSlot());
      if (rep == MachineRepresentation::kFloat64) {
        DwVfpRegister temp = kScratchDoubleReg;
        __ vldr(temp, src);
        __ vstr(temp, g.ToMemOperand(destination));
      } else {
        DCHECK_EQ(MachineRepresentation::kFloat32, rep);
        SwVfpRegister temp = kScratchDoubleReg.low();
        __ vldr(temp, src);
        __ vstr(temp, g.ToMemOperand(destination));
      }
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ ldr(src, dst);
      __ str(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    SwVfpRegister temp_1 = kScratchDoubleReg.low();
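    // A stack-to-stack swap needs two scratch values in flight at once; use
    // one GP register and one FP register so neither value is clobbered.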
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ ldr(temp_0, src);
    __ vldr(temp_1, dst);
    __ str(temp_0, dst);
    __ vstr(temp_1, src);
  } else if (source->IsFPRegister()) {
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    LowDwVfpRegister temp = kScratchDoubleReg;
    if (rep == MachineRepresentation::kFloat64) {
      DwVfpRegister src = g.ToDoubleRegister(source);
      if (destination->IsFPRegister()) {
        DwVfpRegister dst = g.ToDoubleRegister(destination);
        __ vswp(src, dst);
      } else {
        DCHECK(destination->IsFPStackSlot());
        MemOperand dst = g.ToMemOperand(destination);
        __ Move(temp, src);
        __ vldr(src, dst);
        __ vstr(temp, dst);
      }
    } else {
      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
      int src_code = LocationOperand::cast(source)->register_code();
      if (destination->IsFPRegister()) {
        int dst_code = LocationOperand::cast(destination)->register_code();
        __ VmovExtended(temp.low().code(), src_code, kScratchReg);
        __ VmovExtended(src_code, dst_code, kScratchReg);
        __ VmovExtended(dst_code, temp.low().code(), kScratchReg);
      } else {
        DCHECK(destination->IsFPStackSlot());
        MemOperand dst = g.ToMemOperand(destination);
        __ VmovExtended(temp.low().code(), src_code, kScratchReg);
        __ VmovExtended(src_code, dst, kScratchReg);
        __ vstr(temp.low(), dst);
      }
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPStackSlot());
    Register temp_0 = kScratchReg;
    LowDwVfpRegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand dst0 = g.ToMemOperand(destination);
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (rep == MachineRepresentation::kFloat64) {
      MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
      MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
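      // temp_1 holds the entire destination double while the source is
      // copied over it word by word through temp_0; only one D scratch
      // register is available.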
      __ vldr(temp_1, dst0);  // Save destination in temp_1.
      __ ldr(temp_0, src0);  // Then use temp_0 to copy source to destination.
      __ str(temp_0, dst0);
      __ ldr(temp_0, src1);
      __ str(temp_0, dst1);
      __ vstr(temp_1, src0);
    } else {
      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
      __ vldr(temp_1.low(), dst0);  // Save destination in temp_1.
      __ ldr(temp_0, src0);  // Then use temp_0 to copy source to destination.
      __ str(temp_0, dst0);
      __ vstr(temp_1.low(), src0);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}

void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 32-bit ARM we emit the jump tables inline.
  UNREACHABLE();
}


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
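  // patch_size() is the number of bytes the deoptimizer later overwrites
  // with a call when this code is lazily deoptimized, so at least that much
  // room must separate consecutive lazy-bailout points.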
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    // Block literal pool emission for the duration of the padding.
    v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
    while (padding_size > 0) {
      __ nop();
      padding_size -= v8::internal::Assembler::kInstrSize;
    }
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8