// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm64/macro-assembler-arm64.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


// Adds Arm64-specific methods to convert InstructionOperands.
class Arm64OperandConverter FINAL : public InstructionOperandConverter {
 public:
  Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Register InputRegister32(int index) {
    return ToRegister(instr_->InputAt(index)).W();
  }

  Register InputRegister64(int index) { return InputRegister(index); }

  Operand InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Operand InputOperand64(int index) { return InputOperand(index); }

  Operand InputOperand32(int index) {
    return ToOperand32(instr_->InputAt(index));
  }

  Register OutputRegister64() { return OutputRegister(); }

  Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }

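  // Decodes the instruction's addressing mode into a MemOperand, consuming
  // the inputs starting at *first_index and advancing the index past them.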
  MemOperand MemoryOperand(int* first_index) {
    const int index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
                          SXTW);
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }

  MemOperand MemoryOperand() {
    int index = 0;
    return MemoryOperand(&index);
  }

  Operand ToOperand(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return ToImmediate(op);
  }

  Operand ToOperand32(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op).W());
    }
    return ToImmediate(op);
  }

  Operand ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
        return Operand(constant.ToExternalReference());
      case Constant::kHeapObject:
        return Operand(constant.ToHeapObject());
    }
    UNREACHABLE();
    return Operand(-1);
  }

  MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
    DCHECK(op != NULL);
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
                      offset.offset());
  }
};


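// Emits a shift instruction, taking the shift amount either from a register
// or from an immediate, depending on the form of the instruction's second
// input.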
#define ASSEMBLE_SHIFT(asm_instr, width)                                       \
  do {                                                                         \
    if (instr->InputAt(1)->IsRegister()) {                                     \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0),       \
                   i.InputRegister##width(1));                                 \
    } else {                                                                   \
      int64_t imm = i.InputOperand##width(1).immediate().value();              \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
    }                                                                          \
  } while (0);


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  switch (ArchOpcodeField::decode(opcode)) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        Register target = i.InputRegister(0);
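        // Compute the entry point from the code object: skip the header and
        // strip the heap object tag.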
        __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
        __ Call(target);
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        UseScratchRegisterScope scope(masm());
        Register temp = scope.AcquireX();
        __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, temp);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(x10);
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      __ B(code_->GetLabel(i.InputBlock(0)));
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Add:
      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Add32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Adds(i.OutputRegister32(), i.InputRegister32(0),
                i.InputOperand32(1));
      } else {
        __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      }
      break;
    case kArm64And:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64And32:
      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Mul32:
      __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Idiv:
      __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Idiv32:
      __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Udiv:
      __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Udiv32:
      __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
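    // The modulus cases below compute lhs - (lhs / rhs) * rhs with a division
    // followed by a multiply-subtract.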
    case kArm64Imod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Imod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    case kArm64Umod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Umod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    // TODO(dcarney): use mvn instr??
    case kArm64Not:
      __ Orn(i.OutputRegister(), xzr, i.InputOperand(0));
      break;
    case kArm64Not32:
      __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0));
      break;
    case kArm64Neg:
      __ Neg(i.OutputRegister(), i.InputOperand(0));
      break;
    case kArm64Neg32:
      __ Neg(i.OutputRegister32(), i.InputOperand32(0));
      break;
    case kArm64Or:
      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Or32:
      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Xor:
      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Xor32:
      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Sub:
      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Sub32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Subs(i.OutputRegister32(), i.InputRegister32(0),
                i.InputOperand32(1));
      } else {
        __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      }
      break;
    case kArm64Shl:
      ASSEMBLE_SHIFT(Lsl, 64);
      break;
    case kArm64Shl32:
      ASSEMBLE_SHIFT(Lsl, 32);
      break;
    case kArm64Shr:
      ASSEMBLE_SHIFT(Lsr, 64);
      break;
    case kArm64Shr32:
      ASSEMBLE_SHIFT(Lsr, 32);
      break;
    case kArm64Sar:
      ASSEMBLE_SHIFT(Asr, 64);
      break;
    case kArm64Sar32:
      ASSEMBLE_SHIFT(Asr, 32);
      break;
    case kArm64Ror:
      ASSEMBLE_SHIFT(Ror, 64);
      break;
    case kArm64Ror32:
      ASSEMBLE_SHIFT(Ror, 32);
      break;
    case kArm64Mov32:
      __ Mov(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxtw:
      __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
      break;
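    // The following cases set up outgoing stack arguments for a call: Claim
    // reserves stack space, Poke and PokePair store values into the slots.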
    case kArm64Claim: {
      int words = MiscField::decode(instr->opcode());
      __ Claim(words);
      break;
    }
    case kArm64Poke: {
      int slot = MiscField::decode(instr->opcode());
      Operand operand(slot * kPointerSize);
      __ Poke(i.InputRegister(0), operand);
      break;
    }
    case kArm64PokePairZero: {
      // TODO(dcarney): test slot offset and register order.
      int slot = MiscField::decode(instr->opcode()) - 1;
      __ PokePair(i.InputRegister(0), xzr, slot * kPointerSize);
      break;
    }
    case kArm64PokePair: {
      int slot = MiscField::decode(instr->opcode()) - 1;
      __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
      break;
    }
    case kArm64Cmp:
      __ Cmp(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Cmp32:
      __ Cmp(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Cmn:
      __ Cmn(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Cmn32:
      __ Cmn(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Tst:
      __ Tst(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Tst32:
      __ Tst(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Float64Cmp:
      __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kArm64Float64Add:
      __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Sub:
      __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mul:
      __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Div:
      __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mod: {
      // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
      FrameScope scope(masm(), StackFrame::MANUAL);
      DCHECK(d0.is(i.InputDoubleRegister(0)));
      DCHECK(d1.is(i.InputDoubleRegister(1)));
      DCHECK(d0.is(i.OutputDoubleRegister()));
      // TODO(dcarney): make sure this saves all relevant registers.
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      break;
    }
    case kArm64Float64Sqrt:
      __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64ToInt32:
      __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64ToUint32:
      __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Int32ToFloat64:
      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64Uint32ToFloat64:
      __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
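    // Memory accesses: the address is decoded by MemoryOperand() from the
    // instruction's addressing mode; for stores, the value is the third input.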
    case kArm64Ldrb:
      __ Ldrb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Ldrsb:
      __ Ldrsb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Strb:
      __ Strb(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64Ldrh:
      __ Ldrh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Ldrsh:
      __ Ldrsh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Strh:
      __ Strh(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LdrW:
      __ Ldr(i.OutputRegister32(), i.MemoryOperand());
      break;
    case kArm64StrW:
      __ Str(i.InputRegister32(2), i.MemoryOperand());
      break;
    case kArm64Ldr:
      __ Ldr(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Str:
      __ Str(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LdrS: {
      UseScratchRegisterScope scope(masm());
      FPRegister scratch = scope.AcquireS();
      __ Ldr(scratch, i.MemoryOperand());
      __ Fcvt(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kArm64StrS: {
      UseScratchRegisterScope scope(masm());
      FPRegister scratch = scope.AcquireS();
      __ Fcvt(scratch, i.InputDoubleRegister(2));
      __ Str(scratch, i.MemoryOperand());
      break;
    }
    case kArm64LdrD:
      __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kArm64StrD:
      __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
      break;
    case kArm64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ Add(index, object, Operand(index, SXTW));
      __ Str(value, MemOperand(index));
      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
                                ? kSaveFPRegs
                                : kDontSaveFPRegs;
      // TODO(dcarney): we shouldn't test write barriers from c calls.
      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
      UseScratchRegisterScope scope(masm());
      Register temp = no_reg;
      if (csp.is(masm()->StackPointer())) {
        temp = scope.AcquireX();
        lr_status = kLRHasBeenSaved;
        __ Push(lr, temp);  // Need to push a pair
      }
      __ RecordWrite(object, index, value, lr_status, mode);
      if (csp.is(masm()->StackPointer())) {
        __ Pop(temp, lr);
      }
      break;
    }
  }
}


// Assemble branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
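  // The "unordered" conditions can only occur after a floating point compare,
  // which sets the V flag when either operand was NaN, so the unordered case
  // is dispatched first with a branch on vs.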
  switch (condition) {
    case kUnorderedEqual:
      __ B(vs, flabel);
    // Fall through.
    case kEqual:
      __ B(eq, tlabel);
      break;
    case kUnorderedNotEqual:
      __ B(vs, tlabel);
    // Fall through.
    case kNotEqual:
      __ B(ne, tlabel);
      break;
    case kSignedLessThan:
      __ B(lt, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ B(ge, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ B(le, tlabel);
      break;
    case kSignedGreaterThan:
      __ B(gt, tlabel);
      break;
    case kUnorderedLessThan:
      __ B(vs, flabel);
    // Fall through.
    case kUnsignedLessThan:
      __ B(lo, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ B(vs, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ B(hs, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ B(vs, flabel);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ B(ls, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ B(vs, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ B(hi, tlabel);
      break;
    case kOverflow:
      __ B(vs, tlabel);
      break;
    case kNotOverflow:
      __ B(vc, tlabel);
      break;
  }
  if (!fallthru) __ B(flabel);  // no fallthru to flabel.
  __ Bind(&done);
}


// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = nv;
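  // For the "unordered" conditions an ordered result (vc) skips ahead to the
  // check label; an unordered result materializes the value directly and
  // jumps to done. All other conditions just select a condition code for the
  // Cset below.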
  switch (condition) {
    case kUnorderedEqual:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
    // Fall through.
    case kEqual:
      cc = eq;
      break;
    case kUnorderedNotEqual:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
    // Fall through.
    case kNotEqual:
      cc = ne;
      break;
    case kSignedLessThan:
      cc = lt;
      break;
    case kSignedGreaterThanOrEqual:
      cc = ge;
      break;
    case kSignedLessThanOrEqual:
      cc = le;
      break;
    case kSignedGreaterThan:
      cc = gt;
      break;
    case kUnorderedLessThan:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
    // Fall through.
    case kUnsignedLessThan:
      cc = lo;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = hs;
      break;
    case kUnorderedLessThanOrEqual:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = ls;
      break;
    case kUnorderedGreaterThan:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = hi;
      break;
    case kOverflow:
      cc = vs;
      break;
    case kNotOverflow:
      cc = vc;
      break;
  }
  __ bind(&check);
  __ Cset(reg, cc);
  __ Bind(&done);
}


void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


// TODO(dcarney): increase stack slots in frame once before first use.
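// The C stack pointer must stay aligned to 16 bytes (two slots), so round the
// slot count up to an even number.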
static int AlignedStackSlots(int stack_slots) {
  if (stack_slots & 1) stack_slots++;
  return stack_slots;
}


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ SetStackPointer(csp);
    __ Push(lr, fp);
    __ Mov(fp, csp);
    // TODO(dcarney): correct callee saved registers.
    __ PushCalleeSavedRegisters();
    frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ SetStackPointer(jssp);
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      // +2 for return address and saved frame pointer.
      int receiver_slot = info->scope()->num_parameters() + 2;
      __ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize));
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
      __ Ldr(x10, GlobalObjectMemOperand());
      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
      __ Str(x10, MemOperand(fp, receiver_slot * kXRegSize));
      __ Bind(&ok);
    }

  } else {
    __ SetStackPointer(jssp);
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  int stack_slots = frame()->GetSpillSlotCount();
  if (stack_slots > 0) {
    Register sp = __ StackPointer();
    if (!sp.Is(csp)) {
      __ Sub(sp, sp, stack_slots * kPointerSize);
    }
    __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
      }
      // Restore registers.
      // TODO(dcarney): correct callee saved registers.
      __ PopCalleeSavedRegisters();
    }
    __ Mov(csp, fp);
    __ Pop(fp, lr);
    __ Ret();
  } else {
    __ Mov(jssp, fp);
    __ Pop(fp, lr);
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ Drop(pop_count);
    __ Ret();
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Mov(g.ToRegister(destination), src);
    } else {
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsStackSlot()) {
    MemOperand src = g.ToMemOperand(source, masm());
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    if (destination->IsRegister()) {
      __ Ldr(g.ToRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsConstant()) {
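    // Constants are materialized into the destination register or into a
    // scratch register that is then spilled when the destination is a stack
    // slot; double destinations are loaded through Fmov.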
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      UseScratchRegisterScope scope(masm());
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : scope.AcquireX();
      Constant src = g.ToConstant(source);
      if (src.type() == Constant::kHeapObject) {
        __ LoadObject(dst, src.ToHeapObject());
      } else {
        __ Mov(dst, g.ToImmediate(source));
      }
      if (destination->IsStackSlot()) {
        __ Str(dst, g.ToMemOperand(destination, masm()));
      }
    } else if (destination->IsDoubleRegister()) {
      FPRegister result = g.ToDoubleRegister(destination);
      __ Fmov(result, g.ToDouble(constant_source));
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      UseScratchRegisterScope scope(masm());
      FPRegister temp = scope.AcquireD();
      __ Fmov(temp, g.ToDouble(constant_source));
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsDoubleRegister()) {
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source, masm());
    if (destination->IsDoubleRegister()) {
      __ Ldr(g.ToDoubleRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      FPRegister temp = scope.AcquireD();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    UseScratchRegisterScope scope(masm());
    Register temp = scope.AcquireX();
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Mov(temp, src);
      __ Mov(src, dst);
      __ Mov(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Mov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
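    // Swap two stack slots via a pair of X scratch registers; the copies are
    // bitwise, so this also covers double stack slots.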
    UseScratchRegisterScope scope(masm());
    CPURegister temp_0 = scope.AcquireX();
    CPURegister temp_1 = scope.AcquireX();
    MemOperand src = g.ToMemOperand(source, masm());
    MemOperand dst = g.ToMemOperand(destination, masm());
    __ Ldr(temp_0, src);
    __ Ldr(temp_1, dst);
    __ Str(temp_0, dst);
    __ Str(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    UseScratchRegisterScope scope(masm());
    FPRegister temp = scope.AcquireD();
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(temp, src);
      __ Fmov(src, dst);
      __ Fmov(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Fmov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


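// "movz xzr, #0" is architecturally a no-op (writes to the zero register are
// ignored) but has an encoding distinct from the canonical nop.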
void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!linkage()->info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);

      while (padding_size > 0) {
        __ nop();
        padding_size -= kInstructionSize;
      }
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8
880