1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if V8_TARGET_ARCH_ARM64
6
7 #include "src/base/bits.h"
8 #include "src/base/division-by-constant.h"
9 #include "src/bootstrapper.h"
10 #include "src/codegen.h"
11 #include "src/debug/debug.h"
12 #include "src/register-configuration.h"
13 #include "src/runtime/runtime.h"
14
15 #include "src/arm64/frames-arm64.h"
16 #include "src/arm64/macro-assembler-arm64.h"
17
18 namespace v8 {
19 namespace internal {
20
21 // Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
22 #define __
23
24
25 MacroAssembler::MacroAssembler(Isolate* arg_isolate, byte* buffer,
26 unsigned buffer_size,
27 CodeObjectRequired create_code_object)
28 : Assembler(arg_isolate, buffer, buffer_size),
29 generating_stub_(false),
30 #if DEBUG
31 allow_macro_instructions_(true),
32 #endif
33 has_frame_(false),
34 use_real_aborts_(true),
35 sp_(jssp),
36 tmp_list_(DefaultTmpList()),
37 fptmp_list_(DefaultFPTmpList()) {
38 if (create_code_object == CodeObjectRequired::kYes) {
39 code_object_ =
40 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
41 }
42 }
43
44
45 CPURegList MacroAssembler::DefaultTmpList() {
46 return CPURegList(ip0, ip1);
47 }
48
49
50 CPURegList MacroAssembler::DefaultFPTmpList() {
51 return CPURegList(fp_scratch1, fp_scratch2);
52 }
53
54
55 void MacroAssembler::LogicalMacro(const Register& rd,
56 const Register& rn,
57 const Operand& operand,
58 LogicalOp op) {
59 UseScratchRegisterScope temps(this);
60
61 if (operand.NeedsRelocation(this)) {
62 Register temp = temps.AcquireX();
63 Ldr(temp, operand.immediate());
64 Logical(rd, rn, temp, op);
65
66 } else if (operand.IsImmediate()) {
67 int64_t immediate = operand.ImmediateValue();
68 unsigned reg_size = rd.SizeInBits();
69
70 // If the operation is NOT, invert the operation and immediate.
71 if ((op & NOT) == NOT) {
72 op = static_cast<LogicalOp>(op & ~NOT);
73 immediate = ~immediate;
74 }
75
76 // Ignore the top 32 bits of an immediate if we're moving to a W register.
77 if (rd.Is32Bits()) {
78 // Check that the top 32 bits are consistent.
79 DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
80 ((immediate >> kWRegSizeInBits) == -1));
81 immediate &= kWRegMask;
82 }
83
84 DCHECK(rd.Is64Bits() || is_uint32(immediate));
85
86 // Special cases for all set or all clear immediates.
87 if (immediate == 0) {
88 switch (op) {
89 case AND:
90 Mov(rd, 0);
91 return;
92 case ORR: // Fall through.
93 case EOR:
94 Mov(rd, rn);
95 return;
96 case ANDS: // Fall through.
97 case BICS:
98 break;
99 default:
100 UNREACHABLE();
101 }
102 } else if ((rd.Is64Bits() && (immediate == -1L)) ||
103 (rd.Is32Bits() && (immediate == 0xffffffffL))) {
104 switch (op) {
105 case AND:
106 Mov(rd, rn);
107 return;
108 case ORR:
109 Mov(rd, immediate);
110 return;
111 case EOR:
112 Mvn(rd, rn);
113 return;
114 case ANDS: // Fall through.
115 case BICS:
116 break;
117 default:
118 UNREACHABLE();
119 }
120 }
121
122 unsigned n, imm_s, imm_r;
123 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
124 // Immediate can be encoded in the instruction.
125 LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
126 } else {
127 // Immediate can't be encoded: synthesize using move immediate.
128 Register temp = temps.AcquireSameSizeAs(rn);
129 Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
130 if (rd.Is(csp)) {
131 // If rd is the stack pointer we cannot use it as the destination
132 // register so we use the temp register as an intermediate again.
133 Logical(temp, rn, imm_operand, op);
134 Mov(csp, temp);
135 AssertStackConsistency();
136 } else {
137 Logical(rd, rn, imm_operand, op);
138 }
139 }
140
141 } else if (operand.IsExtendedRegister()) {
142 DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
143 // Add/sub extended supports shift <= 4. We want to support exactly the
144 // same modes here.
145 DCHECK(operand.shift_amount() <= 4);
146 DCHECK(operand.reg().Is64Bits() ||
147 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
148 Register temp = temps.AcquireSameSizeAs(rn);
149 EmitExtendShift(temp, operand.reg(), operand.extend(),
150 operand.shift_amount());
151 Logical(rd, rn, temp, op);
152
153 } else {
154 // The operand can be encoded in the instruction.
155 DCHECK(operand.IsShiftedRegister());
156 Logical(rd, rn, operand, op);
157 }
158 }
159
160
161 void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
162 DCHECK(allow_macro_instructions_);
163 DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
164 DCHECK(!rd.IsZero());
165
166 // TODO(all) extend to support more immediates.
167 //
168 // Immediates on AArch64 can be produced using an initial value, and zero to
169 // three move-keep operations.
170 //
171 // Initial values can be generated with:
172 // 1. 64-bit move zero (movz).
173 // 2. 32-bit move inverted (movn).
174 // 3. 64-bit move inverted.
175 // 4. 32-bit orr immediate.
176 // 5. 64-bit orr immediate.
177 // Move-keep may then be used to modify each of the 16-bit half-words.
178 //
179 // The code below supports all five initial value generators, and
180 // applying move-keep operations to move-zero and move-inverted initial
181 // values.
182
183 // Try to move the immediate in one instruction, and if that fails, switch to
184 // using multiple instructions.
185 if (!TryOneInstrMoveImmediate(rd, imm)) {
186 unsigned reg_size = rd.SizeInBits();
187
188 // Generic immediate case. Imm will be represented by
189 // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
190 // A move-zero or move-inverted is generated for the first non-zero or
191 // non-0xffff immX, and a move-keep for subsequent non-zero immX.
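    // Illustrative example (not part of the original source): assuming a
    // 64-bit destination, the value 0x0000cafe00001234 has no one-instruction
    // encoding, so this path emits a movz of 0x1234 for bits 0-15 followed by
    // a movk of 0xcafe shifted left by 32 for bits 32-47.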
192
193 uint64_t ignored_halfword = 0;
194 bool invert_move = false;
195 // If the number of 0xffff halfwords is greater than the number of 0x0000
196 // halfwords, it's more efficient to use move-inverted.
197 if (CountClearHalfWords(~imm, reg_size) >
198 CountClearHalfWords(imm, reg_size)) {
199 ignored_halfword = 0xffffL;
200 invert_move = true;
201 }
202
203 // Mov instructions can't move immediate values into the stack pointer, so
204 // set up a temporary register, if needed.
205 UseScratchRegisterScope temps(this);
206 Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
207
208 // Iterate through the halfwords. Use movn/movz for the first non-ignored
209 // halfword, and movk for subsequent halfwords.
210 DCHECK((reg_size % 16) == 0);
211 bool first_mov_done = false;
212 for (int i = 0; i < (rd.SizeInBits() / 16); i++) {
213 uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
214 if (imm16 != ignored_halfword) {
215 if (!first_mov_done) {
216 if (invert_move) {
217 movn(temp, (~imm16) & 0xffffL, 16 * i);
218 } else {
219 movz(temp, imm16, 16 * i);
220 }
221 first_mov_done = true;
222 } else {
223 // Construct a wider constant.
224 movk(temp, imm16, 16 * i);
225 }
226 }
227 }
228 DCHECK(first_mov_done);
229
230 // Move the temporary if the original destination register was the stack
231 // pointer.
232 if (rd.IsSP()) {
233 mov(rd, temp);
234 AssertStackConsistency();
235 }
236 }
237 }
238
239
240 void MacroAssembler::Mov(const Register& rd,
241 const Operand& operand,
242 DiscardMoveMode discard_mode) {
243 DCHECK(allow_macro_instructions_);
244 DCHECK(!rd.IsZero());
245
246 // Provide a swap register for instructions that need to write into the
247 // system stack pointer (and can't do this inherently).
248 UseScratchRegisterScope temps(this);
249 Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
250
251 if (operand.NeedsRelocation(this)) {
252 Ldr(dst, operand.immediate());
253
254 } else if (operand.IsImmediate()) {
255 // Call the macro assembler for generic immediates.
256 Mov(dst, operand.ImmediateValue());
257
258 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
259 // Emit a shift instruction if moving a shifted register. This operation
260 // could also be achieved using an orr instruction (like orn used by Mvn),
261 // but using a shift instruction makes the disassembly clearer.
262 EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
263
264 } else if (operand.IsExtendedRegister()) {
265 // Emit an extend instruction if moving an extended register. This handles
266 // extend with post-shift operations, too.
267 EmitExtendShift(dst, operand.reg(), operand.extend(),
268 operand.shift_amount());
269
270 } else {
271 // Otherwise, emit a register move only if the registers are distinct, or
272 // if they are not X registers.
273 //
274 // Note that mov(w0, w0) is not a no-op because it clears the top word of
275 // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
276 // registers is not required to clear the top word of the X register. In
277 // this case, the instruction is discarded.
278 //
279 // If csp is an operand, add #0 is emitted, otherwise, orr #0.
280 if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
281 (discard_mode == kDontDiscardForSameWReg))) {
282 Assembler::mov(rd, operand.reg());
283 }
284 // This case can handle writes into the system stack pointer directly.
285 dst = rd;
286 }
287
288 // Copy the result to the system stack pointer.
289 if (!dst.Is(rd)) {
290 DCHECK(rd.IsSP());
291 Assembler::mov(rd, dst);
292 }
293 }
294
295
296 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
297 DCHECK(allow_macro_instructions_);
298
299 if (operand.NeedsRelocation(this)) {
300 Ldr(rd, operand.immediate());
301 mvn(rd, rd);
302
303 } else if (operand.IsImmediate()) {
304 // Call the macro assembler for generic immediates.
305 Mov(rd, ~operand.ImmediateValue());
306
307 } else if (operand.IsExtendedRegister()) {
308 // Emit two instructions for the extend case. This differs from Mov, as
309 // the extend and invert can't be achieved in one instruction.
310 EmitExtendShift(rd, operand.reg(), operand.extend(),
311 operand.shift_amount());
312 mvn(rd, rd);
313
314 } else {
315 mvn(rd, operand);
316 }
317 }
318
319
320 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
321 DCHECK((reg_size % 8) == 0);
322 int count = 0;
323 for (unsigned i = 0; i < (reg_size / 16); i++) {
324 if ((imm & 0xffff) == 0) {
325 count++;
326 }
327 imm >>= 16;
328 }
329 return count;
330 }
331
332
333 // The movz instruction can generate immediates containing an arbitrary 16-bit
334 // half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
335 bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
336 DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
337 return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
338 }
339
340
341 // The movn instruction can generate immediates containing an arbitrary 16-bit
342 // half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
343 bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
344 return IsImmMovz(~imm, reg_size);
345 }
346
347
348 void MacroAssembler::ConditionalCompareMacro(const Register& rn,
349 const Operand& operand,
350 StatusFlags nzcv,
351 Condition cond,
352 ConditionalCompareOp op) {
353 DCHECK((cond != al) && (cond != nv));
354 if (operand.NeedsRelocation(this)) {
355 UseScratchRegisterScope temps(this);
356 Register temp = temps.AcquireX();
357 Ldr(temp, operand.immediate());
358 ConditionalCompareMacro(rn, temp, nzcv, cond, op);
359
360 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
361 (operand.IsImmediate() &&
362 IsImmConditionalCompare(operand.ImmediateValue()))) {
363 // The immediate can be encoded in the instruction, or the operand is an
364 // unshifted register: call the assembler.
365 ConditionalCompare(rn, operand, nzcv, cond, op);
366
367 } else {
368 // The operand isn't directly supported by the instruction: perform the
369 // operation on a temporary register.
370 UseScratchRegisterScope temps(this);
371 Register temp = temps.AcquireSameSizeAs(rn);
372 Mov(temp, operand);
373 ConditionalCompare(rn, temp, nzcv, cond, op);
374 }
375 }
376
377
378 void MacroAssembler::Csel(const Register& rd,
379 const Register& rn,
380 const Operand& operand,
381 Condition cond) {
382 DCHECK(allow_macro_instructions_);
383 DCHECK(!rd.IsZero());
384 DCHECK((cond != al) && (cond != nv));
385 if (operand.IsImmediate()) {
386 // Immediate argument. Handle special cases of 0, 1 and -1 using zero
387 // register.
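    // For example (illustrative), Csel(x0, x1, 1, eq) maps to
    // "csinc x0, x1, xzr, eq": x0 is x1 if eq, and xzr + 1 == 1 otherwise,
    // so no scratch register is needed for the constant.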
388 int64_t imm = operand.ImmediateValue();
389 Register zr = AppropriateZeroRegFor(rn);
390 if (imm == 0) {
391 csel(rd, rn, zr, cond);
392 } else if (imm == 1) {
393 csinc(rd, rn, zr, cond);
394 } else if (imm == -1) {
395 csinv(rd, rn, zr, cond);
396 } else {
397 UseScratchRegisterScope temps(this);
398 Register temp = temps.AcquireSameSizeAs(rn);
399 Mov(temp, imm);
400 csel(rd, rn, temp, cond);
401 }
402 } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
403 // Unshifted register argument.
404 csel(rd, rn, operand.reg(), cond);
405 } else {
406 // All other arguments.
407 UseScratchRegisterScope temps(this);
408 Register temp = temps.AcquireSameSizeAs(rn);
409 Mov(temp, operand);
410 csel(rd, rn, temp, cond);
411 }
412 }
413
414
415 bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
416 int64_t imm) {
417 unsigned n, imm_s, imm_r;
418 int reg_size = dst.SizeInBits();
419 if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
420 // Immediate can be represented in a move zero instruction. Movz can't write
421 // to the stack pointer.
422 movz(dst, imm);
423 return true;
424 } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
425 // Immediate can be represented in a move not instruction. Movn can't write
426 // to the stack pointer.
427 movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
428 return true;
429 } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
430 // Immediate can be represented in a logical orr instruction.
431 LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
432 return true;
433 }
434 return false;
435 }
436
437
438 Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
439 int64_t imm) {
440 int reg_size = dst.SizeInBits();
441
442 // Encode the immediate in a single move instruction, if possible.
443 if (TryOneInstrMoveImmediate(dst, imm)) {
444 // The move was successful; nothing to do here.
445 } else {
446 // Pre-shift the immediate to the least-significant bits of the register.
447 int shift_low = CountTrailingZeros(imm, reg_size);
448 int64_t imm_low = imm >> shift_low;
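    // Illustrative example (not from the original source): 0x12340 has no
    // movz/movn/orr encoding, but shifting out its six trailing zeros gives
    // 0x48d, which movz can encode. The caller then applies the post-shift,
    // e.g. "movz temp, #0x48d" followed by "add rd, rn, temp, lsl #6".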
449
450 // Pre-shift the immediate to the most-significant bits of the register. We
451 // insert set bits in the least-significant bits, as this creates a
452 // different immediate that may be encodable using movn or orr-immediate.
453 // If this new immediate is encodable, the set bits will be eliminated by
454 // the post shift on the following instruction.
455 int shift_high = CountLeadingZeros(imm, reg_size);
456 int64_t imm_high = (imm << shift_high) | ((1ULL << shift_high) - 1);
457
458 if (TryOneInstrMoveImmediate(dst, imm_low)) {
459 // The new immediate has been moved into the destination's low bits:
460 // return a new leftward-shifting operand.
461 return Operand(dst, LSL, shift_low);
462 } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
463 // The new immediate has been moved into the destination's high bits:
464 // return a new rightward-shifting operand.
465 return Operand(dst, LSR, shift_high);
466 } else {
467 // Use the generic move operation to set up the immediate.
468 Mov(dst, imm);
469 }
470 }
471 return Operand(dst);
472 }
473
474
475 void MacroAssembler::AddSubMacro(const Register& rd,
476 const Register& rn,
477 const Operand& operand,
478 FlagsUpdate S,
479 AddSubOp op) {
480 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
481 !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
482 // The instruction would be a nop. Avoid generating useless code.
483 return;
484 }
485
486 if (operand.NeedsRelocation(this)) {
487 UseScratchRegisterScope temps(this);
488 Register temp = temps.AcquireX();
489 Ldr(temp, operand.immediate());
490 AddSubMacro(rd, rn, temp, S, op);
491 } else if ((operand.IsImmediate() &&
492 !IsImmAddSub(operand.ImmediateValue())) ||
493 (rn.IsZero() && !operand.IsShiftedRegister()) ||
494 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
495 UseScratchRegisterScope temps(this);
496 Register temp = temps.AcquireSameSizeAs(rn);
497 if (operand.IsImmediate()) {
498 Operand imm_operand =
499 MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
500 AddSub(rd, rn, imm_operand, S, op);
501 } else {
502 Mov(temp, operand);
503 AddSub(rd, rn, temp, S, op);
504 }
505 } else {
506 AddSub(rd, rn, operand, S, op);
507 }
508 }
509
510
511 void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
512 const Register& rn,
513 const Operand& operand,
514 FlagsUpdate S,
515 AddSubWithCarryOp op) {
516 DCHECK(rd.SizeInBits() == rn.SizeInBits());
517 UseScratchRegisterScope temps(this);
518
519 if (operand.NeedsRelocation(this)) {
520 Register temp = temps.AcquireX();
521 Ldr(temp, operand.immediate());
522 AddSubWithCarryMacro(rd, rn, temp, S, op);
523
524 } else if (operand.IsImmediate() ||
525 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
526 // Add/sub with carry (immediate or ROR shifted register).
527 Register temp = temps.AcquireSameSizeAs(rn);
528 Mov(temp, operand);
529 AddSubWithCarry(rd, rn, temp, S, op);
530
531 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
532 // Add/sub with carry (shifted register).
533 DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
534 DCHECK(operand.shift() != ROR);
535 DCHECK(is_uintn(operand.shift_amount(),
536 rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
537 : kWRegSizeInBitsLog2));
538 Register temp = temps.AcquireSameSizeAs(rn);
539 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
540 AddSubWithCarry(rd, rn, temp, S, op);
541
542 } else if (operand.IsExtendedRegister()) {
543 // Add/sub with carry (extended register).
544 DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
545 // Add/sub extended supports a shift <= 4. We want to support exactly the
546 // same modes.
547 DCHECK(operand.shift_amount() <= 4);
548 DCHECK(operand.reg().Is64Bits() ||
549 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
550 Register temp = temps.AcquireSameSizeAs(rn);
551 EmitExtendShift(temp, operand.reg(), operand.extend(),
552 operand.shift_amount());
553 AddSubWithCarry(rd, rn, temp, S, op);
554
555 } else {
556 // The addressing mode is directly supported by the instruction.
557 AddSubWithCarry(rd, rn, operand, S, op);
558 }
559 }
560
561
562 void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
563 const MemOperand& addr,
564 LoadStoreOp op) {
565 int64_t offset = addr.offset();
566 LSDataSize size = CalcLSDataSize(op);
567
568 // Check if an immediate offset fits in the immediate field of the
569 // appropriate instruction. If not, emit two instructions to perform
570 // the operation.
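  // For example (illustrative), a 64-bit Ldr with an immediate offset of
  // 0x123456 fits neither the scaled unsigned nor the unscaled signed offset
  // field, so the offset is materialized in a scratch register and a
  // register-offset load is emitted instead.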
571 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
572 !IsImmLSUnscaled(offset)) {
573 // Immediate offset that can't be encoded using unsigned or unscaled
574 // addressing modes.
575 UseScratchRegisterScope temps(this);
576 Register temp = temps.AcquireSameSizeAs(addr.base());
577 Mov(temp, addr.offset());
578 LoadStore(rt, MemOperand(addr.base(), temp), op);
579 } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
580 // Post-index beyond unscaled addressing range.
581 LoadStore(rt, MemOperand(addr.base()), op);
582 add(addr.base(), addr.base(), offset);
583 } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
584 // Pre-index beyond unscaled addressing range.
585 add(addr.base(), addr.base(), offset);
586 LoadStore(rt, MemOperand(addr.base()), op);
587 } else {
588 // Encodable in one load/store instruction.
589 LoadStore(rt, addr, op);
590 }
591 }
592
593 void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
594 const CPURegister& rt2,
595 const MemOperand& addr,
596 LoadStorePairOp op) {
597 // TODO(all): Should we support register offset for load-store-pair?
598 DCHECK(!addr.IsRegisterOffset());
599
600 int64_t offset = addr.offset();
601 LSDataSize size = CalcLSPairDataSize(op);
602
603 // Check if the offset fits in the immediate field of the appropriate
604 // instruction. If not, emit two instructions to perform the operation.
605 if (IsImmLSPair(offset, size)) {
606 // Encodable in one load/store pair instruction.
607 LoadStorePair(rt, rt2, addr, op);
608 } else {
609 Register base = addr.base();
610 if (addr.IsImmediateOffset()) {
611 UseScratchRegisterScope temps(this);
612 Register temp = temps.AcquireSameSizeAs(base);
613 Add(temp, base, offset);
614 LoadStorePair(rt, rt2, MemOperand(temp), op);
615 } else if (addr.IsPostIndex()) {
616 LoadStorePair(rt, rt2, MemOperand(base), op);
617 Add(base, base, offset);
618 } else {
619 DCHECK(addr.IsPreIndex());
620 Add(base, base, offset);
621 LoadStorePair(rt, rt2, MemOperand(base), op);
622 }
623 }
624 }
625
626
627 void MacroAssembler::Load(const Register& rt,
628 const MemOperand& addr,
629 Representation r) {
630 DCHECK(!r.IsDouble());
631
632 if (r.IsInteger8()) {
633 Ldrsb(rt, addr);
634 } else if (r.IsUInteger8()) {
635 Ldrb(rt, addr);
636 } else if (r.IsInteger16()) {
637 Ldrsh(rt, addr);
638 } else if (r.IsUInteger16()) {
639 Ldrh(rt, addr);
640 } else if (r.IsInteger32()) {
641 Ldr(rt.W(), addr);
642 } else {
643 DCHECK(rt.Is64Bits());
644 Ldr(rt, addr);
645 }
646 }
647
648
649 void MacroAssembler::Store(const Register& rt,
650 const MemOperand& addr,
651 Representation r) {
652 DCHECK(!r.IsDouble());
653
654 if (r.IsInteger8() || r.IsUInteger8()) {
655 Strb(rt, addr);
656 } else if (r.IsInteger16() || r.IsUInteger16()) {
657 Strh(rt, addr);
658 } else if (r.IsInteger32()) {
659 Str(rt.W(), addr);
660 } else {
661 DCHECK(rt.Is64Bits());
662 if (r.IsHeapObject()) {
663 AssertNotSmi(rt);
664 } else if (r.IsSmi()) {
665 AssertSmi(rt);
666 }
667 Str(rt, addr);
668 }
669 }
670
671
672 bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
673 Label *label, ImmBranchType b_type) {
674 bool need_longer_range = false;
675 // There are two situations in which we care about the offset being out of
676 // range:
677 // - The label is bound but too far away.
678 // - The label is not bound but linked, and the previous branch
679 // instruction in the chain is too far away.
680 if (label->is_bound() || label->is_linked()) {
681 need_longer_range =
682 !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
683 }
684 if (!need_longer_range && !label->is_bound()) {
685 int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
686 unresolved_branches_.insert(
687 std::pair<int, FarBranchInfo>(max_reachable_pc,
688 FarBranchInfo(pc_offset(), label)));
689 // Also maintain the next pool check.
690 next_veneer_pool_check_ =
691 Min(next_veneer_pool_check_,
692 max_reachable_pc - kVeneerDistanceCheckMargin);
693 }
694 return need_longer_range;
695 }
696
697
698 void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
699 DCHECK(allow_macro_instructions_);
700 DCHECK(!rd.IsZero());
701
702 if (hint == kAdrNear) {
703 adr(rd, label);
704 return;
705 }
706
707 DCHECK(hint == kAdrFar);
708 if (label->is_bound()) {
709 int label_offset = label->pos() - pc_offset();
710 if (Instruction::IsValidPCRelOffset(label_offset)) {
711 adr(rd, label);
712 } else {
713 DCHECK(label_offset <= 0);
714 int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
715 adr(rd, min_adr_offset);
716 Add(rd, rd, label_offset - min_adr_offset);
717 }
718 } else {
719 UseScratchRegisterScope temps(this);
720 Register scratch = temps.AcquireX();
721
722 InstructionAccurateScope scope(
723 this, PatchingAssembler::kAdrFarPatchableNInstrs);
724 adr(rd, label);
725 for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
726 nop(ADR_FAR_NOP);
727 }
728 movz(scratch, 0);
729 }
730 }
731
732
733 void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
734 DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
735 (bit == -1 || type >= kBranchTypeFirstUsingBit));
736 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
737 B(static_cast<Condition>(type), label);
738 } else {
739 switch (type) {
740 case always: B(label); break;
741 case never: break;
742 case reg_zero: Cbz(reg, label); break;
743 case reg_not_zero: Cbnz(reg, label); break;
744 case reg_bit_clear: Tbz(reg, bit, label); break;
745 case reg_bit_set: Tbnz(reg, bit, label); break;
746 default:
747 UNREACHABLE();
748 }
749 }
750 }
751
752
753 void MacroAssembler::B(Label* label, Condition cond) {
754 DCHECK(allow_macro_instructions_);
755 DCHECK((cond != al) && (cond != nv));
756
757 Label done;
758 bool need_extra_instructions =
759 NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
760
761 if (need_extra_instructions) {
762 b(&done, NegateCondition(cond));
763 B(label);
764 } else {
765 b(label, cond);
766 }
767 bind(&done);
768 }
769
770
771 void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
772 DCHECK(allow_macro_instructions_);
773
774 Label done;
775 bool need_extra_instructions =
776 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
777
778 if (need_extra_instructions) {
779 tbz(rt, bit_pos, &done);
780 B(label);
781 } else {
782 tbnz(rt, bit_pos, label);
783 }
784 bind(&done);
785 }
786
787
788 void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
789 DCHECK(allow_macro_instructions_);
790
791 Label done;
792 bool need_extra_instructions =
793 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
794
795 if (need_extra_instructions) {
796 tbnz(rt, bit_pos, &done);
797 B(label);
798 } else {
799 tbz(rt, bit_pos, label);
800 }
801 bind(&done);
802 }
803
804
805 void MacroAssembler::Cbnz(const Register& rt, Label* label) {
806 DCHECK(allow_macro_instructions_);
807
808 Label done;
809 bool need_extra_instructions =
810 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
811
812 if (need_extra_instructions) {
813 cbz(rt, &done);
814 B(label);
815 } else {
816 cbnz(rt, label);
817 }
818 bind(&done);
819 }
820
821
822 void MacroAssembler::Cbz(const Register& rt, Label* label) {
823 DCHECK(allow_macro_instructions_);
824
825 Label done;
826 bool need_extra_instructions =
827 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
828
829 if (need_extra_instructions) {
830 cbnz(rt, &done);
831 B(label);
832 } else {
833 cbz(rt, label);
834 }
835 bind(&done);
836 }
837
838
839 // Pseudo-instructions.
840
841
842 void MacroAssembler::Abs(const Register& rd, const Register& rm,
843 Label* is_not_representable,
844 Label* is_representable) {
845 DCHECK(allow_macro_instructions_);
846 DCHECK(AreSameSizeAndType(rd, rm));
847
848 Cmp(rm, 1);
849 Cneg(rd, rm, lt);
850
851 // If the comparison sets the v flag, the input was the smallest value
852 // representable by rm, and the mathematical result of abs(rm) is not
853 // representable using two's complement.
854 if ((is_not_representable != NULL) && (is_representable != NULL)) {
855 B(is_not_representable, vs);
856 B(is_representable);
857 } else if (is_not_representable != NULL) {
858 B(is_not_representable, vs);
859 } else if (is_representable != NULL) {
860 B(is_representable, vc);
861 }
862 }
863
864
865 // Abstracted stack operations.
866
867
868 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
869 const CPURegister& src2, const CPURegister& src3) {
870 DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
871
872 int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
873 int size = src0.SizeInBytes();
874
875 PushPreamble(count, size);
876 PushHelper(count, size, src0, src1, src2, src3);
877 }
878
879
880 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
881 const CPURegister& src2, const CPURegister& src3,
882 const CPURegister& src4, const CPURegister& src5,
883 const CPURegister& src6, const CPURegister& src7) {
884 DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
885
886 int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
887 int size = src0.SizeInBytes();
888
889 PushPreamble(count, size);
890 PushHelper(4, size, src0, src1, src2, src3);
891 PushHelper(count - 4, size, src4, src5, src6, src7);
892 }
893
894
895 void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
896 const CPURegister& dst2, const CPURegister& dst3) {
897 // It is not valid to pop into the same register more than once in one
898 // instruction, not even into the zero register.
899 DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
900 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
901 DCHECK(dst0.IsValid());
902
903 int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
904 int size = dst0.SizeInBytes();
905
906 PopHelper(count, size, dst0, dst1, dst2, dst3);
907 PopPostamble(count, size);
908 }
909
910
911 void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
912 const CPURegister& dst2, const CPURegister& dst3,
913 const CPURegister& dst4, const CPURegister& dst5,
914 const CPURegister& dst6, const CPURegister& dst7) {
915 // It is not valid to pop into the same register more than once in one
916 // instruction, not even into the zero register.
917 DCHECK(!AreAliased(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
918 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
919 DCHECK(dst0.IsValid());
920
921 int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid();
922 int size = dst0.SizeInBytes();
923
924 PopHelper(4, size, dst0, dst1, dst2, dst3);
925 PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
926 PopPostamble(count, size);
927 }
928
929
930 void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
931 int size = src0.SizeInBytes() + src1.SizeInBytes();
932
933 PushPreamble(size);
934 // Reserve room for src0 and push src1.
935 str(src1, MemOperand(StackPointer(), -size, PreIndex));
936 // Fill the gap with src0.
937 str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
938 }
939
940
941 void MacroAssembler::PushPopQueue::PushQueued(
942 PreambleDirective preamble_directive) {
943 if (queued_.empty()) return;
944
945 if (preamble_directive == WITH_PREAMBLE) {
946 masm_->PushPreamble(size_);
947 }
948
949 size_t count = queued_.size();
950 size_t index = 0;
951 while (index < count) {
952 // PushHelper can only handle registers with the same size and type, and it
953 // can handle only four at a time. Batch them up accordingly.
954 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
955 int batch_index = 0;
956 do {
957 batch[batch_index++] = queued_[index++];
958 } while ((batch_index < 4) && (index < count) &&
959 batch[0].IsSameSizeAndType(queued_[index]));
960
961 masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
962 batch[0], batch[1], batch[2], batch[3]);
963 }
964
965 queued_.clear();
966 }
967
968
969 void MacroAssembler::PushPopQueue::PopQueued() {
970 if (queued_.empty()) return;
971
972 size_t count = queued_.size();
973 size_t index = 0;
974 while (index < count) {
975 // PopHelper can only handle registers with the same size and type, and it
976 // can handle only four at a time. Batch them up accordingly.
977 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
978 int batch_index = 0;
979 do {
980 batch[batch_index++] = queued_[index++];
981 } while ((batch_index < 4) && (index < count) &&
982 batch[0].IsSameSizeAndType(queued_[index]));
983
984 masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
985 batch[0], batch[1], batch[2], batch[3]);
986 }
987
988 masm_->PopPostamble(size_);
989 queued_.clear();
990 }
991
992
993 void MacroAssembler::PushCPURegList(CPURegList registers) {
994 int size = registers.RegisterSizeInBytes();
995
996 PushPreamble(registers.Count(), size);
997 // Push up to four registers at a time because if the current stack pointer is
998 // csp and reg_size is 32, registers must be pushed in blocks of four in order
999 // to maintain the 16-byte alignment for csp.
1000 while (!registers.IsEmpty()) {
1001 int count_before = registers.Count();
1002 const CPURegister& src0 = registers.PopHighestIndex();
1003 const CPURegister& src1 = registers.PopHighestIndex();
1004 const CPURegister& src2 = registers.PopHighestIndex();
1005 const CPURegister& src3 = registers.PopHighestIndex();
1006 int count = count_before - registers.Count();
1007 PushHelper(count, size, src0, src1, src2, src3);
1008 }
1009 }
1010
1011
1012 void MacroAssembler::PopCPURegList(CPURegList registers) {
1013 int size = registers.RegisterSizeInBytes();
1014
1015 // Pop up to four registers at a time because if the current stack pointer is
1016 // csp and reg_size is 32, registers must be popped in blocks of four in
1017 // order to maintain the 16-byte alignment for csp.
1018 while (!registers.IsEmpty()) {
1019 int count_before = registers.Count();
1020 const CPURegister& dst0 = registers.PopLowestIndex();
1021 const CPURegister& dst1 = registers.PopLowestIndex();
1022 const CPURegister& dst2 = registers.PopLowestIndex();
1023 const CPURegister& dst3 = registers.PopLowestIndex();
1024 int count = count_before - registers.Count();
1025 PopHelper(count, size, dst0, dst1, dst2, dst3);
1026 }
1027 PopPostamble(registers.Count(), size);
1028 }
1029
1030
1031 void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
1032 int size = src.SizeInBytes();
1033
1034 PushPreamble(count, size);
1035
1036 if (FLAG_optimize_for_size && count > 8) {
1037 UseScratchRegisterScope temps(this);
1038 Register temp = temps.AcquireX();
1039
1040 Label loop;
1041 __ Mov(temp, count / 2);
1042 __ Bind(&loop);
1043 PushHelper(2, size, src, src, NoReg, NoReg);
1044 __ Subs(temp, temp, 1);
1045 __ B(ne, &loop);
1046
1047 count %= 2;
1048 }
1049
1050 // Push up to four registers at a time if possible because if the current
1051 // stack pointer is csp and the register size is 32, registers must be pushed
1052 // in blocks of four in order to maintain the 16-byte alignment for csp.
1053 while (count >= 4) {
1054 PushHelper(4, size, src, src, src, src);
1055 count -= 4;
1056 }
1057 if (count >= 2) {
1058 PushHelper(2, size, src, src, NoReg, NoReg);
1059 count -= 2;
1060 }
1061 if (count == 1) {
1062 PushHelper(1, size, src, NoReg, NoReg, NoReg);
1063 count -= 1;
1064 }
1065 DCHECK(count == 0);
1066 }
1067
1068
1069 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
1070 PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
1071
1072 UseScratchRegisterScope temps(this);
1073 Register temp = temps.AcquireSameSizeAs(count);
1074
1075 if (FLAG_optimize_for_size) {
1076 Label loop, done;
1077
1078 Subs(temp, count, 1);
1079 B(mi, &done);
1080
1081 // Push all registers individually, to save code size.
1082 Bind(&loop);
1083 Subs(temp, temp, 1);
1084 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
1085 B(pl, &loop);
1086
1087 Bind(&done);
1088 } else {
1089 Label loop, leftover2, leftover1, done;
1090
1091 Subs(temp, count, 4);
1092 B(mi, &leftover2);
1093
1094 // Push groups of four first.
1095 Bind(&loop);
1096 Subs(temp, temp, 4);
1097 PushHelper(4, src.SizeInBytes(), src, src, src, src);
1098 B(pl, &loop);
1099
1100 // Push groups of two.
1101 Bind(&leftover2);
1102 Tbz(count, 1, &leftover1);
1103 PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);
1104
1105 // Push the last one (if required).
1106 Bind(&leftover1);
1107 Tbz(count, 0, &done);
1108 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
1109
1110 Bind(&done);
1111 }
1112 }
1113
1114
1115 void MacroAssembler::PushHelper(int count, int size,
1116 const CPURegister& src0,
1117 const CPURegister& src1,
1118 const CPURegister& src2,
1119 const CPURegister& src3) {
1120 // Ensure that we don't unintentionally modify scratch or debug registers.
1121 InstructionAccurateScope scope(this);
1122
1123 DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
1124 DCHECK(size == src0.SizeInBytes());
1125
1126 // When pushing multiple registers, the store order is chosen such that
1127 // Push(a, b) is equivalent to Push(a) followed by Push(b).
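  // For example (illustrative), Push(x0, x1) takes the count == 2 case below
  // and emits "stp x1, x0, [StackPointer(), #-16]!", leaving x0 at the higher
  // address, exactly as if x0 had been pushed first.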
1128 switch (count) {
1129 case 1:
1130 DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
1131 str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
1132 break;
1133 case 2:
1134 DCHECK(src2.IsNone() && src3.IsNone());
1135 stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
1136 break;
1137 case 3:
1138 DCHECK(src3.IsNone());
1139 stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
1140 str(src0, MemOperand(StackPointer(), 2 * size));
1141 break;
1142 case 4:
1143 // Skip over 4 * size, then fill in the gap. This allows four W registers
1144 // to be pushed using csp, whilst maintaining 16-byte alignment for csp
1145 // at all times.
1146 stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
1147 stp(src1, src0, MemOperand(StackPointer(), 2 * size));
1148 break;
1149 default:
1150 UNREACHABLE();
1151 }
1152 }
1153
1154
1155 void MacroAssembler::PopHelper(int count, int size,
1156 const CPURegister& dst0,
1157 const CPURegister& dst1,
1158 const CPURegister& dst2,
1159 const CPURegister& dst3) {
1160 // Ensure that we don't unintentionally modify scratch or debug registers.
1161 InstructionAccurateScope scope(this);
1162
1163 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
1164 DCHECK(size == dst0.SizeInBytes());
1165
1166 // When popping multiple registers, the load order is chosen such that
1167 // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
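  // For example (illustrative), Pop(x0, x1) takes the count == 2 case below
  // and emits "ldp x0, x1, [StackPointer()], #16", so x0 receives the value
  // at the lower address, i.e. the value that was pushed last.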
1168 switch (count) {
1169 case 1:
1170 DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
1171 ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
1172 break;
1173 case 2:
1174 DCHECK(dst2.IsNone() && dst3.IsNone());
1175 ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
1176 break;
1177 case 3:
1178 DCHECK(dst3.IsNone());
1179 ldr(dst2, MemOperand(StackPointer(), 2 * size));
1180 ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
1181 break;
1182 case 4:
1183 // Load the higher addresses first, then load the lower addresses and
1184 // skip the whole block in the second instruction. This allows four W
1185 // registers to be popped using csp, whilst maintaining 16-byte alignment
1186 // for csp at all times.
1187 ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
1188 ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
1189 break;
1190 default:
1191 UNREACHABLE();
1192 }
1193 }
1194
1195
1196 void MacroAssembler::PushPreamble(Operand total_size) {
1197 if (csp.Is(StackPointer())) {
1198 // If the current stack pointer is csp, then it must be aligned to 16 bytes
1199 // on entry and the total size of the specified registers must also be a
1200 // multiple of 16 bytes.
1201 if (total_size.IsImmediate()) {
1202 DCHECK((total_size.ImmediateValue() % 16) == 0);
1203 }
1204
1205 // Don't check access size for non-immediate sizes. It's difficult to do
1206 // well, and it will be caught by hardware (or the simulator) anyway.
1207 } else {
1208 // Even if the current stack pointer is not the system stack pointer (csp),
1209 // the system stack pointer will still be modified in order to comply with
1210 // ABI rules about accessing memory below the system stack pointer.
1211 BumpSystemStackPointer(total_size);
1212 }
1213 }
1214
1215
1216 void MacroAssembler::PopPostamble(Operand total_size) {
1217 if (csp.Is(StackPointer())) {
1218 // If the current stack pointer is csp, then it must be aligned to 16 bytes
1219 // on entry and the total size of the specified registers must also be a
1220 // multiple of 16 bytes.
1221 if (total_size.IsImmediate()) {
1222 DCHECK((total_size.ImmediateValue() % 16) == 0);
1223 }
1224
1225 // Don't check access size for non-immediate sizes. It's difficult to do
1226 // well, and it will be caught by hardware (or the simulator) anyway.
1227 } else if (emit_debug_code()) {
1228 // It is safe to leave csp where it is when unwinding the JavaScript stack,
1229 // but if we keep it matching StackPointer, the simulator can detect memory
1230 // accesses in the now-free part of the stack.
1231 SyncSystemStackPointer();
1232 }
1233 }
1234
1235
1236 void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
1237 if (offset.IsImmediate()) {
1238 DCHECK(offset.ImmediateValue() >= 0);
1239 } else if (emit_debug_code()) {
1240 Cmp(xzr, offset);
1241 Check(le, kStackAccessBelowStackPointer);
1242 }
1243
1244 Str(src, MemOperand(StackPointer(), offset));
1245 }
1246
1247
1248 void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
1249 if (offset.IsImmediate()) {
1250 DCHECK(offset.ImmediateValue() >= 0);
1251 } else if (emit_debug_code()) {
1252 Cmp(xzr, offset);
1253 Check(le, kStackAccessBelowStackPointer);
1254 }
1255
1256 Ldr(dst, MemOperand(StackPointer(), offset));
1257 }
1258
1259
1260 void MacroAssembler::PokePair(const CPURegister& src1,
1261 const CPURegister& src2,
1262 int offset) {
1263 DCHECK(AreSameSizeAndType(src1, src2));
1264 DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
1265 Stp(src1, src2, MemOperand(StackPointer(), offset));
1266 }
1267
1268
1269 void MacroAssembler::PeekPair(const CPURegister& dst1,
1270 const CPURegister& dst2,
1271 int offset) {
1272 DCHECK(AreSameSizeAndType(dst1, dst2));
1273 DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
1274 Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
1275 }
1276
1277
1278 void MacroAssembler::PushCalleeSavedRegisters() {
1279 // Ensure that the macro-assembler doesn't use any scratch registers.
1280 InstructionAccurateScope scope(this);
1281
1282 // This method must not be called unless the current stack pointer is the
1283 // system stack pointer (csp).
1284 DCHECK(csp.Is(StackPointer()));
1285
1286 MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);
1287
1288 stp(d14, d15, tos);
1289 stp(d12, d13, tos);
1290 stp(d10, d11, tos);
1291 stp(d8, d9, tos);
1292
1293 stp(x29, x30, tos);
1294 stp(x27, x28, tos); // x28 = jssp
1295 stp(x25, x26, tos);
1296 stp(x23, x24, tos);
1297 stp(x21, x22, tos);
1298 stp(x19, x20, tos);
1299 }
1300
1301
1302 void MacroAssembler::PopCalleeSavedRegisters() {
1303 // Ensure that the macro-assembler doesn't use any scratch registers.
1304 InstructionAccurateScope scope(this);
1305
1306 // This method must not be called unless the current stack pointer is the
1307 // system stack pointer (csp).
1308 DCHECK(csp.Is(StackPointer()));
1309
1310 MemOperand tos(csp, 2 * kXRegSize, PostIndex);
1311
1312 ldp(x19, x20, tos);
1313 ldp(x21, x22, tos);
1314 ldp(x23, x24, tos);
1315 ldp(x25, x26, tos);
1316 ldp(x27, x28, tos); // x28 = jssp
1317 ldp(x29, x30, tos);
1318
1319 ldp(d8, d9, tos);
1320 ldp(d10, d11, tos);
1321 ldp(d12, d13, tos);
1322 ldp(d14, d15, tos);
1323 }
1324
1325
1326 void MacroAssembler::AssertStackConsistency() {
1327 // Avoid emitting code when !use_real_aborts() since non-real aborts cause too
1328 // much code to be generated.
1329 if (emit_debug_code() && use_real_aborts()) {
1330 if (csp.Is(StackPointer())) {
1331 // Check the alignment of csp whenever it is the stack pointer. We
1332 // can't check the alignment of csp without using a scratch register (or
1333 // clobbering the flags), but the processor (or simulator) will abort if
1334 // it is not properly aligned during a load.
1335 ldr(xzr, MemOperand(csp, 0));
1336 }
1337 if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
1338 Label ok;
1339 // Check that csp <= StackPointer(), preserving all registers and NZCV.
1340 sub(StackPointer(), csp, StackPointer());
1341 cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
1342 tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
1343
1344 // Avoid generating AssertStackConsistency checks for the Push in Abort.
1345 { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
1346 // Restore StackPointer().
1347 sub(StackPointer(), csp, StackPointer());
1348 Abort(kTheCurrentStackPointerIsBelowCsp);
1349 }
1350
1351 bind(&ok);
1352 // Restore StackPointer().
1353 sub(StackPointer(), csp, StackPointer());
1354 }
1355 }
1356 }
1357
1358 void MacroAssembler::AssertCspAligned() {
1359 if (emit_debug_code() && use_real_aborts()) {
1360 // TODO(titzer): use a real assert for alignment check?
1361 UseScratchRegisterScope scope(this);
1362 Register temp = scope.AcquireX();
1363 ldr(temp, MemOperand(csp));
1364 }
1365 }
1366
1367 void MacroAssembler::AssertFPCRState(Register fpcr) {
1368 if (emit_debug_code()) {
1369 Label unexpected_mode, done;
1370 UseScratchRegisterScope temps(this);
1371 if (fpcr.IsNone()) {
1372 fpcr = temps.AcquireX();
1373 Mrs(fpcr, FPCR);
1374 }
1375
1376 // Settings left to their default values:
1377 // - Assert that flush-to-zero is not set.
1378 Tbnz(fpcr, FZ_offset, &unexpected_mode);
1379 // - Assert that the rounding mode is nearest-with-ties-to-even.
1380 STATIC_ASSERT(FPTieEven == 0);
1381 Tst(fpcr, RMode_mask);
1382 B(eq, &done);
1383
1384 Bind(&unexpected_mode);
1385 Abort(kUnexpectedFPCRMode);
1386
1387 Bind(&done);
1388 }
1389 }
1390
1391
1392 void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
1393 const FPRegister& src) {
1394 AssertFPCRState();
1395
1396 // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
1397 // become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0
1398 // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
1399 Fsub(dst, src, fp_zero);
1400 }
1401
1402
1403 void MacroAssembler::LoadRoot(CPURegister destination,
1404 Heap::RootListIndex index) {
1405 // TODO(jbramley): Most root values are constants, and can be synthesized
1406 // without a load. Refer to the ARM back end for details.
1407 Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
1408 }
1409
1410
1411 void MacroAssembler::StoreRoot(Register source,
1412 Heap::RootListIndex index) {
1413 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
1414 Str(source, MemOperand(root, index << kPointerSizeLog2));
1415 }
1416
1417
1418 void MacroAssembler::LoadTrueFalseRoots(Register true_root,
1419 Register false_root) {
1420 STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
1421 Ldp(true_root, false_root,
1422 MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
1423 }
1424
1425
1426 void MacroAssembler::LoadHeapObject(Register result,
1427 Handle<HeapObject> object) {
1428 Mov(result, Operand(object));
1429 }
1430
1431
1432 void MacroAssembler::LoadInstanceDescriptors(Register map,
1433 Register descriptors) {
1434 Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
1435 }
1436
1437
1438 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
1439 Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
1440 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
1441 }
1442
1443
1444 void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
1445 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
1446 Ldrsw(dst, FieldMemOperand(map, Map::kBitField3Offset));
1447 And(dst, dst, Map::EnumLengthBits::kMask);
1448 }
1449
1450
1451 void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
1452 EnumLengthUntagged(dst, map);
1453 SmiTag(dst, dst);
1454 }
1455
1456
1457 void MacroAssembler::LoadAccessor(Register dst, Register holder,
1458 int accessor_index,
1459 AccessorComponent accessor) {
1460 Ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
1461 LoadInstanceDescriptors(dst, dst);
1462 Ldr(dst,
1463 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
1464 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
1465 : AccessorPair::kSetterOffset;
1466 Ldr(dst, FieldMemOperand(dst, offset));
1467 }
1468
1469
1470 void MacroAssembler::CheckEnumCache(Register object, Register scratch0,
1471 Register scratch1, Register scratch2,
1472 Register scratch3, Register scratch4,
1473 Label* call_runtime) {
1474 DCHECK(!AreAliased(object, scratch0, scratch1, scratch2, scratch3, scratch4));
1475
1476 Register empty_fixed_array_value = scratch0;
1477 Register current_object = scratch1;
1478 Register null_value = scratch4;
1479
1480 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
1481 Label next, start;
1482
1483 Mov(current_object, object);
1484
1485 // Check if the enum length field is properly initialized, indicating that
1486 // there is an enum cache.
1487 Register map = scratch2;
1488 Register enum_length = scratch3;
1489 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
1490
1491 EnumLengthUntagged(enum_length, map);
1492 Cmp(enum_length, kInvalidEnumCacheSentinel);
1493 B(eq, call_runtime);
1494
1495 LoadRoot(null_value, Heap::kNullValueRootIndex);
1496 B(&start);
1497
1498 Bind(&next);
1499 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
1500
1501 // For all objects but the receiver, check that the cache is empty.
1502 EnumLengthUntagged(enum_length, map);
1503 Cbnz(enum_length, call_runtime);
1504
1505 Bind(&start);
1506
1507 // Check that there are no elements. Register current_object contains the
1508 // current JS object we've reached through the prototype chain.
1509 Label no_elements;
1510 Ldr(current_object, FieldMemOperand(current_object,
1511 JSObject::kElementsOffset));
1512 Cmp(current_object, empty_fixed_array_value);
1513 B(eq, &no_elements);
1514
1515 // Second chance, the object may be using the empty slow element dictionary.
1516 CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
1517 B(ne, call_runtime);
1518
1519 Bind(&no_elements);
1520 Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
1521 Cmp(current_object, null_value);
1522 B(ne, &next);
1523 }
1524
1525
1526 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
1527 Register scratch1,
1528 Register scratch2,
1529 Label* no_memento_found) {
1530 Label map_check;
1531 Label top_check;
1532 ExternalReference new_space_allocation_top_adr =
1533 ExternalReference::new_space_allocation_top_address(isolate());
1534 const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
1535 const int kMementoLastWordOffset =
1536 kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
1537
1538 // Bail out if the object is not in new space.
1539 JumpIfNotInNewSpace(receiver, no_memento_found);
1540 Add(scratch1, receiver, kMementoLastWordOffset);
1541 // If the object is in new space, we need to check whether it is on the same
1542 // page as the current top.
1543 Mov(scratch2, new_space_allocation_top_adr);
1544 Ldr(scratch2, MemOperand(scratch2));
1545 Eor(scratch2, scratch1, scratch2);
1546 Tst(scratch2, ~Page::kPageAlignmentMask);
1547 B(eq, &top_check);
1548 // The object is on a different page than allocation top. Bail out if the
1549 // object sits on the page boundary as no memento can follow and we cannot
1550 // touch the memory following it.
1551 Eor(scratch2, scratch1, receiver);
1552 Tst(scratch2, ~Page::kPageAlignmentMask);
1553 B(ne, no_memento_found);
1554 // Continue with the actual map check.
1555 jmp(&map_check);
1556 // If top is on the same page as the current object, we need to check whether
1557 // we are below top.
1558 bind(&top_check);
1559 Mov(scratch2, new_space_allocation_top_adr);
1560 Ldr(scratch2, MemOperand(scratch2));
1561 Cmp(scratch1, scratch2);
1562 B(ge, no_memento_found);
1563 // Memento map check.
1564 bind(&map_check);
1565 Ldr(scratch1, MemOperand(receiver, kMementoMapOffset));
1566 Cmp(scratch1, Operand(isolate()->factory()->allocation_memento_map()));
1567 }
1568
1569
1570 void MacroAssembler::InNewSpace(Register object,
1571 Condition cond,
1572 Label* branch) {
1573 DCHECK(cond == eq || cond == ne);
1574 UseScratchRegisterScope temps(this);
1575 CheckPageFlag(object, temps.AcquireSameSizeAs(object),
1576 MemoryChunk::kIsInNewSpaceMask, cond, branch);
1577 }
1578
1579
1580 void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
1581 if (emit_debug_code()) {
1582 STATIC_ASSERT(kSmiTag == 0);
1583 Tst(object, kSmiTagMask);
1584 Check(eq, reason);
1585 }
1586 }
1587
1588
AssertNotSmi(Register object,BailoutReason reason)1589 void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
1590 if (emit_debug_code()) {
1591 STATIC_ASSERT(kSmiTag == 0);
1592 Tst(object, kSmiTagMask);
1593 Check(ne, reason);
1594 }
1595 }
1596
1597
AssertName(Register object)1598 void MacroAssembler::AssertName(Register object) {
1599 if (emit_debug_code()) {
1600 AssertNotSmi(object, kOperandIsASmiAndNotAName);
1601
1602 UseScratchRegisterScope temps(this);
1603 Register temp = temps.AcquireX();
1604
1605 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1606 CompareInstanceType(temp, temp, LAST_NAME_TYPE);
1607 Check(ls, kOperandIsNotAName);
1608 }
1609 }
1610
1611
AssertFunction(Register object)1612 void MacroAssembler::AssertFunction(Register object) {
1613 if (emit_debug_code()) {
1614 AssertNotSmi(object, kOperandIsASmiAndNotAFunction);
1615
1616 UseScratchRegisterScope temps(this);
1617 Register temp = temps.AcquireX();
1618
1619 CompareObjectType(object, temp, temp, JS_FUNCTION_TYPE);
1620 Check(eq, kOperandIsNotAFunction);
1621 }
1622 }
1623
1624
AssertBoundFunction(Register object)1625 void MacroAssembler::AssertBoundFunction(Register object) {
1626 if (emit_debug_code()) {
1627 AssertNotSmi(object, kOperandIsASmiAndNotABoundFunction);
1628
1629 UseScratchRegisterScope temps(this);
1630 Register temp = temps.AcquireX();
1631
1632 CompareObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
1633 Check(eq, kOperandIsNotABoundFunction);
1634 }
1635 }
1636
AssertGeneratorObject(Register object)1637 void MacroAssembler::AssertGeneratorObject(Register object) {
1638 if (emit_debug_code()) {
1639 AssertNotSmi(object, kOperandIsASmiAndNotAGeneratorObject);
1640
1641 UseScratchRegisterScope temps(this);
1642 Register temp = temps.AcquireX();
1643
1644 CompareObjectType(object, temp, temp, JS_GENERATOR_OBJECT_TYPE);
1645 Check(eq, kOperandIsNotAGeneratorObject);
1646 }
1647 }
1648
AssertReceiver(Register object)1649 void MacroAssembler::AssertReceiver(Register object) {
1650 if (emit_debug_code()) {
1651 AssertNotSmi(object, kOperandIsASmiAndNotAReceiver);
1652
1653 UseScratchRegisterScope temps(this);
1654 Register temp = temps.AcquireX();
1655
1656 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
1657 CompareObjectType(object, temp, temp, FIRST_JS_RECEIVER_TYPE);
1658 Check(hs, kOperandIsNotAReceiver);
1659 }
1660 }
1661
1662
AssertUndefinedOrAllocationSite(Register object,Register scratch)1663 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
1664 Register scratch) {
1665 if (emit_debug_code()) {
1666 Label done_checking;
1667 AssertNotSmi(object);
1668 JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
1669 Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1670 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
1671 Assert(eq, kExpectedUndefinedOrCell);
1672 Bind(&done_checking);
1673 }
1674 }
1675
1676
AssertString(Register object)1677 void MacroAssembler::AssertString(Register object) {
1678 if (emit_debug_code()) {
1679 UseScratchRegisterScope temps(this);
1680 Register temp = temps.AcquireX();
1681 STATIC_ASSERT(kSmiTag == 0);
1682 Tst(object, kSmiTagMask);
1683 Check(ne, kOperandIsASmiAndNotAString);
1684 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1685 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
1686 Check(lo, kOperandIsNotAString);
1687 }
1688 }
1689
1690
AssertPositiveOrZero(Register value)1691 void MacroAssembler::AssertPositiveOrZero(Register value) {
1692 if (emit_debug_code()) {
1693 Label done;
1694 int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
1695 Tbz(value, sign_bit, &done);
1696 Abort(kUnexpectedNegativeValue);
1697 Bind(&done);
1698 }
1699 }
1700
AssertNotNumber(Register value)1701 void MacroAssembler::AssertNotNumber(Register value) {
1702 if (emit_debug_code()) {
1703 STATIC_ASSERT(kSmiTag == 0);
1704 Tst(value, kSmiTagMask);
1705 Check(ne, kOperandIsANumber);
1706 Label done;
1707 JumpIfNotHeapNumber(value, &done);
1708 Abort(kOperandIsANumber);
1709 Bind(&done);
1710 }
1711 }
1712
AssertNumber(Register value)1713 void MacroAssembler::AssertNumber(Register value) {
1714 if (emit_debug_code()) {
1715 Label done;
1716 JumpIfSmi(value, &done);
1717 JumpIfHeapNumber(value, &done);
1718 Abort(kOperandIsNotANumber);
1719 Bind(&done);
1720 }
1721 }
1722
CallStub(CodeStub * stub,TypeFeedbackId ast_id)1723 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
1724 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
1725 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
1726 }
1727
1728
TailCallStub(CodeStub * stub)1729 void MacroAssembler::TailCallStub(CodeStub* stub) {
1730 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
1731 }
1732
1733
CallRuntime(const Runtime::Function * f,int num_arguments,SaveFPRegsMode save_doubles)1734 void MacroAssembler::CallRuntime(const Runtime::Function* f,
1735 int num_arguments,
1736 SaveFPRegsMode save_doubles) {
1737 // All arguments must be on the stack before this function is called.
1738 // x0 holds the return value after the call.
1739
1740 // Check that the number of arguments matches what the function expects.
1741 // If f->nargs is -1, the function can accept a variable number of arguments.
1742 CHECK(f->nargs < 0 || f->nargs == num_arguments);
1743
1744 // Place the necessary arguments.
1745 Mov(x0, num_arguments);
1746 Mov(x1, ExternalReference(f, isolate()));
1747
1748 CEntryStub stub(isolate(), 1, save_doubles);
1749 CallStub(&stub);
1750 }
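
// A minimal usage sketch (illustration only; the runtime id and the argument
// registers are hypothetical). All arguments must already be on the stack:
//
//   __ Push(x2, x3);                            // The two runtime arguments.
//   __ CallRuntime(Runtime::kSomeFunction, 2);  // Result is returned in x0.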
1751
1752
CallExternalReference(const ExternalReference & ext,int num_arguments)1753 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
1754 int num_arguments) {
1755 Mov(x0, num_arguments);
1756 Mov(x1, ext);
1757
1758 CEntryStub stub(isolate(), 1);
1759 CallStub(&stub);
1760 }
1761
JumpToExternalReference(const ExternalReference & builtin,bool builtin_exit_frame)1762 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
1763 bool builtin_exit_frame) {
1764 Mov(x1, builtin);
1765 CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
1766 builtin_exit_frame);
1767 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
1768 }
1769
TailCallRuntime(Runtime::FunctionId fid)1770 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1771 const Runtime::Function* function = Runtime::FunctionForId(fid);
1772 DCHECK_EQ(1, function->result_size);
1773 if (function->nargs >= 0) {
1774 // TODO(1236192): Most runtime routines don't need the number of
1775 // arguments passed in because it is constant. At some point we
1776 // should remove this need and make the runtime routine entry code
1777 // smarter.
1778 Mov(x0, function->nargs);
1779 }
1780 JumpToExternalReference(ExternalReference(fid, isolate()));
1781 }
1782
1783
InitializeNewString(Register string,Register length,Heap::RootListIndex map_index,Register scratch1,Register scratch2)1784 void MacroAssembler::InitializeNewString(Register string,
1785 Register length,
1786 Heap::RootListIndex map_index,
1787 Register scratch1,
1788 Register scratch2) {
1789 DCHECK(!AreAliased(string, length, scratch1, scratch2));
1790 LoadRoot(scratch2, map_index);
1791 SmiTag(scratch1, length);
1792 Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1793
1794 Mov(scratch2, String::kEmptyHashField);
1795 Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
1796 Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
1797 }
1798
1799
ActivationFrameAlignment()1800 int MacroAssembler::ActivationFrameAlignment() {
1801 #if V8_HOST_ARCH_ARM64
1802 // Running on the real platform. Use the alignment as mandated by the local
1803 // environment.
1804 // Note: This will break if we ever start generating snapshots on one ARM
1805 // platform for another ARM platform with a different alignment.
1806 return base::OS::ActivationFrameAlignment();
1807 #else // V8_HOST_ARCH_ARM64
1808 // If we are using the simulator then we should always align to the expected
1809 // alignment. As the simulator is used to generate snapshots we do not know
1810 // if the target platform will need alignment, so this is controlled from a
1811 // flag.
1812 return FLAG_sim_stack_alignment;
1813 #endif // V8_HOST_ARCH_ARM64
1814 }
1815
1816
CallCFunction(ExternalReference function,int num_of_reg_args)1817 void MacroAssembler::CallCFunction(ExternalReference function,
1818 int num_of_reg_args) {
1819 CallCFunction(function, num_of_reg_args, 0);
1820 }
1821
1822
CallCFunction(ExternalReference function,int num_of_reg_args,int num_of_double_args)1823 void MacroAssembler::CallCFunction(ExternalReference function,
1824 int num_of_reg_args,
1825 int num_of_double_args) {
1826 UseScratchRegisterScope temps(this);
1827 Register temp = temps.AcquireX();
1828 Mov(temp, function);
1829 CallCFunction(temp, num_of_reg_args, num_of_double_args);
1830 }
1831
1832
CallCFunction(Register function,int num_of_reg_args,int num_of_double_args)1833 void MacroAssembler::CallCFunction(Register function,
1834 int num_of_reg_args,
1835 int num_of_double_args) {
1836 DCHECK(has_frame());
1837 // We can pass 8 integer arguments in registers. If we need to pass more than
1838 // that, we'll need to implement support for passing them on the stack.
1839 DCHECK(num_of_reg_args <= 8);
1840
1841 // If we're passing doubles, we're limited to the following prototypes
1842 // (defined by ExternalReference::Type):
1843 // BUILTIN_COMPARE_CALL: int f(double, double)
1844 // BUILTIN_FP_FP_CALL: double f(double, double)
1845 // BUILTIN_FP_CALL: double f(double)
1846 // BUILTIN_FP_INT_CALL: double f(double, int)
1847 if (num_of_double_args > 0) {
1848 DCHECK(num_of_reg_args <= 1);
1849 DCHECK((num_of_double_args + num_of_reg_args) <= 2);
1850 }
1851
1852
1853 // If the stack pointer is not csp, we need to derive an aligned csp from the
1854 // current stack pointer.
1855 const Register old_stack_pointer = StackPointer();
1856 if (!csp.Is(old_stack_pointer)) {
1857 AssertStackConsistency();
1858
1859 int sp_alignment = ActivationFrameAlignment();
1860 // The ABI mandates at least 16-byte alignment.
1861 DCHECK(sp_alignment >= 16);
1862 DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
1863
1864 // The current stack pointer is a callee saved register, and is preserved
1865 // across the call.
1866 DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
1867
1868 // Align and synchronize the system stack pointer with jssp.
1869 Bic(csp, old_stack_pointer, sp_alignment - 1);
1870 SetStackPointer(csp);
1871 }
1872
1873 // Call directly. The function called cannot cause a GC, or allow preemption,
1874 // so the return address in the link register stays correct.
1875 Call(function);
1876
1877 if (!csp.Is(old_stack_pointer)) {
1878 if (emit_debug_code()) {
1879 // Because the stack pointer must be aligned on a 16-byte boundary, the
1880 // aligned csp can be up to 12 bytes below the jssp. This is the case
1881 // where we only pushed one W register on top of an aligned jssp.
1882 UseScratchRegisterScope temps(this);
1883 Register temp = temps.AcquireX();
1884 DCHECK(ActivationFrameAlignment() == 16);
1885 Sub(temp, csp, old_stack_pointer);
1886 // We want temp <= 0 && temp >= -12.
1887 Cmp(temp, 0);
1888 Ccmp(temp, -12, NFlag, le);
1889 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
1890 }
1891 SetStackPointer(old_stack_pointer);
1892 }
1893 }
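
// A minimal sketch of a conforming call (illustration only; the external
// reference name is hypothetical and must be registered with one of the
// BUILTIN_* types listed above, here BUILTIN_FP_FP_CALL):
//
//   __ Fmov(d0, left);   // First double argument.
//   __ Fmov(d1, right);  // Second double argument.
//   __ CallCFunction(
//       ExternalReference::some_double_double_function(isolate()),
//       0, 2);           // 0 integer args, 2 double args; result in d0.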
1894
1895
Jump(Register target)1896 void MacroAssembler::Jump(Register target) {
1897 Br(target);
1898 }
1899
1900
Jump(intptr_t target,RelocInfo::Mode rmode,Condition cond)1901 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
1902 Condition cond) {
1903 if (cond == nv) return;
1904 UseScratchRegisterScope temps(this);
1905 Register temp = temps.AcquireX();
1906 Label done;
1907 if (cond != al) B(NegateCondition(cond), &done);
1908 Mov(temp, Operand(target, rmode));
1909 Br(temp);
1910 Bind(&done);
1911 }
1912
1913
Jump(Address target,RelocInfo::Mode rmode,Condition cond)1914 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
1915 Condition cond) {
1916 DCHECK(!RelocInfo::IsCodeTarget(rmode));
1917 Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
1918 }
1919
1920
Jump(Handle<Code> code,RelocInfo::Mode rmode,Condition cond)1921 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
1922 Condition cond) {
1923 DCHECK(RelocInfo::IsCodeTarget(rmode));
1924 AllowDeferredHandleDereference embedding_raw_address;
1925 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
1926 }
1927
1928
Call(Register target)1929 void MacroAssembler::Call(Register target) {
1930 BlockPoolsScope scope(this);
1931 #ifdef DEBUG
1932 Label start_call;
1933 Bind(&start_call);
1934 #endif
1935
1936 Blr(target);
1937
1938 #ifdef DEBUG
1939 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1940 #endif
1941 }
1942
1943
Call(Label * target)1944 void MacroAssembler::Call(Label* target) {
1945 BlockPoolsScope scope(this);
1946 #ifdef DEBUG
1947 Label start_call;
1948 Bind(&start_call);
1949 #endif
1950
1951 Bl(target);
1952
1953 #ifdef DEBUG
1954 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1955 #endif
1956 }
1957
1958
1959 // MacroAssembler::CallSize is sensitive to changes in this function, as it
1960 // needs to know how many instructions are used to branch to the target.
Call(Address target,RelocInfo::Mode rmode)1961 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
1962 BlockPoolsScope scope(this);
1963 #ifdef DEBUG
1964 Label start_call;
1965 Bind(&start_call);
1966 #endif
1967
1968 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
1969 DCHECK(rmode != RelocInfo::NONE32);
1970
1971 UseScratchRegisterScope temps(this);
1972 Register temp = temps.AcquireX();
1973
1974 if (rmode == RelocInfo::NONE64) {
1975 // Addresses are 48 bits so we never need to load the upper 16 bits.
1976 uint64_t imm = reinterpret_cast<uint64_t>(target);
1977 // If we don't use ARM tagged addresses, the 16 higher bits must be 0.
1978 DCHECK(((imm >> 48) & 0xffff) == 0);
1979 movz(temp, (imm >> 0) & 0xffff, 0);
1980 movk(temp, (imm >> 16) & 0xffff, 16);
1981 movk(temp, (imm >> 32) & 0xffff, 32);
1982 } else {
1983 Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
1984 }
1985 Blr(temp);
1986 #ifdef DEBUG
1987 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
1988 #endif
1989 }
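
// For example (illustration only), for target == 0x0000123456789abc the
// NONE64 path above materializes the address with three move instructions:
//
//   movz temp, #0x9abc             // Bits [15:0].
//   movk temp, #0x5678, lsl #16    // Bits [31:16].
//   movk temp, #0x1234, lsl #32    // Bits [47:32].
//   blr  temp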
1990
1991
Call(Handle<Code> code,RelocInfo::Mode rmode,TypeFeedbackId ast_id)1992 void MacroAssembler::Call(Handle<Code> code,
1993 RelocInfo::Mode rmode,
1994 TypeFeedbackId ast_id) {
1995 #ifdef DEBUG
1996 Label start_call;
1997 Bind(&start_call);
1998 #endif
1999
2000 if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
2001 SetRecordedAstId(ast_id);
2002 rmode = RelocInfo::CODE_TARGET_WITH_ID;
2003 }
2004
2005 AllowDeferredHandleDereference embedding_raw_address;
2006 Call(reinterpret_cast<Address>(code.location()), rmode);
2007
2008 #ifdef DEBUG
2009 // Check the size of the code generated.
2010 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
2011 #endif
2012 }
2013
2014
CallSize(Register target)2015 int MacroAssembler::CallSize(Register target) {
2016 USE(target);
2017 return kInstructionSize;
2018 }
2019
2020
CallSize(Label * target)2021 int MacroAssembler::CallSize(Label* target) {
2022 USE(target);
2023 return kInstructionSize;
2024 }
2025
2026
CallSize(Address target,RelocInfo::Mode rmode)2027 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
2028 USE(target);
2029
2030 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2031 DCHECK(rmode != RelocInfo::NONE32);
2032
2033 if (rmode == RelocInfo::NONE64) {
2034 return kCallSizeWithoutRelocation;
2035 } else {
2036 return kCallSizeWithRelocation;
2037 }
2038 }
2039
2040
CallSize(Handle<Code> code,RelocInfo::Mode rmode,TypeFeedbackId ast_id)2041 int MacroAssembler::CallSize(Handle<Code> code,
2042 RelocInfo::Mode rmode,
2043 TypeFeedbackId ast_id) {
2044 USE(code);
2045 USE(ast_id);
2046
2047 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2048 DCHECK(rmode != RelocInfo::NONE32);
2049
2050 if (rmode == RelocInfo::NONE64) {
2051 return kCallSizeWithoutRelocation;
2052 } else {
2053 return kCallSizeWithRelocation;
2054 }
2055 }
2056
2057
JumpIfHeapNumber(Register object,Label * on_heap_number,SmiCheckType smi_check_type)2058 void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
2059 SmiCheckType smi_check_type) {
2060 Label on_not_heap_number;
2061
2062 if (smi_check_type == DO_SMI_CHECK) {
2063 JumpIfSmi(object, &on_not_heap_number);
2064 }
2065
2066 AssertNotSmi(object);
2067
2068 UseScratchRegisterScope temps(this);
2069 Register temp = temps.AcquireX();
2070 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2071 JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
2072
2073 Bind(&on_not_heap_number);
2074 }
2075
2076
JumpIfNotHeapNumber(Register object,Label * on_not_heap_number,SmiCheckType smi_check_type)2077 void MacroAssembler::JumpIfNotHeapNumber(Register object,
2078 Label* on_not_heap_number,
2079 SmiCheckType smi_check_type) {
2080 if (smi_check_type == DO_SMI_CHECK) {
2081 JumpIfSmi(object, on_not_heap_number);
2082 }
2083
2084 AssertNotSmi(object);
2085
2086 UseScratchRegisterScope temps(this);
2087 Register temp = temps.AcquireX();
2088 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2089 JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
2090 }
2091
2092
TryRepresentDoubleAsInt(Register as_int,FPRegister value,FPRegister scratch_d,Label * on_successful_conversion,Label * on_failed_conversion)2093 void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
2094 FPRegister value,
2095 FPRegister scratch_d,
2096 Label* on_successful_conversion,
2097 Label* on_failed_conversion) {
2098 // Convert to an int and back again, then compare with the original value.
2099 Fcvtzs(as_int, value);
2100 Scvtf(scratch_d, as_int);
2101 Fcmp(value, scratch_d);
2102
2103 if (on_successful_conversion) {
2104 B(on_successful_conversion, eq);
2105 }
2106 if (on_failed_conversion) {
2107 B(on_failed_conversion, ne);
2108 }
2109 }
2110
2111
TestForMinusZero(DoubleRegister input)2112 void MacroAssembler::TestForMinusZero(DoubleRegister input) {
2113 UseScratchRegisterScope temps(this);
2114 Register temp = temps.AcquireX();
2115 // Floating point -0.0 has only its sign bit set when reinterpreted as an
2116 // integer (INT64_MIN), so subtracting 1 (cmp) will cause signed overflow.
2117 Fmov(temp, input);
2118 Cmp(temp, 1);
2119 }
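
// Worked example (illustration only): Fmov copies the raw bit pattern, so for
// input == -0.0 the X register holds 0x8000000000000000 (INT64_MIN), and
// Cmp(temp, 1) computes INT64_MIN - 1, which overflows and sets the V flag.
// Any other input leaves V clear, so callers branch on vs/vc.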
2120
2121
JumpIfMinusZero(DoubleRegister input,Label * on_negative_zero)2122 void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
2123 Label* on_negative_zero) {
2124 TestForMinusZero(input);
2125 B(vs, on_negative_zero);
2126 }
2127
2128
JumpIfMinusZero(Register input,Label * on_negative_zero)2129 void MacroAssembler::JumpIfMinusZero(Register input,
2130 Label* on_negative_zero) {
2131 DCHECK(input.Is64Bits());
2132 // Floating point value is in an integer register. Detect -0.0 by subtracting
2133 // 1 (cmp), which will cause overflow.
2134 Cmp(input, 1);
2135 B(vs, on_negative_zero);
2136 }
2137
2138
ClampInt32ToUint8(Register output,Register input)2139 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
2140 // Clamp the value to [0..255].
2141 Cmp(input.W(), Operand(input.W(), UXTB));
2142 // If input < input & 0xff, it must be < 0, so saturate to 0.
2143 Csel(output.W(), wzr, input.W(), lt);
2144 // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
2145 Csel(output.W(), output.W(), 255, le);
2146 }
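
// Worked example of the compare-with-UXTB trick above (illustration only;
// the comparisons are signed):
//
//   input = -5  (0xfffffffb): -5  <  (input & 0xff) == 251 -> lt -> output 0
//   input = 200 (0x000000c8): 200 == (input & 0xff)        -> le -> output 200
//   input = 300 (0x0000012c): 300 >  (input & 0xff) == 44  -> gt -> output 255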
2147
2148
ClampInt32ToUint8(Register in_out)2149 void MacroAssembler::ClampInt32ToUint8(Register in_out) {
2150 ClampInt32ToUint8(in_out, in_out);
2151 }
2152
2153
ClampDoubleToUint8(Register output,DoubleRegister input,DoubleRegister dbl_scratch)2154 void MacroAssembler::ClampDoubleToUint8(Register output,
2155 DoubleRegister input,
2156 DoubleRegister dbl_scratch) {
2157 // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
2158 // - Inputs lower than 0 (including -infinity) produce 0.
2159 // - Inputs higher than 255 (including +infinity) produce 255.
2160 // Also, it seems that PIXEL types use round-to-nearest rather than
2161 // round-towards-zero.
2162
2163 // Squash +infinity before the conversion, since Fcvtnu would otherwise
2164 // saturate it to the maximum unsigned value instead of 255.
2165 Fmov(dbl_scratch, 255);
2166 Fmin(dbl_scratch, dbl_scratch, input);
2167
2168 // Convert double to unsigned integer. Values less than zero become zero.
2169 // Values greater than 255 have already been clamped to 255.
2170 Fcvtnu(output, dbl_scratch);
2171 }
2172
InitializeFieldsWithFiller(Register current_address,Register end_address,Register filler)2173 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
2174 Register end_address,
2175 Register filler) {
2176 DCHECK(!current_address.Is(csp));
2177 UseScratchRegisterScope temps(this);
2178 Register distance_in_words = temps.AcquireX();
2179 Label done;
2180
2181 // Calculate the distance. If it's <= zero then there's nothing to do.
2182 Subs(distance_in_words, end_address, current_address);
2183 B(le, &done);
2184
2185 // There's at least one field to fill, so do this unconditionally.
2186 Str(filler, MemOperand(current_address));
2187
2188 // If distance_in_words is an odd number of words, advance current_address
2189 // by one word; otherwise the pairs loop below would overwrite the field
2190 // that was stored above.
2191 And(distance_in_words, distance_in_words, kPointerSize);
2192 Add(current_address, current_address, distance_in_words);
2193
2194 // Store filler to memory in pairs.
2195 Label loop, entry;
2196 B(&entry);
2197 Bind(&loop);
2198 Stp(filler, filler, MemOperand(current_address, 2 * kPointerSize, PostIndex));
2199 Bind(&entry);
2200 Cmp(current_address, end_address);
2201 B(lo, &loop);
2202
2203 Bind(&done);
2204 }
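
// Worked example (illustration only): for a distance of three words the Str
// above fills word 0, the And leaves kPointerSize in distance_in_words, so
// current_address advances by one word and the Stp loop fills words 1 and 2.
// For four words the And yields 0, word 0 is (harmlessly) written twice, and
// the loop fills words 0-3 in two pairs.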
2205
2206
JumpIfEitherIsNotSequentialOneByteStrings(Register first,Register second,Register scratch1,Register scratch2,Label * failure,SmiCheckType smi_check)2207 void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
2208 Register first, Register second, Register scratch1, Register scratch2,
2209 Label* failure, SmiCheckType smi_check) {
2210 if (smi_check == DO_SMI_CHECK) {
2211 JumpIfEitherSmi(first, second, failure);
2212 } else if (emit_debug_code()) {
2213 DCHECK(smi_check == DONT_DO_SMI_CHECK);
2214 Label not_smi;
2215 JumpIfEitherSmi(first, second, NULL, &not_smi);
2216
2217 // At least one input is a smi, but the flags indicated a smi check wasn't
2218 // needed.
2219 Abort(kUnexpectedSmi);
2220
2221 Bind(&not_smi);
2222 }
2223
2224 // Test that both first and second are sequential one-byte strings.
2225 Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2226 Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2227 Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2228 Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2229
2230 JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
2231 scratch2, failure);
2232 }
2233
2234
JumpIfEitherInstanceTypeIsNotSequentialOneByte(Register first,Register second,Register scratch1,Register scratch2,Label * failure)2235 void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
2236 Register first, Register second, Register scratch1, Register scratch2,
2237 Label* failure) {
2238 DCHECK(!AreAliased(scratch1, second));
2239 DCHECK(!AreAliased(scratch1, scratch2));
2240 const int kFlatOneByteStringMask =
2241 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2242 const int kFlatOneByteStringTag =
2243 kStringTag | kOneByteStringTag | kSeqStringTag;
2244 And(scratch1, first, kFlatOneByteStringMask);
2245 And(scratch2, second, kFlatOneByteStringMask);
2246 Cmp(scratch1, kFlatOneByteStringTag);
2247 Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
2248 B(ne, failure);
2249 }
2250
2251
JumpIfInstanceTypeIsNotSequentialOneByte(Register type,Register scratch,Label * failure)2252 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
2253 Register scratch,
2254 Label* failure) {
2255 const int kFlatOneByteStringMask =
2256 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2257 const int kFlatOneByteStringTag =
2258 kStringTag | kOneByteStringTag | kSeqStringTag;
2259 And(scratch, type, kFlatOneByteStringMask);
2260 Cmp(scratch, kFlatOneByteStringTag);
2261 B(ne, failure);
2262 }
2263
2264
JumpIfBothInstanceTypesAreNotSequentialOneByte(Register first,Register second,Register scratch1,Register scratch2,Label * failure)2265 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2266 Register first, Register second, Register scratch1, Register scratch2,
2267 Label* failure) {
2268 DCHECK(!AreAliased(first, second, scratch1, scratch2));
2269 const int kFlatOneByteStringMask =
2270 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2271 const int kFlatOneByteStringTag =
2272 kStringTag | kOneByteStringTag | kSeqStringTag;
2273 And(scratch1, first, kFlatOneByteStringMask);
2274 And(scratch2, second, kFlatOneByteStringMask);
2275 Cmp(scratch1, kFlatOneByteStringTag);
2276 Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
2277 B(ne, failure);
2278 }
2279
2280
JumpIfNotUniqueNameInstanceType(Register type,Label * not_unique_name)2281 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
2282 Label* not_unique_name) {
2283 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
2284 // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
2285 // continue
2286 // } else {
2287 // goto not_unique_name
2288 // }
2289 Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
2290 Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
2291 B(ne, not_unique_name);
2292 }
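
// A sketch of how the two tests are fused (illustration only): Tst sets Z when
// the type is an internalized string. Ccmp re-tests only if Z was clear (ne),
// comparing type against SYMBOL_TYPE; otherwise it forces ZFlag. After the
// sequence, Z is therefore set iff the type is an internalized string or a
// symbol, and the final B(ne, ...) branches away for anything else.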
2293
PrepareForTailCall(const ParameterCount & callee_args_count,Register caller_args_count_reg,Register scratch0,Register scratch1)2294 void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
2295 Register caller_args_count_reg,
2296 Register scratch0, Register scratch1) {
2297 #if DEBUG
2298 if (callee_args_count.is_reg()) {
2299 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
2300 scratch1));
2301 } else {
2302 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
2303 }
2304 #endif
2305
2306 // Calculate the end of the destination area where we will put the arguments
2307 // after we drop the current frame. We add kPointerSize to count the receiver
2308 // argument, which is not included in the formal parameter count.
2309 Register dst_reg = scratch0;
2310 __ add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
2311 __ add(dst_reg, dst_reg,
2312 Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
2313
2314 Register src_reg = caller_args_count_reg;
2315 // Calculate the end of source area. +kPointerSize is for the receiver.
2316 if (callee_args_count.is_reg()) {
2317 add(src_reg, jssp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
2318 add(src_reg, src_reg, Operand(kPointerSize));
2319 } else {
2320 add(src_reg, jssp,
2321 Operand((callee_args_count.immediate() + 1) * kPointerSize));
2322 }
2323
2324 if (FLAG_debug_code) {
2325 __ Cmp(src_reg, dst_reg);
2326 __ Check(lo, kStackAccessBelowStackPointer);
2327 }
2328
2329 // Restore caller's frame pointer and return address now as they will be
2330 // overwritten by the copying loop.
2331 __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
2332 __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2333
2334 // Now copy callee arguments to the caller frame going backwards to avoid
2335 // callee arguments corruption (source and destination areas could overlap).
2336
2337 // Both src_reg and dst_reg are pointing to the word after the one to copy,
2338 // so they must be pre-decremented in the loop.
2339 Register tmp_reg = scratch1;
2340 Label loop, entry;
2341 __ B(&entry);
2342 __ bind(&loop);
2343 __ Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
2344 __ Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
2345 __ bind(&entry);
2346 __ Cmp(jssp, src_reg);
2347 __ B(ne, &loop);
2348
2349 // Leave current frame.
2350 __ Mov(jssp, dst_reg);
2351 __ SetStackPointer(jssp);
2352 __ AssertStackConsistency();
2353 }
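
// A sketch of the effect (illustration only): the loop copies
// callee_args_count + 1 words (the arguments plus the receiver) from the
// callee's argument area into the caller's, then jssp is moved to dst_reg.
// The current frame is thereby dropped, and the subsequent call returns
// directly to the original caller.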
2354
InvokePrologue(const ParameterCount & expected,const ParameterCount & actual,Label * done,InvokeFlag flag,bool * definitely_mismatches,const CallWrapper & call_wrapper)2355 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2356 const ParameterCount& actual,
2357 Label* done,
2358 InvokeFlag flag,
2359 bool* definitely_mismatches,
2360 const CallWrapper& call_wrapper) {
2361 bool definitely_matches = false;
2362 *definitely_mismatches = false;
2363 Label regular_invoke;
2364
2365 // Check whether the expected and actual arguments count match. If not,
2366 // setup registers according to contract with ArgumentsAdaptorTrampoline:
2367 // x0: actual arguments count.
2368 // x1: function (passed through to callee).
2369 // x2: expected arguments count.
2370
2371 // The code below is made a lot easier because the calling code already sets
2372 // up actual and expected registers according to the contract if values are
2373 // passed in registers.
2374 DCHECK(actual.is_immediate() || actual.reg().is(x0));
2375 DCHECK(expected.is_immediate() || expected.reg().is(x2));
2376
2377 if (expected.is_immediate()) {
2378 DCHECK(actual.is_immediate());
2379 Mov(x0, actual.immediate());
2380 if (expected.immediate() == actual.immediate()) {
2381 definitely_matches = true;
2382
2383 } else {
2384 if (expected.immediate() ==
2385 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2386 // Don't worry about adapting arguments for builtins that
2387 // don't want that done. Skip the adaptation code by making it look
2388 // like we have a match between expected and actual number of
2389 // arguments.
2390 definitely_matches = true;
2391 } else {
2392 *definitely_mismatches = true;
2393 // Set up x2 for the argument adaptor.
2394 Mov(x2, expected.immediate());
2395 }
2396 }
2397
2398 } else { // expected is a register.
2399 Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
2400 : Operand(actual.reg());
2401 Mov(x0, actual_op);
2402 // If actual == expected perform a regular invocation.
2403 Cmp(expected.reg(), actual_op);
2404 B(eq, &regular_invoke);
2405 }
2406
2407 // If the argument counts may mismatch, generate a call to the argument
2408 // adaptor.
2409 if (!definitely_matches) {
2410 Handle<Code> adaptor =
2411 isolate()->builtins()->ArgumentsAdaptorTrampoline();
2412 if (flag == CALL_FUNCTION) {
2413 call_wrapper.BeforeCall(CallSize(adaptor));
2414 Call(adaptor);
2415 call_wrapper.AfterCall();
2416 if (!*definitely_mismatches) {
2417 // If the arg counts don't match, no extra code is emitted by
2418 // MAsm::InvokeFunctionCode and we can just fall through.
2419 B(done);
2420 }
2421 } else {
2422 Jump(adaptor, RelocInfo::CODE_TARGET);
2423 }
2424 }
2425 Bind(&regular_invoke);
2426 }
2427
2428
FloodFunctionIfStepping(Register fun,Register new_target,const ParameterCount & expected,const ParameterCount & actual)2429 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
2430 const ParameterCount& expected,
2431 const ParameterCount& actual) {
2432 Label skip_flooding;
2433 ExternalReference last_step_action =
2434 ExternalReference::debug_last_step_action_address(isolate());
2435 STATIC_ASSERT(StepFrame > StepIn);
2436 Mov(x4, Operand(last_step_action));
2437 Ldrsb(x4, MemOperand(x4));
2438 CompareAndBranch(x4, Operand(StepIn), lt, &skip_flooding);
2439 {
2440 FrameScope frame(this,
2441 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
2442 if (expected.is_reg()) {
2443 SmiTag(expected.reg());
2444 Push(expected.reg());
2445 }
2446 if (actual.is_reg()) {
2447 SmiTag(actual.reg());
2448 Push(actual.reg());
2449 }
2450 if (new_target.is_valid()) {
2451 Push(new_target);
2452 }
2453 Push(fun);
2454 Push(fun);
2455 CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
2456 Pop(fun);
2457 if (new_target.is_valid()) {
2458 Pop(new_target);
2459 }
2460 if (actual.is_reg()) {
2461 Pop(actual.reg());
2462 SmiUntag(actual.reg());
2463 }
2464 if (expected.is_reg()) {
2465 Pop(expected.reg());
2466 SmiUntag(expected.reg());
2467 }
2468 }
2469 bind(&skip_flooding);
2470 }
2471
2472
InvokeFunctionCode(Register function,Register new_target,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)2473 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
2474 const ParameterCount& expected,
2475 const ParameterCount& actual,
2476 InvokeFlag flag,
2477 const CallWrapper& call_wrapper) {
2478 // You can't call a function without a valid frame.
2479 DCHECK(flag == JUMP_FUNCTION || has_frame());
2480 DCHECK(function.is(x1));
2481 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
2482
2483 FloodFunctionIfStepping(function, new_target, expected, actual);
2484
2485 // Clear the new.target register if not given.
2486 if (!new_target.is_valid()) {
2487 LoadRoot(x3, Heap::kUndefinedValueRootIndex);
2488 }
2489
2490 Label done;
2491 bool definitely_mismatches = false;
2492 InvokePrologue(expected, actual, &done, flag, &definitely_mismatches,
2493 call_wrapper);
2494
2495 // If we are certain that actual != expected, then we know InvokePrologue will
2496 // have handled the call through the argument adaptor mechanism.
2497 // The called function expects the call kind in x5.
2498 if (!definitely_mismatches) {
2499 // We call indirectly through the code field in the function to
2500 // allow recompilation to take effect without changing any of the
2501 // call sites.
2502 Register code = x4;
2503 Ldr(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
2504 if (flag == CALL_FUNCTION) {
2505 call_wrapper.BeforeCall(CallSize(code));
2506 Call(code);
2507 call_wrapper.AfterCall();
2508 } else {
2509 DCHECK(flag == JUMP_FUNCTION);
2510 Jump(code);
2511 }
2512 }
2513
2514 // Continue here if InvokePrologue does handle the invocation due to
2515 // mismatched parameter counts.
2516 Bind(&done);
2517 }
2518
2519
InvokeFunction(Register function,Register new_target,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)2520 void MacroAssembler::InvokeFunction(Register function,
2521 Register new_target,
2522 const ParameterCount& actual,
2523 InvokeFlag flag,
2524 const CallWrapper& call_wrapper) {
2525 // You can't call a function without a valid frame.
2526 DCHECK(flag == JUMP_FUNCTION || has_frame());
2527
2528 // Contract with called JS functions requires that function is passed in x1.
2529 // (See FullCodeGenerator::Generate().)
2530 DCHECK(function.is(x1));
2531
2532 Register expected_reg = x2;
2533
2534 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2535 // The number of arguments is stored as an int32_t, and -1 is a marker
2536 // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
2537 // extension to correctly handle it.
2538 Ldr(expected_reg, FieldMemOperand(function,
2539 JSFunction::kSharedFunctionInfoOffset));
2540 Ldrsw(expected_reg,
2541 FieldMemOperand(expected_reg,
2542 SharedFunctionInfo::kFormalParameterCountOffset));
2543
2544 ParameterCount expected(expected_reg);
2545 InvokeFunctionCode(function, new_target, expected, actual, flag,
2546 call_wrapper);
2547 }
2548
2549
InvokeFunction(Register function,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)2550 void MacroAssembler::InvokeFunction(Register function,
2551 const ParameterCount& expected,
2552 const ParameterCount& actual,
2553 InvokeFlag flag,
2554 const CallWrapper& call_wrapper) {
2555 // You can't call a function without a valid frame.
2556 DCHECK(flag == JUMP_FUNCTION || has_frame());
2557
2558 // Contract with called JS functions requires that function is passed in x1.
2559 // (See FullCodeGenerator::Generate().)
2560 DCHECK(function.Is(x1));
2561
2562 // Set up the context.
2563 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2564
2565 InvokeFunctionCode(function, no_reg, expected, actual, flag, call_wrapper);
2566 }
2567
2568
InvokeFunction(Handle<JSFunction> function,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)2569 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2570 const ParameterCount& expected,
2571 const ParameterCount& actual,
2572 InvokeFlag flag,
2573 const CallWrapper& call_wrapper) {
2574 // Contract with called JS functions requires that function is passed in x1.
2575 // (See FullCodeGenerator::Generate().)
2576 __ LoadObject(x1, function);
2577 InvokeFunction(x1, expected, actual, flag, call_wrapper);
2578 }
2579
2580
TryConvertDoubleToInt64(Register result,DoubleRegister double_input,Label * done)2581 void MacroAssembler::TryConvertDoubleToInt64(Register result,
2582 DoubleRegister double_input,
2583 Label* done) {
2584 // Try to convert with an FPU convert instruction. It's trivial to compute
2585 // the modulo operation on an integer register so we convert to a 64-bit
2586 // integer.
2587 //
2588 // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
2589 // when the double is out of range. NaNs and infinities will be converted to 0
2590 // (as ECMA-262 requires).
2591 Fcvtzs(result.X(), double_input);
2592
2593 // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
2594 // representable using a double, so if the result is one of those then we know
2595 // that saturation occurred, and we need to handle the conversion manually.
2596 //
2597 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
2598 // 1 will cause signed overflow.
2599 Cmp(result.X(), 1);
2600 Ccmp(result.X(), -1, VFlag, vc);
2601
2602 B(vc, done);
2603 }
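
// Worked example (illustration only): Fcvtzs saturates 1.0e30 to INT64_MAX.
// Cmp(result, 1) leaves V clear, so Ccmp then compares result with -1;
// INT64_MAX - (-1) overflows, V is set, and the final B(vc, done) is not
// taken, leaving the caller to handle the saturated case. For an in-range
// input such as 42.0 both checks leave V clear and the branch to done is
// taken.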
2604
2605
TruncateDoubleToI(Register result,DoubleRegister double_input)2606 void MacroAssembler::TruncateDoubleToI(Register result,
2607 DoubleRegister double_input) {
2608 Label done;
2609
2610 // Try to convert the double to an int64. If successful, the bottom 32 bits
2611 // contain our truncated int32 result.
2612 TryConvertDoubleToInt64(result, double_input, &done);
2613
2614 const Register old_stack_pointer = StackPointer();
2615 if (csp.Is(old_stack_pointer)) {
2616 // This currently only happens in compiler unit tests. If it arises
2617 // during regular code generation the DoubleToI stub should be updated to
2618 // cope with csp and have an extra parameter indicating which stack pointer
2619 // it should use.
2620 Push(jssp, xzr); // Push xzr to maintain csp required 16-bytes alignment.
2621 Mov(jssp, csp);
2622 SetStackPointer(jssp);
2623 }
2624
2625 // If we fell through then inline version didn't succeed - call stub instead.
2626 Push(lr, double_input);
2627
2628 DoubleToIStub stub(isolate(),
2629 jssp,
2630 result,
2631 0,
2632 true, // is_truncating
2633 true); // skip_fastpath
2634 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2635
2636 DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
2637 Pop(xzr, lr); // xzr to drop the double input on the stack.
2638
2639 if (csp.Is(old_stack_pointer)) {
2640 Mov(csp, jssp);
2641 SetStackPointer(csp);
2642 AssertStackConsistency();
2643 Pop(xzr, jssp);
2644 }
2645
2646 Bind(&done);
2647 }
2648
2649
TruncateHeapNumberToI(Register result,Register object)2650 void MacroAssembler::TruncateHeapNumberToI(Register result,
2651 Register object) {
2652 Label done;
2653 DCHECK(!result.is(object));
2654 DCHECK(jssp.Is(StackPointer()));
2655
2656 Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2657
2658 // Try to convert the double to an int64. If successful, the bottom 32 bits
2659 // contain our truncated int32 result.
2660 TryConvertDoubleToInt64(result, fp_scratch, &done);
2661
2662 // If we fell through then inline version didn't succeed - call stub instead.
2663 Push(lr);
2664 DoubleToIStub stub(isolate(),
2665 object,
2666 result,
2667 HeapNumber::kValueOffset - kHeapObjectTag,
2668 true, // is_truncating
2669 true); // skip_fastpath
2670 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2671 Pop(lr);
2672
2673 Bind(&done);
2674 }
2675
StubPrologue(StackFrame::Type type,int frame_slots)2676 void MacroAssembler::StubPrologue(StackFrame::Type type, int frame_slots) {
2677 UseScratchRegisterScope temps(this);
2678 frame_slots -= TypedFrameConstants::kFixedSlotCountAboveFp;
2679 Register temp = temps.AcquireX();
2680 Mov(temp, Smi::FromInt(type));
2681 Push(lr, fp);
2682 Mov(fp, StackPointer());
2683 Claim(frame_slots);
2684 str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
2685 }
2686
Prologue(bool code_pre_aging)2687 void MacroAssembler::Prologue(bool code_pre_aging) {
2688 if (code_pre_aging) {
2689 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
2690 __ EmitCodeAgeSequence(stub);
2691 } else {
2692 __ EmitFrameSetupForCodeAgePatching();
2693 }
2694 }
2695
2696
EmitLoadTypeFeedbackVector(Register vector)2697 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
2698 Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2699 Ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
2700 Ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
2701 }
2702
2703
EnterFrame(StackFrame::Type type,bool load_constant_pool_pointer_reg)2704 void MacroAssembler::EnterFrame(StackFrame::Type type,
2705 bool load_constant_pool_pointer_reg) {
2706 // Out-of-line constant pool not implemented on arm64.
2707 UNREACHABLE();
2708 }
2709
2710
EnterFrame(StackFrame::Type type)2711 void MacroAssembler::EnterFrame(StackFrame::Type type) {
2712 DCHECK(jssp.Is(StackPointer()));
2713 UseScratchRegisterScope temps(this);
2714 Register type_reg = temps.AcquireX();
2715 Register code_reg = temps.AcquireX();
2716
2717 if (type == StackFrame::INTERNAL) {
2718 Mov(type_reg, Smi::FromInt(type));
2719 Push(lr, fp);
2720 Push(type_reg);
2721 Mov(code_reg, Operand(CodeObject()));
2722 Push(code_reg);
2723 Add(fp, jssp, InternalFrameConstants::kFixedFrameSizeFromFp);
2724 // jssp[3] : lr
2725 // jssp[2] : fp
2726 // jssp[1] : type
2727 // jssp[0] : [code object]
2728 } else {
2729 Mov(type_reg, Smi::FromInt(type));
2730 Push(lr, fp);
2731 Push(type_reg);
2732 Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp);
2733 // jssp[2] : lr
2734 // jssp[1] : fp
2735 // jssp[0] : type
2736 }
2737 }
2738
2739
LeaveFrame(StackFrame::Type type)2740 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
2741 DCHECK(jssp.Is(StackPointer()));
2742 // Drop the execution stack down to the frame pointer and restore
2743 // the caller frame pointer and return address.
2744 Mov(jssp, fp);
2745 AssertStackConsistency();
2746 Pop(fp, lr);
2747 }
2748
2749
ExitFramePreserveFPRegs()2750 void MacroAssembler::ExitFramePreserveFPRegs() {
2751 PushCPURegList(kCallerSavedFP);
2752 }
2753
2754
ExitFrameRestoreFPRegs()2755 void MacroAssembler::ExitFrameRestoreFPRegs() {
2756 // Read the registers from the stack without popping them. The stack pointer
2757 // will be reset as part of the unwinding process.
2758 CPURegList saved_fp_regs = kCallerSavedFP;
2759 DCHECK(saved_fp_regs.Count() % 2 == 0);
2760
2761 int offset = ExitFrameConstants::kLastExitFrameField;
2762 while (!saved_fp_regs.IsEmpty()) {
2763 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
2764 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
2765 offset -= 2 * kDRegSize;
2766 Ldp(dst1, dst0, MemOperand(fp, offset));
2767 }
2768 }
2769
EnterBuiltinFrame(Register context,Register target,Register argc)2770 void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
2771 Register argc) {
2772 Push(lr, fp, context, target);
2773 add(fp, jssp, Operand(2 * kPointerSize));
2774 Push(argc);
2775 }
2776
LeaveBuiltinFrame(Register context,Register target,Register argc)2777 void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
2778 Register argc) {
2779 Pop(argc);
2780 Pop(target, context, fp, lr);
2781 }
2782
EnterExitFrame(bool save_doubles,const Register & scratch,int extra_space,StackFrame::Type frame_type)2783 void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
2784 int extra_space,
2785 StackFrame::Type frame_type) {
2786 DCHECK(jssp.Is(StackPointer()));
2787 DCHECK(frame_type == StackFrame::EXIT ||
2788 frame_type == StackFrame::BUILTIN_EXIT);
2789
2790 // Set up the new stack frame.
2791 Push(lr, fp);
2792 Mov(fp, StackPointer());
2793 Mov(scratch, Smi::FromInt(frame_type));
2794 Push(scratch);
2795 Push(xzr);
2796 Mov(scratch, Operand(CodeObject()));
2797 Push(scratch);
2798 // fp[8]: CallerPC (lr)
2799 // fp -> fp[0]: CallerFP (old fp)
2800 // fp[-8]: STUB marker
2801 // fp[-16]: Space reserved for SPOffset.
2802 // jssp -> fp[-24]: CodeObject()
2803 STATIC_ASSERT((2 * kPointerSize) == ExitFrameConstants::kCallerSPOffset);
2804 STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
2805 STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
2806 STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kSPOffset);
2807 STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kCodeOffset);
2808
2809 // Save the frame pointer and context pointer in the top frame.
2810 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2811 isolate())));
2812 Str(fp, MemOperand(scratch));
2813 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2814 isolate())));
2815 Str(cp, MemOperand(scratch));
2816
2817 STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kLastExitFrameField);
2818 if (save_doubles) {
2819 ExitFramePreserveFPRegs();
2820 }
2821
2822 // Reserve space for the return address and for user requested memory.
2823 // We do this before aligning to make sure that we end up correctly
2824 // aligned with the minimum of wasted space.
2825 Claim(extra_space + 1, kXRegSize);
2826 // fp[8]: CallerPC (lr)
2827 // fp -> fp[0]: CallerFP (old fp)
2828 // fp[-8]: STUB marker
2829 // fp[-16]: Space reserved for SPOffset.
2830 // fp[-24]: CodeObject()
2831 // fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
2832 // jssp[8]: Extra space reserved for caller (if extra_space != 0).
2833 // jssp -> jssp[0]: Space reserved for the return address.
2834
2835 // Align and synchronize the system stack pointer with jssp.
2836 AlignAndSetCSPForFrame();
2837 DCHECK(csp.Is(StackPointer()));
2838
2839 // fp[8]: CallerPC (lr)
2840 // fp -> fp[0]: CallerFP (old fp)
2841 // fp[-8]: STUB marker
2842 // fp[-16]: Space reserved for SPOffset.
2843 // fp[-24]: CodeObject()
2844 // fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
2845 // csp[8]: Memory reserved for the caller if extra_space != 0.
2846 // Alignment padding, if necessary.
2847 // csp -> csp[0]: Space reserved for the return address.
2848
2849 // ExitFrame::GetStateForFramePointer expects to find the return address at
2850 // the memory address immediately below the pointer stored in SPOffset.
2851 // It is not safe to derive much else from SPOffset, because the size of the
2852 // padding can vary.
2853 Add(scratch, csp, kXRegSize);
2854 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
2855 }
2856
2857
2858 // Leave the current exit frame.
LeaveExitFrame(bool restore_doubles,const Register & scratch,bool restore_context)2859 void MacroAssembler::LeaveExitFrame(bool restore_doubles,
2860 const Register& scratch,
2861 bool restore_context) {
2862 DCHECK(csp.Is(StackPointer()));
2863
2864 if (restore_doubles) {
2865 ExitFrameRestoreFPRegs();
2866 }
2867
2868 // Restore the context pointer from the top frame.
2869 if (restore_context) {
2870 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2871 isolate())));
2872 Ldr(cp, MemOperand(scratch));
2873 }
2874
2875 if (emit_debug_code()) {
2876 // Also emit debug code to clear the cp in the top frame.
2877 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2878 isolate())));
2879 Str(xzr, MemOperand(scratch));
2880 }
2881 // Clear the frame pointer from the top frame.
2882 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2883 isolate())));
2884 Str(xzr, MemOperand(scratch));
2885
2886 // Pop the exit frame.
2887 // fp[8]: CallerPC (lr)
2888 // fp -> fp[0]: CallerFP (old fp)
2889 // fp[...]: The rest of the frame.
2890 Mov(jssp, fp);
2891 SetStackPointer(jssp);
2892 AssertStackConsistency();
2893 Pop(fp, lr);
2894 }
2895
2896
SetCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)2897 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2898 Register scratch1, Register scratch2) {
2899 if (FLAG_native_code_counters && counter->Enabled()) {
2900 Mov(scratch1, value);
2901 Mov(scratch2, ExternalReference(counter));
2902 Str(scratch1.W(), MemOperand(scratch2));
2903 }
2904 }
2905
2906
IncrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)2907 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2908 Register scratch1, Register scratch2) {
2909 DCHECK(value != 0);
2910 if (FLAG_native_code_counters && counter->Enabled()) {
2911 Mov(scratch2, ExternalReference(counter));
2912 Ldr(scratch1.W(), MemOperand(scratch2));
2913 Add(scratch1.W(), scratch1.W(), value);
2914 Str(scratch1.W(), MemOperand(scratch2));
2915 }
2916 }
2917
2918
DecrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)2919 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2920 Register scratch1, Register scratch2) {
2921 IncrementCounter(counter, -value, scratch1, scratch2);
2922 }
2923
2924
LoadContext(Register dst,int context_chain_length)2925 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2926 if (context_chain_length > 0) {
2927 // Move up the chain of contexts to the context containing the slot.
2928 Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2929 for (int i = 1; i < context_chain_length; i++) {
2930 Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2931 }
2932 } else {
2933 // Slot is in the current function context. Move it into the
2934 // destination register in case we store into it (the write barrier
2935 // cannot be allowed to destroy the context in cp).
2936 Mov(dst, cp);
2937 }
2938 }
2939
2940
DebugBreak()2941 void MacroAssembler::DebugBreak() {
2942 Mov(x0, 0);
2943 Mov(x1, ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
2944 CEntryStub ces(isolate(), 1);
2945 DCHECK(AllowThisStubCall(&ces));
2946 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
2947 }
2948
2949
PushStackHandler()2950 void MacroAssembler::PushStackHandler() {
2951 DCHECK(jssp.Is(StackPointer()));
2952 // Adjust this code if the asserts don't hold.
2953 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
2954 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2955
2956 // For the JSEntry handler, we must preserve the live registers x0-x4.
2957 // (See JSEntryStub::GenerateBody().)
2958
2959 // Link the current handler as the next handler.
2960 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
2961 Ldr(x10, MemOperand(x11));
2962 Push(x10);
2963
2964 // Set this new handler as the current one.
2965 Str(jssp, MemOperand(x11));
2966 }
2967
2968
PopStackHandler()2969 void MacroAssembler::PopStackHandler() {
2970 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2971 Pop(x10);
2972 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
2973 Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
2974 Str(x10, MemOperand(x11));
2975 }
2976
2977
Allocate(int object_size,Register result,Register scratch1,Register scratch2,Label * gc_required,AllocationFlags flags)2978 void MacroAssembler::Allocate(int object_size,
2979 Register result,
2980 Register scratch1,
2981 Register scratch2,
2982 Label* gc_required,
2983 AllocationFlags flags) {
2984 DCHECK(object_size <= kMaxRegularHeapObjectSize);
2985 DCHECK((flags & ALLOCATION_FOLDED) == 0);
2986 if (!FLAG_inline_new) {
2987 if (emit_debug_code()) {
2988 // Trash the registers to simulate an allocation failure.
2989 // We apply salt to the original zap value to easily spot the values.
2990 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
2991 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
2992 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
2993 }
2994 B(gc_required);
2995 return;
2996 }
2997
2998 UseScratchRegisterScope temps(this);
2999 Register scratch3 = temps.AcquireX();
3000
3001 DCHECK(!AreAliased(result, scratch1, scratch2, scratch3));
3002 DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
3003
3004 // Make object size into bytes.
3005 if ((flags & SIZE_IN_WORDS) != 0) {
3006 object_size *= kPointerSize;
3007 }
3008 DCHECK(0 == (object_size & kObjectAlignmentMask));
3009
3010 // Check relative positions of allocation top and limit addresses.
3011 // The values must be adjacent in memory to allow the use of LDP.
3012 ExternalReference heap_allocation_top =
3013 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3014 ExternalReference heap_allocation_limit =
3015 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3016 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3017 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3018 DCHECK((limit - top) == kPointerSize);
3019
3020 // Set up allocation top address and allocation limit registers.
3021 Register top_address = scratch1;
3022 Register alloc_limit = scratch2;
3023 Register result_end = scratch3;
3024 Mov(top_address, Operand(heap_allocation_top));
3025
3026 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3027 // Load allocation top into result and allocation limit into alloc_limit.
3028 Ldp(result, alloc_limit, MemOperand(top_address));
3029 } else {
3030 if (emit_debug_code()) {
3031 // Assert that result actually contains top on entry.
3032 Ldr(alloc_limit, MemOperand(top_address));
3033 Cmp(result, alloc_limit);
3034 Check(eq, kUnexpectedAllocationTop);
3035 }
3036 // Load allocation limit. Result already contains allocation top.
3037 Ldr(alloc_limit, MemOperand(top_address, limit - top));
3038 }
3039
3040 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3041 // the same alignment on ARM64.
3042 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3043
3044 // Calculate new top and bail out if new space is exhausted.
3045 Adds(result_end, result, object_size);
3046 Ccmp(result_end, alloc_limit, CFlag, cc);
3047 B(hi, gc_required);
3048
3049 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
3050 // The top pointer is not updated for allocation folding dominators.
3051 Str(result_end, MemOperand(top_address));
3052 }
3053
3054 // Tag the object.
3055 ObjectTag(result, result);
3056 }
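
// A sketch of the fast path (illustration only, with made-up addresses): if
// top == 0x3e81000, limit == 0x3e82000 and object_size == 0x20, Ldp loads top
// and limit in a single access, Adds yields result_end == 0x3e81020, which is
// not above the limit, so no GC is needed; the new top is written back and
// result is tagged as a heap object.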
3057
3058
Allocate(Register object_size,Register result,Register result_end,Register scratch,Label * gc_required,AllocationFlags flags)3059 void MacroAssembler::Allocate(Register object_size, Register result,
3060 Register result_end, Register scratch,
3061 Label* gc_required, AllocationFlags flags) {
3062 if (!FLAG_inline_new) {
3063 if (emit_debug_code()) {
3064 // Trash the registers to simulate an allocation failure.
3065 // We apply salt to the original zap value to easily spot the values.
3066 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
3067 Mov(scratch, (kDebugZapValue & ~0xffL) | 0x21L);
3068 Mov(result_end, (kDebugZapValue & ~0xffL) | 0x21L);
3069 }
3070 B(gc_required);
3071 return;
3072 }
3073
3074 UseScratchRegisterScope temps(this);
3075 Register scratch2 = temps.AcquireX();
3076
3077 // |object_size| and |result_end| may overlap, other registers must not.
3078 DCHECK(!AreAliased(object_size, result, scratch, scratch2));
3079 DCHECK(!AreAliased(result_end, result, scratch, scratch2));
3080 DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
3081 result_end.Is64Bits());
3082
3083 // Check relative positions of allocation top and limit addresses.
3084 // The values must be adjacent in memory to allow the use of LDP.
3085 ExternalReference heap_allocation_top =
3086 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3087 ExternalReference heap_allocation_limit =
3088 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3089 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3090 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3091 DCHECK((limit - top) == kPointerSize);
3092
3093 // Set up allocation top address and allocation limit registers.
3094 Register top_address = scratch;
3095 Register alloc_limit = scratch2;
3096 Mov(top_address, heap_allocation_top);
3097
3098 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3099 // Load allocation top into result and allocation limit into alloc_limit.
3100 Ldp(result, alloc_limit, MemOperand(top_address));
3101 } else {
3102 if (emit_debug_code()) {
3103 // Assert that result actually contains top on entry.
3104 Ldr(alloc_limit, MemOperand(top_address));
3105 Cmp(result, alloc_limit);
3106 Check(eq, kUnexpectedAllocationTop);
3107 }
3108 // Load allocation limit. Result already contains allocation top.
3109 Ldr(alloc_limit, MemOperand(top_address, limit - top));
3110 }
3111
3112 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3113 // the same alignment on ARM64.
3114 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3115
3116   // Calculate new top and bail out if new space is exhausted.
3117 if ((flags & SIZE_IN_WORDS) != 0) {
3118 Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
3119 } else {
3120 Adds(result_end, result, object_size);
3121 }
3122
3123 if (emit_debug_code()) {
3124 Tst(result_end, kObjectAlignmentMask);
3125 Check(eq, kUnalignedAllocationInNewSpace);
3126 }
3127
3128 Ccmp(result_end, alloc_limit, NoFlag, cc);
3129 B(hi, gc_required);
3130
3131 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
3132 // The top pointer is not updated for allocation folding dominators.
3133 Str(result_end, MemOperand(top_address));
3134 }
3135
3136 // Tag the object.
3137 ObjectTag(result, result);
3138 }
3139
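// The FastAllocate variants below bump the allocation top without checking it
// against the allocation limit, so they must only be used when the caller
// already knows the allocation will succeed (e.g. within an allocation
// folding group whose dominator already performed the limit check).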
3140 void MacroAssembler::FastAllocate(int object_size, Register result,
3141 Register scratch1, Register scratch2,
3142 AllocationFlags flags) {
3143 DCHECK(object_size <= kMaxRegularHeapObjectSize);
3144
3145 DCHECK(!AreAliased(result, scratch1, scratch2));
3146 DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
3147
3148 // Make object size into bytes.
3149 if ((flags & SIZE_IN_WORDS) != 0) {
3150 object_size *= kPointerSize;
3151 }
3152 DCHECK(0 == (object_size & kObjectAlignmentMask));
3153
3154 ExternalReference heap_allocation_top =
3155 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3156
3157 // Set up allocation top address and allocation limit registers.
3158 Register top_address = scratch1;
3159 Register result_end = scratch2;
3160 Mov(top_address, Operand(heap_allocation_top));
3161 Ldr(result, MemOperand(top_address));
3162
3163 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3164 // the same alignment on ARM64.
3165 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3166
3167 // Calculate new top and write it back.
3168 Adds(result_end, result, object_size);
3169 Str(result_end, MemOperand(top_address));
3170
3171 ObjectTag(result, result);
3172 }
3173
3174 void MacroAssembler::FastAllocate(Register object_size, Register result,
3175 Register result_end, Register scratch,
3176 AllocationFlags flags) {
3177 // |object_size| and |result_end| may overlap, other registers must not.
3178 DCHECK(!AreAliased(object_size, result, scratch));
3179 DCHECK(!AreAliased(result_end, result, scratch));
3180 DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
3181 result_end.Is64Bits());
3182
3183 ExternalReference heap_allocation_top =
3184 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3185
3186 // Set up allocation top address and allocation limit registers.
3187 Register top_address = scratch;
3188 Mov(top_address, heap_allocation_top);
3189 Ldr(result, MemOperand(top_address));
3190
3191 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3192 // the same alignment on ARM64.
3193 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3194
3195 // Calculate new top and write it back.
3196 if ((flags & SIZE_IN_WORDS) != 0) {
3197 Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
3198 } else {
3199 Adds(result_end, result, object_size);
3200 }
3201 Str(result_end, MemOperand(top_address));
3202
3203 if (emit_debug_code()) {
3204 Tst(result_end, kObjectAlignmentMask);
3205 Check(eq, kUnalignedAllocationInNewSpace);
3206 }
3207
3208 ObjectTag(result, result);
3209 }
3210
3211 void MacroAssembler::AllocateTwoByteString(Register result,
3212 Register length,
3213 Register scratch1,
3214 Register scratch2,
3215 Register scratch3,
3216 Label* gc_required) {
3217 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
3218 // Calculate the number of bytes needed for the characters in the string while
3219 // observing object alignment.
3220 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3221 Add(scratch1, length, length); // Length in bytes, not chars.
3222 Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3223 Bic(scratch1, scratch1, kObjectAlignmentMask);
3224
3225 // Allocate two-byte string in new space.
3226 Allocate(scratch1, result, scratch2, scratch3, gc_required,
3227 NO_ALLOCATION_FLAGS);
3228
3229 // Set the map, length and hash field.
3230 InitializeNewString(result,
3231 length,
3232 Heap::kStringMapRootIndex,
3233 scratch1,
3234 scratch2);
3235 }
3236
3237
3238 void MacroAssembler::AllocateOneByteString(Register result, Register length,
3239 Register scratch1, Register scratch2,
3240 Register scratch3,
3241 Label* gc_required) {
3242 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
3243 // Calculate the number of bytes needed for the characters in the string while
3244 // observing object alignment.
3245 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3246 STATIC_ASSERT(kCharSize == 1);
3247 Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3248 Bic(scratch1, scratch1, kObjectAlignmentMask);
3249
3250 // Allocate one-byte string in new space.
3251 Allocate(scratch1, result, scratch2, scratch3, gc_required,
3252 NO_ALLOCATION_FLAGS);
3253
3254 // Set the map, length and hash field.
3255 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3256 scratch1, scratch2);
3257 }
3258
3259
3260 void MacroAssembler::AllocateTwoByteConsString(Register result,
3261 Register length,
3262 Register scratch1,
3263 Register scratch2,
3264 Label* gc_required) {
3265 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3266 NO_ALLOCATION_FLAGS);
3267
3268 InitializeNewString(result,
3269 length,
3270 Heap::kConsStringMapRootIndex,
3271 scratch1,
3272 scratch2);
3273 }
3274
3275
3276 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3277 Register scratch1,
3278 Register scratch2,
3279 Label* gc_required) {
3280 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3281 NO_ALLOCATION_FLAGS);
3282
3283 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3284 scratch1, scratch2);
3285 }
3286
3287
3288 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3289 Register length,
3290 Register scratch1,
3291 Register scratch2,
3292 Label* gc_required) {
3293 DCHECK(!AreAliased(result, length, scratch1, scratch2));
3294 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3295 NO_ALLOCATION_FLAGS);
3296
3297 InitializeNewString(result,
3298 length,
3299 Heap::kSlicedStringMapRootIndex,
3300 scratch1,
3301 scratch2);
3302 }
3303
3304
3305 void MacroAssembler::AllocateOneByteSlicedString(Register result,
3306 Register length,
3307 Register scratch1,
3308 Register scratch2,
3309 Label* gc_required) {
3310 DCHECK(!AreAliased(result, length, scratch1, scratch2));
3311 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3312 NO_ALLOCATION_FLAGS);
3313
3314 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3315 scratch1, scratch2);
3316 }
3317
3318
3319 // Allocates a heap number or jumps to the gc_required label if the young space
3320 // is full and a scavenge is needed.
3321 void MacroAssembler::AllocateHeapNumber(Register result,
3322 Label* gc_required,
3323 Register scratch1,
3324 Register scratch2,
3325 CPURegister value,
3326 CPURegister heap_number_map,
3327 MutableMode mode) {
3328 DCHECK(!value.IsValid() || value.Is64Bits());
3329 UseScratchRegisterScope temps(this);
3330
3331 // Allocate an object in the heap for the heap number and tag it as a heap
3332 // object.
3333 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3334 NO_ALLOCATION_FLAGS);
3335
3336 Heap::RootListIndex map_index = mode == MUTABLE
3337 ? Heap::kMutableHeapNumberMapRootIndex
3338 : Heap::kHeapNumberMapRootIndex;
3339
3340 // Prepare the heap number map.
3341 if (!heap_number_map.IsValid()) {
3342 // If we have a valid value register, use the same type of register to store
3343 // the map so we can use STP to store both in one instruction.
3344 if (value.IsValid() && value.IsFPRegister()) {
3345 heap_number_map = temps.AcquireD();
3346 } else {
3347 heap_number_map = scratch1;
3348 }
3349 LoadRoot(heap_number_map, map_index);
3350 }
3351 if (emit_debug_code()) {
3352 Register map;
3353 if (heap_number_map.IsFPRegister()) {
3354 map = scratch1;
3355 Fmov(map, DoubleRegister(heap_number_map));
3356 } else {
3357 map = Register(heap_number_map);
3358 }
3359 AssertRegisterIsRoot(map, map_index);
3360 }
3361
3362 // Store the heap number map and the value in the allocated object.
3363 if (value.IsSameSizeAndType(heap_number_map)) {
3364 STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize ==
3365 HeapNumber::kValueOffset);
3366 Stp(heap_number_map, value,
3367 FieldMemOperand(result, HeapObject::kMapOffset));
3368 } else {
3369 Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3370 if (value.IsValid()) {
3371 Str(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3372 }
3373 }
3374 }
3375
3376
3377 void MacroAssembler::JumpIfObjectType(Register object,
3378 Register map,
3379 Register type_reg,
3380 InstanceType type,
3381 Label* if_cond_pass,
3382 Condition cond) {
3383 CompareObjectType(object, map, type_reg, type);
3384 B(cond, if_cond_pass);
3385 }
3386
3387
3388 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
3389 Register value, Register scratch1,
3390 Register scratch2, Label* gc_required) {
3391 DCHECK(!result.is(constructor));
3392 DCHECK(!result.is(scratch1));
3393 DCHECK(!result.is(scratch2));
3394 DCHECK(!result.is(value));
3395
3396 // Allocate JSValue in new space.
3397 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
3398 NO_ALLOCATION_FLAGS);
3399
3400 // Initialize the JSValue.
3401 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
3402 Str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
3403 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
3404 Str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
3405 Str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
3406 Str(value, FieldMemOperand(result, JSValue::kValueOffset));
3407 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
3408 }
3409
3410
3411 void MacroAssembler::JumpIfNotObjectType(Register object,
3412 Register map,
3413 Register type_reg,
3414 InstanceType type,
3415 Label* if_not_object) {
3416 JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
3417 }
3418
3419
3420 // Sets condition flags based on comparison, and returns type in type_reg.
3421 void MacroAssembler::CompareObjectType(Register object,
3422 Register map,
3423 Register type_reg,
3424 InstanceType type) {
3425 Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
3426 CompareInstanceType(map, type_reg, type);
3427 }
3428
3429
3430 // Sets condition flags based on comparison, and returns type in type_reg.
3431 void MacroAssembler::CompareInstanceType(Register map,
3432 Register type_reg,
3433 InstanceType type) {
3434 Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3435 Cmp(type_reg, type);
3436 }
3437
3438
3439 void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) {
3440 UseScratchRegisterScope temps(this);
3441 Register obj_map = temps.AcquireX();
3442 Ldr(obj_map, FieldMemOperand(obj, HeapObject::kMapOffset));
3443 CompareRoot(obj_map, index);
3444 }
3445
3446
3447 void MacroAssembler::CompareObjectMap(Register obj, Register scratch,
3448 Handle<Map> map) {
3449 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3450 CompareMap(scratch, map);
3451 }
3452
3453
3454 void MacroAssembler::CompareMap(Register obj_map,
3455 Handle<Map> map) {
3456 Cmp(obj_map, Operand(map));
3457 }
3458
3459
3460 void MacroAssembler::CheckMap(Register obj,
3461 Register scratch,
3462 Handle<Map> map,
3463 Label* fail,
3464 SmiCheckType smi_check_type) {
3465 if (smi_check_type == DO_SMI_CHECK) {
3466 JumpIfSmi(obj, fail);
3467 }
3468
3469 CompareObjectMap(obj, scratch, map);
3470 B(ne, fail);
3471 }
3472
3473
3474 void MacroAssembler::CheckMap(Register obj,
3475 Register scratch,
3476 Heap::RootListIndex index,
3477 Label* fail,
3478 SmiCheckType smi_check_type) {
3479 if (smi_check_type == DO_SMI_CHECK) {
3480 JumpIfSmi(obj, fail);
3481 }
3482 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3483 JumpIfNotRoot(scratch, index, fail);
3484 }
3485
3486
3487 void MacroAssembler::CheckMap(Register obj_map,
3488 Handle<Map> map,
3489 Label* fail,
3490 SmiCheckType smi_check_type) {
3491 if (smi_check_type == DO_SMI_CHECK) {
3492 JumpIfSmi(obj_map, fail);
3493 }
3494
3495 CompareMap(obj_map, map);
3496 B(ne, fail);
3497 }
3498
3499
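// Compare |obj|'s map with the value cached in |cell| and jump to the
// |success| code object on a match; on a mismatch (or when |obj| is a smi and
// DO_SMI_CHECK was requested) execution falls through.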
3500 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3501 Register scratch2, Handle<WeakCell> cell,
3502 Handle<Code> success,
3503 SmiCheckType smi_check_type) {
3504 Label fail;
3505 if (smi_check_type == DO_SMI_CHECK) {
3506 JumpIfSmi(obj, &fail);
3507 }
3508 Ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
3509 CmpWeakValue(scratch1, cell, scratch2);
3510 B(ne, &fail);
3511 Jump(success, RelocInfo::CODE_TARGET);
3512 Bind(&fail);
3513 }
3514
3515
3516 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
3517 Register scratch) {
3518 Mov(scratch, Operand(cell));
3519 Ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
3520 Cmp(value, scratch);
3521 }
3522
3523
3524 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
3525 Mov(value, Operand(cell));
3526 Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
3527 }
3528
3529
3530 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
3531 Label* miss) {
3532 GetWeakValue(value, cell);
3533 JumpIfSmi(value, miss);
3534 }
3535
3536
3537 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
3538 UseScratchRegisterScope temps(this);
3539 Register temp = temps.AcquireX();
3540 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
3541 Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3542 Tst(temp, mask);
3543 }
3544
3545
3546 void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
3547 // Load the map's "bit field 2".
3548 __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
3549 // Retrieve elements_kind from bit field 2.
3550 DecodeField<Map::ElementsKindBits>(result);
3551 }
3552
3553
3554 void MacroAssembler::GetMapConstructor(Register result, Register map,
3555 Register temp, Register temp2) {
3556 Label done, loop;
3557 Ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
3558 Bind(&loop);
3559 JumpIfSmi(result, &done);
3560 CompareObjectType(result, temp, temp2, MAP_TYPE);
3561 B(ne, &done);
3562 Ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
3563 B(&loop);
3564 Bind(&done);
3565 }
3566
3567
3568 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
3569 Register scratch, Label* miss) {
3570 DCHECK(!AreAliased(function, result, scratch));
3571
3572 // Get the prototype or initial map from the function.
3573 Ldr(result,
3574 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3575
3576 // If the prototype or initial map is the hole, don't return it and simply
3577 // miss the cache instead. This will allow us to allocate a prototype object
3578 // on-demand in the runtime system.
3579 JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
3580
3581 // If the function does not have an initial map, we're done.
3582 Label done;
3583 JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
3584
3585 // Get the prototype from the initial map.
3586 Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3587
3588 // All done.
3589 Bind(&done);
3590 }
3591
3592
3593 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
3594 UseScratchRegisterScope temps(this);
3595 Register temp = temps.AcquireX();
3596 LoadRoot(temp, index);
3597 Push(temp);
3598 }
3599
3600
3601 void MacroAssembler::CompareRoot(const Register& obj,
3602 Heap::RootListIndex index) {
3603 UseScratchRegisterScope temps(this);
3604 Register temp = temps.AcquireX();
3605 DCHECK(!AreAliased(obj, temp));
3606 LoadRoot(temp, index);
3607 Cmp(obj, temp);
3608 }
3609
3610
3611 void MacroAssembler::JumpIfRoot(const Register& obj,
3612 Heap::RootListIndex index,
3613 Label* if_equal) {
3614 CompareRoot(obj, index);
3615 B(eq, if_equal);
3616 }
3617
3618
3619 void MacroAssembler::JumpIfNotRoot(const Register& obj,
3620 Heap::RootListIndex index,
3621 Label* if_not_equal) {
3622 CompareRoot(obj, index);
3623 B(ne, if_not_equal);
3624 }
3625
3626
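// Compare |lhs| with |rhs| and branch to |if_true| when |cond| holds and to
// |if_false| otherwise, omitting any branch that would simply fall through to
// |fall_through|.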
3627 void MacroAssembler::CompareAndSplit(const Register& lhs,
3628 const Operand& rhs,
3629 Condition cond,
3630 Label* if_true,
3631 Label* if_false,
3632 Label* fall_through) {
3633 if ((if_true == if_false) && (if_false == fall_through)) {
3634 // Fall through.
3635 } else if (if_true == if_false) {
3636 B(if_true);
3637 } else if (if_false == fall_through) {
3638 CompareAndBranch(lhs, rhs, cond, if_true);
3639 } else if (if_true == fall_through) {
3640 CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false);
3641 } else {
3642 CompareAndBranch(lhs, rhs, cond, if_true);
3643 B(if_false);
3644 }
3645 }
3646
3647
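// Test |reg| against |bit_pattern| and branch to |if_any_set| or
// |if_all_clear| accordingly, again omitting branches that would simply fall
// through to |fall_through|.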
3648 void MacroAssembler::TestAndSplit(const Register& reg,
3649 uint64_t bit_pattern,
3650 Label* if_all_clear,
3651 Label* if_any_set,
3652 Label* fall_through) {
3653 if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
3654 // Fall through.
3655 } else if (if_all_clear == if_any_set) {
3656 B(if_all_clear);
3657 } else if (if_all_clear == fall_through) {
3658 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3659 } else if (if_any_set == fall_through) {
3660 TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
3661 } else {
3662 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3663 B(if_all_clear);
3664 }
3665 }
3666
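// Branch to |fail| unless the map's elements kind is FAST_ELEMENTS or
// FAST_HOLEY_ELEMENTS (i.e. non-smi fast object elements).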
3667 void MacroAssembler::CheckFastObjectElements(Register map,
3668 Register scratch,
3669 Label* fail) {
3670 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3671 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3672 STATIC_ASSERT(FAST_ELEMENTS == 2);
3673 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3674 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3675 Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3676   // If the first comparison was ls (a smi elements kind), force hi; otherwise compare.
3677 Ccmp(scratch,
3678 Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
3679 B(hi, fail);
3680 }
3681
3682
3683 // Note: The ARM version of this clobbers elements_reg, but this version does
3684 // not. Some uses of this in ARM64 assume that elements_reg will be preserved.
3685 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3686 Register key_reg,
3687 Register elements_reg,
3688 Register scratch1,
3689 FPRegister fpscratch1,
3690 Label* fail,
3691 int elements_offset) {
3692 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
3693 Label store_num;
3694
3695 // Speculatively convert the smi to a double - all smis can be exactly
3696 // represented as a double.
3697 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
3698
3699 // If value_reg is a smi, we're done.
3700 JumpIfSmi(value_reg, &store_num);
3701
3702 // Ensure that the object is a heap number.
3703 JumpIfNotHeapNumber(value_reg, fail);
3704
3705 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
3706
3707 // Canonicalize NaNs.
3708 CanonicalizeNaN(fpscratch1);
3709
3710 // Store the result.
3711 Bind(&store_num);
3712 Add(scratch1, elements_reg,
3713 Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
3714 Str(fpscratch1,
3715 FieldMemOperand(scratch1,
3716 FixedDoubleArray::kHeaderSize - elements_offset));
3717 }
3718
3719
3720 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
3721 return has_frame_ || !stub->SometimesSetsUpAFrame();
3722 }
3723
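// Emit checks (which abort on failure) that |string|'s representation and
// encoding match |encoding_mask| and that |index| is within the string's
// length.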
3724 void MacroAssembler::EmitSeqStringSetCharCheck(
3725 Register string,
3726 Register index,
3727 SeqStringSetCharCheckIndexType index_type,
3728 Register scratch,
3729 uint32_t encoding_mask) {
3730 DCHECK(!AreAliased(string, index, scratch));
3731
3732 if (index_type == kIndexIsSmi) {
3733 AssertSmi(index);
3734 }
3735
3736 // Check that string is an object.
3737 AssertNotSmi(string, kNonObject);
3738
3739 // Check that string has an appropriate map.
3740 Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
3741 Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3742
3743 And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
3744 Cmp(scratch, encoding_mask);
3745 Check(eq, kUnexpectedStringType);
3746
3747 Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
3748 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
3749 Check(lt, kIndexIsTooLarge);
3750
3751 DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
3752 Cmp(index, 0);
3753 Check(ge, kIndexIsNegative);
3754 }
3755
3756
3757 // Compute the hash code from the untagged key. This must be kept in sync with
3758 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
3759 // code-stub-hydrogen.cc
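//
// For reference, the sequence below computes (roughly) the following C code:
//   hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;  // i.e. hash + (hash << 3) + (hash << 11)
//   hash = hash ^ (hash >> 16);
//   hash &= 0x3fffffff;  // Clear the top two bits (the Bic at the end).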
3760 void MacroAssembler::GetNumberHash(Register key, Register scratch) {
3761 DCHECK(!AreAliased(key, scratch));
3762
3763 // Xor original key with a seed.
3764 LoadRoot(scratch, Heap::kHashSeedRootIndex);
3765 Eor(key, key, Operand::UntagSmi(scratch));
3766
3767 // The algorithm uses 32-bit integer values.
3768 key = key.W();
3769 scratch = scratch.W();
3770
3771 // Compute the hash code from the untagged key. This must be kept in sync
3772 // with ComputeIntegerHash in utils.h.
3773 //
3774   // hash = ~hash + (hash << 15);
3775 Mvn(scratch, key);
3776 Add(key, scratch, Operand(key, LSL, 15));
3777 // hash = hash ^ (hash >> 12);
3778 Eor(key, key, Operand(key, LSR, 12));
3779 // hash = hash + (hash << 2);
3780 Add(key, key, Operand(key, LSL, 2));
3781 // hash = hash ^ (hash >> 4);
3782 Eor(key, key, Operand(key, LSR, 4));
3783 // hash = hash * 2057;
3784 Mov(scratch, Operand(key, LSL, 11));
3785 Add(key, key, Operand(key, LSL, 3));
3786 Add(key, key, scratch);
3787 // hash = hash ^ (hash >> 16);
3788 Eor(key, key, Operand(key, LSR, 16));
3789 Bic(key, key, Operand(0xc0000000u));
3790 }
3791
3792 void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
3793 Register code_entry,
3794 Register scratch) {
3795 const int offset = JSFunction::kCodeEntryOffset;
3796
3797 // Since a code entry (value) is always in old space, we don't need to update
3798 // remembered set. If incremental marking is off, there is nothing for us to
3799 // do.
3800 if (!FLAG_incremental_marking) return;
3801
3802 DCHECK(js_function.is(x1));
3803 DCHECK(code_entry.is(x7));
3804 DCHECK(scratch.is(x5));
3805 AssertNotSmi(js_function);
3806
3807 if (emit_debug_code()) {
3808 UseScratchRegisterScope temps(this);
3809 Register temp = temps.AcquireX();
3810 Add(scratch, js_function, offset - kHeapObjectTag);
3811 Ldr(temp, MemOperand(scratch));
3812 Cmp(temp, code_entry);
3813 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
3814 }
3815
3816 // First, check if a write barrier is even needed. The tests below
3817 // catch stores of Smis and stores into young gen.
3818 Label done;
3819
3820 CheckPageFlagClear(code_entry, scratch,
3821 MemoryChunk::kPointersToHereAreInterestingMask, &done);
3822 CheckPageFlagClear(js_function, scratch,
3823 MemoryChunk::kPointersFromHereAreInterestingMask, &done);
3824
3825 const Register dst = scratch;
3826 Add(dst, js_function, offset - kHeapObjectTag);
3827
3828   // Save caller-saved registers. Both input registers (x1 and x7) are caller
3829 // saved, so there is no need to push them.
3830 PushCPURegList(kCallerSaved);
3831
3832 int argument_count = 3;
3833
3834 Mov(x0, js_function);
3835 Mov(x1, dst);
3836 Mov(x2, ExternalReference::isolate_address(isolate()));
3837
3838 {
3839 AllowExternalCallThatCantCauseGC scope(this);
3840 CallCFunction(
3841 ExternalReference::incremental_marking_record_write_code_entry_function(
3842 isolate()),
3843 argument_count);
3844 }
3845
3846 // Restore caller-saved registers.
3847 PopCPURegList(kCallerSaved);
3848
3849 Bind(&done);
3850 }
3851
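// Record |address| in the store buffer. If the buffer is full, call the
// StoreBufferOverflowStub; depending on |and_then|, either fall through or
// return at the end.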
3852 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
3853 Register address,
3854 Register scratch1,
3855 SaveFPRegsMode fp_mode,
3856 RememberedSetFinalAction and_then) {
3857 DCHECK(!AreAliased(object, address, scratch1));
3858 Label done, store_buffer_overflow;
3859 if (emit_debug_code()) {
3860 Label ok;
3861 JumpIfNotInNewSpace(object, &ok);
3862 Abort(kRememberedSetPointerInNewSpace);
3863 bind(&ok);
3864 }
3865 UseScratchRegisterScope temps(this);
3866 Register scratch2 = temps.AcquireX();
3867
3868 // Load store buffer top.
3869 Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
3870 Ldr(scratch1, MemOperand(scratch2));
3871 // Store pointer to buffer and increment buffer top.
3872 Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
3873 // Write back new top of buffer.
3874 Str(scratch1, MemOperand(scratch2));
3875 // Call stub on end of buffer.
3876 // Check for end of buffer.
3877 Tst(scratch1, StoreBuffer::kStoreBufferMask);
3878 if (and_then == kFallThroughAtEnd) {
3879 B(ne, &done);
3880 } else {
3881 DCHECK(and_then == kReturnAtEnd);
3882 B(eq, &store_buffer_overflow);
3883 Ret();
3884 }
3885
3886 Bind(&store_buffer_overflow);
3887 Push(lr);
3888 StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
3889 CallStub(&store_buffer_overflow_stub);
3890 Pop(lr);
3891
3892 Bind(&done);
3893 if (and_then == kReturnAtEnd) {
3894 Ret();
3895 }
3896 }
3897
3898
3899 void MacroAssembler::PopSafepointRegisters() {
3900 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
3901 PopXRegList(kSafepointSavedRegisters);
3902 Drop(num_unsaved);
3903 }
3904
3905
3906 void MacroAssembler::PushSafepointRegisters() {
3907 // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
3908 // adjust the stack for unsaved registers.
3909 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
3910 DCHECK(num_unsaved >= 0);
3911 Claim(num_unsaved);
3912 PushXRegList(kSafepointSavedRegisters);
3913 }
3914
3915
3916 void MacroAssembler::PushSafepointRegistersAndDoubles() {
3917 PushSafepointRegisters();
3918 PushCPURegList(CPURegList(
3919 CPURegister::kFPRegister, kDRegSizeInBits,
3920 RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()));
3921 }
3922
3923
3924 void MacroAssembler::PopSafepointRegistersAndDoubles() {
3925 PopCPURegList(CPURegList(
3926 CPURegister::kFPRegister, kDRegSizeInBits,
3927 RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()));
3928 PopSafepointRegisters();
3929 }
3930
3931
3932 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
3933 // Make sure the safepoint registers list is what we expect.
3934 DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
3935
3936 // Safepoint registers are stored contiguously on the stack, but not all the
3937 // registers are saved. The following registers are excluded:
3938 // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
3939 // the macro assembler.
3940 // - x28 (jssp) because JS stack pointer doesn't need to be included in
3941 // safepoint registers.
3942 // - x31 (csp) because the system stack pointer doesn't need to be included
3943 // in safepoint registers.
3944 //
3945 // This function implements the mapping of register code to index into the
3946 // safepoint register slots.
3947 if ((reg_code >= 0) && (reg_code <= 15)) {
3948 return reg_code;
3949 } else if ((reg_code >= 18) && (reg_code <= 27)) {
3950 // Skip ip0 and ip1.
3951 return reg_code - 2;
3952 } else if ((reg_code == 29) || (reg_code == 30)) {
3953 // Also skip jssp.
3954 return reg_code - 3;
3955 } else {
3956 // This register has no safepoint register slot.
3957 UNREACHABLE();
3958 return -1;
3959 }
3960 }
3961
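// Load the flags of the page containing |object| and branch to |condition_met|
// if any of the bits in |mask| are set (cc == eq) or if all of them are clear
// (otherwise).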
3962 void MacroAssembler::CheckPageFlag(const Register& object,
3963 const Register& scratch, int mask,
3964 Condition cc, Label* condition_met) {
3965 And(scratch, object, ~Page::kPageAlignmentMask);
3966 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3967 if (cc == eq) {
3968 TestAndBranchIfAnySet(scratch, mask, condition_met);
3969 } else {
3970 TestAndBranchIfAllClear(scratch, mask, condition_met);
3971 }
3972 }
3973
3974 void MacroAssembler::CheckPageFlagSet(const Register& object,
3975 const Register& scratch,
3976 int mask,
3977 Label* if_any_set) {
3978 And(scratch, object, ~Page::kPageAlignmentMask);
3979 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3980 TestAndBranchIfAnySet(scratch, mask, if_any_set);
3981 }
3982
3983
3984 void MacroAssembler::CheckPageFlagClear(const Register& object,
3985 const Register& scratch,
3986 int mask,
3987 Label* if_all_clear) {
3988 And(scratch, object, ~Page::kPageAlignmentMask);
3989 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3990 TestAndBranchIfAllClear(scratch, mask, if_all_clear);
3991 }
3992
3993
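// Record a write of |value| into the field at |offset| within |object|,
// emitting the write barrier when needed. |scratch| is clobbered: it is used
// to hold the field's address for RecordWrite.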
3994 void MacroAssembler::RecordWriteField(
3995 Register object,
3996 int offset,
3997 Register value,
3998 Register scratch,
3999 LinkRegisterStatus lr_status,
4000 SaveFPRegsMode save_fp,
4001 RememberedSetAction remembered_set_action,
4002 SmiCheck smi_check,
4003 PointersToHereCheck pointers_to_here_check_for_value) {
4004 // First, check if a write barrier is even needed. The tests below
4005 // catch stores of Smis.
4006 Label done;
4007
4008 // Skip the barrier if writing a smi.
4009 if (smi_check == INLINE_SMI_CHECK) {
4010 JumpIfSmi(value, &done);
4011 }
4012
4013 // Although the object register is tagged, the offset is relative to the start
4014 // of the object, so offset must be a multiple of kPointerSize.
4015 DCHECK(IsAligned(offset, kPointerSize));
4016
4017 Add(scratch, object, offset - kHeapObjectTag);
4018 if (emit_debug_code()) {
4019 Label ok;
4020 Tst(scratch, (1 << kPointerSizeLog2) - 1);
4021 B(eq, &ok);
4022 Abort(kUnalignedCellInWriteBarrier);
4023 Bind(&ok);
4024 }
4025
4026 RecordWrite(object,
4027 scratch,
4028 value,
4029 lr_status,
4030 save_fp,
4031 remembered_set_action,
4032 OMIT_SMI_CHECK,
4033 pointers_to_here_check_for_value);
4034
4035 Bind(&done);
4036
4037 // Clobber clobbered input registers when running with the debug-code flag
4038 // turned on to provoke errors.
4039 if (emit_debug_code()) {
4040 Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
4041 Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
4042 }
4043 }
4044
4045
4046 // Will clobber: object, map, dst.
4047 // If lr_status is kLRHasBeenSaved, lr will also be clobbered.
4048 void MacroAssembler::RecordWriteForMap(Register object,
4049 Register map,
4050 Register dst,
4051 LinkRegisterStatus lr_status,
4052 SaveFPRegsMode fp_mode) {
4053 ASM_LOCATION("MacroAssembler::RecordWrite");
4054 DCHECK(!AreAliased(object, map));
4055
4056 if (emit_debug_code()) {
4057 UseScratchRegisterScope temps(this);
4058 Register temp = temps.AcquireX();
4059
4060 CompareObjectMap(map, temp, isolate()->factory()->meta_map());
4061 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4062 }
4063
4064 if (!FLAG_incremental_marking) {
4065 return;
4066 }
4067
4068 if (emit_debug_code()) {
4069 UseScratchRegisterScope temps(this);
4070 Register temp = temps.AcquireX();
4071
4072 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
4073 Cmp(temp, map);
4074 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4075 }
4076
4077 // First, check if a write barrier is even needed. The tests below
4078 // catch stores of smis and stores into the young generation.
4079 Label done;
4080
4081 // A single check of the map's pages interesting flag suffices, since it is
4082 // only set during incremental collection, and then it's also guaranteed that
4083 // the from object's page's interesting flag is also set. This optimization
4084 // relies on the fact that maps can never be in new space.
4085 CheckPageFlagClear(map,
4086 map, // Used as scratch.
4087 MemoryChunk::kPointersToHereAreInterestingMask,
4088 &done);
4089
4090 // Record the actual write.
4091 if (lr_status == kLRHasNotBeenSaved) {
4092 Push(lr);
4093 }
4094 Add(dst, object, HeapObject::kMapOffset - kHeapObjectTag);
4095 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
4096 fp_mode);
4097 CallStub(&stub);
4098 if (lr_status == kLRHasNotBeenSaved) {
4099 Pop(lr);
4100 }
4101
4102 Bind(&done);
4103
4104 // Count number of write barriers in generated code.
4105 isolate()->counters()->write_barriers_static()->Increment();
4106 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map,
4107 dst);
4108
4109 // Clobber clobbered registers when running with the debug-code flag
4110 // turned on to provoke errors.
4111 if (emit_debug_code()) {
4112 Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
4113 Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
4114 }
4115 }
4116
4117
4118 // Will clobber: object, address, value.
4119 // If lr_status is kLRHasBeenSaved, lr will also be clobbered.
4120 //
4121 // The register 'object' contains a heap object pointer. The heap object tag is
4122 // shifted away.
4123 void MacroAssembler::RecordWrite(
4124 Register object,
4125 Register address,
4126 Register value,
4127 LinkRegisterStatus lr_status,
4128 SaveFPRegsMode fp_mode,
4129 RememberedSetAction remembered_set_action,
4130 SmiCheck smi_check,
4131 PointersToHereCheck pointers_to_here_check_for_value) {
4132 ASM_LOCATION("MacroAssembler::RecordWrite");
4133 DCHECK(!AreAliased(object, value));
4134
4135 if (emit_debug_code()) {
4136 UseScratchRegisterScope temps(this);
4137 Register temp = temps.AcquireX();
4138
4139 Ldr(temp, MemOperand(address));
4140 Cmp(temp, value);
4141 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4142 }
4143
4144 // First, check if a write barrier is even needed. The tests below
4145 // catch stores of smis and stores into the young generation.
4146 Label done;
4147
4148 if (smi_check == INLINE_SMI_CHECK) {
4149 DCHECK_EQ(0, kSmiTag);
4150 JumpIfSmi(value, &done);
4151 }
4152
4153 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
4154 CheckPageFlagClear(value,
4155 value, // Used as scratch.
4156 MemoryChunk::kPointersToHereAreInterestingMask,
4157 &done);
4158 }
4159 CheckPageFlagClear(object,
4160 value, // Used as scratch.
4161 MemoryChunk::kPointersFromHereAreInterestingMask,
4162 &done);
4163
4164 // Record the actual write.
4165 if (lr_status == kLRHasNotBeenSaved) {
4166 Push(lr);
4167 }
4168 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
4169 fp_mode);
4170 CallStub(&stub);
4171 if (lr_status == kLRHasNotBeenSaved) {
4172 Pop(lr);
4173 }
4174
4175 Bind(&done);
4176
4177 // Count number of write barriers in generated code.
4178 isolate()->counters()->write_barriers_static()->Increment();
4179 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address,
4180 value);
4181
4182 // Clobber clobbered registers when running with the debug-code flag
4183 // turned on to provoke errors.
4184 if (emit_debug_code()) {
4185 Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
4186 Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
4187 }
4188 }
4189
4190
4191 void MacroAssembler::AssertHasValidColor(const Register& reg) {
4192 if (emit_debug_code()) {
4193 // The bit sequence is backward. The first character in the string
4194 // represents the least significant bit.
4195 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
4196
4197 Label color_is_valid;
4198 Tbnz(reg, 0, &color_is_valid);
4199 Tbz(reg, 1, &color_is_valid);
4200 Abort(kUnexpectedColorFound);
4201 Bind(&color_is_valid);
4202 }
4203 }
4204
4205
4206 void MacroAssembler::GetMarkBits(Register addr_reg,
4207 Register bitmap_reg,
4208 Register shift_reg) {
4209 DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg));
4210 DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
4211 // addr_reg is divided into fields:
4212 // |63 page base 20|19 high 8|7 shift 3|2 0|
4213 // 'high' gives the index of the cell holding color bits for the object.
4214 // 'shift' gives the offset in the cell for this object's color.
4215 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
4216 UseScratchRegisterScope temps(this);
4217 Register temp = temps.AcquireX();
4218 Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
4219 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
4220 Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
4221 // bitmap_reg:
4222 // |63 page base 20|19 zeros 15|14 high 3|2 0|
4223 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
4224 }
4225
4226
4227 void MacroAssembler::HasColor(Register object,
4228 Register bitmap_scratch,
4229 Register shift_scratch,
4230 Label* has_color,
4231 int first_bit,
4232 int second_bit) {
4233 // See mark-compact.h for color definitions.
4234 DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch));
4235
4236 GetMarkBits(object, bitmap_scratch, shift_scratch);
4237 Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4238 // Shift the bitmap down to get the color of the object in bits [1:0].
4239 Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
4240
4241 AssertHasValidColor(bitmap_scratch);
4242
4243 // These bit sequences are backwards. The first character in the string
4244 // represents the least significant bit.
4245 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4246 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
4247 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
4248
4249 // Check for the color.
4250 if (first_bit == 0) {
4251 // Checking for white.
4252 DCHECK(second_bit == 0);
4253 // We only need to test the first bit.
4254 Tbz(bitmap_scratch, 0, has_color);
4255 } else {
4256 Label other_color;
4257 // Checking for grey or black.
4258 Tbz(bitmap_scratch, 0, &other_color);
4259 if (second_bit == 0) {
4260 Tbz(bitmap_scratch, 1, has_color);
4261 } else {
4262 Tbnz(bitmap_scratch, 1, has_color);
4263 }
4264 Bind(&other_color);
4265 }
4266
4267 // Fall through if it does not have the right color.
4268 }
4269
4270
4271 void MacroAssembler::JumpIfBlack(Register object,
4272 Register scratch0,
4273 Register scratch1,
4274 Label* on_black) {
4275 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
4276 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
4277 }
4278
4279
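// Walk up the prototype chain of |object| and jump to |found| as soon as a
// prototype with dictionary elements is encountered; prototypes with an
// instance type below JS_OBJECT_TYPE also branch to |found|.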
4280 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
4281 Register object,
4282 Register scratch0,
4283 Register scratch1,
4284 Label* found) {
4285 DCHECK(!AreAliased(object, scratch0, scratch1));
4286 Register current = scratch0;
4287 Label loop_again, end;
4288
4289 // Scratch contains elements pointer.
4290 Mov(current, object);
4291 Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
4292 Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
4293 CompareAndBranch(current, Heap::kNullValueRootIndex, eq, &end);
4294
4295 // Loop based on the map going up the prototype chain.
4296 Bind(&loop_again);
4297 Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
4298 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
4299 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
4300 CompareInstanceType(current, scratch1, JS_OBJECT_TYPE);
4301 B(lo, found);
4302 Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
4303 DecodeField<Map::ElementsKindBits>(scratch1);
4304 CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
4305 Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
4306 CompareAndBranch(current, Heap::kNullValueRootIndex, ne, &loop_again);
4307
4308 Bind(&end);
4309 }
4310
4311
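// Jump to |value_is_white| if the mark bits for |value| indicate white, i.e.
// the object has not been marked live.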
4312 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
4313 Register shift_scratch, Register load_scratch,
4314 Register length_scratch,
4315 Label* value_is_white) {
4316 DCHECK(!AreAliased(
4317 value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
4318
4319 // These bit sequences are backwards. The first character in the string
4320 // represents the least significant bit.
4321 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4322 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
4323 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
4324
4325 GetMarkBits(value, bitmap_scratch, shift_scratch);
4326 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4327 Lsr(load_scratch, load_scratch, shift_scratch);
4328
4329 AssertHasValidColor(load_scratch);
4330
4331 // If the value is black or grey we don't need to do anything.
4332 // Since both black and grey have a 1 in the first position and white does
4333 // not have a 1 there we only need to check one bit.
4334 Tbz(load_scratch, 0, value_is_white);
4335 }
4336
4337
4338 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
4339 if (emit_debug_code()) {
4340 Check(cond, reason);
4341 }
4342 }
4343
4344
4345
4346 void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
4347 if (emit_debug_code()) {
4348 CheckRegisterIsClear(reg, reason);
4349 }
4350 }
4351
4352
4353 void MacroAssembler::AssertRegisterIsRoot(Register reg,
4354 Heap::RootListIndex index,
4355 BailoutReason reason) {
4356 if (emit_debug_code()) {
4357 CompareRoot(reg, index);
4358 Check(eq, reason);
4359 }
4360 }
4361
4362
4363 void MacroAssembler::AssertFastElements(Register elements) {
4364 if (emit_debug_code()) {
4365 UseScratchRegisterScope temps(this);
4366 Register temp = temps.AcquireX();
4367 Label ok;
4368 Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
4369 JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
4370 JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
4371 JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
4372 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4373 Bind(&ok);
4374 }
4375 }
4376
4377
4378 void MacroAssembler::AssertIsString(const Register& object) {
4379 if (emit_debug_code()) {
4380 UseScratchRegisterScope temps(this);
4381 Register temp = temps.AcquireX();
4382 STATIC_ASSERT(kSmiTag == 0);
4383 Tst(object, kSmiTagMask);
4384 Check(ne, kOperandIsNotAString);
4385 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
4386 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
4387 Check(lo, kOperandIsNotAString);
4388 }
4389 }
4390
4391
4392 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
4393 Label ok;
4394 B(cond, &ok);
4395 Abort(reason);
4396 // Will not return here.
4397 Bind(&ok);
4398 }
4399
4400
4401 void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
4402 Label ok;
4403 Cbz(reg, &ok);
4404 Abort(reason);
4405 // Will not return here.
4406 Bind(&ok);
4407 }
4408
4409
4410 void MacroAssembler::Abort(BailoutReason reason) {
4411 #ifdef DEBUG
4412 RecordComment("Abort message: ");
4413 RecordComment(GetBailoutReason(reason));
4414
4415 if (FLAG_trap_on_abort) {
4416 Brk(0);
4417 return;
4418 }
4419 #endif
4420
4421 // Abort is used in some contexts where csp is the stack pointer. In order to
4422 // simplify the CallRuntime code, make sure that jssp is the stack pointer.
4423 // There is no risk of register corruption here because Abort doesn't return.
4424 Register old_stack_pointer = StackPointer();
4425 SetStackPointer(jssp);
4426 Mov(jssp, old_stack_pointer);
4427
4428 // We need some scratch registers for the MacroAssembler, so make sure we have
4429 // some. This is safe here because Abort never returns.
4430 RegList old_tmp_list = TmpList()->list();
4431 TmpList()->Combine(MacroAssembler::DefaultTmpList());
4432
4433 if (use_real_aborts()) {
4434 // Avoid infinite recursion; Push contains some assertions that use Abort.
4435 NoUseRealAbortsScope no_real_aborts(this);
4436
4437 // Check if Abort() has already been initialized.
4438 DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
4439
4440 Move(x1, Smi::FromInt(static_cast<int>(reason)));
4441
4442 if (!has_frame_) {
4443 // We don't actually want to generate a pile of code for this, so just
4444 // claim there is a stack frame, without generating one.
4445 FrameScope scope(this, StackFrame::NONE);
4446 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
4447 } else {
4448 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
4449 }
4450 } else {
4451 // Load the string to pass to Printf.
4452 Label msg_address;
4453 Adr(x0, &msg_address);
4454
4455 // Call Printf directly to report the error.
4456 CallPrintf();
4457
4458 // We need a way to stop execution on both the simulator and real hardware,
4459 // and Unreachable() is the best option.
4460 Unreachable();
4461
4462 // Emit the message string directly in the instruction stream.
4463 {
4464 BlockPoolsScope scope(this);
4465 Bind(&msg_address);
4466 EmitStringData(GetBailoutReason(reason));
4467 }
4468 }
4469
4470 SetStackPointer(old_stack_pointer);
4471 TmpList()->set_list(old_tmp_list);
4472 }
4473
4474
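// If |map_in_out| is the native context's cached Array map for
// |expected_kind|, replace it with the cached map for |transitioned_kind|;
// otherwise jump to |no_map_match|.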
4475 void MacroAssembler::LoadTransitionedArrayMapConditional(
4476 ElementsKind expected_kind,
4477 ElementsKind transitioned_kind,
4478 Register map_in_out,
4479 Register scratch1,
4480 Register scratch2,
4481 Label* no_map_match) {
4482 DCHECK(IsFastElementsKind(expected_kind));
4483 DCHECK(IsFastElementsKind(transitioned_kind));
4484
4485 // Check that the function's map is the same as the expected cached map.
4486 Ldr(scratch1, NativeContextMemOperand());
4487 Ldr(scratch2,
4488 ContextMemOperand(scratch1, Context::ArrayMapIndex(expected_kind)));
4489 Cmp(map_in_out, scratch2);
4490 B(ne, no_map_match);
4491
4492 // Use the transitioned cached map.
4493 Ldr(map_in_out,
4494 ContextMemOperand(scratch1, Context::ArrayMapIndex(transitioned_kind)));
4495 }
4496
4497
4498 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
4499 Ldr(dst, NativeContextMemOperand());
4500 Ldr(dst, ContextMemOperand(dst, index));
4501 }
4502
4503
4504 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4505 Register map,
4506 Register scratch) {
4507 // Load the initial map. The global functions all have initial maps.
4508 Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4509 if (emit_debug_code()) {
4510 Label ok, fail;
4511 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4512 B(&ok);
4513 Bind(&fail);
4514 Abort(kGlobalFunctionsMustHaveInitialMap);
4515 Bind(&ok);
4516 }
4517 }
4518
4519
4520 // This is the main Printf implementation. All other Printf variants call
4521 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
4522 void MacroAssembler::PrintfNoPreserve(const char * format,
4523 const CPURegister& arg0,
4524 const CPURegister& arg1,
4525 const CPURegister& arg2,
4526 const CPURegister& arg3) {
4527 // We cannot handle a caller-saved stack pointer. It doesn't make much sense
4528 // in most cases anyway, so this restriction shouldn't be too serious.
4529 DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
4530
4531 // The provided arguments, and their proper procedure-call standard registers.
4532 CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
4533 CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};
4534
4535 int arg_count = kPrintfMaxArgCount;
4536
4537 // The PCS varargs registers for printf. Note that x0 is used for the printf
4538 // format string.
4539 static const CPURegList kPCSVarargs =
4540 CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
4541 static const CPURegList kPCSVarargsFP =
4542 CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1);

  // We can use caller-saved registers as scratch values, except for the
  // arguments and the PCS registers where they might need to go.
  CPURegList tmp_list = kCallerSaved;
  tmp_list.Remove(x0);  // Used to pass the format string.
  tmp_list.Remove(kPCSVarargs);
  tmp_list.Remove(arg0, arg1, arg2, arg3);

  CPURegList fp_tmp_list = kCallerSavedFP;
  fp_tmp_list.Remove(kPCSVarargsFP);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);

  // Override the MacroAssembler's scratch register list. The lists will be
  // reset automatically at the end of the UseScratchRegisterScope.
  UseScratchRegisterScope temps(this);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  // Copies of the printf vararg registers that we can pop from.
  CPURegList pcs_varargs = kPCSVarargs;
  CPURegList pcs_varargs_fp = kPCSVarargsFP;

  // Place the arguments. There are lots of clever tricks and optimizations we
  // could use here, but Printf is a debug tool so instead we just try to keep
  // it simple: Move each input that isn't already in the right place to a
  // scratch register, then move everything back.
  for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
    // Work out the proper PCS register for this argument.
    if (args[i].IsRegister()) {
      pcs[i] = pcs_varargs.PopLowestIndex().X();
      // We might only need a W register here. We need to know the size of the
      // argument so we can properly encode it for the simulator call.
      if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
    } else if (args[i].IsFPRegister()) {
      // In C, floats are always cast to doubles for varargs calls.
      pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
    } else {
      DCHECK(args[i].IsNone());
      arg_count = i;
      break;
    }

    // If the argument is already in the right place, leave it where it is.
    if (args[i].Aliases(pcs[i])) continue;

    // Otherwise, if the argument is in a PCS argument register, allocate an
    // appropriate scratch register and then move it out of the way.
    if (kPCSVarargs.IncludesAliasOf(args[i]) ||
        kPCSVarargsFP.IncludesAliasOf(args[i])) {
      if (args[i].IsRegister()) {
        Register old_arg = Register(args[i]);
        Register new_arg = temps.AcquireSameSizeAs(old_arg);
        Mov(new_arg, old_arg);
        args[i] = new_arg;
      } else {
        FPRegister old_arg = FPRegister(args[i]);
        FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
        Fmov(new_arg, old_arg);
        args[i] = new_arg;
      }
    }
  }

  // Do a second pass to move values into their final positions and perform any
  // conversions that may be required.
  for (int i = 0; i < arg_count; i++) {
    DCHECK(pcs[i].type() == args[i].type());
    if (pcs[i].IsRegister()) {
      Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
    } else {
      DCHECK(pcs[i].IsFPRegister());
      if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
        Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
      } else {
        Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
      }
    }
  }

  // Load the format string into x0, as per the procedure-call standard.
  //
  // To make the code as portable as possible, the format string is encoded
  // directly in the instruction stream. It might be cleaner to encode it in a
  // literal pool, but since Printf is usually used for debugging, it is
  // beneficial for it to be minimally dependent on other features.
  Label format_address;
  Adr(x0, &format_address);

  // Emit the format string directly in the instruction stream.
  { BlockPoolsScope scope(this);
    Label after_data;
    B(&after_data);
    Bind(&format_address);
    EmitStringData(format);
    Unreachable();
    Bind(&after_data);
  }

  // We don't pass any arguments on the stack, but we still need to align the C
  // stack pointer to a 16-byte boundary for PCS compliance.
  if (!csp.Is(StackPointer())) {
    Bic(csp, StackPointer(), 0xf);
  }

  CallPrintf(arg_count, pcs);
}


void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) {
  // A call to printf needs special handling for the simulator, since the
  // system printf function will use a different instruction set and the
  // procedure-call standard will not be compatible.
#ifdef USE_SIMULATOR
  { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
    hlt(kImmExceptionIsPrintf);
    dc32(arg_count);  // kPrintfArgCountOffset

    // Determine the argument pattern.
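    // Each argument gets a field of kPrintfArgPatternBits bits, packed from
    // the least-significant end. For example, a W register, an X register and
    // a D register would be described by
    //   kPrintfArgW | (kPrintfArgX << kPrintfArgPatternBits)
    //               | (kPrintfArgD << (2 * kPrintfArgPatternBits)).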
    uint32_t arg_pattern_list = 0;
    for (int i = 0; i < arg_count; i++) {
      uint32_t arg_pattern;
      if (args[i].IsRegister()) {
        arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
      } else {
        DCHECK(args[i].Is64Bits());
        arg_pattern = kPrintfArgD;
      }
      DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
      arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
    }
    dc32(arg_pattern_list);  // kPrintfArgPatternListOffset
  }
#else
  Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
#endif
}


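// Example (hypothetical debugging call from a code generator, assuming a
// 64-bit value in x10 and a 32-bit value in w11):
//   masm->Printf("value: 0x%" PRIx64 ", count: %d\n", x10, w11);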
void MacroAssembler::Printf(const char * format,
                            CPURegister arg0,
                            CPURegister arg1,
                            CPURegister arg2,
                            CPURegister arg3) {
  // We can only print sp if it is the current stack pointer.
  if (!csp.Is(StackPointer())) {
    DCHECK(!csp.Aliases(arg0));
    DCHECK(!csp.Aliases(arg1));
    DCHECK(!csp.Aliases(arg2));
    DCHECK(!csp.Aliases(arg3));
  }

  // Printf is expected to preserve all registers, so make sure that none are
  // available as scratch registers until we've preserved them.
  RegList old_tmp_list = TmpList()->list();
  RegList old_fp_tmp_list = FPTmpList()->list();
  TmpList()->set_list(0);
  FPTmpList()->set_list(0);

  // Preserve all caller-saved registers as well as NZCV.
  // If csp is the stack pointer, PushCPURegList asserts that the size of each
  // list is a multiple of 16 bytes.
  PushCPURegList(kCallerSaved);
  PushCPURegList(kCallerSavedFP);

  // We can use caller-saved registers as scratch values (except for argN).
  CPURegList tmp_list = kCallerSaved;
  CPURegList fp_tmp_list = kCallerSavedFP;
  tmp_list.Remove(arg0, arg1, arg2, arg3);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  { UseScratchRegisterScope temps(this);
    // If any of the arguments are the current stack pointer, allocate a new
    // register for them, and adjust the value to compensate for pushing the
    // caller-saved registers.
    bool arg0_sp = StackPointer().Aliases(arg0);
    bool arg1_sp = StackPointer().Aliases(arg1);
    bool arg2_sp = StackPointer().Aliases(arg2);
    bool arg3_sp = StackPointer().Aliases(arg3);
    if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
      // Allocate a register to hold the original stack pointer value, to pass
      // to PrintfNoPreserve as an argument.
      Register arg_sp = temps.AcquireX();
      Add(arg_sp, StackPointer(),
          kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
      if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
      if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
      if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
      if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
    }

    // Preserve NZCV.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Mrs(tmp, NZCV);
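      // xzr is pushed alongside tmp so that the push stays a multiple of
      // 16 bytes, as required when csp is the stack pointer.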
      Push(tmp, xzr);
    }

    PrintfNoPreserve(format, arg0, arg1, arg2, arg3);

    // Restore NZCV.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Pop(xzr, tmp);
      Msr(NZCV, tmp);
    }
  }

  PopCPURegList(kCallerSavedFP);
  PopCPURegList(kCallerSaved);

  TmpList()->set_list(old_tmp_list);
  FPTmpList()->set_list(old_fp_tmp_list);
}


void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
  // TODO(jbramley): Other architectures use the internal memcpy to copy the
  // sequence. If this is a performance bottleneck, we should consider caching
  // the sequence and copying it in the same way.
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  DCHECK(jssp.Is(StackPointer()));
  EmitFrameSetupForCodeAgePatching(this);
}



void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  DCHECK(jssp.Is(StackPointer()));
  EmitCodeAgeSequence(this, stub);
}


#undef __
#define __ assm->


void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
  Label start;
  __ bind(&start);

  // We can do this sequence using four instructions, but the code ageing
  // sequence that patches it needs five, so we use the extra space to try to
  // simplify some addressing modes and remove some dependencies (compared to
  // using two stp instructions with write-back).
  __ sub(jssp, jssp, 4 * kXRegSize);
  __ sub(csp, csp, 4 * kXRegSize);
  __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
  __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
  __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);

  __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
}


void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
                                         Code * stub) {
  Label start;
  __ bind(&start);
  // When the stub is called, the sequence is replaced with the young sequence
  // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
  // stub jumps to &start, stored in x0. The young sequence does not call the
  // stub so there is no infinite loop here.
  //
  // A branch (br) is used rather than a call (blr) because this code replaces
  // the frame setup code that would normally preserve lr.
  __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
  __ adr(x0, &start);
  __ br(ip0);
  // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
  // until now (kCodeAgeStubEntryOffset) is the same for all code age
  // sequences.
  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
  if (stub) {
    __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
    __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
  }
}


bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(is_young ||
         isolate->code_aging_helper()->IsOld(sequence));
  return is_young;
}


void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!AreAliased(result, dividend));
  DCHECK(result.Is32Bits() && dividend.Is32Bits());
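  // Compute dividend / divisor by multiplying by a precomputed "magic"
  // multiplier and keeping the high 32 bits of the product (see
  // base::SignedDivisionByConstant). For example, for divisor == 3 the
  // multiplier is 0x55555556 with shift 0, so the quotient is
  //   high32(dividend * 0x55555556) + (dividend < 0 ? 1 : 0).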
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  Mov(result, mag.multiplier);
  Smull(result.X(), dividend, result);
  Asr(result.X(), result.X(), 32);
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) Add(result, result, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend);
  if (mag.shift > 0) Asr(result, result, mag.shift);
  Add(result, result, Operand(dividend, LSR, 31));
}


#undef __


UseScratchRegisterScope::~UseScratchRegisterScope() {
  available_->set_list(old_available_);
  availablefp_->set_list(old_availablefp_);
}


Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
  int code = AcquireNextAvailable(available_).code();
  return Register::Create(code, reg.SizeInBits());
}


FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
  int code = AcquireNextAvailable(availablefp_).code();
  return FPRegister::Create(code, reg.SizeInBits());
}


CPURegister UseScratchRegisterScope::AcquireNextAvailable(
    CPURegList* available) {
  CHECK(!available->IsEmpty());
  CPURegister result = available->PopLowestIndex();
  DCHECK(!AreAliased(result, xzr, csp));
  return result;
}


CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
                                                   const CPURegister& reg) {
  DCHECK(available->IncludesAliasOf(reg));
  available->Remove(reg);
  return reg;
}


#define __ masm->


void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
                              const Label* smi_check) {
  Assembler::BlockPoolsScope scope(masm);
  if (reg.IsValid()) {
    DCHECK(smi_check->is_bound());
    DCHECK(reg.Is64Bits());

    // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
    // 'check' in the other bits. The possible offset is limited in that we
    // use BitField to pack the data, and the underlying data type is a
    // uint32_t.
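    // For example, a check whose label was bound three instructions before
    // this InlineData would be encoded (given the 5-bit register field) as
    //   RegisterBits::encode(2) | DeltaBits::encode(3) == (3 << 5) | 2
    // for register x2.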
    uint32_t delta =
        static_cast<uint32_t>(__ InstructionsGeneratedSince(smi_check));
    __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
  } else {
    DCHECK(!smi_check->is_bound());

    // An offset of 0 indicates that there is no patch site.
    __ InlineData(0);
  }
}


InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
    : reg_(NoReg), smi_check_(NULL) {
  InstructionSequence* inline_data = InstructionSequence::At(info);
  DCHECK(inline_data->IsInlineData());
  if (inline_data->IsInlineData()) {
    uint64_t payload = inline_data->InlineData();
    // We use BitField to decode the payload, and BitField can only handle
    // 32-bit values.
    DCHECK(is_uint32(payload));
    if (payload != 0) {
      uint32_t payload32 = static_cast<uint32_t>(payload);
      int reg_code = RegisterBits::decode(payload32);
      reg_ = Register::XRegFromCode(reg_code);
      int smi_check_delta = DeltaBits::decode(payload32);
      DCHECK(smi_check_delta != 0);
      smi_check_ = inline_data->preceding(smi_check_delta);
    }
  }
}


#undef __


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64