1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <limits.h> // For LONG_MIN, LONG_MAX.
6
7 #if V8_TARGET_ARCH_ARM
8
9 #include "src/base/bits.h"
10 #include "src/base/division-by-constant.h"
11 #include "src/bootstrapper.h"
12 #include "src/codegen.h"
13 #include "src/debug/debug.h"
14 #include "src/register-configuration.h"
15 #include "src/runtime/runtime.h"
16
17 #include "src/arm/macro-assembler-arm.h"
18
19 namespace v8 {
20 namespace internal {
21
22 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
23 CodeObjectRequired create_code_object)
24 : Assembler(arg_isolate, buffer, size),
25 generating_stub_(false),
26 has_frame_(false) {
27 if (create_code_object == CodeObjectRequired::kYes) {
28 code_object_ =
29 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
30 }
31 }
32
33
34 void MacroAssembler::Jump(Register target, Condition cond) {
35 bx(target, cond);
36 }
37
38
39 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
40 Condition cond) {
41 DCHECK(RelocInfo::IsCodeTarget(rmode));
42 mov(pc, Operand(target, rmode), LeaveCC, cond);
43 }
44
45
46 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
47 Condition cond) {
48 DCHECK(!RelocInfo::IsCodeTarget(rmode));
49 Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
50 }
51
52
53 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
54 Condition cond) {
55 DCHECK(RelocInfo::IsCodeTarget(rmode));
56 // 'code' is always generated ARM code, never THUMB code
57 AllowDeferredHandleDereference embedding_raw_address;
58 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
59 }
60
61
62 int MacroAssembler::CallSize(Register target, Condition cond) {
63 return kInstrSize;
64 }
65
66
67 void MacroAssembler::Call(Register target, Condition cond) {
68 // Block constant pool for the call instruction sequence.
69 BlockConstPoolScope block_const_pool(this);
70 Label start;
71 bind(&start);
72 blx(target, cond);
73 DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
74 }
75
76
77 int MacroAssembler::CallSize(
78 Address target, RelocInfo::Mode rmode, Condition cond) {
79 Instr mov_instr = cond | MOV | LeaveCC;
80 Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
81 return kInstrSize +
82 mov_operand.instructions_required(this, mov_instr) * kInstrSize;
83 }
84
85
86 int MacroAssembler::CallStubSize(
87 CodeStub* stub, TypeFeedbackId ast_id, Condition cond) {
88 return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
89 }
90
91
92 void MacroAssembler::Call(Address target,
93 RelocInfo::Mode rmode,
94 Condition cond,
95 TargetAddressStorageMode mode) {
96 // Block constant pool for the call instruction sequence.
97 BlockConstPoolScope block_const_pool(this);
98 Label start;
99 bind(&start);
100
101 bool old_predictable_code_size = predictable_code_size();
102 if (mode == NEVER_INLINE_TARGET_ADDRESS) {
103 set_predictable_code_size(true);
104 }
105
106 #ifdef DEBUG
107 // Check the expected size before generating code to ensure we assume the same
108 // constant pool availability (e.g., whether constant pool is full or not).
109 int expected_size = CallSize(target, rmode, cond);
110 #endif
111
112 // Call sequence on V7 or later may be:
113 // movw ip, #... @ call address low 16
114 // movt ip, #... @ call address high 16
115 // blx ip
116 // @ return address
117 // Or for pre-V7 or values that may be back-patched
118 // to avoid ICache flushes:
119 // ldr ip, [pc, #...] @ call address
120 // blx ip
121 // @ return address
122
123 mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
124 blx(ip, cond);
125
126 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
127 if (mode == NEVER_INLINE_TARGET_ADDRESS) {
128 set_predictable_code_size(old_predictable_code_size);
129 }
130 }
131
132
133 int MacroAssembler::CallSize(Handle<Code> code,
134 RelocInfo::Mode rmode,
135 TypeFeedbackId ast_id,
136 Condition cond) {
137 AllowDeferredHandleDereference using_raw_address;
138 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
139 }
140
141
142 void MacroAssembler::Call(Handle<Code> code,
143 RelocInfo::Mode rmode,
144 TypeFeedbackId ast_id,
145 Condition cond,
146 TargetAddressStorageMode mode) {
147 Label start;
148 bind(&start);
149 DCHECK(RelocInfo::IsCodeTarget(rmode));
150 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
151 SetRecordedAstId(ast_id);
152 rmode = RelocInfo::CODE_TARGET_WITH_ID;
153 }
154 // 'code' is always generated ARM code, never THUMB code
155 AllowDeferredHandleDereference embedding_raw_address;
156 Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
157 }
158
159 void MacroAssembler::CallDeoptimizer(Address target) {
160 BlockConstPoolScope block_const_pool(this);
161
162 uintptr_t target_raw = reinterpret_cast<uintptr_t>(target);
163
164 // We use blx, like a call, but it does not return here. The link register is
165 // used by the deoptimizer to work out what called it.
166 if (CpuFeatures::IsSupported(ARMv7)) {
167 CpuFeatureScope scope(this, ARMv7);
168 movw(ip, target_raw & 0xffff);
169 movt(ip, (target_raw >> 16) & 0xffff);
170 blx(ip);
171 } else {
172 // We need to load a literal, but we can't use the usual constant pool
173 // because we call this from a patcher, and cannot afford the guard
174 // instruction and other administrative overhead.
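// The address literal emitted by dd() below sits two instructions after this
// ldr, hence the 2 * kInstrSize displacement corrected for the pc read-ahead
// (kPcLoadDelta).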
175 ldr(ip, MemOperand(pc, (2 * kInstrSize) - kPcLoadDelta));
176 blx(ip);
177 dd(target_raw);
178 }
179 }
180
181 int MacroAssembler::CallDeoptimizerSize() {
182 // ARMv7+:
183 // movw ip, ...
184 // movt ip, ...
185 // blx ip @ This never returns.
186 //
187 // ARMv6:
188 // ldr ip, =address
189 // blx ip @ This never returns.
190 // .word address
191 return 3 * kInstrSize;
192 }
193
194 void MacroAssembler::Ret(Condition cond) {
195 bx(lr, cond);
196 }
197
198
199 void MacroAssembler::Drop(int count, Condition cond) {
200 if (count > 0) {
201 add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
202 }
203 }
204
205 void MacroAssembler::Drop(Register count, Condition cond) {
206 add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond);
207 }
208
209 void MacroAssembler::Ret(int drop, Condition cond) {
210 Drop(drop, cond);
211 Ret(cond);
212 }
213
214
215 void MacroAssembler::Swap(Register reg1,
216 Register reg2,
217 Register scratch,
218 Condition cond) {
219 if (scratch.is(no_reg)) {
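// No scratch register available: swap the two registers in place with the
// classic three-XOR trick (each eor is predicated on 'cond').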
220 eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
221 eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
222 eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
223 } else {
224 mov(scratch, reg1, LeaveCC, cond);
225 mov(reg1, reg2, LeaveCC, cond);
226 mov(reg2, scratch, LeaveCC, cond);
227 }
228 }
229
230
231 void MacroAssembler::Call(Label* target) {
232 bl(target);
233 }
234
235
236 void MacroAssembler::Push(Handle<Object> handle) {
237 mov(ip, Operand(handle));
238 push(ip);
239 }
240
241
242 void MacroAssembler::Move(Register dst, Handle<Object> value) {
243 mov(dst, Operand(value));
244 }
245
246
247 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
248 if (!dst.is(src)) {
249 mov(dst, src, LeaveCC, cond);
250 }
251 }
252
253 void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
254 Condition cond) {
255 if (!dst.is(src)) {
256 vmov(dst, src, cond);
257 }
258 }
259
260 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
261 Condition cond) {
262 if (!dst.is(src)) {
263 vmov(dst, src, cond);
264 }
265 }
266
267 void MacroAssembler::Mls(Register dst, Register src1, Register src2,
268 Register srcA, Condition cond) {
269 if (CpuFeatures::IsSupported(ARMv7)) {
270 CpuFeatureScope scope(this, ARMv7);
271 mls(dst, src1, src2, srcA, cond);
272 } else {
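// Pre-ARMv7 fallback: mls is unavailable, so form the product in ip and
// subtract it from the accumulator in a separate instruction.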
273 DCHECK(!srcA.is(ip));
274 mul(ip, src1, src2, LeaveCC, cond);
275 sub(dst, srcA, ip, LeaveCC, cond);
276 }
277 }
278
279
280 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
281 Condition cond) {
282 if (!src2.is_reg() &&
283 !src2.must_output_reloc_info(this) &&
284 src2.immediate() == 0) {
285 mov(dst, Operand::Zero(), LeaveCC, cond);
286 } else if (!(src2.instructions_required(this) == 1) &&
287 !src2.must_output_reloc_info(this) &&
288 CpuFeatures::IsSupported(ARMv7) &&
289 base::bits::IsPowerOfTwo32(src2.immediate() + 1)) {
290 CpuFeatureScope scope(this, ARMv7);
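// The immediate has the form 2^n - 1, so the AND is a zero-extract of the
// low n bits; ubfx does this in one instruction even when the mask is not
// encodable as an ARM immediate.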
291 ubfx(dst, src1, 0,
292 WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
293 } else {
294 and_(dst, src1, src2, LeaveCC, cond);
295 }
296 }
297
298
299 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
300 Condition cond) {
301 DCHECK(lsb < 32);
302 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
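// Emulate ubfx: AND with the field mask, then shift the field down to bit 0.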
303 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
304 and_(dst, src1, Operand(mask), LeaveCC, cond);
305 if (lsb != 0) {
306 mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
307 }
308 } else {
309 CpuFeatureScope scope(this, ARMv7);
310 ubfx(dst, src1, lsb, width, cond);
311 }
312 }
313
314
315 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
316 Condition cond) {
317 DCHECK(lsb < 32);
318 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
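// Emulate sbfx: mask out the field, shift it up to the top of the register,
// then arithmetic-shift it back down so its top bit is sign-extended.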
319 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
320 and_(dst, src1, Operand(mask), LeaveCC, cond);
321 int shift_up = 32 - lsb - width;
322 int shift_down = lsb + shift_up;
323 if (shift_up != 0) {
324 mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
325 }
326 if (shift_down != 0) {
327 mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
328 }
329 } else {
330 CpuFeatureScope scope(this, ARMv7);
331 sbfx(dst, src1, lsb, width, cond);
332 }
333 }
334
335
336 void MacroAssembler::Bfi(Register dst,
337 Register src,
338 Register scratch,
339 int lsb,
340 int width,
341 Condition cond) {
342 DCHECK(0 <= lsb && lsb < 32);
343 DCHECK(0 <= width && width < 32);
344 DCHECK(lsb + width < 32);
345 DCHECK(!scratch.is(dst));
346 if (width == 0) return;
347 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
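// Emulate bfi: clear the destination field, mask and shift the source into
// position, then OR it into the destination.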
348 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
349 bic(dst, dst, Operand(mask));
350 and_(scratch, src, Operand((1 << width) - 1));
351 mov(scratch, Operand(scratch, LSL, lsb));
352 orr(dst, dst, scratch);
353 } else {
354 CpuFeatureScope scope(this, ARMv7);
355 bfi(dst, src, lsb, width, cond);
356 }
357 }
358
359
360 void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
361 Condition cond) {
362 DCHECK(lsb < 32);
363 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
364 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
365 bic(dst, src, Operand(mask));
366 } else {
367 CpuFeatureScope scope(this, ARMv7);
368 Move(dst, src, cond);
369 bfc(dst, lsb, width, cond);
370 }
371 }
372
373
374 void MacroAssembler::Load(Register dst,
375 const MemOperand& src,
376 Representation r) {
377 DCHECK(!r.IsDouble());
378 if (r.IsInteger8()) {
379 ldrsb(dst, src);
380 } else if (r.IsUInteger8()) {
381 ldrb(dst, src);
382 } else if (r.IsInteger16()) {
383 ldrsh(dst, src);
384 } else if (r.IsUInteger16()) {
385 ldrh(dst, src);
386 } else {
387 ldr(dst, src);
388 }
389 }
390
391
392 void MacroAssembler::Store(Register src,
393 const MemOperand& dst,
394 Representation r) {
395 DCHECK(!r.IsDouble());
396 if (r.IsInteger8() || r.IsUInteger8()) {
397 strb(src, dst);
398 } else if (r.IsInteger16() || r.IsUInteger16()) {
399 strh(src, dst);
400 } else {
401 if (r.IsHeapObject()) {
402 AssertNotSmi(src);
403 } else if (r.IsSmi()) {
404 AssertSmi(src);
405 }
406 str(src, dst);
407 }
408 }
409
410
411 void MacroAssembler::LoadRoot(Register destination,
412 Heap::RootListIndex index,
413 Condition cond) {
414 ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
415 }
416
417
418 void MacroAssembler::StoreRoot(Register source,
419 Heap::RootListIndex index,
420 Condition cond) {
421 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
422 str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
423 }
424
425
426 void MacroAssembler::InNewSpace(Register object,
427 Register scratch,
428 Condition cond,
429 Label* branch) {
430 DCHECK(cond == eq || cond == ne);
431 CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
432 }
433
434
435 void MacroAssembler::RecordWriteField(
436 Register object,
437 int offset,
438 Register value,
439 Register dst,
440 LinkRegisterStatus lr_status,
441 SaveFPRegsMode save_fp,
442 RememberedSetAction remembered_set_action,
443 SmiCheck smi_check,
444 PointersToHereCheck pointers_to_here_check_for_value) {
445 // First, check if a write barrier is even needed. The tests below
446 // catch stores of Smis.
447 Label done;
448
449 // Skip barrier if writing a smi.
450 if (smi_check == INLINE_SMI_CHECK) {
451 JumpIfSmi(value, &done);
452 }
453
454 // Although the object register is tagged, the offset is relative to the start
455 // of the object, so the offset must be a multiple of kPointerSize.
456 DCHECK(IsAligned(offset, kPointerSize));
457
458 add(dst, object, Operand(offset - kHeapObjectTag));
459 if (emit_debug_code()) {
460 Label ok;
461 tst(dst, Operand((1 << kPointerSizeLog2) - 1));
462 b(eq, &ok);
463 stop("Unaligned cell in write barrier");
464 bind(&ok);
465 }
466
467 RecordWrite(object,
468 dst,
469 value,
470 lr_status,
471 save_fp,
472 remembered_set_action,
473 OMIT_SMI_CHECK,
474 pointers_to_here_check_for_value);
475
476 bind(&done);
477
478 // Clobber clobbered input registers when running with the debug-code flag
479 // turned on to provoke errors.
480 if (emit_debug_code()) {
481 mov(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
482 mov(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
483 }
484 }
485
486
487 // Will clobber 4 registers: object, map, dst, ip. The
488 // register 'object' contains a heap object pointer.
489 void MacroAssembler::RecordWriteForMap(Register object,
490 Register map,
491 Register dst,
492 LinkRegisterStatus lr_status,
493 SaveFPRegsMode fp_mode) {
494 if (emit_debug_code()) {
495 ldr(dst, FieldMemOperand(map, HeapObject::kMapOffset));
496 cmp(dst, Operand(isolate()->factory()->meta_map()));
497 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
498 }
499
500 if (!FLAG_incremental_marking) {
501 return;
502 }
503
504 if (emit_debug_code()) {
505 ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset));
506 cmp(ip, map);
507 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
508 }
509
510 Label done;
511
512 // A single check of the map's page's interesting flag suffices, since it is
513 // only set during incremental collection, and then it's also guaranteed that
514 // the from object's page's interesting flag is also set. This optimization
515 // relies on the fact that maps can never be in new space.
516 CheckPageFlag(map,
517 map, // Used as scratch.
518 MemoryChunk::kPointersToHereAreInterestingMask,
519 eq,
520 &done);
521
522 add(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
523 if (emit_debug_code()) {
524 Label ok;
525 tst(dst, Operand((1 << kPointerSizeLog2) - 1));
526 b(eq, &ok);
527 stop("Unaligned cell in write barrier");
528 bind(&ok);
529 }
530
531 // Record the actual write.
532 if (lr_status == kLRHasNotBeenSaved) {
533 push(lr);
534 }
535 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
536 fp_mode);
537 CallStub(&stub);
538 if (lr_status == kLRHasNotBeenSaved) {
539 pop(lr);
540 }
541
542 bind(&done);
543
544 // Count number of write barriers in generated code.
545 isolate()->counters()->write_barriers_static()->Increment();
546 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
547
548 // Clobber clobbered registers when running with the debug-code flag
549 // turned on to provoke errors.
550 if (emit_debug_code()) {
551 mov(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
552 mov(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
553 }
554 }
555
556
557 // Will clobber 4 registers: object, address, scratch, ip. The
558 // register 'object' contains a heap object pointer. The heap object
559 // tag is shifted away.
560 void MacroAssembler::RecordWrite(
561 Register object,
562 Register address,
563 Register value,
564 LinkRegisterStatus lr_status,
565 SaveFPRegsMode fp_mode,
566 RememberedSetAction remembered_set_action,
567 SmiCheck smi_check,
568 PointersToHereCheck pointers_to_here_check_for_value) {
569 DCHECK(!object.is(value));
570 if (emit_debug_code()) {
571 ldr(ip, MemOperand(address));
572 cmp(ip, value);
573 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
574 }
575
576 if (remembered_set_action == OMIT_REMEMBERED_SET &&
577 !FLAG_incremental_marking) {
578 return;
579 }
580
581 // First, check if a write barrier is even needed. The tests below
582 // catch stores of smis and stores into the young generation.
583 Label done;
584
585 if (smi_check == INLINE_SMI_CHECK) {
586 JumpIfSmi(value, &done);
587 }
588
589 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
590 CheckPageFlag(value,
591 value, // Used as scratch.
592 MemoryChunk::kPointersToHereAreInterestingMask,
593 eq,
594 &done);
595 }
596 CheckPageFlag(object,
597 value, // Used as scratch.
598 MemoryChunk::kPointersFromHereAreInterestingMask,
599 eq,
600 &done);
601
602 // Record the actual write.
603 if (lr_status == kLRHasNotBeenSaved) {
604 push(lr);
605 }
606 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
607 fp_mode);
608 CallStub(&stub);
609 if (lr_status == kLRHasNotBeenSaved) {
610 pop(lr);
611 }
612
613 bind(&done);
614
615 // Count number of write barriers in generated code.
616 isolate()->counters()->write_barriers_static()->Increment();
617 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
618 value);
619
620 // Clobber clobbered registers when running with the debug-code flag
621 // turned on to provoke errors.
622 if (emit_debug_code()) {
623 mov(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
624 mov(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
625 }
626 }
627
628 void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
629 Register code_entry,
630 Register scratch) {
631 const int offset = JSFunction::kCodeEntryOffset;
632
633 // Since a code entry (value) is always in old space, we don't need to update
634 // remembered set. If incremental marking is off, there is nothing for us to
635 // do.
636 if (!FLAG_incremental_marking) return;
637
638 DCHECK(js_function.is(r1));
639 DCHECK(code_entry.is(r4));
640 DCHECK(scratch.is(r5));
641 AssertNotSmi(js_function);
642
643 if (emit_debug_code()) {
644 add(scratch, js_function, Operand(offset - kHeapObjectTag));
645 ldr(ip, MemOperand(scratch));
646 cmp(ip, code_entry);
647 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
648 }
649
650 // First, check if a write barrier is even needed. The tests below
651 // catch stores of Smis and stores into young gen.
652 Label done;
653
654 CheckPageFlag(code_entry, scratch,
655 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
656 CheckPageFlag(js_function, scratch,
657 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
658
659 const Register dst = scratch;
660 add(dst, js_function, Operand(offset - kHeapObjectTag));
661
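// code_entry is not part of kCallerSaved (see the DCHECK below), so preserve
// it explicitly around the C call.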
662 push(code_entry);
663
664 // Save caller-saved registers, which includes js_function.
665 DCHECK((kCallerSaved & js_function.bit()) != 0);
666 DCHECK_EQ(kCallerSaved & code_entry.bit(), 0u);
667 stm(db_w, sp, (kCallerSaved | lr.bit()));
668
669 int argument_count = 3;
670 PrepareCallCFunction(argument_count, code_entry);
671
672 mov(r0, js_function);
673 mov(r1, dst);
674 mov(r2, Operand(ExternalReference::isolate_address(isolate())));
675
676 {
677 AllowExternalCallThatCantCauseGC scope(this);
678 CallCFunction(
679 ExternalReference::incremental_marking_record_write_code_entry_function(
680 isolate()),
681 argument_count);
682 }
683
684 // Restore caller-saved registers (including js_function and code_entry).
685 ldm(ia_w, sp, (kCallerSaved | lr.bit()));
686
687 pop(code_entry);
688
689 bind(&done);
690 }
691
692 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
693 Register address,
694 Register scratch,
695 SaveFPRegsMode fp_mode,
696 RememberedSetFinalAction and_then) {
697 Label done;
698 if (emit_debug_code()) {
699 Label ok;
700 JumpIfNotInNewSpace(object, scratch, &ok);
701 stop("Remembered set pointer is in new space");
702 bind(&ok);
703 }
704 // Load store buffer top.
705 ExternalReference store_buffer =
706 ExternalReference::store_buffer_top(isolate());
707 mov(ip, Operand(store_buffer));
708 ldr(scratch, MemOperand(ip));
709 // Store pointer to buffer and increment buffer top.
710 str(address, MemOperand(scratch, kPointerSize, PostIndex));
711 // Write back new top of buffer.
712 str(scratch, MemOperand(ip));
713 // Call stub on end of buffer.
714 // Check for end of buffer.
715 tst(scratch, Operand(StoreBuffer::kStoreBufferMask));
716 if (and_then == kFallThroughAtEnd) {
717 b(ne, &done);
718 } else {
719 DCHECK(and_then == kReturnAtEnd);
720 Ret(ne);
721 }
722 push(lr);
723 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
724 CallStub(&store_buffer_overflow);
725 pop(lr);
726 bind(&done);
727 if (and_then == kReturnAtEnd) {
728 Ret();
729 }
730 }
731
732 void MacroAssembler::PushCommonFrame(Register marker_reg) {
733 if (marker_reg.is_valid()) {
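// stm stores registers in ascending register-code order, so the marker can
// only be folded into the stm when its code is lower than those of the other
// saved registers; otherwise it is pushed separately afterwards.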
734 if (FLAG_enable_embedded_constant_pool) {
735 if (marker_reg.code() > pp.code()) {
736 stm(db_w, sp, pp.bit() | fp.bit() | lr.bit());
737 add(fp, sp, Operand(kPointerSize));
738 Push(marker_reg);
739 } else {
740 stm(db_w, sp, marker_reg.bit() | pp.bit() | fp.bit() | lr.bit());
741 add(fp, sp, Operand(2 * kPointerSize));
742 }
743 } else {
744 if (marker_reg.code() > fp.code()) {
745 stm(db_w, sp, fp.bit() | lr.bit());
746 mov(fp, Operand(sp));
747 Push(marker_reg);
748 } else {
749 stm(db_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
750 add(fp, sp, Operand(kPointerSize));
751 }
752 }
753 } else {
754 stm(db_w, sp, (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
755 fp.bit() | lr.bit());
756 add(fp, sp, Operand(FLAG_enable_embedded_constant_pool ? kPointerSize : 0));
757 }
758 }
759
760 void MacroAssembler::PopCommonFrame(Register marker_reg) {
761 if (marker_reg.is_valid()) {
762 if (FLAG_enable_embedded_constant_pool) {
763 if (marker_reg.code() > pp.code()) {
764 pop(marker_reg);
765 ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
766 } else {
767 ldm(ia_w, sp, marker_reg.bit() | pp.bit() | fp.bit() | lr.bit());
768 }
769 } else {
770 if (marker_reg.code() > fp.code()) {
771 pop(marker_reg);
772 ldm(ia_w, sp, fp.bit() | lr.bit());
773 } else {
774 ldm(ia_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
775 }
776 }
777 } else {
778 ldm(ia_w, sp, (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
779 fp.bit() | lr.bit());
780 }
781 }
782
783 void MacroAssembler::PushStandardFrame(Register function_reg) {
784 DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
785 stm(db_w, sp, (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() |
786 (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
787 fp.bit() | lr.bit());
788 int offset = -StandardFrameConstants::kContextOffset;
789 offset += function_reg.is_valid() ? kPointerSize : 0;
790 add(fp, sp, Operand(offset));
791 }
792
793
794 // Push and pop all registers that can hold pointers.
795 void MacroAssembler::PushSafepointRegisters() {
796 // Safepoints expect a block of contiguous register values starting with r0,
797 // except when FLAG_enable_embedded_constant_pool is set, which omits pp.
798 DCHECK(kSafepointSavedRegisters ==
799 (FLAG_enable_embedded_constant_pool
800 ? ((1 << (kNumSafepointSavedRegisters + 1)) - 1) & ~pp.bit()
801 : (1 << kNumSafepointSavedRegisters) - 1));
802 // Safepoints expect a block of kNumSafepointRegisters values on the
803 // stack, so adjust the stack for unsaved registers.
804 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
805 DCHECK(num_unsaved >= 0);
806 sub(sp, sp, Operand(num_unsaved * kPointerSize));
807 stm(db_w, sp, kSafepointSavedRegisters);
808 }
809
810
811 void MacroAssembler::PopSafepointRegisters() {
812 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
813 ldm(ia_w, sp, kSafepointSavedRegisters);
814 add(sp, sp, Operand(num_unsaved * kPointerSize));
815 }
816
817
818 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
819 str(src, SafepointRegisterSlot(dst));
820 }
821
822
823 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
824 ldr(dst, SafepointRegisterSlot(src));
825 }
826
827
828 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
829 // The registers are pushed starting with the highest encoding,
830 // which means that lowest encodings are closest to the stack pointer.
831 if (FLAG_enable_embedded_constant_pool && reg_code > pp.code()) {
832 // RegList omits pp.
833 reg_code -= 1;
834 }
835 DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
836 return reg_code;
837 }
838
839
840 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
841 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
842 }
843
844
845 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
846 // Number of d-regs not known at snapshot time.
847 DCHECK(!serializer_enabled());
848 // General purpose registers are pushed last on the stack.
849 const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
850 int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
851 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
852 return MemOperand(sp, doubles_size + register_offset);
853 }
854
855
856 void MacroAssembler::Ldrd(Register dst1, Register dst2,
857 const MemOperand& src, Condition cond) {
858 DCHECK(src.rm().is(no_reg));
859 DCHECK(!dst1.is(lr)); // r14.
860
861 // V8 does not use this addressing mode, so the fallback code
862 // below doesn't support it yet.
863 DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex));
864
865 // Generate two ldr instructions if ldrd is not applicable.
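// ldrd requires an even-numbered first register paired with the next
// consecutive register (e.g. r0/r1); any other combination falls back to two
// ldr instructions.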
866 if ((dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
867 ldrd(dst1, dst2, src, cond);
868 } else {
869 if ((src.am() == Offset) || (src.am() == NegOffset)) {
870 MemOperand src2(src);
871 src2.set_offset(src2.offset() + 4);
872 if (dst1.is(src.rn())) {
873 ldr(dst2, src2, cond);
874 ldr(dst1, src, cond);
875 } else {
876 ldr(dst1, src, cond);
877 ldr(dst2, src2, cond);
878 }
879 } else { // PostIndex or NegPostIndex.
880 DCHECK((src.am() == PostIndex) || (src.am() == NegPostIndex));
881 if (dst1.is(src.rn())) {
882 ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
883 ldr(dst1, src, cond);
884 } else {
885 MemOperand src2(src);
886 src2.set_offset(src2.offset() - 4);
887 ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
888 ldr(dst2, src2, cond);
889 }
890 }
891 }
892 }
893
894
895 void MacroAssembler::Strd(Register src1, Register src2,
896 const MemOperand& dst, Condition cond) {
897 DCHECK(dst.rm().is(no_reg));
898 DCHECK(!src1.is(lr)); // r14.
899
900 // V8 does not use this addressing mode, so the fallback code
901 // below doesn't support it yet.
902 DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
903
904 // Generate two str instructions if strd is not applicable.
905 if ((src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
906 strd(src1, src2, dst, cond);
907 } else {
908 MemOperand dst2(dst);
909 if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
910 dst2.set_offset(dst2.offset() + 4);
911 str(src1, dst, cond);
912 str(src2, dst2, cond);
913 } else { // PostIndex or NegPostIndex.
914 DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
915 dst2.set_offset(dst2.offset() - 4);
916 str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
917 str(src2, dst2, cond);
918 }
919 }
920 }
921
922 void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
923 const DwVfpRegister src,
924 const Condition cond) {
925 // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
926 // become quiet NaNs. We use vsub rather than vadd because vsub preserves -0.0
927 // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
928 vsub(dst, src, kDoubleRegZero, cond);
929 }
930
931
932 void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
933 const SwVfpRegister src2,
934 const Condition cond) {
935 // Compare and move FPSCR flags to the normal condition flags.
936 VFPCompareAndLoadFlags(src1, src2, pc, cond);
937 }
938
939 void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
940 const float src2,
941 const Condition cond) {
942 // Compare and move FPSCR flags to the normal condition flags.
943 VFPCompareAndLoadFlags(src1, src2, pc, cond);
944 }
945
946
947 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
948 const DwVfpRegister src2,
949 const Condition cond) {
950 // Compare and move FPSCR flags to the normal condition flags.
951 VFPCompareAndLoadFlags(src1, src2, pc, cond);
952 }
953
954 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
955 const double src2,
956 const Condition cond) {
957 // Compare and move FPSCR flags to the normal condition flags.
958 VFPCompareAndLoadFlags(src1, src2, pc, cond);
959 }
960
961
962 void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
963 const SwVfpRegister src2,
964 const Register fpscr_flags,
965 const Condition cond) {
966 // Compare and load FPSCR.
967 vcmp(src1, src2, cond);
968 vmrs(fpscr_flags, cond);
969 }
970
971 void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
972 const float src2,
973 const Register fpscr_flags,
974 const Condition cond) {
975 // Compare and load FPSCR.
976 vcmp(src1, src2, cond);
977 vmrs(fpscr_flags, cond);
978 }
979
980
981 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
982 const DwVfpRegister src2,
983 const Register fpscr_flags,
984 const Condition cond) {
985 // Compare and load FPSCR.
986 vcmp(src1, src2, cond);
987 vmrs(fpscr_flags, cond);
988 }
989
990 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
991 const double src2,
992 const Register fpscr_flags,
993 const Condition cond) {
994 // Compare and load FPSCR.
995 vcmp(src1, src2, cond);
996 vmrs(fpscr_flags, cond);
997 }
998
999
1000 void MacroAssembler::Vmov(const DwVfpRegister dst,
1001 const double imm,
1002 const Register scratch) {
1003 int64_t imm_bits = bit_cast<int64_t>(imm);
1004 // Handle special values first.
1005 if (imm_bits == bit_cast<int64_t>(0.0)) {
1006 vmov(dst, kDoubleRegZero);
1007 } else if (imm_bits == bit_cast<int64_t>(-0.0)) {
1008 vneg(dst, kDoubleRegZero);
1009 } else {
1010 vmov(dst, imm, scratch);
1011 }
1012 }
1013
1014
1015 void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
1016 if (src.code() < 16) {
1017 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
1018 vmov(dst, loc.high());
1019 } else {
1020 vmov(dst, VmovIndexHi, src);
1021 }
1022 }
1023
1024
1025 void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
1026 if (dst.code() < 16) {
1027 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
1028 vmov(loc.high(), src);
1029 } else {
1030 vmov(dst, VmovIndexHi, src);
1031 }
1032 }
1033
1034
1035 void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
1036 if (src.code() < 16) {
1037 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
1038 vmov(dst, loc.low());
1039 } else {
1040 vmov(dst, VmovIndexLo, src);
1041 }
1042 }
1043
1044
1045 void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
1046 if (dst.code() < 16) {
1047 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
1048 vmov(loc.low(), src);
1049 } else {
1050 vmov(dst, VmovIndexLo, src);
1051 }
1052 }
1053
1054 void MacroAssembler::VmovExtended(Register dst, int src_code) {
1055 DCHECK_LE(32, src_code);
1056 DCHECK_GT(64, src_code);
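// Register codes 32..63 name the 32-bit halves of d16..d31, which have no
// s-register aliases; bit 0 of the code selects the high or low half.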
1057 if (src_code & 0x1) {
1058 VmovHigh(dst, DwVfpRegister::from_code(src_code / 2));
1059 } else {
1060 VmovLow(dst, DwVfpRegister::from_code(src_code / 2));
1061 }
1062 }
1063
1064 void MacroAssembler::VmovExtended(int dst_code, Register src) {
1065 DCHECK_LE(32, dst_code);
1066 DCHECK_GT(64, dst_code);
1067 if (dst_code & 0x1) {
1068 VmovHigh(DwVfpRegister::from_code(dst_code / 2), src);
1069 } else {
1070 VmovLow(DwVfpRegister::from_code(dst_code / 2), src);
1071 }
1072 }
1073
1074 void MacroAssembler::VmovExtended(int dst_code, int src_code,
1075 Register scratch) {
1076 if (src_code < 32 && dst_code < 32) {
1077 // src and dst are both s-registers.
1078 vmov(SwVfpRegister::from_code(dst_code),
1079 SwVfpRegister::from_code(src_code));
1080 } else if (src_code < 32) {
1081 // src is an s-register.
1082 vmov(scratch, SwVfpRegister::from_code(src_code));
1083 VmovExtended(dst_code, scratch);
1084 } else if (dst_code < 32) {
1085 // dst is an s-register.
1086 VmovExtended(scratch, src_code);
1087 vmov(SwVfpRegister::from_code(dst_code), scratch);
1088 } else {
1089 // Neither src or dst are s-registers.
1090 DCHECK_GT(64, src_code);
1091 DCHECK_GT(64, dst_code);
1092 VmovExtended(scratch, src_code);
1093 VmovExtended(dst_code, scratch);
1094 }
1095 }
1096
1097 void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src,
1098 Register scratch) {
1099 if (dst_code >= 32) {
1100 ldr(scratch, src);
1101 VmovExtended(dst_code, scratch);
1102 } else {
1103 vldr(SwVfpRegister::from_code(dst_code), src);
1104 }
1105 }
1106
1107 void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code,
1108 Register scratch) {
1109 if (src_code >= 32) {
1110 VmovExtended(scratch, src_code);
1111 str(scratch, dst);
1112 } else {
1113 vstr(SwVfpRegister::from_code(src_code), dst);
1114 }
1115 }
1116
1117 void MacroAssembler::LslPair(Register dst_low, Register dst_high,
1118 Register src_low, Register src_high,
1119 Register scratch, Register shift) {
1120 DCHECK(!AreAliased(dst_high, src_low));
1121 DCHECK(!AreAliased(dst_high, shift));
1122
1123 Label less_than_32;
1124 Label done;
1125 rsb(scratch, shift, Operand(32), SetCC);
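// scratch = 32 - shift; a positive result (gt) means the shift amount is
// less than 32.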
1126 b(gt, &less_than_32);
1127 // If shift >= 32
1128 and_(scratch, shift, Operand(0x1f));
1129 lsl(dst_high, src_low, Operand(scratch));
1130 mov(dst_low, Operand(0));
1131 jmp(&done);
1132 bind(&less_than_32);
1133 // If shift < 32
1134 lsl(dst_high, src_high, Operand(shift));
1135 orr(dst_high, dst_high, Operand(src_low, LSR, scratch));
1136 lsl(dst_low, src_low, Operand(shift));
1137 bind(&done);
1138 }
1139
1140 void MacroAssembler::LslPair(Register dst_low, Register dst_high,
1141 Register src_low, Register src_high,
1142 uint32_t shift) {
1143 DCHECK(!AreAliased(dst_high, src_low));
1144 Label less_than_32;
1145 Label done;
1146 if (shift == 0) {
1147 Move(dst_high, src_high);
1148 Move(dst_low, src_low);
1149 } else if (shift == 32) {
1150 Move(dst_high, src_low);
1151 Move(dst_low, Operand(0));
1152 } else if (shift >= 32) {
1153 shift &= 0x1f;
1154 lsl(dst_high, src_low, Operand(shift));
1155 mov(dst_low, Operand(0));
1156 } else {
1157 lsl(dst_high, src_high, Operand(shift));
1158 orr(dst_high, dst_high, Operand(src_low, LSR, 32 - shift));
1159 lsl(dst_low, src_low, Operand(shift));
1160 }
1161 }
1162
1163 void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
1164 Register src_low, Register src_high,
1165 Register scratch, Register shift) {
1166 DCHECK(!AreAliased(dst_low, src_high));
1167 DCHECK(!AreAliased(dst_low, shift));
1168
1169 Label less_than_32;
1170 Label done;
1171 rsb(scratch, shift, Operand(32), SetCC);
1172 b(gt, &less_than_32);
1173 // If shift >= 32
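// For shifts of 32 or more, the low word receives src_high shifted right by
// (shift & 31) and the high word becomes zero.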
1174 and_(scratch, shift, Operand(0x1f));
1175 lsr(dst_low, src_high, Operand(scratch));
1176 mov(dst_high, Operand(0));
1177 jmp(&done);
1178 bind(&less_than_32);
1179 // If shift < 32
1180
1181 lsr(dst_low, src_low, Operand(shift));
1182 orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
1183 lsr(dst_high, src_high, Operand(shift));
1184 bind(&done);
1185 }
1186
1187 void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
1188 Register src_low, Register src_high,
1189 uint32_t shift) {
1190 DCHECK(!AreAliased(dst_low, src_high));
1191 Label less_than_32;
1192 Label done;
1193 if (shift == 32) {
1194 mov(dst_low, src_high);
1195 mov(dst_high, Operand(0));
1196 } else if (shift > 32) {
1197 shift &= 0x1f;
1198 lsr(dst_low, src_high, Operand(shift));
1199 mov(dst_high, Operand(0));
1200 } else if (shift == 0) {
1201 Move(dst_low, src_low);
1202 Move(dst_high, src_high);
1203 } else {
1204 lsr(dst_low, src_low, Operand(shift));
1205 orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
1206 lsr(dst_high, src_high, Operand(shift));
1207 }
1208 }
1209
1210 void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
1211 Register src_low, Register src_high,
1212 Register scratch, Register shift) {
1213 DCHECK(!AreAliased(dst_low, src_high));
1214 DCHECK(!AreAliased(dst_low, shift));
1215
1216 Label less_than_32;
1217 Label done;
1218 rsb(scratch, shift, Operand(32), SetCC);
1219 b(gt, &less_than_32);
1220 // If shift >= 32
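// For shifts of 32 or more, the low word receives src_high arithmetically
// shifted by (shift & 31) and the high word is filled with the sign bit.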
1221 and_(scratch, shift, Operand(0x1f));
1222 asr(dst_low, src_high, Operand(scratch));
1223 asr(dst_high, src_high, Operand(31));
1224 jmp(&done);
1225 bind(&less_than_32);
1226 // If shift < 32
1227 lsr(dst_low, src_low, Operand(shift));
1228 orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
1229 asr(dst_high, src_high, Operand(shift));
1230 bind(&done);
1231 }
1232
1233 void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
1234 Register src_low, Register src_high,
1235 uint32_t shift) {
1236 DCHECK(!AreAliased(dst_low, src_high));
1237 Label less_than_32;
1238 Label done;
1239 if (shift == 32) {
1240 mov(dst_low, src_high);
1241 asr(dst_high, src_high, Operand(31));
1242 } else if (shift > 32) {
1243 shift &= 0x1f;
1244 asr(dst_low, src_high, Operand(shift));
1245 asr(dst_high, src_high, Operand(31));
1246 } else if (shift == 0) {
1247 Move(dst_low, src_low);
1248 Move(dst_high, src_high);
1249 } else {
1250 lsr(dst_low, src_low, Operand(shift));
1251 orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
1252 asr(dst_high, src_high, Operand(shift));
1253 }
1254 }
1255
1256 void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
1257 Register code_target_address) {
1258 DCHECK(FLAG_enable_embedded_constant_pool);
1259 ldr(pp, MemOperand(code_target_address,
1260 Code::kConstantPoolOffset - Code::kHeaderSize));
1261 add(pp, pp, code_target_address);
1262 }
1263
1264
1265 void MacroAssembler::LoadConstantPoolPointerRegister() {
1266 DCHECK(FLAG_enable_embedded_constant_pool);
1267 int entry_offset = pc_offset() + Instruction::kPCReadOffset;
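// Reading pc yields the current instruction address plus the read-ahead
// (Instruction::kPCReadOffset), so subtracting the current offset plus that
// bias gives the start address of the code object being generated.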
1268 sub(ip, pc, Operand(entry_offset));
1269 LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
1270 }
1271
1272 void MacroAssembler::StubPrologue(StackFrame::Type type) {
1273 mov(ip, Operand(Smi::FromInt(type)));
1274 PushCommonFrame(ip);
1275 if (FLAG_enable_embedded_constant_pool) {
1276 LoadConstantPoolPointerRegister();
1277 set_constant_pool_available(true);
1278 }
1279 }
1280
1281 void MacroAssembler::Prologue(bool code_pre_aging) {
1282 { PredictableCodeSizeScope predictable_code_size_scope(
1283 this, kNoCodeAgeSequenceLength);
1284 // The following three instructions must remain together and unmodified
1285 // for code aging to work properly.
1286 if (code_pre_aging) {
1287 // Pre-age the code.
1288 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
1289 add(r0, pc, Operand(-8));
1290 ldr(pc, MemOperand(pc, -4));
1291 emit_code_stub_address(stub);
1292 } else {
1293 PushStandardFrame(r1);
1294 nop(ip.code());
1295 }
1296 }
1297 if (FLAG_enable_embedded_constant_pool) {
1298 LoadConstantPoolPointerRegister();
1299 set_constant_pool_available(true);
1300 }
1301 }
1302
1303
1304 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
1305 ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
1306 ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
1307 ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
1308 }
1309
1310
1311 void MacroAssembler::EnterFrame(StackFrame::Type type,
1312 bool load_constant_pool_pointer_reg) {
1313 // r0-r3: preserved
1314 mov(ip, Operand(Smi::FromInt(type)));
1315 PushCommonFrame(ip);
1316 if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
1317 LoadConstantPoolPointerRegister();
1318 }
1319 if (type == StackFrame::INTERNAL) {
1320 mov(ip, Operand(CodeObject()));
1321 push(ip);
1322 }
1323 }
1324
1325
1326 int MacroAssembler::LeaveFrame(StackFrame::Type type) {
1327 // r0: preserved
1328 // r1: preserved
1329 // r2: preserved
1330
1331 // Drop the execution stack down to the frame pointer and restore
1332 // the caller frame pointer, return address and constant pool pointer
1333 // (if FLAG_enable_embedded_constant_pool).
1334 int frame_ends;
1335 if (FLAG_enable_embedded_constant_pool) {
1336 add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
1337 frame_ends = pc_offset();
1338 ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
1339 } else {
1340 mov(sp, fp);
1341 frame_ends = pc_offset();
1342 ldm(ia_w, sp, fp.bit() | lr.bit());
1343 }
1344 return frame_ends;
1345 }
1346
1347 void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
1348 Register argc) {
1349 Push(lr, fp, context, target);
1350 add(fp, sp, Operand(2 * kPointerSize));
1351 Push(argc);
1352 }
1353
1354 void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
1355 Register argc) {
1356 Pop(argc);
1357 Pop(lr, fp, context, target);
1358 }
1359
1360 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
1361 StackFrame::Type frame_type) {
1362 DCHECK(frame_type == StackFrame::EXIT ||
1363 frame_type == StackFrame::BUILTIN_EXIT);
1364
1365 // Set up the frame structure on the stack.
1366 DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
1367 DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
1368 DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
1369 mov(ip, Operand(Smi::FromInt(frame_type)));
1370 PushCommonFrame(ip);
1371 // Reserve room for saved entry sp and code object.
1372 sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
1373 if (emit_debug_code()) {
1374 mov(ip, Operand::Zero());
1375 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
1376 }
1377 if (FLAG_enable_embedded_constant_pool) {
1378 str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
1379 }
1380 mov(ip, Operand(CodeObject()));
1381 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
1382
1383 // Save the frame pointer and the context in top.
1384 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1385 str(fp, MemOperand(ip));
1386 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1387 str(cp, MemOperand(ip));
1388
1389 // Optionally save all double registers.
1390 if (save_doubles) {
1391 SaveFPRegs(sp, ip);
1392 // Note that d0 will be accessible at
1393 // fp - ExitFrameConstants::kFrameSize -
1394 // DwVfpRegister::kMaxNumRegisters * kDoubleSize,
1395 // since the sp slot, code slot and constant pool slot (if
1396 // FLAG_enable_embedded_constant_pool) were pushed after the fp.
1397 }
1398
1399 // Reserve space for the return address and the requested stack space, and
1400 // align the frame in preparation for calling the runtime function.
1401 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1402 sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
1403 if (frame_alignment > 0) {
1404 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
1405 and_(sp, sp, Operand(-frame_alignment));
1406 }
1407
1408 // Set the exit frame sp value to point just before the return address
1409 // location.
1410 add(ip, sp, Operand(kPointerSize));
1411 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
1412 }
1413
1414
1415 void MacroAssembler::InitializeNewString(Register string,
1416 Register length,
1417 Heap::RootListIndex map_index,
1418 Register scratch1,
1419 Register scratch2) {
1420 SmiTag(scratch1, length);
1421 LoadRoot(scratch2, map_index);
1422 str(scratch1, FieldMemOperand(string, String::kLengthOffset));
1423 mov(scratch1, Operand(String::kEmptyHashField));
1424 str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1425 str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
1426 }
1427
1428
1429 int MacroAssembler::ActivationFrameAlignment() {
1430 #if V8_HOST_ARCH_ARM
1431 // Running on the real platform. Use the alignment as mandated by the local
1432 // environment.
1433 // Note: This will break if we ever start generating snapshots on one ARM
1434 // platform for another ARM platform with a different alignment.
1435 return base::OS::ActivationFrameAlignment();
1436 #else // V8_HOST_ARCH_ARM
1437 // If we are using the simulator then we should always align to the expected
1438 // alignment. As the simulator is used to generate snapshots we do not know
1439 // if the target platform will need alignment, so this is controlled from a
1440 // flag.
1441 return FLAG_sim_stack_alignment;
1442 #endif // V8_HOST_ARCH_ARM
1443 }
1444
1445
1446 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
1447 bool restore_context,
1448 bool argument_count_is_length) {
1449 ConstantPoolUnavailableScope constant_pool_unavailable(this);
1450
1451 // Optionally restore all double registers.
1452 if (save_doubles) {
1453 // Calculate the stack location of the saved doubles and restore them.
1454 const int offset = ExitFrameConstants::kFixedFrameSizeFromFp;
1455 sub(r3, fp,
1456 Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
1457 RestoreFPRegs(r3, ip);
1458 }
1459
1460 // Clear top frame.
1461 mov(r3, Operand::Zero());
1462 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1463 str(r3, MemOperand(ip));
1464
1465 // Restore current context from top and clear it in debug mode.
1466 if (restore_context) {
1467 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1468 ldr(cp, MemOperand(ip));
1469 }
1470 #ifdef DEBUG
1471 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1472 str(r3, MemOperand(ip));
1473 #endif
1474
1475 // Tear down the exit frame, pop the arguments, and return.
1476 if (FLAG_enable_embedded_constant_pool) {
1477 ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
1478 }
1479 mov(sp, Operand(fp));
1480 ldm(ia_w, sp, fp.bit() | lr.bit());
1481 if (argument_count.is_valid()) {
1482 if (argument_count_is_length) {
1483 add(sp, sp, argument_count);
1484 } else {
1485 add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
1486 }
1487 }
1488 }
1489
1490
1491 void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
1492 if (use_eabi_hardfloat()) {
1493 Move(dst, d0);
1494 } else {
1495 vmov(dst, r0, r1);
1496 }
1497 }
1498
1499
1500 // On ARM this is just a synonym to make the purpose clear.
1501 void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
1502 MovFromFloatResult(dst);
1503 }
1504
1505 void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
1506 Register caller_args_count_reg,
1507 Register scratch0, Register scratch1) {
1508 #if DEBUG
1509 if (callee_args_count.is_reg()) {
1510 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
1511 scratch1));
1512 } else {
1513 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
1514 }
1515 #endif
1516
1517 // Calculate the end of destination area where we will put the arguments
1518 // after we drop the current frame. We add kPointerSize to count the receiver
1519 // argument, which is not included in the formal parameter count.
1520 Register dst_reg = scratch0;
1521 add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
1522 add(dst_reg, dst_reg,
1523 Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
1524
1525 Register src_reg = caller_args_count_reg;
1526 // Calculate the end of source area. +kPointerSize is for the receiver.
1527 if (callee_args_count.is_reg()) {
1528 add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
1529 add(src_reg, src_reg, Operand(kPointerSize));
1530 } else {
1531 add(src_reg, sp,
1532 Operand((callee_args_count.immediate() + 1) * kPointerSize));
1533 }
1534
1535 if (FLAG_debug_code) {
1536 cmp(src_reg, dst_reg);
1537 Check(lo, kStackAccessBelowStackPointer);
1538 }
1539
1540 // Restore caller's frame pointer and return address now as they will be
1541 // overwritten by the copying loop.
1542 ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
1543 ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1544
1545 // Now copy the callee arguments to the caller frame, going backwards to avoid
1546 // corrupting them (the source and destination areas can overlap).
1547
1548 // Both src_reg and dst_reg are pointing to the word after the one to copy,
1549 // so they must be pre-decremented in the loop.
1550 Register tmp_reg = scratch1;
1551 Label loop, entry;
1552 b(&entry);
1553 bind(&loop);
1554 ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
1555 str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
1556 bind(&entry);
1557 cmp(sp, src_reg);
1558 b(ne, &loop);
1559
1560 // Leave current frame.
1561 mov(sp, dst_reg);
1562 }
1563
1564 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1565 const ParameterCount& actual,
1566 Label* done,
1567 bool* definitely_mismatches,
1568 InvokeFlag flag,
1569 const CallWrapper& call_wrapper) {
1570 bool definitely_matches = false;
1571 *definitely_mismatches = false;
1572 Label regular_invoke;
1573
1574 // Check whether the expected and actual arguments count match. If not,
1575 // set up registers according to the contract with ArgumentsAdaptorTrampoline:
1576 // r0: actual arguments count
1577 // r1: function (passed through to callee)
1578 // r2: expected arguments count
1579
1580 // The code below is made a lot easier because the calling code already sets
1581 // up actual and expected registers according to the contract if values are
1582 // passed in registers.
1583 DCHECK(actual.is_immediate() || actual.reg().is(r0));
1584 DCHECK(expected.is_immediate() || expected.reg().is(r2));
1585
1586 if (expected.is_immediate()) {
1587 DCHECK(actual.is_immediate());
1588 mov(r0, Operand(actual.immediate()));
1589 if (expected.immediate() == actual.immediate()) {
1590 definitely_matches = true;
1591 } else {
1592 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1593 if (expected.immediate() == sentinel) {
1594 // Don't worry about adapting arguments for builtins that
1595         // don't want that done. Skip adaptation code by making it look
1596 // like we have a match between expected and actual number of
1597 // arguments.
1598 definitely_matches = true;
1599 } else {
1600 *definitely_mismatches = true;
1601 mov(r2, Operand(expected.immediate()));
1602 }
1603 }
1604 } else {
1605 if (actual.is_immediate()) {
1606 mov(r0, Operand(actual.immediate()));
1607 cmp(expected.reg(), Operand(actual.immediate()));
1608       b(eq, &regular_invoke);
1609 } else {
1610 cmp(expected.reg(), Operand(actual.reg()));
1611       b(eq, &regular_invoke);
1612 }
1613 }
1614
1615 if (!definitely_matches) {
1616 Handle<Code> adaptor =
1617 isolate()->builtins()->ArgumentsAdaptorTrampoline();
1618 if (flag == CALL_FUNCTION) {
1619 call_wrapper.BeforeCall(CallSize(adaptor));
1620 Call(adaptor);
1621 call_wrapper.AfterCall();
1622 if (!*definitely_mismatches) {
1623 b(done);
1624 }
1625 } else {
1626 Jump(adaptor, RelocInfo::CODE_TARGET);
1627 }
1628     bind(&regular_invoke);
1629 }
1630 }
1631
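// In outline (a sketch of the decision logic above, not additional code):
//   if expected is an immediate:
//     r0 = actual;  // the actual count is always materialized
//     it is a definite match if expected == actual or expected is the
//     kDontAdaptArgumentsSentinel used by builtins; otherwise r2 = expected
//     and the mismatch is definite.
//   else:
//     compare expected against actual; equal falls through to the regular
//     invoke, anything else goes via the ArgumentsAdaptorTrampoline.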
1632
1633 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
1634 const ParameterCount& expected,
1635 const ParameterCount& actual) {
1636 Label skip_flooding;
1637 ExternalReference last_step_action =
1638 ExternalReference::debug_last_step_action_address(isolate());
1639 STATIC_ASSERT(StepFrame > StepIn);
1640 mov(r4, Operand(last_step_action));
1641 ldrsb(r4, MemOperand(r4));
1642 cmp(r4, Operand(StepIn));
1643 b(lt, &skip_flooding);
1644 {
1645 FrameScope frame(this,
1646 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1647 if (expected.is_reg()) {
1648 SmiTag(expected.reg());
1649 Push(expected.reg());
1650 }
1651 if (actual.is_reg()) {
1652 SmiTag(actual.reg());
1653 Push(actual.reg());
1654 }
1655 if (new_target.is_valid()) {
1656 Push(new_target);
1657 }
1658 Push(fun);
1659 Push(fun);
1660 CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
1661 Pop(fun);
1662 if (new_target.is_valid()) {
1663 Pop(new_target);
1664 }
1665 if (actual.is_reg()) {
1666 Pop(actual.reg());
1667 SmiUntag(actual.reg());
1668 }
1669 if (expected.is_reg()) {
1670 Pop(expected.reg());
1671 SmiUntag(expected.reg());
1672 }
1673 }
1674 bind(&skip_flooding);
1675 }
1676
1677
1678 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1679 const ParameterCount& expected,
1680 const ParameterCount& actual,
1681 InvokeFlag flag,
1682 const CallWrapper& call_wrapper) {
1683 // You can't call a function without a valid frame.
1684 DCHECK(flag == JUMP_FUNCTION || has_frame());
1685 DCHECK(function.is(r1));
1686 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r3));
1687
1688 if (call_wrapper.NeedsDebugStepCheck()) {
1689 FloodFunctionIfStepping(function, new_target, expected, actual);
1690 }
1691
1692 // Clear the new.target register if not given.
1693 if (!new_target.is_valid()) {
1694 LoadRoot(r3, Heap::kUndefinedValueRootIndex);
1695 }
1696
1697 Label done;
1698 bool definitely_mismatches = false;
1699 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
1700 call_wrapper);
1701 if (!definitely_mismatches) {
1702 // We call indirectly through the code field in the function to
1703 // allow recompilation to take effect without changing any of the
1704 // call sites.
1705 Register code = r4;
1706 ldr(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
1707 if (flag == CALL_FUNCTION) {
1708 call_wrapper.BeforeCall(CallSize(code));
1709 Call(code);
1710 call_wrapper.AfterCall();
1711 } else {
1712 DCHECK(flag == JUMP_FUNCTION);
1713 Jump(code);
1714 }
1715
1716 // Continue here if InvokePrologue does handle the invocation due to
1717 // mismatched parameter counts.
1718 bind(&done);
1719 }
1720 }
1721
1722
1723 void MacroAssembler::InvokeFunction(Register fun,
1724 Register new_target,
1725 const ParameterCount& actual,
1726 InvokeFlag flag,
1727 const CallWrapper& call_wrapper) {
1728 // You can't call a function without a valid frame.
1729 DCHECK(flag == JUMP_FUNCTION || has_frame());
1730
1731 // Contract with called JS functions requires that function is passed in r1.
1732 DCHECK(fun.is(r1));
1733
1734 Register expected_reg = r2;
1735 Register temp_reg = r4;
1736
1737 ldr(temp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1738 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1739 ldr(expected_reg,
1740 FieldMemOperand(temp_reg,
1741 SharedFunctionInfo::kFormalParameterCountOffset));
1742 SmiUntag(expected_reg);
1743
1744 ParameterCount expected(expected_reg);
1745 InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
1746 }
1747
1748
1749 void MacroAssembler::InvokeFunction(Register function,
1750 const ParameterCount& expected,
1751 const ParameterCount& actual,
1752 InvokeFlag flag,
1753 const CallWrapper& call_wrapper) {
1754 // You can't call a function without a valid frame.
1755 DCHECK(flag == JUMP_FUNCTION || has_frame());
1756
1757 // Contract with called JS functions requires that function is passed in r1.
1758 DCHECK(function.is(r1));
1759
1760 // Get the function and setup the context.
1761 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1762
1763 InvokeFunctionCode(r1, no_reg, expected, actual, flag, call_wrapper);
1764 }
1765
1766
1767 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1768 const ParameterCount& expected,
1769 const ParameterCount& actual,
1770 InvokeFlag flag,
1771 const CallWrapper& call_wrapper) {
1772 Move(r1, function);
1773 InvokeFunction(r1, expected, actual, flag, call_wrapper);
1774 }
1775
1776
1777 void MacroAssembler::IsObjectJSStringType(Register object,
1778 Register scratch,
1779 Label* fail) {
1780 DCHECK(kNotStringTag != 0);
1781
1782 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1783 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1784 tst(scratch, Operand(kIsNotStringMask));
1785 b(ne, fail);
1786 }
1787
1788
1789 void MacroAssembler::IsObjectNameType(Register object,
1790 Register scratch,
1791 Label* fail) {
1792 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1793 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1794 cmp(scratch, Operand(LAST_NAME_TYPE));
1795 b(hi, fail);
1796 }
1797
1798
1799 void MacroAssembler::DebugBreak() {
1800 mov(r0, Operand::Zero());
1801 mov(r1,
1802 Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
1803 CEntryStub ces(isolate(), 1);
1804 DCHECK(AllowThisStubCall(&ces));
1805 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
1806 }
1807
1808
1809 void MacroAssembler::PushStackHandler() {
1810 // Adjust this code if not the case.
1811 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
1812 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1813
1814 // Link the current handler as the next handler.
1815 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1816 ldr(r5, MemOperand(r6));
1817 push(r5);
1818
1819 // Set this new handler as the current one.
1820 str(sp, MemOperand(r6));
1821 }
1822
1823
1824 void MacroAssembler::PopStackHandler() {
1825 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1826 pop(r1);
1827 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1828 add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1829 str(r1, MemOperand(ip));
1830 }
1831
1832
1833 // Compute the hash code from the untagged key. This must be kept in sync with
1834 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
1835 // code-stubs-hydrogen.cc
1836 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1837 // First of all we assign the hash seed to scratch.
1838 LoadRoot(scratch, Heap::kHashSeedRootIndex);
1839 SmiUntag(scratch);
1840
1841 // Xor original key with a seed.
1842 eor(t0, t0, Operand(scratch));
1843
1844 // Compute the hash code from the untagged key. This must be kept in sync
1845 // with ComputeIntegerHash in utils.h.
1846 //
1847 // hash = ~hash + (hash << 15);
1848 mvn(scratch, Operand(t0));
1849 add(t0, scratch, Operand(t0, LSL, 15));
1850 // hash = hash ^ (hash >> 12);
1851 eor(t0, t0, Operand(t0, LSR, 12));
1852 // hash = hash + (hash << 2);
1853 add(t0, t0, Operand(t0, LSL, 2));
1854 // hash = hash ^ (hash >> 4);
1855 eor(t0, t0, Operand(t0, LSR, 4));
1856 // hash = hash * 2057;
1857 mov(scratch, Operand(t0, LSL, 11));
1858 add(t0, t0, Operand(t0, LSL, 3));
1859 add(t0, t0, scratch);
1860 // hash = hash ^ (hash >> 16);
1861 eor(t0, t0, Operand(t0, LSR, 16));
1862 bic(t0, t0, Operand(0xc0000000u));
1863 }
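
// Equivalent C logic, for reference: a sketch of the ComputeIntegerHash
// routine the comments above refer to. The helper name is illustrative and
// the block is deliberately kept out of the build.
#if 0
static uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;  // emitted above as hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;  // the final bic() clears the top two bits
}
#endif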
1864
1865 void MacroAssembler::Allocate(int object_size,
1866 Register result,
1867 Register scratch1,
1868 Register scratch2,
1869 Label* gc_required,
1870 AllocationFlags flags) {
1871 DCHECK(object_size <= kMaxRegularHeapObjectSize);
1872 DCHECK((flags & ALLOCATION_FOLDED) == 0);
1873 if (!FLAG_inline_new) {
1874 if (emit_debug_code()) {
1875 // Trash the registers to simulate an allocation failure.
1876 mov(result, Operand(0x7091));
1877 mov(scratch1, Operand(0x7191));
1878 mov(scratch2, Operand(0x7291));
1879 }
1880 jmp(gc_required);
1881 return;
1882 }
1883
1884 DCHECK(!AreAliased(result, scratch1, scratch2, ip));
1885
1886 // Make object size into bytes.
1887 if ((flags & SIZE_IN_WORDS) != 0) {
1888 object_size *= kPointerSize;
1889 }
1890 DCHECK_EQ(0, object_size & kObjectAlignmentMask);
1891
1892 // Check relative positions of allocation top and limit addresses.
1893 // The values must be adjacent in memory to allow the use of LDM.
1894 // Also, assert that the registers are numbered such that the values
1895 // are loaded in the correct order.
1896 ExternalReference allocation_top =
1897 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1898 ExternalReference allocation_limit =
1899 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1900
1901 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1902 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1903 DCHECK((limit - top) == kPointerSize);
1904 DCHECK(result.code() < ip.code());
1905
1906 // Set up allocation top address register.
1907 Register top_address = scratch1;
1908 // This code stores a temporary value in ip. This is OK, as the code below
1909 // does not need ip for implicit literal generation.
1910 Register alloc_limit = ip;
1911 Register result_end = scratch2;
1912 mov(top_address, Operand(allocation_top));
1913
1914 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1915 // Load allocation top into result and allocation limit into alloc_limit.
1916 ldm(ia, top_address, result.bit() | alloc_limit.bit());
1917 } else {
1918 if (emit_debug_code()) {
1919 // Assert that result actually contains top on entry.
1920 ldr(alloc_limit, MemOperand(top_address));
1921 cmp(result, alloc_limit);
1922 Check(eq, kUnexpectedAllocationTop);
1923 }
1924 // Load allocation limit. Result already contains allocation top.
1925 ldr(alloc_limit, MemOperand(top_address, limit - top));
1926 }
1927
1928 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1929 // Align the next allocation. Storing the filler map without checking top is
1930 // safe in new-space because the limit of the heap is aligned there.
1931 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1932 and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
1933 Label aligned;
1934 b(eq, &aligned);
1935 if ((flags & PRETENURE) != 0) {
1936 cmp(result, Operand(alloc_limit));
1937 b(hs, gc_required);
1938 }
1939 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1940 str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
1941 bind(&aligned);
1942 }
1943
1944 // Calculate new top and bail out if new space is exhausted. Use result
1945 // to calculate the new top. We must preserve the ip register at this
1946 // point, so we cannot just use add().
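  // Each iteration below peels off at most eight significant bits starting at
  // an even bit position, so every chunk is representable as a single ARM
  // modified immediate and each add() costs exactly one instruction.
  // For example (sketch): an aligned size of 0x108 needs one add, while
  // 0x10008 is emitted as add #0x8 followed by add #0x10000.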
1947 DCHECK(object_size > 0);
1948 Register source = result;
1949 int shift = 0;
1950 while (object_size != 0) {
1951 if (((object_size >> shift) & 0x03) == 0) {
1952 shift += 2;
1953 } else {
1954 int bits = object_size & (0xff << shift);
1955 object_size -= bits;
1956 shift += 8;
1957 Operand bits_operand(bits);
1958 DCHECK(bits_operand.instructions_required(this) == 1);
1959 add(result_end, source, bits_operand);
1960 source = result_end;
1961 }
1962 }
1963
1964 cmp(result_end, Operand(alloc_limit));
1965 b(hi, gc_required);
1966
1967 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
1968 // The top pointer is not updated for allocation folding dominators.
1969 str(result_end, MemOperand(top_address));
1970 }
1971
1972 // Tag object.
1973 add(result, result, Operand(kHeapObjectTag));
1974 }
1975
1976
1977 void MacroAssembler::Allocate(Register object_size, Register result,
1978 Register result_end, Register scratch,
1979 Label* gc_required, AllocationFlags flags) {
1980 DCHECK((flags & ALLOCATION_FOLDED) == 0);
1981 if (!FLAG_inline_new) {
1982 if (emit_debug_code()) {
1983 // Trash the registers to simulate an allocation failure.
1984 mov(result, Operand(0x7091));
1985 mov(scratch, Operand(0x7191));
1986 mov(result_end, Operand(0x7291));
1987 }
1988 jmp(gc_required);
1989 return;
1990 }
1991
1992 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
1993 // is not specified. Other registers must not overlap.
1994 DCHECK(!AreAliased(object_size, result, scratch, ip));
1995 DCHECK(!AreAliased(result_end, result, scratch, ip));
1996 DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
1997
1998 // Check relative positions of allocation top and limit addresses.
1999 // The values must be adjacent in memory to allow the use of LDM.
2000 // Also, assert that the registers are numbered such that the values
2001 // are loaded in the correct order.
2002 ExternalReference allocation_top =
2003 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2004 ExternalReference allocation_limit =
2005 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2006 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
2007 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
2008 DCHECK((limit - top) == kPointerSize);
2009 DCHECK(result.code() < ip.code());
2010
2011 // Set up allocation top address and allocation limit registers.
2012 Register top_address = scratch;
2013 // This code stores a temporary value in ip. This is OK, as the code below
2014 // does not need ip for implicit literal generation.
2015 Register alloc_limit = ip;
2016 mov(top_address, Operand(allocation_top));
2017
2018 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2019 // Load allocation top into result and allocation limit into alloc_limit.
2020 ldm(ia, top_address, result.bit() | alloc_limit.bit());
2021 } else {
2022 if (emit_debug_code()) {
2023 // Assert that result actually contains top on entry.
2024 ldr(alloc_limit, MemOperand(top_address));
2025 cmp(result, alloc_limit);
2026 Check(eq, kUnexpectedAllocationTop);
2027 }
2028 // Load allocation limit. Result already contains allocation top.
2029 ldr(alloc_limit, MemOperand(top_address, limit - top));
2030 }
2031
2032 if ((flags & DOUBLE_ALIGNMENT) != 0) {
2033 // Align the next allocation. Storing the filler map without checking top is
2034 // safe in new-space because the limit of the heap is aligned there.
2035 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
2036 and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
2037 Label aligned;
2038 b(eq, &aligned);
2039 if ((flags & PRETENURE) != 0) {
2040 cmp(result, Operand(alloc_limit));
2041 b(hs, gc_required);
2042 }
2043 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
2044 str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
2045 bind(&aligned);
2046 }
2047
2048 // Calculate new top and bail out if new space is exhausted. Use result
2049 // to calculate the new top. Object size may be in words so a shift is
2050 // required to get the number of bytes.
2051 if ((flags & SIZE_IN_WORDS) != 0) {
2052 add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
2053 } else {
2054 add(result_end, result, Operand(object_size), SetCC);
2055 }
2056
2057 cmp(result_end, Operand(alloc_limit));
2058 b(hi, gc_required);
2059
2060 // Update allocation top. result temporarily holds the new top.
2061 if (emit_debug_code()) {
2062 tst(result_end, Operand(kObjectAlignmentMask));
2063 Check(eq, kUnalignedAllocationInNewSpace);
2064 }
2065 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
2066 // The top pointer is not updated for allocation folding dominators.
2067 str(result_end, MemOperand(top_address));
2068 }
2069
2070 // Tag object.
2071 add(result, result, Operand(kHeapObjectTag));
2072 }
2073
2074 void MacroAssembler::FastAllocate(Register object_size, Register result,
2075 Register result_end, Register scratch,
2076 AllocationFlags flags) {
2077 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
2078 // is not specified. Other registers must not overlap.
2079 DCHECK(!AreAliased(object_size, result, scratch, ip));
2080 DCHECK(!AreAliased(result_end, result, scratch, ip));
2081 DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
2082
2083 ExternalReference allocation_top =
2084 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2085
2086 Register top_address = scratch;
2087 mov(top_address, Operand(allocation_top));
2088 ldr(result, MemOperand(top_address));
2089
2090 if ((flags & DOUBLE_ALIGNMENT) != 0) {
2091 // Align the next allocation. Storing the filler map without checking top is
2092 // safe in new-space because the limit of the heap is aligned there.
2093 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
2094 and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
2095 Label aligned;
2096 b(eq, &aligned);
2097 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
2098 str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
2099 bind(&aligned);
2100 }
2101
2102 // Calculate new top using result. Object size may be in words so a shift is
2103 // required to get the number of bytes.
2104 if ((flags & SIZE_IN_WORDS) != 0) {
2105 add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
2106 } else {
2107 add(result_end, result, Operand(object_size), SetCC);
2108 }
2109
2110 // Update allocation top. result temporarily holds the new top.
2111 if (emit_debug_code()) {
2112 tst(result_end, Operand(kObjectAlignmentMask));
2113 Check(eq, kUnalignedAllocationInNewSpace);
2114 }
2115 // The top pointer is not updated for allocation folding dominators.
2116 str(result_end, MemOperand(top_address));
2117
2118 add(result, result, Operand(kHeapObjectTag));
2119 }
2120
2121 void MacroAssembler::FastAllocate(int object_size, Register result,
2122 Register scratch1, Register scratch2,
2123 AllocationFlags flags) {
2124 DCHECK(object_size <= kMaxRegularHeapObjectSize);
2125 DCHECK(!AreAliased(result, scratch1, scratch2, ip));
2126
2127 // Make object size into bytes.
2128 if ((flags & SIZE_IN_WORDS) != 0) {
2129 object_size *= kPointerSize;
2130 }
2131 DCHECK_EQ(0, object_size & kObjectAlignmentMask);
2132
2133 ExternalReference allocation_top =
2134 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2135
2136 // Set up allocation top address register.
2137 Register top_address = scratch1;
2138 Register result_end = scratch2;
2139 mov(top_address, Operand(allocation_top));
2140 ldr(result, MemOperand(top_address));
2141
2142 if ((flags & DOUBLE_ALIGNMENT) != 0) {
2143 // Align the next allocation. Storing the filler map without checking top is
2144 // safe in new-space because the limit of the heap is aligned there.
2145 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
2146 and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
2147 Label aligned;
2148 b(eq, &aligned);
2149 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
2150 str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
2151 bind(&aligned);
2152 }
2153
2154 // Calculate new top using result. Object size may be in words so a shift is
2155 // required to get the number of bytes. We must preserve the ip register at
2156 // this point, so we cannot just use add().
2157 DCHECK(object_size > 0);
2158 Register source = result;
2159 int shift = 0;
2160 while (object_size != 0) {
2161 if (((object_size >> shift) & 0x03) == 0) {
2162 shift += 2;
2163 } else {
2164 int bits = object_size & (0xff << shift);
2165 object_size -= bits;
2166 shift += 8;
2167 Operand bits_operand(bits);
2168 DCHECK(bits_operand.instructions_required(this) == 1);
2169 add(result_end, source, bits_operand);
2170 source = result_end;
2171 }
2172 }
2173
2174 // The top pointer is not updated for allocation folding dominators.
2175 str(result_end, MemOperand(top_address));
2176
2177 add(result, result, Operand(kHeapObjectTag));
2178 }
2179
2180 void MacroAssembler::AllocateTwoByteString(Register result,
2181 Register length,
2182 Register scratch1,
2183 Register scratch2,
2184 Register scratch3,
2185 Label* gc_required) {
2186 // Calculate the number of bytes needed for the characters in the string while
2187 // observing object alignment.
2188 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2189 mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
2190 add(scratch1, scratch1,
2191 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
2192 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
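  // In other words (a sketch of the computation above):
  //   size = (2 * length + SeqTwoByteString::kHeaderSize + kObjectAlignmentMask)
  //          & ~kObjectAlignmentMask;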
2193
2194 // Allocate two-byte string in new space.
2195 Allocate(scratch1, result, scratch2, scratch3, gc_required,
2196 NO_ALLOCATION_FLAGS);
2197
2198 // Set the map, length and hash field.
2199 InitializeNewString(result,
2200 length,
2201 Heap::kStringMapRootIndex,
2202 scratch1,
2203 scratch2);
2204 }
2205
2206
2207 void MacroAssembler::AllocateOneByteString(Register result, Register length,
2208 Register scratch1, Register scratch2,
2209 Register scratch3,
2210 Label* gc_required) {
2211 // Calculate the number of bytes needed for the characters in the string while
2212 // observing object alignment.
2213 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2214 DCHECK(kCharSize == 1);
2215 add(scratch1, length,
2216 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
2217 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
2218
2219 // Allocate one-byte string in new space.
2220 Allocate(scratch1, result, scratch2, scratch3, gc_required,
2221 NO_ALLOCATION_FLAGS);
2222
2223 // Set the map, length and hash field.
2224 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
2225 scratch1, scratch2);
2226 }
2227
2228
2229 void MacroAssembler::AllocateTwoByteConsString(Register result,
2230 Register length,
2231 Register scratch1,
2232 Register scratch2,
2233 Label* gc_required) {
2234 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
2235 NO_ALLOCATION_FLAGS);
2236
2237 InitializeNewString(result,
2238 length,
2239 Heap::kConsStringMapRootIndex,
2240 scratch1,
2241 scratch2);
2242 }
2243
2244
2245 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
2246 Register scratch1,
2247 Register scratch2,
2248 Label* gc_required) {
2249 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
2250 NO_ALLOCATION_FLAGS);
2251
2252 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
2253 scratch1, scratch2);
2254 }
2255
2256
2257 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
2258 Register length,
2259 Register scratch1,
2260 Register scratch2,
2261 Label* gc_required) {
2262 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2263 NO_ALLOCATION_FLAGS);
2264
2265 InitializeNewString(result,
2266 length,
2267 Heap::kSlicedStringMapRootIndex,
2268 scratch1,
2269 scratch2);
2270 }
2271
2272
2273 void MacroAssembler::AllocateOneByteSlicedString(Register result,
2274 Register length,
2275 Register scratch1,
2276 Register scratch2,
2277 Label* gc_required) {
2278 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2279 NO_ALLOCATION_FLAGS);
2280
2281 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
2282 scratch1, scratch2);
2283 }
2284
2285
2286 void MacroAssembler::CompareObjectType(Register object,
2287 Register map,
2288 Register type_reg,
2289 InstanceType type) {
2290 const Register temp = type_reg.is(no_reg) ? ip : type_reg;
2291
2292 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2293 CompareInstanceType(map, temp, type);
2294 }
2295
2296
2297 void MacroAssembler::CompareInstanceType(Register map,
2298 Register type_reg,
2299 InstanceType type) {
2300 // Registers map and type_reg can be ip. These two lines assert
2301 // that ip can be used with the two instructions (the constants
2302 // will never need ip).
2303 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2304 STATIC_ASSERT(LAST_TYPE < 256);
2305 ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2306 cmp(type_reg, Operand(type));
2307 }
2308
2309
2310 void MacroAssembler::CompareRoot(Register obj,
2311 Heap::RootListIndex index) {
2312 DCHECK(!obj.is(ip));
2313 LoadRoot(ip, index);
2314 cmp(obj, ip);
2315 }
2316
2317 void MacroAssembler::CheckFastObjectElements(Register map,
2318 Register scratch,
2319 Label* fail) {
2320 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2321 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2322 STATIC_ASSERT(FAST_ELEMENTS == 2);
2323 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
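  // The whole bit field 2 byte is compared below; assuming the elements kind
  // occupies its most significant bits (as the constants above imply), the
  // two range checks accept exactly the FAST_ELEMENTS and FAST_HOLEY_ELEMENTS
  // encodings.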
2324 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2325 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2326 b(ls, fail);
2327 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2328 b(hi, fail);
2329 }
2330
2331
2332 void MacroAssembler::CheckFastSmiElements(Register map,
2333 Register scratch,
2334 Label* fail) {
2335 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2336 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2337 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2338 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2339 b(hi, fail);
2340 }
2341
2342
2343 void MacroAssembler::StoreNumberToDoubleElements(
2344 Register value_reg,
2345 Register key_reg,
2346 Register elements_reg,
2347 Register scratch1,
2348 LowDwVfpRegister double_scratch,
2349 Label* fail,
2350 int elements_offset) {
2351 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
2352 Label smi_value, store;
2353
2354 // Handle smi values specially.
2355 JumpIfSmi(value_reg, &smi_value);
2356
2357 // Ensure that the object is a heap number
2358 CheckMap(value_reg,
2359 scratch1,
2360 isolate()->factory()->heap_number_map(),
2361 fail,
2362 DONT_DO_SMI_CHECK);
2363
2364 vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
2365 VFPCanonicalizeNaN(double_scratch);
2366 b(&store);
2367
2368 bind(&smi_value);
2369 SmiToDouble(double_scratch, value_reg);
2370
2371 bind(&store);
2372 add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
2373 vstr(double_scratch,
2374 FieldMemOperand(scratch1,
2375 FixedDoubleArray::kHeaderSize - elements_offset));
2376 }
2377
2378
2379 void MacroAssembler::CompareMap(Register obj,
2380 Register scratch,
2381 Handle<Map> map,
2382 Label* early_success) {
2383 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2384 CompareMap(scratch, map, early_success);
2385 }
2386
2387
2388 void MacroAssembler::CompareMap(Register obj_map,
2389 Handle<Map> map,
2390 Label* early_success) {
2391 cmp(obj_map, Operand(map));
2392 }
2393
2394
2395 void MacroAssembler::CheckMap(Register obj,
2396 Register scratch,
2397 Handle<Map> map,
2398 Label* fail,
2399 SmiCheckType smi_check_type) {
2400 if (smi_check_type == DO_SMI_CHECK) {
2401 JumpIfSmi(obj, fail);
2402 }
2403
2404 Label success;
2405 CompareMap(obj, scratch, map, &success);
2406 b(ne, fail);
2407 bind(&success);
2408 }
2409
2410
2411 void MacroAssembler::CheckMap(Register obj,
2412 Register scratch,
2413 Heap::RootListIndex index,
2414 Label* fail,
2415 SmiCheckType smi_check_type) {
2416 if (smi_check_type == DO_SMI_CHECK) {
2417 JumpIfSmi(obj, fail);
2418 }
2419 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2420 LoadRoot(ip, index);
2421 cmp(scratch, ip);
2422 b(ne, fail);
2423 }
2424
2425
2426 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
2427 Register scratch2, Handle<WeakCell> cell,
2428 Handle<Code> success,
2429 SmiCheckType smi_check_type) {
2430 Label fail;
2431 if (smi_check_type == DO_SMI_CHECK) {
2432 JumpIfSmi(obj, &fail);
2433 }
2434 ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
2435 CmpWeakValue(scratch1, cell, scratch2);
2436 Jump(success, RelocInfo::CODE_TARGET, eq);
2437 bind(&fail);
2438 }
2439
2440
2441 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2442 Register scratch) {
2443 mov(scratch, Operand(cell));
2444 ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
2445 cmp(value, scratch);
2446 }
2447
2448
2449 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2450 mov(value, Operand(cell));
2451 ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
2452 }
2453
2454
2455 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2456 Label* miss) {
2457 GetWeakValue(value, cell);
2458 JumpIfSmi(value, miss);
2459 }
2460
2461
2462 void MacroAssembler::GetMapConstructor(Register result, Register map,
2463 Register temp, Register temp2) {
2464 Label done, loop;
2465 ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
2466 bind(&loop);
2467 JumpIfSmi(result, &done);
2468 CompareObjectType(result, temp, temp2, MAP_TYPE);
2469 b(ne, &done);
2470 ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
2471 b(&loop);
2472 bind(&done);
2473 }
2474
2475
2476 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
2477 Register scratch, Label* miss) {
2478 // Get the prototype or initial map from the function.
2479 ldr(result,
2480 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2481
2482 // If the prototype or initial map is the hole, don't return it and
2483 // simply miss the cache instead. This will allow us to allocate a
2484 // prototype object on-demand in the runtime system.
2485 LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2486 cmp(result, ip);
2487 b(eq, miss);
2488
2489 // If the function does not have an initial map, we're done.
2490 Label done;
2491 CompareObjectType(result, scratch, scratch, MAP_TYPE);
2492 b(ne, &done);
2493
2494 // Get the prototype from the initial map.
2495 ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2496
2497 // All done.
2498 bind(&done);
2499 }
2500
2501
2502 void MacroAssembler::CallStub(CodeStub* stub,
2503 TypeFeedbackId ast_id,
2504 Condition cond) {
2505 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2506 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
2507 }
2508
2509
2510 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2511 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2512 }
2513
2514
2515 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2516 return has_frame_ || !stub->SometimesSetsUpAFrame();
2517 }
2518
2519 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
2520 if (CpuFeatures::IsSupported(VFPv3)) {
2521 CpuFeatureScope scope(this, VFPv3);
2522 vmov(value.low(), smi);
2523 vcvt_f64_s32(value, 1);
2524 } else {
2525 SmiUntag(ip, smi);
2526 vmov(value.low(), ip);
2527 vcvt_f64_s32(value, value.low());
2528 }
2529 }
2530
2531
2532 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
2533 LowDwVfpRegister double_scratch) {
2534 DCHECK(!double_input.is(double_scratch));
2535 vcvt_s32_f64(double_scratch.low(), double_input);
2536 vcvt_f64_s32(double_scratch, double_scratch.low());
2537 VFPCompareAndSetFlags(double_input, double_scratch);
2538 }
2539
2540
2541 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2542 DwVfpRegister double_input,
2543 LowDwVfpRegister double_scratch) {
2544 DCHECK(!double_input.is(double_scratch));
2545 vcvt_s32_f64(double_scratch.low(), double_input);
2546 vmov(result, double_scratch.low());
2547 vcvt_f64_s32(double_scratch, double_scratch.low());
2548 VFPCompareAndSetFlags(double_input, double_scratch);
2549 }
2550
2551
2552 void MacroAssembler::TryInt32Floor(Register result,
2553 DwVfpRegister double_input,
2554 Register input_high,
2555 LowDwVfpRegister double_scratch,
2556 Label* done,
2557 Label* exact) {
2558 DCHECK(!result.is(input_high));
2559 DCHECK(!double_input.is(double_scratch));
2560 Label negative, exception;
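  // In outline (sketch): an all-ones biased exponent means NaN or an infinity,
  // so bail out; an exact conversion leaves through 'exact'; a positive
  // inexact input only succeeds if the truncated result stays below
  // 0x7fffffff; a negative inexact input takes floor(x) = truncate(x) - 1 and
  // bails out if that subtraction underflows.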
2561
2562 VmovHigh(input_high, double_input);
2563
2564 // Test for NaN and infinities.
2565 Sbfx(result, input_high,
2566 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
2567 cmp(result, Operand(-1));
2568 b(eq, &exception);
2569 // Test for values that can be exactly represented as a
2570 // signed 32-bit integer.
2571 TryDoubleToInt32Exact(result, double_input, double_scratch);
2572 // If exact, return (result already fetched).
2573 b(eq, exact);
2574 cmp(input_high, Operand::Zero());
2575 b(mi, &negative);
2576
2577 // Input is in ]+0, +inf[.
2578 // If result equals 0x7fffffff input was out of range or
2579   // in ]0x7fffffff, 0x80000000[. We ignore this last case, which
2580   // could fit into an int32; that means we always treat such input as
2581   // out of range and always go to exception.
2582 // If result < 0x7fffffff, go to done, result fetched.
2583 cmn(result, Operand(1));
2584 b(mi, &exception);
2585 b(done);
2586
2587 // Input is in ]-inf, -0[.
2588 // If x is a non integer negative number,
2589 // floor(x) <=> round_to_zero(x) - 1.
2590 bind(&negative);
2591 sub(result, result, Operand(1), SetCC);
2592 // If result is still negative, go to done, result fetched.
2593 // Else, we had an overflow and we fall through exception.
2594 b(mi, done);
2595 bind(&exception);
2596 }
2597
2598 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2599 DwVfpRegister double_input,
2600 Label* done) {
2601 LowDwVfpRegister double_scratch = kScratchDoubleReg;
2602 vcvt_s32_f64(double_scratch.low(), double_input);
2603 vmov(result, double_scratch.low());
2604
2605 // If result is not saturated (0x7fffffff or 0x80000000), we are done.
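  // (result - 1 reaches 0x7ffffffe or 0x7fffffff only for those two saturated
  // values; every other result is signed-less-than 0x7ffffffe and branches.)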
2606 sub(ip, result, Operand(1));
2607 cmp(ip, Operand(0x7ffffffe));
2608 b(lt, done);
2609 }
2610
2611
2612 void MacroAssembler::TruncateDoubleToI(Register result,
2613 DwVfpRegister double_input) {
2614 Label done;
2615
2616 TryInlineTruncateDoubleToI(result, double_input, &done);
2617
2618 // If we fell through then inline version didn't succeed - call stub instead.
2619 push(lr);
2620 sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
2621 vstr(double_input, MemOperand(sp, 0));
2622
2623 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2624 CallStub(&stub);
2625
2626 add(sp, sp, Operand(kDoubleSize));
2627 pop(lr);
2628
2629 bind(&done);
2630 }
2631
2632
2633 void MacroAssembler::TruncateHeapNumberToI(Register result,
2634 Register object) {
2635 Label done;
2636 LowDwVfpRegister double_scratch = kScratchDoubleReg;
2637 DCHECK(!result.is(object));
2638
2639 vldr(double_scratch,
2640 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2641 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2642
2643 // If we fell through then inline version didn't succeed - call stub instead.
2644 push(lr);
2645 DoubleToIStub stub(isolate(),
2646 object,
2647 result,
2648 HeapNumber::kValueOffset - kHeapObjectTag,
2649 true,
2650 true);
2651 CallStub(&stub);
2652 pop(lr);
2653
2654 bind(&done);
2655 }
2656
2657
2658 void MacroAssembler::TruncateNumberToI(Register object,
2659 Register result,
2660 Register heap_number_map,
2661 Register scratch1,
2662 Label* not_number) {
2663 Label done;
2664 DCHECK(!result.is(object));
2665
2666 UntagAndJumpIfSmi(result, object, &done);
2667 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2668 TruncateHeapNumberToI(result, object);
2669
2670 bind(&done);
2671 }
2672
2673
2674 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2675 Register src,
2676 int num_least_bits) {
2677 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
2678 CpuFeatureScope scope(this, ARMv7);
2679 ubfx(dst, src, kSmiTagSize, num_least_bits);
2680 } else {
2681 SmiUntag(dst, src);
2682 and_(dst, dst, Operand((1 << num_least_bits) - 1));
2683 }
2684 }
2685
2686
2687 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2688 Register src,
2689 int num_least_bits) {
2690 and_(dst, src, Operand((1 << num_least_bits) - 1));
2691 }
2692
2693
2694 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2695 int num_arguments,
2696 SaveFPRegsMode save_doubles) {
2697 // All parameters are on the stack. r0 has the return value after call.
2698
2699 // If the expected number of arguments of the runtime function is
2700 // constant, we check that the actual number of arguments match the
2701 // expectation.
2702 CHECK(f->nargs < 0 || f->nargs == num_arguments);
2703
2704 // TODO(1236192): Most runtime routines don't need the number of
2705 // arguments passed in because it is constant. At some point we
2706 // should remove this need and make the runtime routine entry code
2707 // smarter.
2708 mov(r0, Operand(num_arguments));
2709 mov(r1, Operand(ExternalReference(f, isolate())));
2710 CEntryStub stub(isolate(), 1, save_doubles);
2711 CallStub(&stub);
2712 }
2713
2714
2715 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2716 int num_arguments) {
2717 mov(r0, Operand(num_arguments));
2718 mov(r1, Operand(ext));
2719
2720 CEntryStub stub(isolate(), 1);
2721 CallStub(&stub);
2722 }
2723
2724
2725 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
2726 const Runtime::Function* function = Runtime::FunctionForId(fid);
2727 DCHECK_EQ(1, function->result_size);
2728 if (function->nargs >= 0) {
2729 // TODO(1236192): Most runtime routines don't need the number of
2730 // arguments passed in because it is constant. At some point we
2731 // should remove this need and make the runtime routine entry code
2732 // smarter.
2733 mov(r0, Operand(function->nargs));
2734 }
2735 JumpToExternalReference(ExternalReference(fid, isolate()));
2736 }
2737
2738 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
2739 bool builtin_exit_frame) {
2740 #if defined(__thumb__)
2741 // Thumb mode builtin.
2742 DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2743 #endif
2744 mov(r1, Operand(builtin));
2745 CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
2746 builtin_exit_frame);
2747 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2748 }
2749
2750 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2751 Register scratch1, Register scratch2) {
2752 if (FLAG_native_code_counters && counter->Enabled()) {
2753 mov(scratch1, Operand(value));
2754 mov(scratch2, Operand(ExternalReference(counter)));
2755 str(scratch1, MemOperand(scratch2));
2756 }
2757 }
2758
2759
2760 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2761 Register scratch1, Register scratch2) {
2762 DCHECK(value > 0);
2763 if (FLAG_native_code_counters && counter->Enabled()) {
2764 mov(scratch2, Operand(ExternalReference(counter)));
2765 ldr(scratch1, MemOperand(scratch2));
2766 add(scratch1, scratch1, Operand(value));
2767 str(scratch1, MemOperand(scratch2));
2768 }
2769 }
2770
2771
2772 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2773 Register scratch1, Register scratch2) {
2774 DCHECK(value > 0);
2775 if (FLAG_native_code_counters && counter->Enabled()) {
2776 mov(scratch2, Operand(ExternalReference(counter)));
2777 ldr(scratch1, MemOperand(scratch2));
2778 sub(scratch1, scratch1, Operand(value));
2779 str(scratch1, MemOperand(scratch2));
2780 }
2781 }
2782
2783
2784 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
2785 if (emit_debug_code())
2786 Check(cond, reason);
2787 }
2788
2789
2790 void MacroAssembler::AssertFastElements(Register elements) {
2791 if (emit_debug_code()) {
2792 DCHECK(!elements.is(ip));
2793 Label ok;
2794 push(elements);
2795 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2796 LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2797 cmp(elements, ip);
2798 b(eq, &ok);
2799 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2800 cmp(elements, ip);
2801 b(eq, &ok);
2802 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2803 cmp(elements, ip);
2804 b(eq, &ok);
2805 Abort(kJSObjectWithFastElementsMapHasSlowElements);
2806 bind(&ok);
2807 pop(elements);
2808 }
2809 }
2810
2811
2812 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
2813 Label L;
2814 b(cond, &L);
2815 Abort(reason);
2816 // will not return here
2817 bind(&L);
2818 }
2819
2820
2821 void MacroAssembler::Abort(BailoutReason reason) {
2822 Label abort_start;
2823 bind(&abort_start);
2824 #ifdef DEBUG
2825 const char* msg = GetBailoutReason(reason);
2826 if (msg != NULL) {
2827 RecordComment("Abort message: ");
2828 RecordComment(msg);
2829 }
2830
2831 if (FLAG_trap_on_abort) {
2832 stop(msg);
2833 return;
2834 }
2835 #endif
2836
2837 // Check if Abort() has already been initialized.
2838 DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
2839
2840 Move(r1, Smi::FromInt(static_cast<int>(reason)));
2841
2842 // Disable stub call restrictions to always allow calls to abort.
2843 if (!has_frame_) {
2844 // We don't actually want to generate a pile of code for this, so just
2845 // claim there is a stack frame, without generating one.
2846 FrameScope scope(this, StackFrame::NONE);
2847 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
2848 } else {
2849 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
2850 }
2851 // will not return here
2852 if (is_const_pool_blocked()) {
2853 // If the calling code cares about the exact number of
2854 // instructions generated, we insert padding here to keep the size
2855 // of the Abort macro constant.
2856 static const int kExpectedAbortInstructions = 7;
2857 int abort_instructions = InstructionsGeneratedSince(&abort_start);
2858 DCHECK(abort_instructions <= kExpectedAbortInstructions);
2859 while (abort_instructions++ < kExpectedAbortInstructions) {
2860 nop();
2861 }
2862 }
2863 }
2864
2865
2866 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2867 if (context_chain_length > 0) {
2868 // Move up the chain of contexts to the context containing the slot.
2869 ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2870 for (int i = 1; i < context_chain_length; i++) {
2871 ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2872 }
2873 } else {
2874 // Slot is in the current function context. Move it into the
2875 // destination register in case we store into it (the write barrier
2876     // cannot be allowed to destroy the context in cp).
2877 mov(dst, cp);
2878 }
2879 }
2880
2881
2882 void MacroAssembler::LoadTransitionedArrayMapConditional(
2883 ElementsKind expected_kind,
2884 ElementsKind transitioned_kind,
2885 Register map_in_out,
2886 Register scratch,
2887 Label* no_map_match) {
2888 DCHECK(IsFastElementsKind(expected_kind));
2889 DCHECK(IsFastElementsKind(transitioned_kind));
2890
2891 // Check that the function's map is the same as the expected cached map.
2892 ldr(scratch, NativeContextMemOperand());
2893 ldr(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
2894 cmp(map_in_out, ip);
2895 b(ne, no_map_match);
2896
2897 // Use the transitioned cached map.
2898 ldr(map_in_out,
2899 ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
2900 }
2901
2902
2903 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
2904 ldr(dst, NativeContextMemOperand());
2905 ldr(dst, ContextMemOperand(dst, index));
2906 }
2907
2908
2909 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2910 Register map,
2911 Register scratch) {
2912 // Load the initial map. The global functions all have initial maps.
2913 ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2914 if (emit_debug_code()) {
2915 Label ok, fail;
2916 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2917 b(&ok);
2918 bind(&fail);
2919 Abort(kGlobalFunctionsMustHaveInitialMap);
2920 bind(&ok);
2921 }
2922 }
2923
2924
2925 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2926 Register reg,
2927 Register scratch,
2928 Label* not_power_of_two_or_zero) {
2929 sub(scratch, reg, Operand(1), SetCC);
2930 b(mi, not_power_of_two_or_zero);
2931 tst(scratch, reg);
2932 b(ne, not_power_of_two_or_zero);
2933 }
2934
2935
2936 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
2937 Register reg,
2938 Register scratch,
2939 Label* zero_and_neg,
2940 Label* not_power_of_two) {
2941 sub(scratch, reg, Operand(1), SetCC);
2942 b(mi, zero_and_neg);
2943 tst(scratch, reg);
2944 b(ne, not_power_of_two);
2945 }
2946
2947
2948 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
2949 Register reg2,
2950 Label* on_not_both_smi) {
2951 STATIC_ASSERT(kSmiTag == 0);
2952 tst(reg1, Operand(kSmiTagMask));
2953 tst(reg2, Operand(kSmiTagMask), eq);
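  // The second tst only executes when the first left eq (reg1 is a smi), so
  // the flags end up ne if either register carries a non-smi tag.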
2954 b(ne, on_not_both_smi);
2955 }
2956
2957
2958 void MacroAssembler::UntagAndJumpIfSmi(
2959 Register dst, Register src, Label* smi_case) {
2960 STATIC_ASSERT(kSmiTag == 0);
2961 SmiUntag(dst, src, SetCC);
2962 b(cc, smi_case); // Shifter carry is not set for a smi.
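  // (SmiUntag with SetCC is an arithmetic shift right by the tag size; the
  // shifter carry receives the tag bit, which is 0 for a smi and 1 for a heap
  // object pointer. The same trick drives the cs branch below.)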
2963 }
2964
2965
2966 void MacroAssembler::UntagAndJumpIfNotSmi(
2967 Register dst, Register src, Label* non_smi_case) {
2968 STATIC_ASSERT(kSmiTag == 0);
2969 SmiUntag(dst, src, SetCC);
2970 b(cs, non_smi_case); // Shifter carry is set for a non-smi.
2971 }
2972
2973
2974 void MacroAssembler::JumpIfEitherSmi(Register reg1,
2975 Register reg2,
2976 Label* on_either_smi) {
2977 STATIC_ASSERT(kSmiTag == 0);
2978 tst(reg1, Operand(kSmiTagMask));
2979 tst(reg2, Operand(kSmiTagMask), ne);
2980 b(eq, on_either_smi);
2981 }
2982
2983 void MacroAssembler::AssertNotNumber(Register object) {
2984 if (emit_debug_code()) {
2985 STATIC_ASSERT(kSmiTag == 0);
2986 tst(object, Operand(kSmiTagMask));
2987 Check(ne, kOperandIsANumber);
2988 push(object);
2989 CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
2990 pop(object);
2991 Check(ne, kOperandIsANumber);
2992 }
2993 }
2994
2995 void MacroAssembler::AssertNotSmi(Register object) {
2996 if (emit_debug_code()) {
2997 STATIC_ASSERT(kSmiTag == 0);
2998 tst(object, Operand(kSmiTagMask));
2999 Check(ne, kOperandIsASmi);
3000 }
3001 }
3002
3003
3004 void MacroAssembler::AssertSmi(Register object) {
3005 if (emit_debug_code()) {
3006 STATIC_ASSERT(kSmiTag == 0);
3007 tst(object, Operand(kSmiTagMask));
3008 Check(eq, kOperandIsNotSmi);
3009 }
3010 }
3011
3012
3013 void MacroAssembler::AssertString(Register object) {
3014 if (emit_debug_code()) {
3015 STATIC_ASSERT(kSmiTag == 0);
3016 tst(object, Operand(kSmiTagMask));
3017 Check(ne, kOperandIsASmiAndNotAString);
3018 push(object);
3019 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3020 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
3021 pop(object);
3022 Check(lo, kOperandIsNotAString);
3023 }
3024 }
3025
3026
3027 void MacroAssembler::AssertName(Register object) {
3028 if (emit_debug_code()) {
3029 STATIC_ASSERT(kSmiTag == 0);
3030 tst(object, Operand(kSmiTagMask));
3031 Check(ne, kOperandIsASmiAndNotAName);
3032 push(object);
3033 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3034 CompareInstanceType(object, object, LAST_NAME_TYPE);
3035 pop(object);
3036 Check(le, kOperandIsNotAName);
3037 }
3038 }
3039
3040
3041 void MacroAssembler::AssertFunction(Register object) {
3042 if (emit_debug_code()) {
3043 STATIC_ASSERT(kSmiTag == 0);
3044 tst(object, Operand(kSmiTagMask));
3045 Check(ne, kOperandIsASmiAndNotAFunction);
3046 push(object);
3047 CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
3048 pop(object);
3049 Check(eq, kOperandIsNotAFunction);
3050 }
3051 }
3052
3053
3054 void MacroAssembler::AssertBoundFunction(Register object) {
3055 if (emit_debug_code()) {
3056 STATIC_ASSERT(kSmiTag == 0);
3057 tst(object, Operand(kSmiTagMask));
3058 Check(ne, kOperandIsASmiAndNotABoundFunction);
3059 push(object);
3060 CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
3061 pop(object);
3062 Check(eq, kOperandIsNotABoundFunction);
3063 }
3064 }
3065
3066 void MacroAssembler::AssertGeneratorObject(Register object) {
3067 if (emit_debug_code()) {
3068 STATIC_ASSERT(kSmiTag == 0);
3069 tst(object, Operand(kSmiTagMask));
3070 Check(ne, kOperandIsASmiAndNotAGeneratorObject);
3071 push(object);
3072 CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
3073 pop(object);
3074 Check(eq, kOperandIsNotAGeneratorObject);
3075 }
3076 }
3077
3078 void MacroAssembler::AssertReceiver(Register object) {
3079 if (emit_debug_code()) {
3080 STATIC_ASSERT(kSmiTag == 0);
3081 tst(object, Operand(kSmiTagMask));
3082 Check(ne, kOperandIsASmiAndNotAReceiver);
3083 push(object);
3084 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
3085 CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
3086 pop(object);
3087 Check(hs, kOperandIsNotAReceiver);
3088 }
3089 }
3090
3091
3092 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
3093 Register scratch) {
3094 if (emit_debug_code()) {
3095 Label done_checking;
3096 AssertNotSmi(object);
3097 CompareRoot(object, Heap::kUndefinedValueRootIndex);
3098 b(eq, &done_checking);
3099 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3100 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
3101 Assert(eq, kExpectedUndefinedOrCell);
3102 bind(&done_checking);
3103 }
3104 }
3105
3106
3107 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
3108 if (emit_debug_code()) {
3109 CompareRoot(reg, index);
3110 Check(eq, kHeapNumberMapRegisterClobbered);
3111 }
3112 }
3113
3114
3115 void MacroAssembler::JumpIfNotHeapNumber(Register object,
3116 Register heap_number_map,
3117 Register scratch,
3118 Label* on_not_heap_number) {
3119 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3120 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3121 cmp(scratch, heap_number_map);
3122 b(ne, on_not_heap_number);
3123 }
3124
3125
3126 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
3127 Register first, Register second, Register scratch1, Register scratch2,
3128 Label* failure) {
3129 // Test that both first and second are sequential one-byte strings.
3130 // Assume that they are non-smis.
3131 ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
3132 ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
3133 ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
3134 ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
3135
3136 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
3137 scratch2, failure);
3138 }
3139
3140 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
3141 Register second,
3142 Register scratch1,
3143 Register scratch2,
3144 Label* failure) {
3145 // Check that neither is a smi.
3146 and_(scratch1, first, Operand(second));
3147 JumpIfSmi(scratch1, failure);
3148 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
3149 scratch2, failure);
3150 }
3151
3152
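// A unique name is either an internalized string or a symbol: the first test
// accepts instance types with both the string and internalized tag bits
// clear, and anything else must be SYMBOL_TYPE to avoid the branch to
// not_unique_name.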
3153 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3154 Label* not_unique_name) {
3155 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3156 Label succeed;
3157 tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3158 b(eq, &succeed);
3159 cmp(reg, Operand(SYMBOL_TYPE));
3160 b(ne, not_unique_name);
3161
3162 bind(&succeed);
3163 }
3164
3165
3166 // Allocates a heap number or jumps to the gc_required label if the young
3167 // space is full and a scavenge is needed.
3168 void MacroAssembler::AllocateHeapNumber(Register result,
3169 Register scratch1,
3170 Register scratch2,
3171 Register heap_number_map,
3172 Label* gc_required,
3173 MutableMode mode) {
3174 // Allocate an object in the heap for the heap number and tag it as a heap
3175 // object.
3176 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3177 NO_ALLOCATION_FLAGS);
3178
3179 Heap::RootListIndex map_index = mode == MUTABLE
3180 ? Heap::kMutableHeapNumberMapRootIndex
3181 : Heap::kHeapNumberMapRootIndex;
3182 AssertIsRoot(heap_number_map, map_index);
3183
3184 // Store heap number map in the allocated object.
3185 str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3186 }
3187
3188
3189 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3190 DwVfpRegister value,
3191 Register scratch1,
3192 Register scratch2,
3193 Register heap_number_map,
3194 Label* gc_required) {
3195 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
3196 sub(scratch1, result, Operand(kHeapObjectTag));
3197 vstr(value, scratch1, HeapNumber::kValueOffset);
3198 }
3199
3200
3201 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
3202 Register value, Register scratch1,
3203 Register scratch2, Label* gc_required) {
3204 DCHECK(!result.is(constructor));
3205 DCHECK(!result.is(scratch1));
3206 DCHECK(!result.is(scratch2));
3207 DCHECK(!result.is(value));
3208
3209 // Allocate JSValue in new space.
3210 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
3211 NO_ALLOCATION_FLAGS);
3212
3213 // Initialize the JSValue.
3214 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
3215 str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
3216 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
3217 str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
3218 str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
3219 str(value, FieldMemOperand(result, JSValue::kValueOffset));
3220 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
3221 }
3222
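// Fills the half-open range [current_address, end_address) with the filler
// value, one pointer-sized word at a time, post-incrementing current_address.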
3223 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
3224 Register end_address,
3225 Register filler) {
3226 Label loop, entry;
3227 b(&entry);
3228 bind(&loop);
3229 str(filler, MemOperand(current_address, kPointerSize, PostIndex));
3230 bind(&entry);
3231 cmp(current_address, end_address);
3232 b(lo, &loop);
3233 }
3234
3235
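// Loads the dynamically detected CPU feature bits and tests the VFP32DREGS
// bit, leaving the flags ne if 32 D-registers are available and eq otherwise.
// SaveFPRegs and RestoreFPRegs below predicate on this result.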
3236 void MacroAssembler::CheckFor32DRegs(Register scratch) {
3237 mov(scratch, Operand(ExternalReference::cpu_features()));
3238 ldr(scratch, MemOperand(scratch));
3239 tst(scratch, Operand(1u << VFP32DREGS));
3240 }
3241
3242
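// Saves d0-d31 below 'location' when 32 D-registers are available; otherwise
// saves d0-d15 and still reserves space for the upper sixteen registers (the
// conditional sub), so the resulting frame layout is the same in both cases.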
3243 void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
3244 CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
3245 CheckFor32DRegs(scratch);
3246 vstm(db_w, location, d16, d31, ne);
3247 sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3248 vstm(db_w, location, d0, d15);
3249 }
3250
3251
3252 void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
3253 CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
3254 CheckFor32DRegs(scratch);
3255 vldm(ia_w, location, d0, d15);
3256 vldm(ia_w, location, d16, d31, ne);
3257 add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3258 }
3259
3260 template <typename T>
3261 void MacroAssembler::FloatMaxHelper(T result, T left, T right,
3262 Label* out_of_line) {
3263 // This trivial case is caught sooner, so that the out-of-line code can be
3264 // completely avoided.
3265 DCHECK(!left.is(right));
3266
3267 if (CpuFeatures::IsSupported(ARMv8)) {
3268 CpuFeatureScope scope(this, ARMv8);
3269 VFPCompareAndSetFlags(left, right);
3270 b(vs, out_of_line);
3271 vmaxnm(result, left, right);
3272 } else {
3273 Label done;
3274 VFPCompareAndSetFlags(left, right);
3275 b(vs, out_of_line);
3276 // Avoid a conditional instruction if the result register is unique.
3277 bool aliased_result_reg = result.is(left) || result.is(right);
3278 Move(result, right, aliased_result_reg ? mi : al);
3279 Move(result, left, gt);
3280 b(ne, &done);
3281 // Left and right are equal, but check for +/-0.
3282 VFPCompareAndSetFlags(left, 0.0);
3283 b(eq, out_of_line);
3284 // The arguments are equal and not zero, so it doesn't matter which input we
3285 // pick. We have already moved one input into the result (if it didn't
3286 // already alias) so there's nothing more to do.
3287 bind(&done);
3288 }
3289 }
3290
3291 template <typename T>
3292 void MacroAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
3293 DCHECK(!left.is(right));
3294
3295 // ARMv8: At least one of left and right is a NaN.
3296 // Anything else: At least one of left and right is a NaN, or both left and
3297 // right are zeroes with unknown sign.
3298
3299 // If left and right are +/-0, select the one with the most positive sign.
3300 // If left or right is NaN, vadd propagates the appropriate one.
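// For example, (+0) + (-0) and (-0) + (+0) are both +0 in the default
// rounding mode, while (-0) + (-0) is -0, which matches the expected maximum
// of two signed zeroes.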
3301 vadd(result, left, right);
3302 }
3303
3304 template <typename T>
3305 void MacroAssembler::FloatMinHelper(T result, T left, T right,
3306 Label* out_of_line) {
3307 // This trivial case is caught sooner, so that the out-of-line code can be
3308 // completely avoided.
3309 DCHECK(!left.is(right));
3310
3311 if (CpuFeatures::IsSupported(ARMv8)) {
3312 CpuFeatureScope scope(this, ARMv8);
3313 VFPCompareAndSetFlags(left, right);
3314 b(vs, out_of_line);
3315 vminnm(result, left, right);
3316 } else {
3317 Label done;
3318 VFPCompareAndSetFlags(left, right);
3319 b(vs, out_of_line);
3320 // Avoid a conditional instruction if the result register is unique.
3321 bool aliased_result_reg = result.is(left) || result.is(right);
3322 Move(result, left, aliased_result_reg ? mi : al);
3323 Move(result, right, gt);
3324 b(ne, &done);
3325 // Left and right are equal, but check for +/-0.
3326 VFPCompareAndSetFlags(left, 0.0);
3327 // If the arguments are equal and not zero, it doesn't matter which input we
3328 // pick. We have already moved one input into the result (if it didn't
3329 // already alias) so there's nothing more to do.
3330 b(ne, &done);
3331 // At this point, both left and right are either 0 or -0.
3332 // We could use a single 'vorr' instruction here if we had NEON support.
3333 // The algorithm used is -((-L) + (-R)), which is most efficiently expressed
3334 // as -((-L) - R).
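// For example, with left = +0 and right = -0: -left is -0, (-0) - (-0) is +0,
// and negating that gives -0, the expected minimum.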
3335 if (left.is(result)) {
3336 DCHECK(!right.is(result));
3337 vneg(result, left);
3338 vsub(result, result, right);
3339 vneg(result, result);
3340 } else {
3341 DCHECK(!left.is(result));
3342 vneg(result, right);
3343 vsub(result, result, left);
3344 vneg(result, result);
3345 }
3346 bind(&done);
3347 }
3348 }
3349
3350 template <typename T>
3351 void MacroAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
3352 DCHECK(!left.is(right));
3353
3354 // At least one of left and right is a NaN. Use vadd to propagate the NaN
3355 // appropriately. +/-0 is handled inline.
3356 vadd(result, left, right);
3357 }
3358
3359 void MacroAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left,
3360 SwVfpRegister right, Label* out_of_line) {
3361 FloatMaxHelper(result, left, right, out_of_line);
3362 }
3363
3364 void MacroAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left,
3365 SwVfpRegister right, Label* out_of_line) {
3366 FloatMinHelper(result, left, right, out_of_line);
3367 }
3368
3369 void MacroAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left,
3370 DwVfpRegister right, Label* out_of_line) {
3371 FloatMaxHelper(result, left, right, out_of_line);
3372 }
3373
3374 void MacroAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left,
3375 DwVfpRegister right, Label* out_of_line) {
3376 FloatMinHelper(result, left, right, out_of_line);
3377 }
3378
3379 void MacroAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
3380 SwVfpRegister right) {
3381 FloatMaxOutOfLineHelper(result, left, right);
3382 }
3383
3384 void MacroAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
3385 SwVfpRegister right) {
3386 FloatMinOutOfLineHelper(result, left, right);
3387 }
3388
3389 void MacroAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
3390 DwVfpRegister right) {
3391 FloatMaxOutOfLineHelper(result, left, right);
3392 }
3393
3394 void MacroAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
3395 DwVfpRegister right) {
3396 FloatMinOutOfLineHelper(result, left, right);
3397 }
3398
3399 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
3400 Register first, Register second, Register scratch1, Register scratch2,
3401 Label* failure) {
3402 const int kFlatOneByteStringMask =
3403 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3404 const int kFlatOneByteStringTag =
3405 kStringTag | kOneByteStringTag | kSeqStringTag;
3406 and_(scratch1, first, Operand(kFlatOneByteStringMask));
3407 and_(scratch2, second, Operand(kFlatOneByteStringMask));
3408 cmp(scratch1, Operand(kFlatOneByteStringTag));
3409 // Ignore second test if first test failed.
3410 cmp(scratch2, Operand(kFlatOneByteStringTag), eq);
3411 b(ne, failure);
3412 }
3413
3414
3415 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
3416 Register scratch,
3417 Label* failure) {
3418 const int kFlatOneByteStringMask =
3419 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3420 const int kFlatOneByteStringTag =
3421 kStringTag | kOneByteStringTag | kSeqStringTag;
3422 and_(scratch, type, Operand(kFlatOneByteStringMask));
3423 cmp(scratch, Operand(kFlatOneByteStringTag));
3424 b(ne, failure);
3425 }
3426
3427 static const int kRegisterPassedArguments = 4;
3428
3429
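// Returns the number of words that must be passed on the stack for a C call
// with the given argument counts. For example, one integer and two double
// arguments need no stack words under the hard-float ABI (the doubles travel
// in VFP registers), while under the soft-float ABI they occupy 1 + 2 * 2 = 5
// core register slots, so one word spills to the stack.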
3430 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3431 int num_double_arguments) {
3432 int stack_passed_words = 0;
3433 if (use_eabi_hardfloat()) {
3434 // In the hard floating point calling convention, we can use
3435 // all double registers to pass doubles.
3436 if (num_double_arguments > DoubleRegister::NumRegisters()) {
3437 stack_passed_words +=
3438 2 * (num_double_arguments - DoubleRegister::NumRegisters());
3439 }
3440 } else {
3441 // In the soft floating point calling convention, every double
3442 // argument is passed using two registers.
3443 num_reg_arguments += 2 * num_double_arguments;
3444 }
3445 // Up to four simple arguments are passed in registers r0..r3.
3446 if (num_reg_arguments > kRegisterPassedArguments) {
3447 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3448 }
3449 return stack_passed_words;
3450 }
3451
3452
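// Checks that 'string' is a sequential string with the expected encoding and
// that the untagged 'index' is non-negative, fits in a smi and is less than
// the string length; aborts otherwise.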
3453 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
3454 Register index,
3455 Register value,
3456 uint32_t encoding_mask) {
3457 Label is_object;
3458 SmiTst(string);
3459 Check(ne, kNonObject);
3460
3461 ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
3462 ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
3463
3464 and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
3465 cmp(ip, Operand(encoding_mask));
3466 Check(eq, kUnexpectedStringType);
3467
3468 // The index is assumed to be untagged coming in; tag it to compare with the
3469 // string length without using a temp register. It is restored at the end of
3470 // this function.
3471 Label index_tag_ok, index_tag_bad;
3472 TrySmiTag(index, index, &index_tag_bad);
3473 b(&index_tag_ok);
3474 bind(&index_tag_bad);
3475 Abort(kIndexIsTooLarge);
3476 bind(&index_tag_ok);
3477
3478 ldr(ip, FieldMemOperand(string, String::kLengthOffset));
3479 cmp(index, ip);
3480 Check(lt, kIndexIsTooLarge);
3481
3482 cmp(index, Operand(Smi::kZero));
3483 Check(ge, kIndexIsNegative);
3484
3485 SmiUntag(index, index);
3486 }
3487
3488
3489 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3490 int num_double_arguments,
3491 Register scratch) {
3492 int frame_alignment = ActivationFrameAlignment();
3493 int stack_passed_arguments = CalculateStackPassedWords(
3494 num_reg_arguments, num_double_arguments);
3495 if (frame_alignment > kPointerSize) {
3496 // Make stack end at alignment and make room for the stack-passed arguments
3497 // and the original value of sp.
3498 mov(scratch, sp);
3499 sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
3500 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3501 and_(sp, sp, Operand(-frame_alignment));
3502 str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3503 } else {
3504 sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3505 }
3506 }
3507
3508
3509 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3510 Register scratch) {
3511 PrepareCallCFunction(num_reg_arguments, 0, scratch);
3512 }
3513
3514
3515 void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
3516 DCHECK(src.is(d0));
3517 if (!use_eabi_hardfloat()) {
3518 vmov(r0, r1, src);
3519 }
3520 }
3521
3522
3523 // On ARM this is just a synonym to make the purpose clear.
3524 void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
3525 MovToFloatParameter(src);
3526 }
3527
3528
3529 void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
3530 DwVfpRegister src2) {
3531 DCHECK(src1.is(d0));
3532 DCHECK(src2.is(d1));
3533 if (!use_eabi_hardfloat()) {
3534 vmov(r0, r1, src1);
3535 vmov(r2, r3, src2);
3536 }
3537 }
3538
3539
3540 void MacroAssembler::CallCFunction(ExternalReference function,
3541 int num_reg_arguments,
3542 int num_double_arguments) {
3543 mov(ip, Operand(function));
3544 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3545 }
3546
3547
3548 void MacroAssembler::CallCFunction(Register function,
3549 int num_reg_arguments,
3550 int num_double_arguments) {
3551 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3552 }
3553
3554
3555 void MacroAssembler::CallCFunction(ExternalReference function,
3556 int num_arguments) {
3557 CallCFunction(function, num_arguments, 0);
3558 }
3559
3560
3561 void MacroAssembler::CallCFunction(Register function,
3562 int num_arguments) {
3563 CallCFunction(function, num_arguments, 0);
3564 }
3565
3566
3567 void MacroAssembler::CallCFunctionHelper(Register function,
3568 int num_reg_arguments,
3569 int num_double_arguments) {
3570 DCHECK(has_frame());
3571 // Make sure that the stack is aligned before calling a C function unless
3572 // running in the simulator. The simulator has its own alignment check which
3573 // provides more information.
3574 #if V8_HOST_ARCH_ARM
3575 if (emit_debug_code()) {
3576 int frame_alignment = base::OS::ActivationFrameAlignment();
3577 int frame_alignment_mask = frame_alignment - 1;
3578 if (frame_alignment > kPointerSize) {
3579 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3580 Label alignment_as_expected;
3581 tst(sp, Operand(frame_alignment_mask));
3582 b(eq, &alignment_as_expected);
3583 // Don't use Check here, as it will call Runtime_Abort and possibly
3584 // re-enter here.
3585 stop("Unexpected alignment");
3586 bind(&alignment_as_expected);
3587 }
3588 }
3589 #endif
3590
3591 // Just call directly. The function called cannot cause a GC, or
3592 // allow preemption, so the return address in the link register
3593 // stays correct.
3594 Call(function);
3595 int stack_passed_arguments = CalculateStackPassedWords(
3596 num_reg_arguments, num_double_arguments);
3597 if (ActivationFrameAlignment() > kPointerSize) {
3598 ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
3599 } else {
3600 add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3601 }
3602 }
3603
3604
3605 void MacroAssembler::CheckPageFlag(
3606 Register object,
3607 Register scratch,
3608 int mask,
3609 Condition cc,
3610 Label* condition_met) {
3611 DCHECK(cc == eq || cc == ne);
3612 Bfc(scratch, object, 0, kPageSizeBits);
3613 ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3614 tst(scratch, Operand(mask));
3615 b(cc, condition_met);
3616 }
3617
3618
3619 void MacroAssembler::JumpIfBlack(Register object,
3620 Register scratch0,
3621 Register scratch1,
3622 Label* on_black) {
3623 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
3624 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
3625 }
3626
3627
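// Reads the two consecutive mark bits for 'object' and jumps to has_color if
// they equal (first_bit, second_bit). The second bit may live in the next
// bitmap cell, which the word_boundary path handles.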
3628 void MacroAssembler::HasColor(Register object,
3629 Register bitmap_scratch,
3630 Register mask_scratch,
3631 Label* has_color,
3632 int first_bit,
3633 int second_bit) {
3634 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3635
3636 GetMarkBits(object, bitmap_scratch, mask_scratch);
3637
3638 Label other_color, word_boundary;
3639 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3640 tst(ip, Operand(mask_scratch));
3641 b(first_bit == 1 ? eq : ne, &other_color);
3642 // Shift the mask left by 1 by adding it to itself.
3643 add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
3644 b(eq, &word_boundary);
3645 tst(ip, Operand(mask_scratch));
3646 b(second_bit == 1 ? ne : eq, has_color);
3647 jmp(&other_color);
3648
3649 bind(&word_boundary);
3650 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
3651 tst(ip, Operand(1));
3652 b(second_bit == 1 ? ne : eq, has_color);
3653 bind(&other_color);
3654 }
3655
3656
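// Computes into bitmap_reg the page base plus the byte offset of the bitmap
// cell holding addr_reg's mark bits, and into mask_reg a one-bit mask for the
// first mark bit within that cell; callers add MemoryChunk::kHeaderSize when
// loading the cell. ip is clobbered as a scratch register.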
3657 void MacroAssembler::GetMarkBits(Register addr_reg,
3658 Register bitmap_reg,
3659 Register mask_reg) {
3660 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3661 and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
3662 Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
3663 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3664 Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
3665 add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
3666 mov(ip, Operand(1));
3667 mov(mask_reg, Operand(ip, LSL, mask_reg));
3668 }
3669
3670
3671 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
3672 Register mask_scratch, Register load_scratch,
3673 Label* value_is_white) {
3674 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3675 GetMarkBits(value, bitmap_scratch, mask_scratch);
3676
3677 // If the value is black or grey we don't need to do anything.
3678 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3679 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
3680 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
3681 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3682
3683 // Since both black and grey have a 1 in the first position and white does
3684 // not have a 1 there, we only need to check one bit.
3685 ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3686 tst(mask_scratch, load_scratch);
3687 b(eq, value_is_white);
3688 }
3689
3690
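// usat saturates the signed input to the unsigned 8-bit range: values below
// zero become 0 and values above 255 become 255.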
3691 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3692 usat(output_reg, 8, Operand(input_reg));
3693 }
3694
3695
3696 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3697 DwVfpRegister input_reg,
3698 LowDwVfpRegister double_scratch) {
3699 Label done;
3700
3701 // Handle inputs >= 255 (including +infinity).
3702 Vmov(double_scratch, 255.0, result_reg);
3703 mov(result_reg, Operand(255));
3704 VFPCompareAndSetFlags(input_reg, double_scratch);
3705 b(ge, &done);
3706
3707 // For inputs < 255 (including negative), vcvt_u32_f64 with round-to-nearest
3708 // rounding mode will provide the correct result.
3709 vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
3710 vmov(result_reg, double_scratch.low());
3711
3712 bind(&done);
3713 }
3714
3715
3716 void MacroAssembler::LoadInstanceDescriptors(Register map,
3717 Register descriptors) {
3718 ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3719 }
3720
3721
3722 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3723 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3724 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3725 }
3726
3727
3728 void MacroAssembler::EnumLength(Register dst, Register map) {
3729 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3730 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3731 and_(dst, dst, Operand(Map::EnumLengthBits::kMask));
3732 SmiTag(dst);
3733 }
3734
3735
3736 void MacroAssembler::LoadAccessor(Register dst, Register holder,
3737 int accessor_index,
3738 AccessorComponent accessor) {
3739 ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
3740 LoadInstanceDescriptors(dst, dst);
3741 ldr(dst,
3742 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
3743 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
3744 : AccessorPair::kSetterOffset;
3745 ldr(dst, FieldMemOperand(dst, offset));
3746 }
3747
3748
3749 void MacroAssembler::CheckEnumCache(Label* call_runtime) {
3750 Register null_value = r5;
3751 Register empty_fixed_array_value = r6;
3752 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3753 Label next, start;
3754 mov(r2, r0);
3755
3756 // Check if the enum length field is properly initialized, indicating that
3757 // there is an enum cache.
3758 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3759
3760 EnumLength(r3, r1);
3761 cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
3762 b(eq, call_runtime);
3763
3764 LoadRoot(null_value, Heap::kNullValueRootIndex);
3765 jmp(&start);
3766
3767 bind(&next);
3768 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3769
3770 // For all objects but the receiver, check that the cache is empty.
3771 EnumLength(r3, r1);
3772 cmp(r3, Operand(Smi::kZero));
3773 b(ne, call_runtime);
3774
3775 bind(&start);
3776
3777 // Check that there are no elements. Register r2 contains the current JS
3778 // object we've reached through the prototype chain.
3779 Label no_elements;
3780 ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
3781 cmp(r2, empty_fixed_array_value);
3782 b(eq, &no_elements);
3783
3784 // Second chance, the object may be using the empty slow element dictionary.
3785 CompareRoot(r2, Heap::kEmptySlowElementDictionaryRootIndex);
3786 b(ne, call_runtime);
3787
3788 bind(&no_elements);
3789 ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
3790 cmp(r2, null_value);
3791 b(ne, &next);
3792 }
3793
3794 void MacroAssembler::TestJSArrayForAllocationMemento(
3795 Register receiver_reg,
3796 Register scratch_reg,
3797 Label* no_memento_found) {
3798 Label map_check;
3799 Label top_check;
3800 ExternalReference new_space_allocation_top_adr =
3801 ExternalReference::new_space_allocation_top_address(isolate());
3802 const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
3803 const int kMementoLastWordOffset =
3804 kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
3805
3806 // Bail out if the object is not in new space.
3807 JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
3808 // If the object is in new space, we need to check whether it is on the same
3809 // page as the current top.
3810 add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
3811 mov(ip, Operand(new_space_allocation_top_adr));
3812 ldr(ip, MemOperand(ip));
3813 eor(scratch_reg, scratch_reg, Operand(ip));
3814 tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
3815 b(eq, &top_check);
3816 // The object is on a different page than allocation top. Bail out if the
3817 // object sits on the page boundary as no memento can follow and we cannot
3818 // touch the memory following it.
3819 add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
3820 eor(scratch_reg, scratch_reg, Operand(receiver_reg));
3821 tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
3822 b(ne, no_memento_found);
3823 // Continue with the actual map check.
3824 jmp(&map_check);
3825 // If top is on the same page as the current object, we need to check whether
3826 // we are below top.
3827 bind(&top_check);
3828 add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
3829 mov(ip, Operand(new_space_allocation_top_adr));
3830 ldr(ip, MemOperand(ip));
3831 cmp(scratch_reg, ip);
3832 b(ge, no_memento_found);
3833 // Memento map check.
3834 bind(&map_check);
3835 ldr(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
3836 cmp(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()));
3837 }
3838
3839 Register GetRegisterThatIsNotOneOf(Register reg1,
3840 Register reg2,
3841 Register reg3,
3842 Register reg4,
3843 Register reg5,
3844 Register reg6) {
3845 RegList regs = 0;
3846 if (reg1.is_valid()) regs |= reg1.bit();
3847 if (reg2.is_valid()) regs |= reg2.bit();
3848 if (reg3.is_valid()) regs |= reg3.bit();
3849 if (reg4.is_valid()) regs |= reg4.bit();
3850 if (reg5.is_valid()) regs |= reg5.bit();
3851 if (reg6.is_valid()) regs |= reg6.bit();
3852
3853 const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
3854 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
3855 int code = config->GetAllocatableGeneralCode(i);
3856 Register candidate = Register::from_code(code);
3857 if (regs & candidate.bit()) continue;
3858 return candidate;
3859 }
3860 UNREACHABLE();
3861 return no_reg;
3862 }
3863
3864
3865 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
3866 Register object,
3867 Register scratch0,
3868 Register scratch1,
3869 Label* found) {
3870 DCHECK(!scratch1.is(scratch0));
3871 Register current = scratch0;
3872 Label loop_again, end;
3873
3874 // Use scratch0 (current) to walk up the object's prototype chain.
3875 mov(current, object);
3876 ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
3877 ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
3878 CompareRoot(current, Heap::kNullValueRootIndex);
3879 b(eq, &end);
3880
3881 // Loop based on the map going up the prototype chain.
3882 bind(&loop_again);
3883 ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
3884
3885 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
3886 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
3887 ldrb(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
3888 cmp(scratch1, Operand(JS_OBJECT_TYPE));
3889 b(lo, found);
3890
3891 ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
3892 DecodeField<Map::ElementsKindBits>(scratch1);
3893 cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
3894 b(eq, found);
3895 ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
3896 CompareRoot(current, Heap::kNullValueRootIndex);
3897 b(ne, &loop_again);
3898
3899 bind(&end);
3900 }
3901
3902
3903 #ifdef DEBUG
3904 bool AreAliased(Register reg1,
3905 Register reg2,
3906 Register reg3,
3907 Register reg4,
3908 Register reg5,
3909 Register reg6,
3910 Register reg7,
3911 Register reg8) {
3912 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
3913 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
3914 reg7.is_valid() + reg8.is_valid();
3915
3916 RegList regs = 0;
3917 if (reg1.is_valid()) regs |= reg1.bit();
3918 if (reg2.is_valid()) regs |= reg2.bit();
3919 if (reg3.is_valid()) regs |= reg3.bit();
3920 if (reg4.is_valid()) regs |= reg4.bit();
3921 if (reg5.is_valid()) regs |= reg5.bit();
3922 if (reg6.is_valid()) regs |= reg6.bit();
3923 if (reg7.is_valid()) regs |= reg7.bit();
3924 if (reg8.is_valid()) regs |= reg8.bit();
3925 int n_of_non_aliasing_regs = NumRegs(regs);
3926
3927 return n_of_valid_regs != n_of_non_aliasing_regs;
3928 }
3929 #endif
3930
3931
3932 CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
3933 FlushICache flush_cache)
3934 : address_(address),
3935 size_(instructions * Assembler::kInstrSize),
3936 masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
3937 flush_cache_(flush_cache) {
3938 // Create a new macro assembler pointing to the address of the code to patch.
3939 // The size is adjusted with kGap in order for the assembler to generate size
3940 // bytes of instructions without failing with buffer size constraints.
3941 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3942 }
3943
3944
3945 CodePatcher::~CodePatcher() {
3946 // Indicate that code has changed.
3947 if (flush_cache_ == FLUSH) {
3948 Assembler::FlushICache(masm_.isolate(), address_, size_);
3949 }
3950
3951 // Check that we don't have any pending constant pools.
3952 DCHECK(masm_.pending_32_bit_constants_.empty());
3953 DCHECK(masm_.pending_64_bit_constants_.empty());
3954
3955 // Check that the code was patched as expected.
3956 DCHECK(masm_.pc_ == address_ + size_);
3957 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3958 }
3959
3960
3961 void CodePatcher::Emit(Instr instr) {
3962 masm()->emit(instr);
3963 }
3964
3965
3966 void CodePatcher::Emit(Address addr) {
3967 masm()->emit(reinterpret_cast<Instr>(addr));
3968 }
3969
3970
3971 void CodePatcher::EmitCondition(Condition cond) {
3972 Instr instr = Assembler::instr_at(masm_.pc_);
3973 instr = (instr & ~kCondMask) | cond;
3974 masm_.emit(instr);
3975 }
3976
3977
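// Signed truncating division by a compile-time constant, using the standard
// multiply-by-magic-number technique: result ~= (dividend * multiplier) >> 32,
// corrected for the signs of the multiplier and divisor, arithmetically
// shifted, and rounded towards zero by adding the dividend's sign bit.
// For example, for divisor 3 the magic multiplier works out to 0x55555556
// (about 2^32 / 3) with shift 0: 7 maps to 2 directly, while -7 maps to -3
// before the final add of the sign bit corrects it to -2.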
3978 void MacroAssembler::TruncatingDiv(Register result,
3979 Register dividend,
3980 int32_t divisor) {
3981 DCHECK(!dividend.is(result));
3982 DCHECK(!dividend.is(ip));
3983 DCHECK(!result.is(ip));
3984 base::MagicNumbersForDivision<uint32_t> mag =
3985 base::SignedDivisionByConstant(bit_cast<uint32_t>(divisor));
3986 mov(ip, Operand(mag.multiplier));
3987 bool neg = (mag.multiplier & (1U << 31)) != 0;
3988 if (divisor > 0 && neg) {
3989 smmla(result, dividend, ip, dividend);
3990 } else {
3991 smmul(result, dividend, ip);
3992 if (divisor < 0 && !neg && mag.multiplier > 0) {
3993 sub(result, result, Operand(dividend));
3994 }
3995 }
3996 if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift));
3997 add(result, result, Operand(dividend, LSR, 31));
3998 }
3999
4000 } // namespace internal
4001 } // namespace v8
4002
4003 #endif // V8_TARGET_ARCH_ARM
4004