// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
#include "src/runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


void MacroAssembler::Jump(Register target, Condition cond) {
  bx(target, cond);
}


void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  mov(pc, Operand(target, rmode), LeaveCC, cond);
}


void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never THUMB code.
  AllowDeferredHandleDereference embedding_raw_address;
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


int MacroAssembler::CallSize(Register target, Condition cond) {
  return kInstrSize;
}


void MacroAssembler::Call(Register target, Condition cond) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  Label start;
  bind(&start);
  blx(target, cond);
  DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}


int MacroAssembler::CallSize(
    Address target, RelocInfo::Mode rmode, Condition cond) {
  Instr mov_instr = cond | MOV | LeaveCC;
  Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
  return kInstrSize +
         mov_operand.instructions_required(this, mov_instr) * kInstrSize;
}


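// Returns the size, in bytes, of the call sequence used to call |stub|.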
int MacroAssembler::CallStubSize(
    CodeStub* stub, TypeFeedbackId ast_id, Condition cond) {
  return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
}


int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate,
                                                   Address target,
                                                   RelocInfo::Mode rmode,
                                                   Condition cond) {
  Instr mov_instr = cond | MOV | LeaveCC;
  Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
  return kInstrSize +
         mov_operand.instructions_required(NULL, mov_instr) * kInstrSize;
}


void MacroAssembler::Call(Address target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          TargetAddressStorageMode mode) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  Label start;
  bind(&start);

  bool old_predictable_code_size = predictable_code_size();
  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    set_predictable_code_size(true);
  }

#ifdef DEBUG
  // Check the expected size before generating code to ensure we assume the
  // same constant pool availability (e.g., whether the constant pool is full
  // or not).
  int expected_size = CallSize(target, rmode, cond);
#endif

  // Call sequence on V7 or later may be:
  //  movw  ip, #... @ call address low 16
  //  movt  ip, #... @ call address high 16
  //  blx   ip
  //                      @ return address
  // Or for pre-V7 or values that may be back-patched
  // to avoid ICache flushes:
  //  ldr   ip, [pc, #...] @ call address
  //  blx   ip
  //                      @ return address

  // Statement positions are expected to be recorded when the target
  // address is loaded. The mov method will automatically record
  // positions when pc is the target; since that is not the case here,
  // we have to do it explicitly.
  positions_recorder()->WriteRecordedPositions();

  mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
  blx(ip, cond);

  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    set_predictable_code_size(old_predictable_code_size);
  }
}


int MacroAssembler::CallSize(Handle<Code> code,
                             RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id,
                             Condition cond) {
  AllowDeferredHandleDereference using_raw_address;
  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}


void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id,
                          Condition cond,
                          TargetAddressStorageMode mode) {
  Label start;
  bind(&start);
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }
  // 'code' is always generated ARM code, never THUMB code.
  AllowDeferredHandleDereference embedding_raw_address;
  Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
}


void MacroAssembler::Ret(Condition cond) {
  bx(lr, cond);
}


void MacroAssembler::Drop(int count, Condition cond) {
  if (count > 0) {
    add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
  }
}


void MacroAssembler::Ret(int drop, Condition cond) {
  Drop(drop, cond);
  Ret(cond);
}


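// Swap the contents of reg1 and reg2. When no scratch register is provided,
// the three-EOR exchange is used instead of a temporary move.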
void MacroAssembler::Swap(Register reg1,
                          Register reg2,
                          Register scratch,
                          Condition cond) {
  if (scratch.is(no_reg)) {
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
    eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
  } else {
    mov(scratch, reg1, LeaveCC, cond);
    mov(reg1, reg2, LeaveCC, cond);
    mov(reg2, scratch, LeaveCC, cond);
  }
}


void MacroAssembler::Call(Label* target) {
  bl(target);
}


void MacroAssembler::Push(Handle<Object> handle) {
  mov(ip, Operand(handle));
  push(ip);
}


void MacroAssembler::Move(Register dst, Handle<Object> value) {
  AllowDeferredHandleDereference smi_check;
  if (value->IsSmi()) {
    mov(dst, Operand(value));
  } else {
    DCHECK(value->IsHeapObject());
    if (isolate()->heap()->InNewSpace(*value)) {
      Handle<Cell> cell = isolate()->factory()->NewCell(value);
      mov(dst, Operand(cell));
      ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
    } else {
      mov(dst, Operand(value));
    }
  }
}


void MacroAssembler::Move(Register dst, Register src, Condition cond) {
  if (!dst.is(src)) {
    mov(dst, src, LeaveCC, cond);
  }
}


void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
  if (!dst.is(src)) {
    vmov(dst, src);
  }
}


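// Multiply-and-subtract: dst = srcA - src1 * src2. Uses the MLS instruction
// when available; otherwise falls back to a mul into ip followed by a sub.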
void MacroAssembler::Mls(Register dst, Register src1, Register src2,
                         Register srcA, Condition cond) {
  if (CpuFeatures::IsSupported(MLS)) {
    CpuFeatureScope scope(this, MLS);
    mls(dst, src1, src2, srcA, cond);
  } else {
    DCHECK(!srcA.is(ip));
    mul(ip, src1, src2, LeaveCC, cond);
    sub(dst, srcA, ip, LeaveCC, cond);
  }
}


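// Bitwise AND that prefers a shorter encoding: an immediate mask of zero
// becomes a mov, and a mask of the form 2^n - 1 becomes a ubfx on ARMv7.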
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                         Condition cond) {
  if (!src2.is_reg() &&
      !src2.must_output_reloc_info(this) &&
      src2.immediate() == 0) {
    mov(dst, Operand::Zero(), LeaveCC, cond);
  } else if (!(src2.instructions_required(this) == 1) &&
             !src2.must_output_reloc_info(this) &&
             CpuFeatures::IsSupported(ARMv7) &&
             base::bits::IsPowerOfTwo32(src2.immediate() + 1)) {
    ubfx(dst, src1, 0,
        WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
  } else {
    and_(dst, src1, src2, LeaveCC, cond);
  }
}


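// Unsigned bit-field extract of |width| bits starting at |lsb|. Emulated with
// an and + shift when ARMv7 is unavailable or predictable code size is
// required.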
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  DCHECK(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    if (lsb != 0) {
      mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
    }
  } else {
    ubfx(dst, src1, lsb, width, cond);
  }
}


void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  DCHECK(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    int shift_up = 32 - lsb - width;
    int shift_down = lsb + shift_up;
    if (shift_up != 0) {
      mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
    }
    if (shift_down != 0) {
      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
    }
  } else {
    sbfx(dst, src1, lsb, width, cond);
  }
}


void MacroAssembler::Bfi(Register dst,
                         Register src,
                         Register scratch,
                         int lsb,
                         int width,
                         Condition cond) {
  DCHECK(0 <= lsb && lsb < 32);
  DCHECK(0 <= width && width < 32);
  DCHECK(lsb + width < 32);
  DCHECK(!scratch.is(dst));
  if (width == 0) return;
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
    and_(scratch, src, Operand((1 << width) - 1));
    mov(scratch, Operand(scratch, LSL, lsb));
    orr(dst, dst, scratch);
  } else {
    bfi(dst, src, lsb, width, cond);
  }
}


void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
                         Condition cond) {
  DCHECK(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, src, Operand(mask));
  } else {
    Move(dst, src, cond);
    bfc(dst, lsb, width, cond);
  }
}


void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
                          Condition cond) {
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    DCHECK(!dst.is(pc) && !src.rm().is(pc));
    DCHECK((satpos >= 0) && (satpos <= 31));

    // These asserts are required to ensure compatibility with the ARMv7
    // implementation.
    DCHECK((src.shift_op() == ASR) || (src.shift_op() == LSL));
    DCHECK(src.rs().is(no_reg));

    Label done;
    int satval = (1 << satpos) - 1;

    if (cond != al) {
      b(NegateCondition(cond), &done);  // Skip saturate if !condition.
    }
    if (!(src.is_reg() && dst.is(src.rm()))) {
      mov(dst, src);
    }
    tst(dst, Operand(~satval));
    b(eq, &done);
    mov(dst, Operand::Zero(), LeaveCC, mi);  // 0 if negative.
    mov(dst, Operand(satval), LeaveCC, pl);  // satval if positive.
    bind(&done);
  } else {
    usat(dst, satpos, src, cond);
  }
}


void MacroAssembler::Load(Register dst,
                          const MemOperand& src,
                          Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    ldrsb(dst, src);
  } else if (r.IsUInteger8()) {
    ldrb(dst, src);
  } else if (r.IsInteger16()) {
    ldrsh(dst, src);
  } else if (r.IsUInteger16()) {
    ldrh(dst, src);
  } else {
    ldr(dst, src);
  }
}


void MacroAssembler::Store(Register src,
                           const MemOperand& dst,
                           Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    strb(src, dst);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    strh(src, dst);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    str(src, dst);
  }
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond) {
  if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
      isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
      !predictable_code_size()) {
    // The CPU supports fast immediate values, and this root will never
    // change. We will load it as a relocatable immediate value.
    Handle<Object> root(&isolate()->heap()->roots_array_start()[index]);
    mov(destination, Operand(root), LeaveCC, cond);
    return;
  }
  ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond) {
  str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cond,
                                Label* branch) {
  DCHECK(cond == eq || cond == ne);
  and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
  b(cond, branch);
}


void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  add(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
    b(eq, &ok);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object,
              dst,
              value,
              lr_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK,
              pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
    mov(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
  }
}


// Will clobber 4 registers: object, map, dst, ip.  The
// register 'object' contains a heap object pointer.
void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       LinkRegisterStatus lr_status,
                                       SaveFPRegsMode fp_mode) {
  if (emit_debug_code()) {
    ldr(dst, FieldMemOperand(map, HeapObject::kMapOffset));
    cmp(dst, Operand(isolate()->factory()->meta_map()));
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset));
    cmp(ip, map);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  Label done;

  // A single check of the map's page's interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set.  This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                eq,
                &done);

  add(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
    b(eq, &ok);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(lr);
  }
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(lr);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
    mov(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}


// Will clobber 4 registers: object, address, scratch, ip.  The
// register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!object.is(value));
  if (emit_debug_code()) {
    ldr(ip, MemOperand(address));
    cmp(ip, value);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  eq,
                  &done);
  }
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                eq,
                &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(lr);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(lr);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
                   value);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
    mov(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}


void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address,
                                         Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(ip, Operand(store_buffer));
  ldr(scratch, MemOperand(ip));
  // Store pointer to buffer and increment buffer top.
  str(address, MemOperand(scratch, kPointerSize, PostIndex));
  // Write back new top of buffer.
  str(scratch, MemOperand(ip));
  // Call stub on end of buffer.
  // Check for end of buffer.
  tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kFallThroughAtEnd) {
    b(eq, &done);
  } else {
    DCHECK(and_then == kReturnAtEnd);
    Ret(eq);
  }
  push(lr);
  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
  CallStub(&store_buffer_overflow);
  pop(lr);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}


void MacroAssembler::PushFixedFrame(Register marker_reg) {
  DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
  stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
                cp.bit() |
                (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
                fp.bit() |
                lr.bit());
}


void MacroAssembler::PopFixedFrame(Register marker_reg) {
  DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
  ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
                cp.bit() |
                (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
                fp.bit() |
                lr.bit());
}


// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of contiguous register values starting with r0:
  DCHECK(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK(num_unsaved >= 0);
  sub(sp, sp, Operand(num_unsaved * kPointerSize));
  stm(db_w, sp, kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ldm(ia_w, sp, kSafepointSavedRegisters);
  add(sp, sp, Operand(num_unsaved * kPointerSize));
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  str(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  ldr(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
  return reg_code;
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  // Number of d-regs not known at snapshot time.
  DCHECK(!serializer_enabled());
  // General purpose registers are pushed last on the stack.
  int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}


void MacroAssembler::Ldrd(Register dst1, Register dst2,
                          const MemOperand& src, Condition cond) {
  DCHECK(src.rm().is(no_reg));
  DCHECK(!dst1.is(lr));  // r14.

  // V8 does not use this addressing mode, so the fallback code
  // below doesn't support it yet.
  DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex));

  // Generate two ldr instructions if ldrd is not available.
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
      (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
    CpuFeatureScope scope(this, ARMv7);
    ldrd(dst1, dst2, src, cond);
  } else {
    if ((src.am() == Offset) || (src.am() == NegOffset)) {
      MemOperand src2(src);
      src2.set_offset(src2.offset() + 4);
      if (dst1.is(src.rn())) {
        ldr(dst2, src2, cond);
        ldr(dst1, src, cond);
      } else {
        ldr(dst1, src, cond);
        ldr(dst2, src2, cond);
      }
    } else {  // PostIndex or NegPostIndex.
      DCHECK((src.am() == PostIndex) || (src.am() == NegPostIndex));
      if (dst1.is(src.rn())) {
        ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
        ldr(dst1, src, cond);
      } else {
        MemOperand src2(src);
        src2.set_offset(src2.offset() - 4);
        ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
        ldr(dst2, src2, cond);
      }
    }
  }
}


void MacroAssembler::Strd(Register src1, Register src2,
                          const MemOperand& dst, Condition cond) {
  DCHECK(dst.rm().is(no_reg));
  DCHECK(!src1.is(lr));  // r14.

  // V8 does not use this addressing mode, so the fallback code
  // below doesn't support it yet.
  DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex));

  // Generate two str instructions if strd is not available.
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
      (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
    CpuFeatureScope scope(this, ARMv7);
    strd(src1, src2, dst, cond);
  } else {
    MemOperand dst2(dst);
    if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
      dst2.set_offset(dst2.offset() + 4);
      str(src1, dst, cond);
      str(src2, dst2, cond);
    } else {  // PostIndex or NegPostIndex.
      DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
      dst2.set_offset(dst2.offset() - 4);
      str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
      str(src2, dst2, cond);
    }
  }
}


void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
  // If needed, restore wanted bits of FPSCR.
  Label fpscr_done;
  vmrs(scratch);
  if (emit_debug_code()) {
    Label rounding_mode_correct;
    tst(scratch, Operand(kVFPRoundingModeMask));
    b(eq, &rounding_mode_correct);
    // Don't call Assert here, since Runtime_Abort could re-enter here.
    stop("Default rounding mode not set");
    bind(&rounding_mode_correct);
  }
  tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
  b(ne, &fpscr_done);
  orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
  vmsr(scratch);
  bind(&fpscr_done);
}


void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
                                        const DwVfpRegister src,
                                        const Condition cond) {
  vsub(dst, src, kDoubleRegZero, cond);
}


void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const DwVfpRegister src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const double src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}


void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const DwVfpRegister src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const double src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

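// Move a double immediate into a VFP register, special-casing +0.0 and -0.0,
// which are derived from kDoubleRegZero without loading a constant.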
void MacroAssembler::Vmov(const DwVfpRegister dst,
                          const double imm,
                          const Register scratch) {
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value_rep(imm);
  // Handle special values first.
  if (value_rep == zero) {
    vmov(dst, kDoubleRegZero);
  } else if (value_rep == minus_zero) {
    vneg(dst, kDoubleRegZero);
  } else {
    vmov(dst, imm, scratch);
  }
}


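// VmovHigh/VmovLow move the upper or lower 32 bits of a double register to or
// from a core register, using the S-register aliases for d0-d15.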
void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.high());
  } else {
    vmov(dst, VmovIndexHi, src);
  }
}


void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.high(), src);
  } else {
    vmov(dst, VmovIndexHi, src);
  }
}


void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.low());
  } else {
    vmov(dst, VmovIndexLo, src);
  }
}


void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.low(), src);
  } else {
    vmov(dst, VmovIndexLo, src);
  }
}


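// Load pp with the constant pool pointer stored in the current code object's
// header, using a pc-relative load. Only used when out-of-line constant pools
// are enabled.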
void MacroAssembler::LoadConstantPoolPointerRegister() {
  if (FLAG_enable_ool_constant_pool) {
    int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize -
        pc_offset() - Instruction::kPCReadOffset;
    DCHECK(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
    ldr(pp, MemOperand(pc, constant_pool_offset));
  }
}


void MacroAssembler::StubPrologue() {
  PushFixedFrame();
  Push(Smi::FromInt(StackFrame::STUB));
  // Adjust FP to point to saved FP.
  add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
  if (FLAG_enable_ool_constant_pool) {
    LoadConstantPoolPointerRegister();
    set_constant_pool_available(true);
  }
}


void MacroAssembler::Prologue(bool code_pre_aging) {
  { PredictableCodeSizeScope predictible_code_size_scope(
        this, kNoCodeAgeSequenceLength);
    // The following three instructions must remain together and unmodified
    // for code aging to work properly.
    if (code_pre_aging) {
      // Pre-age the code.
      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
      add(r0, pc, Operand(-8));
      ldr(pc, MemOperand(pc, -4));
      emit_code_stub_address(stub);
    } else {
      PushFixedFrame(r1);
      nop(ip.code());
      // Adjust FP to point to saved FP.
      add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    }
  }
  if (FLAG_enable_ool_constant_pool) {
    LoadConstantPoolPointerRegister();
    set_constant_pool_available(true);
  }
}


void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool) {
  // r0-r3: preserved
  PushFixedFrame();
  if (FLAG_enable_ool_constant_pool && load_constant_pool) {
    LoadConstantPoolPointerRegister();
  }
  mov(ip, Operand(Smi::FromInt(type)));
  push(ip);
  mov(ip, Operand(CodeObject()));
  push(ip);
  // Adjust FP to point to saved FP.
  add(fp, sp,
      Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}


int MacroAssembler::LeaveFrame(StackFrame::Type type) {
  // r0: preserved
  // r1: preserved
  // r2: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer, return address and constant pool pointer
  // (if FLAG_enable_ool_constant_pool).
  int frame_ends;
  if (FLAG_enable_ool_constant_pool) {
    add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
    frame_ends = pc_offset();
    ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
  } else {
    mov(sp, fp);
    frame_ends = pc_offset();
    ldm(ia_w, sp, fp.bit() | lr.bit());
  }
  return frame_ends;
}


void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
  // Set up the frame structure on the stack.
  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  Push(lr, fp);
  mov(fp, Operand(sp));  // Set up new frame pointer.
  // Reserve room for saved entry sp and code object.
  sub(sp, sp, Operand(ExitFrameConstants::kFrameSize));
  if (emit_debug_code()) {
    mov(ip, Operand::Zero());
    str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }
  if (FLAG_enable_ool_constant_pool) {
    str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
  }
  mov(ip, Operand(CodeObject()));
  str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  str(fp, MemOperand(ip));
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  str(cp, MemOperand(ip));

  // Optionally save all double registers.
  if (save_doubles) {
    SaveFPRegs(sp, ip);
    // Note that d0 will be accessible at
    //   fp - ExitFrameConstants::kFrameSize -
    //   DwVfpRegister::kMaxNumRegisters * kDoubleSize,
    // since the sp slot, code slot and constant pool slot (if
    // FLAG_enable_ool_constant_pool) were pushed after the fp.
  }

  // Reserve room for the return address and stack space, and align the frame
  // in preparation for calling the runtime function.
  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
  if (frame_alignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
  }

  // Set the exit frame sp value to point just before the return address
  // location.
  add(ip, sp, Operand(kPointerSize));
  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}


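// Fill in the map, smi-tagged length, and empty hash field of a freshly
// allocated string object.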
void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  SmiTag(scratch1, length);
  LoadRoot(scratch2, map_index);
  str(scratch1, FieldMemOperand(string, String::kLengthOffset));
  mov(scratch1, Operand(String::kEmptyHashField));
  str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
  str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}


int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // V8_HOST_ARCH_ARM
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_ARM
}


void MacroAssembler::LeaveExitFrame(bool save_doubles,
                                    Register argument_count,
                                    bool restore_context) {
  ConstantPoolUnavailableScope constant_pool_unavailable(this);

  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int offset = ExitFrameConstants::kFrameSize;
    sub(r3, fp,
        Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
    RestoreFPRegs(r3, ip);
  }

  // Clear top frame.
  mov(r3, Operand::Zero());
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  str(r3, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  if (restore_context) {
    mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
    ldr(cp, MemOperand(ip));
  }
#ifdef DEBUG
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  str(r3, MemOperand(ip));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  if (FLAG_enable_ool_constant_pool) {
    ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
  }
  mov(sp, Operand(fp));
  ldm(ia_w, sp, fp.bit() | lr.bit());
  if (argument_count.is_valid()) {
    add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
  }
}


void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
  if (use_eabi_hardfloat()) {
    Move(dst, d0);
  } else {
    vmov(dst, r0, r1);
  }
}


// On ARM this is just a synonym to make the purpose clear.
void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
  MovFromFloatResult(dst);
}


void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg,
                                    Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // setup registers according to contract with ArgumentsAdaptorTrampoline:
  //  r0: actual arguments count
  //  r1: function (passed through to callee)
  //  r2: expected arguments count

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  DCHECK(actual.is_immediate() || actual.reg().is(r0));
  DCHECK(expected.is_immediate() || expected.reg().is(r2));
  DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));

  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      mov(r0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(r2, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      cmp(expected.reg(), Operand(actual.immediate()));
      b(eq, &regular_invoke);
      mov(r0, Operand(actual.immediate()));
    } else {
      cmp(expected.reg(), Operand(actual.reg()));
      b(eq, &regular_invoke);
    }
  }

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      mov(r3, Operand(code_constant));
      add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
    }

    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        b(done);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}


void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code,
                 &done, &definitely_mismatches, flag,
                 call_wrapper);
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      Call(code);
      call_wrapper.AfterCall();
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      Jump(code);
    }

    // Continue here if InvokePrologue does handle the invocation due to
    // mismatched parameter counts.
    bind(&done);
  }
}


void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r1.
  DCHECK(fun.is(r1));

  Register expected_reg = r2;
  Register code_reg = r3;

  ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
  ldr(expected_reg,
      FieldMemOperand(code_reg,
                      SharedFunctionInfo::kFormalParameterCountOffset));
  SmiUntag(expected_reg);
  ldr(code_reg,
      FieldMemOperand(r1, JSFunction::kCodeEntryOffset));

  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r1.
  DCHECK(function.is(r1));

  // Get the function and setup the context.
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  // We call indirectly through the code field in the function to
  // allow recompilation to take effect without changing any of the
  // call sites.
  ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
  InvokeCode(r3, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  Move(r1, function);
  InvokeFunction(r1, expected, actual, flag, call_wrapper);
}


void MacroAssembler::IsObjectJSObjectType(Register heap_object,
                                          Register map,
                                          Register scratch,
                                          Label* fail) {
  ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
  IsInstanceJSObjectType(map, scratch, fail);
}


void MacroAssembler::IsInstanceJSObjectType(Register map,
                                            Register scratch,
                                            Label* fail) {
  ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  b(lt, fail);
  cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  b(gt, fail);
}


void MacroAssembler::IsObjectJSStringType(Register object,
                                          Register scratch,
                                          Label* fail) {
  DCHECK(kNotStringTag != 0);

  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  tst(scratch, Operand(kIsNotStringMask));
  b(ne, fail);
}


void MacroAssembler::IsObjectNameType(Register object,
                                      Register scratch,
                                      Label* fail) {
  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  cmp(scratch, Operand(LAST_NAME_TYPE));
  b(hi, fail);
}


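// Trigger a debug break by calling the Runtime::kDebugBreak entry through the
// C entry stub.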
DebugBreak()1385 void MacroAssembler::DebugBreak() {
1386   mov(r0, Operand::Zero());
1387   mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
1388   CEntryStub ces(isolate(), 1);
1389   DCHECK(AllowThisStubCall(&ces));
1390   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
1391 }
1392 
1393 
PushTryHandler(StackHandler::Kind kind,int handler_index)1394 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
1395                                     int handler_index) {
1396   // Adjust this code if not the case.
1397   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1398   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1399   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1400   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1401   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1402   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1403 
1404   // For the JSEntry handler, we must preserve r0-r4; r5-r6 are available.
1405   // We will build up the handler from the bottom by pushing on the stack.
1406   // Set up the code object (r5) and the state (r6) for pushing.
1407   unsigned state =
1408       StackHandler::IndexField::encode(handler_index) |
1409       StackHandler::KindField::encode(kind);
1410   mov(r5, Operand(CodeObject()));
1411   mov(r6, Operand(state));
1412 
1413   // Push the frame pointer, context, state, and code object.
1414   if (kind == StackHandler::JS_ENTRY) {
1415     mov(cp, Operand(Smi::FromInt(0)));  // Indicates no context.
1416     mov(ip, Operand::Zero());  // NULL frame pointer.
1417     stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
1418   } else {
1419     stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
1420   }
1421 
1422   // Link the current handler as the next handler.
1423   mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1424   ldr(r5, MemOperand(r6));
1425   push(r5);
1426   // Set this new handler as the current one.
1427   str(sp, MemOperand(r6));
1428 }
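// A minimal sketch of the handler frame that the STATIC_ASSERTs above pin
// down, assuming 32-bit pointers (hypothetical struct, illustration only):
//
//   struct StackHandlerLayout {
//     Address next;     // kNextOffset    == 0 * kPointerSize
//     Address code;     // kCodeOffset    == 1 * kPointerSize
//     unsigned state;   // kStateOffset   == 2 * kPointerSize (kind and index)
//     Address context;  // kContextOffset == 3 * kPointerSize
//     Address fp;       // kFPOffset      == 4 * kPointerSize
//   };  // sizeof(StackHandlerLayout) == StackHandlerConstants::kSize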
1429 
1430 
1431 void MacroAssembler::PopTryHandler() {
1432   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1433   pop(r1);
1434   mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1435   add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1436   str(r1, MemOperand(ip));
1437 }
1438 
1439 
1440 void MacroAssembler::JumpToHandlerEntry() {
1441   // Compute the handler entry address and jump to it.  The handler table is
1442   // a fixed array of (smi-tagged) code offsets.
1443   // r0 = exception, r1 = code object, r2 = state.
1444 
1445   ConstantPoolUnavailableScope constant_pool_unavailable(this);
1446   if (FLAG_enable_ool_constant_pool) {
1447     ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset));  // Constant pool.
1448   }
1449   ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset));  // Handler table.
1450   add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1451   mov(r2, Operand(r2, LSR, StackHandler::kKindWidth));  // Handler index.
1452   ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));  // Smi-tagged offset.
1453   add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
1454   add(pc, r1, Operand::SmiUntag(r2));  // Jump
1455 }
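// A rough C sketch of the dispatch above, assuming 32-bit smis with a one-bit
// tag (names hypothetical, illustration only):
//
//   uint32_t index = state >> StackHandler::kKindWidth;
//   int32_t smi_offset = handler_table[index];        // smi-tagged code offset
//   Address entry = code_start + (smi_offset >> 1);   // untag, add to code start
//   // ...and set pc = entry.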
1456 
1457 
1458 void MacroAssembler::Throw(Register value) {
1459   // Adjust this code if not the case.
1460   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1461   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1462   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1463   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1464   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1465   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1466 
1467   // The exception is expected in r0.
1468   if (!value.is(r0)) {
1469     mov(r0, value);
1470   }
1471   // Drop the stack pointer to the top of the top handler.
1472   mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1473   ldr(sp, MemOperand(r3));
1474   // Restore the next handler.
1475   pop(r2);
1476   str(r2, MemOperand(r3));
1477 
1478   // Get the code object (r1) and state (r2).  Restore the context and frame
1479   // pointer.
1480   ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1481 
1482   // If the handler is a JS frame, restore the context to the frame.
1483   // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
1484   // or cp.
1485   tst(cp, cp);
1486   str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
1487 
1488   JumpToHandlerEntry();
1489 }
1490 
1491 
1492 void MacroAssembler::ThrowUncatchable(Register value) {
1493   // Adjust this code if not the case.
1494   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1495   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1496   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1497   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1498   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1499   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1500 
1501   // The exception is expected in r0.
1502   if (!value.is(r0)) {
1503     mov(r0, value);
1504   }
1505   // Drop the stack pointer to the top of the top stack handler.
1506   mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1507   ldr(sp, MemOperand(r3));
1508 
1509   // Unwind the handlers until the ENTRY handler is found.
1510   Label fetch_next, check_kind;
1511   jmp(&check_kind);
1512   bind(&fetch_next);
1513   ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
1514 
1515   bind(&check_kind);
1516   STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
1517   ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
1518   tst(r2, Operand(StackHandler::KindField::kMask));
1519   b(ne, &fetch_next);
1520 
1521   // Set the top handler address to next handler past the top ENTRY handler.
1522   pop(r2);
1523   str(r2, MemOperand(r3));
1524   // Get the code object (r1) and state (r2).  Clear the context and frame
1525   // pointer (0 was saved in the handler).
1526   ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1527 
1528   JumpToHandlerEntry();
1529 }
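// The unwinding loop above behaves roughly like this sketch (hypothetical
// names, illustration only):
//
//   StackHandler* handler = *isolate->handler_address();
//   while (StackHandler::KindField::decode(handler->state) !=
//          StackHandler::JS_ENTRY) {
//     handler = handler->next;
//   }
//   *isolate->handler_address() = handler->next;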
1530 
1531 
1532 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1533                                             Register scratch,
1534                                             Label* miss) {
1535   Label same_contexts;
1536 
1537   DCHECK(!holder_reg.is(scratch));
1538   DCHECK(!holder_reg.is(ip));
1539   DCHECK(!scratch.is(ip));
1540 
1541   // Load current lexical context from the stack frame.
1542   ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1543   // In debug mode, make sure the lexical context is set.
1544 #ifdef DEBUG
1545   cmp(scratch, Operand::Zero());
1546   Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
1547 #endif
1548 
1549   // Load the native context of the current context.
1550   int offset =
1551       Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1552   ldr(scratch, FieldMemOperand(scratch, offset));
1553   ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
1554 
1555   // Check the context is a native context.
1556   if (emit_debug_code()) {
1557     // Cannot use ip as a temporary in this verification code, because ip is
1558     // clobbered as part of cmp with an object Operand.
1559     push(holder_reg);  // Temporarily save holder on the stack.
1560     // Read the first word and compare to the native_context_map.
1561     ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1562     LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1563     cmp(holder_reg, ip);
1564     Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1565     pop(holder_reg);  // Restore holder.
1566   }
1567 
1568   // Check if both contexts are the same.
1569   ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1570   cmp(scratch, Operand(ip));
1571   b(eq, &same_contexts);
1572 
1573   // Check the context is a native context.
1574   if (emit_debug_code()) {
1575     // Cannot use ip as a temporary in this verification code, because ip is
1576     // clobbered as part of cmp with an object Operand.
1577     push(holder_reg);  // Temporarily save holder on the stack.
1578     mov(holder_reg, ip);  // Move ip to its holding place.
1579     LoadRoot(ip, Heap::kNullValueRootIndex);
1580     cmp(holder_reg, ip);
1581     Check(ne, kJSGlobalProxyContextShouldNotBeNull);
1582 
1583     ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1584     LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1585     cmp(holder_reg, ip);
1586     Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1587     // Restoring ip is not needed; ip is reloaded below.
1588     pop(holder_reg);  // Restore holder.
1589     // Restore ip to holder's context.
1590     ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1591   }
1592 
1593   // Check that the security token in the calling global object is
1594   // compatible with the security token in the receiving global
1595   // object.
1596   int token_offset = Context::kHeaderSize +
1597                      Context::SECURITY_TOKEN_INDEX * kPointerSize;
1598 
1599   ldr(scratch, FieldMemOperand(scratch, token_offset));
1600   ldr(ip, FieldMemOperand(ip, token_offset));
1601   cmp(scratch, Operand(ip));
1602   b(ne, miss);
1603 
1604   bind(&same_contexts);
1605 }
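// The access check above boils down to this sketch (hypothetical accessors,
// illustration only):
//
//   if (current_native_context == holder_native_context) return;  // same_contexts
//   if (current_native_context->security_token() !=
//       holder_native_context->security_token()) {
//     goto miss;
//   }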
1606 
1607 
1608 // Compute the hash code from the untagged key.  This must be kept in sync with
1609 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
1610 // code-stub-hydrogen.cc
1611 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1612   // First of all, we assign the hash seed to scratch.
1613   LoadRoot(scratch, Heap::kHashSeedRootIndex);
1614   SmiUntag(scratch);
1615 
1616   // Xor the original key with the seed.
1617   eor(t0, t0, Operand(scratch));
1618 
1619   // Compute the hash code from the untagged key.  This must be kept in sync
1620   // with ComputeIntegerHash in utils.h.
1621   //
1622   // hash = ~hash + (hash << 15);
1623   mvn(scratch, Operand(t0));
1624   add(t0, scratch, Operand(t0, LSL, 15));
1625   // hash = hash ^ (hash >> 12);
1626   eor(t0, t0, Operand(t0, LSR, 12));
1627   // hash = hash + (hash << 2);
1628   add(t0, t0, Operand(t0, LSL, 2));
1629   // hash = hash ^ (hash >> 4);
1630   eor(t0, t0, Operand(t0, LSR, 4));
1631   // hash = hash * 2057;
1632   mov(scratch, Operand(t0, LSL, 11));
1633   add(t0, t0, Operand(t0, LSL, 3));
1634   add(t0, t0, scratch);
1635   // hash = hash ^ (hash >> 16);
1636   eor(t0, t0, Operand(t0, LSR, 16));
1637 }
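// The instruction sequence above computes the same function as this C sketch
// (the authoritative version is ComputeIntegerHash in utils.h):
//
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;  // emitted as hash + (hash << 3) + (hash << 11)
//   hash = hash ^ (hash >> 16);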
1638 
1639 
1640 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
1641                                               Register elements,
1642                                               Register key,
1643                                               Register result,
1644                                               Register t0,
1645                                               Register t1,
1646                                               Register t2) {
1647   // Register use:
1648   //
1649   // elements - holds the slow-case elements of the receiver on entry.
1650   //            Unchanged unless 'result' is the same register.
1651   //
1652   // key      - holds the smi key on entry.
1653   //            Unchanged unless 'result' is the same register.
1654   //
1655   // result   - holds the result on exit if the load succeeded.
1656   //            Allowed to be the same as 'elements' or 'key'.
1657   //            Unchanged on bailout so 'elements' or 'key' can be used
1658   //            in further computation.
1659   //
1660   // Scratch registers:
1661   //
1662   // t0 - holds the untagged key on entry and holds the hash once computed.
1663   //
1664   // t1 - used to hold the capacity mask of the dictionary
1665   //
1666   // t2 - used for the index into the dictionary.
1667   Label done;
1668 
1669   GetNumberHash(t0, t1);
1670 
1671   // Compute the capacity mask.
1672   ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1673   SmiUntag(t1);
1674   sub(t1, t1, Operand(1));
1675 
1676   // Generate an unrolled loop that performs a few probes before giving up.
1677   for (int i = 0; i < kNumberDictionaryProbes; i++) {
1678     // Use t2 for index calculations and keep the hash intact in t0.
1679     mov(t2, t0);
1680     // Compute the masked index: (hash + i + i * i) & mask.
1681     if (i > 0) {
1682       add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1683     }
1684     and_(t2, t2, Operand(t1));
1685 
1686     // Scale the index by multiplying by the element size.
1687     DCHECK(SeededNumberDictionary::kEntrySize == 3);
1688     add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3
1689 
1690     // Check if the key is identical to the name.
1691     add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
1692     ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1693     cmp(key, Operand(ip));
1694     if (i != kNumberDictionaryProbes - 1) {
1695       b(eq, &done);
1696     } else {
1697       b(ne, miss);
1698     }
1699   }
1700 
1701   bind(&done);
1702   // Check that the value is a normal property.
1703   // t2: elements + (index * kPointerSize)
1704   const int kDetailsOffset =
1705       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1706   ldr(t1, FieldMemOperand(t2, kDetailsOffset));
1707   tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
1708   b(ne, miss);
1709 
1710   // Get the value at the masked, scaled index and return.
1711   const int kValueOffset =
1712       SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1713   ldr(result, FieldMemOperand(t2, kValueOffset));
1714 }
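// A rough sketch of the probing performed above (hypothetical names,
// illustration only):
//
//   uint32_t mask = capacity - 1;
//   for (int i = 0; i < kNumberDictionaryProbes; i++) {
//     uint32_t index = (hash + i + i * i) & mask;  // cf. GetProbeOffset(i)
//     Entry* entry = &elements[index * SeededNumberDictionary::kEntrySize];
//     if (entry->key == smi_key) {
//       if (entry->details & PropertyDetails::TypeField::kMask) goto miss;
//       return entry->value;
//     }
//   }
//   goto miss;  // in the generated code only the last probe branches to miss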
1715 
1716 
1717 void MacroAssembler::Allocate(int object_size,
1718                               Register result,
1719                               Register scratch1,
1720                               Register scratch2,
1721                               Label* gc_required,
1722                               AllocationFlags flags) {
1723   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
1724   if (!FLAG_inline_new) {
1725     if (emit_debug_code()) {
1726       // Trash the registers to simulate an allocation failure.
1727       mov(result, Operand(0x7091));
1728       mov(scratch1, Operand(0x7191));
1729       mov(scratch2, Operand(0x7291));
1730     }
1731     jmp(gc_required);
1732     return;
1733   }
1734 
1735   DCHECK(!result.is(scratch1));
1736   DCHECK(!result.is(scratch2));
1737   DCHECK(!scratch1.is(scratch2));
1738   DCHECK(!scratch1.is(ip));
1739   DCHECK(!scratch2.is(ip));
1740 
1741   // Make object size into bytes.
1742   if ((flags & SIZE_IN_WORDS) != 0) {
1743     object_size *= kPointerSize;
1744   }
1745   DCHECK_EQ(0, object_size & kObjectAlignmentMask);
1746 
1747   // Check relative positions of allocation top and limit addresses.
1748   // The values must be adjacent in memory to allow the use of LDM.
1749   // Also, assert that the registers are numbered such that the values
1750   // are loaded in the correct order.
1751   ExternalReference allocation_top =
1752       AllocationUtils::GetAllocationTopReference(isolate(), flags);
1753   ExternalReference allocation_limit =
1754       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1755 
1756   intptr_t top   =
1757       reinterpret_cast<intptr_t>(allocation_top.address());
1758   intptr_t limit =
1759       reinterpret_cast<intptr_t>(allocation_limit.address());
1760   DCHECK((limit - top) == kPointerSize);
1761   DCHECK(result.code() < ip.code());
1762 
1763   // Set up allocation top address register.
1764   Register topaddr = scratch1;
1765   mov(topaddr, Operand(allocation_top));
1766 
1767   // This code stores a temporary value in ip. This is OK, as the code below
1768   // does not need ip for implicit literal generation.
1769   if ((flags & RESULT_CONTAINS_TOP) == 0) {
1770     // Load allocation top into result and allocation limit into ip.
1771     ldm(ia, topaddr, result.bit() | ip.bit());
1772   } else {
1773     if (emit_debug_code()) {
1774       // Assert that result actually contains top on entry. ip is used
1775       // immediately below, so this use of ip does not cause a difference in
1776       // register content between debug and release mode.
1777       ldr(ip, MemOperand(topaddr));
1778       cmp(result, ip);
1779       Check(eq, kUnexpectedAllocationTop);
1780     }
1781     // Load allocation limit into ip. Result already contains allocation top.
1782     ldr(ip, MemOperand(topaddr, limit - top));
1783   }
1784 
1785   if ((flags & DOUBLE_ALIGNMENT) != 0) {
1786     // Align the next allocation. Storing the filler map without checking top is
1787     // safe in new-space because the limit of the heap is aligned there.
1788     DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1789     STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1790     and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
1791     Label aligned;
1792     b(eq, &aligned);
1793     if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1794       cmp(result, Operand(ip));
1795       b(hs, gc_required);
1796     }
1797     mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1798     str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
1799     bind(&aligned);
1800   }
1801 
1802   // Calculate new top and bail out if new space is exhausted. Use result
1803   // to calculate the new top. We must preserve the ip register at this
1804   // point, so we cannot just use add().
1805   DCHECK(object_size > 0);
1806   Register source = result;
1807   Condition cond = al;
1808   int shift = 0;
1809   while (object_size != 0) {
1810     if (((object_size >> shift) & 0x03) == 0) {
1811       shift += 2;
1812     } else {
1813       int bits = object_size & (0xff << shift);
1814       object_size -= bits;
1815       shift += 8;
1816       Operand bits_operand(bits);
1817       DCHECK(bits_operand.instructions_required(this) == 1);
1818       add(scratch2, source, bits_operand, SetCC, cond);
1819       source = scratch2;
1820       cond = cc;
1821     }
1822   }
1823   b(cs, gc_required);
1824   cmp(scratch2, Operand(ip));
1825   b(hi, gc_required);
1826   str(scratch2, MemOperand(topaddr));
1827 
1828   // Tag object if requested.
1829   if ((flags & TAG_OBJECT) != 0) {
1830     add(result, result, Operand(kHeapObjectTag));
1831   }
1832 }
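// Ignoring double alignment and the immediate splitting above, the fast path
// is a bump-pointer allocation, roughly (illustration only):
//
//   Address top = *allocation_top;      // loaded together with the limit (LDM)
//   Address limit = *allocation_limit;  // adjacent word in memory
//   Address new_top = top + object_size;
//   if (new_top overflows || new_top > limit) goto gc_required;
//   *allocation_top = new_top;
//   result = top + kHeapObjectTag;      // only when TAG_OBJECT is requested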
1833 
1834 
1835 void MacroAssembler::Allocate(Register object_size,
1836                               Register result,
1837                               Register scratch1,
1838                               Register scratch2,
1839                               Label* gc_required,
1840                               AllocationFlags flags) {
1841   if (!FLAG_inline_new) {
1842     if (emit_debug_code()) {
1843       // Trash the registers to simulate an allocation failure.
1844       mov(result, Operand(0x7091));
1845       mov(scratch1, Operand(0x7191));
1846       mov(scratch2, Operand(0x7291));
1847     }
1848     jmp(gc_required);
1849     return;
1850   }
1851 
1852   // Assert that the register arguments are different and that none of
1853   // them are ip. ip is used explicitly in the code generated below.
1854   DCHECK(!result.is(scratch1));
1855   DCHECK(!result.is(scratch2));
1856   DCHECK(!scratch1.is(scratch2));
1857   DCHECK(!object_size.is(ip));
1858   DCHECK(!result.is(ip));
1859   DCHECK(!scratch1.is(ip));
1860   DCHECK(!scratch2.is(ip));
1861 
1862   // Check relative positions of allocation top and limit addresses.
1863   // The values must be adjacent in memory to allow the use of LDM.
1864   // Also, assert that the registers are numbered such that the values
1865   // are loaded in the correct order.
1866   ExternalReference allocation_top =
1867       AllocationUtils::GetAllocationTopReference(isolate(), flags);
1868   ExternalReference allocation_limit =
1869       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1870   intptr_t top =
1871       reinterpret_cast<intptr_t>(allocation_top.address());
1872   intptr_t limit =
1873       reinterpret_cast<intptr_t>(allocation_limit.address());
1874   DCHECK((limit - top) == kPointerSize);
1875   DCHECK(result.code() < ip.code());
1876 
1877   // Set up allocation top address.
1878   Register topaddr = scratch1;
1879   mov(topaddr, Operand(allocation_top));
1880 
1881   // This code stores a temporary value in ip. This is OK, as the code below
1882   // does not need ip for implicit literal generation.
1883   if ((flags & RESULT_CONTAINS_TOP) == 0) {
1884     // Load allocation top into result and allocation limit into ip.
1885     ldm(ia, topaddr, result.bit() | ip.bit());
1886   } else {
1887     if (emit_debug_code()) {
1888       // Assert that result actually contains top on entry. ip is used
1889       // immediately below, so this use of ip does not cause a difference in
1890       // register content between debug and release mode.
1891       ldr(ip, MemOperand(topaddr));
1892       cmp(result, ip);
1893       Check(eq, kUnexpectedAllocationTop);
1894     }
1895     // Load allocation limit into ip. Result already contains allocation top.
1896     ldr(ip, MemOperand(topaddr, limit - top));
1897   }
1898 
1899   if ((flags & DOUBLE_ALIGNMENT) != 0) {
1900     // Align the next allocation. Storing the filler map without checking top is
1901     // safe in new-space because the limit of the heap is aligned there.
1902     DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1903     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1904     and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
1905     Label aligned;
1906     b(eq, &aligned);
1907     if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1908       cmp(result, Operand(ip));
1909       b(hs, gc_required);
1910     }
1911     mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1912     str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
1913     bind(&aligned);
1914   }
1915 
1916   // Calculate new top and bail out if new space is exhausted. Use result
1917   // to calculate the new top. Object size may be in words so a shift is
1918   // required to get the number of bytes.
1919   if ((flags & SIZE_IN_WORDS) != 0) {
1920     add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
1921   } else {
1922     add(scratch2, result, Operand(object_size), SetCC);
1923   }
1924   b(cs, gc_required);
1925   cmp(scratch2, Operand(ip));
1926   b(hi, gc_required);
1927 
1928   // Update allocation top. result temporarily holds the new top.
1929   if (emit_debug_code()) {
1930     tst(scratch2, Operand(kObjectAlignmentMask));
1931     Check(eq, kUnalignedAllocationInNewSpace);
1932   }
1933   str(scratch2, MemOperand(topaddr));
1934 
1935   // Tag object if requested.
1936   if ((flags & TAG_OBJECT) != 0) {
1937     add(result, result, Operand(kHeapObjectTag));
1938   }
1939 }
1940 
1941 
1942 void MacroAssembler::UndoAllocationInNewSpace(Register object,
1943                                               Register scratch) {
1944   ExternalReference new_space_allocation_top =
1945       ExternalReference::new_space_allocation_top_address(isolate());
1946 
1947   // Make sure the object has no tag before resetting top.
1948   and_(object, object, Operand(~kHeapObjectTagMask));
1949 #ifdef DEBUG
1950   // Check that the object un-allocated is below the current top.
1951   mov(scratch, Operand(new_space_allocation_top));
1952   ldr(scratch, MemOperand(scratch));
1953   cmp(object, scratch);
1954   Check(lt, kUndoAllocationOfNonAllocatedMemory);
1955 #endif
1956   // Write the address of the object to un-allocate as the current top.
1957   mov(scratch, Operand(new_space_allocation_top));
1958   str(object, MemOperand(scratch));
1959 }
1960 
1961 
1962 void MacroAssembler::AllocateTwoByteString(Register result,
1963                                            Register length,
1964                                            Register scratch1,
1965                                            Register scratch2,
1966                                            Register scratch3,
1967                                            Label* gc_required) {
1968   // Calculate the number of bytes needed for the characters in the string while
1969   // observing object alignment.
1970   DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1971   mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
1972   add(scratch1, scratch1,
1973       Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1974   and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1975 
1976   // Allocate two-byte string in new space.
1977   Allocate(scratch1,
1978            result,
1979            scratch2,
1980            scratch3,
1981            gc_required,
1982            TAG_OBJECT);
1983 
1984   // Set the map, length and hash field.
1985   InitializeNewString(result,
1986                       length,
1987                       Heap::kStringMapRootIndex,
1988                       scratch1,
1989                       scratch2);
1990 }
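// The size computation above rounds the payload up to object alignment,
// roughly (the one-byte variant below is identical except for the factor two):
//
//   size = (2 * length + SeqTwoByteString::kHeaderSize + kObjectAlignmentMask)
//          & ~kObjectAlignmentMask;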
1991 
1992 
1993 void MacroAssembler::AllocateOneByteString(Register result, Register length,
1994                                            Register scratch1, Register scratch2,
1995                                            Register scratch3,
1996                                            Label* gc_required) {
1997   // Calculate the number of bytes needed for the characters in the string while
1998   // observing object alignment.
1999   DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2000   DCHECK(kCharSize == 1);
2001   add(scratch1, length,
2002       Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
2003   and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
2004 
2005   // Allocate one-byte string in new space.
2006   Allocate(scratch1,
2007            result,
2008            scratch2,
2009            scratch3,
2010            gc_required,
2011            TAG_OBJECT);
2012 
2013   // Set the map, length and hash field.
2014   InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
2015                       scratch1, scratch2);
2016 }
2017 
2018 
2019 void MacroAssembler::AllocateTwoByteConsString(Register result,
2020                                                Register length,
2021                                                Register scratch1,
2022                                                Register scratch2,
2023                                                Label* gc_required) {
2024   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
2025            TAG_OBJECT);
2026 
2027   InitializeNewString(result,
2028                       length,
2029                       Heap::kConsStringMapRootIndex,
2030                       scratch1,
2031                       scratch2);
2032 }
2033 
2034 
2035 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
2036                                                Register scratch1,
2037                                                Register scratch2,
2038                                                Label* gc_required) {
2039   Allocate(ConsString::kSize,
2040            result,
2041            scratch1,
2042            scratch2,
2043            gc_required,
2044            TAG_OBJECT);
2045 
2046   InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
2047                       scratch1, scratch2);
2048 }
2049 
2050 
2051 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
2052                                                  Register length,
2053                                                  Register scratch1,
2054                                                  Register scratch2,
2055                                                  Label* gc_required) {
2056   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2057            TAG_OBJECT);
2058 
2059   InitializeNewString(result,
2060                       length,
2061                       Heap::kSlicedStringMapRootIndex,
2062                       scratch1,
2063                       scratch2);
2064 }
2065 
2066 
2067 void MacroAssembler::AllocateOneByteSlicedString(Register result,
2068                                                  Register length,
2069                                                  Register scratch1,
2070                                                  Register scratch2,
2071                                                  Label* gc_required) {
2072   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2073            TAG_OBJECT);
2074 
2075   InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
2076                       scratch1, scratch2);
2077 }
2078 
2079 
2080 void MacroAssembler::CompareObjectType(Register object,
2081                                        Register map,
2082                                        Register type_reg,
2083                                        InstanceType type) {
2084   const Register temp = type_reg.is(no_reg) ? ip : type_reg;
2085 
2086   ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2087   CompareInstanceType(map, temp, type);
2088 }
2089 
2090 
2091 void MacroAssembler::CheckObjectTypeRange(Register object,
2092                                           Register map,
2093                                           InstanceType min_type,
2094                                           InstanceType max_type,
2095                                           Label* false_label) {
2096   STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2097   STATIC_ASSERT(LAST_TYPE < 256);
2098   ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2099   ldrb(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
2100   sub(ip, ip, Operand(min_type));
2101   cmp(ip, Operand(max_type - min_type));
2102   b(hi, false_label);
2103 }
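// The sub/cmp/b(hi) sequence above is the usual single unsigned comparison for
// a range check, equivalent to this sketch:
//
//   if (uint32_t(instance_type - min_type) > uint32_t(max_type - min_type)) {
//     goto false_label;
//   }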
2104 
2105 
2106 void MacroAssembler::CompareInstanceType(Register map,
2107                                          Register type_reg,
2108                                          InstanceType type) {
2109   // Registers map and type_reg can be ip. These two lines assert
2110   // that ip can be used with the two instructions (the constants
2111   // will never need ip).
2112   STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2113   STATIC_ASSERT(LAST_TYPE < 256);
2114   ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2115   cmp(type_reg, Operand(type));
2116 }
2117 
2118 
2119 void MacroAssembler::CompareRoot(Register obj,
2120                                  Heap::RootListIndex index) {
2121   DCHECK(!obj.is(ip));
2122   LoadRoot(ip, index);
2123   cmp(obj, ip);
2124 }
2125 
2126 
2127 void MacroAssembler::CheckFastElements(Register map,
2128                                        Register scratch,
2129                                        Label* fail) {
2130   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2131   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2132   STATIC_ASSERT(FAST_ELEMENTS == 2);
2133   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2134   ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2135   cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2136   b(hi, fail);
2137 }
2138 
2139 
2140 void MacroAssembler::CheckFastObjectElements(Register map,
2141                                              Register scratch,
2142                                              Label* fail) {
2143   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2144   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2145   STATIC_ASSERT(FAST_ELEMENTS == 2);
2146   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2147   ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2148   cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2149   b(ls, fail);
2150   cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2151   b(hi, fail);
2152 }
2153 
2154 
2155 void MacroAssembler::CheckFastSmiElements(Register map,
2156                                           Register scratch,
2157                                           Label* fail) {
2158   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2159   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2160   ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2161   cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2162   b(hi, fail);
2163 }
2164 
2165 
2166 void MacroAssembler::StoreNumberToDoubleElements(
2167                                       Register value_reg,
2168                                       Register key_reg,
2169                                       Register elements_reg,
2170                                       Register scratch1,
2171                                       LowDwVfpRegister double_scratch,
2172                                       Label* fail,
2173                                       int elements_offset) {
2174   Label smi_value, store;
2175 
2176   // Handle smi values specially.
2177   JumpIfSmi(value_reg, &smi_value);
2178 
2179   // Ensure that the object is a heap number
2180   CheckMap(value_reg,
2181            scratch1,
2182            isolate()->factory()->heap_number_map(),
2183            fail,
2184            DONT_DO_SMI_CHECK);
2185 
2186   vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
2187   // Force a canonical NaN.
2188   if (emit_debug_code()) {
2189     vmrs(ip);
2190     tst(ip, Operand(kVFPDefaultNaNModeControlBit));
2191     Assert(ne, kDefaultNaNModeNotSet);
2192   }
2193   VFPCanonicalizeNaN(double_scratch);
2194   b(&store);
2195 
2196   bind(&smi_value);
2197   SmiToDouble(double_scratch, value_reg);
2198 
2199   bind(&store);
2200   add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
2201   vstr(double_scratch,
2202        FieldMemOperand(scratch1,
2203                        FixedDoubleArray::kHeaderSize - elements_offset));
2204 }
2205 
2206 
2207 void MacroAssembler::CompareMap(Register obj,
2208                                 Register scratch,
2209                                 Handle<Map> map,
2210                                 Label* early_success) {
2211   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2212   CompareMap(scratch, map, early_success);
2213 }
2214 
2215 
2216 void MacroAssembler::CompareMap(Register obj_map,
2217                                 Handle<Map> map,
2218                                 Label* early_success) {
2219   cmp(obj_map, Operand(map));
2220 }
2221 
2222 
2223 void MacroAssembler::CheckMap(Register obj,
2224                               Register scratch,
2225                               Handle<Map> map,
2226                               Label* fail,
2227                               SmiCheckType smi_check_type) {
2228   if (smi_check_type == DO_SMI_CHECK) {
2229     JumpIfSmi(obj, fail);
2230   }
2231 
2232   Label success;
2233   CompareMap(obj, scratch, map, &success);
2234   b(ne, fail);
2235   bind(&success);
2236 }
2237 
2238 
2239 void MacroAssembler::CheckMap(Register obj,
2240                               Register scratch,
2241                               Heap::RootListIndex index,
2242                               Label* fail,
2243                               SmiCheckType smi_check_type) {
2244   if (smi_check_type == DO_SMI_CHECK) {
2245     JumpIfSmi(obj, fail);
2246   }
2247   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2248   LoadRoot(ip, index);
2249   cmp(scratch, ip);
2250   b(ne, fail);
2251 }
2252 
2253 
2254 void MacroAssembler::DispatchMap(Register obj,
2255                                  Register scratch,
2256                                  Handle<Map> map,
2257                                  Handle<Code> success,
2258                                  SmiCheckType smi_check_type) {
2259   Label fail;
2260   if (smi_check_type == DO_SMI_CHECK) {
2261     JumpIfSmi(obj, &fail);
2262   }
2263   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2264   mov(ip, Operand(map));
2265   cmp(scratch, ip);
2266   Jump(success, RelocInfo::CODE_TARGET, eq);
2267   bind(&fail);
2268 }
2269 
2270 
2271 void MacroAssembler::TryGetFunctionPrototype(Register function,
2272                                              Register result,
2273                                              Register scratch,
2274                                              Label* miss,
2275                                              bool miss_on_bound_function) {
2276   Label non_instance;
2277   if (miss_on_bound_function) {
2278     // Check that the receiver isn't a smi.
2279     JumpIfSmi(function, miss);
2280 
2281     // Check that the function really is a function.  Load map into result reg.
2282     CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2283     b(ne, miss);
2284 
2285     ldr(scratch,
2286         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2287     ldr(scratch,
2288         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2289     tst(scratch,
2290         Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
2291     b(ne, miss);
2292 
2293     // Make sure that the function has an instance prototype.
2294     ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2295     tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
2296     b(ne, &non_instance);
2297   }
2298 
2299   // Get the prototype or initial map from the function.
2300   ldr(result,
2301       FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2302 
2303   // If the prototype or initial map is the hole, don't return it and
2304   // simply miss the cache instead. This will allow us to allocate a
2305   // prototype object on-demand in the runtime system.
2306   LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2307   cmp(result, ip);
2308   b(eq, miss);
2309 
2310   // If the function does not have an initial map, we're done.
2311   Label done;
2312   CompareObjectType(result, scratch, scratch, MAP_TYPE);
2313   b(ne, &done);
2314 
2315   // Get the prototype from the initial map.
2316   ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2317 
2318   if (miss_on_bound_function) {
2319     jmp(&done);
2320 
2321     // Non-instance prototype: Fetch prototype from constructor field
2322     // in initial map.
2323     bind(&non_instance);
2324     ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
2325   }
2326 
2327   // All done.
2328   bind(&done);
2329 }
2330 
2331 
2332 void MacroAssembler::CallStub(CodeStub* stub,
2333                               TypeFeedbackId ast_id,
2334                               Condition cond) {
2335   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
2336   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
2337 }
2338 
2339 
2340 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2341   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2342 }
2343 
2344 
2345 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2346   return ref0.address() - ref1.address();
2347 }
2348 
2349 
2350 void MacroAssembler::CallApiFunctionAndReturn(
2351     Register function_address,
2352     ExternalReference thunk_ref,
2353     int stack_space,
2354     MemOperand return_value_operand,
2355     MemOperand* context_restore_operand) {
2356   ExternalReference next_address =
2357       ExternalReference::handle_scope_next_address(isolate());
2358   const int kNextOffset = 0;
2359   const int kLimitOffset = AddressOffset(
2360       ExternalReference::handle_scope_limit_address(isolate()),
2361       next_address);
2362   const int kLevelOffset = AddressOffset(
2363       ExternalReference::handle_scope_level_address(isolate()),
2364       next_address);
2365 
2366   DCHECK(function_address.is(r1) || function_address.is(r2));
2367 
2368   Label profiler_disabled;
2369   Label end_profiler_check;
2370   mov(r9, Operand(ExternalReference::is_profiling_address(isolate())));
2371   ldrb(r9, MemOperand(r9, 0));
2372   cmp(r9, Operand(0));
2373   b(eq, &profiler_disabled);
2374 
2375   // Additional parameter is the address of the actual callback.
2376   mov(r3, Operand(thunk_ref));
2377   jmp(&end_profiler_check);
2378 
2379   bind(&profiler_disabled);
2380   Move(r3, function_address);
2381   bind(&end_profiler_check);
2382 
2383   // Allocate HandleScope in callee-save registers.
2384   mov(r9, Operand(next_address));
2385   ldr(r4, MemOperand(r9, kNextOffset));
2386   ldr(r5, MemOperand(r9, kLimitOffset));
2387   ldr(r6, MemOperand(r9, kLevelOffset));
2388   add(r6, r6, Operand(1));
2389   str(r6, MemOperand(r9, kLevelOffset));
2390 
2391   if (FLAG_log_timer_events) {
2392     FrameScope frame(this, StackFrame::MANUAL);
2393     PushSafepointRegisters();
2394     PrepareCallCFunction(1, r0);
2395     mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2396     CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
2397     PopSafepointRegisters();
2398   }
2399 
2400   // Native call returns to the DirectCEntry stub which redirects to the
2401   // return address pushed on stack (could have moved after GC).
2402   // DirectCEntry stub itself is generated early and never moves.
2403   DirectCEntryStub stub(isolate());
2404   stub.GenerateCall(this, r3);
2405 
2406   if (FLAG_log_timer_events) {
2407     FrameScope frame(this, StackFrame::MANUAL);
2408     PushSafepointRegisters();
2409     PrepareCallCFunction(1, r0);
2410     mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2411     CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
2412     PopSafepointRegisters();
2413   }
2414 
2415   Label promote_scheduled_exception;
2416   Label exception_handled;
2417   Label delete_allocated_handles;
2418   Label leave_exit_frame;
2419   Label return_value_loaded;
2420 
2421   // Load the value from ReturnValue.
2422   ldr(r0, return_value_operand);
2423   bind(&return_value_loaded);
2424   // No more valid handles (the result handle was the last one). Restore
2425   // previous handle scope.
2426   str(r4, MemOperand(r9, kNextOffset));
2427   if (emit_debug_code()) {
2428     ldr(r1, MemOperand(r9, kLevelOffset));
2429     cmp(r1, r6);
2430     Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
2431   }
2432   sub(r6, r6, Operand(1));
2433   str(r6, MemOperand(r9, kLevelOffset));
2434   ldr(ip, MemOperand(r9, kLimitOffset));
2435   cmp(r5, ip);
2436   b(ne, &delete_allocated_handles);
2437 
2438   // Check if the function scheduled an exception.
2439   bind(&leave_exit_frame);
2440   LoadRoot(r4, Heap::kTheHoleValueRootIndex);
2441   mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
2442   ldr(r5, MemOperand(ip));
2443   cmp(r4, r5);
2444   b(ne, &promote_scheduled_exception);
2445   bind(&exception_handled);
2446 
2447   bool restore_context = context_restore_operand != NULL;
2448   if (restore_context) {
2449     ldr(cp, *context_restore_operand);
2450   }
2451   // LeaveExitFrame expects unwind space to be in a register.
2452   mov(r4, Operand(stack_space));
2453   LeaveExitFrame(false, r4, !restore_context);
2454   mov(pc, lr);
2455 
2456   bind(&promote_scheduled_exception);
2457   {
2458     FrameScope frame(this, StackFrame::INTERNAL);
2459     CallExternalReference(
2460         ExternalReference(Runtime::kPromoteScheduledException, isolate()),
2461         0);
2462   }
2463   jmp(&exception_handled);
2464 
2465   // HandleScope limit has changed. Delete allocated extensions.
2466   bind(&delete_allocated_handles);
2467   str(r5, MemOperand(r9, kLimitOffset));
2468   mov(r4, r0);
2469   PrepareCallCFunction(1, r5);
2470   mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2471   CallCFunction(
2472       ExternalReference::delete_handle_scope_extensions(isolate()), 1);
2473   mov(r0, r4);
2474   jmp(&leave_exit_frame);
2475 }
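// The HandleScope bookkeeping around the API call above is roughly the
// following (hypothetical field names, illustration only):
//
//   prev_next = data->next; prev_limit = data->limit; data->level++;
//   result = call_api_function(...);
//   data->next = prev_next; data->level--;
//   if (data->limit != prev_limit) DeleteHandleScopeExtensions();
//   if (scheduled_exception != the_hole) PromoteScheduledException();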
2476 
2477 
2478 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2479   return has_frame_ || !stub->SometimesSetsUpAFrame();
2480 }
2481 
2482 
2483 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2484   // If the hash field contains an array index, pick it out. The assert checks
2485   // that the constant for the maximum number of digits for an array index
2486   // cached in the hash field and the number of bits reserved for it do not
2487   // conflict.
2488   DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
2489          (1 << String::kArrayIndexValueBits));
2490   DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
2491 }
2492 
2493 
2494 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
2495   if (CpuFeatures::IsSupported(VFP3)) {
2496     vmov(value.low(), smi);
2497     vcvt_f64_s32(value, 1);
2498   } else {
2499     SmiUntag(ip, smi);
2500     vmov(value.low(), ip);
2501     vcvt_f64_s32(value, value.low());
2502   }
2503 }
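// Both paths above produce the double value of the untagged smi, i.e.
// static_cast<double>(smi >> kSmiTagSize). The VFP3 path converts the still
// tagged word as a fixed-point number with one fraction bit, which divides by
// two and thereby untags it for free.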
2504 
2505 
2506 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
2507                                        LowDwVfpRegister double_scratch) {
2508   DCHECK(!double_input.is(double_scratch));
2509   vcvt_s32_f64(double_scratch.low(), double_input);
2510   vcvt_f64_s32(double_scratch, double_scratch.low());
2511   VFPCompareAndSetFlags(double_input, double_scratch);
2512 }
2513 
2514 
2515 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2516                                            DwVfpRegister double_input,
2517                                            LowDwVfpRegister double_scratch) {
2518   DCHECK(!double_input.is(double_scratch));
2519   vcvt_s32_f64(double_scratch.low(), double_input);
2520   vmov(result, double_scratch.low());
2521   vcvt_f64_s32(double_scratch, double_scratch.low());
2522   VFPCompareAndSetFlags(double_input, double_scratch);
2523 }
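// Both helpers above use the round-trip test for "is exactly representable as
// an int32": convert to int32 (round towards zero), convert back, and compare
// with the original. Roughly:
//
//   int32_t i = static_cast<int32_t>(d);          // vcvt_s32_f64
//   bool exact = (static_cast<double>(i) == d);   // VFP compare sets eq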
2524 
2525 
2526 void MacroAssembler::TryInt32Floor(Register result,
2527                                    DwVfpRegister double_input,
2528                                    Register input_high,
2529                                    LowDwVfpRegister double_scratch,
2530                                    Label* done,
2531                                    Label* exact) {
2532   DCHECK(!result.is(input_high));
2533   DCHECK(!double_input.is(double_scratch));
2534   Label negative, exception;
2535 
2536   VmovHigh(input_high, double_input);
2537 
2538   // Test for NaN and infinities.
2539   Sbfx(result, input_high,
2540        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
2541   cmp(result, Operand(-1));
2542   b(eq, &exception);
2543   // Test for values that can be exactly represented as a
2544   // signed 32-bit integer.
2545   TryDoubleToInt32Exact(result, double_input, double_scratch);
2546   // If exact, return (result already fetched).
2547   b(eq, exact);
2548   cmp(input_high, Operand::Zero());
2549   b(mi, &negative);
2550 
2551   // Input is in ]+0, +inf[.
2552   // If result equals 0x7fffffff, the input was out of range or
2553   // in ]0x7fffffff, 0x80000000[. We ignore this last case, which
2554   // could still fit into an int32; that means we always treat such input as
2555   // out of range and always go to exception.
2556   // If result < 0x7fffffff, go to done, result fetched.
2557   cmn(result, Operand(1));
2558   b(mi, &exception);
2559   b(done);
2560 
2561   // Input is in ]-inf, -0[.
2562   // If x is a non integer negative number,
2563   // floor(x) <=> round_to_zero(x) - 1.
2564   bind(&negative);
2565   sub(result, result, Operand(1), SetCC);
2566   // If result is still negative, go to done, result fetched.
2567   // Else, we had an overflow and we fall through exception.
2568   b(mi, done);
2569   bind(&exception);
2570 }
2571 
2572 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2573                                                 DwVfpRegister double_input,
2574                                                 Label* done) {
2575   LowDwVfpRegister double_scratch = kScratchDoubleReg;
2576   vcvt_s32_f64(double_scratch.low(), double_input);
2577   vmov(result, double_scratch.low());
2578 
2579   // If result is not saturated (0x7fffffff or 0x80000000), we are done.
2580   sub(ip, result, Operand(1));
2581   cmp(ip, Operand(0x7ffffffe));
2582   b(lt, done);
2583 }
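// The check above relies on vcvt saturating out-of-range inputs to INT32_MIN
// or INT32_MAX, so the inline path only succeeds for non-saturated results.
// Roughly:
//
//   int32_t r = convert_towards_zero(d);              // vcvt_s32_f64
//   if (r != INT32_MAX && r != INT32_MIN) return r;   // done
//   // otherwise fall through to the DoubleToIStub slow path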
2584 
2585 
2586 void MacroAssembler::TruncateDoubleToI(Register result,
2587                                        DwVfpRegister double_input) {
2588   Label done;
2589 
2590   TryInlineTruncateDoubleToI(result, double_input, &done);
2591 
2592   // If we fell through then inline version didn't succeed - call stub instead.
2593   push(lr);
2594   sub(sp, sp, Operand(kDoubleSize));  // Put input on stack.
2595   vstr(double_input, MemOperand(sp, 0));
2596 
2597   DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2598   CallStub(&stub);
2599 
2600   add(sp, sp, Operand(kDoubleSize));
2601   pop(lr);
2602 
2603   bind(&done);
2604 }
2605 
2606 
2607 void MacroAssembler::TruncateHeapNumberToI(Register result,
2608                                            Register object) {
2609   Label done;
2610   LowDwVfpRegister double_scratch = kScratchDoubleReg;
2611   DCHECK(!result.is(object));
2612 
2613   vldr(double_scratch,
2614        MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2615   TryInlineTruncateDoubleToI(result, double_scratch, &done);
2616 
2617   // If we fell through then inline version didn't succeed - call stub instead.
2618   push(lr);
2619   DoubleToIStub stub(isolate(),
2620                      object,
2621                      result,
2622                      HeapNumber::kValueOffset - kHeapObjectTag,
2623                      true,
2624                      true);
2625   CallStub(&stub);
2626   pop(lr);
2627 
2628   bind(&done);
2629 }
2630 
2631 
2632 void MacroAssembler::TruncateNumberToI(Register object,
2633                                        Register result,
2634                                        Register heap_number_map,
2635                                        Register scratch1,
2636                                        Label* not_number) {
2637   Label done;
2638   DCHECK(!result.is(object));
2639 
2640   UntagAndJumpIfSmi(result, object, &done);
2641   JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2642   TruncateHeapNumberToI(result, object);
2643 
2644   bind(&done);
2645 }
2646 
2647 
2648 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2649                                          Register src,
2650                                          int num_least_bits) {
2651   if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
2652     ubfx(dst, src, kSmiTagSize, num_least_bits);
2653   } else {
2654     SmiUntag(dst, src);
2655     and_(dst, dst, Operand((1 << num_least_bits) - 1));
2656   }
2657 }
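// Both branches above compute
//   dst = (src >> kSmiTagSize) & ((1 << num_least_bits) - 1);
// ubfx simply extracts that bit field in a single instruction on ARMv7.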
2658 
2659 
2660 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2661                                            Register src,
2662                                            int num_least_bits) {
2663   and_(dst, src, Operand((1 << num_least_bits) - 1));
2664 }
2665 
2666 
2667 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2668                                  int num_arguments,
2669                                  SaveFPRegsMode save_doubles) {
2670   // All parameters are on the stack.  r0 has the return value after the call.
2671 
2672   // If the expected number of arguments of the runtime function is
2673   // constant, we check that the actual number of arguments match the
2674   // expectation.
2675   CHECK(f->nargs < 0 || f->nargs == num_arguments);
2676 
2677   // TODO(1236192): Most runtime routines don't need the number of
2678   // arguments passed in because it is constant. At some point we
2679   // should remove this need and make the runtime routine entry code
2680   // smarter.
2681   mov(r0, Operand(num_arguments));
2682   mov(r1, Operand(ExternalReference(f, isolate())));
2683   CEntryStub stub(isolate(), 1, save_doubles);
2684   CallStub(&stub);
2685 }
2686 
2687 
2688 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2689                                            int num_arguments) {
2690   mov(r0, Operand(num_arguments));
2691   mov(r1, Operand(ext));
2692 
2693   CEntryStub stub(isolate(), 1);
2694   CallStub(&stub);
2695 }
2696 
2697 
2698 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2699                                                int num_arguments,
2700                                                int result_size) {
2701   // TODO(1236192): Most runtime routines don't need the number of
2702   // arguments passed in because it is constant. At some point we
2703   // should remove this need and make the runtime routine entry code
2704   // smarter.
2705   mov(r0, Operand(num_arguments));
2706   JumpToExternalReference(ext);
2707 }
2708 
2709 
2710 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
2711                                      int num_arguments,
2712                                      int result_size) {
2713   TailCallExternalReference(ExternalReference(fid, isolate()),
2714                             num_arguments,
2715                             result_size);
2716 }
2717 
2718 
2719 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2720 #if defined(__thumb__)
2721   // Thumb mode builtin.
2722   DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2723 #endif
2724   mov(r1, Operand(builtin));
2725   CEntryStub stub(isolate(), 1);
2726   Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2727 }
2728 
2729 
2730 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
2731                                    InvokeFlag flag,
2732                                    const CallWrapper& call_wrapper) {
2733   // You can't call a builtin without a valid frame.
2734   DCHECK(flag == JUMP_FUNCTION || has_frame());
2735 
2736   GetBuiltinEntry(r2, id);
2737   if (flag == CALL_FUNCTION) {
2738     call_wrapper.BeforeCall(CallSize(r2));
2739     Call(r2);
2740     call_wrapper.AfterCall();
2741   } else {
2742     DCHECK(flag == JUMP_FUNCTION);
2743     Jump(r2);
2744   }
2745 }
2746 
2747 
2748 void MacroAssembler::GetBuiltinFunction(Register target,
2749                                         Builtins::JavaScript id) {
2750   // Load the builtins object into target register.
2751   ldr(target,
2752       MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2753   ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
2754   // Load the JavaScript builtin function from the builtins object.
2755   ldr(target, FieldMemOperand(target,
2756                           JSBuiltinsObject::OffsetOfFunctionWithId(id)));
2757 }
2758 
2759 
2760 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2761   DCHECK(!target.is(r1));
2762   GetBuiltinFunction(r1, id);
2763   // Load the code entry point from the builtins object.
2764   ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
2765 }
2766 
2767 
2768 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2769                                 Register scratch1, Register scratch2) {
2770   if (FLAG_native_code_counters && counter->Enabled()) {
2771     mov(scratch1, Operand(value));
2772     mov(scratch2, Operand(ExternalReference(counter)));
2773     str(scratch1, MemOperand(scratch2));
2774   }
2775 }
2776 
2777 
2778 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2779                                       Register scratch1, Register scratch2) {
2780   DCHECK(value > 0);
2781   if (FLAG_native_code_counters && counter->Enabled()) {
2782     mov(scratch2, Operand(ExternalReference(counter)));
2783     ldr(scratch1, MemOperand(scratch2));
2784     add(scratch1, scratch1, Operand(value));
2785     str(scratch1, MemOperand(scratch2));
2786   }
2787 }
2788 
2789 
2790 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2791                                       Register scratch1, Register scratch2) {
2792   DCHECK(value > 0);
2793   if (FLAG_native_code_counters && counter->Enabled()) {
2794     mov(scratch2, Operand(ExternalReference(counter)));
2795     ldr(scratch1, MemOperand(scratch2));
2796     sub(scratch1, scratch1, Operand(value));
2797     str(scratch1, MemOperand(scratch2));
2798   }
2799 }
2800 
2801 
2802 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
2803   if (emit_debug_code())
2804     Check(cond, reason);
2805 }
2806 
2807 
2808 void MacroAssembler::AssertFastElements(Register elements) {
2809   if (emit_debug_code()) {
2810     DCHECK(!elements.is(ip));
2811     Label ok;
2812     push(elements);
2813     ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2814     LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2815     cmp(elements, ip);
2816     b(eq, &ok);
2817     LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2818     cmp(elements, ip);
2819     b(eq, &ok);
2820     LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2821     cmp(elements, ip);
2822     b(eq, &ok);
2823     Abort(kJSObjectWithFastElementsMapHasSlowElements);
2824     bind(&ok);
2825     pop(elements);
2826   }
2827 }
2828 
2829 
2830 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
2831   Label L;
2832   b(cond, &L);
2833   Abort(reason);
2834   // will not return here
2835   bind(&L);
2836 }
2837 
2838 
2839 void MacroAssembler::Abort(BailoutReason reason) {
2840   Label abort_start;
2841   bind(&abort_start);
2842 #ifdef DEBUG
2843   const char* msg = GetBailoutReason(reason);
2844   if (msg != NULL) {
2845     RecordComment("Abort message: ");
2846     RecordComment(msg);
2847   }
2848 
2849   if (FLAG_trap_on_abort) {
2850     stop(msg);
2851     return;
2852   }
2853 #endif
2854 
2855   mov(r0, Operand(Smi::FromInt(reason)));
2856   push(r0);
2857 
2858   // Disable stub call restrictions to always allow calls to abort.
2859   if (!has_frame_) {
2860     // We don't actually want to generate a pile of code for this, so just
2861     // claim there is a stack frame, without generating one.
2862     FrameScope scope(this, StackFrame::NONE);
2863     CallRuntime(Runtime::kAbort, 1);
2864   } else {
2865     CallRuntime(Runtime::kAbort, 1);
2866   }
2867   // will not return here
2868   if (is_const_pool_blocked()) {
2869     // If the calling code cares about the exact number of
2870     // instructions generated, we insert padding here to keep the size
2871     // of the Abort macro constant.
2872     static const int kExpectedAbortInstructions = 7;
2873     int abort_instructions = InstructionsGeneratedSince(&abort_start);
2874     DCHECK(abort_instructions <= kExpectedAbortInstructions);
2875     while (abort_instructions++ < kExpectedAbortInstructions) {
2876       nop();
2877     }
2878   }
2879 }
2880 
2881 
2882 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2883   if (context_chain_length > 0) {
2884     // Move up the chain of contexts to the context containing the slot.
2885     ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2886     for (int i = 1; i < context_chain_length; i++) {
2887       ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2888     }
2889   } else {
2890     // Slot is in the current function context.  Move it into the
2891     // destination register in case we store into it (the write barrier
2892   // cannot be allowed to destroy the context in cp).
2893     mov(dst, cp);
2894   }
2895 }
2896 
2897 
2898 void MacroAssembler::LoadTransitionedArrayMapConditional(
2899     ElementsKind expected_kind,
2900     ElementsKind transitioned_kind,
2901     Register map_in_out,
2902     Register scratch,
2903     Label* no_map_match) {
2904   // Load the global or builtins object from the current context.
2905   ldr(scratch,
2906       MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2907   ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
2908 
2909   // Check that the function's map is the same as the expected cached map.
2910   ldr(scratch,
2911       MemOperand(scratch,
2912                  Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2913   size_t offset = expected_kind * kPointerSize +
2914       FixedArrayBase::kHeaderSize;
2915   ldr(ip, FieldMemOperand(scratch, offset));
2916   cmp(map_in_out, ip);
2917   b(ne, no_map_match);
2918 
2919   // Use the transitioned cached map.
2920   offset = transitioned_kind * kPointerSize +
2921       FixedArrayBase::kHeaderSize;
2922   ldr(map_in_out, FieldMemOperand(scratch, offset));
2923 }
2924 
2925 
2926 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2927   // Load the global or builtins object from the current context.
2928   ldr(function,
2929       MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2930   // Load the native context from the global or builtins object.
2931   ldr(function, FieldMemOperand(function,
2932                                 GlobalObject::kNativeContextOffset));
2933   // Load the function from the native context.
2934   ldr(function, MemOperand(function, Context::SlotOffset(index)));
2935 }
2936 
2937 
2938 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2939                                                   Register map,
2940                                                   Register scratch) {
2941   // Load the initial map. The global functions all have initial maps.
2942   ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2943   if (emit_debug_code()) {
2944     Label ok, fail;
2945     CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2946     b(&ok);
2947     bind(&fail);
2948     Abort(kGlobalFunctionsMustHaveInitialMap);
2949     bind(&ok);
2950   }
2951 }
2952 
2953 
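// Jumps to |not_power_of_two_or_zero| if |reg| is zero, negative, or not a
// power of two, using the (x & (x - 1)) == 0 test; |scratch| is clobbered
// with reg - 1.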
2954 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2955     Register reg,
2956     Register scratch,
2957     Label* not_power_of_two_or_zero) {
2958   sub(scratch, reg, Operand(1), SetCC);
2959   b(mi, not_power_of_two_or_zero);
2960   tst(scratch, reg);
2961   b(ne, not_power_of_two_or_zero);
2962 }
2963 
2964 
2965 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
2966     Register reg,
2967     Register scratch,
2968     Label* zero_and_neg,
2969     Label* not_power_of_two) {
2970   sub(scratch, reg, Operand(1), SetCC);
2971   b(mi, zero_and_neg);
2972   tst(scratch, reg);
2973   b(ne, not_power_of_two);
2974 }
2975 
2976 
2977 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
2978                                       Register reg2,
2979                                       Label* on_not_both_smi) {
2980   STATIC_ASSERT(kSmiTag == 0);
2981   tst(reg1, Operand(kSmiTagMask));
2982   tst(reg2, Operand(kSmiTagMask), eq);
2983   b(ne, on_not_both_smi);
2984 }
2985 
2986 
2987 void MacroAssembler::UntagAndJumpIfSmi(
2988     Register dst, Register src, Label* smi_case) {
2989   STATIC_ASSERT(kSmiTag == 0);
2990   SmiUntag(dst, src, SetCC);
2991   b(cc, smi_case);  // Shifter carry is not set for a smi.
2992 }
2993 
2994 
2995 void MacroAssembler::UntagAndJumpIfNotSmi(
2996     Register dst, Register src, Label* non_smi_case) {
2997   STATIC_ASSERT(kSmiTag == 0);
2998   SmiUntag(dst, src, SetCC);
2999   b(cs, non_smi_case);  // Shifter carry is set for a non-smi.
3000 }
3001 
3002 
3003 void MacroAssembler::JumpIfEitherSmi(Register reg1,
3004                                      Register reg2,
3005                                      Label* on_either_smi) {
3006   STATIC_ASSERT(kSmiTag == 0);
3007   tst(reg1, Operand(kSmiTagMask));
3008   tst(reg2, Operand(kSmiTagMask), ne);
3009   b(eq, on_either_smi);
3010 }
3011 
3012 
3013 void MacroAssembler::AssertNotSmi(Register object) {
3014   if (emit_debug_code()) {
3015     STATIC_ASSERT(kSmiTag == 0);
3016     tst(object, Operand(kSmiTagMask));
3017     Check(ne, kOperandIsASmi);
3018   }
3019 }
3020 
3021 
3022 void MacroAssembler::AssertSmi(Register object) {
3023   if (emit_debug_code()) {
3024     STATIC_ASSERT(kSmiTag == 0);
3025     tst(object, Operand(kSmiTagMask));
3026     Check(eq, kOperandIsNotSmi);
3027   }
3028 }
3029 
3030 
3031 void MacroAssembler::AssertString(Register object) {
3032   if (emit_debug_code()) {
3033     STATIC_ASSERT(kSmiTag == 0);
3034     tst(object, Operand(kSmiTagMask));
3035     Check(ne, kOperandIsASmiAndNotAString);
3036     push(object);
3037     ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3038     CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
3039     pop(object);
3040     Check(lo, kOperandIsNotAString);
3041   }
3042 }
3043 
3044 
3045 void MacroAssembler::AssertName(Register object) {
3046   if (emit_debug_code()) {
3047     STATIC_ASSERT(kSmiTag == 0);
3048     tst(object, Operand(kSmiTagMask));
3049     Check(ne, kOperandIsASmiAndNotAName);
3050     push(object);
3051     ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3052     CompareInstanceType(object, object, LAST_NAME_TYPE);
3053     pop(object);
3054     Check(le, kOperandIsNotAName);
3055   }
3056 }
3057 
3058 
3059 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
3060                                                      Register scratch) {
3061   if (emit_debug_code()) {
3062     Label done_checking;
3063     AssertNotSmi(object);
3064     CompareRoot(object, Heap::kUndefinedValueRootIndex);
3065     b(eq, &done_checking);
3066     ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3067     CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
3068     Assert(eq, kExpectedUndefinedOrCell);
3069     bind(&done_checking);
3070   }
3071 }
3072 
3073 
3074 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
3075   if (emit_debug_code()) {
3076     CompareRoot(reg, index);
3077     Check(eq, kHeapNumberMapRegisterClobbered);
3078   }
3079 }
3080 
3081 
3082 void MacroAssembler::JumpIfNotHeapNumber(Register object,
3083                                          Register heap_number_map,
3084                                          Register scratch,
3085                                          Label* on_not_heap_number) {
3086   ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3087   AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3088   cmp(scratch, heap_number_map);
3089   b(ne, on_not_heap_number);
3090 }
3091 
3092 
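// Looks up |object| (a smi or heap number) in the number-to-string cache.
// On a hit the cached string is left in |result|; on a miss control jumps to
// |not_found| with |result| and the scratch registers clobbered.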
3093 void MacroAssembler::LookupNumberStringCache(Register object,
3094                                              Register result,
3095                                              Register scratch1,
3096                                              Register scratch2,
3097                                              Register scratch3,
3098                                              Label* not_found) {
3099   // Use of registers. Register result is used as a temporary.
3100   Register number_string_cache = result;
3101   Register mask = scratch3;
3102 
3103   // Load the number string cache.
3104   LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
3105 
3106   // Make the hash mask from the length of the number string cache. It
3107   // contains two elements (number and string) for each cache entry.
3108   ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
3109   // Divide length by two (length is a smi).
3110   mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
3111   sub(mask, mask, Operand(1));  // Make mask.
3112 
3113   // Calculate the entry in the number string cache. The hash value in the
3114   // number string cache for smis is just the smi value, and the hash for
3115   // doubles is the xor of the upper and lower words. See
3116   // Heap::GetNumberStringCache.
3117   Label is_smi;
3118   Label load_result_from_cache;
3119   JumpIfSmi(object, &is_smi);
3120   CheckMap(object,
3121            scratch1,
3122            Heap::kHeapNumberMapRootIndex,
3123            not_found,
3124            DONT_DO_SMI_CHECK);
3125 
3126   STATIC_ASSERT(8 == kDoubleSize);
3127   add(scratch1,
3128       object,
3129       Operand(HeapNumber::kValueOffset - kHeapObjectTag));
3130   ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
3131   eor(scratch1, scratch1, Operand(scratch2));
3132   and_(scratch1, scratch1, Operand(mask));
3133 
3134   // Calculate address of entry in string cache: each entry consists
3135   // of two pointer sized fields.
3136   add(scratch1,
3137       number_string_cache,
3138       Operand(scratch1, LSL, kPointerSizeLog2 + 1));
3139 
3140   Register probe = mask;
3141   ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
3142   JumpIfSmi(probe, not_found);
3143   sub(scratch2, object, Operand(kHeapObjectTag));
3144   vldr(d0, scratch2, HeapNumber::kValueOffset);
3145   sub(probe, probe, Operand(kHeapObjectTag));
3146   vldr(d1, probe, HeapNumber::kValueOffset);
3147   VFPCompareAndSetFlags(d0, d1);
3148   b(ne, not_found);  // The cache did not contain this value.
3149   b(&load_result_from_cache);
3150 
3151   bind(&is_smi);
3152   Register scratch = scratch1;
3153   and_(scratch, mask, Operand(object, ASR, 1));
3154   // Calculate address of entry in string cache: each entry consists
3155   // of two pointer sized fields.
3156   add(scratch,
3157       number_string_cache,
3158       Operand(scratch, LSL, kPointerSizeLog2 + 1));
3159 
3160   // Check if the entry is the smi we are looking for.
3161   ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3162   cmp(object, probe);
3163   b(ne, not_found);
3164 
3165   // Get the result from the cache.
3166   bind(&load_result_from_cache);
3167   ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
3168   IncrementCounter(isolate()->counters()->number_to_string_native(),
3169                    1,
3170                    scratch1,
3171                    scratch2);
3172 }
3173 
3174 
3175 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
3176     Register first, Register second, Register scratch1, Register scratch2,
3177     Label* failure) {
3178   // Test that both first and second are sequential one-byte strings.
3179   // Assume that they are non-smis.
3180   ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
3181   ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
3182   ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
3183   ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
3184 
3185   JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
3186                                                  scratch2, failure);
3187 }
3188 
3189 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
3190                                                            Register second,
3191                                                            Register scratch1,
3192                                                            Register scratch2,
3193                                                            Label* failure) {
3194   // Check that neither is a smi.
3195   and_(scratch1, first, Operand(second));
3196   JumpIfSmi(scratch1, failure);
3197   JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
3198                                                scratch2, failure);
3199 }
3200 
3201 
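// |reg| holds an instance type. Falls through if it is an internalized
// string or SYMBOL_TYPE (i.e. a unique name), otherwise jumps to
// |not_unique_name|.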
3202 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3203                                                      Label* not_unique_name) {
3204   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3205   Label succeed;
3206   tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3207   b(eq, &succeed);
3208   cmp(reg, Operand(SYMBOL_TYPE));
3209   b(ne, not_unique_name);
3210 
3211   bind(&succeed);
3212 }
3213 
3214 
3215 // Allocates a heap number or jumps to the gc_required label if the young
3216 // space is full and a scavenge is needed.
3217 void MacroAssembler::AllocateHeapNumber(Register result,
3218                                         Register scratch1,
3219                                         Register scratch2,
3220                                         Register heap_number_map,
3221                                         Label* gc_required,
3222                                         TaggingMode tagging_mode,
3223                                         MutableMode mode) {
3224   // Allocate an object in the heap for the heap number and tag it as a heap
3225   // object.
3226   Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3227            tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3228 
3229   Heap::RootListIndex map_index = mode == MUTABLE
3230       ? Heap::kMutableHeapNumberMapRootIndex
3231       : Heap::kHeapNumberMapRootIndex;
3232   AssertIsRoot(heap_number_map, map_index);
3233 
3234   // Store heap number map in the allocated object.
3235   if (tagging_mode == TAG_RESULT) {
3236     str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3237   } else {
3238     str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3239   }
3240 }
3241 
3242 
3243 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3244                                                  DwVfpRegister value,
3245                                                  Register scratch1,
3246                                                  Register scratch2,
3247                                                  Register heap_number_map,
3248                                                  Label* gc_required) {
3249   AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
3250   sub(scratch1, result, Operand(kHeapObjectTag));
3251   vstr(value, scratch1, HeapNumber::kValueOffset);
3252 }
3253 
3254 
3255 // Copies a fixed number of fields of heap objects from src to dst.
3256 void MacroAssembler::CopyFields(Register dst,
3257                                 Register src,
3258                                 LowDwVfpRegister double_scratch,
3259                                 int field_count) {
3260   int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
3261   for (int i = 0; i < double_count; i++) {
3262     vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
3263     vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
3264   }
3265 
3266   STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
3267   STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);
3268 
3269   int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
3270   if (remain != 0) {
3271     vldr(double_scratch.low(),
3272          FieldMemOperand(src, (field_count - 1) * kPointerSize));
3273     vstr(double_scratch.low(),
3274          FieldMemOperand(dst, (field_count - 1) * kPointerSize));
3275   }
3276 }
3277 
3278 
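// Copies |length| bytes from |src| to |dst|: byte by byte until |src| is
// word aligned, then in word-sized chunks, then the remaining tail bytes.
// All four registers are clobbered.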
3279 void MacroAssembler::CopyBytes(Register src,
3280                                Register dst,
3281                                Register length,
3282                                Register scratch) {
3283   Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3284 
3285   // Align src before copying in word size chunks.
3286   cmp(length, Operand(kPointerSize));
3287   b(le, &byte_loop);
3288 
3289   bind(&align_loop_1);
3290   tst(src, Operand(kPointerSize - 1));
3291   b(eq, &word_loop);
3292   ldrb(scratch, MemOperand(src, 1, PostIndex));
3293   strb(scratch, MemOperand(dst, 1, PostIndex));
3294   sub(length, length, Operand(1), SetCC);
3295   b(&align_loop_1);
3296   // Copy bytes in word size chunks.
3297   bind(&word_loop);
3298   if (emit_debug_code()) {
3299     tst(src, Operand(kPointerSize - 1));
3300     Assert(eq, kExpectingAlignmentForCopyBytes);
3301   }
3302   cmp(length, Operand(kPointerSize));
3303   b(lt, &byte_loop);
3304   ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
3305   if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
3306     str(scratch, MemOperand(dst, kPointerSize, PostIndex));
3307   } else {
3308     strb(scratch, MemOperand(dst, 1, PostIndex));
3309     mov(scratch, Operand(scratch, LSR, 8));
3310     strb(scratch, MemOperand(dst, 1, PostIndex));
3311     mov(scratch, Operand(scratch, LSR, 8));
3312     strb(scratch, MemOperand(dst, 1, PostIndex));
3313     mov(scratch, Operand(scratch, LSR, 8));
3314     strb(scratch, MemOperand(dst, 1, PostIndex));
3315   }
3316   sub(length, length, Operand(kPointerSize));
3317   b(&word_loop);
3318 
3319   // Copy the last bytes if any left.
3320   bind(&byte_loop);
3321   cmp(length, Operand::Zero());
3322   b(eq, &done);
3323   bind(&byte_loop_1);
3324   ldrb(scratch, MemOperand(src, 1, PostIndex));
3325   strb(scratch, MemOperand(dst, 1, PostIndex));
3326   sub(length, length, Operand(1), SetCC);
3327   b(ne, &byte_loop_1);
3328   bind(&done);
3329 }
3330 
3331 
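// Stores |filler| into every pointer-sized slot in [start_offset,
// end_offset); |start_offset| is advanced past the filled area.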
3332 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3333                                                 Register end_offset,
3334                                                 Register filler) {
3335   Label loop, entry;
3336   b(&entry);
3337   bind(&loop);
3338   str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
3339   bind(&entry);
3340   cmp(start_offset, end_offset);
3341   b(lt, &loop);
3342 }
3343 
3344 
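// Sets the condition flags according to whether the CPU provides 32 VFP
// double registers: ne if d16-d31 are available, eq if only d0-d15 exist.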
3345 void MacroAssembler::CheckFor32DRegs(Register scratch) {
3346   mov(scratch, Operand(ExternalReference::cpu_features()));
3347   ldr(scratch, MemOperand(scratch));
3348   tst(scratch, Operand(1u << VFP32DREGS));
3349 }
3350 
3351 
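// Stores all d-registers below |location| in a full-descending layout. When
// only 16 d-registers exist, the d16-d31 slots are still reserved so the
// resulting layout is the same on every CPU; |location| ends up pointing at
// the lowest slot.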
3352 void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
3353   CheckFor32DRegs(scratch);
3354   vstm(db_w, location, d16, d31, ne);
3355   sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3356   vstm(db_w, location, d0, d15);
3357 }
3358 
3359 
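// Reloads the d-registers saved by SaveFPRegs and releases the reserved
// space, leaving |location| past the save area.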
3360 void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
3361   CheckFor32DRegs(scratch);
3362   vldm(ia_w, location, d0, d15);
3363   vldm(ia_w, location, d16, d31, ne);
3364   add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3365 }
3366 
3367 
3368 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
3369     Register first, Register second, Register scratch1, Register scratch2,
3370     Label* failure) {
3371   const int kFlatOneByteStringMask =
3372       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3373   const int kFlatOneByteStringTag =
3374       kStringTag | kOneByteStringTag | kSeqStringTag;
3375   and_(scratch1, first, Operand(kFlatOneByteStringMask));
3376   and_(scratch2, second, Operand(kFlatOneByteStringMask));
3377   cmp(scratch1, Operand(kFlatOneByteStringTag));
3378   // Ignore second test if first test failed.
3379   cmp(scratch2, Operand(kFlatOneByteStringTag), eq);
3380   b(ne, failure);
3381 }
3382 
3383 
3384 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
3385                                                               Register scratch,
3386                                                               Label* failure) {
3387   const int kFlatOneByteStringMask =
3388       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3389   const int kFlatOneByteStringTag =
3390       kStringTag | kOneByteStringTag | kSeqStringTag;
3391   and_(scratch, type, Operand(kFlatOneByteStringMask));
3392   cmp(scratch, Operand(kFlatOneByteStringTag));
3393   b(ne, failure);
3394 }
3395 
3396 static const int kRegisterPassedArguments = 4;
3397 
3398 
3399 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3400                                               int num_double_arguments) {
3401   int stack_passed_words = 0;
3402   if (use_eabi_hardfloat()) {
3403     // In the hard floating point calling convention, we can use
3404     // all double registers to pass doubles.
3405     if (num_double_arguments > DoubleRegister::NumRegisters()) {
3406       stack_passed_words +=
3407           2 * (num_double_arguments - DoubleRegister::NumRegisters());
3408     }
3409   } else {
3410     // In the soft floating point calling convention, every double
3411     // argument is passed using two registers.
3412     num_reg_arguments += 2 * num_double_arguments;
3413   }
3414   // Up to four simple arguments are passed in registers r0..r3.
3415   if (num_reg_arguments > kRegisterPassedArguments) {
3416     stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3417   }
3418   return stack_passed_words;
3419 }
3420 
3421 
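// Debug-code check for writing a character into a sequential string:
// verifies that |string| is a non-smi sequential string matching
// |encoding_mask| and that the untagged |index| is non-negative and below
// the string length. Clobbers ip; |index| is preserved.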
3422 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
3423                                                Register index,
3424                                                Register value,
3425                                                uint32_t encoding_mask) {
3426   Label is_object;
3427   SmiTst(string);
3428   Check(ne, kNonObject);
3429 
3430   ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
3431   ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
3432 
3433   and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
3434   cmp(ip, Operand(encoding_mask));
3435   Check(eq, kUnexpectedStringType);
3436 
3437   // The index is assumed to be untagged coming in; tag it to compare with
3438   // the string length without using a temp register. It is restored at the
3439   // end of this function.
3440   Label index_tag_ok, index_tag_bad;
3441   TrySmiTag(index, index, &index_tag_bad);
3442   b(&index_tag_ok);
3443   bind(&index_tag_bad);
3444   Abort(kIndexIsTooLarge);
3445   bind(&index_tag_ok);
3446 
3447   ldr(ip, FieldMemOperand(string, String::kLengthOffset));
3448   cmp(index, ip);
3449   Check(lt, kIndexIsTooLarge);
3450 
3451   cmp(index, Operand(Smi::FromInt(0)));
3452   Check(ge, kIndexIsNegative);
3453 
3454   SmiUntag(index, index);
3455 }
3456 
3457 
3458 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3459                                           int num_double_arguments,
3460                                           Register scratch) {
3461   int frame_alignment = ActivationFrameAlignment();
3462   int stack_passed_arguments = CalculateStackPassedWords(
3463       num_reg_arguments, num_double_arguments);
3464   if (frame_alignment > kPointerSize) {
3465     // Make stack end at alignment and make room for num_arguments - 4 words
3466     // and the original value of sp.
3467     mov(scratch, sp);
3468     sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
3469     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3470     and_(sp, sp, Operand(-frame_alignment));
3471     str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3472   } else {
3473     sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3474   }
3475 }
3476 
3477 
3478 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3479                                           Register scratch) {
3480   PrepareCallCFunction(num_reg_arguments, 0, scratch);
3481 }
3482 
3483 
3484 void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
3485   DCHECK(src.is(d0));
3486   if (!use_eabi_hardfloat()) {
3487     vmov(r0, r1, src);
3488   }
3489 }
3490 
3491 
3492 // On ARM this is just a synonym to make the purpose clear.
3493 void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
3494   MovToFloatParameter(src);
3495 }
3496 
3497 
3498 void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
3499                                           DwVfpRegister src2) {
3500   DCHECK(src1.is(d0));
3501   DCHECK(src2.is(d1));
3502   if (!use_eabi_hardfloat()) {
3503     vmov(r0, r1, src1);
3504     vmov(r2, r3, src2);
3505   }
3506 }
3507 
3508 
3509 void MacroAssembler::CallCFunction(ExternalReference function,
3510                                    int num_reg_arguments,
3511                                    int num_double_arguments) {
3512   mov(ip, Operand(function));
3513   CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3514 }
3515 
3516 
3517 void MacroAssembler::CallCFunction(Register function,
3518                                    int num_reg_arguments,
3519                                    int num_double_arguments) {
3520   CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3521 }
3522 
3523 
3524 void MacroAssembler::CallCFunction(ExternalReference function,
3525                                    int num_arguments) {
3526   CallCFunction(function, num_arguments, 0);
3527 }
3528 
3529 
3530 void MacroAssembler::CallCFunction(Register function,
3531                                    int num_arguments) {
3532   CallCFunction(function, num_arguments, 0);
3533 }
3534 
3535 
3536 void MacroAssembler::CallCFunctionHelper(Register function,
3537                                          int num_reg_arguments,
3538                                          int num_double_arguments) {
3539   DCHECK(has_frame());
3540   // Make sure that the stack is aligned before calling a C function unless
3541   // running in the simulator. The simulator has its own alignment check which
3542   // provides more information.
3543 #if V8_HOST_ARCH_ARM
3544   if (emit_debug_code()) {
3545     int frame_alignment = base::OS::ActivationFrameAlignment();
3546     int frame_alignment_mask = frame_alignment - 1;
3547     if (frame_alignment > kPointerSize) {
3548       DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3549       Label alignment_as_expected;
3550       tst(sp, Operand(frame_alignment_mask));
3551       b(eq, &alignment_as_expected);
3552       // Don't use Check here, as it will call Runtime_Abort possibly
3553       // re-entering here.
3554       stop("Unexpected alignment");
3555       bind(&alignment_as_expected);
3556     }
3557   }
3558 #endif
3559 
3560   // Just call directly. The function called cannot cause a GC, or
3561   // allow preemption, so the return address in the link register
3562   // stays correct.
3563   Call(function);
3564   int stack_passed_arguments = CalculateStackPassedWords(
3565       num_reg_arguments, num_double_arguments);
3566   if (ActivationFrameAlignment() > kPointerSize) {
3567     ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
3568   } else {
3569     add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
3570   }
3571 }
3572 
3573 
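// |ldr_location| points at a constant pool load (either a pc/pp-relative
// ldr or, with out-of-line constant pools, a movw/movt/ldr sequence).
// Computes in |result| the address of the constant the load refers to,
// using |scratch| and ip as temporaries.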
3574 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
3575                                                Register result,
3576                                                Register scratch) {
3577   Label small_constant_pool_load, load_result;
3578   ldr(result, MemOperand(ldr_location));
3579 
3580   if (FLAG_enable_ool_constant_pool) {
3581     // Check if this is an extended constant pool load.
3582     and_(scratch, result, Operand(GetConsantPoolLoadMask()));
3583     teq(scratch, Operand(GetConsantPoolLoadPattern()));
3584     b(eq, &small_constant_pool_load);
3585     if (emit_debug_code()) {
3586       // Check that the instruction sequence is:
3587       //   movw reg, #offset_low
3588       //   movt reg, #offset_high
3589       //   ldr reg, [pp, reg]
3590       Instr patterns[] = {GetMovWPattern(), GetMovTPattern(),
3591                           GetLdrPpRegOffsetPattern()};
3592       for (int i = 0; i < 3; i++) {
3593         ldr(result, MemOperand(ldr_location, i * kInstrSize));
3594         and_(result, result, Operand(patterns[i]));
3595         cmp(result, Operand(patterns[i]));
3596         Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
3597       }
3598       // Result was clobbered. Restore it.
3599       ldr(result, MemOperand(ldr_location));
3600     }
3601 
3602     // Get the offset into the constant pool.  First extract movw immediate into
3603     // result.
3604     and_(scratch, result, Operand(0xfff));
3605     mov(ip, Operand(result, LSR, 4));
3606     and_(ip, ip, Operand(0xf000));
3607     orr(result, scratch, Operand(ip));
3608     // Then extract movt immediate and or into result.
3609     ldr(scratch, MemOperand(ldr_location, kInstrSize));
3610     and_(ip, scratch, Operand(0xf0000));
3611     orr(result, result, Operand(ip, LSL, 12));
3612     and_(scratch, scratch, Operand(0xfff));
3613     orr(result, result, Operand(scratch, LSL, 16));
3614 
3615     b(&load_result);
3616   }
3617 
3618   bind(&small_constant_pool_load);
3619   if (emit_debug_code()) {
3620     // Check that the instruction is a ldr reg, [<pc or pp> + offset] .
3621     and_(result, result, Operand(GetConsantPoolLoadPattern()));
3622     cmp(result, Operand(GetConsantPoolLoadPattern()));
3623     Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
3624     // Result was clobbered. Restore it.
3625     ldr(result, MemOperand(ldr_location));
3626   }
3627 
3628   // Get the offset into the constant pool.
3629   const uint32_t kLdrOffsetMask = (1 << 12) - 1;
3630   and_(result, result, Operand(kLdrOffsetMask));
3631 
3632   bind(&load_result);
3633   // Get the address of the constant.
3634   if (FLAG_enable_ool_constant_pool) {
3635     add(result, pp, Operand(result));
3636   } else {
3637     add(result, ldr_location, Operand(result));
3638     add(result, result, Operand(Instruction::kPCReadOffset));
3639   }
3640 }
3641 
3642 
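// Tests the MemoryChunk flags of the page containing |object| against
// |mask| and jumps to |condition_met| when the condition |cc| holds.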
3643 void MacroAssembler::CheckPageFlag(
3644     Register object,
3645     Register scratch,
3646     int mask,
3647     Condition cc,
3648     Label* condition_met) {
3649   Bfc(scratch, object, 0, kPageSizeBits);
3650   ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3651   tst(scratch, Operand(mask));
3652   b(cc, condition_met);
3653 }
3654 
3655 
3656 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
3657                                         Register scratch,
3658                                         Label* if_deprecated) {
3659   if (map->CanBeDeprecated()) {
3660     mov(scratch, Operand(map));
3661     ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
3662     tst(scratch, Operand(Map::Deprecated::kMask));
3663     b(ne, if_deprecated);
3664   }
3665 }
3666 
3667 
3668 void MacroAssembler::JumpIfBlack(Register object,
3669                                  Register scratch0,
3670                                  Register scratch1,
3671                                  Label* on_black) {
3672   HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
3673   DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
3674 }
3675 
3676 
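// Tests the two mark bits of |object| against (first_bit, second_bit) and
// jumps to |has_color| on a match, falling through otherwise. Clobbers ip
// and both scratch registers.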
3677 void MacroAssembler::HasColor(Register object,
3678                               Register bitmap_scratch,
3679                               Register mask_scratch,
3680                               Label* has_color,
3681                               int first_bit,
3682                               int second_bit) {
3683   DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3684 
3685   GetMarkBits(object, bitmap_scratch, mask_scratch);
3686 
3687   Label other_color, word_boundary;
3688   ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3689   tst(ip, Operand(mask_scratch));
3690   b(first_bit == 1 ? eq : ne, &other_color);
3691   // Shift left 1 by adding.
3692   add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
3693   b(eq, &word_boundary);
3694   tst(ip, Operand(mask_scratch));
3695   b(second_bit == 1 ? ne : eq, has_color);
3696   jmp(&other_color);
3697 
3698   bind(&word_boundary);
3699   ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
3700   tst(ip, Operand(1));
3701   b(second_bit == 1 ? ne : eq, has_color);
3702   bind(&other_color);
3703 }
3704 
3705 
3706 // Detect some, but not all, common pointer-free objects.  This is used by the
3707 // incremental write barrier which doesn't care about oddballs (they are always
3708 // marked black immediately so this code is not hit).
3709 void MacroAssembler::JumpIfDataObject(Register value,
3710                                       Register scratch,
3711                                       Label* not_data_object) {
3712   Label is_data_object;
3713   ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
3714   CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
3715   b(eq, &is_data_object);
3716   DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3717   DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3718   // If it's a string and it's not a cons string then it's an object containing
3719   // no GC pointers.
3720   ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3721   tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
3722   b(ne, not_data_object);
3723   bind(&is_data_object);
3724 }
3725 
3726 
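// Computes, for the object at |addr_reg|, the address of the marking bitmap
// cell holding its mark bits (into |bitmap_reg|) and a mask with the first
// of those bits set (into |mask_reg|). Clobbers ip.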
3727 void MacroAssembler::GetMarkBits(Register addr_reg,
3728                                  Register bitmap_reg,
3729                                  Register mask_reg) {
3730   DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3731   and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
3732   Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
3733   const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3734   Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
3735   add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
3736   mov(ip, Operand(1));
3737   mov(mask_reg, Operand(ip, LSL, mask_reg));
3738 }
3739 
3740 
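// Does nothing if |value| is already black or grey. If it is white and is a
// data object (heap number, external string, or sequential string) it is
// marked black and its size is added to the page's live byte count; any
// other white object jumps to |value_is_white_and_not_data|.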
3741 void MacroAssembler::EnsureNotWhite(
3742     Register value,
3743     Register bitmap_scratch,
3744     Register mask_scratch,
3745     Register load_scratch,
3746     Label* value_is_white_and_not_data) {
3747   DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3748   GetMarkBits(value, bitmap_scratch, mask_scratch);
3749 
3750   // If the value is black or grey we don't need to do anything.
3751   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3752   DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
3753   DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
3754   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3755 
3756   Label done;
3757 
3758   // Since both black and grey have a 1 in the first position and white does
3759   // not have a 1 there we only need to check one bit.
3760   ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3761   tst(mask_scratch, load_scratch);
3762   b(ne, &done);
3763 
3764   if (emit_debug_code()) {
3765     // Check for impossible bit pattern.
3766     Label ok;
3767     // LSL may overflow, making the check conservative.
3768     tst(load_scratch, Operand(mask_scratch, LSL, 1));
3769     b(eq, &ok);
3770     stop("Impossible marking bit pattern");
3771     bind(&ok);
3772   }
3773 
3774   // Value is white.  We check whether it is data that doesn't need scanning.
3775   // Currently only checks for HeapNumber and non-cons strings.
3776   Register map = load_scratch;  // Holds map while checking type.
3777   Register length = load_scratch;  // Holds length of object after testing type.
3778   Label is_data_object;
3779 
3780   // Check for heap-number
3781   ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3782   CompareRoot(map, Heap::kHeapNumberMapRootIndex);
3783   mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
3784   b(eq, &is_data_object);
3785 
3786   // Check for strings.
3787   DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3788   DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3789   // If it's a string and it's not a cons string then it's an object containing
3790   // no GC pointers.
3791   Register instance_type = load_scratch;
3792   ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
3793   tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
3794   b(ne, value_is_white_and_not_data);
3795   // It's a non-indirect (non-cons and non-slice) string.
3796   // If it's external, the length is just ExternalString::kSize.
3797   // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
3798   // External strings are the only ones with the kExternalStringTag bit
3799   // set.
3800   DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
3801   DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
3802   tst(instance_type, Operand(kExternalStringTag));
3803   mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
3804   b(ne, &is_data_object);
3805 
3806   // Sequential string, either Latin1 or UC16.
3807   // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
3808   // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
3809   // getting the length multiplied by 2.
3810   DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
3811   DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
3812   ldr(ip, FieldMemOperand(value, String::kLengthOffset));
3813   tst(instance_type, Operand(kStringEncodingMask));
3814   mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
3815   add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
3816   and_(length, length, Operand(~kObjectAlignmentMask));
3817 
3818   bind(&is_data_object);
3819   // Value is a data object, and it is white.  Mark it black.  Since we know
3820   // that the object is white we can make it black by flipping one bit.
3821   ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3822   orr(ip, ip, Operand(mask_scratch));
3823   str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3824 
3825   and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
3826   ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3827   add(ip, ip, Operand(length));
3828   str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3829 
3830   bind(&done);
3831 }
3832 
3833 
3834 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3835   Usat(output_reg, 8, Operand(input_reg));
3836 }
3837 
3838 
3839 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3840                                         DwVfpRegister input_reg,
3841                                         LowDwVfpRegister double_scratch) {
3842   Label done;
3843 
3844   // Handle inputs >= 255 (including +infinity).
3845   Vmov(double_scratch, 255.0, result_reg);
3846   mov(result_reg, Operand(255));
3847   VFPCompareAndSetFlags(input_reg, double_scratch);
3848   b(ge, &done);
3849 
3850   // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest
3851   // rounding mode will provide the correct result.
3852   vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
3853   vmov(result_reg, double_scratch.low());
3854 
3855   bind(&done);
3856 }
3857 
3858 
3859 void MacroAssembler::LoadInstanceDescriptors(Register map,
3860                                              Register descriptors) {
3861   ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3862 }
3863 
3864 
3865 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3866   ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3867   DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3868 }
3869 
3870 
3871 void MacroAssembler::EnumLength(Register dst, Register map) {
3872   STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3873   ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3874   and_(dst, dst, Operand(Map::EnumLengthBits::kMask));
3875   SmiTag(dst);
3876 }
3877 
3878 
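// Walks the prototype chain of the object in r0. Jumps to |call_runtime| if
// the receiver's map has no valid enum cache, if any prototype's enum cache
// is non-empty, or if any object on the chain has elements other than the
// empty fixed array or the empty slow element dictionary; falls through once
// |null_value| is reached. Clobbers r1, r2, r3, r6 and ip.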
3879 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
3880   Register  empty_fixed_array_value = r6;
3881   LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3882   Label next, start;
3883   mov(r2, r0);
3884 
3885   // Check if the enum length field is properly initialized, indicating that
3886   // there is an enum cache.
3887   ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3888 
3889   EnumLength(r3, r1);
3890   cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
3891   b(eq, call_runtime);
3892 
3893   jmp(&start);
3894 
3895   bind(&next);
3896   ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3897 
3898   // For all objects but the receiver, check that the cache is empty.
3899   EnumLength(r3, r1);
3900   cmp(r3, Operand(Smi::FromInt(0)));
3901   b(ne, call_runtime);
3902 
3903   bind(&start);
3904 
3905   // Check that there are no elements. Register r2 contains the current JS
3906   // object we've reached through the prototype chain.
3907   Label no_elements;
3908   ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
3909   cmp(r2, empty_fixed_array_value);
3910   b(eq, &no_elements);
3911 
3912   // Second chance, the object may be using the empty slow element dictionary.
3913   CompareRoot(r2, Heap::kEmptySlowElementDictionaryRootIndex);
3914   b(ne, call_runtime);
3915 
3916   bind(&no_elements);
3917   ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
3918   cmp(r2, null_value);
3919   b(ne, &next);
3920 }
3921 
3922 
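// Checks whether an AllocationMemento immediately follows the JSArray in
// |receiver_reg| inside new space. Jumps to |no_memento_found| when it
// cannot; otherwise falls through with the flags set by comparing against
// the allocation memento map (eq on a hit).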
3923 void MacroAssembler::TestJSArrayForAllocationMemento(
3924     Register receiver_reg,
3925     Register scratch_reg,
3926     Label* no_memento_found) {
3927   ExternalReference new_space_start =
3928       ExternalReference::new_space_start(isolate());
3929   ExternalReference new_space_allocation_top =
3930       ExternalReference::new_space_allocation_top_address(isolate());
3931   add(scratch_reg, receiver_reg,
3932       Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
3933   cmp(scratch_reg, Operand(new_space_start));
3934   b(lt, no_memento_found);
3935   mov(ip, Operand(new_space_allocation_top));
3936   ldr(ip, MemOperand(ip));
3937   cmp(scratch_reg, ip);
3938   b(gt, no_memento_found);
3939   ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
3940   cmp(scratch_reg,
3941       Operand(isolate()->factory()->allocation_memento_map()));
3942 }
3943 
3944 
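// Returns an allocatable register that is none of the (valid) registers
// passed in.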
3945 Register GetRegisterThatIsNotOneOf(Register reg1,
3946                                    Register reg2,
3947                                    Register reg3,
3948                                    Register reg4,
3949                                    Register reg5,
3950                                    Register reg6) {
3951   RegList regs = 0;
3952   if (reg1.is_valid()) regs |= reg1.bit();
3953   if (reg2.is_valid()) regs |= reg2.bit();
3954   if (reg3.is_valid()) regs |= reg3.bit();
3955   if (reg4.is_valid()) regs |= reg4.bit();
3956   if (reg5.is_valid()) regs |= reg5.bit();
3957   if (reg6.is_valid()) regs |= reg6.bit();
3958 
3959   for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
3960     Register candidate = Register::FromAllocationIndex(i);
3961     if (regs & candidate.bit()) continue;
3962     return candidate;
3963   }
3964   UNREACHABLE();
3965   return no_reg;
3966 }
3967 
3968 
3969 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
3970     Register object,
3971     Register scratch0,
3972     Register scratch1,
3973     Label* found) {
3974   DCHECK(!scratch1.is(scratch0));
3975   Factory* factory = isolate()->factory();
3976   Register current = scratch0;
3977   Label loop_again;
3978 
3979   // Start the prototype chain walk at the object itself.
3980   mov(current, object);
3981 
3982   // Loop based on the map going up the prototype chain.
3983   bind(&loop_again);
3984   ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
3985   ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
3986   DecodeField<Map::ElementsKindBits>(scratch1);
3987   cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
3988   b(eq, found);
3989   ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
3990   cmp(current, Operand(factory->null_value()));
3991   b(ne, &loop_again);
3992 }
3993 
3994 
3995 #ifdef DEBUG
3996 bool AreAliased(Register reg1,
3997                 Register reg2,
3998                 Register reg3,
3999                 Register reg4,
4000                 Register reg5,
4001                 Register reg6,
4002                 Register reg7,
4003                 Register reg8) {
4004   int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
4005       reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
4006       reg7.is_valid() + reg8.is_valid();
4007 
4008   RegList regs = 0;
4009   if (reg1.is_valid()) regs |= reg1.bit();
4010   if (reg2.is_valid()) regs |= reg2.bit();
4011   if (reg3.is_valid()) regs |= reg3.bit();
4012   if (reg4.is_valid()) regs |= reg4.bit();
4013   if (reg5.is_valid()) regs |= reg5.bit();
4014   if (reg6.is_valid()) regs |= reg6.bit();
4015   if (reg7.is_valid()) regs |= reg7.bit();
4016   if (reg8.is_valid()) regs |= reg8.bit();
4017   int n_of_non_aliasing_regs = NumRegs(regs);
4018 
4019   return n_of_valid_regs != n_of_non_aliasing_regs;
4020 }
4021 #endif
4022 
4023 
CodePatcher(byte * address,int instructions,FlushICache flush_cache)4024 CodePatcher::CodePatcher(byte* address,
4025                          int instructions,
4026                          FlushICache flush_cache)
4027     : address_(address),
4028       size_(instructions * Assembler::kInstrSize),
4029       masm_(NULL, address, size_ + Assembler::kGap),
4030       flush_cache_(flush_cache) {
4031   // Create a new macro assembler pointing to the address of the code to patch.
4032   // The size is adjusted with kGap on order for the assembler to generate size
4033   // bytes of instructions without failing with buffer size constraints.
4034   DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4035 }
4036 
4037 
~CodePatcher()4038 CodePatcher::~CodePatcher() {
4039   // Indicate that code has changed.
4040   if (flush_cache_ == FLUSH) {
4041     CpuFeatures::FlushICache(address_, size_);
4042   }
4043 
4044   // Check that the code was patched as expected.
4045   DCHECK(masm_.pc_ == address_ + size_);
4046   DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4047 }
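
// Illustrative usage sketch (not taken from this file; address and
// instruction bits are hypothetical): patch a single instruction in place and
// let the destructor flush the instruction cache afterwards.
//
//   CodePatcher patcher(patch_address, 1, CodePatcher::FLUSH);
//   patcher.Emit(instruction_bits);  // overwrites one kInstrSize-sized slot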


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}


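// Rewrites only the condition field of the instruction already present at the
// current patch position, leaving all other bits untouched.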
void CodePatcher::EmitCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  instr = (instr & ~kCondMask) | cond;
  masm_.emit(instr);
}


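// Computes result = dividend / divisor, truncating towards zero, without a
// division instruction: multiply by a precomputed "magic" reciprocal
// (Hacker's Delight style signed division by a constant), take the high 32
// bits of the product, apply a correction when the multiplier's sign
// disagrees with the divisor's, arithmetic-shift, and add the dividend's
// sign bit to round towards zero.
//
// Illustrative example (values assumed, not taken from this file): for
// divisor == 3 the multiplier is 0x55555556 with shift 0, so for
// dividend == -7 the high word of the product is -3 and adding the sign bit
// (1) yields -2, the truncated quotient of -7 / 3.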
void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!dividend.is(result));
  DCHECK(!dividend.is(ip));
  DCHECK(!result.is(ip));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  mov(ip, Operand(mag.multiplier));
  smull(ip, result, dividend, ip);
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) {
    add(result, result, Operand(dividend));
  }
  if (divisor < 0 && !neg && mag.multiplier > 0) {
    sub(result, result, Operand(dividend));
  }
  if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift));
  add(result, result, Operand(dividend, LSR, 31));
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM