// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_

#include "src/arm/assembler-arm.h"
#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/globals.h"
#include "src/turbo-assembler.h"

namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = r0;
constexpr Register kReturnRegister1 = r1;
constexpr Register kReturnRegister2 = r2;
constexpr Register kJSFunctionRegister = r1;
constexpr Register kContextRegister = r7;
constexpr Register kAllocateSizeRegister = r1;
constexpr Register kSpeculationPoisonRegister = r9;
constexpr Register kInterpreterAccumulatorRegister = r0;
constexpr Register kInterpreterBytecodeOffsetRegister = r5;
constexpr Register kInterpreterBytecodeArrayRegister = r6;
constexpr Register kInterpreterDispatchTableRegister = r8;

constexpr Register kJavaScriptCallArgCountRegister = r0;
constexpr Register kJavaScriptCallCodeStartRegister = r2;
constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = r3;
constexpr Register kJavaScriptCallExtraArg1Register = r2;

constexpr Register kOffHeapTrampolineRegister = ip;
constexpr Register kRuntimeCallFunctionRegister = r1;
constexpr Register kRuntimeCallArgCountRegister = r0;
constexpr Register kRuntimeCallArgvRegister = r2;
constexpr Register kWasmInstanceRegister = r3;

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}
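
// Example (illustrative): load the map word of the tagged object in r1. Field
// offsets are given relative to the start of the object; FieldMemOperand
// subtracts kHeapObjectTag to compensate for the heap object tag bit:
//
//   __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));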

// Give alias names to registers
constexpr Register cp = r7;              // JavaScript context pointer.
constexpr Register kRootRegister = r10;  // Roots array pointer.

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };

Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);

enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};

class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 public:
  TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
                 void* buffer, int buffer_size,
                 CodeObjectRequired create_code_object)
      : TurboAssemblerBase(isolate, options, buffer, buffer_size,
                           create_code_object) {}

  // Activation support.
  void EnterFrame(StackFrame::Type type,
                  bool load_constant_pool_pointer_reg = false);
  // Returns the pc offset at which the frame ends.
  int LeaveFrame(StackFrame::Type type);

  // Push a fixed frame, consisting of lr, fp.
  void PushCommonFrame(Register marker_reg = no_reg);

  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type);
  void Prologue();

  // Push a standard frame, consisting of lr, fp, context and JS function.
  void PushStandardFrame(Register function_reg);

  void InitializeRootRegister();

  void Push(Register src) { push(src); }

  void Push(Handle<HeapObject> handle);
  void Push(Smi* smi);

  // Push two registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Condition cond = al) {
    if (src1.code() > src2.code()) {
      stm(db_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      str(src2, MemOperand(sp, 4, NegPreIndex), cond);
    }
  }

  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        str(src3, MemOperand(sp, 4, NegPreIndex), cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, cond);
    }
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Condition cond = al) {
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
              cond);
        } else {
          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
          str(src4, MemOperand(sp, 4, NegPreIndex), cond);
        }
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        Push(src3, src4, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, cond);
    }
  }

  // Push five registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5, Condition cond = al) {
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          if (src4.code() > src5.code()) {
            stm(db_w, sp,
                src1.bit() | src2.bit() | src3.bit() | src4.bit() | src5.bit(),
                cond);
          } else {
            stm(db_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
                cond);
            str(src5, MemOperand(sp, 4, NegPreIndex), cond);
          }
        } else {
          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
          Push(src4, src5, cond);
        }
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        Push(src3, src4, src5, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, src5, cond);
    }
  }
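
  // Example (illustrative): Push(r3, r1) stores r3 at the higher address and
  // r1 at the new top of stack, i.e. it has the same effect as
  //
  //   __ push(r3);
  //   __ push(r1);
  //
  // but uses a single stm instruction whenever the register codes allow it.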

  void Pop(Register dst) { pop(dst); }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Condition cond = al) {
    DCHECK(src1 != src2);
    if (src1.code() > src2.code()) {
      ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      ldr(src2, MemOperand(sp, 4, PostIndex), cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Pop three registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
      } else {
        ldr(src3, MemOperand(sp, 4, PostIndex), cond);
        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
      }
    } else {
      Pop(src2, src3, cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Pop four registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Register src4,
           Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3, src4));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
              cond);
        } else {
          ldr(src4, MemOperand(sp, 4, PostIndex), cond);
          ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
        }
      } else {
        Pop(src3, src4, cond);
        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
      }
    } else {
      Pop(src2, src3, src4, cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, non-register arguments must be stored in
  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
  // are word sized. If double arguments are used, this function assumes that
  // all double arguments are stored before core registers; otherwise the
  // correct alignment of the double values is not guaranteed.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments, int num_double_registers = 0,
                            Register scratch = no_reg);

  // Removes current frame and its arguments from the stack preserving
  // the arguments and a return address pushed to the stack for the next call.
  // Both |callee_args_count| and |caller_args_count_reg| do not include
  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
  // is trashed.
  void PrepareForTailCall(const ParameterCount& callee_args_count,
                          Register caller_args_count_reg, Register scratch0,
                          Register scratch1);

  // There are two ways of passing double arguments on ARM, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DwVfpRegister src);
  void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
  void MovToFloatResult(DwVfpRegister src);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);

  void MovFromFloatParameter(DwVfpRegister dst);
  void MovFromFloatResult(DwVfpRegister dst);
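
  // Example (illustrative): calling a C helper that takes one double argument
  // and returns a double. Here |ref| is assumed to be an ExternalReference for
  // the target C function and |scratch| a spare core register:
  //
  //   __ PrepareCallCFunction(0, 1, scratch);
  //   __ MovToFloatParameter(d0);
  //   __ CallCFunction(ref, 0, 1);
  //   __ MovFromFloatResult(d0);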

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, AbortReason reason);

  // Like Assert(), but always enabled.
  void Check(Condition cond, AbortReason reason);

  // Print a message to stdout and abort execution.
  void Abort(AbortReason msg);

  inline bool AllowThisStubCall(CodeStub* stub);

  void LslPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift);
  void LslPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift);
  void LsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift);
  void LsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift);
  void AsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift);
  void AsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift);
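
  // Example (illustrative): the *Pair helpers implement 64-bit shifts on a
  // (low, high) register pair. Shifting the 64-bit value held in {r0 (low),
  // r1 (high)} left by four bits into {r2, r3}:
  //
  //   __ LslPair(r2, r3, r0, r1, 4);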

  void LoadFromConstantsTable(Register destination,
                              int constant_index) override;
  void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
  void LoadRootRelative(Register destination, int32_t offset) override;

  static constexpr int kCallStubSize = 2 * kInstrSize;
  void CallStubDelayed(CodeStub* stub);

  // Call a runtime routine. This expects {centry} to contain a fitting CEntry
  // builtin for the target runtime function and uses an indirect call.
  void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Call(Register target, Condition cond = al);
  void Call(Address target, RelocInfo::Mode rmode, Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
            bool check_constant_pool = true);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
            bool check_constant_pool = true);
  void Call(Label* target);

  // This should only be used when assembling a deoptimizer call because of
  // the CheckConstPool invocation, which is only needed for deoptimization.
  void CallForDeoptimization(Address target, int deopt_id,
                             RelocInfo::Mode rmode) {
    USE(deopt_id);
    Call(target, rmode);
    CheckConstPool(false, false);
  }

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count, Condition cond = al);
  void Drop(Register count, Condition cond = al);

  void Ret(Condition cond = al);
  void Ret(int drop, Condition cond = al);

  // Compare single values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const SwVfpRegister src1, const SwVfpRegister src2,
                             const Condition cond = al);
  void VFPCompareAndSetFlags(const SwVfpRegister src1, const float src2,
                             const Condition cond = al);

  // Compare double values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const DwVfpRegister src1, const DwVfpRegister src2,
                             const Condition cond = al);
  void VFPCompareAndSetFlags(const DwVfpRegister src1, const double src2,
                             const Condition cond = al);
  // If the value is a NaN, canonicalize the value; otherwise, do nothing.
  void VFPCanonicalizeNaN(const DwVfpRegister dst, const DwVfpRegister src,
                          const Condition cond = al);
  void VFPCanonicalizeNaN(const DwVfpRegister value,
                          const Condition cond = al) {
    VFPCanonicalizeNaN(value, value, cond);
  }

  void VmovHigh(Register dst, DwVfpRegister src);
  void VmovHigh(DwVfpRegister dst, Register src);
  void VmovLow(Register dst, DwVfpRegister src);
  void VmovLow(DwVfpRegister dst, Register src);

  void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
                     Label* condition_met);

  // Check whether d16-d31 are available on the CPU. The result is given by the
  // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
  void CheckFor32DRegs(Register scratch);

  void SaveRegisters(RegList registers);
  void RestoreRegisters(RegList registers);

  void CallRecordWriteStub(Register object, Register address,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode);

  // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
  // values to location, saving [d0..(d15|d31)].
  void SaveFPRegs(Register location, Register scratch);

  // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
  // values to location, restoring [d0..(d15|d31)].
  void RestoreFPRegs(Register location, Register scratch);

  // Calculate how much stack space (in bytes) is required to store caller
  // registers excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion1 = no_reg,
                                      Register exclusion2 = no_reg,
                                      Register exclusion3 = no_reg) const;

  // Push caller saved registers on the stack, and return the number of bytes
  // stack pointer is adjusted.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                      Register exclusion2 = no_reg,
                      Register exclusion3 = no_reg);
  // Restore caller saved registers from the stack, and return the number of
  // bytes stack pointer is adjusted.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                     Register exclusion2 = no_reg,
                     Register exclusion3 = no_reg);
  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);

  // Perform a floating-point min or max operation with the
  // (IEEE-754-compatible) semantics of ARM64's fmin/fmax. Some cases, typically
  // NaNs or +/-0.0, are expected to be rare and are handled in out-of-line
  // code. The specific behaviour depends on supported instructions.
  //
  // These functions assume (and assert) that left!=right. It is permitted
  // for the result to alias either input register.
  void FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
                Label* out_of_line);
  void FloatMin(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
                Label* out_of_line);
  void FloatMax(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right,
                Label* out_of_line);
  void FloatMin(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right,
                Label* out_of_line);

  // Generate out-of-line cases for the macros above.
  void FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
                         SwVfpRegister right);
  void FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
                         SwVfpRegister right);
  void FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
                         DwVfpRegister right);
  void FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
                         DwVfpRegister right);
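
  // Example (illustrative): a typical emission pattern, with |done| and |ool|
  // as caller-provided labels:
  //
  //   Label done, ool;
  //   __ FloatMax(d0, d1, d2, &ool);     // Fast path.
  //   __ b(&done);
  //   __ bind(&ool);
  //   __ FloatMaxOutOfLine(d0, d1, d2);  // Rare cases (NaNs, +/-0.0).
  //   __ bind(&done);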

  void ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane);
  void ExtractLane(Register dst, DwVfpRegister src, NeonDataType dt, int lane);
  void ExtractLane(SwVfpRegister dst, QwNeonRegister src, int lane);
  void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane,
                   NeonDataType dt, int lane);
  void ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
                   SwVfpRegister src_lane, int lane);

  // Register move. May do nothing if the registers are identical.
  void Move(Register dst, Smi* smi);
  void Move(Register dst, Handle<HeapObject> value);
  void Move(Register dst, ExternalReference reference);
  void Move(Register dst, Register src, Condition cond = al);
  void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
            Condition cond = al) {
    if (!src.IsRegister() || src.rm() != dst || sbit != LeaveCC) {
      mov(dst, src, sbit, cond);
    }
  }
  void Move(SwVfpRegister dst, SwVfpRegister src, Condition cond = al);
  void Move(DwVfpRegister dst, DwVfpRegister src, Condition cond = al);
  void Move(QwNeonRegister dst, QwNeonRegister src);

  // Simulate s-register moves for imaginary s32 - s63 registers.
  void VmovExtended(Register dst, int src_code);
  void VmovExtended(int dst_code, Register src);
  // Move between s-registers and imaginary s-registers.
  void VmovExtended(int dst_code, int src_code);
  void VmovExtended(int dst_code, const MemOperand& src);
  void VmovExtended(const MemOperand& dst, int src_code);

  // Register swap. Note that the register operands should be distinct.
  void Swap(Register srcdst0, Register srcdst1);
  void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
  void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);

  void SmiUntag(Register reg, SBit s = LeaveCC) {
    mov(reg, Operand::SmiUntag(reg), s);
  }
  void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
    mov(dst, Operand::SmiUntag(src), s);
  }
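
  // Illustrative note: on 32-bit ARM a Smi stores its integer value shifted
  // left by one bit (the tag bit is 0), so Operand::SmiUntag amounts to an
  // arithmetic shift right by 1. For example, the Smi encoding of 5 is the
  // raw word 10, and SmiUntag turns it back into 5.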

  // Load an object from the root table.
  void LoadRoot(Register destination, Heap::RootListIndex index) override {
    LoadRoot(destination, index, al);
  }
  void LoadRoot(Register destination, Heap::RootListIndex index,
                Condition cond);

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value, Label* smi_label);

  void JumpIfEqual(Register x, int32_t y, Label* dest);
  void JumpIfLessThan(Register x, int32_t y, Label* dest);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
  // succeeds, otherwise falls through if result is saturated. On return
  // 'result' either holds answer, or is clobbered on fall through.
  //
  // Only public for the test code in test-code-stubs-arm.cc.
  void TryInlineTruncateDoubleToI(Register result, DwVfpRegister input,
                                  Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DwVfpRegister double_input, StubCallMode stub_mode);
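
  // Illustrative values for the ToInt32 semantics used above: 3.7 truncates
  // to 3 and -3.7 to -3, while out-of-range inputs wrap modulo 2^32 into
  // [-2^31, 2^31), e.g. 2147483648.0 (2^31) converts to -2147483648.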

  // EABI variant for double arguments in use.
  bool use_eabi_hardfloat() {
#ifdef __arm__
    return base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
    return true;
#else
    return false;
#endif
  }

  // Compute the start of the generated instruction stream from the current PC.
  // This is an alternative to embedding the {CodeObject} handle as a reference.
  void ComputeCodeStartAddress(Register dst);

  void ResetSpeculationPoisonRegister();

 private:
  // Compare single values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const SwVfpRegister src1,
                              const SwVfpRegister src2,
                              const Register fpscr_flags,
                              const Condition cond = al);
  void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
                              const Register fpscr_flags,
                              const Condition cond = al);

  // Compare double values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
                              const DwVfpRegister src2,
                              const Register fpscr_flags,
                              const Condition cond = al);
  void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2,
                              const Register fpscr_flags,
                              const Condition cond = al);

  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);

  // Implementation helpers for FloatMin and FloatMax.
  template <typename T>
  void FloatMaxHelper(T result, T left, T right, Label* out_of_line);
  template <typename T>
  void FloatMinHelper(T result, T left, T right, Label* out_of_line);
  template <typename T>
  void FloatMaxOutOfLineHelper(T result, T left, T right);
  template <typename T>
  void FloatMinOutOfLineHelper(T result, T left, T right);

  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  void CallCFunctionHelper(Register function, int num_reg_arguments,
                           int num_double_arguments);
};

// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
 public:
  MacroAssembler(Isolate* isolate, void* buffer, int size,
                 CodeObjectRequired create_code_object)
      : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
                       size, create_code_object) {}
  MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
                 void* buffer, int size, CodeObjectRequired create_code_object);

  void Mls(Register dst, Register src1, Register src2, Register srcA,
           Condition cond = al);
  void And(Register dst, Register src1, const Operand& src2,
           Condition cond = al);
  void Ubfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  void Sbfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);

  void Load(Register dst, const MemOperand& src, Representation r);
  void Store(Register src, const MemOperand& dst, Representation r);

  // ---------------------------------------------------------------------------
  // GC Support

  // Check if object is in new space. Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
    InNewSpace(object, scratch, eq, branch);
  }

  // Check if object is in new space. Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
    InNewSpace(object, scratch, ne, branch);
  }

  // Check if an object has a given incremental marking color.
  void HasColor(Register object, Register scratch0, Register scratch1,
                Label* has_color, int first_bit, int second_bit);

  void JumpIfBlack(Register object, Register scratch0, Register scratch1,
                   Label* on_black);

  // Checks the color of an object. If the object is white we jump to the
  // incremental marker.
  void JumpIfWhite(Register value, Register scratch1, Register scratch2,
                   Register scratch3, Label* value_is_white);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, Register scratch,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object, Register address, Register value,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);
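
  // Example (illustrative): a typical field store followed by the write
  // barrier. |object|, |value| and |scratch| stand for registers chosen by
  // the caller and |offset| for the field offset:
  //
  //   __ str(value, FieldMemOperand(object, offset));
  //   __ RecordWriteField(object, offset, value, scratch, kLRHasBeenSaved,
  //                       kDontSaveFPRegs);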

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();

  // Enter exit frame.
  // stack_space - extra stack space, used for alignment before call to C.
  void EnterExitFrame(bool save_doubles, int stack_space = 0,
                      StackFrame::Type frame_type = StackFrame::EXIT);

  // Leave the current exit frame. Expects the return value in r0.
  // Expects, in a register, the number of values that were pushed prior to the
  // exit frame and should now be removed (or no_reg, if there is nothing to
  // remove).
  void LeaveExitFrame(bool save_doubles, Register argument_count,
                      bool argument_count_is_length = false);

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  void LoadNativeContextSlot(int index, Register dst);

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
                          const ParameterCount& actual, InvokeFlag flag);

  // On function call, call into the debugger if necessary.
  void CheckDebugHook(Register fun, Register new_target,
                      const ParameterCount& expected,
                      const ParameterCount& actual);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function, Register new_target,
                      const ParameterCount& actual, InvokeFlag flag);

  void InvokeFunction(Register function, const ParameterCount& expected,
                      const ParameterCount& actual, InvokeFlag flag);

  // Frame restart support
  void MaybeDropFrames();

  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Support functions.

  // Compare object type for heap object. heap_object contains a non-Smi
  // whose object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map
  // register are the same register). It leaves the heap object in the
  // heap_object register unless the heap_object register is the same register
  // as one of the other registers.
  // Type_reg can be no_reg. In that case a scratch register is used.
  void CompareObjectType(Register heap_object, Register map, Register type_reg,
                         InstanceType type);

  // Compare instance type in a map. map contains a valid map object whose
  // object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map, Register type_reg, InstanceType type);
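
  // Example (illustrative): checking whether the object in r1 is a JSFunction,
  // using r2 for the map and r3 for the instance type:
  //
  //   __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
  //   __ b(ne, &not_a_function);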

  // Compare the object in a register to a value from the root list.
  // Acquires a scratch register.
  void CompareRoot(Register obj, Heap::RootListIndex index);
  void PushRoot(Heap::RootListIndex index) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    LoadRoot(scratch, index);
    Push(scratch);
  }

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
    CompareRoot(with, index);
    b(eq, if_equal);
  }

  // Compare the object in a register to a value and jump if they are not
  // equal.
  void JumpIfNotRoot(Register with, Heap::RootListIndex index,
                     Label* if_not_equal) {
    CompareRoot(with, index);
    b(ne, if_not_equal);
  }

  // Try to convert a double to a signed 32-bit integer.
  // Z flag set to one and result assigned if the conversion is exact.
  void TryDoubleToInt32Exact(Register result, DwVfpRegister double_input,
                             LowDwVfpRegister double_scratch);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub, Condition cond = al);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub, Condition cond = al);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }
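
  // Example (illustrative): calling a runtime function that takes no
  // arguments, so the argument count is taken from the function table:
  //
  //   __ CallRuntime(Runtime::kStackGuard);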

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               bool builtin_exit_frame = false);

  // Generates a trampoline to jump to the off-heap instruction stream.
  void JumpToInstructionStream(Address entry);

  // ---------------------------------------------------------------------------
  // In-place weak references.
  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);

  // ---------------------------------------------------------------------------
  // Smi utilities

  void SmiTag(Register reg, SBit s = LeaveCC);
  void SmiTag(Register dst, Register src, SBit s = LeaveCC);

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Test if the register contains a smi (Z == 0 (eq) if true).
  void SmiTst(Register value);
  // Jump if the register contains a non-smi.
  void JumpIfNotSmi(Register value, Label* not_smi_label);
  // Jump if either of the registers contain a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject (or subclass),
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  template <typename Field>
  void DecodeField(Register dst, Register src) {
    Ubfx(dst, src, Field::kShift, Field::kSize);
  }

  template <typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
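
  // Example (illustrative): Field can be any BitField-style class exposing
  // kShift and kSize, e.g. a hypothetical FooBits = BitField<int, 3, 4>. Then
  //
  //   __ DecodeField<FooBits>(r0, r1);
  //
  // extracts bits 3..6 of r1 into the low bits of r0.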

 private:
  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual, Label* done,
                      bool* definitely_mismatches, InvokeFlag flag);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object, Register scratch,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};

// -----------------------------------------------------------------------------
// Static helper functions.

inline MemOperand ContextMemOperand(Register context, int index = 0) {
  return MemOperand(context, Context::SlotOffset(index));
}

inline MemOperand NativeContextMemOperand() {
  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
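
// Example (illustrative): loading a slot of the native context, here the
// global proxy, into r0 (this is essentially what LoadGlobalProxy expands to
// via LoadNativeContextSlot):
//
//   __ ldr(r0, NativeContextMemOperand());
//   __ ldr(r0, ContextMemOperand(r0, Context::GLOBAL_PROXY_INDEX));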

#define ACCESS_MASM(masm) masm->

}  // namespace internal
}  // namespace v8

#endif  // V8_ARM_MACRO_ASSEMBLER_ARM_H_