// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"

#include "src/arm64/code-stubs-arm64.h"
#include "src/arm64/frames-arm64.h"

namespace v8 {
namespace internal {


static void InitializeArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // cp: context
  // x1: function
  // x2: allocation site with elements kind
  // x0: number of arguments to the constructor function
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kArrayConstructor)->entry;

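  // A non-zero constant_stack_parameter_count means the stub takes arguments
  // on the stack; x0 then carries the actual argument count at runtime, and
  // -1 indicates that the count is not a compile-time constant.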
  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  }
}


void ArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void ArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}


static void InitializeInternalArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kInternalArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  }
}


void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}


#define __ ACCESS_MASM(masm)


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetRegisterParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    DCHECK((param_count == 0) ||
           x0.Is(descriptor.GetRegisterParameter(param_count - 1)));

    // Push arguments
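    // The PushPopQueue batches the queued registers so that they can be
    // pushed together in as few instructions as possible.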
    MacroAssembler::PushPopQueue queue(masm);
    for (int i = 0; i < param_count; ++i) {
      queue.Queue(descriptor.GetRegisterParameter(i));
    }
    queue.PushQueued();

    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label done;
  Register input = source();
  Register result = destination();
  DCHECK(is_truncating());

  DCHECK(result.Is64Bits());
  DCHECK(jssp.Is(masm->StackPointer()));

  int double_offset = offset();

  DoubleRegister double_scratch = d0;  // only used if !skip_fastpath()
  Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
  Register scratch2 =
      GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);

  __ Push(scratch1, scratch2);
  // Account for saved regs if input is jssp.
  if (input.is(jssp)) double_offset += 2 * kPointerSize;

  if (!skip_fastpath()) {
    __ Push(double_scratch);
    if (input.is(jssp)) double_offset += 1 * kDoubleSize;
    __ Ldr(double_scratch, MemOperand(input, double_offset));
    // Try to convert with a FPU convert instruction.  This handles all
    // non-saturating cases.
    __ TryConvertDoubleToInt64(result, double_scratch, &done);
    __ Fmov(result, double_scratch);
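    // If the conversion failed, result now holds the raw bits of the double
    // (moved by the Fmov above) so that the manual conversion below can pick
    // apart the exponent and mantissa.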
  } else {
    __ Ldr(result, MemOperand(input, double_offset));
  }

  // If we reach here we need to manually convert the input to an int32.

  // Extract the exponent.
  Register exponent = scratch1;
  __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
          HeapNumber::kExponentBits);

  // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0
  // since the mantissa gets shifted completely out of the int32_t result.
  __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
  __ CzeroX(result, ge);
  __ B(ge, &done);

  // The Fcvtzs sequence handles all cases except where the conversion causes
  // signed overflow in the int64_t target. Since we've already handled
  // exponents >= 84, we can guarantee that 63 <= exponent < 84.

  if (masm->emit_debug_code()) {
    __ Cmp(exponent, HeapNumber::kExponentBias + 63);
    // Exponents less than this should have been handled by the Fcvt case.
    __ Check(ge, kUnexpectedValue);
  }

  // Isolate the mantissa bits, and set the implicit '1'.
  Register mantissa = scratch2;
  __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
  __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);

  // Negate the mantissa if necessary.
  __ Tst(result, kXSignMask);
  __ Cneg(mantissa, mantissa, ne);

  // Shift the mantissa bits in the correct place. We know that we have to
  // shift it left here, because exponent >= 63 >= kMantissaBits.
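  // For example, for an input of 2^63 the biased exponent field is
  // 1023 + 63 = 1086, so the shift below is 1086 - (1023 + 52) = 11, and the
  // 53-bit significand (1 << 52) lands in bit 63.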
  __ Sub(exponent, exponent,
         HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
  __ Lsl(result, mantissa, exponent);

  __ Bind(&done);
  if (!skip_fastpath()) {
    __ Pop(double_scratch);
  }
  __ Pop(scratch2, scratch1);
  __ Ret();
}


// See call site for description.
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
                                          Register right, Register scratch,
                                          FPRegister double_scratch,
                                          Label* slow, Condition cond,
                                          Strength strength) {
  DCHECK(!AreAliased(left, right, scratch));
  Label not_identical, return_equal, heap_number;
  Register result = x0;

  __ Cmp(right, left);
  __ B(ne, &not_identical);

  // Test for NaN. Sadly, we can't just compare to factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // The operands are identical and they are not both Smis, so neither of
  // them is a Smi. If it's not a heap number, then return equal.
  Register right_type = scratch;
  if ((cond == lt) || (cond == gt)) {
    // Call runtime on identical JSObjects.  Otherwise return equal.
    __ JumpIfObjectType(right, right_type, right_type, FIRST_JS_RECEIVER_TYPE,
                        slow, ge);
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ Cmp(right_type, SYMBOL_TYPE);
    __ B(eq, slow);
    // Call runtime on identical SIMD values since we must throw a TypeError.
    __ Cmp(right_type, SIMD128_VALUE_TYPE);
    __ B(eq, slow);
    if (is_strong(strength)) {
      // Call the runtime on anything that is converted in the semantics, since
      // we need to throw a TypeError. Smis have already been ruled out.
      __ Cmp(right_type, Operand(HEAP_NUMBER_TYPE));
      __ B(eq, &return_equal);
      __ Tst(right_type, Operand(kIsNotStringMask));
      __ B(ne, slow);
    }
  } else if (cond == eq) {
    __ JumpIfHeapNumber(right, &heap_number);
  } else {
    __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
                        &heap_number);
    // Comparing JS objects with <=, >= is complicated.
    __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
    __ B(ge, slow);
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ Cmp(right_type, SYMBOL_TYPE);
    __ B(eq, slow);
    // Call runtime on identical SIMD values since we must throw a TypeError.
    __ Cmp(right_type, SIMD128_VALUE_TYPE);
    __ B(eq, slow);
    if (is_strong(strength)) {
      // Call the runtime on anything that is converted in the semantics,
      // since we need to throw a TypeError. Smis and heap numbers have
      // already been ruled out.
      __ Tst(right_type, Operand(kIsNotStringMask));
      __ B(ne, slow);
    }
    // Normally here we fall through to return_equal, but undefined is
    // special: (undefined == undefined) == true, but
    // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
    if ((cond == le) || (cond == ge)) {
      __ Cmp(right_type, ODDBALL_TYPE);
      __ B(ne, &return_equal);
      __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
      if (cond == le) {
        // undefined <= undefined should fail.
        __ Mov(result, GREATER);
      } else {
        // undefined >= undefined should fail.
        __ Mov(result, LESS);
      }
      __ Ret();
    }
  }

  __ Bind(&return_equal);
  if (cond == lt) {
    __ Mov(result, GREATER);  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ Mov(result, LESS);     // Things aren't greater than themselves.
  } else {
    __ Mov(result, EQUAL);    // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  // Cases lt and gt have been handled earlier, and case ne is never seen, as
  // it is handled in the parser (see Parser::ParseBinaryExpression). We are
  // only concerned with cases ge, le and eq here.
  if ((cond != lt) && (cond != gt)) {
    DCHECK((cond == ge) || (cond == le) || (cond == eq));
    __ Bind(&heap_number);
    // Left and right are identical pointers to a heap number object. Return
    // non-equal if the heap number is a NaN, and equal otherwise. Comparing
    // the number to itself will set the overflow flag iff the number is NaN.
    __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
    __ Fcmp(double_scratch, double_scratch);
    __ B(vc, &return_equal);  // Not NaN, so treat as normal heap number.

    if (cond == le) {
      __ Mov(result, GREATER);
    } else {
      __ Mov(result, LESS);
    }
    __ Ret();
  }

  // No fall through here.
  if (FLAG_debug_code) {
    __ Unreachable();
  }

  __ Bind(&not_identical);
}


// See call site for description.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register left,
                                           Register right,
                                           Register left_type,
                                           Register right_type,
                                           Register scratch) {
  DCHECK(!AreAliased(left, right, left_type, right_type, scratch));

  if (masm->emit_debug_code()) {
    // We assume that the arguments are not identical.
    __ Cmp(left, right);
    __ Assert(ne, kExpectedNonIdenticalObjects);
  }

  // If either operand is a JS object or an oddball value, then they are not
  // equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  Label right_non_object;

  __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
  __ B(lt, &right_non_object);

  // Return non-zero - x0 already contains a non-zero pointer.
  DCHECK(left.is(x0) || right.is(x0));
  Label return_not_equal;
  __ Bind(&return_not_equal);
  __ Ret();

  __ Bind(&right_non_object);

  // Check for oddballs: true, false, null, undefined.
  __ Cmp(right_type, ODDBALL_TYPE);

  // If right is not ODDBALL, test left. Otherwise, set eq condition.
  __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);

  // If right or left is not ODDBALL, test left >= FIRST_JS_RECEIVER_TYPE.
  // Otherwise, right or left is ODDBALL, so set a ge condition.
  __ Ccmp(left_type, FIRST_JS_RECEIVER_TYPE, NVFlag, ne);

  __ B(ge, &return_not_equal);

  // Internalized strings are unique, so they can only be equal if they are the
  // same object. We have already tested that case, so if left and right are
  // both internalized strings, they cannot be equal.
  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
  __ Orr(scratch, left_type, right_type);
  __ TestAndBranchIfAllClear(
      scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
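  // Fall through if at least one operand is not an internalized string; the
  // caller continues with more expensive checks.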
}


// See call site for description.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register left,
                                    Register right,
                                    FPRegister left_d,
                                    FPRegister right_d,
                                    Label* slow,
                                    bool strict) {
  DCHECK(!AreAliased(left_d, right_d));
  DCHECK((left.is(x0) && right.is(x1)) ||
         (right.is(x0) && left.is(x1)));
  Register result = x0;

  Label right_is_smi, done;
  __ JumpIfSmi(right, &right_is_smi);

  // Left is the smi. Check whether right is a heap number.
  if (strict) {
    // If right is not a number and left is a smi, then strict equality cannot
    // succeed. Return non-equal.
    Label is_heap_number;
    __ JumpIfHeapNumber(right, &is_heap_number);
    // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
    if (!right.is(result)) {
      __ Mov(result, NOT_EQUAL);
    }
    __ Ret();
    __ Bind(&is_heap_number);
  } else {
    // Smi compared non-strictly with a non-smi, non-heap-number. Call the
    // runtime.
    __ JumpIfNotHeapNumber(right, slow);
  }

  // Left is the smi. Right is a heap number. Load right value into right_d, and
  // convert left smi into double in left_d.
  __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
  __ SmiUntagToDouble(left_d, left);
  __ B(&done);

  __ Bind(&right_is_smi);
  // Right is a smi. Check whether the non-smi left is a heap number.
  if (strict) {
    // If left is not a number and right is a smi then strict equality cannot
    // succeed. Return non-equal.
    Label is_heap_number;
    __ JumpIfHeapNumber(left, &is_heap_number);
    // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
    if (!left.is(result)) {
      __ Mov(result, NOT_EQUAL);
    }
    __ Ret();
    __ Bind(&is_heap_number);
  } else {
    // Smi compared non-strictly with a non-smi, non-heap-number. Call the
    // runtime.
    __ JumpIfNotHeapNumber(left, slow);
  }

  // Right is the smi. Left is a heap number. Load left value into left_d, and
  // convert right smi into double in right_d.
  __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
  __ SmiUntagToDouble(right_d, right);

  // Fall through to both_loaded_as_doubles.
  __ Bind(&done);
}


// Fast negative check for internalized-to-internalized equality.
// See call site for description.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register left,
                                                     Register right,
                                                     Register left_map,
                                                     Register right_map,
                                                     Register left_type,
                                                     Register right_type,
                                                     Label* possible_strings,
                                                     Label* not_both_strings) {
  DCHECK(!AreAliased(left, right, left_map, right_map, left_type, right_type));
  Register result = x0;

  Label object_test;
  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
  // TODO(all): reexamine this branch sequence for optimisation wrt branch
  // prediction.
  __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
  __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
  __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings);
  __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);

  // Both are internalized. We already checked that they weren't the same
  // pointer, so they are not equal.
  __ Mov(result, NOT_EQUAL);
  __ Ret();

  __ Bind(&object_test);

  __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);

  // If right >= FIRST_JS_RECEIVER_TYPE, test left.
  // Otherwise, right < FIRST_JS_RECEIVER_TYPE, so set lt condition.
  __ Ccmp(left_type, FIRST_JS_RECEIVER_TYPE, NFlag, ge);

  __ B(lt, not_both_strings);

  // If both objects are undetectable, they are equal. Otherwise, they are not
  // equal, since they are different objects and an object is not equal to
  // undefined.

  // Returning here, so we can corrupt right_type and left_type.
  Register right_bitfield = right_type;
  Register left_bitfield = left_type;
  __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
  __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
  __ And(result, right_bitfield, left_bitfield);
  __ And(result, result, 1 << Map::kIsUndetectable);
  __ Eor(result, result, 1 << Map::kIsUndetectable);
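  // result is now EQUAL (0) iff both maps had the kIsUndetectable bit set,
  // and non-zero (NOT_EQUAL) otherwise.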
  __ Ret();
}


static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ JumpIfNotHeapNumber(input, fail);
  }
  // We could be strict about internalized/non-internalized here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ Bind(&ok);
}


void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = x1;
  Register rhs = x0;
  Register result = x0;
  Condition cond = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, right(), &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;
  Label not_two_smis, smi_done;
  __ JumpIfEitherNotSmi(lhs, rhs, &not_two_smis);
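  // Both operands are smis: once untagged, their difference is negative, zero
  // or positive exactly when lhs is less than, equal to or greater than rhs,
  // and it cannot overflow a 64-bit register.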
  __ SmiUntag(lhs);
  __ Sub(result, lhs, Operand::UntagSmi(rhs));
  __ Ret();

  __ Bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so it is
  // certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the answer
  // or goes to slow. Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond,
                                strength());

  // If either is a smi (we know that at least one is not a smi), then they can
  // only be strictly equal if the other is a HeapNumber.
  __ JumpIfBothNotSmi(lhs, rhs, &not_smis);

  // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
  // can:
  //  1) Return the answer.
  //  2) Branch to the slow case.
  //  3) Fall through to both_loaded_as_doubles.
  // In case 3, we have found out that we were dealing with a number-number
  // comparison. The double values of the numbers have been loaded, right into
  // rhs_d, left into lhs_d.
  FPRegister rhs_d = d0;
  FPRegister lhs_d = d1;
  EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict());

  __ Bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in rhs_d and
  // lhs_d.
  Label nan;
  __ Fcmp(lhs_d, rhs_d);
  __ B(vs, &nan);  // Overflow flag set if either is NaN.
  STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
  __ Cset(result, gt);  // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
  __ Csinv(result, result, xzr, ge);  // lt => -1, gt => 1, eq => 0.
  __ Ret();

  __ Bind(&nan);
  // Left and/or right is a NaN. Load the result register with whatever makes
  // the comparison fail, since comparisons with NaN always fail (except ne,
  // which is filtered out at a higher level.)
  DCHECK(cond != ne);
  if ((cond == lt) || (cond == le)) {
    __ Mov(result, GREATER);
  } else {
    __ Mov(result, LESS);
  }
  __ Ret();

  __ Bind(&not_smis);
  // At this point we know we are dealing with two different objects, and
  // neither of them is a smi. The objects are in rhs_ and lhs_.

  // Load the maps and types of the objects.
  Register rhs_map = x10;
  Register rhs_type = x11;
  Register lhs_map = x12;
  Register lhs_type = x13;
  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
  __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));

  if (strict()) {
    // This emits a non-equal return sequence for some object types, or falls
    // through if it was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap number comparison. Branch to earlier double comparison code
  // if they are heap numbers, otherwise, branch to internalized string check.
  __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
  __ B(ne, &check_for_internalized_strings);
  __ Cmp(lhs_map, rhs_map);

  // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat
  // string check.
  __ B(ne, &flat_string_check);

  // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double
  // comparison code.
  __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  __ B(&both_loaded_as_doubles);

  __ Bind(&check_for_internalized_strings);
  // In the strict case, the EmitStrictTwoHeapObjectCompare already took care
  // of internalized strings.
  if ((cond == eq) && !strict()) {
    // Returns an answer for two internalized strings or two detectable objects.
    // Otherwise branches to the string case or not both strings case.
    EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
                                             lhs_type, rhs_type,
                                             &flat_string_check, &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ Bind(&flat_string_check);
  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x14,
                                                    x15, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x10,
                      x11);
  if (cond == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
                                                  x12);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
                                                    x12, x13);
  }

  // Never fall through to here.
  if (FLAG_debug_code) {
    __ Unreachable();
  }

  __ Bind(&slow);

  __ Push(lhs, rhs);
  // Figure out which native to call and setup the arguments.
  if (cond == eq) {
    __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
  } else {
    int ncr;  // NaN compare result
    if ((cond == lt) || (cond == le)) {
      ncr = GREATER;
    } else {
      DCHECK((cond == gt) || (cond == ge));  // remaining cases
      ncr = LESS;
    }
    __ Mov(x10, Smi::FromInt(ncr));
    __ Push(x10);

    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    // tagged as a small integer.
    __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
                                             : Runtime::kCompare);
  }

  __ Bind(&miss);
  GenerateMiss(masm);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  CPURegList saved_regs = kCallerSaved;
  CPURegList saved_fp_regs = kCallerSavedFP;

  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.

  // We don't care if MacroAssembler scratch registers are corrupted.
  saved_regs.Remove(*(masm->TmpList()));
  saved_fp_regs.Remove(*(masm->FPTmpList()));

  __ PushCPURegList(saved_regs);
  if (save_doubles()) {
    __ PushCPURegList(saved_fp_regs);
  }

  AllowExternalCallThatCantCauseGC scope(masm);
  __ Mov(x0, ExternalReference::isolate_address(isolate()));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()), 1, 0);

  if (save_doubles()) {
    __ PopCPURegList(saved_fp_regs);
  }
  __ PopCPURegList(saved_regs);
  __ Ret();
}


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
  stub1.GetCode();
  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
  stub2.GetCode();
}


void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
  UseScratchRegisterScope temps(masm);
  Register saved_lr = temps.UnsafeAcquire(to_be_pushed_lr());
  Register return_address = temps.AcquireX();
  __ Mov(return_address, lr);
  // Restore lr with the value it had before the call to this stub (the value
  // which must be pushed).
  __ Mov(lr, saved_lr);
  __ PushSafepointRegisters();
  __ Ret(return_address);
}


void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
  UseScratchRegisterScope temps(masm);
  Register return_address = temps.AcquireX();
  // Preserve the return address (lr will be clobbered by the pop).
  __ Mov(return_address, lr);
  __ PopSafepointRegisters();
  __ Ret(return_address);
}


void MathPowStub::Generate(MacroAssembler* masm) {
  // Stack on entry:
  // jssp[0]: Exponent (as a tagged value).
  // jssp[1]: Base (as a tagged value).
  //
  // The (tagged) result will be returned in x0, as a heap number.

  Register result_tagged = x0;
  Register base_tagged = x10;
  Register exponent_tagged = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent_tagged.is(x11));
  Register exponent_integer = MathPowIntegerDescriptor::exponent();
  DCHECK(exponent_integer.is(x12));
  Register scratch1 = x14;
  Register scratch0 = x15;
  Register saved_lr = x19;
  FPRegister result_double = d0;
  FPRegister base_double = d0;
  FPRegister exponent_double = d1;
  FPRegister base_double_copy = d2;
  FPRegister scratch1_double = d6;
  FPRegister scratch0_double = d7;

  // A fast-path for integer exponents.
  Label exponent_is_smi, exponent_is_integer;
  // Bail out to runtime.
  Label call_runtime;
  // Allocate a heap number for the result, and return it.
  Label done;

  // Unpack the inputs.
  if (exponent_type() == ON_STACK) {
    Label base_is_smi;
    Label unpack_exponent;

    __ Pop(exponent_tagged, base_tagged);

    __ JumpIfSmi(base_tagged, &base_is_smi);
    __ JumpIfNotHeapNumber(base_tagged, &call_runtime);
    // base_tagged is a heap number, so load its double value.
    __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset));
    __ B(&unpack_exponent);
    __ Bind(&base_is_smi);
    // base_tagged is a SMI, so untag it and convert it to a double.
    __ SmiUntagToDouble(base_double, base_tagged);

    __ Bind(&unpack_exponent);
    //  x10   base_tagged       The tagged base (input).
    //  x11   exponent_tagged   The tagged exponent (input).
    //  d1    base_double       The base as a double.
    __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
    __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime);
    // exponent_tagged is a heap number, so load its double value.
    __ Ldr(exponent_double,
           FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
  } else if (exponent_type() == TAGGED) {
    __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
    __ Ldr(exponent_double,
           FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
  }

  // Handle double (heap number) exponents.
  if (exponent_type() != INTEGER) {
    // Detect integer exponents stored as doubles and handle those in the
    // integer fast-path.
    __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
                                 scratch0_double, &exponent_is_integer);

    if (exponent_type() == ON_STACK) {
      FPRegister half_double = d3;
      FPRegister minus_half_double = d4;
      // Detect square root case. Crankshaft detects constant +/-0.5 at compile
      // time and uses DoMathPowHalf instead. We then skip this check for
      // non-constant cases of +/-0.5 as these hardly occur.

      __ Fmov(minus_half_double, -0.5);
      __ Fmov(half_double, 0.5);
      __ Fcmp(minus_half_double, exponent_double);
      __ Fccmp(half_double, exponent_double, NZFlag, ne);
      // Condition flags at this point:
      //    0.5:  nZCv    // Identified by eq && pl
      //   -0.5:  NZcv    // Identified by eq && mi
      //  other:  ?z??    // Identified by ne
      __ B(ne, &call_runtime);

      // The exponent is 0.5 or -0.5.

      // Given that exponent is known to be either 0.5 or -0.5, the following
      // special cases could apply (according to ECMA-262 15.8.2.13):
      //
      //  base.isNaN():                   The result is NaN.
      //  (base == +INFINITY) || (base == -INFINITY)
      //    exponent == 0.5:              The result is +INFINITY.
      //    exponent == -0.5:             The result is +0.
      //  (base == +0) || (base == -0)
      //    exponent == 0.5:              The result is +0.
      //    exponent == -0.5:             The result is +INFINITY.
      //  (base < 0) && base.isFinite():  The result is NaN.
      //
      // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
      // where base is -INFINITY or -0.

      // Add +0 to base. This has no effect other than turning -0 into +0.
      __ Fadd(base_double, base_double, fp_zero);
      // The operation -0+0 results in +0 in all cases except where the
      // FPCR rounding mode is 'round towards minus infinity' (RM). The
      // ARM64 simulator does not currently simulate FPCR (where the rounding
      // mode is set), so test the operation with some debug code.
      if (masm->emit_debug_code()) {
        UseScratchRegisterScope temps(masm);
        Register temp = temps.AcquireX();
        __ Fneg(scratch0_double, fp_zero);
        // Verify that we correctly generated +0.0 and -0.0.
        //  bits(+0.0) = 0x0000000000000000
        //  bits(-0.0) = 0x8000000000000000
        __ Fmov(temp, fp_zero);
        __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
        __ Fmov(temp, scratch0_double);
        __ Eor(temp, temp, kDSignMask);
        __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
        // Check that -0.0 + 0.0 == +0.0.
        __ Fadd(scratch0_double, scratch0_double, fp_zero);
        __ Fmov(temp, scratch0_double);
        __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
      }

      // If base is -INFINITY, make it +INFINITY.
      //  * Calculate base - base: All infinities will become NaNs since both
      //    -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in ARM64.
      //  * If the result is NaN, calculate abs(base).
      __ Fsub(scratch0_double, base_double, base_double);
      __ Fcmp(scratch0_double, 0.0);
      __ Fabs(scratch1_double, base_double);
      __ Fcsel(base_double, scratch1_double, base_double, vs);

      // Calculate the square root of base.
      __ Fsqrt(result_double, base_double);
      __ Fcmp(exponent_double, 0.0);
      __ B(ge, &done);  // Finish now for exponents of 0.5.
      // Find the inverse for exponents of -0.5.
      __ Fmov(scratch0_double, 1.0);
      __ Fdiv(result_double, scratch0_double, result_double);
      __ B(&done);
    }

    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ Mov(saved_lr, lr);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
      __ Mov(lr, saved_lr);
      __ B(&done);
    }

    // Handle SMI exponents.
    __ Bind(&exponent_is_smi);
    //  x10   base_tagged       The tagged base (input).
    //  x11   exponent_tagged   The tagged exponent (input).
    //  d1    base_double       The base as a double.
    __ SmiUntag(exponent_integer, exponent_tagged);
  }

  __ Bind(&exponent_is_integer);
  //  x10   base_tagged       The tagged base (input).
  //  x11   exponent_tagged   The tagged exponent (input).
  //  x12   exponent_integer  The exponent as an integer.
  //  d1    base_double       The base as a double.

  // Find abs(exponent). For negative exponents, we can find the inverse later.
  Register exponent_abs = x13;
  __ Cmp(exponent_integer, 0);
  __ Cneg(exponent_abs, exponent_integer, mi);
  //  x13   exponent_abs      The value of abs(exponent_integer).

  // Repeatedly multiply to calculate the power.
  //  result = 1.0;
  //  For each bit n (exponent_integer{n}) {
  //    if (exponent_integer{n}) {
  //      result *= base;
  //    }
  //    base *= base;
  //    if (remaining bits in exponent_integer are all zero) {
  //      break;
  //    }
  //  }
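  // For example, for exponent_abs == 5 (binary 101) the loop computes
  // result = base^1 * base^4 = base^5.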
  Label power_loop, power_loop_entry, power_loop_exit;
  __ Fmov(scratch1_double, base_double);
  __ Fmov(base_double_copy, base_double);
  __ Fmov(result_double, 1.0);
  __ B(&power_loop_entry);

  __ Bind(&power_loop);
  __ Fmul(scratch1_double, scratch1_double, scratch1_double);
  __ Lsr(exponent_abs, exponent_abs, 1);
  __ Cbz(exponent_abs, &power_loop_exit);

  __ Bind(&power_loop_entry);
  __ Tbz(exponent_abs, 0, &power_loop);
  __ Fmul(result_double, result_double, scratch1_double);
  __ B(&power_loop);

  __ Bind(&power_loop_exit);

  // If the exponent was positive, result_double holds the result.
  __ Tbz(exponent_integer, kXSignBit, &done);

  // The exponent was negative, so find the inverse.
  __ Fmov(scratch0_double, 1.0);
  __ Fdiv(result_double, scratch0_double, result_double);
  // ECMA-262 only requires Math.pow to return an 'implementation-dependent
  // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
  // to calculate the subnormal value 2^-1074. This method of calculating
  // negative powers doesn't work because 2^1074 overflows to infinity. To
  // catch this corner-case, we bail out if the result was 0. (This can only
  // occur if the divisor is infinity or the base is zero.)
  __ Fcmp(result_double, 0.0);
  __ B(&done, ne);

  if (exponent_type() == ON_STACK) {
    // Bail out to runtime code.
    __ Bind(&call_runtime);
    // Put the arguments back on the stack.
    __ Push(base_tagged, exponent_tagged);
    __ TailCallRuntime(Runtime::kMathPowRT);

    // Return.
    __ Bind(&done);
    __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1,
                          result_double);
    DCHECK(result_tagged.is(x0));
    __ IncrementCounter(
        isolate()->counters()->math_pow(), 1, scratch0, scratch1);
    __ Ret();
  } else {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ Mov(saved_lr, lr);
    __ Fmov(base_double, base_double_copy);
    __ Scvtf(exponent_double, exponent_integer);
    __ CallCFunction(
        ExternalReference::power_double_double_function(isolate()),
        0, 2);
    __ Mov(lr, saved_lr);
    __ Bind(&done);
    __ IncrementCounter(
        isolate()->counters()->math_pow(), 1, scratch0, scratch1);
    __ Ret();
  }
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  // It is important that the following stubs are generated in this order
  // because pregenerated stubs can only call other pregenerated stubs.
  // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
  // CEntryStub.
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
  StoreFastElementStub::GenerateAheadOfTime(isolate);
  TypeofStub::GenerateAheadOfTime(isolate);
}


void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  StoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  RestoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  // Floating-point code doesn't get special handling in ARM64, so there's
  // nothing to do here.
  USE(isolate);
}

bool CEntryStub::NeedsImmovableCode() {
  // CEntryStub stores the return address on the stack before calling into
  // C++ code. In some cases, the VM accesses this address, but it is not used
  // when the C++ code returns to the stub because LR holds the return address
  // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up
  // returning to dead code.
  // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
  // find any comment to confirm this, and I don't hit any crashes whatever
  // this function returns. The analysis should be properly confirmed.
  return true;
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
  CEntryStub stub_fp(isolate, 1, kSaveFPRegs);
  stub_fp.GetCode();
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // The Abort mechanism relies on CallRuntime, which in turn relies on
  // CEntryStub, so until this stub has been generated, we have to use a
  // fall-back Abort mechanism.
  //
  // Note that this stub must be generated before any use of Abort.
  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);

  ASM_LOCATION("CEntryStub::Generate entry");
  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Register parameters:
  //    x0: argc (including receiver, untagged)
  //    x1: target
  // If argv_in_register():
  //    x11: argv (pointer to first argument)
  //
  // The stack on entry holds the arguments and the receiver, with the receiver
  // at the highest address:
  //
  //    jssp[argc-1]: receiver
  //    jssp[argc-2]: arg[argc-2]
  //    ...           ...
  //    jssp[1]:      arg[1]
  //    jssp[0]:      arg[0]
  //
  // The arguments are in reverse order, so that arg[argc-2] is actually the
  // first argument to the target function and arg[0] is the last.
  DCHECK(jssp.Is(__ StackPointer()));
  const Register& argc_input = x0;
  const Register& target_input = x1;

  // Calculate argv, argc and the target address, and store them in
  // callee-saved registers so we can retry the call without having to reload
  // these arguments.
  // TODO(jbramley): If the first call attempt succeeds in the common case (as
  // it should), then we might be better off putting these parameters directly
  // into their argument registers, rather than using callee-saved registers and
  // preserving them on the stack.
  const Register& argv = x21;
  const Register& argc = x22;
  const Register& target = x23;

  // Derive argv from the stack pointer so that it points to the first argument
  // (arg[argc-2]), or just below the receiver in case there are no arguments.
  //  - Adjust for the arg[] array.
  Register temp_argv = x11;
  if (!argv_in_register()) {
    __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
    //  - Adjust for the receiver.
    __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
  }

  // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
  // registers.
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(save_doubles(), x10, 3);
  DCHECK(csp.Is(__ StackPointer()));

  // Poke callee-saved registers into reserved space.
  __ Poke(argv, 1 * kPointerSize);
  __ Poke(argc, 2 * kPointerSize);
  __ Poke(target, 3 * kPointerSize);

  // We normally only keep tagged values in callee-saved registers, as they
  // could be pushed onto the stack by called stubs and functions, and on the
  // stack they can confuse the GC. However, we're only calling C functions
  // which can push arbitrary data onto the stack anyway, and so the GC won't
  // examine that part of the stack.
  __ Mov(argc, argc_input);
  __ Mov(target, target_input);
  __ Mov(argv, temp_argv);

  // x21 : argv
  // x22 : argc
  // x23 : call target
  //
  // The stack (on entry) holds the arguments and the receiver, with the
  // receiver at the highest address:
  //
  //         argv[8]:     receiver
  // argv -> argv[0]:     arg[argc-2]
  //         ...          ...
  //         argv[...]:   arg[1]
  //         argv[...]:   arg[0]
  //
  // Immediately below (after) this is the exit frame, as constructed by
  // EnterExitFrame:
  //         fp[8]:    CallerPC (lr)
  //   fp -> fp[0]:    CallerFP (old fp)
  //         fp[-8]:   Space reserved for SPOffset.
  //         fp[-16]:  CodeObject()
  //         csp[...]: Saved doubles, if saved_doubles is true.
  //         csp[32]:  Alignment padding, if necessary.
  //         csp[24]:  Preserved x23 (used for target).
  //         csp[16]:  Preserved x22 (used for argc).
  //         csp[8]:   Preserved x21 (used for argv).
  //  csp -> csp[0]:   Space reserved for the return address.
  //
  // After a successful call, the exit frame, preserved registers (x21-x23) and
  // the arguments (including the receiver) are dropped or popped as
  // appropriate. The stub then returns.
  //
  // After an unsuccessful call, the exit frame and suchlike are left
  // untouched, and the stub throws an exception by jumping to the
  // exception_returned label.

  DCHECK(csp.Is(__ StackPointer()));

  // Prepare AAPCS64 arguments to pass to the builtin.
  __ Mov(x0, argc);
  __ Mov(x1, argv);
  __ Mov(x2, ExternalReference::isolate_address(isolate()));

  Label return_location;
  __ Adr(x12, &return_location);
  __ Poke(x12, 0);
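  // The return address has been computed with Adr and stored in the reserved
  // stack slot so that the VM can locate the call site when it walks the
  // stack (see NeedsImmovableCode above); LR itself is set by the Blr below.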

  if (__ emit_debug_code()) {
    // Verify that the slot below fp[kSPOffset]-8 points to the return location
    // (currently in x12).
    UseScratchRegisterScope temps(masm);
    Register temp = temps.AcquireX();
    __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
    __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
    __ Cmp(temp, x12);
    __ Check(eq, kReturnAddressNotFoundInFrame);
  }

  // Call the builtin.
  __ Blr(target);
  __ Bind(&return_location);

  //  x0    result      The return code from the call.
  //  x21   argv
  //  x22   argc
  //  x23   target
  const Register& result = x0;

  // Check result for exception sentinel.
  Label exception_returned;
  __ CompareRoot(result, Heap::kExceptionRootIndex);
  __ B(eq, &exception_returned);

  // The call succeeded, so unwind the stack and return.

  // Restore callee-saved registers x21-x23.
  __ Mov(x11, argc);

  __ Peek(argv, 1 * kPointerSize);
  __ Peek(argc, 2 * kPointerSize);
  __ Peek(target, 3 * kPointerSize);

  __ LeaveExitFrame(save_doubles(), x10, true);
  DCHECK(jssp.Is(__ StackPointer()));
  if (!argv_in_register()) {
    // Drop the remaining stack slots and return from the stub.
    __ Drop(x11);
  }
  __ AssertFPCRState();
  __ Ret();

  // The stack pointer is still csp if we aren't returning, and the frame
  // hasn't changed (except for the return address).
  __ SetStackPointer(csp);

  // Handling of exception.
  __ Bind(&exception_returned);

  ExternalReference pending_handler_context_address(
      Isolate::kPendingHandlerContextAddress, isolate());
  ExternalReference pending_handler_code_address(
      Isolate::kPendingHandlerCodeAddress, isolate());
  ExternalReference pending_handler_offset_address(
      Isolate::kPendingHandlerOffsetAddress, isolate());
  ExternalReference pending_handler_fp_address(
      Isolate::kPendingHandlerFPAddress, isolate());
  ExternalReference pending_handler_sp_address(
      Isolate::kPendingHandlerSPAddress, isolate());

  // Ask the runtime for help to determine the handler. This will set x0 to
  // contain the current pending exception, don't clobber it.
  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
                                 isolate());
  DCHECK(csp.Is(masm->StackPointer()));
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ Mov(x0, 0);  // argc.
    __ Mov(x1, 0);  // argv.
    __ Mov(x2, ExternalReference::isolate_address(isolate()));
    __ CallCFunction(find_handler, 3);
  }

  // We didn't execute a return case, so the stack frame hasn't been updated
  // (except for the return address slot). However, we don't need to initialize
  // jssp because the throw method will immediately overwrite it when it
  // unwinds the stack.
  __ SetStackPointer(jssp);

  // Retrieve the handler context, SP and FP.
  __ Mov(cp, Operand(pending_handler_context_address));
  __ Ldr(cp, MemOperand(cp));
  __ Mov(jssp, Operand(pending_handler_sp_address));
  __ Ldr(jssp, MemOperand(jssp));
  __ Mov(fp, Operand(pending_handler_fp_address));
  __ Ldr(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  Label not_js_frame;
  __ Cbz(cp, &not_js_frame);
  __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ Bind(&not_js_frame);

  // Compute the handler entry address and jump to it.
  __ Mov(x10, Operand(pending_handler_code_address));
  __ Ldr(x10, MemOperand(x10));
  __ Mov(x11, Operand(pending_handler_offset_address));
  __ Ldr(x11, MemOperand(x11));
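  // The handler code is a tagged Code object: strip the heap-object tag and
  // skip the Code header to reach the first instruction, then add the
  // handler's offset within the code.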
1273   __ Add(x10, x10, Code::kHeaderSize - kHeapObjectTag);
1274   __ Add(x10, x10, x11);
1275   __ Br(x10);
1276 }
1277 
1278 
1279 // This is the entry point from C++. 5 arguments are provided in x0-x4.
1280 // See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
1281 // Input:
1282 //   x0: code entry.
1283 //   x1: function.
1284 //   x2: receiver.
1285 //   x3: argc.
1286 //   x4: argv.
1287 // Output:
1288 //   x0: result.
Generate(MacroAssembler * masm)1289 void JSEntryStub::Generate(MacroAssembler* masm) {
1290   DCHECK(jssp.Is(__ StackPointer()));
1291   Register code_entry = x0;
1292 
1293   // Enable instruction instrumentation. This only works on the simulator, and
1294   // will have no effect on the model or real hardware.
1295   __ EnableInstrumentation();
1296 
1297   Label invoke, handler_entry, exit;
1298 
1299   // Push callee-saved registers and synchronize the system stack pointer (csp)
1300   // and the JavaScript stack pointer (jssp).
1301   //
1302   // We must not write to jssp until after the PushCalleeSavedRegisters()
1303   // call, since jssp is itself a callee-saved register.
1304   __ SetStackPointer(csp);
1305   __ PushCalleeSavedRegisters();
1306   __ Mov(jssp, csp);
1307   __ SetStackPointer(jssp);
1308 
1309   // Configure the FPCR. We don't restore it, so this is technically not allowed
1310   // according to AAPCS64. However, we only set default-NaN mode and this will
1311   // be harmless for most C code. Also, it works for ARM.
1312   __ ConfigureFPCR();
1313 
1314   ProfileEntryHookStub::MaybeCallEntryHook(masm);
1315 
1316   // Set up the reserved register for 0.0.
1317   __ Fmov(fp_zero, 0.0);
1318 
1319   // Build an entry frame (see layout below).
1320   int marker = type();
1321   int64_t bad_frame_pointer = -1L;  // Bad frame pointer to fail if it is used.
1322   __ Mov(x13, bad_frame_pointer);
1323   __ Mov(x12, Smi::FromInt(marker));
1324   __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
1325   __ Ldr(x10, MemOperand(x11));
1326 
1327   __ Push(x13, xzr, x12, x10);
1328   // Set up fp.
1329   __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
1330 
1331   // Push the JS entry frame marker. Also set js_entry_sp if this is the
1332   // outermost JS call.
1333   Label non_outermost_js, done;
1334   ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
1335   __ Mov(x10, ExternalReference(js_entry_sp));
1336   __ Ldr(x11, MemOperand(x10));
1337   __ Cbnz(x11, &non_outermost_js);
1338   __ Str(fp, MemOperand(x10));
1339   __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
1340   __ Push(x12);
1341   __ B(&done);
1342   __ Bind(&non_outermost_js);
1343   // We spare one instruction by pushing xzr since the marker is 0.
1344   DCHECK(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
1345   __ Push(xzr);
1346   __ Bind(&done);
1347 
1348   // The frame set up looks like this:
1349   // jssp[0] : JS entry frame marker.
1350   // jssp[1] : C entry FP.
1351   // jssp[2] : stack frame marker.
1352   // jssp[3] : stack frame marker.
1353   // jssp[4] : bad frame pointer 0xfff...ff   <- fp points here.
1354 
1355 
1356   // Jump to a faked try block that does the invoke, with a faked catch
1357   // block that sets the pending exception.
1358   __ B(&invoke);
1359 
1360   // Prevent the constant pool from being emitted between the record of the
1361   // handler_entry position and the first instruction of the sequence here.
1362   // There is no risk because Assembler::Emit() emits the instruction before
1363   // checking for constant pool emission, but we do not want to depend on
1364   // that.
1365   {
1366     Assembler::BlockPoolsScope block_pools(masm);
1367     __ bind(&handler_entry);
1368     handler_offset_ = handler_entry.pos();
1369     // Caught exception: Store result (exception) in the pending exception
1370     // field in the JSEnv and return a failure sentinel. Coming in here the
1371     // fp will be invalid because the PushStackHandler below sets it to 0 to
1372     // signal the existence of the JSEntry frame.
1373     __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1374                                           isolate())));
1375   }
1376   __ Str(code_entry, MemOperand(x10));
1377   __ LoadRoot(x0, Heap::kExceptionRootIndex);
1378   __ B(&exit);
1379 
1380   // Invoke: Link this frame into the handler chain.
1381   __ Bind(&invoke);
1382   __ PushStackHandler();
1383   // If an exception not caught by another handler occurs, this handler
1384   // returns control to the code after the B(&invoke) above, which
1385   // restores all callee-saved registers (including cp and fp) to their
1386   // saved values before returning a failure to C.
1387 
1388   // Clear any pending exceptions.
1389   __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
1390   __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1391                                         isolate())));
1392   __ Str(x10, MemOperand(x11));
1393 
1394   // Invoke the function by calling through the JS entry trampoline builtin.
1395   // Notice that we cannot store a reference to the trampoline code directly in
1396   // this stub, because runtime stubs are not traversed when doing GC.
1397 
1398   // Expected registers by Builtins::JSEntryTrampoline
1399   // x0: code entry.
1400   // x1: function.
1401   // x2: receiver.
1402   // x3: argc.
1403   // x4: argv.
1404   ExternalReference entry(type() == StackFrame::ENTRY_CONSTRUCT
1405                               ? Builtins::kJSConstructEntryTrampoline
1406                               : Builtins::kJSEntryTrampoline,
1407                           isolate());
1408   __ Mov(x10, entry);
1409 
1410   // Call the JSEntryTrampoline.
1411   __ Ldr(x11, MemOperand(x10));  // Dereference the address.
1412   __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
1413   __ Blr(x12);
1414 
1415   // Unlink this frame from the handler chain.
1416   __ PopStackHandler();
1417 
1418 
1419   __ Bind(&exit);
1420   // x0 holds the result.
1421   // The stack pointer points to the top of the entry frame pushed on entry from
1422   // C++ (at the beginning of this stub):
1423   // jssp[0] : JS entry frame marker.
1424   // jssp[1] : C entry FP.
1425   // jssp[2] : stack frame marker.
1426   // jssp[3] : stack frame marker.
1427   // jssp[4] : bad frame pointer 0xfff...ff   <- fp points here.
1428 
1429   // Check if the current stack frame is marked as the outermost JS frame.
1430   Label non_outermost_js_2;
1431   __ Pop(x10);
1432   __ Cmp(x10, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
1433   __ B(ne, &non_outermost_js_2);
1434   __ Mov(x11, ExternalReference(js_entry_sp));
1435   __ Str(xzr, MemOperand(x11));
1436   __ Bind(&non_outermost_js_2);
1437 
1438   // Restore the top frame descriptors from the stack.
1439   __ Pop(x10);
1440   __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
1441   __ Str(x10, MemOperand(x11));
1442 
1443   // Reset the stack to the callee-saved registers.
1444   __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
1445   // Restore the callee-saved registers and return.
1446   DCHECK(jssp.Is(__ StackPointer()));
1447   __ Mov(csp, jssp);
1448   __ SetStackPointer(csp);
1449   __ PopCalleeSavedRegisters();
1450   // After this point, we must not modify jssp because it is a callee-saved
1451   // register which we have just restored.
1452   __ Ret();
1453 }
1454 
1455 
1456 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1457   Label miss;
1458   Register receiver = LoadDescriptor::ReceiverRegister();
1459   // Ensure that the vector and slot registers won't be clobbered before
1460   // calling the miss handler.
1461   DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::VectorRegister(),
1462                      LoadWithVectorDescriptor::SlotRegister()));
1463 
1464   NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
1465                                                           x11, &miss);
1466 
1467   __ Bind(&miss);
1468   PropertyAccessCompiler::TailCallBuiltin(
1469       masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1470 }
1471 
1472 
1473 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
1474   // Return address is in lr.
1475   Label miss;
1476 
1477   Register receiver = LoadDescriptor::ReceiverRegister();
1478   Register index = LoadDescriptor::NameRegister();
1479   Register result = x0;
1480   Register scratch = x10;
1481   DCHECK(!scratch.is(receiver) && !scratch.is(index));
1482   DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
1483          result.is(LoadWithVectorDescriptor::SlotRegister()));
1484 
1485   // StringCharAtGenerator doesn't use the result register until it's passed
1486   // the different miss possibilities. If it did, we would have a conflict
1487   // when FLAG_vector_ics is true.
1488   StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
1489                                           &miss,  // When not a string.
1490                                           &miss,  // When not a number.
1491                                           &miss,  // When index out of range.
1492                                           STRING_INDEX_IS_ARRAY_INDEX,
1493                                           RECEIVER_IS_STRING);
1494   char_at_generator.GenerateFast(masm);
1495   __ Ret();
1496 
1497   StubRuntimeCallHelper call_helper;
1498   char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
1499 
1500   __ Bind(&miss);
1501   PropertyAccessCompiler::TailCallBuiltin(
1502       masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1503 }
1504 
1505 
1506 void InstanceOfStub::Generate(MacroAssembler* masm) {
1507   Register const object = x1;              // Object (lhs).
1508   Register const function = x0;            // Function (rhs).
1509   Register const object_map = x2;          // Map of {object}.
1510   Register const function_map = x3;        // Map of {function}.
1511   Register const function_prototype = x4;  // Prototype of {function}.
1512   Register const scratch = x5;
1513 
1514   DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
1515   DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
1516 
1517   // Check if {object} is a smi.
1518   Label object_is_smi;
1519   __ JumpIfSmi(object, &object_is_smi);
1520 
1521   // Lookup the {function} and the {object} map in the global instanceof cache.
1522   // Note: This is safe because we clear the global instanceof cache whenever
1523   // we change the prototype of any object.
1524   Label fast_case, slow_case;
1525   __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
1526   __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex,
1527                    &fast_case);
1528   __ JumpIfNotRoot(object_map, Heap::kInstanceofCacheMapRootIndex, &fast_case);
1529   __ LoadRoot(x0, Heap::kInstanceofCacheAnswerRootIndex);
1530   __ Ret();
1531 
1532   // If {object} is a smi we can safely return false if {function} is a JS
1533   // function, otherwise we have to miss to the runtime and throw an exception.
1534   __ Bind(&object_is_smi);
1535   __ JumpIfSmi(function, &slow_case);
1536   __ JumpIfNotObjectType(function, function_map, scratch, JS_FUNCTION_TYPE,
1537                          &slow_case);
1538   __ LoadRoot(x0, Heap::kFalseValueRootIndex);
1539   __ Ret();
1540 
1541   // Fast-case: The {function} must be a valid JSFunction.
1542   __ Bind(&fast_case);
1543   __ JumpIfSmi(function, &slow_case);
1544   __ JumpIfNotObjectType(function, function_map, scratch, JS_FUNCTION_TYPE,
1545                          &slow_case);
1546 
1547   // Ensure that {function} has an instance prototype.
1548   __ Ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
1549   __ Tbnz(scratch, Map::kHasNonInstancePrototype, &slow_case);
1550 
1551   // Get the "prototype" (or initial map) of the {function}.
1552   __ Ldr(function_prototype,
1553          FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
1554   __ AssertNotSmi(function_prototype);
1555 
1556   // Resolve the prototype if the {function} has an initial map.  Afterwards the
1557   // {function_prototype} will be either the JSReceiver prototype object or the
1558   // hole value, which means that no instances of the {function} were created so
1559   // far and hence we should return false.
1560   Label function_prototype_valid;
1561   __ JumpIfNotObjectType(function_prototype, scratch, scratch, MAP_TYPE,
1562                          &function_prototype_valid);
1563   __ Ldr(function_prototype,
1564          FieldMemOperand(function_prototype, Map::kPrototypeOffset));
1565   __ Bind(&function_prototype_valid);
1566   __ AssertNotSmi(function_prototype);
1567 
1568   // Update the global instanceof cache with the current {object} map and
1569   // {function}.  The cached answer will be set when it is known below.
1570   __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1571   __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
1572 
1573   // Loop through the prototype chain looking for the {function} prototype.
1574   // Assume true, and change to false if not found.
1575   Register const object_instance_type = function_map;
1576   Register const map_bit_field = function_map;
1577   Register const null = scratch;
1578   Register const result = x0;
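
  // In pseudo-C++, the loop below walks the prototype chain roughly as
  // follows (a sketch; the accessors are illustrative):
  //
  //   for (;;) {
  //     if (object_map->access_check_needed() ||
  //         object_map->instance_type() == JS_PROXY_TYPE) {
  //       goto fast_runtime_fallback;
  //     }
  //     object = object_map->prototype();
  //     if (object == function_prototype) { result = true; break; }
  //     if (object == null) { result = false; break; }
  //     object_map = object->map();
  //   }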
1579 
1580   Label done, loop, fast_runtime_fallback;
1581   __ LoadRoot(result, Heap::kTrueValueRootIndex);
1582   __ LoadRoot(null, Heap::kNullValueRootIndex);
1583   __ Bind(&loop);
1584 
1585   // Check if the object needs to be access checked.
1586   __ Ldrb(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
1587   __ TestAndBranchIfAnySet(map_bit_field, 1 << Map::kIsAccessCheckNeeded,
1588                            &fast_runtime_fallback);
1589   // Check if the current object is a Proxy.
1590   __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
1591   __ B(eq, &fast_runtime_fallback);
1592 
1593   __ Ldr(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
1594   __ Cmp(object, function_prototype);
1595   __ B(eq, &done);
1596   __ Cmp(object, null);
1597   __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
1598   __ B(ne, &loop);
1599   __ LoadRoot(result, Heap::kFalseValueRootIndex);
1600   __ Bind(&done);
1601   __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
1602   __ Ret();
1603 
1604   // Found Proxy or access check needed: Call the runtime
1605   __ Bind(&fast_runtime_fallback);
1606   __ Push(object, function_prototype);
1607   // Invalidate the instanceof cache.
1608   __ Move(scratch, Smi::FromInt(0));
1609   __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
1610   __ TailCallRuntime(Runtime::kHasInPrototypeChain);
1611 
1612   // Slow-case: Call the %InstanceOf runtime function.
1613   __ bind(&slow_case);
1614   __ Push(object, function);
1615   __ TailCallRuntime(Runtime::kInstanceOf);
1616 }
1617 
1618 
1619 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1620   Register arg_count = ArgumentsAccessReadDescriptor::parameter_count();
1621   Register key = ArgumentsAccessReadDescriptor::index();
1622   DCHECK(arg_count.is(x0));
1623   DCHECK(key.is(x1));
1624 
1625   // The displacement is the offset of the last parameter (if any) relative
1626   // to the frame pointer.
1627   static const int kDisplacement =
1628       StandardFrameConstants::kCallerSPOffset - kPointerSize;
1629 
1630   // Check that the key is a smi.
1631   Label slow;
1632   __ JumpIfNotSmi(key, &slow);
1633 
1634   // Check if the calling frame is an arguments adaptor frame.
1635   Register local_fp = x11;
1636   Register caller_fp = x11;
1637   Register caller_ctx = x12;
1638   Label skip_adaptor;
1639   __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1640   __ Ldr(caller_ctx, MemOperand(caller_fp,
1641                                 StandardFrameConstants::kContextOffset));
1642   __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
1643   __ Csel(local_fp, fp, caller_fp, ne);
1644   __ B(ne, &skip_adaptor);
1645 
1646   // Load the actual arguments limit found in the arguments adaptor frame.
1647   __ Ldr(arg_count, MemOperand(caller_fp,
1648                                ArgumentsAdaptorFrameConstants::kLengthOffset));
1649   __ Bind(&skip_adaptor);
1650 
1651   // Check index against formal parameters count limit. Use unsigned comparison
1652   // to get negative check for free: branch if key < 0 or key >= arg_count.
1653   __ Cmp(key, arg_count);
1654   __ B(hs, &slow);
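
  // In C terms, the unsigned comparison above implements the following
  // (a sketch over the tagged smi values):
  //
  //   if (static_cast<uint64_t>(key) >= static_cast<uint64_t>(arg_count)) {
  //     goto slow;  // A negative key wraps around to a huge unsigned value.
  //   }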
1655 
1656   // Read the argument from the stack and return it.
1657   __ Sub(x10, arg_count, key);
1658   __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2));
1659   __ Ldr(x0, MemOperand(x10, kDisplacement));
1660   __ Ret();
1661 
1662   // Slow case: handle non-smi or out-of-bounds access to arguments by calling
1663   // the runtime system.
1664   __ Bind(&slow);
1665   __ Push(key);
1666   __ TailCallRuntime(Runtime::kArguments);
1667 }
1668 
1669 
1670 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
1671   // x1 : function
1672   // x2 : number of parameters (tagged)
1673   // x3 : parameters pointer
1674 
1675   DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
1676   DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
1677   DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
1678 
1679   // Check if the calling frame is an arguments adaptor frame.
1680   Label runtime;
1681   Register caller_fp = x10;
1682   __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1683   // Load and untag the context.
1684   __ Ldr(w11, UntagSmiMemOperand(caller_fp,
1685                                  StandardFrameConstants::kContextOffset));
1686   __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
1687   __ B(ne, &runtime);
1688 
1689   // Patch the arguments.length and parameters pointer in the current frame.
1690   __ Ldr(x2,
1691          MemOperand(caller_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
1692   __ Add(x3, caller_fp, Operand::UntagSmiAndScale(x2, kPointerSizeLog2));
1693   __ Add(x3, x3, StandardFrameConstants::kCallerSPOffset);
1694 
1695   __ Bind(&runtime);
1696   __ Push(x1, x3, x2);
1697   __ TailCallRuntime(Runtime::kNewSloppyArguments);
1698 }
1699 
1700 
1701 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1702   // x1 : function
1703   // x2 : number of parameters (tagged)
1704   // x3 : parameters pointer
1705   //
1706   // Returns pointer to result object in x0.
1707 
1708   DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
1709   DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
1710   DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
1711 
1712   // Make an untagged copy of the parameter count.
1713   // Note: arg_count_smi is an alias of param_count_smi.
1714   Register function = x1;
1715   Register arg_count_smi = x2;
1716   Register param_count_smi = x2;
1717   Register recv_arg = x3;
1718   Register param_count = x7;
1719   __ SmiUntag(param_count, param_count_smi);
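
  // On arm64, a smi stores a 32-bit integer in the upper word of a 64-bit
  // register (kSmiShift == 32), so tagging and untagging are plain shifts.
  // Sketch:
  //
  //   int64_t tagged   = value << kSmiShift;   // SmiTag
  //   int64_t untagged = tagged >> kSmiShift;  // SmiUntag (arithmetic shift)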
1720 
1721   // Check if the calling frame is an arguments adaptor frame.
1722   Register caller_fp = x11;
1723   Register caller_ctx = x12;
1724   Label runtime;
1725   Label adaptor_frame, try_allocate;
1726   __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1727   __ Ldr(caller_ctx, MemOperand(caller_fp,
1728                                 StandardFrameConstants::kContextOffset));
1729   __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
1730   __ B(eq, &adaptor_frame);
1731 
1732   // No adaptor, parameter count = argument count.
1733 
1734   //   x1   function      function pointer
1735   //   x2   arg_count_smi number of function arguments (smi)
1736   //   x3   recv_arg      pointer to receiver arguments
1737   //   x4   mapped_params number of mapped params, min(params, args) (uninit)
1738   //   x7   param_count   number of function parameters
1739   //   x11  caller_fp     caller's frame pointer
1740   //   x14  arg_count     number of function arguments (uninit)
1741 
1742   Register arg_count = x14;
1743   Register mapped_params = x4;
1744   __ Mov(arg_count, param_count);
1745   __ Mov(mapped_params, param_count);
1746   __ B(&try_allocate);
1747 
1748   // We have an adaptor frame. Patch the parameters pointer.
1749   __ Bind(&adaptor_frame);
1750   __ Ldr(arg_count_smi,
1751          MemOperand(caller_fp,
1752                     ArgumentsAdaptorFrameConstants::kLengthOffset));
1753   __ SmiUntag(arg_count, arg_count_smi);
1754   __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
1755   __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
1756 
1757   // Compute the mapped parameter count = min(param_count, arg_count)
1758   __ Cmp(param_count, arg_count);
1759   __ Csel(mapped_params, param_count, arg_count, lt);
1760 
1761   __ Bind(&try_allocate);
1762 
1763   //   x0   alloc_obj     pointer to allocated objects: param map, backing
1764   //                      store, arguments (uninit)
1765   //   x1   function      function pointer
1766   //   x2   arg_count_smi number of function arguments (smi)
1767   //   x3   recv_arg      pointer to receiver arguments
1768   //   x4   mapped_params number of mapped parameters, min(params, args)
1769   //   x7   param_count   number of function parameters
1770   //   x10  size          size of objects to allocate (uninit)
1771   //   x14  arg_count     number of function arguments
1772 
1773   // Compute the size of backing store, parameter map, and arguments object.
1774   // 1. Parameter map, has two extra words containing context and backing
1775   // store.
1776   const int kParameterMapHeaderSize =
1777       FixedArray::kHeaderSize + 2 * kPointerSize;
1778 
1779   // Calculate the parameter map size, assuming it exists.
1780   Register size = x10;
1781   __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
1782   __ Add(size, size, kParameterMapHeaderSize);
1783 
1784   // If there are no mapped parameters, set the running size total to zero.
1785   // Otherwise, use the parameter map size calculated earlier.
1786   __ Cmp(mapped_params, 0);
1787   __ CzeroX(size, eq);
1788 
1789   // 2. Add the size of the backing store and arguments object.
1790   __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
1791   __ Add(size, size,
1792          FixedArray::kHeaderSize + Heap::kSloppyArgumentsObjectSize);
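
  // Altogether, the allocation size computed above is (a sketch of the
  // arithmetic, in bytes):
  //
  //   size = (mapped_params == 0
  //               ? 0
  //               : mapped_params * kPointerSize + kParameterMapHeaderSize) +
  //          arg_count * kPointerSize + FixedArray::kHeaderSize +
  //          Heap::kSloppyArgumentsObjectSize;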
1793 
1794   // Do the allocation of all three objects in one go. Assign this to x0, as it
1795   // will be returned to the caller.
1796   Register alloc_obj = x0;
1797   __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
1798 
1799   // Get the arguments boilerplate from the current (global) context.
1800 
1801   //   x0   alloc_obj       pointer to allocated objects (param map, backing
1802   //                        store, arguments)
1803   //   x1   function        function pointer
1804   //   x2   arg_count_smi   number of function arguments (smi)
1805   //   x3   recv_arg        pointer to receiver arguments
1806   //   x4   mapped_params   number of mapped parameters, min(params, args)
1807   //   x7   param_count     number of function parameters
1808   //   x11  sloppy_args_map offset to args (or aliased args) map (uninit)
1809   //   x14  arg_count       number of function arguments
1810 
1811   Register global_ctx = x10;
1812   Register sloppy_args_map = x11;
1813   Register aliased_args_map = x10;
1814   __ Ldr(global_ctx, NativeContextMemOperand());
1815 
1816   __ Ldr(sloppy_args_map,
1817          ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
1818   __ Ldr(
1819       aliased_args_map,
1820       ContextMemOperand(global_ctx, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX));
1821   __ Cmp(mapped_params, 0);
1822   __ CmovX(sloppy_args_map, aliased_args_map, ne);
1823 
1824   // Copy the JS object part.
1825   __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
1826   __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
1827   __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
1828   __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
1829 
1830   // Set up the callee in-object property.
1831   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
1832   const int kCalleeOffset = JSObject::kHeaderSize +
1833                             Heap::kArgumentsCalleeIndex * kPointerSize;
1834   __ AssertNotSmi(function);
1835   __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
1836 
1837   // Use the length and set that as an in-object property.
1838   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1839   const int kLengthOffset = JSObject::kHeaderSize +
1840                             Heap::kArgumentsLengthIndex * kPointerSize;
1841   __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
1842 
1843   // Set up the elements pointer in the allocated arguments object.
1844   // If we allocated a parameter map, "elements" will point there, otherwise
1845   // it will point to the backing store.
1846 
1847   //   x0   alloc_obj     pointer to allocated objects (param map, backing
1848   //                      store, arguments)
1849   //   x1   function      function pointer
1850   //   x2   arg_count_smi number of function arguments (smi)
1851   //   x3   recv_arg      pointer to receiver arguments
1852   //   x4   mapped_params number of mapped parameters, min(params, args)
1853   //   x5   elements      pointer to parameter map or backing store (uninit)
1854   //   x6   backing_store pointer to backing store (uninit)
1855   //   x7   param_count   number of function parameters
1856   //   x14  arg_count     number of function arguments
1857 
1858   Register elements = x5;
1859   __ Add(elements, alloc_obj, Heap::kSloppyArgumentsObjectSize);
1860   __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
1861 
1862   // Initialize parameter map. If there are no mapped arguments, we're done.
1863   Label skip_parameter_map;
1864   __ Cmp(mapped_params, 0);
1865   // Set up backing store address, because it is needed later for filling in
1866   // the unmapped arguments.
1867   Register backing_store = x6;
1868   __ CmovX(backing_store, elements, eq);
1869   __ B(eq, &skip_parameter_map);
1870 
1871   __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
1872   __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
1873   __ Add(x10, mapped_params, 2);
1874   __ SmiTag(x10);
1875   __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
1876   __ Str(cp, FieldMemOperand(elements,
1877                              FixedArray::kHeaderSize + 0 * kPointerSize));
1878   __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
1879   __ Add(x10, x10, kParameterMapHeaderSize);
1880   __ Str(x10, FieldMemOperand(elements,
1881                               FixedArray::kHeaderSize + 1 * kPointerSize));
1882 
1883   // Copy the parameter slots and the holes in the arguments.
1884   // We need to fill in mapped_parameter_count slots. They index the context,
1885   // where parameters are stored in reverse order, at:
1886   //
1887   //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
1888   //
1889   // The mapped parameter thus needs to get indices:
1890   //
1891   //   MIN_CONTEXT_SLOTS + parameter_count - 1 ..
1892   //     MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
1893   //
1894   // We loop from right to left.
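  //
  // For example, with parameter_count == 3 and mapped_parameter_count == 2,
  // map slot 0 receives context index MIN_CONTEXT_SLOTS + 2 and map slot 1
  // receives MIN_CONTEXT_SLOTS + 1.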
1895 
1896   //   x0   alloc_obj     pointer to allocated objects (param map, backing
1897   //                      store, arguments)
1898   //   x1   function      function pointer
1899   //   x2   arg_count_smi number of function arguments (smi)
1900   //   x3   recv_arg      pointer to receiver arguments
1901   //   x4   mapped_params number of mapped parameters, min(params, args)
1902   //   x5   elements      pointer to parameter map or backing store (uninit)
1903   //   x6   backing_store pointer to backing store (uninit)
1904   //   x7   param_count   number of function parameters
1905   //   x11  loop_count    parameter loop counter (uninit)
1906   //   x12  index         parameter index (smi, uninit)
1907   //   x13  the_hole      hole value (uninit)
1908   //   x14  arg_count     number of function arguments
1909 
1910   Register loop_count = x11;
1911   Register index = x12;
1912   Register the_hole = x13;
1913   Label parameters_loop, parameters_test;
1914   __ Mov(loop_count, mapped_params);
1915   __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
1916   __ Sub(index, index, mapped_params);
1917   __ SmiTag(index);
1918   __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
1919   __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
1920   __ Add(backing_store, backing_store, kParameterMapHeaderSize);
1921 
1922   __ B(&parameters_test);
1923 
1924   __ Bind(&parameters_loop);
1925   __ Sub(loop_count, loop_count, 1);
1926   __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
1927   __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
1928   __ Str(index, MemOperand(elements, x10));
1929   __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
1930   __ Str(the_hole, MemOperand(backing_store, x10));
1931   __ Add(index, index, Smi::FromInt(1));
1932   __ Bind(&parameters_test);
1933   __ Cbnz(loop_count, &parameters_loop);
1934 
1935   __ Bind(&skip_parameter_map);
1936   // Copy arguments header and remaining slots (if there are any).
1937   __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
1938   __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
1939   __ Str(arg_count_smi, FieldMemOperand(backing_store,
1940                                         FixedArray::kLengthOffset));
1941 
1942   //   x0   alloc_obj     pointer to allocated objects (param map, backing
1943   //                      store, arguments)
1944   //   x1   function      function pointer
1945   //   x2   arg_count_smi number of function arguments (smi)
1946   //   x3   recv_arg      pointer to receiver arguments
1947   //   x4   mapped_params number of mapped parameters, min(params, args)
1948   //   x6   backing_store pointer to backing store (uninit)
1949   //   x14  arg_count     number of function arguments
1950 
1951   Label arguments_loop, arguments_test;
1952   __ Mov(x10, mapped_params);
1953   __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
1954   __ B(&arguments_test);
1955 
1956   __ Bind(&arguments_loop);
1957   __ Sub(recv_arg, recv_arg, kPointerSize);
1958   __ Ldr(x11, MemOperand(recv_arg));
1959   __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
1960   __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
1961   __ Add(x10, x10, 1);
1962 
1963   __ Bind(&arguments_test);
1964   __ Cmp(x10, arg_count);
1965   __ B(lt, &arguments_loop);
1966 
1967   __ Ret();
1968 
1969   // Do the runtime call to allocate the arguments object.
1970   __ Bind(&runtime);
1971   __ Push(function, recv_arg, arg_count_smi);
1972   __ TailCallRuntime(Runtime::kNewSloppyArguments);
1973 }
1974 
1975 
1976 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
1977   // Return address is in lr.
1978   Label slow;
1979 
1980   Register receiver = LoadDescriptor::ReceiverRegister();
1981   Register key = LoadDescriptor::NameRegister();
1982 
1983   // Check that the key is an array index, that is, a Uint32.
1984   __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
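
  // That is, bail out unless the key is a non-negative smi (sketch):
  //
  //   if ((key & (kSmiTagMask | kSmiSignMask)) != 0) goto slow;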
1985 
1986   // Everything is fine, call runtime.
1987   __ Push(receiver, key);
1988   __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
1989 
1990   __ Bind(&slow);
1991   PropertyAccessCompiler::TailCallBuiltin(
1992       masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1993 }
1994 
1995 
1996 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
1997   // x1 : function
1998   // x2 : number of parameters (tagged)
1999   // x3 : parameters pointer
2000   //
2001   // Returns pointer to result object in x0.
2002 
2003   DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
2004   DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
2005   DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
2006 
2007   // Make an untagged copy of the parameter count.
2008   Register function = x1;
2009   Register param_count_smi = x2;
2010   Register params = x3;
2011   Register param_count = x13;
2012   __ SmiUntag(param_count, param_count_smi);
2013 
2014   // Test if arguments adaptor needed.
2015   Register caller_fp = x11;
2016   Register caller_ctx = x12;
2017   Label try_allocate, runtime;
2018   __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2019   __ Ldr(caller_ctx, MemOperand(caller_fp,
2020                                 StandardFrameConstants::kContextOffset));
2021   __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
2022   __ B(ne, &try_allocate);
2023 
2024   //   x1   function          function pointer
2025   //   x2   param_count_smi   number of parameters passed to function (smi)
2026   //   x3   params            pointer to parameters
2027   //   x11  caller_fp         caller's frame pointer
2028   //   x13  param_count       number of parameters passed to function
2029 
2030   // Patch the argument length and parameters pointer.
2031   __ Ldr(param_count_smi,
2032          MemOperand(caller_fp,
2033                     ArgumentsAdaptorFrameConstants::kLengthOffset));
2034   __ SmiUntag(param_count, param_count_smi);
2035   __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
2036   __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
2037 
2038   // Try the new space allocation. Start out with computing the size of the
2039   // arguments object and the elements array in words.
2040   Register size = x10;
2041   __ Bind(&try_allocate);
2042   __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize);
2043   __ Cmp(param_count, 0);
2044   __ CzeroX(size, eq);
2045   __ Add(size, size, Heap::kStrictArgumentsObjectSize / kPointerSize);
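
  // In words, the size computed above is (sketch):
  //
  //   size = (param_count == 0
  //               ? 0
  //               : param_count + FixedArray::kHeaderSize / kPointerSize) +
  //          Heap::kStrictArgumentsObjectSize / kPointerSize;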
2046 
2047   // Do the allocation of both objects in one go. Assign this to x0, as it will
2048   // be returned to the caller.
2049   Register alloc_obj = x0;
2050   __ Allocate(size, alloc_obj, x11, x12, &runtime,
2051               static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2052 
2053   // Get the arguments boilerplate from the current (native) context.
2054   Register strict_args_map = x4;
2055   __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX,
2056                            strict_args_map);
2057 
2058   //   x0   alloc_obj         pointer to allocated objects: parameter array and
2059   //                          arguments object
2060   //   x1   function          function pointer
2061   //   x2   param_count_smi   number of parameters passed to function (smi)
2062   //   x3   params            pointer to parameters
2063   //   x4   strict_args_map   offset to arguments map
2064   //   x13  param_count       number of parameters passed to function
2065   __ Str(strict_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
2066   __ LoadRoot(x5, Heap::kEmptyFixedArrayRootIndex);
2067   __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
2068   __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
2069 
2070   // Set the smi-tagged length as an in-object property.
2071   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2072   const int kLengthOffset = JSObject::kHeaderSize +
2073                             Heap::kArgumentsLengthIndex * kPointerSize;
2074   __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
2075 
2076   // If there are no actual arguments, we're done.
2077   Label done;
2078   __ Cbz(param_count, &done);
2079 
2080   // Set up the elements pointer in the allocated arguments object and
2081   // initialize the header in the elements fixed array.
2082   Register elements = x5;
2083   __ Add(elements, alloc_obj, Heap::kStrictArgumentsObjectSize);
2084   __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
2085   __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
2086   __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
2087   __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset));
2088 
2089   //   x0   alloc_obj         pointer to allocated objects: parameter array and
2090   //                          arguments object
2091   //   x1   function          function pointer
2092   //   x2   param_count_smi   number of parameters passed to function (smi)
2093   //   x3   params            pointer to parameters
2094   //   x4   array             pointer to array slot (uninit)
2095   //   x5   elements          pointer to elements array of alloc_obj
2096   //   x13  param_count       number of parameters passed to function
2097 
2098   // Copy the fixed array slots.
2099   Label loop;
2100   Register array = x4;
2101   // Set up pointer to first array slot.
2102   __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag);
2103 
2104   __ Bind(&loop);
2105   // Pre-decrement the parameters pointer by kPointerSize on each iteration.
2106   // Pre-decrement in order to skip receiver.
2107   __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex));
2108   // Post-increment elements by kPointerSize on each iteration.
2109   __ Str(x10, MemOperand(array, kPointerSize, PostIndex));
2110   __ Sub(param_count, param_count, 1);
2111   __ Cbnz(param_count, &loop);
2112 
2113   // Return from stub.
2114   __ Bind(&done);
2115   __ Ret();
2116 
2117   // Do the runtime call to allocate the arguments object.
2118   __ Bind(&runtime);
2119   __ Push(function, params, param_count_smi);
2120   __ TailCallRuntime(Runtime::kNewStrictArguments);
2121 }
2122 
2123 
2124 void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
2125   // x2 : number of parameters (tagged)
2126   // x3 : parameters pointer
2127   // x4 : rest parameter index (tagged)
2128   //
2129   // Returns pointer to result object in x0.
2130 
2131   DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
2132   DCHECK(x3.is(RestParamAccessDescriptor::parameter_pointer()));
2133   DCHECK(x4.is(RestParamAccessDescriptor::rest_parameter_index()));
2134 
2135   // Get the stub arguments from the frame, and make an untagged copy of the
2136   // parameter count.
2137   Register rest_index_smi = x4;
2138   Register param_count_smi = x2;
2139   Register params = x3;
2140   Register param_count = x13;
2141   __ SmiUntag(param_count, param_count_smi);
2142 
2143   // Test if arguments adaptor needed.
2144   Register caller_fp = x11;
2145   Register caller_ctx = x12;
2146   Label runtime;
2147   __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2148   __ Ldr(caller_ctx,
2149          MemOperand(caller_fp, StandardFrameConstants::kContextOffset));
2150   __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
2151   __ B(ne, &runtime);
2152 
2153   //   x4   rest_index_smi     index of rest parameter
2154   //   x2   param_count_smi    number of parameters passed to function (smi)
2155   //   x3   params             pointer to parameters
2156   //   x11  caller_fp          caller's frame pointer
2157   //   x13  param_count        number of parameters passed to function
2158 
2159   // Patch the argument length and parameters pointer.
2160   __ Ldr(param_count_smi,
2161          MemOperand(caller_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
2162   __ SmiUntag(param_count, param_count_smi);
2163   __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
2164   __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
2165 
2166   __ Bind(&runtime);
2167   __ Push(param_count_smi, params, rest_index_smi);
2168   __ TailCallRuntime(Runtime::kNewRestParam);
2169 }
2170 
2171 
2172 void RegExpExecStub::Generate(MacroAssembler* masm) {
2173 #ifdef V8_INTERPRETED_REGEXP
2174   __ TailCallRuntime(Runtime::kRegExpExec);
2175 #else  // V8_INTERPRETED_REGEXP
2176 
2177   // Stack frame on entry.
2178   //  jssp[0]: last_match_info (expected JSArray)
2179   //  jssp[8]: previous index
2180   //  jssp[16]: subject string
2181   //  jssp[24]: JSRegExp object
2182   Label runtime;
2183 
2184   // Use of registers for this function.
2185 
2186   // Variable registers:
2187   //   x10-x13                                  used as scratch registers
2188   //   w0       string_type                     type of subject string
2189   //   x2       jsstring_length                 subject string length
2190   //   x3       jsregexp_object                 JSRegExp object
2191   //   w4       string_encoding                 Latin1 or UC16
2192   //   w5       sliced_string_offset            if the string is a SlicedString
2193   //                                            offset to the underlying string
2194   //   w6       string_representation           groups attributes of the string:
2195   //                                              - is a string
2196   //                                              - type of the string
2197   //                                              - is a short external string
2198   Register string_type = w0;
2199   Register jsstring_length = x2;
2200   Register jsregexp_object = x3;
2201   Register string_encoding = w4;
2202   Register sliced_string_offset = w5;
2203   Register string_representation = w6;
2204 
2205   // These are in callee-saved registers and will be preserved by the call
2206   // to the native RegExp code, as this code is called using the normal
2207   // C calling convention. When calling directly from generated code the
2208   // native RegExp code will not do a GC and therefore the contents of
2209   // these registers are safe to use after the call.
2210 
2211   //   x19       subject                        subject string
2212   //   x20       regexp_data                    RegExp data (FixedArray)
2213   //   x21       last_match_info_elements       info relative to the last match
2214   //                                            (FixedArray)
2215   //   x22       code_object                    generated regexp code
2216   Register subject = x19;
2217   Register regexp_data = x20;
2218   Register last_match_info_elements = x21;
2219   Register code_object = x22;
2220 
2221   // Stack frame.
2222   //  jssp[00]: last_match_info (JSArray)
2223   //  jssp[08]: previous index
2224   //  jssp[16]: subject string
2225   //  jssp[24]: JSRegExp object
2226 
2227   const int kLastMatchInfoOffset = 0 * kPointerSize;
2228   const int kPreviousIndexOffset = 1 * kPointerSize;
2229   const int kSubjectOffset = 2 * kPointerSize;
2230   const int kJSRegExpOffset = 3 * kPointerSize;
2231 
2232   // Ensure that a RegExp stack is allocated.
2233   ExternalReference address_of_regexp_stack_memory_address =
2234       ExternalReference::address_of_regexp_stack_memory_address(isolate());
2235   ExternalReference address_of_regexp_stack_memory_size =
2236       ExternalReference::address_of_regexp_stack_memory_size(isolate());
2237   __ Mov(x10, address_of_regexp_stack_memory_size);
2238   __ Ldr(x10, MemOperand(x10));
2239   __ Cbz(x10, &runtime);
2240 
2241   // Check that the first argument is a JSRegExp object.
2242   DCHECK(jssp.Is(__ StackPointer()));
2243   __ Peek(jsregexp_object, kJSRegExpOffset);
2244   __ JumpIfSmi(jsregexp_object, &runtime);
2245   __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
2246 
2247   // Check that the RegExp has been compiled (data contains a fixed array).
2248   __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
2249   if (FLAG_debug_code) {
2250     STATIC_ASSERT(kSmiTag == 0);
2251     __ Tst(regexp_data, kSmiTagMask);
2252     __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2253     __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
2254     __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2255   }
2256 
2257   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2258   __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2259   __ Cmp(x10, Smi::FromInt(JSRegExp::IRREGEXP));
2260   __ B(ne, &runtime);
2261 
2262   // Check that the number of captures fits in the static offsets vector
2263   // buffer. We always have at least one capture for the whole match, plus
2264   // additional ones due to capturing parentheses. A capture takes 2 registers.
2265   // The number of capture registers then is (number_of_captures + 1) * 2.
2266   __ Ldrsw(x10,
2267            UntagSmiFieldMemOperand(regexp_data,
2268                                    JSRegExp::kIrregexpCaptureCountOffset));
2269   // Check (number_of_captures + 1) * 2 <= offsets vector size
2270   //             number_of_captures * 2 <= offsets vector size - 2
2271   STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2272   __ Add(x10, x10, x10);
2273   __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
2274   __ B(hi, &runtime);
2275 
2276   // Initialize offset for possibly sliced string.
2277   __ Mov(sliced_string_offset, 0);
2278 
2279   DCHECK(jssp.Is(__ StackPointer()));
2280   __ Peek(subject, kSubjectOffset);
2281   __ JumpIfSmi(subject, &runtime);
2282 
2283   __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
2284   __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2285 
2286   __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
2287 
2288   // Handle subject string according to its encoding and representation:
2289   // (1) Sequential string?  If yes, go to (5).
2290   // (2) Anything but sequential or cons?  If yes, go to (6).
2291   // (3) Cons string.  If the string is flat, replace subject with first string.
2292   //     Otherwise bailout.
2293   // (4) Is subject external?  If yes, go to (7).
2294   // (5) Sequential string.  Load regexp code according to encoding.
2295   // (E) Carry on.
2296   /// [...]
2297 
2298   // Deferred code at the end of the stub:
2299   // (6) Not a long external string?  If yes, go to (8).
2300   // (7) External string.  Make it, offset-wise, look like a sequential string.
2301   //     Go to (5).
2302   // (8) Short external string or not a string?  If yes, bail out to runtime.
2303   // (9) Sliced string.  Replace subject with parent.  Go to (4).
2304 
2305   Label check_underlying;   // (4)
2306   Label seq_string;         // (5)
2307   Label not_seq_nor_cons;   // (6)
2308   Label external_string;    // (7)
2309   Label not_long_external;  // (8)
2310 
2311   // (1) Sequential string?  If yes, go to (5).
2312   __ And(string_representation,
2313          string_type,
2314          kIsNotStringMask |
2315              kStringRepresentationMask |
2316              kShortExternalStringMask);
2317   // We depend on the fact that Strings of type
2318   // SeqString and not ShortExternalString are defined
2319   // by the following pattern:
2320   //   string_type: 0XX0 XX00
2321   //                ^  ^   ^^
2322   //                |  |   ||
2323   //                |  |   is a SeqString
2324   //                |  is not a short external String
2325   //                is a String
2326   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2327   STATIC_ASSERT(kShortExternalStringTag != 0);
2328   __ Cbz(string_representation, &seq_string);  // Go to (5).
2329 
2330   // (2) Anything but sequential or cons?  If yes, go to (6).
2331   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2332   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2333   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2334   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2335   __ Cmp(string_representation, kExternalStringTag);
2336   __ B(ge, &not_seq_nor_cons);  // Go to (6).
2337 
2338   // (3) Cons string.  Check that it's flat.
2339   __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
2340   __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
2341   // Replace subject with first string.
2342   __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2343 
2344   // (4) Is subject external?  If yes, go to (7).
2345   __ Bind(&check_underlying);
2346   // Reload the string type.
2347   __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
2348   __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2349   STATIC_ASSERT(kSeqStringTag == 0);
2350   // The underlying external string is never a short external string.
2351   STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2352   STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2353   __ TestAndBranchIfAnySet(string_type.X(),
2354                            kStringRepresentationMask,
2355                            &external_string);  // Go to (7).
2356 
2357   // (5) Sequential string.  Load regexp code according to encoding.
2358   __ Bind(&seq_string);
2359 
2360   // Check that the third argument is a positive smi less than the subject
2361   // string length. A negative value will be greater (unsigned comparison).
2362   DCHECK(jssp.Is(__ StackPointer()));
2363   __ Peek(x10, kPreviousIndexOffset);
2364   __ JumpIfNotSmi(x10, &runtime);
2365   __ Cmp(jsstring_length, x10);
2366   __ B(ls, &runtime);
2367 
2368   // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
2369   // before entering the exit frame.
2370   __ SmiUntag(x1, x10);
2371 
2372   // The third bit determines the string encoding in string_type.
2373   STATIC_ASSERT(kOneByteStringTag == 0x04);
2374   STATIC_ASSERT(kTwoByteStringTag == 0x00);
2375   STATIC_ASSERT(kStringEncodingMask == 0x04);
2376 
2377   // Find the code object based on the assumptions above.
2378   // kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent; add an
2379   // offset of kPointerSize to reach the latter.
2380   STATIC_ASSERT(JSRegExp::kDataOneByteCodeOffset + kPointerSize ==
2381                 JSRegExp::kDataUC16CodeOffset);
2382   __ Mov(x10, kPointerSize);
2383   // We will need the encoding later: Latin1 = 0x04
2384   //                                  UC16   = 0x00
2385   __ Ands(string_encoding, string_type, kStringEncodingMask);
2386   __ CzeroX(x10, ne);
2387   __ Add(x10, regexp_data, x10);
2388   __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataOneByteCodeOffset));
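
  // The selection above is equivalent to (a sketch; the actual load is a
  // tagged field access):
  //
  //   int code_offset = JSRegExp::kDataOneByteCodeOffset;
  //   if ((string_type & kStringEncodingMask) == 0) {  // UC16.
  //     code_offset += kPointerSize;  // JSRegExp::kDataUC16CodeOffset.
  //   }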
2389 
2390   // (E) Carry on.  String handling is done.
2391 
2392   // Check that the irregexp code has been generated for the actual string
2393   // encoding. If it has, the field contains a code object; otherwise it
2394   // contains a smi (code flushing support).
2395   __ JumpIfSmi(code_object, &runtime);
2396 
2397   // All checks done. Now push arguments for native regexp code.
2398   __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1,
2399                       x10,
2400                       x11);
2401 
2402   // Isolates: note we add an additional parameter here (isolate pointer).
2403   __ EnterExitFrame(false, x10, 1);
2404   DCHECK(csp.Is(__ StackPointer()));
2405 
2406   // We have 9 arguments to pass to the regexp code, so we have to pass
2407   // one on the stack and the rest in registers.
2408 
2409   // Note that the placement of the argument on the stack isn't standard
2410   // AAPCS64:
2411   // csp[0]: Space for the return address placed by DirectCEntryStub.
2412   // csp[8]: Argument 9, the current isolate address.
2413 
2414   __ Mov(x10, ExternalReference::isolate_address(isolate()));
2415   __ Poke(x10, kPointerSize);
2416 
2417   Register length = w11;
2418   Register previous_index_in_bytes = w12;
2419   Register start = x13;
2420 
2421   // Load start of the subject string.
2422   __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
2423   // Load the length from the original subject string from the previous stack
2424   // frame. Therefore we have to use fp, which points exactly two pointer
2425   // sizes below the previous sp. (Because creating a new stack frame pushes
2426   // the previous fp onto the stack and decrements sp by 2 * kPointerSize.)
2427   __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2428   __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
2429 
2430   // Handle UC16 encoding, two bytes make one character.
2431   //   string_encoding: if Latin1: 0x04
2432   //                    if UC16:   0x00
2433   STATIC_ASSERT(kStringEncodingMask == 0x04);
2434   __ Ubfx(string_encoding, string_encoding, 2, 1);
2435   __ Eor(string_encoding, string_encoding, 1);
2436   //   string_encoding: if Latin1: 0
2437   //                    if UC16:   1
2438 
2439   // Convert string positions from characters to bytes.
2440   // Previous index is in x1.
2441   __ Lsl(previous_index_in_bytes, w1, string_encoding);
2442   __ Lsl(length, length, string_encoding);
2443   __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
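
  // After the Ubfx/Eor above, string_encoding holds the character-size
  // shift: 0 for Latin1 (one byte per character), 1 for UC16 (two bytes).
  // Each conversion is then simply (sketch):
  //
  //   bytes = chars << string_encoding;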
2444 
2445   // Argument 1 (x0): Subject string.
2446   __ Mov(x0, subject);
2447 
2448   // Argument 2 (x1): Previous index, already there.
2449 
2450   // Argument 3 (x2): Get the start of input.
2451   // Start of input = start of string + previous index + substring offset
2452   //                                                     (0 if the string
2453   //                                                      is not sliced).
2454   __ Add(w10, previous_index_in_bytes, sliced_string_offset);
2455   __ Add(x2, start, Operand(w10, UXTW));
2456 
2457   // Argument 4 (x3):
2458   // End of input = start of input + (length of input - previous index)
2459   __ Sub(w10, length, previous_index_in_bytes);
2460   __ Add(x3, x2, Operand(w10, UXTW));
2461 
2462   // Argument 5 (x4): static offsets vector buffer.
2463   __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate()));
2464 
2465   // Argument 6 (x5): Set the number of capture registers to zero to force
2466   // global regexps to behave as non-global. This stub is not used for global
2467   // regexps.
2468   __ Mov(x5, 0);
2469 
2470   // Argument 7 (x6): Start (high end) of backtracking stack memory area.
2471   __ Mov(x10, address_of_regexp_stack_memory_address);
2472   __ Ldr(x10, MemOperand(x10));
2473   __ Mov(x11, address_of_regexp_stack_memory_size);
2474   __ Ldr(x11, MemOperand(x11));
2475   __ Add(x6, x10, x11);
2476 
2477   // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
2478   __ Mov(x7, 1);
2479 
2480   // Locate the code entry and call it.
2481   __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
2482   DirectCEntryStub stub(isolate());
2483   stub.GenerateCall(masm, code_object);
2484 
2485   __ LeaveExitFrame(false, x10, true);
2486 
2487   // The generated regexp code returns an int32 in w0.
2488   Label failure, exception;
2489   __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
2490   __ CompareAndBranch(w0,
2491                       NativeRegExpMacroAssembler::EXCEPTION,
2492                       eq,
2493                       &exception);
2494   __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
2495 
2496   // Success: process the result from the native regexp code.
2497   Register number_of_capture_registers = x12;
2498 
2499   // Calculate number of capture registers (number_of_captures + 1) * 2
2500   // and store it in the last match info.
2501   __ Ldrsw(x10,
2502            UntagSmiFieldMemOperand(regexp_data,
2503                                    JSRegExp::kIrregexpCaptureCountOffset));
2504   __ Add(x10, x10, x10);
2505   __ Add(number_of_capture_registers, x10, 2);
2506 
2507   // Check that the fourth argument is a JSArray object.
2508   DCHECK(jssp.Is(__ StackPointer()));
2509   __ Peek(x10, kLastMatchInfoOffset);
2510   __ JumpIfSmi(x10, &runtime);
2511   __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
2512 
2513   // Check that the JSArray is the fast case.
2514   __ Ldr(last_match_info_elements,
2515          FieldMemOperand(x10, JSArray::kElementsOffset));
2516   __ Ldr(x10,
2517          FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2518   __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
2519 
2520   // Check that the last match info has space for the capture registers and the
2521   // additional information (overhead).
2522   //     (number_of_captures + 1) * 2 + overhead <= last match info size
2523   //     (number_of_captures * 2) + 2 + overhead <= last match info size
2524   //      number_of_capture_registers + overhead <= last match info size
2525   __ Ldrsw(x10,
2526            UntagSmiFieldMemOperand(last_match_info_elements,
2527                                    FixedArray::kLengthOffset));
2528   __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead);
2529   __ Cmp(x11, x10);
2530   __ B(gt, &runtime);
2531 
2532   // Store the capture count.
2533   __ SmiTag(x10, number_of_capture_registers);
2534   __ Str(x10,
2535          FieldMemOperand(last_match_info_elements,
2536                          RegExpImpl::kLastCaptureCountOffset));
2537   // Store last subject and last input.
2538   __ Str(subject,
2539          FieldMemOperand(last_match_info_elements,
2540                          RegExpImpl::kLastSubjectOffset));
2541   // Use x10 as the subject string in order to only need
2542   // one RecordWriteStub.
2543   __ Mov(x10, subject);
2544   __ RecordWriteField(last_match_info_elements,
2545                       RegExpImpl::kLastSubjectOffset,
2546                       x10,
2547                       x11,
2548                       kLRHasNotBeenSaved,
2549                       kDontSaveFPRegs);
2550   __ Str(subject,
2551          FieldMemOperand(last_match_info_elements,
2552                          RegExpImpl::kLastInputOffset));
2553   __ Mov(x10, subject);
2554   __ RecordWriteField(last_match_info_elements,
2555                       RegExpImpl::kLastInputOffset,
2556                       x10,
2557                       x11,
2558                       kLRHasNotBeenSaved,
2559                       kDontSaveFPRegs);
2560 
2561   Register last_match_offsets = x13;
2562   Register offsets_vector_index = x14;
2563   Register current_offset = x15;
2564 
2565   // Get the static offsets vector filled by the native regexp code
2566   // and fill the last match info.
2567   ExternalReference address_of_static_offsets_vector =
2568       ExternalReference::address_of_static_offsets_vector(isolate());
2569   __ Mov(offsets_vector_index, address_of_static_offsets_vector);
2570 
2571   Label next_capture, done;
2572   // Capture register counter starts from number of capture registers and
2573   // iterates down to zero (inclusive).
2574   __ Add(last_match_offsets,
2575          last_match_info_elements,
2576          RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
2577   __ Bind(&next_capture);
2578   __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
2579   __ B(mi, &done);
  // Read two 32-bit values from the static offsets vector buffer into
  // an X register.
  __ Ldr(current_offset,
         MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
  // Store the smi values in the last match info.
  __ SmiTag(x10, current_offset);
  // Clearing the bottom 32 bits gives us a Smi.
  STATIC_ASSERT(kSmiTag == 0);
  __ Bic(x11, current_offset, kSmiShiftMask);
  __ Stp(x10,
         x11,
         MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
  __ B(&next_capture);
  __ Bind(&done);
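
  // A note on the copy loop above: each iteration moves two 32-bit capture
  // offsets with one load and one store. On little-endian ARM64 the first
  // offset sits in the low 32 bits of current_offset and the second in the
  // high 32 bits. Smis live in the upper 32 bits of a word here, so SmiTag
  // (a left shift by kSmiShift == 32) tags the first offset, while Bic
  // simply clears the low 32 bits, leaving the second offset already tagged.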

  // Return last match info.
  __ Peek(x0, kLastMatchInfoOffset);
  // Drop the 4 arguments of the stub from the stack.
  __ Drop(4);
  __ Ret();

  __ Bind(&exception);
  Register exception_value = x0;
  // A stack overflow (on the backtrack stack) may have occurred
  // in the RegExp code but no exception has been created yet.
  // If there is no pending exception, handle that in the runtime system.
  __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
  __ Mov(x11,
         Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                   isolate())));
  __ Ldr(exception_value, MemOperand(x11));
  __ Cmp(x10, exception_value);
  __ B(eq, &runtime);

  // For exception, throw the exception again.
  __ TailCallRuntime(Runtime::kRegExpExecReThrow);

  __ Bind(&failure);
  __ Mov(x0, Operand(isolate()->factory()->null_value()));
  // Drop the 4 arguments of the stub from the stack.
  __ Drop(4);
  __ Ret();

  __ Bind(&runtime);
  __ TailCallRuntime(Runtime::kRegExpExec);

  // Deferred code for string handling.
  // (6) Not a long external string?  If yes, go to (8).
  __ Bind(&not_seq_nor_cons);
  // Compare flags are still set.
  __ B(ne, &not_long_external);  // Go to (8).

  // (7) External string. Make it, offset-wise, look like a sequential string.
  __ Bind(&external_string);
  if (masm->emit_debug_code()) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
    __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
    __ Tst(x10, kIsIndirectStringMask);
    __ Check(eq, kExternalStringExpectedButNotFound);
    __ And(x10, x10, kStringRepresentationMask);
    __ Cmp(x10, 0);
    __ Check(ne, kExternalStringExpectedButNotFound);
  }
  __ Ldr(subject,
         FieldMemOperand(subject, ExternalString::kResourceDataOffset));
  // Move the pointer so that offset-wise, it looks like a sequential string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
  __ B(&seq_string);    // Go to (5).
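
  // After the adjustment above, the external string's first character can be
  // addressed as subject + SeqTwoByteString::kHeaderSize - kHeapObjectTag
  // (identical to SeqOneByteString::kHeaderSize, per the assert), exactly as
  // for a sequential string, so the code at (5) needs no external-string case.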

  // (8) If this is a short external string or not a string, bail out to
  // runtime.
  __ Bind(&not_long_external);
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ TestAndBranchIfAnySet(string_representation,
                           kShortExternalStringMask | kIsNotStringMask,
                           &runtime);

  // (9) Sliced string. Replace subject with parent.
  __ Ldr(sliced_string_offset,
         UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
  __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
  __ B(&check_underlying);    // Go to (4).
#endif
}


static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
                                       Register argc, Register function,
                                       Register feedback_vector, Register index,
                                       Register new_target) {
  FrameScope scope(masm, StackFrame::INTERNAL);

  // Number-of-arguments register must be smi-tagged to call out.
  __ SmiTag(argc);
  __ Push(argc, function, feedback_vector, index);

  DCHECK(feedback_vector.Is(x2) && index.Is(x3));
  __ CallStub(stub);

  __ Pop(index, feedback_vector, function, argc);
  __ SmiUntag(argc);
}


static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
                                     Register function,
                                     Register feedback_vector, Register index,
                                     Register new_target, Register scratch1,
                                     Register scratch2, Register scratch3) {
  ASM_LOCATION("GenerateRecordCallTarget");
  DCHECK(!AreAliased(scratch1, scratch2, scratch3, argc, function,
                     feedback_vector, index, new_target));
  // Cache the called function in a feedback vector slot. Cache states are
  // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
  //  argc :            number of arguments to the construct function
  //  function :        the function to call
  //  feedback_vector : the feedback vector
  //  index :           slot in feedback vector (smi)
  Label initialize, done, miss, megamorphic, not_array_function;

  DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
            masm->isolate()->heap()->megamorphic_symbol());
  DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
            masm->isolate()->heap()->uninitialized_symbol());
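
  // In summary, the slot moves through these states:
  //   uninitialized_symbol --call--> WeakCell(function), or an AllocationSite
  //                                  when the callee is the Array() function
  //   monomorphic --call with a different target--> megamorphic_symbol
  //   cleared WeakCell --call--> may become monomorphic again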

  // Load the cache state.
  Register feedback = scratch1;
  Register feedback_map = scratch2;
  Register feedback_value = scratch3;
  __ Add(feedback, feedback_vector,
         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
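
  // The slot index is a smi, so UntagSmiAndScale computes
  // feedback_vector + untagged(index) * kPointerSize; for example, slot 3 on
  // this 64-bit target lands 24 bytes past the vector's first element, and
  // the FieldMemOperand load then accounts for the FixedArray header and tag.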

  // A monomorphic cache hit or an already megamorphic state: invoke the
  // function without changing the state.
  // We don't know if feedback value is a WeakCell or a Symbol, but it's
  // harmless to read at this position in a symbol (see static asserts in
  // type-feedback-vector.h).
  Label check_allocation_site;
  __ Ldr(feedback_value, FieldMemOperand(feedback, WeakCell::kValueOffset));
  __ Cmp(function, feedback_value);
  __ B(eq, &done);
  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
  __ B(eq, &done);
  __ Ldr(feedback_map, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
  __ B(ne, &check_allocation_site);

  // If the weak cell is cleared, we have a new chance to become monomorphic.
  __ JumpIfSmi(feedback_value, &initialize);
  __ B(&megamorphic);

  __ bind(&check_allocation_site);
  // If we came here, we need to see if we are the array function.
  // If we didn't have a matching function, and we didn't find the megamorphic
  // sentinel, then the slot holds either some other function or an
  // AllocationSite.
  __ JumpIfNotRoot(feedback_map, Heap::kAllocationSiteMapRootIndex, &miss);

  // Make sure the function is the Array() function
  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
  __ Cmp(function, scratch1);
  __ B(ne, &megamorphic);
  __ B(&done);

  __ Bind(&miss);

  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
  // megamorphic.
  __ JumpIfRoot(scratch1, Heap::kuninitialized_symbolRootIndex, &initialize);
  // MegamorphicSentinel is an immortal immovable object (undefined) so no
  // write-barrier is needed.
  __ Bind(&megamorphic);
  __ Add(scratch1, feedback_vector,
         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ LoadRoot(scratch2, Heap::kmegamorphic_symbolRootIndex);
  __ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  __ B(&done);

  // An uninitialized cache is patched with the function or sentinel to
  // indicate the ElementsKind if function is the Array constructor.
  __ Bind(&initialize);

  // Make sure the function is the Array() function
  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
  __ Cmp(function, scratch1);
  __ B(ne, &not_array_function);

  // The target function is the Array constructor.
  // Create an AllocationSite if we don't already have it, and store it in the
  // slot.
  CreateAllocationSiteStub create_stub(masm->isolate());
  CallStubInRecordCallTarget(masm, &create_stub, argc, function,
                             feedback_vector, index, new_target);
  __ B(&done);

  __ Bind(&not_array_function);
  CreateWeakCellStub weak_cell_stub(masm->isolate());
  CallStubInRecordCallTarget(masm, &weak_cell_stub, argc, function,
                             feedback_vector, index, new_target);
  __ Bind(&done);
}


void CallConstructStub::Generate(MacroAssembler* masm) {
  ASM_LOCATION("CallConstructStub::Generate");
  // x0 : number of arguments
  // x1 : the function to call
  // x2 : feedback vector
  // x3 : slot in feedback vector (Smi, for RecordCallTarget)
  Register function = x1;

  Label non_function;
  // Check that the function is not a smi.
  __ JumpIfSmi(function, &non_function);
  // Check that the function is a JSFunction.
  Register object_type = x10;
  __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
                         &non_function);

  GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12);

  __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
  Label feedback_register_initialized;
  // Put the AllocationSite from the feedback vector into x2, or undefined.
  __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
  __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
  __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
                &feedback_register_initialized);
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
  __ bind(&feedback_register_initialized);

  __ AssertUndefinedOrAllocationSite(x2, x5);

  __ Mov(x3, function);

  // Tail call to the function-specific construct stub (still in the caller
  // context at this point).
  __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
  __ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
  __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
  __ Br(x4);

  __ Bind(&non_function);
  __ Mov(x3, function);
  __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}


void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
  // x1 - function
  // x3 - slot id
  // x2 - vector
  // x4 - allocation site (loaded from vector[slot])
  Register function = x1;
  Register feedback_vector = x2;
  Register index = x3;
  Register allocation_site = x4;
  Register scratch = x5;

  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch);
  __ Cmp(function, scratch);
  __ B(ne, miss);

  __ Mov(x0, Operand(arg_count()));

  // Increment the call count for monomorphic function calls.
  __ Add(feedback_vector, feedback_vector,
         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Add(feedback_vector, feedback_vector,
         Operand(FixedArray::kHeaderSize + kPointerSize));
  __ Ldr(index, FieldMemOperand(feedback_vector, 0));
  __ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
  __ Str(index, FieldMemOperand(feedback_vector, 0));
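
  // The call count lives in the slot right after the feedback element (hence
  // the extra kPointerSize above) and is stored as a smi, so the increment is
  // a plain smi addition; e.g. if kCallCountIncrement is 1, this adds
  // Smi::FromInt(1) on every monomorphic Array call.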

  // Set up arguments for the array constructor stub.
  Register allocation_site_arg = feedback_vector;
  Register new_target_arg = index;
  __ Mov(allocation_site_arg, allocation_site);
  __ Mov(new_target_arg, function);
  ArrayConstructorStub stub(masm->isolate(), arg_count());
  __ TailCallStub(&stub);
}


void CallICStub::Generate(MacroAssembler* masm) {
  ASM_LOCATION("CallICStub");

  // x1 - function
  // x3 - slot id (Smi)
  // x2 - vector
  Label extra_checks_or_miss, call, call_function;
  int argc = arg_count();
  ParameterCount actual(argc);

  Register function = x1;
  Register feedback_vector = x2;
  Register index = x3;

  // The checks. First, does x1 match the recorded monomorphic target?
  __ Add(x4, feedback_vector,
         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));

  // We don't know that we have a weak cell. We might have a private symbol
  // or an AllocationSite, but the memory is safe to examine.
  // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
  // FixedArray.
  // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
  // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
  // computed, meaning that it can't appear to be a pointer. If the low bit is
  // 0, then hash is computed, but the 0 bit prevents the field from appearing
  // to be a pointer.
  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
                    WeakCell::kValueOffset &&
                WeakCell::kValueOffset == Symbol::kHashFieldSlot);

  __ Ldr(x5, FieldMemOperand(x4, WeakCell::kValueOffset));
  __ Cmp(x5, function);
  __ B(ne, &extra_checks_or_miss);

  // The compare above could have been a SMI/SMI comparison. Guard against this
  // convincing us that we have a monomorphic JSFunction.
  __ JumpIfSmi(function, &extra_checks_or_miss);

  // Increment the call count for monomorphic function calls.
  __ Add(feedback_vector, feedback_vector,
         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Add(feedback_vector, feedback_vector,
         Operand(FixedArray::kHeaderSize + kPointerSize));
  __ Ldr(index, FieldMemOperand(feedback_vector, 0));
  __ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
  __ Str(index, FieldMemOperand(feedback_vector, 0));

  __ Bind(&call_function);
  __ Mov(x0, argc);
  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
          RelocInfo::CODE_TARGET);

  __ bind(&extra_checks_or_miss);
  Label uninitialized, miss, not_allocation_site;

  __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &call);

  __ Ldr(x5, FieldMemOperand(x4, HeapObject::kMapOffset));
  __ JumpIfNotRoot(x5, Heap::kAllocationSiteMapRootIndex, &not_allocation_site);

  HandleArrayCase(masm, &miss);

  __ bind(&not_allocation_site);

  // The following cases attempt to handle MISS cases without going to the
  // runtime.
  if (FLAG_trace_ic) {
    __ jmp(&miss);
  }

  __ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &uninitialized);

  // We are going megamorphic. If the feedback is a JSFunction, it is fine
  // to handle it here. More complex cases are dealt with in the runtime.
  __ AssertNotSmi(x4);
  __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
  __ Add(x4, feedback_vector,
         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
  __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));

  __ Bind(&call);
  __ Mov(x0, argc);
  __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
          RelocInfo::CODE_TARGET);

  __ bind(&uninitialized);

  // We are going monomorphic, provided we actually have a JSFunction.
  __ JumpIfSmi(function, &miss);

  // Goto miss case if we do not have a function.
  __ JumpIfNotObjectType(function, x5, x5, JS_FUNCTION_TYPE, &miss);

  // Make sure the function is not the Array() function, which requires special
  // behavior on MISS.
  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, x5);
  __ Cmp(function, x5);
  __ B(eq, &miss);

  // Make sure the function belongs to the same native context.
  __ Ldr(x4, FieldMemOperand(function, JSFunction::kContextOffset));
  __ Ldr(x4, ContextMemOperand(x4, Context::NATIVE_CONTEXT_INDEX));
  __ Ldr(x5, NativeContextMemOperand());
  __ Cmp(x4, x5);
  __ B(ne, &miss);

  // Initialize the call counter.
  __ Mov(x5, Smi::FromInt(CallICNexus::kCallCountIncrement));
  __ Adds(x4, feedback_vector,
          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize + kPointerSize));

  // Store the function. Use a stub since we need a frame for allocation.
  // x2 - vector
  // x3 - slot
  // x1 - function
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    CreateWeakCellStub create_stub(masm->isolate());
    __ Push(function);
    __ CallStub(&create_stub);
    __ Pop(function);
  }

  __ B(&call_function);

  // We are here because tracing is on or we encountered a MISS case we can't
  // handle here.
  __ bind(&miss);
  GenerateMiss(masm);

  __ B(&call);
}


void CallICStub::GenerateMiss(MacroAssembler* masm) {
  ASM_LOCATION("CallICStub[Miss]");

  FrameScope scope(masm, StackFrame::INTERNAL);

  // Push the receiver and the function and feedback info.
  __ Push(x1, x2, x3);

  // Call the entry.
  __ CallRuntime(Runtime::kCallIC_Miss);

  // Move the result to x1 and exit the internal frame.
  __ Mov(x1, x0);
}


void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
  // If the receiver is a smi trigger the non-string case.
  if (check_mode_ == RECEIVER_IS_UNKNOWN) {
    __ JumpIfSmi(object_, receiver_not_string_);

    // Fetch the instance type of the receiver into result register.
    __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
    __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));

    // If the receiver is not a string trigger the non-string case.
    __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
  }

  // If the index is non-smi trigger the non-smi case.
  __ JumpIfNotSmi(index_, &index_not_smi_);

  __ Bind(&got_smi_index_);
  // Check for index out of range.
  __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset));
  __ Cmp(result_, Operand::UntagSmi(index_));
  __ B(ls, index_out_of_range_);
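
  // The unsigned 'ls' (lower-or-same) branch lets one comparison cover both
  // failure modes: the branch is taken when length <= index, and a negative
  // index, reinterpreted as unsigned, compares larger than any valid length.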

  __ SmiUntag(index_);

  StringCharLoadGenerator::Generate(masm,
                                    object_,
                                    index_.W(),
                                    result_,
                                    &call_runtime_);
  __ SmiTag(result_);
  __ Bind(&exit_);
}


void StringCharCodeAtGenerator::GenerateSlow(
    MacroAssembler* masm, EmbedMode embed_mode,
    const RuntimeCallHelper& call_helper) {
  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);

  __ Bind(&index_not_smi_);
  // If index is a heap number, try converting it to an integer.
  __ JumpIfNotHeapNumber(index_, index_not_number_);
  call_helper.BeforeCall(masm);
  if (embed_mode == PART_OF_IC_HANDLER) {
    __ Push(LoadWithVectorDescriptor::VectorRegister(),
            LoadWithVectorDescriptor::SlotRegister(), object_, index_);
  } else {
    // Save object_ on the stack and pass index_ as argument for runtime call.
    __ Push(object_, index_);
  }
  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
  } else {
    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
    // NumberToSmi discards numbers that are not exact integers.
    __ CallRuntime(Runtime::kNumberToSmi);
  }
  // Save the conversion result before the pop instructions below
  // have a chance to overwrite it.
  __ Mov(index_, x0);
  if (embed_mode == PART_OF_IC_HANDLER) {
    __ Pop(object_, LoadWithVectorDescriptor::SlotRegister(),
           LoadWithVectorDescriptor::VectorRegister());
  } else {
    __ Pop(object_);
  }
  // Reload the instance type.
  __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
  __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
  call_helper.AfterCall(masm);

  // If index is still not a smi, it must be out of range.
  __ JumpIfNotSmi(index_, index_out_of_range_);
  // Otherwise, return to the fast path.
  __ B(&got_smi_index_);

  // Call runtime. We get here when the receiver is a string and the
  // index is a number, but the code of getting the actual character
  // is too complex (e.g., when the string needs to be flattened).
  __ Bind(&call_runtime_);
  call_helper.BeforeCall(masm);
  __ SmiTag(index_);
  __ Push(object_, index_);
  __ CallRuntime(Runtime::kStringCharCodeAtRT);
  __ Mov(result_, x0);
  call_helper.AfterCall(masm);
  __ B(&exit_);

  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}


void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
  __ JumpIfNotSmi(code_, &slow_case_);
  __ Cmp(code_, Smi::FromInt(String::kMaxOneByteCharCode));
  __ B(hi, &slow_case_);

  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
  // At this point code register contains smi tagged one-byte char code.
  __ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
  __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
  __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
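
  // The single-character string cache is a FixedArray indexed by character
  // code; entries that have not been materialized yet hold undefined, which
  // is why the check above falls back to the slow case.
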
  __ Bind(&exit_);
}


void StringCharFromCodeGenerator::GenerateSlow(
    MacroAssembler* masm,
    const RuntimeCallHelper& call_helper) {
  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);

  __ Bind(&slow_case_);
  call_helper.BeforeCall(masm);
  __ Push(code_);
  __ CallRuntime(Runtime::kStringCharFromCode);
  __ Mov(result_, x0);
  call_helper.AfterCall(masm);
  __ B(&exit_);

  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}


void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
  // Inputs are in x0 (lhs) and x1 (rhs).
  DCHECK_EQ(CompareICState::BOOLEAN, state());
  ASM_LOCATION("CompareICStub[Booleans]");
  Label miss;

  __ CheckMap(x1, x2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
  __ CheckMap(x0, x3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
  if (op() != Token::EQ_STRICT && is_strong(strength())) {
    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
  } else {
    if (!Token::IsEqualityOp(op())) {
      __ Ldr(x1, FieldMemOperand(x1, Oddball::kToNumberOffset));
      __ AssertSmi(x1);
      __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
      __ AssertSmi(x0);
    }
    __ Sub(x0, x1, x0);
    __ Ret();
  }

  __ Bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateSmis(MacroAssembler* masm) {
  // Inputs are in x0 (lhs) and x1 (rhs).
  DCHECK(state() == CompareICState::SMI);
  ASM_LOCATION("CompareICStub[Smis]");
  Label miss;
  // Bail out (to 'miss') unless both x0 and x1 are smis.
  __ JumpIfEitherNotSmi(x0, x1, &miss);

  if (GetCondition() == eq) {
    // For equality we do not care about the sign of the result.
    __ Sub(x0, x0, x1);
  } else {
    // Untag before subtracting to avoid handling overflow.
    __ SmiUntag(x1);
    __ Sub(x0, x1, Operand::UntagSmi(x0));
  }
  __ Ret();

  __ Bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::NUMBER);
  ASM_LOCATION("CompareICStub[HeapNumbers]");

  Label unordered, maybe_undefined1, maybe_undefined2;
  Label miss, handle_lhs, values_in_d_regs;
  Label untag_rhs, untag_lhs;

  Register result = x0;
  Register rhs = x0;
  Register lhs = x1;
  FPRegister rhs_d = d0;
  FPRegister lhs_d = d1;

  if (left() == CompareICState::SMI) {
    __ JumpIfNotSmi(lhs, &miss);
  }
  if (right() == CompareICState::SMI) {
    __ JumpIfNotSmi(rhs, &miss);
  }

  __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
  __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);

  // Load rhs if it's a heap number.
  __ JumpIfSmi(rhs, &handle_lhs);
  __ JumpIfNotHeapNumber(rhs, &maybe_undefined1);
  __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  // Load lhs if it's a heap number.
  __ Bind(&handle_lhs);
  __ JumpIfSmi(lhs, &values_in_d_regs);
  __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
  __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));

  __ Bind(&values_in_d_regs);
  __ Fcmp(lhs_d, rhs_d);
  __ B(vs, &unordered);  // Overflow flag set if either is NaN.
  STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
  __ Cset(result, gt);  // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
  __ Csinv(result, result, xzr, ge);  // lt => -1, gt => 1, eq => 0.
  __ Ret();
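
  // A worked example of the two-instruction encoding above:
  //   lhs > rhs:   Cset -> 1;  'ge' holds, Csinv keeps 1   => GREATER
  //   lhs == rhs:  Cset -> 0;  'ge' holds, Csinv keeps 0   => EQUAL
  //   lhs < rhs:   Cset -> 0;  'ge' fails, Csinv -> ~xzr   => -1 (LESS)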

  __ Bind(&unordered);
  CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
                     CompareICState::GENERIC, CompareICState::GENERIC);
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);

  __ Bind(&maybe_undefined1);
  if (Token::IsOrderedRelationalCompareOp(op())) {
    __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
    __ JumpIfSmi(lhs, &unordered);
    __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
    __ B(&unordered);
  }

  __ Bind(&maybe_undefined2);
  if (Token::IsOrderedRelationalCompareOp(op())) {
    __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
  }

  __ Bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
  ASM_LOCATION("CompareICStub[InternalizedStrings]");
  Label miss;

  Register result = x0;
  Register rhs = x0;
  Register lhs = x1;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(lhs, rhs, &miss);

  // Check that both operands are internalized strings.
  Register rhs_map = x10;
  Register lhs_map = x11;
  Register rhs_type = x10;
  Register lhs_type = x11;
  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
  __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));

  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
  __ Orr(x12, lhs_type, rhs_type);
  __ TestAndBranchIfAnySet(
      x12, kIsNotStringMask | kIsNotInternalizedMask, &miss);

  // Internalized strings are compared by identity.
  STATIC_ASSERT(EQUAL == 0);
  __ Cmp(lhs, rhs);
  __ Cset(result, ne);
  __ Ret();

  __ Bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::UNIQUE_NAME);
  ASM_LOCATION("CompareICStub[UniqueNames]");
  DCHECK(GetCondition() == eq);
  Label miss;

  Register result = x0;
  Register rhs = x0;
  Register lhs = x1;

  Register lhs_instance_type = w2;
  Register rhs_instance_type = w3;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(lhs, rhs, &miss);

  // Check that both operands are unique names. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
  __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset));

  // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
  // should have kInternalizedTag set.
  __ JumpIfNotUniqueNameInstanceType(lhs_instance_type, &miss);
  __ JumpIfNotUniqueNameInstanceType(rhs_instance_type, &miss);

  // Unique names are compared by identity.
  STATIC_ASSERT(EQUAL == 0);
  __ Cmp(lhs, rhs);
  __ Cset(result, ne);
  __ Ret();

  __ Bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateStrings(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::STRING);
  ASM_LOCATION("CompareICStub[Strings]");

  Label miss;

  bool equality = Token::IsEqualityOp(op());

  Register result = x0;
  Register rhs = x0;
  Register lhs = x1;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(rhs, lhs, &miss);

  // Check that both operands are strings.
  Register rhs_map = x10;
  Register lhs_map = x11;
  Register rhs_type = x10;
  Register lhs_type = x11;
  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
  __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  __ Orr(x12, lhs_type, rhs_type);
  __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss);

  // Fast check for identical strings.
  Label not_equal;
  __ Cmp(lhs, rhs);
  __ B(ne, &not_equal);
  __ Mov(result, EQUAL);
  __ Ret();

  __ Bind(&not_equal);
  // Handle not identical strings

  // Check that both strings are internalized strings. If they are, we're done
  // because we already know they are not identical. We know they are both
  // strings.
  if (equality) {
    DCHECK(GetCondition() == eq);
    STATIC_ASSERT(kInternalizedTag == 0);
    Label not_internalized_strings;
    __ Orr(x12, lhs_type, rhs_type);
    __ TestAndBranchIfAnySet(
        x12, kIsNotInternalizedMask, &not_internalized_strings);
    // Result is in rhs (x0), and not EQUAL, as rhs is not a smi.
    __ Ret();
    __ Bind(&not_internalized_strings);
  }

  // Check that both strings are sequential one-byte.
  Label runtime;
  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x12,
                                                    x13, &runtime);

  // Compare flat one-byte strings. Returns when done.
  if (equality) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
                                                  x12);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
                                                    x12, x13);
  }

  // Handle more complex cases in runtime.
  __ Bind(&runtime);
  __ Push(lhs, rhs);
  if (equality) {
    __ TailCallRuntime(Runtime::kStringEquals);
  } else {
    __ TailCallRuntime(Runtime::kStringCompare);
  }

  __ Bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
  DCHECK_EQ(CompareICState::RECEIVER, state());
  ASM_LOCATION("CompareICStub[Receivers]");

  Label miss;

  Register result = x0;
  Register rhs = x0;
  Register lhs = x1;

  __ JumpIfEitherSmi(rhs, lhs, &miss);

  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  __ JumpIfObjectType(rhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);
  __ JumpIfObjectType(lhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);

  DCHECK_EQ(eq, GetCondition());
  __ Sub(result, rhs, lhs);
  __ Ret();

  __ Bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
  ASM_LOCATION("CompareICStub[KnownReceivers]");

  Label miss;
  Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);

  Register result = x0;
  Register rhs = x0;
  Register lhs = x1;

  __ JumpIfEitherSmi(rhs, lhs, &miss);

  Register rhs_map = x10;
  Register lhs_map = x11;
  Register map = x12;
  __ GetWeakValue(map, cell);
  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ Cmp(rhs_map, map);
  __ B(ne, &miss);
  __ Cmp(lhs_map, map);
  __ B(ne, &miss);

  if (Token::IsEqualityOp(op())) {
    __ Sub(result, rhs, lhs);
    __ Ret();
  } else if (is_strong(strength())) {
    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
  } else {
    Register ncr = x2;
    if (op() == Token::LT || op() == Token::LTE) {
      __ Mov(ncr, Smi::FromInt(GREATER));
    } else {
      __ Mov(ncr, Smi::FromInt(LESS));
    }
    __ Push(lhs, rhs, ncr);
    __ TailCallRuntime(Runtime::kCompare);
  }

  __ Bind(&miss);
  GenerateMiss(masm);
}


// This method handles the case where a compare stub had the wrong
// implementation. It calls a miss handler, which re-writes the stub. All other
// CompareICStub::Generate* methods should fall back into this one if their
// operands were not the expected types.
void CompareICStub::GenerateMiss(MacroAssembler* masm) {
  ASM_LOCATION("CompareICStub[Miss]");

  Register stub_entry = x11;
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    Register op = x10;
    Register left = x1;
    Register right = x0;
    // Preserve some caller-saved registers.
    __ Push(x1, x0, lr);
    // Push the arguments.
    __ Mov(op, Smi::FromInt(this->op()));
    __ Push(left, right, op);

    // Call the miss handler. This also pops the arguments.
    __ CallRuntime(Runtime::kCompareIC_Miss);

    // Compute the entry point of the rewritten stub.
    __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
    // Restore caller-saved registers.
    __ Pop(lr, x0, x1);
  }

  // Tail-call to the new stub.
  __ Jump(stub_entry);
}


void SubStringStub::Generate(MacroAssembler* masm) {
  ASM_LOCATION("SubStringStub::Generate");
  Label runtime;

  // Stack frame on entry.
  //  lr: return address
  //  jssp[0]:  substring "to" offset
  //  jssp[8]:  substring "from" offset
  //  jssp[16]: pointer to string object

  // This stub is called from the native-call %_SubString(...), so
  // nothing can be assumed about the arguments. It is tested that:
  //  "string" is a sequential string,
  //  both "from" and "to" are smis, and
  //  0 <= from <= to <= string.length (in debug mode.)
  // If any of these assumptions fail, we call the runtime system.

  static const int kToOffset = 0 * kPointerSize;
  static const int kFromOffset = 1 * kPointerSize;
  static const int kStringOffset = 2 * kPointerSize;

  Register to = x0;
  Register from = x15;
  Register input_string = x10;
  Register input_length = x11;
  Register input_type = x12;
  Register result_string = x0;
  Register result_length = x1;
  Register temp = x3;

  __ Peek(to, kToOffset);
  __ Peek(from, kFromOffset);

  // Check that both from and to are smis. If not, jump to runtime.
  __ JumpIfEitherNotSmi(from, to, &runtime);
  __ SmiUntag(from);
  __ SmiUntag(to);

  // Calculate difference between from and to. If to < from, branch to runtime.
  __ Subs(result_length, to, from);
  __ B(mi, &runtime);

  // Check from is positive.
  __ Tbnz(from, kWSignBit, &runtime);

  // Make sure first argument is a string.
  __ Peek(input_string, kStringOffset);
  __ JumpIfSmi(input_string, &runtime);
  __ IsObjectJSStringType(input_string, input_type, &runtime);

  Label single_char;
  __ Cmp(result_length, 1);
  __ B(eq, &single_char);

  // Short-cut for the case of trivial substring.
  Label return_x0;
  __ Ldrsw(input_length,
           UntagSmiFieldMemOperand(input_string, String::kLengthOffset));

  __ Cmp(result_length, input_length);
  __ CmovX(x0, input_string, eq);
  // Return original string.
  __ B(eq, &return_x0);

  // Longer than original string's length or negative: unsafe arguments.
  __ B(hi, &runtime);

  // Shorter than original string's length: an actual substring.

  //   x0   to               substring end character offset
  //   x1   result_length    length of substring result
  //   x10  input_string     pointer to input string object
  //   x10  unpacked_string  pointer to unpacked string object
  //   x11  input_length     length of input string
  //   x12  input_type       instance type of input string
  //   x15  from             substring start character offset

  // Deal with different string types: update the index if necessary and put
  // the underlying string into register unpacked_string.
  Label underlying_unpacked, sliced_string, seq_or_external_string;
  Label update_instance_type;
  // If the string is not indirect, it can only be sequential or external.
  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
  STATIC_ASSERT(kIsIndirectStringMask != 0);

  // Test for string types, and branch/fall through to appropriate unpacking
  // code.
  __ Tst(input_type, kIsIndirectStringMask);
  __ B(eq, &seq_or_external_string);
  __ Tst(input_type, kSlicedNotConsMask);
  __ B(ne, &sliced_string);

  Register unpacked_string = input_string;

  // Cons string. Check whether it is flat, then fetch first part.
  __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
  __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
  __ Ldr(unpacked_string,
         FieldMemOperand(input_string, ConsString::kFirstOffset));
  __ B(&update_instance_type);

  __ Bind(&sliced_string);
  // Sliced string. Fetch parent and correct start index by offset.
  __ Ldrsw(temp,
           UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
  __ Add(from, from, temp);
  __ Ldr(unpacked_string,
         FieldMemOperand(input_string, SlicedString::kParentOffset));

  __ Bind(&update_instance_type);
  __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
  __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
  // Now control must go to &underlying_unpacked. Since no code is generated
  // before it, we fall through instead of emitting a useless branch.

  __ Bind(&seq_or_external_string);
  // Sequential or external string. Registers unpacked_string and input_string
  // alias, so there's nothing to do here.
  // Note that if code is added here, the above code must be updated.

  //   x0   result_string    pointer to result string object (uninit)
  //   x1   result_length    length of substring result
  //   x10  unpacked_string  pointer to unpacked string object
  //   x11  input_length     length of input string
  //   x12  input_type       instance type of input string
  //   x15  from             substring start character offset
  __ Bind(&underlying_unpacked);

  if (FLAG_string_slices) {
    Label copy_routine;
    __ Cmp(result_length, SlicedString::kMinLength);
    // Short slice. Copy instead of slicing.
    __ B(lt, &copy_routine);
    // Allocate new sliced string. At this point we do not reload the instance
    // type including the string encoding because we simply rely on the info
    // provided by the original string. It does not matter if the original
    // string's encoding is wrong because we always have to recheck encoding of
    // the newly created string's parent anyway due to externalized strings.
    Label two_byte_slice, set_slice_header;
    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
    __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
    __ AllocateOneByteSlicedString(result_string, result_length, x3, x4,
                                   &runtime);
    __ B(&set_slice_header);

    __ Bind(&two_byte_slice);
    __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
                                   &runtime);

    __ Bind(&set_slice_header);
    __ SmiTag(from);
    __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
    __ Str(unpacked_string,
           FieldMemOperand(result_string, SlicedString::kParentOffset));
    __ B(&return_x0);

    __ Bind(&copy_routine);
  }
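
  // Design note: a sliced string is only a (parent, offset) header, so the
  // slice path above copies no characters and is O(1) in the substring
  // length. The SlicedString::kMinLength guard keeps very short results as
  // flat copies, where the extra indirection would cost more than it saves.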

  //   x0   result_string    pointer to result string object (uninit)
  //   x1   result_length    length of substring result
  //   x10  unpacked_string  pointer to unpacked string object
  //   x11  input_length     length of input string
  //   x12  input_type       instance type of input string
  //   x13  unpacked_char0   pointer to first char of unpacked string (uninit)
  //   x13  substring_char0  pointer to first char of substring (uninit)
  //   x14  result_char0     pointer to first char of result (uninit)
  //   x15  from             substring start character offset
  Register unpacked_char0 = x13;
  Register substring_char0 = x13;
  Register result_char0 = x14;
  Label two_byte_sequential, sequential_string, allocate_result;
  STATIC_ASSERT(kExternalStringTag != 0);
  STATIC_ASSERT(kSeqStringTag == 0);

  __ Tst(input_type, kExternalStringTag);
  __ B(eq, &sequential_string);

  __ Tst(input_type, kShortExternalStringTag);
  __ B(ne, &runtime);
  __ Ldr(unpacked_char0,
         FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
  // unpacked_char0 points to the first character of the underlying string.
  __ B(&allocate_result);

  __ Bind(&sequential_string);
  // Locate first character of underlying subject string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Add(unpacked_char0, unpacked_string,
         SeqOneByteString::kHeaderSize - kHeapObjectTag);

  __ Bind(&allocate_result);
  // Sequential one-byte string. Allocate the result.
  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
  __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);

  // Allocate and copy the resulting one-byte string.
  __ AllocateOneByteString(result_string, result_length, x3, x4, x5, &runtime);

  // Locate first character of substring to copy.
  __ Add(substring_char0, unpacked_char0, from);

  // Locate first character of result.
  __ Add(result_char0, result_string,
         SeqOneByteString::kHeaderSize - kHeapObjectTag);

  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
  __ B(&return_x0);

  // Allocate and copy the resulting two-byte string.
  __ Bind(&two_byte_sequential);
  __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);

  // Locate first character of substring to copy.
  __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));

  // Locate first character of result.
  __ Add(result_char0, result_string,
         SeqTwoByteString::kHeaderSize - kHeapObjectTag);

  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  __ Add(result_length, result_length, result_length);
  __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);

  __ Bind(&return_x0);
  Counters* counters = isolate()->counters();
  __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
  __ Drop(3);
  __ Ret();

  __ Bind(&runtime);
  __ TailCallRuntime(Runtime::kSubString);

  __ bind(&single_char);
  // x1: result_length
  // x10: input_string
  // x12: input_type
  // x15: from (untagged)
  __ SmiTag(from);
  StringCharAtGenerator generator(input_string, from, result_length, x0,
                                  &runtime, &runtime, &runtime,
                                  STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
  generator.GenerateFast(masm);
  __ Drop(3);
  __ Ret();
  generator.SkipSlow(masm, &runtime);
}


void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in x0.
  Label not_smi;
  __ JumpIfNotSmi(x0, &not_smi);
  __ Ret();
  __ Bind(&not_smi);

  Label not_heap_number;
  __ Ldr(x1, FieldMemOperand(x0, HeapObject::kMapOffset));
  __ Ldrb(x1, FieldMemOperand(x1, Map::kInstanceTypeOffset));
  // x0: object
  // x1: instance type
  __ Cmp(x1, HEAP_NUMBER_TYPE);
  __ B(ne, &not_heap_number);
  __ Ret();
  __ Bind(&not_heap_number);

  Label not_string, slow_string;
  __ Cmp(x1, FIRST_NONSTRING_TYPE);
  __ B(hs, &not_string);
  // Check if string has a cached array index.
  __ Ldr(x2, FieldMemOperand(x0, String::kHashFieldOffset));
  __ Tst(x2, Operand(String::kContainsCachedArrayIndexMask));
  __ B(ne, &slow_string);
  __ IndexFromHash(x2, x0);
  __ Ret();
  __ Bind(&slow_string);
  __ Push(x0);  // Push argument.
  __ TailCallRuntime(Runtime::kStringToNumber);
  __ Bind(&not_string);

  Label not_oddball;
  __ Cmp(x1, ODDBALL_TYPE);
  __ B(ne, &not_oddball);
  __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
  __ Ret();
  __ Bind(&not_oddball);

  __ Push(x0);  // Push argument.
  __ TailCallRuntime(Runtime::kToNumber);
}


void ToLengthStub::Generate(MacroAssembler* masm) {
  // The ToLength stub takes one argument in x0.
  Label not_smi;
  __ JumpIfNotSmi(x0, &not_smi);
  STATIC_ASSERT(kSmiTag == 0);
  __ Tst(x0, x0);
  __ Csel(x0, x0, Operand(0), ge);
  __ Ret();
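
  // The smi fast path above clamps negative inputs to zero in two
  // instructions. For example, for x0 = Smi(-5) the Tst sets the sign flag,
  // 'ge' fails, and Csel selects 0; for x0 = Smi(42) 'ge' holds and the
  // value passes through unchanged.
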
  __ Bind(&not_smi);

  __ Push(x0);  // Push argument.
  __ TailCallRuntime(Runtime::kToLength);
}


void ToStringStub::Generate(MacroAssembler* masm) {
  // The ToString stub takes one argument in x0.
  Label is_number;
  __ JumpIfSmi(x0, &is_number);

  Label not_string;
  __ JumpIfObjectType(x0, x1, x1, FIRST_NONSTRING_TYPE, &not_string, hs);
  // x0: receiver
  // x1: receiver instance type
  __ Ret();
  __ Bind(&not_string);

  Label not_heap_number;
  __ Cmp(x1, HEAP_NUMBER_TYPE);
  __ B(ne, &not_heap_number);
  __ Bind(&is_number);
  NumberToStringStub stub(isolate());
  __ TailCallStub(&stub);
  __ Bind(&not_heap_number);

  Label not_oddball;
  __ Cmp(x1, ODDBALL_TYPE);
  __ B(ne, &not_oddball);
  __ Ldr(x0, FieldMemOperand(x0, Oddball::kToStringOffset));
  __ Ret();
  __ Bind(&not_oddball);

  __ Push(x0);  // Push argument.
  __ TailCallRuntime(Runtime::kToString);
}


void StringHelper::GenerateFlatOneByteStringEquals(
    MacroAssembler* masm, Register left, Register right, Register scratch1,
    Register scratch2, Register scratch3) {
  DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3));
  Register result = x0;
  Register left_length = scratch1;
  Register right_length = scratch2;

  // Compare lengths. If lengths differ, strings can't be equal. Lengths are
  // smis, and don't need to be untagged.
  Label strings_not_equal, check_zero_length;
  __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
  __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
  __ Cmp(left_length, right_length);
  __ B(eq, &check_zero_length);

  __ Bind(&strings_not_equal);
  __ Mov(result, Smi::FromInt(NOT_EQUAL));
  __ Ret();

  // Check if the length is zero. If so, the strings must be equal (and empty).
  Label compare_chars;
  __ Bind(&check_zero_length);
  STATIC_ASSERT(kSmiTag == 0);
  __ Cbnz(left_length, &compare_chars);
  __ Mov(result, Smi::FromInt(EQUAL));
  __ Ret();

  // Compare characters. Falls through if all characters are equal.
  __ Bind(&compare_chars);
  GenerateOneByteCharsCompareLoop(masm, left, right, left_length, scratch2,
                                  scratch3, &strings_not_equal);

  // Characters in strings are equal.
  __ Mov(result, Smi::FromInt(EQUAL));
  __ Ret();
}


void StringHelper::GenerateCompareFlatOneByteStrings(
    MacroAssembler* masm, Register left, Register right, Register scratch1,
    Register scratch2, Register scratch3, Register scratch4) {
  DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
  Label result_not_equal, compare_lengths;

  // Find minimum length and length difference.
  Register length_delta = scratch3;
  __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
  __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ Subs(length_delta, scratch1, scratch2);

  Register min_length = scratch1;
  __ Csel(min_length, scratch2, scratch1, gt);
  __ Cbz(min_length, &compare_lengths);

  // Compare loop.
  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
                                  scratch4, &result_not_equal);

  // Compare lengths - strings up to min-length are equal.
  __ Bind(&compare_lengths);

  DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));

  // Use length_delta as result if it's zero.
  Register result = x0;
  __ Subs(result, length_delta, 0);

  __ Bind(&result_not_equal);
  Register greater = x10;
  Register less = x11;
  __ Mov(greater, Smi::FromInt(GREATER));
  __ Mov(less, Smi::FromInt(LESS));
  __ CmovX(result, greater, gt);
  __ CmovX(result, less, lt);
  __ Ret();
}


void StringHelper::GenerateOneByteCharsCompareLoop(
    MacroAssembler* masm, Register left, Register right, Register length,
    Register scratch1, Register scratch2, Label* chars_not_equal) {
  DCHECK(!AreAliased(left, right, length, scratch1, scratch2));

  // Change index to run from -length to -1 by adding length to string
  // start. This means that loop ends when index reaches zero, which
  // doesn't need an additional compare.
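  // For example (a worked case, not generated code): with length == 3, left
  // and right are advanced to one past their last character, index starts at
  // -3, and MemOperand(left, index) walks characters 0..2 as index goes from
  // -3 to -1; the Cbnz below falls out of the loop once the increment
  // reaches zero.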
3949   __ SmiUntag(length);
3950   __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
3951   __ Add(left, left, scratch1);
3952   __ Add(right, right, scratch1);
3953 
3954   Register index = length;
3955   __ Neg(index, length);  // index = -length;
3956 
3957   // Compare loop
3958   Label loop;
3959   __ Bind(&loop);
3960   __ Ldrb(scratch1, MemOperand(left, index));
3961   __ Ldrb(scratch2, MemOperand(right, index));
3962   __ Cmp(scratch1, scratch2);
3963   __ B(ne, chars_not_equal);
3964   __ Add(index, index, 1);
3965   __ Cbnz(index, &loop);
3966 }


void StringCompareStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x1    : left
  //  -- x0    : right
  //  -- lr    : return address
  // -----------------------------------
  __ AssertString(x1);
  __ AssertString(x0);

  Label not_same;
  __ Cmp(x0, x1);
  __ B(ne, &not_same);
  __ Mov(x0, Smi::FromInt(EQUAL));
  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x3,
                      x4);
  __ Ret();

  __ Bind(&not_same);

  // Check that both objects are sequential one-byte strings.
  Label runtime;
  __ JumpIfEitherIsNotSequentialOneByteStrings(x1, x0, x12, x13, &runtime);

  // Compare flat one-byte strings natively.
  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x3,
                      x4);
  StringHelper::GenerateCompareFlatOneByteStrings(masm, x1, x0, x12, x13, x14,
                                                  x15);

  // Call the runtime.
  // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
  __ Bind(&runtime);
  __ Push(x1, x0);
  __ TailCallRuntime(Runtime::kStringCompare);
}


void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x1    : left
  //  -- x0    : right
  //  -- lr    : return address
  // -----------------------------------

  // Load x2 with the allocation site. We stick an undefined dummy value here
  // and replace it with the real allocation site later when we instantiate
  // this stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
  __ LoadObject(x2, handle(isolate()->heap()->undefined_value()));

  // Make sure that we actually patched the allocation site.
  if (FLAG_debug_code) {
    __ AssertNotSmi(x2, kExpectedAllocationSite);
    __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
    __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
                            kExpectedAllocationSite);
  }

  // Tail call into the stub that handles binary operations with allocation
  // sites.
  BinaryOpWithAllocationSiteStub stub(isolate(), state());
  __ TailCallStub(&stub);
}


void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  // This stub needs some extra registers; they have already been allocated,
  // but their contents must be saved before they can be used.
  regs_.Save(masm);

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    Register val = regs_.scratch0();
    __ Ldr(val, MemOperand(regs_.address()));
    __ JumpIfNotInNewSpace(val, &dont_need_remembered_set);

    __ CheckPageFlagSet(regs_.object(), val, 1 << MemoryChunk::SCAN_ON_SCAVENGE,
                        &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
    InformIncrementalMarker(masm);
    regs_.Restore(masm);  // Restore the extra scratch registers we used.

    __ RememberedSetHelper(object(), address(),
                           value(),  // scratch1
                           save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);

    __ Bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
  InformIncrementalMarker(masm);
  regs_.Restore(masm);  // Restore the extra scratch registers we used.
  __ Ret();
}


void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
  Register address =
    x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
  DCHECK(!address.Is(regs_.object()));
  DCHECK(!address.Is(x0));
  __ Mov(address, regs_.address());
  __ Mov(x0, regs_.object());
  __ Mov(x1, address);
  __ Mov(x2, ExternalReference::isolate_address(isolate()));

  AllowExternalCallThatCantCauseGC scope(masm);
  ExternalReference function =
      ExternalReference::incremental_marking_record_write_function(
          isolate());
  __ CallCFunction(function, 3, 0);

  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}


void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label on_black;
  Label need_incremental;
  Label need_incremental_pop_scratch;

  Register mem_chunk = regs_.scratch0();
  Register counter = regs_.scratch1();
  __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
  __ Ldr(counter,
         MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
  __ Subs(counter, counter, 1);
  __ Str(counter,
         MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
  __ B(mi, &need_incremental);

  // If the object is not black we don't have to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

  regs_.Restore(masm);  // Restore the extra scratch registers we used.
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(), address(),
                           value(),  // scratch1
                           save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ Bind(&on_black);
  // Get the value from the slot.
  Register val = regs_.scratch0();
  __ Ldr(val, MemOperand(regs_.address()));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlagClear(val, regs_.scratch1(),
                          MemoryChunk::kEvacuationCandidateMask,
                          &ensure_not_white);

    __ CheckPageFlagClear(regs_.object(),
                          regs_.scratch1(),
                          MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                          &need_incremental);

    __ Bind(&ensure_not_white);
  }

  // We need extra registers for this, so we push the object and the address
  // register temporarily.
  __ Push(regs_.address(), regs_.object());
  __ JumpIfWhite(val,
                 regs_.scratch1(),  // Scratch.
                 regs_.object(),    // Scratch.
                 regs_.address(),   // Scratch.
                 regs_.scratch2(),  // Scratch.
                 &need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  regs_.Restore(masm);  // Restore the extra scratch registers we used.
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(), address(),
                           value(),  // scratch1
                           save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ Bind(&need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  __ Bind(&need_incremental);
  // Fall through when we need to inform the incremental marker.
}


void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // We patch the first two instructions back and forth between a nop and a
  // real branch when we start and stop incremental heap marking.
  // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so 2 nops
  // are generated.
  // See RecordWriteStub::Patch for details.
  {
    InstructionAccurateScope scope(masm, 2);
    __ adr(xzr, &skip_to_incremental_noncompacting);
    __ adr(xzr, &skip_to_incremental_compacting);
  }

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object(), address(),
                           value(),  // scratch1
                           save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
  }
  __ Ret();

  __ Bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ Bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
}
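
// A note on the patchable header above: "adr xzr, label" computes the label's
// address and discards it into the zero register, so it behaves as a nop
// while still encoding the branch offset. Patching (a sketch, assuming
// RecordWriteStub::Patch rewrites the opcode in place) toggles each of the
// two slots between that nop form and a real branch with the same offset:
//
//   adr xzr, <target>   // STORE_BUFFER_ONLY: acts as a nop, falls through.
//   b   <target>        // INCREMENTAL*: branches to the incremental handler.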


void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  CEntryStub ces(isolate(), 1, kSaveFPRegs);
  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
  int parameter_count_offset =
      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
  __ Ldr(x1, MemOperand(fp, parameter_count_offset));
  if (function_mode() == JS_FUNCTION_STUB_MODE) {
    __ Add(x1, x1, 1);
  }
  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
  __ Drop(x1);
  // Return to IC Miss stub, continuation still on stack.
  __ Ret();
}


void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
  LoadICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}


void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
  KeyedLoadICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}


void CallICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(x2);
  CallICStub stub(isolate(), state());
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }


void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                             Register receiver_map, Register scratch1,
                             Register scratch2, bool is_polymorphic,
                             Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label load_smi_map, compare_map;
  Label start_polymorphic;

  Register cached_map = scratch1;

  __ Ldr(cached_map,
         FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
  __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ Cmp(receiver_map, cached_map);
  __ B(ne, &start_polymorphic);
  // found, now call handler.
  Register handler = feedback;
  __ Ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
  __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
  __ Jump(feedback);

  Register length = scratch2;
  __ Bind(&start_polymorphic);
  __ Ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
  if (!is_polymorphic) {
    __ Cmp(length, Operand(Smi::FromInt(2)));
    __ B(eq, miss);
  }

  Register too_far = length;
  Register pointer_reg = feedback;

  // +-----+------+------+-----+-----+ ... ----+
  // | map | len  | wm0  | h0  | wm1 |      hN |
  // +-----+------+------+-----+-----+ ... ----+
  //                 0      1     2        len-1
  //                              ^              ^
  //                              |              |
  //                         pointer_reg      too_far
  //                         aka feedback     scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ Add(too_far, feedback,
         Operand::UntagSmiAndScale(length, kPointerSizeLog2));
  __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(pointer_reg, feedback,
         FixedArray::OffsetOfElementAt(2) - kHeapObjectTag);

  __ Bind(&next_loop);
  __ Ldr(cached_map, MemOperand(pointer_reg));
  __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ Cmp(receiver_map, cached_map);
  __ B(ne, &prepare_next);
  __ Ldr(handler, MemOperand(pointer_reg, kPointerSize));
  __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
  __ Jump(handler);

  __ Bind(&prepare_next);
  __ Add(pointer_reg, pointer_reg, kPointerSize * 2);
  __ Cmp(pointer_reg, too_far);
  __ B(lt, &next_loop);

  // We exhausted our array of map handler pairs.
  __ jmp(miss);
}
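
// In C-like pseudocode (illustrative names, not actual V8 API), the walk
// above over the (weak map, handler) pairs is roughly:
//
//   for (int i = 2; i < length; i += 2) {
//     map = value_of_weak_cell(feedback[i]);
//     if (map == receiver_map) tail_call(feedback[i + 1]);  // the handler
//   }
//   goto miss;  // No entry matched the receiver's map.
//
// In the "monomorphic name" case (is_polymorphic == false), a length of 2
// means the array holds only the one pair already rejected above, so the
// code bails out to the miss label early.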


static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
                                  Register receiver_map, Register feedback,
                                  Register vector, Register slot,
                                  Register scratch, Label* compare_map,
                                  Label* load_smi_map, Label* try_array) {
  __ JumpIfSmi(receiver, load_smi_map);
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(compare_map);
  Register cached_map = scratch;
  // Load the map held by the WeakCell in the feedback slot.
  __ Ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
  __ Cmp(cached_map, receiver_map);
  __ B(ne, try_array);

  Register handler = feedback;
  __ Add(handler, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
  __ Ldr(handler,
         FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
  __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
  __ Jump(handler);
}
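
// A sketch of the slot addressing above (illustrative): with a smi-tagged
// slot index, the cached map and its handler live in consecutive vector
// slots:
//
//   entry   = vector + untag(slot) * kPointerSize + FixedArray::kHeaderSize
//   map     = WeakCell at entry[0]   // compared against receiver_map
//   handler = Code at entry[1]       // jumped to on a match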


void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // x1
  Register name = LoadWithVectorDescriptor::NameRegister();          // x2
  Register vector = LoadWithVectorDescriptor::VectorRegister();      // x3
  Register slot = LoadWithVectorDescriptor::SlotRegister();          // x0
  Register feedback = x4;
  Register receiver_map = x5;
  Register scratch1 = x6;

  __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
  __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  // Is it a fixed array?
  __ Bind(&try_array);
  __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
  HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, true, &miss);

  __ Bind(&not_array);
  __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
                                               receiver, name, feedback,
                                               receiver_map, scratch1, x7);

  __ Bind(&miss);
  LoadIC::GenerateMiss(masm);

  __ Bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}


void KeyedLoadICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}


void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // x1
  Register key = LoadWithVectorDescriptor::NameRegister();           // x2
  Register vector = LoadWithVectorDescriptor::VectorRegister();      // x3
  Register slot = LoadWithVectorDescriptor::SlotRegister();          // x0
  Register feedback = x4;
  Register receiver_map = x5;
  Register scratch1 = x6;

  __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
  __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  __ Bind(&try_array);
  // Is it a fixed array?
  __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);

  // We have a polymorphic element handler.
  Label polymorphic, try_poly_name;
  __ Bind(&polymorphic);
  HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, true, &miss);

  __ Bind(&not_array);
  // Is it generic?
  __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
                   &try_poly_name);
  Handle<Code> megamorphic_stub =
      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ Bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ Cmp(key, feedback);
  __ B(ne, &miss);
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
  __ Ldr(feedback,
         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
  HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, false, &miss);

  __ Bind(&miss);
  KeyedLoadIC::GenerateMiss(masm);

  __ Bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}


void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
  VectorStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}


void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
  VectorKeyedStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}


void VectorStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}


void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // x1
  Register key = VectorStoreICDescriptor::NameRegister();           // x2
  Register vector = VectorStoreICDescriptor::VectorRegister();      // x3
  Register slot = VectorStoreICDescriptor::SlotRegister();          // x4
  DCHECK(VectorStoreICDescriptor::ValueRegister().is(x0));          // x0
  Register feedback = x5;
  Register receiver_map = x6;
  Register scratch1 = x7;

  __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
  __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  // Is it a fixed array?
  __ Bind(&try_array);
  __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
  HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, true, &miss);

  __ Bind(&not_array);
  __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
                                               receiver, key, feedback,
                                               receiver_map, scratch1, x8);

  __ Bind(&miss);
  StoreIC::GenerateMiss(masm);

  __ Bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}


void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}


void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
                                       Register receiver_map, Register scratch1,
                                       Register scratch2, Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;
  Label transition_call;

  Register cached_map = scratch1;
  Register too_far = scratch2;
  Register pointer_reg = feedback;

  __ Ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));

  // +-----+------+------+-----+-----+-----+ ... ----+
  // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
  // +-----+------+------+-----+-----+-----+ ... ----+
  //                 0      1     2              len-1
  //                 ^                                 ^
  //                 |                                 |
  //             pointer_reg                        too_far
  //             aka feedback                       scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ Add(too_far, feedback,
         Operand::UntagSmiAndScale(too_far, kPointerSizeLog2));
  __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(pointer_reg, feedback,
         FixedArray::OffsetOfElementAt(0) - kHeapObjectTag);

  __ Bind(&next_loop);
  __ Ldr(cached_map, MemOperand(pointer_reg));
  __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ Cmp(receiver_map, cached_map);
  __ B(ne, &prepare_next);
  // Is it a transitioning store?
  __ Ldr(too_far, MemOperand(pointer_reg, kPointerSize));
  __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
  __ B(ne, &transition_call);

  __ Ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
  __ Add(pointer_reg, pointer_reg, Code::kHeaderSize - kHeapObjectTag);
  __ Jump(pointer_reg);

  __ Bind(&transition_call);
  __ Ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
  __ JumpIfSmi(too_far, miss);

  __ Ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
  // Load the map into the correct register.
  DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
  __ mov(feedback, too_far);
  __ Add(receiver_map, receiver_map, Code::kHeaderSize - kHeapObjectTag);
  __ Jump(receiver_map);

  __ Bind(&prepare_next);
  __ Add(pointer_reg, pointer_reg, kPointerSize * 3);
  __ Cmp(pointer_reg, too_far);
  __ B(lt, &next_loop);

  // We exhausted our array of map handler pairs.
  __ jmp(miss);
}
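
// A rough C-like sketch of the store walk above (illustrative names): the
// array holds (weak map, weak transition map or undefined, handler) triples.
//
//   for (int i = 0; i < length; i += 3) {
//     if (value_of_weak_cell(feedback[i]) != receiver_map) continue;
//     transition = feedback[i + 1];
//     if (transition == undefined) tail_call(feedback[i + 2]);
//     map_register = value_of_weak_cell(transition);  // the new map
//     tail_call(feedback[i + 2]);                     // transitioning handler
//   }
//   goto miss;
//
// A cleared transition WeakCell reads back as a smi, which the JumpIfSmi
// above routes to the miss label.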


void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // x1
  Register key = VectorStoreICDescriptor::NameRegister();           // x2
  Register vector = VectorStoreICDescriptor::VectorRegister();      // x3
  Register slot = VectorStoreICDescriptor::SlotRegister();          // x4
  DCHECK(VectorStoreICDescriptor::ValueRegister().is(x0));          // x0
  Register feedback = x5;
  Register receiver_map = x6;
  Register scratch1 = x7;

  __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
  __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  __ Bind(&try_array);
  // Is it a fixed array?
  __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);

  // We have a polymorphic element handler.
  Label try_poly_name;
  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, x8, &miss);

  __ Bind(&not_array);
  // Is it generic?
  __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
                   &try_poly_name);
  Handle<Code> megamorphic_stub =
      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ Bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ Cmp(key, feedback);
  __ B(ne, &miss);
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
  __ Ldr(feedback,
         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
  HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, false, &miss);

  __ Bind(&miss);
  KeyedStoreIC::GenerateMiss(masm);

  __ Bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}


// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
    Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);


void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    ProfileEntryHookStub stub(masm->isolate());
    Assembler::BlockConstPoolScope no_const_pools(masm);
    DontEmitDebugCodeScope no_debug_code(masm);
    Label entry_hook_call_start;
    __ Bind(&entry_hook_call_start);
    __ Push(lr);
    __ CallStub(&stub);
    DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
           kProfileEntryHookCallSize);

    __ Pop(lr);
  }
}


void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);

  // Save all kCallerSaved registers (including lr), since this can be called
  // from anywhere.
  // TODO(jbramley): What about FP registers?
  __ PushCPURegList(kCallerSaved);
  DCHECK(kCallerSaved.IncludesAliasOf(lr));
  const int kNumSavedRegs = kCallerSaved.Count();

  // Compute the function's address as the first argument.
  __ Sub(x0, lr, kProfileEntryHookCallSize);

#if V8_HOST_ARCH_ARM64
  uintptr_t entry_hook =
      reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
  __ Mov(x10, entry_hook);
#else
  // Under the simulator we need to indirect the entry hook through a
  // trampoline function at a known address.
  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
  __ Mov(x10, Operand(ExternalReference(&dispatcher,
                                        ExternalReference::BUILTIN_CALL,
                                        isolate())));
  // It additionally takes an isolate as a third parameter.
  __ Mov(x2, ExternalReference::isolate_address(isolate()));
#endif

  // The caller's return address is above the saved temporaries.
  // Grab its location for the second argument to the hook.
  __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);

  {
    // Create a dummy frame, as CallCFunction requires this.
    FrameScope frame(masm, StackFrame::MANUAL);
    __ CallCFunction(x10, 2, 0);
  }

  __ PopCPURegList(kCallerSaved);
  __ Ret();
}


void DirectCEntryStub::Generate(MacroAssembler* masm) {
  // When calling into C++ code the stack pointer must be csp.
  // Therefore this code must use csp for peek/poke operations when the
  // stub is generated. When the stub is called
  // (via DirectCEntryStub::GenerateCall), the caller must set up an ExitFrame
  // and configure the stack pointer *before* doing the call.
  const Register old_stack_pointer = __ StackPointer();
  __ SetStackPointer(csp);

  // Put return address on the stack (accessible to GC through exit frame pc).
  __ Poke(lr, 0);
  // Call the C++ function.
  __ Blr(x10);
  // Return to calling code.
  __ Peek(lr, 0);
  __ AssertFPCRState();
  __ Ret();

  __ SetStackPointer(old_stack_pointer);
}

void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                    Register target) {
  // Make sure the caller configured the stack pointer (see comment in
  // DirectCEntryStub::Generate).
  DCHECK(csp.Is(__ StackPointer()));

  intptr_t code =
      reinterpret_cast<intptr_t>(GetCode().location());
  __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
  __ Mov(x10, target);
  // Branch to the stub.
  __ Blr(lr);
}


// Probe the name dictionary in the 'elements' register.
// Jump to the 'done' label if a property with the given name is found.
// Jump to the 'miss' label otherwise.
//
// If the lookup was successful, 'scratch2' will point at the matching entry:
// elements + index * kPointerSize, with the index already scaled by the
// entry size. The 'elements' and 'name' registers are preserved on miss.
void NameDictionaryLookupStub::GeneratePositiveLookup(
    MacroAssembler* masm,
    Label* miss,
    Label* done,
    Register elements,
    Register name,
    Register scratch1,
    Register scratch2) {
  DCHECK(!AreAliased(elements, name, scratch1, scratch2));

  // Assert that 'name' holds a name.
  __ AssertName(name);

  // Compute the capacity mask.
  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
  __ Sub(scratch1, scratch1, 1);

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is
      // right shifted in the following And instruction.
      DCHECK(NameDictionary::GetProbeOffset(i) <
          1 << (32 - Name::kHashFieldOffset));
      __ Add(scratch2, scratch2, Operand(
          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
    }
    __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));

    // Scale the index by multiplying by the element size.
    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
    __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));

    // Check if the key is identical to the name.
    UseScratchRegisterScope temps(masm);
    Register scratch3 = temps.AcquireX();
    __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
    __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
    __ Cmp(name, scratch3);
    __ B(eq, done);
  }

  // The inlined probes didn't find the entry.
  // Call the complete stub to scan the whole dictionary.

  CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
  spill_list.Combine(lr);
  spill_list.Remove(scratch1);
  spill_list.Remove(scratch2);

  __ PushCPURegList(spill_list);

  if (name.is(x0)) {
    DCHECK(!elements.is(x1));
    __ Mov(x1, name);
    __ Mov(x0, elements);
  } else {
    __ Mov(x0, elements);
    __ Mov(x1, name);
  }

  Label not_found;
  NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
  __ CallStub(&stub);
  __ Cbz(x0, &not_found);
  __ Mov(scratch2, x2);  // Move entry index into scratch2.
  __ PopCPURegList(spill_list);
  __ B(done);

  __ Bind(&not_found);
  __ PopCPURegList(spill_list);
  __ B(miss);
}
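
// A worked example of the masked probe above, with illustrative numbers:
// a capacity of 64 gives mask = 63, and with hash = 100 the first probe
// lands at 100 & 63 = 36. Each later probe i adds
// NameDictionary::GetProbeOffset(i) to the hash before masking, so every
// probe stays within [0, 63]. The resulting index is then multiplied by
// kEntrySize (3) and scaled by the pointer size to reach the entry's
// (key, value, details) triple.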


void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
                                                      Label* miss,
                                                      Label* done,
                                                      Register receiver,
                                                      Register properties,
                                                      Handle<Name> name,
                                                      Register scratch0) {
  DCHECK(!AreAliased(receiver, properties, scratch0));
  DCHECK(name->IsUniqueName());
  // If the names in the probed slots (1 to kProbes - 1 for this hash value)
  // are not equal to the name, and the kProbes-th slot is unused (its name is
  // the undefined value), then the hash table is guaranteed not to contain
  // the property. This holds even if some slots represent deleted properties
  // (their names are the hole value).
  for (int i = 0; i < kInlinedProbes; i++) {
    // scratch0 points to properties hash.
    // Compute the masked index: (hash + i + i * i) & mask.
    Register index = scratch0;
    // Capacity is smi 2^n.
    __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
    __ Sub(index, index, 1);
    __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));

    // Scale the index by multiplying by the entry size.
    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
    __ Add(index, index, Operand(index, LSL, 1));  // index *= 3.

    Register entity_name = scratch0;
    // Having undefined at this place means the name is not contained.
    Register tmp = index;
    __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
    __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));

    __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);

    // Stop if we find the property.
    __ Cmp(entity_name, Operand(name));
    __ B(eq, miss);

    Label good;
    __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);

    // Check if the entry name is not a unique name.
    __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
    __ Ldrb(entity_name,
            FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
    __ Bind(&good);
  }

  CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
  spill_list.Combine(lr);
  spill_list.Remove(scratch0);  // Scratch registers don't need to be preserved.

  __ PushCPURegList(spill_list);

  __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Mov(x1, Operand(name));
  NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
  __ CallStub(&stub);
  // Move the stub's return value to scratch0. Note that scratch0 is not
  // included in spill_list and won't be clobbered by PopCPURegList.
  __ Mov(scratch0, x0);
  __ PopCPURegList(spill_list);

  __ Cbz(scratch0, done);
  __ B(miss);
}


void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false. That means
  // we cannot call anything that could cause a GC from this stub.
  //
  // Arguments are in x0 and x1:
  //   x0: property dictionary.
  //   x1: the name of the property we are looking for.
  //
  // The return value is in x0 and is zero if the lookup failed, non-zero
  // otherwise. If the lookup is successful, x2 will contain the index of the
  // entry.

  Register result = x0;
  Register dictionary = x0;
  Register key = x1;
  Register index = x2;
  Register mask = x3;
  Register hash = x4;
  Register undefined = x5;
  Register entry_key = x6;

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

  __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
  __ Sub(mask, mask, 1);

  __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);

  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    // Capacity is smi 2^n.
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is
      // right shifted in the following And instruction.
      DCHECK(NameDictionary::GetProbeOffset(i) <
             1 << (32 - Name::kHashFieldOffset));
      __ Add(index, hash,
             NameDictionary::GetProbeOffset(i) << Name::kHashShift);
    } else {
      __ Mov(index, hash);
    }
    __ And(index, mask, Operand(index, LSR, Name::kHashShift));

    // Scale the index by multiplying by the entry size.
    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
    __ Add(index, index, Operand(index, LSL, 1));  // index *= 3.

    __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
    __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));

    // Having undefined at this place means the name is not contained.
    __ Cmp(entry_key, undefined);
    __ B(eq, &not_in_dictionary);

    // Stop if we find the property.
    __ Cmp(entry_key, key);
    __ B(eq, &in_dictionary);

    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
      // Check if the entry name is not a unique name.
      __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
      __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
    }
  }

  __ Bind(&maybe_in_dictionary);
  // If we are doing a negative lookup, a probing failure should be treated
  // as a lookup success. For a positive lookup, a probing failure should be
  // treated as a lookup failure.
  if (mode() == POSITIVE_LOOKUP) {
    __ Mov(result, 0);
    __ Ret();
  }

  __ Bind(&in_dictionary);
  __ Mov(result, 1);
  __ Ret();

  __ Bind(&not_in_dictionary);
  __ Mov(result, 0);
  __ Ret();
}
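
// The "index *= 3" scaling above uses a shift-add rather than a multiply:
// index + (index << 1) == 3 * index. For example, for index = 5:
// 5 + (5 << 1) = 5 + 10 = 15, the first element of the sixth
// (key, value, details) triple.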


template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
                                AllocationSiteOverrideMode mode) {
  ASM_LOCATION("CreateArrayDispatch");
  if (mode == DISABLE_ALLOCATION_SITES) {
    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
    __ TailCallStub(&stub);

  } else if (mode == DONT_OVERRIDE) {
    Register kind = x3;
    int last_index =
        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      Label next;
      ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
      // TODO(jbramley): Is this the best way to handle this? Can we make the
      // tail calls conditional, rather than hopping over each one?
      __ CompareAndBranch(kind, candidate_kind, ne, &next);
      T stub(masm->isolate(), candidate_kind);
      __ TailCallStub(&stub);
      __ Bind(&next);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);

  } else {
    UNREACHABLE();
  }
}


// TODO(jbramley): If this needs to be a special case, make it a proper
// template specialization, and not a separate function.
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
                                           AllocationSiteOverrideMode mode) {
  ASM_LOCATION("CreateArrayDispatchOneArgument");
  // x0 - argc
  // x1 - constructor?
  // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
  // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
  // sp[0] - last argument

  Register allocation_site = x2;
  Register kind = x3;

  Label normal_sequence;
  if (mode == DONT_OVERRIDE) {
    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
    STATIC_ASSERT(FAST_ELEMENTS == 2);
    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
    STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
    STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);

    // Is the low bit set? If so, the array is holey.
    __ Tbnz(kind, 0, &normal_sequence);
  }

  // Look at the last argument.
  // TODO(jbramley): What does a 0 argument represent?
  __ Peek(x10, 0);
  __ Cbz(x10, &normal_sequence);

  if (mode == DISABLE_ALLOCATION_SITES) {
    ElementsKind initial = GetInitialFastElementsKind();
    ElementsKind holey_initial = GetHoleyElementsKind(initial);

    ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
                                                  holey_initial,
                                                  DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub_holey);

    __ Bind(&normal_sequence);
    ArraySingleArgumentConstructorStub stub(masm->isolate(),
                                            initial,
                                            DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    // We are going to create a holey array, but our kind is non-holey.
    // Fix kind and retry (only if we have an allocation site in the slot).
    __ Orr(kind, kind, 1);

    if (FLAG_debug_code) {
      __ Ldr(x10, FieldMemOperand(allocation_site, 0));
      __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
                       &normal_sequence);
      __ Assert(eq, kExpectedAllocationSite);
    }

    // Save the resulting elements kind in type info. We can't just store
    // 'kind' in the AllocationSite::transition_info field because elements
    // kind is restricted to a portion of the field; upper bits need to be
    // left alone.
    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
    __ Ldr(x11, FieldMemOperand(allocation_site,
                                AllocationSite::kTransitionInfoOffset));
    __ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
    __ Str(x11, FieldMemOperand(allocation_site,
                                AllocationSite::kTransitionInfoOffset));

    __ Bind(&normal_sequence);
    int last_index =
        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      Label next;
      ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
      __ CompareAndBranch(kind, candidate_kind, ne, &next);
      ArraySingleArgumentConstructorStub stub(masm->isolate(), candidate_kind);
      __ TailCallStub(&stub);
      __ Bind(&next);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}
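
// The packed-to-holey fix-up above leans on the elements-kind numbering
// asserted at the top of the function: each holey kind is its packed
// counterpart with the low bit set, so "kind | 1" converts in place, e.g.
// FAST_SMI_ELEMENTS (0) -> FAST_HOLEY_SMI_ELEMENTS (1) and
// FAST_DOUBLE_ELEMENTS (4) -> FAST_HOLEY_DOUBLE_ELEMENTS (5). The same
// numbering is what lets Tbnz test bit 0 to detect an already-holey kind.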
5107 
5108 
5109 template<class T>
ArrayConstructorStubAheadOfTimeHelper(Isolate * isolate)5110 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
5111   int to_index = GetSequenceIndexFromFastElementsKind(
5112       TERMINAL_FAST_ELEMENTS_KIND);
5113   for (int i = 0; i <= to_index; ++i) {
5114     ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5115     T stub(isolate, kind);
5116     stub.GetCode();
5117     if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
5118       T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
5119       stub1.GetCode();
5120     }
5121   }
5122 }
5123 
5124 
GenerateStubsAheadOfTime(Isolate * isolate)5125 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
5126   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
5127       isolate);
5128   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
5129       isolate);
5130   ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
5131       isolate);
5132 }
5133 
5134 
GenerateStubsAheadOfTime(Isolate * isolate)5135 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
5136     Isolate* isolate) {
5137   ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
5138   for (int i = 0; i < 2; i++) {
5139     // For internal arrays we only need a few things
5140     InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
5141     stubh1.GetCode();
5142     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
5143     stubh2.GetCode();
5144     InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
5145     stubh3.GetCode();
5146   }
5147 }
5148 
5149 
GenerateDispatchToArrayStub(MacroAssembler * masm,AllocationSiteOverrideMode mode)5150 void ArrayConstructorStub::GenerateDispatchToArrayStub(
5151     MacroAssembler* masm,
5152     AllocationSiteOverrideMode mode) {
5153   Register argc = x0;
5154   if (argument_count() == ANY) {
5155     Label zero_case, n_case;
5156     __ Cbz(argc, &zero_case);
5157     __ Cmp(argc, 1);
5158     __ B(ne, &n_case);
5159 
5160     // One argument.
5161     CreateArrayDispatchOneArgument(masm, mode);
5162 
5163     __ Bind(&zero_case);
5164     // No arguments.
5165     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5166 
5167     __ Bind(&n_case);
5168     // N arguments.
5169     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5170 
5171   } else if (argument_count() == NONE) {
5172     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5173   } else if (argument_count() == ONE) {
5174     CreateArrayDispatchOneArgument(masm, mode);
5175   } else if (argument_count() == MORE_THAN_ONE) {
5176     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5177   } else {
5178     UNREACHABLE();
5179   }
5180 }
5181 
5182 
Generate(MacroAssembler * masm)5183 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
5184   ASM_LOCATION("ArrayConstructorStub::Generate");
5185   // ----------- S t a t e -------------
5186   //  -- x0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
5187   //  -- x1 : constructor
5188   //  -- x2 : AllocationSite or undefined
5189   //  -- x3 : new target
5190   //  -- sp[0] : last argument
5191   // -----------------------------------
5192   Register constructor = x1;
5193   Register allocation_site = x2;
5194   Register new_target = x3;
5195 
5196   if (FLAG_debug_code) {
5197     // The array construct code is only set for the global and natives
5198     // builtin Array functions which always have maps.
5199 
5200     Label unexpected_map, map_ok;
5201     // Initial map for the builtin Array function should be a map.
5202     __ Ldr(x10, FieldMemOperand(constructor,
5203                                 JSFunction::kPrototypeOrInitialMapOffset));
5204     // Will both indicate a NULL and a Smi.
5205     __ JumpIfSmi(x10, &unexpected_map);
5206     __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
5207     __ Bind(&unexpected_map);
5208     __ Abort(kUnexpectedInitialMapForArrayFunction);
5209     __ Bind(&map_ok);
5210 
5211     // We should either have undefined in the allocation_site register or a
5212     // valid AllocationSite.
5213     __ AssertUndefinedOrAllocationSite(allocation_site, x10);
5214   }
5215 
5216   // Enter the context of the Array function.
5217   __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
5218 
5219   Label subclassing;
5220   __ Cmp(new_target, constructor);
5221   __ B(ne, &subclassing);
5222 
5223   Register kind = x3;
5224   Label no_info;
5225   // Get the elements kind and case on that.
5226   __ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
5227 
5228   __ Ldrsw(kind,
5229            UntagSmiFieldMemOperand(allocation_site,
5230                                    AllocationSite::kTransitionInfoOffset));
5231   __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
5232   GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
5233 
5234   __ Bind(&no_info);
5235   GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
5236 
5237   // Subclassing support.
5238   __ Bind(&subclassing);
5239   switch (argument_count()) {
5240     case ANY:
5241     case MORE_THAN_ONE:
5242       __ Poke(constructor, Operand(x0, LSL, kPointerSizeLog2));
5243       __ Add(x0, x0, Operand(3));
5244       break;
5245     case NONE:
5246       __ Poke(constructor, 0 * kPointerSize);
5247       __ Mov(x0, Operand(3));
5248       break;
5249     case ONE:
5250       __ Poke(constructor, 1 * kPointerSize);
5251       __ Mov(x0, Operand(4));
5252       break;
5253   }
5254   __ Push(new_target, allocation_site);
5255   __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
5256 }
5257 
5258 
GenerateCase(MacroAssembler * masm,ElementsKind kind)5259 void InternalArrayConstructorStub::GenerateCase(
5260     MacroAssembler* masm, ElementsKind kind) {
5261   Label zero_case, n_case;
5262   Register argc = x0;
5263 
5264   __ Cbz(argc, &zero_case);
5265   __ CompareAndBranch(argc, 1, ne, &n_case);
5266 
5267   // One argument.
5268   if (IsFastPackedElementsKind(kind)) {
5269     Label packed_case;
5270 
5271     // We might need to create a holey array; look at the first argument.
5272     __ Peek(x10, 0);
5273     __ Cbz(x10, &packed_case);
5274 
5275     InternalArraySingleArgumentConstructorStub
5276         stub1_holey(isolate(), GetHoleyElementsKind(kind));
5277     __ TailCallStub(&stub1_holey);
5278 
5279     __ Bind(&packed_case);
5280   }
5281   InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
5282   __ TailCallStub(&stub1);
5283 
5284   __ Bind(&zero_case);
5285   // No arguments.
5286   InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
5287   __ TailCallStub(&stub0);
5288 
5289   __ Bind(&n_case);
5290   // N arguments.
5291   InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
5292   __ TailCallStub(&stubN);
5293 }
5294 
5295 
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x0 : argc
  //  -- x1 : constructor
  //  -- sp[0] : return address
  //  -- sp[8] : last argument
  // -----------------------------------

  Register constructor = x1;

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    Label unexpected_map, map_ok;
    // Initial map for the builtin Array function should be a map.
    __ Ldr(x10, FieldMemOperand(constructor,
                                JSFunction::kPrototypeOrInitialMapOffset));
    // The Smi check catches both a NULL pointer (0) and a genuine Smi.
    __ JumpIfSmi(x10, &unexpected_map);
    __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
    __ Bind(&unexpected_map);
    __ Abort(kUnexpectedInitialMapForArrayFunction);
    __ Bind(&map_ok);
  }

  Register kind = w3;
  // Figure out the right elements kind.
  __ Ldr(x10, FieldMemOperand(constructor,
                              JSFunction::kPrototypeOrInitialMapOffset));

  // Retrieve elements_kind from map.
  __ LoadElementsKindFromMap(kind, x10);

  if (FLAG_debug_code) {
    __ Cmp(x3, FAST_ELEMENTS);
    __ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
    __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
  }

  Label fast_elements_case;
  __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ Bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}


void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
  Register context = cp;
  Register result = x0;
  Register slot = x2;
  Label slow_case;

  // Go up the context chain to the script context.
  for (int i = 0; i < depth(); ++i) {
    __ Ldr(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
    context = result;
  }
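  // (depth() is a compile-time property of this stub, so the loop above
  // unrolls into straight-line loads during code generation.)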

  // Load the PropertyCell value at the specified slot.
  __ Add(result, context, Operand(slot, LSL, kPointerSizeLog2));
  __ Ldr(result, ContextMemOperand(result));
  __ Ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));

  // If the result is not the_hole, return. Otherwise, handle in the runtime.
  __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &slow_case);
  __ Ret();

  // Fall back to the runtime.
  __ Bind(&slow_case);
  __ SmiTag(slot);
  __ Push(slot);
  __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}


void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
  Register context = cp;
  Register value = x0;
  Register slot = x2;
  Register context_temp = x10;
  Register cell = x10;
  Register cell_details = x11;
  Register cell_value = x12;
  Register cell_value_map = x13;
  Register value_map = x14;
  Label fast_heapobject_case, fast_smi_case, slow_case;

  if (FLAG_debug_code) {
    __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
    __ Check(ne, kUnexpectedValue);
  }

  // Go up the context chain to the script context.
  for (int i = 0; i < depth(); i++) {
    __ Ldr(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
    context = context_temp;
  }

  // Load the PropertyCell at the specified slot.
  __ Add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
  __ Ldr(cell, ContextMemOperand(cell));

  // Load PropertyDetails for the cell (actually only the cell_type and kind).
  __ Ldr(cell_details,
         UntagSmiFieldMemOperand(cell, PropertyCell::kDetailsOffset));
  __ And(cell_details, cell_details,
         PropertyDetails::PropertyCellTypeField::kMask |
             PropertyDetails::KindField::kMask |
             PropertyDetails::kAttributesReadOnlyMask);

  // Check if PropertyCell holds mutable data.
  Label not_mutable_data;
  __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
                           PropertyCellType::kMutable) |
                           PropertyDetails::KindField::encode(kData));
  __ B(ne, &not_mutable_data);
  __ JumpIfSmi(value, &fast_smi_case);
  __ Bind(&fast_heapobject_case);
  __ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
  // RecordWriteField clobbers the value register, so we copy it before the
  // call.
  __ Mov(x11, value);
  __ RecordWriteField(cell, PropertyCell::kValueOffset, x11, x12,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Ret();

  __ Bind(&not_mutable_data);
  // Check if PropertyCell value matches the new value (relevant for Constant,
  // ConstantType and Undefined cells).
  Label not_same_value;
  __ Ldr(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
  __ Cmp(cell_value, value);
  __ B(ne, &not_same_value);

  // Make sure the PropertyCell is not marked READ_ONLY.
  __ Tst(cell_details, PropertyDetails::kAttributesReadOnlyMask);
  __ B(ne, &slow_case);

  if (FLAG_debug_code) {
    Label done;
    // This can only be true for Constant, ConstantType and Undefined cells,
    // because we never store the_hole via this stub.
    __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
                             PropertyCellType::kConstant) |
                             PropertyDetails::KindField::encode(kData));
    __ B(eq, &done);
    __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
                             PropertyCellType::kConstantType) |
                             PropertyDetails::KindField::encode(kData));
    __ B(eq, &done);
    __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
                             PropertyCellType::kUndefined) |
                             PropertyDetails::KindField::encode(kData));
    __ Check(eq, kUnexpectedValue);
    __ Bind(&done);
  }
  __ Ret();
  __ Bind(&not_same_value);

  // Check if PropertyCell contains data with constant type (and is not
  // READ_ONLY).
  __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
                           PropertyCellType::kConstantType) |
                           PropertyDetails::KindField::encode(kData));
  __ B(ne, &slow_case);

  // Now either both old and new values must be smis or both must be heap
  // objects with same map.
  Label value_is_heap_object;
  __ JumpIfNotSmi(value, &value_is_heap_object);
  __ JumpIfNotSmi(cell_value, &slow_case);
  // Old and new values are smis, no need for a write barrier here.
  __ Bind(&fast_smi_case);
  __ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
  __ Ret();

  __ Bind(&value_is_heap_object);
  __ JumpIfSmi(cell_value, &slow_case);

  __ Ldr(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
  __ Ldr(value_map, FieldMemOperand(value, HeapObject::kMapOffset));
  __ Cmp(cell_value_map, value_map);
  __ B(eq, &fast_heapobject_case);

  // Fall back to the runtime.
  __ Bind(&slow_case);
  __ SmiTag(slot);
  __ Push(slot, value);
  __ TailCallRuntime(is_strict(language_mode())
                         ? Runtime::kStoreGlobalViaContext_Strict
                         : Runtime::kStoreGlobalViaContext_Sloppy);
}


// The number of registers that CallApiFunctionAndReturn will need to save on
// the stack. The space for these registers needs to be allocated in the
// ExitFrame before calling CallApiFunctionAndReturn.
static const int kCallApiFunctionSpillSpace = 4;


static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return static_cast<int>(ref0.address() - ref1.address());
}


// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions.
// 'stack_space' is the space to be unwound on exit (includes the call JS
// arguments space and the additional space allocated for the fast call).
// 'spill_offset' is the offset from the stack pointer where
// CallApiFunctionAndReturn can spill registers.
static void CallApiFunctionAndReturn(
    MacroAssembler* masm, Register function_address,
    ExternalReference thunk_ref, int stack_space,
    MemOperand* stack_space_operand, int spill_offset,
    MemOperand return_value_operand, MemOperand* context_restore_operand) {
  ASM_LOCATION("CallApiFunctionAndReturn");
  Isolate* isolate = masm->isolate();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate);
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate), next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate), next_address);

  DCHECK(function_address.is(x1) || function_address.is(x2));

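  // If the profiler is active, route the call through the invocation thunk
  // so the profiler can observe the callback; otherwise call the API
  // function directly.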
  Label profiler_disabled;
  Label end_profiler_check;
  __ Mov(x10, ExternalReference::is_profiling_address(isolate));
  __ Ldrb(w10, MemOperand(x10));
  __ Cbz(w10, &profiler_disabled);
  __ Mov(x3, thunk_ref);
  __ B(&end_profiler_check);

  __ Bind(&profiler_disabled);
  __ Mov(x3, function_address);
  __ Bind(&end_profiler_check);

  // Save the callee-save registers we are going to use.
  // TODO(all): Is this necessary? ARM doesn't do it.
  STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
  __ Poke(x19, (spill_offset + 0) * kXRegSize);
  __ Poke(x20, (spill_offset + 1) * kXRegSize);
  __ Poke(x21, (spill_offset + 2) * kXRegSize);
  __ Poke(x22, (spill_offset + 3) * kXRegSize);

  // Allocate HandleScope in callee-save registers.
  // The HandleScope must be restored after the call to the API function; by
  // keeping its fields in callee-save registers they are preserved across
  // the C call.
  Register handle_scope_base = x22;
  Register next_address_reg = x19;
  Register limit_reg = x20;
  Register level_reg = w21;

  __ Mov(handle_scope_base, next_address);
  __ Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
  __ Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
  __ Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
  __ Add(level_reg, level_reg, 1);
  __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ Mov(x0, ExternalReference::isolate_address(isolate));
    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on stack (could have moved after GC).
  // DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate);
  stub.GenerateCall(masm, x3);

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ Mov(x0, ExternalReference::isolate_address(isolate));
    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  __ Ldr(x0, return_value_operand);
  __ Bind(&return_value_loaded);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
  if (__ emit_debug_code()) {
    __ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
    __ Cmp(w1, level_reg);
    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
  }
  __ Sub(level_reg, level_reg, 1);
  __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
  __ Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
  __ Cmp(limit_reg, x1);
  __ B(ne, &delete_allocated_handles);

  // Leave the API exit frame.
  __ Bind(&leave_exit_frame);
  // Restore callee-saved registers.
  __ Peek(x19, (spill_offset + 0) * kXRegSize);
  __ Peek(x20, (spill_offset + 1) * kXRegSize);
  __ Peek(x21, (spill_offset + 2) * kXRegSize);
  __ Peek(x22, (spill_offset + 3) * kXRegSize);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    __ Ldr(cp, *context_restore_operand);
  }

  if (stack_space_operand != NULL) {
    __ Ldr(w2, *stack_space_operand);
  }

  __ LeaveExitFrame(false, x1, !restore_context);

  // Check if the function scheduled an exception.
  __ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
  __ Ldr(x5, MemOperand(x5));
  __ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex,
                   &promote_scheduled_exception);

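  // Unwind the JS arguments: either a dynamic byte count loaded into w2
  // above, or a static slot count known at code-generation time.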
  if (stack_space_operand != NULL) {
    __ Drop(x2, 1);
  } else {
    __ Drop(stack_space);
  }
  __ Ret();

  // Re-throw by promoting a scheduled exception.
  __ Bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException);

  // HandleScope limit has changed. Delete allocated extensions.
  __ Bind(&delete_allocated_handles);
  __ Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
  // Save the return value in a callee-save register.
  Register saved_result = x19;
  __ Mov(saved_result, x0);
  __ Mov(x0, ExternalReference::isolate_address(isolate));
  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
                   1);
  __ Mov(x0, saved_result);
  __ B(&leave_exit_frame);
}


static void CallApiFunctionStubHelper(MacroAssembler* masm,
                                      const ParameterCount& argc,
                                      bool return_first_arg,
                                      bool call_data_undefined) {
  // ----------- S t a t e -------------
  //  -- x0                  : callee
  //  -- x4                  : call_data
  //  -- x2                  : holder
  //  -- x1                  : api_function_address
  //  -- x3                  : number of arguments if argc is a register
  //  -- cp                  : context
  //  --
  //  -- sp[0]               : last argument
  //  -- ...
  //  -- sp[(argc - 1) * 8]  : first argument
  //  -- sp[argc * 8]        : receiver
  // -----------------------------------

  Register callee = x0;
  Register call_data = x4;
  Register holder = x2;
  Register api_function_address = x1;
  Register context = cp;

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kArgsLength == 7);
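  // The asserts above pin the FunctionCallbackArguments stack layout built
  // by the pushes below: holder ends up at the top of the stack (index 0)
  // and the saved context at index 6.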

  DCHECK(argc.is_immediate() || x3.is(argc.reg()));

  // FunctionCallbackArguments: context, callee and call data.
  __ Push(context, callee, call_data);

  // Load context from callee.
  __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));

  if (call_data_undefined) {
    __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
  }
  Register isolate_reg = x5;
  __ Mov(isolate_reg, ExternalReference::isolate_address(masm->isolate()));

  // FunctionCallbackArguments:
  //    return value, return value default, isolate, holder.
  __ Push(call_data, call_data, isolate_reg, holder);

  // Prepare arguments.
  Register args = x6;
  __ Mov(args, masm->StackPointer());

  // Allocate the v8::Arguments structure in the arguments' space, since it's
  // not controlled by GC.
  const int kApiStackSpace = 4;

  // Allocate space so that CallApiFunctionAndReturn can store some scratch
  // registers on the stack.
  const int kCallApiFunctionSpillSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);

  DCHECK(!AreAliased(x0, api_function_address));
  // x0 = FunctionCallbackInfo&
  // Arguments is after the return address.
  __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
  if (argc.is_immediate()) {
    // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
    __ Add(x10, args,
           Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
    __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
    // FunctionCallbackInfo::length_ = argc and
    // FunctionCallbackInfo::is_construct_call = 0
    __ Mov(x10, argc.immediate());
    __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
  } else {
    // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
    __ Add(x10, args, Operand(argc.reg(), LSL, kPointerSizeLog2));
    __ Add(x10, x10, (FCA::kArgsLength - 1) * kPointerSize);
    __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
    // FunctionCallbackInfo::length_ = argc and
    // FunctionCallbackInfo::is_construct_call
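    // (reused here to hold the number of bytes to unwind on return:
    // (argc + FCA::kArgsLength + 1) * kPointerSize)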
    __ Add(x10, argc.reg(), FCA::kArgsLength + 1);
    __ Mov(x10, Operand(x10, LSL, kPointerSizeLog2));
    __ Stp(argc.reg(), x10, MemOperand(x0, 2 * kPointerSize));
  }

  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(masm->isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Store operations return the first JS argument.
  int return_value_offset = 0;
  if (return_first_arg) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
  int stack_space = 0;
  MemOperand is_construct_call_operand =
      MemOperand(masm->StackPointer(), 4 * kPointerSize);
  MemOperand* stack_space_operand = &is_construct_call_operand;
  if (argc.is_immediate()) {
    stack_space = argc.immediate() + FCA::kArgsLength + 1;
    stack_space_operand = NULL;
  }

  const int spill_offset = 1 + kApiStackSpace;
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                           stack_space_operand, spill_offset,
                           return_value_operand, &context_restore_operand);
}


void CallApiFunctionStub::Generate(MacroAssembler* masm) {
  bool call_data_undefined = this->call_data_undefined();
  CallApiFunctionStubHelper(masm, ParameterCount(x3), false,
                            call_data_undefined);
}


void CallApiAccessorStub::Generate(MacroAssembler* masm) {
  bool is_store = this->is_store();
  int argc = this->argc();
  bool call_data_undefined = this->call_data_undefined();
  CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
                            call_data_undefined);
}


void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- sp[0]                  : name
  //  -- sp[8 - kArgsLength*8]  : PropertyCallbackArguments object
  //  -- ...
  //  -- x2                     : api_function_address
  // -----------------------------------

  Register api_function_address = ApiGetterDescriptor::function_address();
  DCHECK(api_function_address.is(x2));

  __ Mov(x0, masm->StackPointer());  // x0 = Handle<Name>
  __ Add(x1, x0, 1 * kPointerSize);  // x1 = PCA

  const int kApiStackSpace = 1;

  // Allocate space so that CallApiFunctionAndReturn can store some scratch
  // registers on the stack.
  const int kCallApiFunctionSpillSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);

  // Create PropertyAccessorInfo instance on the stack above the exit frame
  // with x1 (internal::Object** args_) as the data.
  __ Poke(x1, 1 * kPointerSize);
  __ Add(x1, masm->StackPointer(), 1 * kPointerSize);  // x1 = AccessorInfo&

  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());

  const int spill_offset = 1 + kApiStackSpace;
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, NULL, spill_offset,
                           MemOperand(fp, 6 * kPointerSize), NULL);
}


#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64