// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_S390

#include "src/code-stubs.h"
#include "src/api-arguments.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
#include "src/s390/code-stubs-s390.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)
void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
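  // r2 presumably holds argc, r3 the constructor and r4 the allocation site
  // (per the stub's call descriptor): the constructor is stored into the slot
  // just past the last argument, both extra values are pushed, and argc is
  // bumped by 3 so Runtime::kNewArray sees everything.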
  __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
  __ StoreP(r3, MemOperand(sp, r1));
  __ push(r3);
  __ push(r4);
  __ AddP(r2, r2, Operand(3));
  __ TailCallRuntime(Runtime::kNewArray);
}

void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
  descriptor->Initialize(r2, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}

void FastFunctionBindStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
  descriptor->Initialize(r2, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}

static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
                                    Register rhs, Label* lhs_not_nan,
                                    Label* slow, bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
                                           Register rhs);

void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetRegisterParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    DCHECK(param_count == 0 ||
           r2.is(descriptor.GetRegisterParameter(param_count - 1)));
    // Push arguments
    for (int i = 0; i < param_count; ++i) {
      __ push(descriptor.GetRegisterParameter(i));
    }
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}

void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done, fastpath_done;
  Register input_reg = source();
  Register result_reg = destination();
  DCHECK(is_truncating());

  int double_offset = offset();

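  // The slow path below performs the truncating conversion entirely in
  // integer registers: the double is re-read as two 32-bit words and the
  // mantissa is shifted into place by an amount derived from the exponent.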
  // Immediate values for this stub fit in instructions, so it's safe to use ip.
  Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch_low =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch_high =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
  DoubleRegister double_scratch = kScratchDoubleReg;

  __ push(scratch);
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += kPointerSize;

  if (!skip_fastpath()) {
    // Load double input.
    __ LoadDouble(double_scratch, MemOperand(input_reg, double_offset));

    // Do fast-path convert from double to int.
    __ ConvertDoubleToInt64(double_scratch,
#if !V8_TARGET_ARCH_S390X
                            scratch,
#endif
                            result_reg, d0);

// Test for overflow
#if V8_TARGET_ARCH_S390X
    __ TestIfInt32(result_reg, r0);
#else
    __ TestIfInt32(scratch, result_reg, r0);
#endif
    __ beq(&fastpath_done, Label::kNear);
  }

  __ Push(scratch_high, scratch_low);
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 2 * kPointerSize;

  __ LoadlW(scratch_high,
            MemOperand(input_reg, double_offset + Register::kExponentOffset));
  __ LoadlW(scratch_low,
            MemOperand(input_reg, double_offset + Register::kMantissaOffset));

  __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
  // Load scratch with exponent - 1. This is faster than loading
  // with exponent because Bias + 1 = 1024 which is an *S390* immediate value.
  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
  __ SubP(scratch, Operand(HeapNumber::kExponentBias + 1));
  // If the exponent is greater than or equal to 84, the 32 least significant
  // bits of the result are all zero (52 mantissa bits plus 32 low-order zero
  // bits), so the result is 0.
  // Compare exponent with 84 (compare exponent - 1 with 83).
  __ CmpP(scratch, Operand(83));
  __ bge(&out_of_range, Label::kNear);

  // If we reach this code, 31 <= exponent <= 83.
  // So, we don't have to handle cases where 0 <= exponent <= 20 for
  // which we would need to shift right the high part of the mantissa.
  // Scratch contains exponent - 1.
  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
  __ Load(r0, Operand(51));
  __ SubP(scratch, r0, scratch);
  __ CmpP(scratch, Operand::Zero());
  __ ble(&only_low, Label::kNear);
  // 21 <= exponent <= 51, shift scratch_low and scratch_high
  // to generate the result.
  __ ShiftRight(scratch_low, scratch_low, scratch);
  // Scratch contains: 52 - exponent.
  // We need: exponent - 20.
  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
  __ Load(r0, Operand(32));
  __ SubP(scratch, r0, scratch);
  __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
  // Set the implicit 1 before the mantissa part in scratch_high.
  STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
  __ Load(r0, Operand(1 << (HeapNumber::kMantissaBitsInTopWord - 16)));
  __ ShiftLeftP(r0, r0, Operand(16));
  __ OrP(result_reg, result_reg, r0);
  __ ShiftLeft(r0, result_reg, scratch);
  __ OrP(result_reg, scratch_low, r0);
  __ b(&negate, Label::kNear);

  __ bind(&out_of_range);
  __ mov(result_reg, Operand::Zero());
  __ b(&done, Label::kNear);

  __ bind(&only_low);
  // 52 <= exponent <= 83, shift only scratch_low.
  // On entry, scratch contains: 52 - exponent.
  __ LoadComplementRR(scratch, scratch);
  __ ShiftLeft(result_reg, scratch_low, scratch);

  __ bind(&negate);
  // If the input was positive, scratch_high ASR 31 equals 0 and
  // scratch_high LSR 31 equals zero.
  // New result = (result eor 0) + 0 = result.
  // If the input was negative, we have to negate the result.
  // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
  // New result = (result eor 0xffffffff) + 1 = 0 - result.
  __ ShiftRightArith(r0, scratch_high, Operand(31));
#if V8_TARGET_ARCH_S390X
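  // On 64-bit the arithmetic shift above is a 32-bit operation, so normalize
  // r0 to a clean zero-extended mask (0 or 0xffffffff) before the 64-bit
  // XorP below.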
  __ lgfr(r0, r0);
  __ ShiftRightP(r0, r0, Operand(32));
#endif
  __ XorP(result_reg, r0);
  __ ShiftRight(r0, scratch_high, Operand(31));
  __ AddP(result_reg, r0);

  __ bind(&done);
  __ Pop(scratch_high, scratch_low);

  __ bind(&fastpath_done);
  __ pop(scratch);

  __ Ret();
}

// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cond) {
  Label not_identical;
  Label heap_number, return_equal;
  __ CmpP(r2, r3);
  __ bne(&not_identical);

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // The operands are identical and they are not both Smis, so neither of
  // them is a Smi.  If it's not a heap number, then return equal.
  if (cond == lt || cond == gt) {
    // Call runtime on identical JSObjects.
    __ CompareObjectType(r2, r6, r6, FIRST_JS_RECEIVER_TYPE);
    __ bge(slow);
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ CmpP(r6, Operand(SYMBOL_TYPE));
    __ beq(slow);
    // Call runtime on identical SIMD values since we must throw a TypeError.
    __ CmpP(r6, Operand(SIMD128_VALUE_TYPE));
    __ beq(slow);
  } else {
    __ CompareObjectType(r2, r6, r6, HEAP_NUMBER_TYPE);
    __ beq(&heap_number);
    // Comparing JS objects with <=, >= is complicated.
    if (cond != eq) {
      __ CmpP(r6, Operand(FIRST_JS_RECEIVER_TYPE));
      __ bge(slow);
      // Call runtime on identical symbols since we need to throw a TypeError.
      __ CmpP(r6, Operand(SYMBOL_TYPE));
      __ beq(slow);
      // Call runtime on identical SIMD values since we must throw a TypeError.
      __ CmpP(r6, Operand(SIMD128_VALUE_TYPE));
      __ beq(slow);
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
      if (cond == le || cond == ge) {
        __ CmpP(r6, Operand(ODDBALL_TYPE));
        __ bne(&return_equal);
        __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
        __ bne(&return_equal);
        if (cond == le) {
          // undefined <= undefined should fail.
          __ LoadImmP(r2, Operand(GREATER));
        } else {
          // undefined >= undefined should fail.
          __ LoadImmP(r2, Operand(LESS));
        }
        __ Ret();
      }
    }
  }

  __ bind(&return_equal);
  if (cond == lt) {
    __ LoadImmP(r2, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ LoadImmP(r2, Operand(LESS));  // Things aren't greater than themselves.
  } else {
    __ LoadImmP(r2, Operand(EQUAL));  // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless.  For the others here is some code to check
  // for NaN.
  if (cond != lt && cond != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
    // Read top bits of double representation (second word of value).
    __ LoadlW(r4, FieldMemOperand(r2, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    STATIC_ASSERT(HeapNumber::kExponentMask == 0x7ff00000u);
    __ ExtractBitMask(r5, r4, HeapNumber::kExponentMask);
    __ CmpLogicalP(r5, Operand(0x7ff));
    __ bne(&return_equal);

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ sll(r4, Operand(HeapNumber::kNonMantissaBitsInTopWord));
    // Or with all low-bits of mantissa.
    __ LoadlW(r5, FieldMemOperand(r2, HeapNumber::kMantissaOffset));
    __ OrP(r2, r5, r4);
    __ CmpP(r2, Operand::Zero());
    // For equal we already have the right value in r2:  Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN).  For <= and >= we need to load r2 with the failing
    // value if it's a NaN.
    if (cond != eq) {
      Label not_equal;
      __ bne(&not_equal, Label::kNear);
      // All-zero means Infinity means equal.
      __ Ret();
      __ bind(&not_equal);
      if (cond == le) {
        __ LoadImmP(r2, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ LoadImmP(r2, Operand(LESS));  // NaN >= NaN should fail.
      }
    }
    __ Ret();
  }
  // No fall through here.

  __ bind(&not_identical);
}

// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
                                    Register rhs, Label* lhs_not_nan,
                                    Label* slow, bool strict) {
  DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));

  Label rhs_is_smi;
  __ JumpIfSmi(rhs, &rhs_is_smi);

  // Lhs is a Smi.  Check whether the rhs is a heap number.
  __ CompareObjectType(rhs, r5, r6, HEAP_NUMBER_TYPE);
  if (strict) {
    // If rhs is not a number and lhs is a Smi then strict equality cannot
    // succeed.  Return non-equal.
    // If rhs is r2 then there is already a non-zero value in it.
    Label skip;
    __ beq(&skip, Label::kNear);
    if (!rhs.is(r2)) {
      __ mov(r2, Operand(NOT_EQUAL));
    }
    __ Ret();
    __ bind(&skip);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
    // the runtime.
    __ bne(slow);
  }

  // Lhs is a smi, rhs is a number.
  // Convert lhs to a double in d7.
  __ SmiToDouble(d7, lhs);
  // Load the double from rhs, tagged HeapNumber r2, to d6.
  __ LoadDouble(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  // We now have both loaded as doubles but we can skip the lhs nan check
  // since it's a smi.
  __ b(lhs_not_nan);

  __ bind(&rhs_is_smi);
  // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
  __ CompareObjectType(lhs, r6, r6, HEAP_NUMBER_TYPE);
  if (strict) {
    // If lhs is not a number and rhs is a smi then strict equality cannot
    // succeed.  Return non-equal.
    // If lhs is r2 then there is already a non-zero value in it.
    Label skip;
    __ beq(&skip, Label::kNear);
    if (!lhs.is(r2)) {
      __ mov(r2, Operand(NOT_EQUAL));
    }
    __ Ret();
    __ bind(&skip);
  } else {
    // Smi compared non-strictly with a non-smi non-heap-number.  Call
    // the runtime.
    __ bne(slow);
  }

  // Rhs is a smi, lhs is a heap number.
  // Load the double from lhs, tagged HeapNumber r3, to d7.
  __ LoadDouble(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  // Convert rhs to a double in d6.
  __ SmiToDouble(d6, rhs);
  // Fall through to both_loaded_as_doubles.
}

// See comment at call site.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
                                           Register rhs) {
  DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));

  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  Label first_non_object;
  // Get the type of the first operand into r4 and compare it with
  // FIRST_JS_RECEIVER_TYPE.
  __ CompareObjectType(rhs, r4, r4, FIRST_JS_RECEIVER_TYPE);
  __ blt(&first_non_object, Label::kNear);

  // Return non-zero (r2 is not zero).
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret();

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ CmpP(r4, Operand(ODDBALL_TYPE));
  __ beq(&return_not_equal);

  __ CompareObjectType(lhs, r5, r5, FIRST_JS_RECEIVER_TYPE);
  __ bge(&return_not_equal);

  // Check for oddballs: true, false, null, undefined.
  __ CmpP(r5, Operand(ODDBALL_TYPE));
  __ beq(&return_not_equal);

  // Now that we have the types we might as well check for
  // internalized-internalized.
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ OrP(r4, r4, r5);
  __ AndP(r0, r4, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ beq(&return_not_equal);
}

// See comment at call site.
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers, Label* slow) {
  DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));

  __ CompareObjectType(rhs, r5, r4, HEAP_NUMBER_TYPE);
  __ bne(not_heap_numbers);
  __ LoadP(r4, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ CmpP(r4, r5);
  __ bne(slow);  // First was a heap number, second wasn't.  Go slow case.

  // Both are heap numbers.  Load them up then jump to the code we have
  // for that.
  __ LoadDouble(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  __ LoadDouble(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));

  __ b(both_loaded_as_doubles);
}

// Fast negative check for internalized-to-internalized equality or receiver
// equality. Also handles the undetectable receiver to null/undefined
// comparison.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs, Register rhs,
                                                     Label* possible_strings,
                                                     Label* runtime_call) {
  DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));

  // r4 is object type of rhs.
  Label object_test, return_equal, return_unequal, undetectable;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ mov(r0, Operand(kIsNotStringMask));
  __ AndP(r0, r4);
  __ bne(&object_test, Label::kNear);
  __ mov(r0, Operand(kIsNotInternalizedMask));
  __ AndP(r0, r4);
  __ bne(possible_strings);
  __ CompareObjectType(lhs, r5, r5, FIRST_NONSTRING_TYPE);
  __ bge(runtime_call);
  __ mov(r0, Operand(kIsNotInternalizedMask));
  __ AndP(r0, r5);
  __ bne(possible_strings);

  // Both are internalized. We already checked they weren't the same pointer so
  // they are not equal. Return non-equal by returning the non-zero object
  // pointer in r2.
  __ Ret();

  __ bind(&object_test);
  __ LoadP(r4, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ LoadP(r5, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ LoadlB(r6, FieldMemOperand(r4, Map::kBitFieldOffset));
  __ LoadlB(r7, FieldMemOperand(r5, Map::kBitFieldOffset));
  __ AndP(r0, r6, Operand(1 << Map::kIsUndetectable));
  __ bne(&undetectable);
  __ AndP(r0, r7, Operand(1 << Map::kIsUndetectable));
  __ bne(&return_unequal);

  __ CompareInstanceType(r4, r4, FIRST_JS_RECEIVER_TYPE);
  __ blt(runtime_call);
  __ CompareInstanceType(r5, r5, FIRST_JS_RECEIVER_TYPE);
  __ blt(runtime_call);

  __ bind(&return_unequal);
  // Return non-equal by returning the non-zero object pointer in r2.
  __ Ret();

  __ bind(&undetectable);
  __ AndP(r0, r7, Operand(1 << Map::kIsUndetectable));
  __ beq(&return_unequal);

  // If both sides are JSReceivers, then the result is false according to
  // the HTML specification, which says that only comparisons with null or
  // undefined are affected by special casing for document.all.
  __ CompareInstanceType(r4, r4, ODDBALL_TYPE);
  __ beq(&return_equal);
  __ CompareInstanceType(r5, r5, ODDBALL_TYPE);
  __ bne(&return_unequal);

  __ bind(&return_equal);
  __ LoadImmP(r2, Operand(EQUAL));
  __ Ret();
}

static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         Register scratch,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/non-internalized here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}

// On entry r3 and r2 are the values to be compared.
// On exit r2 is 0, positive or negative to indicate the result of
// the comparison.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = r3;
  Register rhs = r2;
  Condition cc = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, r4, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, r5, right(), &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles, lhs_not_nan;

  Label not_two_smis, smi_done;
  __ OrP(r4, r3, r2);
  __ JumpIfNotSmi(r4, &not_two_smis);
  __ SmiUntag(r3);
  __ SmiUntag(r2);
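  // Both operands are now untagged; their signed difference is already a
  // valid comparison result (zero, negative or positive), so return it
  // directly in r2.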
  __ SubP(r2, r3, r2);
  __ Ret();
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical.  Either returns the answer
  // or goes to slow.  Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
  __ AndP(r4, lhs, rhs);
  __ JumpIfNotSmi(r4, &not_smis);
  // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to lhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison.  The double values of the numbers have been loaded
  // into d7 and d6.
  EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in d6 and d7.
  __ bind(&lhs_not_nan);
  Label no_nan;
  __ cdbr(d7, d6);

  Label nan, equal, less_than;
  __ bunordered(&nan);
  __ beq(&equal, Label::kNear);
  __ blt(&less_than, Label::kNear);
  __ LoadImmP(r2, Operand(GREATER));
  __ Ret();
  __ bind(&equal);
  __ LoadImmP(r2, Operand(EQUAL));
  __ Ret();
  __ bind(&less_than);
  __ LoadImmP(r2, Operand(LESS));
  __ Ret();

  __ bind(&nan);
  // If one of the sides was a NaN then the comparison is unordered.  Load r2
  // with whatever it takes to make the comparison fail, since comparisons
  // with NaN always fail.
  if (cc == lt || cc == le) {
    __ LoadImmP(r2, Operand(GREATER));
  } else {
    __ LoadImmP(r2, Operand(LESS));
  }
  __ Ret();

  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi.  The objects are in rhs_ and lhs_.
  if (strict()) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison.  Can jump to slow case,
  // or load both doubles into r2, r3, r4, r5 and jump to the code that handles
  // that case.  If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case r4 will contain the type of rhs_.  Never falls through.
  EmitCheckForTwoHeapNumbers(masm, lhs, rhs, &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
  // internalized strings.
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that r4 is the type of rhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, &flat_string_check,
                                             &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r4, r5, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r4,
                      r5);
  if (cc == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r4, r5);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r4, r5, r6);
  }
  // Never falls through to here.

  __ bind(&slow);

  if (cc == eq) {
    {
      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
      __ Push(lhs, rhs);
      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
    }
    // Turn true into 0 and false into some non-zero value.
    STATIC_ASSERT(EQUAL == 0);
    __ LoadRoot(r3, Heap::kTrueValueRootIndex);
    __ SubP(r2, r2, r3);
    __ Ret();
  } else {
    __ Push(lhs, rhs);
    int ncr;  // NaN compare result
    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
      DCHECK(cc == gt || cc == ge);  // remaining cases
      ncr = LESS;
    }
    __ LoadSmiLiteral(r2, Smi::FromInt(ncr));
    __ push(r2);

    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    // tagged as a small integer.
    __ TailCallRuntime(Runtime::kCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}

void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ MultiPush(kJSCallerSaved | r14.bit());
  if (save_doubles()) {
    __ MultiPushDoubles(kCallerSavedDoubles);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;
  const Register scratch = r3;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()),
                   argument_count);
  if (save_doubles()) {
    __ MultiPopDoubles(kCallerSavedDoubles);
  }
  __ MultiPop(kJSCallerSaved | r14.bit());
  __ Ret();
}

void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ PushSafepointRegisters();
  __ b(r14);
}

void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ PopSafepointRegisters();
  __ b(r14);
}

void MathPowStub::Generate(MacroAssembler* masm) {
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(r4));
  const DoubleRegister double_base = d1;
  const DoubleRegister double_exponent = d2;
  const DoubleRegister double_result = d3;
  const DoubleRegister double_scratch = d0;
  const Register scratch = r1;
  const Register scratch2 = r9;

  Label call_runtime, done, int_exponent;
  if (exponent_type() == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ LoadDouble(double_exponent,
                  FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type() != INTEGER) {
    // Detect integer exponents stored as double.
    __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2,
                             double_scratch);
    __ beq(&int_exponent, Label::kNear);

    __ push(r14);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()), 0, 2);
    }
    __ pop(r14);
    __ MovFromFloatResult(double_result);
    __ b(&done);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type() == INTEGER) {
    __ LoadRR(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ LoadRR(exponent, scratch);
  }
  __ ldr(double_scratch, double_base);  // Back up base.
  __ LoadImmP(scratch2, Operand(1));
  __ ConvertIntToDouble(scratch2, double_result);

  // Get absolute value of exponent.
  Label positive_exponent;
  __ CmpP(scratch, Operand::Zero());
  __ bge(&positive_exponent, Label::kNear);
  __ LoadComplementRR(scratch, scratch);
  __ bind(&positive_exponent);

  Label while_true, no_carry, loop_end;
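  // Exponentiation by squaring: multiply the result by the current base
  // whenever the low bit of the exponent is set, square the base, and shift
  // the exponent right until it reaches zero.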
  __ bind(&while_true);
  __ mov(scratch2, Operand(1));
  __ AndP(scratch2, scratch);
  __ beq(&no_carry, Label::kNear);
  __ mdbr(double_result, double_scratch);
  __ bind(&no_carry);
  __ ShiftRightP(scratch, scratch, Operand(1));
  __ LoadAndTestP(scratch, scratch);
  __ beq(&loop_end, Label::kNear);
  __ mdbr(double_scratch, double_scratch);
  __ b(&while_true);
  __ bind(&loop_end);

  __ CmpP(exponent, Operand::Zero());
  __ bge(&done);
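  // The loop above computed x^|n|; for a negative exponent take the
  // reciprocal (x^-n == 1/x^n), subject to the subnormal bail-out below.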

  // get 1/double_result:
  __ ldr(double_scratch, double_result);
  __ LoadImmP(scratch2, Operand(1));
  __ ConvertIntToDouble(scratch2, double_result);
  __ ddbr(double_result, double_scratch);

  // Test whether result is zero.  Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ lzdr(kDoubleRegZero);
  __ cdbr(double_result, kDoubleRegZero);
  __ bne(&done, Label::kNear);
  // double_exponent may not contain the exponent value if the input was a
  // smi.  We set it with the exponent value before bailing out.
  __ ConvertIntToDouble(exponent, double_exponent);

  // Returning or bailing out.
  __ push(r14);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(0, 2, scratch);
    __ MovToFloatParameters(double_base, double_exponent);
    __ CallCFunction(
        ExternalReference::power_double_double_function(isolate()), 0, 2);
  }
  __ pop(r14);
  __ MovFromFloatResult(double_result);

  __ bind(&done);
  __ Ret();
}

bool CEntryStub::NeedsImmovableCode() { return true; }

void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
  StoreFastElementStub::GenerateAheadOfTime(isolate);
}

void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  StoreRegistersStateStub stub(isolate);
  stub.GetCode();
}

void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  RestoreRegistersStateStub stub(isolate);
  stub.GetCode();
}

void CodeStub::GenerateFPStubs(Isolate* isolate) {
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub(isolate, 1, mode).GetCode();
  StoreBufferOverflowStub(isolate, mode).GetCode();
  isolate->set_fp_stubs_generated(true);
}

void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
}

void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function.
  // r2: number of arguments including receiver
  // r3: pointer to builtin function
  // fp: frame pointer  (restored after C call)
  // sp: stack pointer  (restored as callee's sp after C call)
  // cp: current context  (C callee-saved)
  //
  // If argv_in_register():
  // r4: pointer to the first argument
  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  __ LoadRR(r7, r3);

  if (argv_in_register()) {
    // Move argv into the correct register.
    __ LoadRR(r3, r4);
  } else {
    // Compute the argv pointer.
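    // argv = sp + argc * kPointerSize - kPointerSize, i.e. the address of
    // the first argument, consistent with the register comments at entry.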
    __ ShiftLeftP(r3, r2, Operand(kPointerSizeLog2));
    __ lay(r3, MemOperand(r3, sp, -kPointerSize));
  }

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);

  // Need at least one extra slot for return address location.
  int arg_stack_space = 1;

  // Pass buffer for return value on stack if necessary
  bool needs_return_buffer =
      result_size() > 2 ||
      (result_size() == 2 && !ABI_RETURNS_OBJECTPAIR_IN_REGS);
  if (needs_return_buffer) {
    arg_stack_space += result_size();
  }

#if V8_TARGET_ARCH_S390X
  // 64-bit linux passes the Argument object by reference, not value.
  arg_stack_space += 2;
#endif

  __ EnterExitFrame(save_doubles(), arg_stack_space, is_builtin_exit()
                                           ? StackFrame::BUILTIN_EXIT
                                           : StackFrame::EXIT);

  // Store a copy of argc, argv in callee-saved registers for later.
  __ LoadRR(r6, r2);
  __ LoadRR(r8, r3);
  // r2, r6: number of arguments including receiver  (C callee-saved)
  // r3, r8: pointer to the first argument
  // r7: pointer to builtin function  (C callee-saved)

  // Result returned in registers or stack, depending on result size and ABI.

  Register isolate_reg = r4;
  if (needs_return_buffer) {
    // The return value is a 16-byte non-scalar value.
    // Use frame storage reserved by the calling function to pass the return
    // buffer as an implicit first argument in r2.  Shift the original
    // parameters by one register each.
    __ LoadRR(r4, r3);
    __ LoadRR(r3, r2);
    __ la(r2, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
    isolate_reg = r5;
  }
  // Call C built-in.
  __ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate())));

  Register target = r7;

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  {
    Label return_label;
    __ larl(r14, &return_label);  // Generate the return addr of call later.
    __ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize));

    // zLinux ABI requires caller's frame to have sufficient space for callee
    // preserved register save area.
    // __ lay(sp, MemOperand(sp, -kCalleeRegisterSaveAreaSize));
    __ b(target);
    __ bind(&return_label);
    // __ la(sp, MemOperand(sp, +kCalleeRegisterSaveAreaSize));
  }

  // If return value is on the stack, pop it to registers.
  if (needs_return_buffer) {
    if (result_size() > 2) __ LoadP(r4, MemOperand(r2, 2 * kPointerSize));
    __ LoadP(r3, MemOperand(r2, kPointerSize));
    __ LoadP(r2, MemOperand(r2));
  }

  // Check result for exception sentinel.
  Label exception_returned;
  __ CompareRoot(r2, Heap::kExceptionRootIndex);
  __ beq(&exception_returned, Label::kNear);

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    ExternalReference pending_exception_address(
        Isolate::kPendingExceptionAddress, isolate());
    __ mov(r1, Operand(pending_exception_address));
    __ LoadP(r1, MemOperand(r1));
    __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
    // Cannot use check here as it attempts to generate call into runtime.
    __ beq(&okay, Label::kNear);
    __ stop("Unexpected pending exception");
    __ bind(&okay);
  }

  // Exit C frame and return.
  // r2:r3: result
  // sp: stack pointer
  // fp: frame pointer
  Register argc;
  if (argv_in_register()) {
    // We don't want to pop arguments so set argc to no_reg.
    argc = no_reg;
  } else {
    // r6: still holds argc (callee-saved).
    argc = r6;
  }
  __ LeaveExitFrame(save_doubles(), argc, true);
  __ b(r14);

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address(
      Isolate::kPendingHandlerContextAddress, isolate());
  ExternalReference pending_handler_code_address(
      Isolate::kPendingHandlerCodeAddress, isolate());
  ExternalReference pending_handler_offset_address(
      Isolate::kPendingHandlerOffsetAddress, isolate());
  ExternalReference pending_handler_fp_address(
      Isolate::kPendingHandlerFPAddress, isolate());
  ExternalReference pending_handler_sp_address(
      Isolate::kPendingHandlerSPAddress, isolate());

  // Ask the runtime for help to determine the handler. This will set r3 to
  // contain the current pending exception, don't clobber it.
  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
                                 isolate());
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ PrepareCallCFunction(3, 0, r2);
    __ LoadImmP(r2, Operand::Zero());
    __ LoadImmP(r3, Operand::Zero());
    __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
    __ CallCFunction(find_handler, 3);
  }

  // Retrieve the handler context, SP and FP.
  __ mov(cp, Operand(pending_handler_context_address));
  __ LoadP(cp, MemOperand(cp));
  __ mov(sp, Operand(pending_handler_sp_address));
  __ LoadP(sp, MemOperand(sp));
  __ mov(fp, Operand(pending_handler_fp_address));
  __ LoadP(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  Label skip;
  __ CmpP(cp, Operand::Zero());
  __ beq(&skip, Label::kNear);
  __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ bind(&skip);

  // Compute the handler entry address and jump to it.
  __ mov(r3, Operand(pending_handler_code_address));
  __ LoadP(r3, MemOperand(r3));
  __ mov(r4, Operand(pending_handler_offset_address));
  __ LoadP(r4, MemOperand(r4));
  __ AddP(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start
  __ AddP(ip, r3, r4);
  __ Jump(ip);
}

void JSEntryStub::Generate(MacroAssembler* masm) {
  // r2: code entry
  // r3: function
  // r4: receiver
  // r5: argc
  // r6: argv

  Label invoke, handler_entry, exit;

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

// saving floating point registers
#if V8_TARGET_ARCH_S390X
  // 64bit ABI requires f8 to f15 be saved
  __ lay(sp, MemOperand(sp, -8 * kDoubleSize));
  __ std(d8, MemOperand(sp));
  __ std(d9, MemOperand(sp, 1 * kDoubleSize));
  __ std(d10, MemOperand(sp, 2 * kDoubleSize));
  __ std(d11, MemOperand(sp, 3 * kDoubleSize));
  __ std(d12, MemOperand(sp, 4 * kDoubleSize));
  __ std(d13, MemOperand(sp, 5 * kDoubleSize));
  __ std(d14, MemOperand(sp, 6 * kDoubleSize));
  __ std(d15, MemOperand(sp, 7 * kDoubleSize));
#else
  // 31bit ABI requires you to store f4 and f6:
  // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
  __ lay(sp, MemOperand(sp, -2 * kDoubleSize));
  __ std(d4, MemOperand(sp));
  __ std(d6, MemOperand(sp, kDoubleSize));
#endif

  // zLinux ABI
  //    Incoming parameters:
  //          r2: code entry
  //          r3: function
  //          r4: receiver
  //          r5: argc
  //          r6: argv
  //    Requires us to save the callee-preserved registers r6-r13
  //    General convention is to also save r14 (return addr) and
  //    sp/r15 as well in a single STM/STMG
  __ lay(sp, MemOperand(sp, -10 * kPointerSize));
  __ StoreMultipleP(r6, sp, MemOperand(sp, 0));

  // Set up the reserved register for 0.0.
  // __ LoadDoubleLiteral(kDoubleRegZero, 0.0, r0);

  // Push a frame with special values setup to mark it as an entry frame.
  //   Bad FP (-1)
  //   SMI Marker
  //   SMI Marker
  //   kCEntryFPAddress
  //   Frame type
  __ lay(sp, MemOperand(sp, -5 * kPointerSize));
  // Push a bad frame pointer to fail if it is used.
  __ LoadImmP(r10, Operand(-1));

  int marker = type();
  __ LoadSmiLiteral(r9, Smi::FromInt(marker));
  __ LoadSmiLiteral(r8, Smi::FromInt(marker));
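  // The marker is kept in both r8 and r9 so the single StoreMultipleP below
  // fills both "SMI Marker" slots of the entry-frame layout sketched above.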
  // Save copies of the top frame descriptor on the stack.
  __ mov(r7, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ LoadP(r7, MemOperand(r7));
  __ StoreMultipleP(r7, r10, MemOperand(sp, kPointerSize));
  // Set up frame pointer for the frame to be pushed.
  // Need to add kPointerSize, because sp has one extra
  // frame already for the frame type being pushed later.
  __ lay(fp,
         MemOperand(sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize));

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
  __ mov(r7, Operand(ExternalReference(js_entry_sp)));
  __ LoadAndTestP(r8, MemOperand(r7));
  __ bne(&non_outermost_js, Label::kNear);
  __ StoreP(fp, MemOperand(r7));
  __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
  Label cont;
  __ b(&cont, Label::kNear);
  __ bind(&non_outermost_js);
  __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));

  __ bind(&cont);
  __ StoreP(ip, MemOperand(sp));  // frame-type

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ b(&invoke, Label::kNear);

  __ bind(&handler_entry);
  handler_offset_ = handler_entry.pos();
  // Caught exception: Store result (exception) in the pending exception
  // field in the JSEnv and return a failure sentinel.  Coming in here the
  // fp will be invalid because the PushStackHandler below sets it to 0 to
  // signal the existence of the JSEntry frame.
  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate())));

  __ StoreP(r2, MemOperand(ip));
  __ LoadRoot(r2, Heap::kExceptionRootIndex);
  __ b(&exit, Label::kNear);

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  // Must preserve r2-r6.
  __ PushStackHandler();
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the b(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Invoke the function by calling through JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Expected registers by Builtins::JSEntryTrampoline
  // r2: code entry
  // r3: function
  // r4: receiver
  // r5: argc
  // r6: argv
  if (type() == StackFrame::ENTRY_CONSTRUCT) {
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                      isolate());
    __ mov(ip, Operand(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
    __ mov(ip, Operand(entry));
  }
  __ LoadP(ip, MemOperand(ip));  // deref address

  // Branch and link to JSEntryTrampoline.
  // the address points to the start of the code object, skip the header
  __ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
  Label return_addr;
  // __ basr(r14, ip);
  __ larl(r14, &return_addr);
  __ b(ip);
  __ bind(&return_addr);

  // Unlink this frame from the handler chain.
  __ PopStackHandler();

  __ bind(&exit);  // r2 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(r7);
  __ CmpSmiLiteral(r7, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0);
  __ bne(&non_outermost_js_2, Label::kNear);
  __ mov(r8, Operand::Zero());
  __ mov(r7, Operand(ExternalReference(js_entry_sp)));
  __ StoreP(r8, MemOperand(r7));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(r5);
  __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ StoreP(r5, MemOperand(ip));

  // Reset the stack to the callee saved registers.
  __ lay(sp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset));

  // Reload callee-saved preserved regs, return address reg (r14) and sp
  __ LoadMultipleP(r6, sp, MemOperand(sp, 0));
  __ la(sp, MemOperand(sp, 10 * kPointerSize));

// restoring floating point registers
#if V8_TARGET_ARCH_S390X
  // 64bit ABI requires f8 to f15 be restored
  __ ld(d8, MemOperand(sp));
  __ ld(d9, MemOperand(sp, 1 * kDoubleSize));
  __ ld(d10, MemOperand(sp, 2 * kDoubleSize));
  __ ld(d11, MemOperand(sp, 3 * kDoubleSize));
  __ ld(d12, MemOperand(sp, 4 * kDoubleSize));
  __ ld(d13, MemOperand(sp, 5 * kDoubleSize));
  __ ld(d14, MemOperand(sp, 6 * kDoubleSize));
  __ ld(d15, MemOperand(sp, 7 * kDoubleSize));
  __ la(sp, MemOperand(sp, 8 * kDoubleSize));
#else
  // 31bit ABI requires you to restore f4 and f6:
  // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
  __ ld(d4, MemOperand(sp));
  __ ld(d6, MemOperand(sp, kDoubleSize));
  __ la(sp, MemOperand(sp, 2 * kDoubleSize));
#endif

  __ b(r14);
}

void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
  Label miss;
  Register receiver = LoadDescriptor::ReceiverRegister();
  // Ensure that the vector and slot registers won't be clobbered before
  // calling the miss handler.
  DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::VectorRegister(),
                     LoadWithVectorDescriptor::SlotRegister()));

  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r6,
                                                          r7, &miss);
  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
}

void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
  // Return address is in r14.
  Label miss;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register index = LoadDescriptor::NameRegister();
  Register scratch = r7;
  Register result = r2;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));
  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
         result.is(LoadWithVectorDescriptor::SlotRegister()));

  // StringCharAtGenerator doesn't use the result register until it's passed
  // the different miss possibilities. If it did, we would have a conflict
  // when FLAG_vector_ics is true.
  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          RECEIVER_IS_STRING);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);

  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}

Generate(MacroAssembler * masm)1277 void RegExpExecStub::Generate(MacroAssembler* masm) {
1278 // Just jump directly to runtime if native RegExp is not selected at compile
1279 // time or if regexp entry in generated code is turned off runtime switch or
1280 // at compilation.
1281 #ifdef V8_INTERPRETED_REGEXP
1282   __ TailCallRuntime(Runtime::kRegExpExec);
1283 #else   // V8_INTERPRETED_REGEXP
1284 
1285   // Stack frame on entry.
1286   //  sp[0]: last_match_info (expected JSArray)
1287   //  sp[4]: previous index
1288   //  sp[8]: subject string
1289   //  sp[12]: JSRegExp object
1290 
1291   const int kLastMatchInfoOffset = 0 * kPointerSize;
1292   const int kPreviousIndexOffset = 1 * kPointerSize;
1293   const int kSubjectOffset = 2 * kPointerSize;
1294   const int kJSRegExpOffset = 3 * kPointerSize;
1295 
1296   Label runtime, br_over, encoding_type_UC16;
1297 
1298   // Allocation of registers for this function. These are in callee save
1299   // registers and will be preserved by the call to the native RegExp code, as
1300   // this code is called using the normal C calling convention. When calling
1301   // directly from generated code the native RegExp code will not do a GC and
1302   // therefore the content of these registers are safe to use after the call.
1303   Register subject = r6;
1304   Register regexp_data = r7;
1305   Register last_match_info_elements = r8;
1306   Register code = r9;
1307 
1308   __ CleanseP(r14);
1309 
1310   // Ensure register assigments are consistent with callee save masks
1311   DCHECK(subject.bit() & kCalleeSaved);
1312   DCHECK(regexp_data.bit() & kCalleeSaved);
1313   DCHECK(last_match_info_elements.bit() & kCalleeSaved);
1314   DCHECK(code.bit() & kCalleeSaved);
1315 
1316   // Ensure that a RegExp stack is allocated.
1317   ExternalReference address_of_regexp_stack_memory_address =
1318       ExternalReference::address_of_regexp_stack_memory_address(isolate());
1319   ExternalReference address_of_regexp_stack_memory_size =
1320       ExternalReference::address_of_regexp_stack_memory_size(isolate());
1321   __ mov(r2, Operand(address_of_regexp_stack_memory_size));
1322   __ LoadAndTestP(r2, MemOperand(r2));
1323   __ beq(&runtime);
1324 
1325   // Check that the first argument is a JSRegExp object.
1326   __ LoadP(r2, MemOperand(sp, kJSRegExpOffset));
1327   __ JumpIfSmi(r2, &runtime);
1328   __ CompareObjectType(r2, r3, r3, JS_REGEXP_TYPE);
1329   __ bne(&runtime);
1330 
1331   // Check that the RegExp has been compiled (data contains a fixed array).
1332   __ LoadP(regexp_data, FieldMemOperand(r2, JSRegExp::kDataOffset));
1333   if (FLAG_debug_code) {
1334     __ TestIfSmi(regexp_data);
1335     __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected, cr0);
1336     __ CompareObjectType(regexp_data, r2, r2, FIXED_ARRAY_TYPE);
1337     __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1338   }
1339 
1340   // regexp_data: RegExp data (FixedArray)
1341   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
1342   __ LoadP(r2, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
1343   // DCHECK(Smi::FromInt(JSRegExp::IRREGEXP) < (char *)0xffffu);
1344   __ CmpSmiLiteral(r2, Smi::FromInt(JSRegExp::IRREGEXP), r0);
1345   __ bne(&runtime);
1346 
1347   // regexp_data: RegExp data (FixedArray)
1348   // Check that the number of captures fit in the static offsets vector buffer.
1349   __ LoadP(r4,
1350            FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1351   // Check (number_of_captures + 1) * 2 <= offsets vector size
1352   // Or          number_of_captures * 2 <= offsets vector size - 2
1353   // SmiToShortArrayOffset accomplishes the multiplication by 2 and
1354   // SmiUntag (which is a nop for 32-bit).
1355   __ SmiToShortArrayOffset(r4, r4);
1356   STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
1357   __ CmpLogicalP(r4, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
1358   __ bgt(&runtime);
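  // Illustrative example (added; not in the original source): a pattern such
  // as /(\d+)-(\d+)/ has number_of_captures == 2, so the native code needs
  // (2 + 1) * 2 == 6 offset slots (start/end for the whole match and for each
  // group); the branch above bails out to the runtime when this exceeds the
  // static offsets vector.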
1359 
1360   // Reset offset for possibly sliced string.
1361   __ LoadImmP(ip, Operand::Zero());
1362   __ LoadP(subject, MemOperand(sp, kSubjectOffset));
1363   __ JumpIfSmi(subject, &runtime);
1364   __ LoadRR(r5, subject);  // Make a copy of the original subject string.
1365   // subject: subject string
1366   // r5: subject string
1367   // regexp_data: RegExp data (FixedArray)
1368   // Handle subject string according to its encoding and representation:
1369   // (1) Sequential string?  If yes, go to (4).
1370   // (2) Sequential or cons?  If not, go to (5).
1371   // (3) Cons string.  If the string is flat, replace subject with first string
1372   //     and go to (1). Otherwise bail out to runtime.
1373   // (4) Sequential string.  Load regexp code according to encoding.
1374   // (E) Carry on.
1375   /// [...]
1376 
1377   // Deferred code at the end of the stub:
1378   // (5) Long external string?  If not, go to (7).
1379   // (6) External string.  Make it, offset-wise, look like a sequential string.
1380   //     Go to (4).
1381   // (7) Short external string or not a string?  If yes, bail out to runtime.
1382   // (8) Sliced string.  Replace subject with parent.  Go to (1).
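  // Illustrative note (added; hedged): in V8, concatenation ("a" + "b") can
  // yield a ConsString and substring operations can yield a SlicedString, so
  // steps (3) and (8) above unwrap these indirect representations until a
  // sequential or external backing string is reached.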
1383 
1384   Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
1385       not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
1386 
1387   __ bind(&check_underlying);
1388   __ LoadP(r2, FieldMemOperand(subject, HeapObject::kMapOffset));
1389   __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
1390 
1391   // (1) Sequential string?  If yes, go to (4).
1392 
1393   STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
1394                  kShortExternalStringMask) == 0x93);
1395   __ mov(r3, Operand(kIsNotStringMask | kStringRepresentationMask |
1396                      kShortExternalStringMask));
1397   __ AndP(r3, r2);
1398   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
1399   __ beq(&seq_string, Label::kNear);  // Go to (4).
1400 
1401   // (2) Sequential or cons? If not, go to (5).
1402   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
1403   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
1404   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
1405   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
1406   STATIC_ASSERT(kExternalStringTag < 0xffffu);
1407   __ CmpP(r3, Operand(kExternalStringTag));
1408   __ bge(&not_seq_nor_cons);  // Go to (5).
1409 
1410   // (3) Cons string.  Check that it's flat.
1411   // Replace subject with first string and reload instance type.
1412   __ LoadP(r2, FieldMemOperand(subject, ConsString::kSecondOffset));
1413   __ CompareRoot(r2, Heap::kempty_stringRootIndex);
1414   __ bne(&runtime);
1415   __ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
1416   __ b(&check_underlying);
1417 
1418   // (4) Sequential string.  Load regexp code according to encoding.
1419   __ bind(&seq_string);
1420   // subject: sequential subject string (or look-alike, external string)
1421   // r5: original subject string
1422   // Load previous index and check range before r5 is overwritten.  We have to
1423   // use r5 instead of subject here because subject might have been only made
1424   // to look like a sequential string when it actually is an external string.
1425   __ LoadP(r3, MemOperand(sp, kPreviousIndexOffset));
1426   __ JumpIfNotSmi(r3, &runtime);
1427   __ LoadP(r5, FieldMemOperand(r5, String::kLengthOffset));
1428   __ CmpLogicalP(r5, r3);
1429   __ ble(&runtime);
1430   __ SmiUntag(r3);
1431 
1432   STATIC_ASSERT(4 == kOneByteStringTag);
1433   STATIC_ASSERT(kTwoByteStringTag == 0);
1434   STATIC_ASSERT(kStringEncodingMask == 4);
1435   __ ExtractBitMask(r5, r2, kStringEncodingMask, SetRC);
1436   __ beq(&encoding_type_UC16, Label::kNear);
1437   __ LoadP(code,
1438            FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
1439   __ b(&br_over, Label::kNear);
1440   __ bind(&encoding_type_UC16);
1441   __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
1442   __ bind(&br_over);
1443 
1444   // (E) Carry on.  String handling is done.
1445   // code: irregexp code
1446   // Check that the irregexp code has been generated for the actual string
1447   // encoding. If it has, the field contains a code object; otherwise it contains
1448   // a smi (code flushing support).
1449   __ JumpIfSmi(code, &runtime);
1450 
1451   // r3: previous index
1452   // r5: encoding of subject string (1 if one_byte, 0 if two_byte);
1453   // code: Address of generated regexp code
1454   // subject: Subject string
1455   // regexp_data: RegExp data (FixedArray)
1456   // All checks done. Now push arguments for native regexp code.
1457   __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r2, r4);
1458 
1459   // Isolates: note we add an additional parameter here (isolate pointer).
1460   const int kRegExpExecuteArguments = 10;
1461   const int kParameterRegisters = 5;
1462   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
1463 
1464   // Stack pointer now points to cell where return address is to be written.
1465   // Arguments are before that on the stack or in registers.
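  // Illustrative note (added): with kRegExpExecuteArguments == 10 and
  // kParameterRegisters == 5, arguments 1-5 are passed in r2-r6 below, while
  // arguments 6-10 are stored into the stack parameter area at offsets
  // 0-4 * kPointerSize past kStackFrameExtraParamSlot.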
1466 
1467   // Argument 10 (in stack parameter area): Pass current isolate address.
1468   __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
1469   __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
1470                                    4 * kPointerSize));
1471 
1472   // Argument 9 is a dummy that reserves the space used for
1473   // the return address added by the ExitFrame in native calls.
1474   __ mov(r2, Operand::Zero());
1475   __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
1476                                    3 * kPointerSize));
1477 
1478   // Argument 8: Indicate that this is a direct call from JavaScript.
1479   __ mov(r2, Operand(1));
1480   __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
1481                                    2 * kPointerSize));
1482 
1483   // Argument 7: Start (high end) of backtracking stack memory area.
1484   __ mov(r2, Operand(address_of_regexp_stack_memory_address));
1485   __ LoadP(r2, MemOperand(r2, 0));
1486   __ mov(r1, Operand(address_of_regexp_stack_memory_size));
1487   __ LoadP(r1, MemOperand(r1, 0));
1488   __ AddP(r2, r1);
1489   __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
1490                                    1 * kPointerSize));
1491 
1492   // Argument 6: Set the number of capture registers to zero to force
1493   // global regexps to behave as non-global.  This does not affect non-global
1494   // regexps.
1495   __ mov(r2, Operand::Zero());
1496   __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
1497                                    0 * kPointerSize));
1498 
1499   // Argument 1 (r2): Subject string.
1500   // Load the original subject string from the previous stack frame.
1501   // Therefore we have to use fp, which points exactly 15 pointer sizes
1502   // below the previous sp. (Creating the new stack frame pushed the previous
1503   // fp onto the stack, moved sp up by 2 * kPointerSize, and 13 registers
1504   // were saved on the stack before that.)
1505   __ LoadP(r2, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
1506 
1507   // Argument 2 (r3): Previous index.
1508   // Already there
1509   __ AddP(r1, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
1510 
1511   // Argument 5 (r6): static offsets vector buffer.
1512   __ mov(
1513       r6,
1514       Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
1515 
1516   // For arguments 4 (r5) and 3 (r4) get string length, calculate start of data
1517   // and calculate the shift of the index (0 for one-byte and 1 for two-byte).
1518   __ XorP(r5, Operand(1));
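  // Illustrative note (added): r5 holds the encoding bit (1 for one-byte,
  // 0 for two-byte, see above), so XOR-ing with 1 turns it into the index
  // shift: one-byte data is byte-indexed (shift 0) and two-byte data is
  // halfword-indexed (shift 1).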
1519   // If slice offset is not 0, load the length from the original sliced string.
1520   // Argument 3, r4: Start of string data
1521   // Prepare start and end index of the input.
1522   __ ShiftLeftP(ip, ip, r5);
1523   __ AddP(ip, r1, ip);
1524   __ ShiftLeftP(r4, r3, r5);
1525   __ AddP(r4, ip, r4);
1526 
1527   // Argument 4, r5: End of string data
1528   __ LoadP(r1, FieldMemOperand(r2, String::kLengthOffset));
1529   __ SmiUntag(r1);
1530   __ ShiftLeftP(r0, r1, r5);
1531   __ AddP(r5, ip, r0);
1532 
1533   // Locate the code entry and call it.
1534   __ AddP(code, Operand(Code::kHeaderSize - kHeapObjectTag));
1535 
1536   DirectCEntryStub stub(isolate());
1537   stub.GenerateCall(masm, code);
1538 
1539   __ LeaveExitFrame(false, no_reg, true);
1540 
1541   // r2: result (int32)
1542   // subject: subject string -- needed to reload
1543   __ LoadP(subject, MemOperand(sp, kSubjectOffset));
1544 
1545   // regexp_data: RegExp data (callee saved)
1546   // last_match_info_elements: Last match info elements (callee saved)
1547   // Check the result.
1548   Label success;
1549   __ Cmp32(r2, Operand(1));
1550   // We expect exactly one result since we force the called regexp to behave
1551   // as non-global.
1552   __ beq(&success);
1553   Label failure;
1554   __ Cmp32(r2, Operand(NativeRegExpMacroAssembler::FAILURE));
1555   __ beq(&failure);
1556   __ Cmp32(r2, Operand(NativeRegExpMacroAssembler::EXCEPTION));
1557   // If it is not an exception, it can only be a retry. Handle it in the runtime system.
1558   __ bne(&runtime);
1559   // Result must now be exception. If there is no pending exception already, a
1560   // stack overflow (on the backtrack stack) was detected in RegExp code but
1561   // the exception has not been created yet. Handle that in the runtime system.
1562   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
1563   __ mov(r3, Operand(isolate()->factory()->the_hole_value()));
1564   __ mov(r4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1565                                        isolate())));
1566   __ LoadP(r2, MemOperand(r4, 0));
1567   __ CmpP(r2, r3);
1568   __ beq(&runtime);
1569 
1570   // For exception, throw the exception again.
1571   __ TailCallRuntime(Runtime::kRegExpExecReThrow);
1572 
1573   __ bind(&failure);
1574   // For failure and exception return null.
1575   __ mov(r2, Operand(isolate()->factory()->null_value()));
1576   __ la(sp, MemOperand(sp, (4 * kPointerSize)));
1577   __ Ret();
1578 
1579   // Process the result from the native regexp code.
1580   __ bind(&success);
1581   __ LoadP(r3,
1582            FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1583   // Calculate number of capture registers (number_of_captures + 1) * 2.
1584   // SmiToShortArrayOffset accomplishes the multiplication by 2 and
1585   // SmiUntag (which is a nop for 32-bit).
1586   __ SmiToShortArrayOffset(r3, r3);
1587   __ AddP(r3, Operand(2));
1588 
1589   // Check that the last match info is a FixedArray.
1590   __ LoadP(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
1591   __ JumpIfSmi(last_match_info_elements, &runtime);
1592   // Check that the object has fast elements.
1593   __ LoadP(r2,
1594            FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
1595   __ CompareRoot(r2, Heap::kFixedArrayMapRootIndex);
1596   __ bne(&runtime);
1597   // Check that the last match info has space for the capture registers and the
1598   // additional information.
1599   __ LoadP(
1600       r2, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
1601   __ AddP(r4, r3, Operand(RegExpMatchInfo::kLastMatchOverhead));
1602   __ SmiUntag(r0, r2);
1603   __ CmpP(r4, r0);
1604   __ bgt(&runtime);
1605 
1606   // r3: number of capture registers
1607   // subject: subject string
1608   // Store the capture count.
1609   __ SmiTag(r4, r3);
1610   __ StoreP(r4, FieldMemOperand(last_match_info_elements,
1611                                 RegExpMatchInfo::kNumberOfCapturesOffset));
1612   // Store last subject and last input.
1613   __ StoreP(subject, FieldMemOperand(last_match_info_elements,
1614                                      RegExpMatchInfo::kLastSubjectOffset));
1615   __ LoadRR(r4, subject);
1616   __ RecordWriteField(last_match_info_elements,
1617                       RegExpMatchInfo::kLastSubjectOffset, subject, r9,
1618                       kLRHasNotBeenSaved, kDontSaveFPRegs);
1619   __ LoadRR(subject, r4);
1620   __ StoreP(subject, FieldMemOperand(last_match_info_elements,
1621                                      RegExpMatchInfo::kLastInputOffset));
1622   __ RecordWriteField(last_match_info_elements,
1623                       RegExpMatchInfo::kLastInputOffset, subject, r9,
1624                       kLRHasNotBeenSaved, kDontSaveFPRegs);
1625 
1626   // Get the static offsets vector filled by the native regexp code.
1627   ExternalReference address_of_static_offsets_vector =
1628       ExternalReference::address_of_static_offsets_vector(isolate());
1629   __ mov(r4, Operand(address_of_static_offsets_vector));
1630 
1631   // r3: number of capture registers
1632   // r4: offsets vector
1633   Label next_capture;
1634   // Capture register counter starts from number of capture registers and
1635   // counts down until wrapping after zero.
1636   __ AddP(r2, last_match_info_elements,
1637           Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag -
1638                   kPointerSize));
1639   __ AddP(r4, Operand(-kIntSize));  // bias down for lwzu
1640   __ bind(&next_capture);
1641   // Read the value from the static offsets vector buffer.
1642   __ ly(r5, MemOperand(r4, kIntSize));
1643   __ lay(r4, MemOperand(r4, kIntSize));
1644   // Store the smi value in the last match info.
1645   __ SmiTag(r5);
1646   __ StoreP(r5, MemOperand(r2, kPointerSize));
1647   __ lay(r2, MemOperand(r2, kPointerSize));
1648   __ BranchOnCount(r3, &next_capture);
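  // Illustrative example (added; not in the original source): for /(a)(b)/
  // matched against "ab", the offsets vector holds [0, 2, 0, 1, 1, 2]
  // (whole-match start/end, then each group's start/end); the loop above
  // stores each value smi-tagged into consecutive slots of the match info.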
1649 
1650   // Return last match info.
1651   __ LoadRR(r2, last_match_info_elements);
1652   __ la(sp, MemOperand(sp, (4 * kPointerSize)));
1653   __ Ret();
1654 
1655   // Do the runtime call to execute the regexp.
1656   __ bind(&runtime);
1657   __ TailCallRuntime(Runtime::kRegExpExec);
1658 
1659   // Deferred code for string handling.
1660   // (5) Long external string? If not, go to (7).
1661   __ bind(&not_seq_nor_cons);
1662   // Compare flags are still set.
1663   __ bgt(&not_long_external, Label::kNear);  // Go to (7).
1664 
1665   // (6) External string.  Make it, offset-wise, look like a sequential string.
1666   __ bind(&external_string);
1667   __ LoadP(r2, FieldMemOperand(subject, HeapObject::kMapOffset));
1668   __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
1669   if (FLAG_debug_code) {
1670     // Assert that we do not have a cons or slice (indirect strings) here.
1671     // Sequential strings have already been ruled out.
1672     STATIC_ASSERT(kIsIndirectStringMask == 1);
1673     __ tmll(r2, Operand(kIsIndirectStringMask));
1674     __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
1675   }
1676   __ LoadP(subject,
1677            FieldMemOperand(subject, ExternalString::kResourceDataOffset));
1678   // Move the pointer so that offset-wise, it looks like a sequential string.
1679   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1680   __ SubP(subject, subject,
1681           Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
1682   __ b(&seq_string);  // Go to (4).
1683 
1684   // (7) Short external string or not a string?  If yes, bail out to runtime.
1685   __ bind(&not_long_external);
1686   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
1687   __ mov(r0, Operand(kIsNotStringMask | kShortExternalStringMask));
1688   __ AndP(r0, r3);
1689   __ bne(&runtime);
1690 
1691   // (8) Sliced string.  Replace subject with parent.  Go to (4).
1692   // Load offset into ip and replace subject string with parent.
1693   __ LoadP(ip, FieldMemOperand(subject, SlicedString::kOffsetOffset));
1694   __ SmiUntag(ip);
1695   __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
1696   __ b(&check_underlying);  // Go to (4).
1697 #endif  // V8_INTERPRETED_REGEXP
1698 }
1699 
1700 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
1701   // r2 : number of arguments to the construct function
1702   // r3 : the function to call
1703   // r4 : feedback vector
1704   // r5 : slot in feedback vector (Smi)
1705   FrameScope scope(masm, StackFrame::INTERNAL);
1706 
1707   // Number-of-arguments register must be smi-tagged to call out.
1708   __ SmiTag(r2);
1709   __ Push(r5, r4, r3, r2);
1710   __ Push(cp);
1711 
1712   __ CallStub(stub);
1713 
1714   __ Pop(cp);
1715   __ Pop(r5, r4, r3, r2);
1716   __ SmiUntag(r2);
1717 }
1718 
1719 static void GenerateRecordCallTarget(MacroAssembler* masm) {
1720   // Cache the called function in a feedback vector slot.  Cache states
1721   // are uninitialized, monomorphic (indicated by a JSFunction), and
1722   // megamorphic.
1723   // r2 : number of arguments to the construct function
1724   // r3 : the function to call
1725   // r4 : feedback vector
1726   // r5 : slot in feedback vector (Smi)
1727   Label initialize, done, miss, megamorphic, not_array_function;
1728 
1729   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
1730             masm->isolate()->heap()->megamorphic_symbol());
1731   DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
1732             masm->isolate()->heap()->uninitialized_symbol());
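  // Illustrative note (added): the slot effectively moves through
  // uninitialized -> monomorphic (a WeakCell holding the function, or an
  // AllocationSite for the Array function) -> megamorphic, with one
  // exception handled below: a cleared WeakCell allows the slot to become
  // monomorphic again.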
1733 
1734   const int count_offset = FixedArray::kHeaderSize + kPointerSize;
1735 
1736   // Load the cache state into r7.
1737   __ SmiToPtrArrayOffset(r7, r5);
1738   __ AddP(r7, r4, r7);
1739   __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
1740 
1741   // A monomorphic cache hit or an already megamorphic state: invoke the
1742   // function without changing the state.
1743   // We don't know if r7 is a WeakCell or a Symbol, but it's harmless to read at
1744   // this position in a symbol (see static asserts in type-feedback-vector.h).
1745   Label check_allocation_site;
1746   Register feedback_map = r8;
1747   Register weak_value = r9;
1748   __ LoadP(weak_value, FieldMemOperand(r7, WeakCell::kValueOffset));
1749   __ CmpP(r3, weak_value);
1750   __ beq(&done, Label::kNear);
1751   __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
1752   __ beq(&done, Label::kNear);
1753   __ LoadP(feedback_map, FieldMemOperand(r7, HeapObject::kMapOffset));
1754   __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
1755   __ bne(&check_allocation_site);
1756 
1757   // If the weak cell is cleared, we have a new chance to become monomorphic.
1758   __ JumpIfSmi(weak_value, &initialize);
1759   __ b(&megamorphic);
1760 
1761   __ bind(&check_allocation_site);
1762   // If we came here, we need to see if we are the array function.
1763   // If we didn't have a matching function, and we didn't find the megamorphic
1764   // sentinel, then we have in the slot either some other function or an
1765   // AllocationSite.
1766   __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
1767   __ bne(&miss);
1768 
1769   // Make sure the function is the Array() function
1770   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
1771   __ CmpP(r3, r7);
1772   __ bne(&megamorphic);
1773   __ b(&done, Label::kNear);
1774 
1775   __ bind(&miss);
1776 
1777   // A monomorphic miss (i.e., the cache is not uninitialized) goes
1778   // megamorphic.
1779   __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
1780   __ beq(&initialize);
1781   // MegamorphicSentinel is an immortal immovable object (undefined) so no
1782   // write-barrier is needed.
1783   __ bind(&megamorphic);
1784   __ SmiToPtrArrayOffset(r7, r5);
1785   __ AddP(r7, r4, r7);
1786   __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
1787   __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
1788   __ jmp(&done);
1789 
1790   // An uninitialized cache is patched with the function.
1791   __ bind(&initialize);
1792 
1793   // Make sure the function is the Array() function.
1794   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
1795   __ CmpP(r3, r7);
1796   __ bne(&not_array_function);
1797 
1798   // The target function is the Array constructor.
1799   // Create an AllocationSite if we don't already have one and store it in
1800   // the slot.
1801   CreateAllocationSiteStub create_stub(masm->isolate());
1802   CallStubInRecordCallTarget(masm, &create_stub);
1803   __ b(&done, Label::kNear);
1804 
1805   __ bind(&not_array_function);
1806 
1807   CreateWeakCellStub weak_cell_stub(masm->isolate());
1808   CallStubInRecordCallTarget(masm, &weak_cell_stub);
1809 
1810   __ bind(&done);
1811 
1812   // Increment the call count for all function calls.
1813   __ SmiToPtrArrayOffset(r7, r5);
1814   __ AddP(r7, r4, r7);
1815 
1816   __ LoadP(r6, FieldMemOperand(r7, count_offset));
1817   __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
1818   __ StoreP(r6, FieldMemOperand(r7, count_offset), r0);
1819 }
1820 
1821 void CallConstructStub::Generate(MacroAssembler* masm) {
1822   // r2 : number of arguments
1823   // r3 : the function to call
1824   // r4 : feedback vector
1825   // r5 : slot in feedback vector (Smi, for RecordCallTarget)
1826 
1827   Label non_function;
1828   // Check that the function is not a smi.
1829   __ JumpIfSmi(r3, &non_function);
1830   // Check that the function is a JSFunction.
1831   __ CompareObjectType(r3, r7, r7, JS_FUNCTION_TYPE);
1832   __ bne(&non_function);
1833 
1834   GenerateRecordCallTarget(masm);
1835 
1836   __ SmiToPtrArrayOffset(r7, r5);
1837   __ AddP(r7, r4, r7);
1838   // Put the AllocationSite from the feedback vector into r4, or undefined.
1839   __ LoadP(r4, FieldMemOperand(r7, FixedArray::kHeaderSize));
1840   __ LoadP(r7, FieldMemOperand(r4, AllocationSite::kMapOffset));
1841   __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
1842   Label feedback_register_initialized;
1843   __ beq(&feedback_register_initialized);
1844   __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
1845   __ bind(&feedback_register_initialized);
1846 
1847   __ AssertUndefinedOrAllocationSite(r4, r7);
1848 
1849   // Pass function as new target.
1850   __ LoadRR(r5, r3);
1851 
1852   // Tail call to the function-specific construct stub (still in the caller
1853   // context at this point).
1854   __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
1855   __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset));
1856   __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
1857   __ JumpToJSEntry(ip);
1858 
1859   __ bind(&non_function);
1860   __ LoadRR(r5, r3);
1861   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
1862 }
1863 
1864 // Note: feedback_vector and slot are clobbered after the call.
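// Illustrative note (added): count_offset below addresses the element one
// past the feedback slot, i.e. the vector is laid out as
// vector[slot] = feedback (WeakCell/Symbol/AllocationSite) and
// vector[slot + 1] = call count (Smi).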
1865 static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
1866                                Register slot, Register temp) {
1867   const int count_offset = FixedArray::kHeaderSize + kPointerSize;
1868   __ SmiToPtrArrayOffset(temp, slot);
1869   __ AddP(feedback_vector, feedback_vector, temp);
1870   __ LoadP(slot, FieldMemOperand(feedback_vector, count_offset));
1871   __ AddSmiLiteral(slot, slot, Smi::FromInt(1), temp);
1872   __ StoreP(slot, FieldMemOperand(feedback_vector, count_offset), temp);
1873 }
1874 
1875 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
1876   // r2 - number of arguments
1877   // r3 - function
1878   // r5 - slot id
1879   // r4 - vector
1880   // r6 - allocation site (loaded from vector[slot])
1881   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
1882   __ CmpP(r3, r7);
1883   __ bne(miss);
1884 
1885   // Increment the call count for monomorphic function calls.
1886   IncrementCallCount(masm, r4, r5, r1);
1887 
1888   __ LoadRR(r4, r6);
1889   __ LoadRR(r5, r3);
1890   ArrayConstructorStub stub(masm->isolate());
1891   __ TailCallStub(&stub);
1892 }
1893 
1894 void CallICStub::Generate(MacroAssembler* masm) {
1895   // r2 - number of arguments
1896   // r3 - function
1897   // r5 - slot id (Smi)
1898   // r4 - vector
1899   Label extra_checks_or_miss, call, call_function, call_count_incremented;
1900 
1901   // The checks. First, does r3 match the recorded monomorphic target?
1902   __ SmiToPtrArrayOffset(r8, r5);
1903   __ AddP(r8, r4, r8);
1904   __ LoadP(r6, FieldMemOperand(r8, FixedArray::kHeaderSize));
1905 
1906   // We don't know that we have a weak cell. We might have a private symbol
1907   // or an AllocationSite, but the memory is safe to examine.
1908   // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
1909   // FixedArray.
1910   // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
1911   // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
1912   // computed, meaning that it can't appear to be a pointer. If the low bit is
1913   // 0, then hash is computed, but the 0 bit prevents the field from appearing
1914   // to be a pointer.
1915   STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
1916   STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
1917                     WeakCell::kValueOffset &&
1918                 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
1919 
1920   __ LoadP(r7, FieldMemOperand(r6, WeakCell::kValueOffset));
1921   __ CmpP(r3, r7);
1922   __ bne(&extra_checks_or_miss, Label::kNear);
1923 
1924   // The compare above could have been a SMI/SMI comparison. Guard against this
1925   // convincing us that we have a monomorphic JSFunction.
1926   __ JumpIfSmi(r3, &extra_checks_or_miss);
1927 
1928   __ bind(&call_function);
1929 
1930   // Increment the call count for monomorphic function calls.
1931   IncrementCallCount(masm, r4, r5, r1);
1932 
1933   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
1934                                                     tail_call_mode()),
1935           RelocInfo::CODE_TARGET);
1936 
1937   __ bind(&extra_checks_or_miss);
1938   Label uninitialized, miss, not_allocation_site;
1939 
1940   __ CompareRoot(r6, Heap::kmegamorphic_symbolRootIndex);
1941   __ beq(&call);
1942 
1943   // Verify that r6 contains an AllocationSite
1944   __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
1945   __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
1946   __ bne(&not_allocation_site);
1947 
1948   // We have an allocation site.
1949   HandleArrayCase(masm, &miss);
1950 
1951   __ bind(&not_allocation_site);
1952 
1953   // The following cases attempt to handle MISS cases without going to the
1954   // runtime.
1955   if (FLAG_trace_ic) {
1956     __ b(&miss);
1957   }
1958 
1959   __ CompareRoot(r6, Heap::kuninitialized_symbolRootIndex);
1960   __ beq(&uninitialized);
1961 
1962   // We are going megamorphic. If the feedback is a JSFunction, it is fine
1963   // to handle it here. More complex cases are dealt with in the runtime.
1964   __ AssertNotSmi(r6);
1965   __ CompareObjectType(r6, r7, r7, JS_FUNCTION_TYPE);
1966   __ bne(&miss);
1967   __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
1968   __ StoreP(ip, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
1969 
1970   __ bind(&call);
1971 
1972   // Increment the call count for megamorphic function calls.
1973   IncrementCallCount(masm, r4, r5, r1);
1974 
1975   __ bind(&call_count_incremented);
1976   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
1977           RelocInfo::CODE_TARGET);
1978 
1979   __ bind(&uninitialized);
1980 
1981   // We are going monomorphic, provided we actually have a JSFunction.
1982   __ JumpIfSmi(r3, &miss);
1983 
1984   // Goto miss case if we do not have a function.
1985   __ CompareObjectType(r3, r6, r6, JS_FUNCTION_TYPE);
1986   __ bne(&miss);
1987 
1988   // Make sure the function is not the Array() function, which requires special
1989   // behavior on MISS.
1990   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r6);
1991   __ CmpP(r3, r6);
1992   __ beq(&miss);
1993 
1994   // Make sure the function belongs to the same native context.
1995   __ LoadP(r6, FieldMemOperand(r3, JSFunction::kContextOffset));
1996   __ LoadP(r6, ContextMemOperand(r6, Context::NATIVE_CONTEXT_INDEX));
1997   __ LoadP(ip, NativeContextMemOperand());
1998   __ CmpP(r6, ip);
1999   __ bne(&miss);
2000 
2001   // Store the function. Use a stub since we need a frame for allocation.
2002   // r4 - vector
2003   // r5 - slot
2004   // r3 - function
2005   {
2006     FrameScope scope(masm, StackFrame::INTERNAL);
2007     CreateWeakCellStub create_stub(masm->isolate());
2008     __ SmiTag(r2);
2009     __ Push(r2, r4, r5, cp, r3);
2010     __ CallStub(&create_stub);
2011     __ Pop(r4, r5, cp, r3);
2012     __ Pop(r2);
2013     __ SmiUntag(r2);
2014   }
2015 
2016   __ b(&call_function);
2017 
2018   // We are here because tracing is on or we encountered a MISS case we can't
2019   // handle here.
2020   __ bind(&miss);
2021   GenerateMiss(masm);
2022 
2023   __ b(&call_count_incremented);
2024 }
2025 
2026 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2027   FrameScope scope(masm, StackFrame::INTERNAL);
2028 
2029   // Preserve the number of arguments as Smi.
2030   __ SmiTag(r2);
2031 
2032   // Push the receiver and the function and feedback info.
2033   __ Push(r2, r3, r4, r5);
2034 
2035   // Call the entry.
2036   __ CallRuntime(Runtime::kCallIC_Miss);
2037 
2038   // Move result to r3 and exit the internal frame.
2039   __ LoadRR(r3, r2);
2040 
2041   // Restore number of arguments.
2042   __ Pop(r2);
2043   __ SmiUntag(r2);
2044 }
2045 
2046 // StringCharCodeAtGenerator
2047 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2048   // If the receiver is a smi trigger the non-string case.
2049   if (check_mode_ == RECEIVER_IS_UNKNOWN) {
2050     __ JumpIfSmi(object_, receiver_not_string_);
2051 
2052     // Fetch the instance type of the receiver into result register.
2053     __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2054     __ LoadlB(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2055     // If the receiver is not a string trigger the non-string case.
2056     __ mov(r0, Operand(kIsNotStringMask));
2057     __ AndP(r0, result_);
2058     __ bne(receiver_not_string_);
2059   }
2060 
2061   // If the index is non-smi trigger the non-smi case.
2062   __ JumpIfNotSmi(index_, &index_not_smi_);
2063   __ bind(&got_smi_index_);
2064 
2065   // Check for index out of range.
2066   __ LoadP(ip, FieldMemOperand(object_, String::kLengthOffset));
2067   __ CmpLogicalP(ip, index_);
2068   __ ble(index_out_of_range_);
2069 
2070   __ SmiUntag(index_);
2071 
2072   StringCharLoadGenerator::Generate(masm, object_, index_, result_,
2073                                     &call_runtime_);
2074 
2075   __ SmiTag(result_);
2076   __ bind(&exit_);
2077 }
2078 
2079 void StringCharCodeAtGenerator::GenerateSlow(
2080     MacroAssembler* masm, EmbedMode embed_mode,
2081     const RuntimeCallHelper& call_helper) {
2082   __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2083 
2084   // Index is not a smi.
2085   __ bind(&index_not_smi_);
2086   // If index is a heap number, try converting it to an integer.
2087   __ CheckMap(index_, result_, Heap::kHeapNumberMapRootIndex, index_not_number_,
2088               DONT_DO_SMI_CHECK);
2089   call_helper.BeforeCall(masm);
2090   if (embed_mode == PART_OF_IC_HANDLER) {
2091     __ Push(LoadWithVectorDescriptor::VectorRegister(),
2092             LoadWithVectorDescriptor::SlotRegister(), object_, index_);
2093   } else {
2094     // index_ is consumed by runtime conversion function.
2095     __ Push(object_, index_);
2096   }
2097   __ CallRuntime(Runtime::kNumberToSmi);
2098   // Save the conversion result before the pop instructions below
2099   // have a chance to overwrite it.
2100   __ Move(index_, r2);
2101   if (embed_mode == PART_OF_IC_HANDLER) {
2102     __ Pop(LoadWithVectorDescriptor::VectorRegister(),
2103            LoadWithVectorDescriptor::SlotRegister(), object_);
2104   } else {
2105     __ pop(object_);
2106   }
2107   // Reload the instance type.
2108   __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2109   __ LoadlB(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2110   call_helper.AfterCall(masm);
2111   // If index is still not a smi, it must be out of range.
2112   __ JumpIfNotSmi(index_, index_out_of_range_);
2113   // Otherwise, return to the fast path.
2114   __ b(&got_smi_index_);
2115 
2116   // Call runtime. We get here when the receiver is a string and the
2117   // index is a number, but the code of getting the actual character
2118   // is too complex (e.g., when the string needs to be flattened).
2119   __ bind(&call_runtime_);
2120   call_helper.BeforeCall(masm);
2121   __ SmiTag(index_);
2122   __ Push(object_, index_);
2123   __ CallRuntime(Runtime::kStringCharCodeAtRT);
2124   __ Move(result_, r2);
2125   call_helper.AfterCall(masm);
2126   __ b(&exit_);
2127 
2128   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2129 }
2130 
2131 // -------------------------------------------------------------------------
2132 // StringCharFromCodeGenerator
2133 
2134 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
2135   // Fast case of Heap::LookupSingleCharacterStringFromCode.
2136   DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
2137   __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCodeU));
2138   __ OrP(r0, r0, Operand(kSmiTagMask));
2139   __ AndP(r0, code_, r0);
2140   __ bne(&slow_case_);
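  // Illustrative example (added; assumes String::kMaxOneByteCharCode == 0xFF):
  // the combined mask rejects non-smis and char codes above 0xFF, so
  // Smi(65) ('A') passes while Smi(0x263A) takes the slow case.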
2141 
2142   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
2143   // At this point the code register contains the smi-tagged one-byte char code.
2144   __ LoadRR(r0, code_);
2145   __ SmiToPtrArrayOffset(code_, code_);
2146   __ AddP(result_, code_);
2147   __ LoadRR(code_, r0);
2148   __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
2149   __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
2150   __ beq(&slow_case_);
2151   __ bind(&exit_);
2152 }
2153 
2154 void StringCharFromCodeGenerator::GenerateSlow(
2155     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
2156   __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
2157 
2158   __ bind(&slow_case_);
2159   call_helper.BeforeCall(masm);
2160   __ push(code_);
2161   __ CallRuntime(Runtime::kStringCharFromCode);
2162   __ Move(result_, r2);
2163   call_helper.AfterCall(masm);
2164   __ b(&exit_);
2165 
2166   __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
2167 }
2168 
2169 enum CopyCharactersFlags { COPY_ASCII = 1, DEST_ALWAYS_ALIGNED = 2 };
2170 
2171 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest,
2172                                           Register src, Register count,
2173                                           Register scratch,
2174                                           String::Encoding encoding) {
2175   if (FLAG_debug_code) {
2176     // Check that destination is word aligned.
2177     __ mov(r0, Operand(kPointerAlignmentMask));
2178     __ AndP(r0, dest);
2179     __ Check(eq, kDestinationOfCopyNotAligned, cr0);
2180   }
2181 
2182   // Nothing to do for zero characters.
2183   Label done;
2184   if (encoding == String::TWO_BYTE_ENCODING) {
2185     // double the length
2186     __ AddP(count, count, count);
2187     __ beq(&done, Label::kNear);
2188   } else {
2189     __ CmpP(count, Operand::Zero());
2190     __ beq(&done, Label::kNear);
2191   }
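  // Illustrative note (added): after the adjustment above, count is always a
  // byte count for the loop below, e.g. 3 two-byte characters copy 6 bytes.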
2192 
2193   // Copy count bytes from src to dst.
2194   Label byte_loop;
2195   // TODO(joransiu): Convert into MVC loop
2196   __ bind(&byte_loop);
2197   __ LoadlB(scratch, MemOperand(src));
2198   __ la(src, MemOperand(src, 1));
2199   __ stc(scratch, MemOperand(dest));
2200   __ la(dest, MemOperand(dest, 1));
2201   __ BranchOnCount(count, &byte_loop);
2202 
2203   __ bind(&done);
2204 }
2205 
2206 
2207 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
2208                                                    Register left,
2209                                                    Register right,
2210                                                    Register scratch1,
2211                                                    Register scratch2) {
2212   Register length = scratch1;
2213 
2214   // Compare lengths.
2215   Label strings_not_equal, check_zero_length;
2216   __ LoadP(length, FieldMemOperand(left, String::kLengthOffset));
2217   __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
2218   __ CmpP(length, scratch2);
2219   __ beq(&check_zero_length);
2220   __ bind(&strings_not_equal);
2221   __ LoadSmiLiteral(r2, Smi::FromInt(NOT_EQUAL));
2222   __ Ret();
2223 
2224   // Check if the length is zero.
2225   Label compare_chars;
2226   __ bind(&check_zero_length);
2227   STATIC_ASSERT(kSmiTag == 0);
2228   __ CmpP(length, Operand::Zero());
2229   __ bne(&compare_chars);
2230   __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
2231   __ Ret();
2232 
2233   // Compare characters.
2234   __ bind(&compare_chars);
2235   GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
2236                                   &strings_not_equal);
2237 
2238   // Characters are equal.
2239   __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
2240   __ Ret();
2241 }
2242 
2243 void StringHelper::GenerateCompareFlatOneByteStrings(
2244     MacroAssembler* masm, Register left, Register right, Register scratch1,
2245     Register scratch2, Register scratch3) {
2246   Label skip, result_not_equal, compare_lengths;
2247   // Find minimum length and length difference.
2248   __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset));
2249   __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
2250   __ SubP(scratch3, scratch1, scratch2 /*, LeaveOE, SetRC*/);
2251   // Removing RC looks okay here.
2252   Register length_delta = scratch3;
2253   __ ble(&skip, Label::kNear);
2254   __ LoadRR(scratch1, scratch2);
2255   __ bind(&skip);
2256   Register min_length = scratch1;
2257   STATIC_ASSERT(kSmiTag == 0);
2258   __ CmpP(min_length, Operand::Zero());
2259   __ beq(&compare_lengths);
2260 
2261   // Compare loop.
2262   GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
2263                                   &result_not_equal);
2264 
2265   // Compare lengths - strings up to min-length are equal.
2266   __ bind(&compare_lengths);
2267   DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
2268   // Use length_delta as result if it's zero.
2269   __ LoadRR(r2, length_delta);
2270   __ CmpP(length_delta, Operand::Zero());
2271   __ bind(&result_not_equal);
2272   // Conditionally update the result based either on length_delta or
2273   // the last comparison performed in the loop above.
2274   Label less_equal, equal;
2275   __ ble(&less_equal);
2276   __ LoadSmiLiteral(r2, Smi::FromInt(GREATER));
2277   __ Ret();
2278   __ bind(&less_equal);
2279   __ beq(&equal);
2280   __ LoadSmiLiteral(r2, Smi::FromInt(LESS));
2281   __ bind(&equal);
2282   __ Ret();
2283 }
2284 
2285 void StringHelper::GenerateOneByteCharsCompareLoop(
2286     MacroAssembler* masm, Register left, Register right, Register length,
2287     Register scratch1, Label* chars_not_equal) {
2288   // Change index to run from -length to -1 by adding length to string
2289   // start. This means that loop ends when index reaches zero, which
2290   // doesn't need an additional compare.
2291   __ SmiUntag(length);
2292   __ AddP(scratch1, length,
2293           Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
2294   __ AddP(left, scratch1);
2295   __ AddP(right, scratch1);
2296   __ LoadComplementRR(length, length);
2297   Register index = length;  // index = -length;
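  // Illustrative example (added): for length == 3 the loop below runs with
  // index == -3, -2, -1 and stops once index reaches zero, so no per-iteration
  // compare against the string length is needed.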
2298 
2299   // Compare loop.
2300   Label loop;
2301   __ bind(&loop);
2302   __ LoadlB(scratch1, MemOperand(left, index));
2303   __ LoadlB(r0, MemOperand(right, index));
2304   __ CmpP(scratch1, r0);
2305   __ bne(chars_not_equal);
2306   __ AddP(index, Operand(1));
2307   __ CmpP(index, Operand::Zero());
2308   __ bne(&loop);
2309 }
2310 
2311 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
2312   // ----------- S t a t e -------------
2313   //  -- r3    : left
2314   //  -- r2    : right
2316   // -----------------------------------
2317 
2318   // Load r4 with the allocation site.  We stick an undefined dummy value here
2319   // and replace it with the real allocation site later when we instantiate this
2320   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
2321   __ Move(r4, isolate()->factory()->undefined_value());
2322 
2323   // Make sure that we actually patched the allocation site.
2324   if (FLAG_debug_code) {
2325     __ TestIfSmi(r4);
2326     __ Assert(ne, kExpectedAllocationSite, cr0);
2327     __ push(r4);
2328     __ LoadP(r4, FieldMemOperand(r4, HeapObject::kMapOffset));
2329     __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
2330     __ pop(r4);
2331     __ Assert(eq, kExpectedAllocationSite);
2332   }
2333 
2334   // Tail call into the stub that handles binary operations with allocation
2335   // sites.
2336   BinaryOpWithAllocationSiteStub stub(isolate(), state());
2337   __ TailCallStub(&stub);
2338 }
2339 
2340 void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
2341   DCHECK_EQ(CompareICState::BOOLEAN, state());
2342   Label miss;
2343 
2344   __ CheckMap(r3, r4, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2345   __ CheckMap(r2, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2346   if (!Token::IsEqualityOp(op())) {
2347     __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
2348     __ AssertSmi(r3);
2349     __ LoadP(r2, FieldMemOperand(r2, Oddball::kToNumberOffset));
2350     __ AssertSmi(r2);
2351   }
2352   __ SubP(r2, r3, r2);
2353   __ Ret();
2354 
2355   __ bind(&miss);
2356   GenerateMiss(masm);
2357 }
2358 
2359 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
2360   DCHECK(state() == CompareICState::SMI);
2361   Label miss;
2362   __ OrP(r4, r3, r2);
2363   __ JumpIfNotSmi(r4, &miss);
2364 
2365   if (GetCondition() == eq) {
2366     // For equality we do not care about the sign of the result.
2367     // __ sub(r2, r2, r3, SetCC);
2368     __ SubP(r2, r2, r3);
2369   } else {
2370     // Untag before subtracting to avoid handling overflow.
2371     __ SmiUntag(r3);
2372     __ SmiUntag(r2);
2373     __ SubP(r2, r3, r2);
2374   }
2375   __ Ret();
2376 
2377   __ bind(&miss);
2378   GenerateMiss(masm);
2379 }
2380 
2381 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
2382   DCHECK(state() == CompareICState::NUMBER);
2383 
2384   Label generic_stub;
2385   Label unordered, maybe_undefined1, maybe_undefined2;
2386   Label miss;
2387   Label equal, less_than;
2388 
2389   if (left() == CompareICState::SMI) {
2390     __ JumpIfNotSmi(r3, &miss);
2391   }
2392   if (right() == CompareICState::SMI) {
2393     __ JumpIfNotSmi(r2, &miss);
2394   }
2395 
2396   // Inlining the double comparison and falling back to the general compare
2397   // stub if NaN is involved.
2398   // Load left and right operand.
2399   Label done, left, left_smi, right_smi;
2400   __ JumpIfSmi(r2, &right_smi);
2401   __ CheckMap(r2, r4, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
2402               DONT_DO_SMI_CHECK);
2403   __ LoadDouble(d1, FieldMemOperand(r2, HeapNumber::kValueOffset));
2404   __ b(&left);
2405   __ bind(&right_smi);
2406   __ SmiToDouble(d1, r2);
2407 
2408   __ bind(&left);
2409   __ JumpIfSmi(r3, &left_smi);
2410   __ CheckMap(r3, r4, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
2411               DONT_DO_SMI_CHECK);
2412   __ LoadDouble(d0, FieldMemOperand(r3, HeapNumber::kValueOffset));
2413   __ b(&done);
2414   __ bind(&left_smi);
2415   __ SmiToDouble(d0, r3);
2416 
2417   __ bind(&done);
2418 
2419   // Compare operands
2420   __ cdbr(d0, d1);
2421 
2422   // Don't base result on status bits when a NaN is involved.
2423   __ bunordered(&unordered);
2424 
2425   // Return a result of -1, 0, or 1, based on status bits.
2426   __ beq(&equal);
2427   __ blt(&less_than);
2428   //  assume greater than
2429   __ LoadImmP(r2, Operand(GREATER));
2430   __ Ret();
2431   __ bind(&equal);
2432   __ LoadImmP(r2, Operand(EQUAL));
2433   __ Ret();
2434   __ bind(&less_than);
2435   __ LoadImmP(r2, Operand(LESS));
2436   __ Ret();
2437 
2438   __ bind(&unordered);
2439   __ bind(&generic_stub);
2440   CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
2441                      CompareICState::GENERIC, CompareICState::GENERIC);
2442   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2443 
2444   __ bind(&maybe_undefined1);
2445   if (Token::IsOrderedRelationalCompareOp(op())) {
2446     __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
2447     __ bne(&miss);
2448     __ JumpIfSmi(r3, &unordered);
2449     __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE);
2450     __ bne(&maybe_undefined2);
2451     __ b(&unordered);
2452   }
2453 
2454   __ bind(&maybe_undefined2);
2455   if (Token::IsOrderedRelationalCompareOp(op())) {
2456     __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
2457     __ beq(&unordered);
2458   }
2459 
2460   __ bind(&miss);
2461   GenerateMiss(masm);
2462 }
2463 
2464 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
2465   DCHECK(state() == CompareICState::INTERNALIZED_STRING);
2466   Label miss, not_equal;
2467 
2468   // Registers containing left and right operands respectively.
2469   Register left = r3;
2470   Register right = r2;
2471   Register tmp1 = r4;
2472   Register tmp2 = r5;
2473 
2474   // Check that both operands are heap objects.
2475   __ JumpIfEitherSmi(left, right, &miss);
2476 
2477   // Check that both operands are symbols.
2478   __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2479   __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2480   __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2481   __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2482   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2483   __ OrP(tmp1, tmp1, tmp2);
2484   __ AndP(r0, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
2485   __ bne(&miss);
2486 
2487   // Internalized strings are compared by identity.
2488   __ CmpP(left, right);
2489   __ bne(&not_equal);
2490   // Make sure r2 is non-zero. At this point input operands are
2491   // guaranteed to be non-zero.
2492   DCHECK(right.is(r2));
2493   STATIC_ASSERT(EQUAL == 0);
2494   STATIC_ASSERT(kSmiTag == 0);
2495   __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
2496   __ bind(&not_equal);
2497   __ Ret();
2498 
2499   __ bind(&miss);
2500   GenerateMiss(masm);
2501 }
2502 
2503 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
2504   DCHECK(state() == CompareICState::UNIQUE_NAME);
2505   DCHECK(GetCondition() == eq);
2506   Label miss;
2507 
2508   // Registers containing left and right operands respectively.
2509   Register left = r3;
2510   Register right = r2;
2511   Register tmp1 = r4;
2512   Register tmp2 = r5;
2513 
2514   // Check that both operands are heap objects.
2515   __ JumpIfEitherSmi(left, right, &miss);
2516 
2517   // Check that both operands are unique names. This leaves the instance
2518   // types loaded in tmp1 and tmp2.
2519   __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2520   __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2521   __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2522   __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2523 
2524   __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
2525   __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
2526 
2527   // Unique names are compared by identity.
2528   __ CmpP(left, right);
2529   __ bne(&miss);
2530   // Make sure r2 is non-zero. At this point input operands are
2531   // guaranteed to be non-zero.
2532   DCHECK(right.is(r2));
2533   STATIC_ASSERT(EQUAL == 0);
2534   STATIC_ASSERT(kSmiTag == 0);
2535   __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
2536   __ Ret();
2537 
2538   __ bind(&miss);
2539   GenerateMiss(masm);
2540 }
2541 
2542 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
2543   DCHECK(state() == CompareICState::STRING);
2544   Label miss, not_identical, is_symbol;
2545 
2546   bool equality = Token::IsEqualityOp(op());
2547 
2548   // Registers containing left and right operands respectively.
2549   Register left = r3;
2550   Register right = r2;
2551   Register tmp1 = r4;
2552   Register tmp2 = r5;
2553   Register tmp3 = r6;
2554   Register tmp4 = r7;
2555 
2556   // Check that both operands are heap objects.
2557   __ JumpIfEitherSmi(left, right, &miss);
2558 
2559   // Check that both operands are strings. This leaves the instance
2560   // types loaded in tmp1 and tmp2.
2561   __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2562   __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2563   __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2564   __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2565   STATIC_ASSERT(kNotStringTag != 0);
2566   __ OrP(tmp3, tmp1, tmp2);
2567   __ AndP(r0, tmp3, Operand(kIsNotStringMask));
2568   __ bne(&miss);
2569 
2570   // Fast check for identical strings.
2571   __ CmpP(left, right);
2572   STATIC_ASSERT(EQUAL == 0);
2573   STATIC_ASSERT(kSmiTag == 0);
2574   __ bne(&not_identical);
2575   __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
2576   __ Ret();
2577   __ bind(&not_identical);
2578 
2579   // Handle not identical strings.
2580 
2581   // Check that both strings are internalized strings. If they are, we're done
2582   // because we already know they are not identical. We know they are both
2583   // strings.
2584   if (equality) {
2585     DCHECK(GetCondition() == eq);
2586     STATIC_ASSERT(kInternalizedTag == 0);
2587     __ OrP(tmp3, tmp1, tmp2);
2588     __ AndP(r0, tmp3, Operand(kIsNotInternalizedMask));
2589     __ bne(&is_symbol);
2590     // Make sure r2 is non-zero. At this point input operands are
2591     // guaranteed to be non-zero.
2592     DCHECK(right.is(r2));
2593     __ Ret();
2594     __ bind(&is_symbol);
2595   }
2596 
2597   // Check that both strings are sequential one-byte.
2598   Label runtime;
2599   __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
2600                                                     &runtime);
2601 
2602   // Compare flat one-byte strings. Returns when done.
2603   if (equality) {
2604     StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
2605                                                   tmp2);
2606   } else {
2607     StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
2608                                                     tmp2, tmp3);
2609   }
2610 
2611   // Handle more complex cases in runtime.
2612   __ bind(&runtime);
2613   if (equality) {
2614     {
2615       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2616       __ Push(left, right);
2617       __ CallRuntime(Runtime::kStringEqual);
2618     }
2619     __ LoadRoot(r3, Heap::kTrueValueRootIndex);
2620     __ SubP(r2, r2, r3);
2621     __ Ret();
2622   } else {
2623     __ Push(left, right);
2624     __ TailCallRuntime(Runtime::kStringCompare);
2625   }
2626 
2627   __ bind(&miss);
2628   GenerateMiss(masm);
2629 }
2630 
2631 void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
2632   DCHECK_EQ(CompareICState::RECEIVER, state());
2633   Label miss;
2634   __ AndP(r4, r3, r2);
2635   __ JumpIfSmi(r4, &miss);
2636 
2637   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2638   __ CompareObjectType(r2, r4, r4, FIRST_JS_RECEIVER_TYPE);
2639   __ blt(&miss);
2640   __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
2641   __ blt(&miss);
2642 
2643   DCHECK(GetCondition() == eq);
2644   __ SubP(r2, r2, r3);
2645   __ Ret();
2646 
2647   __ bind(&miss);
2648   GenerateMiss(masm);
2649 }
2650 
2651 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
2652   Label miss;
2653   Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
2654   __ AndP(r4, r3, r2);
2655   __ JumpIfSmi(r4, &miss);
2656   __ GetWeakValue(r6, cell);
2657   __ LoadP(r4, FieldMemOperand(r2, HeapObject::kMapOffset));
2658   __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
2659   __ CmpP(r4, r6);
2660   __ bne(&miss);
2661   __ CmpP(r5, r6);
2662   __ bne(&miss);
2663 
2664   if (Token::IsEqualityOp(op())) {
2665     __ SubP(r2, r2, r3);
2666     __ Ret();
2667   } else {
2668     if (op() == Token::LT || op() == Token::LTE) {
2669       __ LoadSmiLiteral(r4, Smi::FromInt(GREATER));
2670     } else {
2671       __ LoadSmiLiteral(r4, Smi::FromInt(LESS));
2672     }
2673     __ Push(r3, r2, r4);
2674     __ TailCallRuntime(Runtime::kCompare);
2675   }
2676 
2677   __ bind(&miss);
2678   GenerateMiss(masm);
2679 }
2680 
2681 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
2682   {
2683     // Call the runtime system in a fresh internal frame.
2684     FrameScope scope(masm, StackFrame::INTERNAL);
2685     __ Push(r3, r2);
2686     __ Push(r3, r2);
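    // Note (added): the first Push preserves the operands for the Pop below;
    // the second Push and the op Smi pushed next form the arguments consumed
    // by Runtime::kCompareIC_Miss.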
2687     __ LoadSmiLiteral(r0, Smi::FromInt(op()));
2688     __ push(r0);
2689     __ CallRuntime(Runtime::kCompareIC_Miss);
2690     // Compute the entry point of the rewritten stub.
2691     __ AddP(r4, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
2692     // Restore registers.
2693     __ Pop(r3, r2);
2694   }
2695 
2696   __ JumpToJSEntry(r4);
2697 }
2698 
2699 // This stub is paired with DirectCEntryStub::GenerateCall
2700 void DirectCEntryStub::Generate(MacroAssembler* masm) {
2701   __ CleanseP(r14);
2702 
2703   __ b(ip);  // Callee will return to R14 directly
2704 }
2705 
2706 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
2707 #if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
2708   // Native AIX/S390X Linux use a function descriptor.
2709   __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
2710   __ LoadP(target, MemOperand(target, 0));  // Instruction address
2711 #else
2712   // ip needs to be set for DirectCEntryStub::Generate, and also
2713   // for ABI_CALL_VIA_IP.
2714   __ Move(ip, target);
2715 #endif
2716 
2717   __ call(GetCode(), RelocInfo::CODE_TARGET);  // Call the stub.
2718 }
2719 
2720 void NameDictionaryLookupStub::GenerateNegativeLookup(
2721     MacroAssembler* masm, Label* miss, Label* done, Register receiver,
2722     Register properties, Handle<Name> name, Register scratch0) {
2723   DCHECK(name->IsUniqueName());
  // If the names of the slots probed for this hash value (slots 1 through
  // kProbes - 1) all differ from the given name, and the kProbes-th slot is
  // unused (its name is the undefined value), the hash table is guaranteed
  // not to contain the property. This holds even if some slots hold deleted
  // properties (their names are the hole value).
  for (int i = 0; i < kInlinedProbes; i++) {
    // scratch0 points to properties hash.
    // Compute the masked index: (hash + i + i * i) & mask.
    Register index = scratch0;
    // Capacity is smi 2^n.
    __ LoadP(index, FieldMemOperand(properties, kCapacityOffset));
    __ SubP(index, Operand(1));
    __ LoadSmiLiteral(
        ip, Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)));
    __ AndP(index, ip);

    // Scale the index by multiplying by the entry size.
    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
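    // Each dictionary entry spans three pointers, so the index is tripled
    // with a shift-and-add (index + index * 2) rather than a multiply.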
    __ ShiftLeftP(ip, index, Operand(1));
    __ AddP(index, ip);  // index *= 3.

    Register entity_name = scratch0;
    // Having undefined at this place means the name is not contained.
    Register tmp = properties;
    __ SmiToPtrArrayOffset(ip, index);
    __ AddP(tmp, properties, ip);
    __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset));

    DCHECK(!tmp.is(entity_name));
    __ CompareRoot(entity_name, Heap::kUndefinedValueRootIndex);
    __ beq(done);

    // Stop if found the property.
    __ CmpP(entity_name, Operand(Handle<Name>(name)));
    __ beq(miss);

    Label good;
    __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
    __ beq(&good);

    // Check if the entry name is not a unique name.
    __ LoadP(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
    __ LoadlB(entity_name,
              FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
    __ bind(&good);

    // Restore the properties.
    __ LoadP(properties,
             FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  }

  const int spill_mask = (r0.bit() | r8.bit() | r7.bit() | r6.bit() | r5.bit() |
                          r4.bit() | r3.bit() | r2.bit());

  __ LoadRR(r0, r14);
  __ MultiPush(spill_mask);

  __ LoadP(r2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ mov(r3, Operand(Handle<Name>(name)));
  NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
  __ CallStub(&stub);
  __ CmpP(r2, Operand::Zero());
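  // The stub leaves zero in r2 when the name is absent, which is success for
  // a negative lookup; the condition code set here survives the restore below.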

  __ MultiPop(spill_mask);  // MultiPop does not touch condition flags
  __ LoadRR(r14, r0);

  __ beq(done);
  __ bne(miss);
}

// Probe the name dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found. Jump to
// the |miss| label otherwise.
// If the lookup succeeded, |scratch2| holds elements + kPointerSize * index.
void NameDictionaryLookupStub::GeneratePositiveLookup(
    MacroAssembler* masm, Label* miss, Label* done, Register elements,
    Register name, Register scratch1, Register scratch2) {
  DCHECK(!elements.is(scratch1));
  DCHECK(!elements.is(scratch2));
  DCHECK(!name.is(scratch1));
  DCHECK(!name.is(scratch2));

  __ AssertName(name);

  // Compute the capacity mask.
  __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset));
  __ SmiUntag(scratch1);  // convert smi to int
  __ SubP(scratch1, Operand(1));

  // Generate an unrolled loop that performs a few probes before
  // giving up. Measurements done on Gmail indicate that 2 probes
  // cover ~93% of loads from dictionaries.
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ LoadlW(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is right
      // shifted in the following and instruction.
      DCHECK(NameDictionary::GetProbeOffset(i) <
             1 << (32 - Name::kHashFieldOffset));
      __ AddP(scratch2,
              Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
    }
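    // Shift the (possibly offset) hash down past its flag bits and mask it
    // with the capacity to obtain the slot index.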
    __ srl(scratch2, Operand(String::kHashShift));
    __ AndP(scratch2, scratch1);

    // Scale the index by multiplying by the entry size.
    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
    // scratch2 = scratch2 * 3.
    __ ShiftLeftP(ip, scratch2, Operand(1));
    __ AddP(scratch2, ip);

    // Check if the key is identical to the name.
    __ ShiftLeftP(ip, scratch2, Operand(kPointerSizeLog2));
    __ AddP(scratch2, elements, ip);
    __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset));
    __ CmpP(name, ip);
    __ beq(done);
  }

  const int spill_mask = (r0.bit() | r8.bit() | r7.bit() | r6.bit() | r5.bit() |
                          r4.bit() | r3.bit() | r2.bit()) &
                         ~(scratch1.bit() | scratch2.bit());

  __ LoadRR(r0, r14);
  __ MultiPush(spill_mask);
  if (name.is(r2)) {
    DCHECK(!elements.is(r3));
    __ LoadRR(r3, name);
    __ LoadRR(r2, elements);
  } else {
    __ LoadRR(r2, elements);
    __ LoadRR(r3, name);
  }
  NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
  __ CallStub(&stub);
  __ LoadRR(r1, r2);
  __ LoadRR(scratch2, r4);
  __ MultiPop(spill_mask);
  __ LoadRR(r14, r0);

  __ CmpP(r1, Operand::Zero());
  __ bne(done);
  __ beq(miss);
}

void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
  // we cannot call anything that could cause a GC from this stub.
  // Registers:
  //  result: r2, holds the NameDictionary to probe on entry and the result
  //          on exit (zero if the lookup failed, non-zero otherwise).
  //  key: r3, the name being looked up.
  //  dictionary: aliases result; the NameDictionary to probe.
  //  index: holds the index of the entry on a successful lookup;
  //         may alias result.

  Register result = r2;
  Register dictionary = r2;
  Register key = r3;
  Register index = r4;
  Register mask = r5;
  Register hash = r6;
  Register undefined = r7;
  Register entry_key = r8;
  Register scratch = r8;

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

  __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset));
  __ SmiUntag(mask);
  __ SubP(mask, Operand(1));

  __ LoadlW(hash, FieldMemOperand(key, String::kHashFieldOffset));

  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);

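  // The lookup helpers above emit the first kInlinedProbes probes inline and
  // call this stub as their fallback, so probing continues from there.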
  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    // Capacity is smi 2^n.
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is right
      // shifted in the following and instruction.
      DCHECK(NameDictionary::GetProbeOffset(i) <
             1 << (32 - Name::kHashFieldOffset));
      __ AddP(index, hash,
              Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
    } else {
      __ LoadRR(index, hash);
    }
    __ ShiftRight(r0, index, Operand(String::kHashShift));
    __ AndP(index, r0, mask);

    // Scale the index by multiplying by the entry size.
    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
    __ ShiftLeftP(scratch, index, Operand(1));
    __ AddP(index, scratch);  // index *= 3.

    __ ShiftLeftP(scratch, index, Operand(kPointerSizeLog2));
    __ AddP(index, dictionary, scratch);
    __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset));

    // Having undefined at this place means the name is not contained.
    __ CmpP(entry_key, undefined);
    __ beq(&not_in_dictionary);

    // Stop if found the property.
    __ CmpP(entry_key, key);
    __ beq(&in_dictionary);

    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
      // Check if the entry name is not a unique name.
      __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
      __ LoadlB(entry_key,
                FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
    }
  }

  __ bind(&maybe_in_dictionary);
  // If we are doing negative lookup then probing failure should be
  // treated as a lookup success. For positive lookup probing failure
  // should be treated as lookup failure.
  if (mode() == POSITIVE_LOOKUP) {
    __ LoadImmP(result, Operand::Zero());
    __ Ret();
  }

  __ bind(&in_dictionary);
  __ LoadImmP(result, Operand(1));
  __ Ret();

  __ bind(&not_in_dictionary);
  __ LoadImmP(result, Operand::Zero());
  __ Ret();
}

void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
  stub1.GetCode();
  // Hydrogen code stubs need stub2 at snapshot time.
  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
  stub2.GetCode();
}

// Takes the input in 3 registers: address_ value_ and object_.  A pointer to
// the value has just been written into the object, now this stub makes sure
// we keep the GC informed.  The word in the object where the value has been
// written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // The first two branch instructions are generated with labels so as to
  // get the offset fixed up correctly by the bind(Label*) call.  We patch
  // it back and forth between branch condition True and False
  // when we start and stop incremental heap marking.
  // See RecordWriteStub::Patch for details.

  // Clear the bit, branch on True for NOP action initially
  __ b(CC_NOP, &skip_to_incremental_noncompacting);
  __ b(CC_NOP, &skip_to_incremental_compacting);

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  }
  __ Ret();

  __ bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);

  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
  // patching not required on S390 as the initial path is effectively NOP
}

void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  regs_.Save(masm);

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
                           regs_.scratch0(), &dont_need_remembered_set);

    __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
                        &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
    InformIncrementalMarker(masm);
    regs_.Restore(masm);
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);

    __ bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
  InformIncrementalMarker(masm);
  regs_.Restore(masm);
  __ Ret();
}

void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
  int argument_count = 3;
  __ PrepareCallCFunction(argument_count, regs_.scratch0());
  Register address =
      r2.is(regs_.address()) ? regs_.scratch0() : regs_.address();
  DCHECK(!address.is(regs_.object()));
  DCHECK(!address.is(r2));
  __ LoadRR(address, regs_.address());
  __ LoadRR(r2, regs_.object());
  __ LoadRR(r3, address);
  __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));

  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(
      ExternalReference::incremental_marking_record_write_function(isolate()),
      argument_count);
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}

void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label on_black;
  Label need_incremental;
  Label need_incremental_pop_scratch;

  // Let's look at the color of the object:  If it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&on_black);

  // Get the value from the slot.
  __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask, eq,
                     &ensure_not_white);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask, eq,
                     &need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need extra registers for this, so we push the object and the address
  // register temporarily.
  __ Push(regs_.object(), regs_.address());
  __ JumpIfWhite(regs_.scratch0(),  // The value.
                 regs_.scratch1(),  // Scratch.
                 regs_.object(),    // Scratch.
                 regs_.address(),   // Scratch.
                 &need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}

void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  CEntryStub ces(isolate(), 1, kSaveFPRegs);
  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
  int parameter_count_offset =
      StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
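  // The stub failure trampoline frame recorded the caller's argument count;
  // reload it so the arguments can be popped off the stack on return.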
  __ LoadP(r3, MemOperand(fp, parameter_count_offset));
  if (function_mode() == JS_FUNCTION_STUB_MODE) {
    __ AddP(r3, Operand(1));
  }
  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
  __ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2));
  __ la(sp, MemOperand(r3, sp));
  __ Ret();
}

void CallICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(r4);
  CallICStub stub(isolate(), state());
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}

static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                             Register receiver_map, Register scratch1,
                             Register scratch2, bool is_polymorphic,
                             Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;

  Register cached_map = scratch1;

  __ LoadP(cached_map,
           FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ CmpP(receiver_map, cached_map);
  __ bne(&start_polymorphic, Label::kNear);
  // found, now call handler.
  Register handler = feedback;
  __ LoadP(handler,
           FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
  __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);

  Register length = scratch2;
  __ bind(&start_polymorphic);
  __ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
  if (!is_polymorphic) {
    // If the IC could be monomorphic we have to make sure we don't go past the
    // end of the feedback array.
    __ CmpSmiLiteral(length, Smi::FromInt(2), r0);
    __ beq(miss);
  }

  Register too_far = length;
  Register pointer_reg = feedback;

  // +-----+------+------+-----+-----+ ... ----+
  // | map | len  | wm0  | h0  | wm1 |      hN |
  // +-----+------+------+-----+-----+ ... ----+
  //                 0      1     2        len-1
  //                              ^              ^
  //                              |              |
  //                         pointer_reg      too_far
  //                         aka feedback     scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ SmiToPtrArrayOffset(r0, length);
  __ AddP(too_far, feedback, r0);
  __ AddP(too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ AddP(pointer_reg, feedback,
          Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));

  __ bind(&next_loop);
  __ LoadP(cached_map, MemOperand(pointer_reg));
  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ CmpP(receiver_map, cached_map);
  __ bne(&prepare_next, Label::kNear);
  __ LoadP(handler, MemOperand(pointer_reg, kPointerSize));
  __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);

  __ bind(&prepare_next);
  __ AddP(pointer_reg, Operand(kPointerSize * 2));
  __ CmpP(pointer_reg, too_far);
  __ blt(&next_loop, Label::kNear);

  // We exhausted our array of map handler pairs.
  __ b(miss);
}

static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
                                  Register receiver_map, Register feedback,
                                  Register vector, Register slot,
                                  Register scratch, Label* compare_map,
                                  Label* load_smi_map, Label* try_array) {
  __ JumpIfSmi(receiver, load_smi_map);
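  // Smi receivers dispatch on the heap number map, which the caller loads at
  // the load_smi_map label before branching back to compare_map.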
  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(compare_map);
  Register cached_map = scratch;
  // Move the weak map into the weak_cell register.
  __ LoadP(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
  __ CmpP(cached_map, receiver_map);
  __ bne(try_array);
  Register handler = feedback;
  __ SmiToPtrArrayOffset(r1, slot);
  __ LoadP(handler,
           FieldMemOperand(r1, vector, FixedArray::kHeaderSize + kPointerSize));
  __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);
}

void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
  KeyedStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}

void KeyedStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}

void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}

static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
                                       Register receiver_map, Register scratch1,
                                       Register scratch2, Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;
  Label transition_call;

  Register cached_map = scratch1;
  Register too_far = scratch2;
  Register pointer_reg = feedback;
  __ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));

  // +-----+------+------+-----+-----+-----+ ... ----+
  // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
  // +-----+------+------+-----+-----+-----+ ... ----+
  //                 0      1     2              len-1
  //                 ^                                 ^
  //                 |                                 |
  //             pointer_reg                        too_far
  //             aka feedback                       scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ SmiToPtrArrayOffset(r0, too_far);
  __ AddP(too_far, feedback, r0);
  __ AddP(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ AddP(pointer_reg, feedback,
          Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));

  __ bind(&next_loop);
  __ LoadP(cached_map, MemOperand(pointer_reg));
  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ CmpP(receiver_map, cached_map);
  __ bne(&prepare_next);
  // Is it a transitioning store?
  __ LoadP(too_far, MemOperand(pointer_reg, kPointerSize));
  __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
  __ bne(&transition_call);
  __ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
  __ AddP(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);

  __ bind(&transition_call);
  __ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
  __ JumpIfSmi(too_far, miss);

  __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));

  // Load the map into the correct register.
  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
  __ LoadRR(feedback, too_far);

  __ AddP(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);

  __ bind(&prepare_next);
  __ AddP(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
  __ CmpLogicalP(pointer_reg, too_far);
  __ blt(&next_loop);

  // We exhausted our array of map handler pairs.
  __ b(miss);
}

void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // r3
  Register key = StoreWithVectorDescriptor::NameRegister();           // r4
  Register vector = StoreWithVectorDescriptor::VectorRegister();      // r5
  Register slot = StoreWithVectorDescriptor::SlotRegister();          // r6
  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r2));          // r2
  Register feedback = r7;
  Register receiver_map = r8;
  Register scratch1 = r9;

  __ SmiToPtrArrayOffset(r0, slot);
  __ AddP(feedback, vector, r0);
  __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
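  // feedback now holds vector[slot]; the Smi slot index was converted to a
  // byte offset by SmiToPtrArrayOffset above.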

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  __ bind(&try_array);
  // Is it a fixed array?
  __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
  __ bne(&not_array);

  // We have a polymorphic element handler.
  Label polymorphic, try_poly_name;
  __ bind(&polymorphic);

  Register scratch2 = ip;

  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
                             &miss);

  __ bind(&not_array);
  // Is it generic?
  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
  __ bne(&try_poly_name);
  Handle<Code> megamorphic_stub =
      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ CmpP(key, feedback);
  __ bne(&miss);
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ SmiToPtrArrayOffset(r0, slot);
  __ AddP(feedback, vector, r0);
  __ LoadP(feedback,
           FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
                   &miss);

  __ bind(&miss);
  KeyedStoreIC::GenerateMiss(masm);

  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ b(&compare_map);
}

void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    PredictableCodeSizeScope predictable(masm,
#if V8_TARGET_ARCH_S390X
                                         40);
#elif V8_HOST_ARCH_S390
                                         36);
#else
                                         32);
#endif
    ProfileEntryHookStub stub(masm->isolate());
    __ CleanseP(r14);
    __ Push(r14, ip);
    __ CallStub(&stub);  // BRASL
    __ Pop(r14, ip);
  }
}

void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// The entry hook is a "push lr" instruction (LAY+ST/STG), followed by a call.
#if V8_TARGET_ARCH_S390X
  const int32_t kReturnAddressDistanceFromFunctionStart =
      Assembler::kCallTargetAddressOffset + 18;  // LAY + STG * 2
#elif V8_HOST_ARCH_S390
  const int32_t kReturnAddressDistanceFromFunctionStart =
      Assembler::kCallTargetAddressOffset + 18;  // NILH + LAY + ST * 2
#else
  const int32_t kReturnAddressDistanceFromFunctionStart =
      Assembler::kCallTargetAddressOffset + 14;  // LAY + ST * 2
#endif

  // This should contain all kJSCallerSaved registers.
  const RegList kSavedRegs = kJSCallerSaved |  // Caller saved registers.
                             r7.bit();         // Saved stack pointer.

  // We also save r14+ip, so count here is one higher than the mask indicates.
  const int32_t kNumSavedRegs = kNumJSCallerSaved + 3;

  // Save all caller-save registers as this may be called from anywhere.
  __ CleanseP(r14);
  __ LoadRR(ip, r14);
  __ MultiPush(kSavedRegs | ip.bit());

  // Compute the function's address for the first argument.

  __ SubP(r2, ip, Operand(kReturnAddressDistanceFromFunctionStart));

  // The caller's return address is two slots above the saved temporaries.
  // Grab that for the second argument to the hook.
  __ lay(r3, MemOperand(sp, kNumSavedRegs * kPointerSize));

  // Align the stack if necessary.
  int frame_alignment = masm->ActivationFrameAlignment();
  if (frame_alignment > kPointerSize) {
    __ LoadRR(r7, sp);
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
  }

#if !defined(USE_SIMULATOR)
  uintptr_t entry_hook =
      reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
  __ mov(ip, Operand(entry_hook));

#if ABI_USES_FUNCTION_DESCRIPTORS
  // Function descriptor
  __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
  __ LoadP(ip, MemOperand(ip, 0));
// ip already set.
#endif
#endif

  // zLinux ABI requires the caller's frame to have sufficient space for the
  // callee-preserved register save area.
  __ LoadImmP(r0, Operand::Zero());
  __ lay(sp, MemOperand(sp, -kCalleeRegisterSaveAreaSize -
                                kNumRequiredStackFrameSlots * kPointerSize));
  __ StoreP(r0, MemOperand(sp));
#if defined(USE_SIMULATOR)
  // Under the simulator we need to indirect the entry hook through a
  // trampoline function at a known address.
  // It additionally takes an isolate as a third parameter
  __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));

  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
  __ mov(ip, Operand(ExternalReference(
                 &dispatcher, ExternalReference::BUILTIN_CALL, isolate())));
#endif
  __ Call(ip);

  // zLinux ABI requires the caller's frame to have sufficient space for the
  // callee-preserved register save area.
  __ la(sp, MemOperand(sp, kCalleeRegisterSaveAreaSize +
                               kNumRequiredStackFrameSlots * kPointerSize));

  // Restore the stack pointer if needed.
  if (frame_alignment > kPointerSize) {
    __ LoadRR(sp, r7);
  }

  // Also pop lr to get Ret(0).
  __ MultiPop(kSavedRegs | ip.bit());
  __ LoadRR(r14, ip);
  __ Ret();
}

template <class T>
static void CreateArrayDispatch(MacroAssembler* masm,
                                AllocationSiteOverrideMode mode) {
  if (mode == DISABLE_ALLOCATION_SITES) {
    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    int last_index =
        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ CmpP(r5, Operand(kind));
      T stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}

static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
                                           AllocationSiteOverrideMode mode) {
  // r4 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
  // r5 - kind (if mode != DISABLE_ALLOCATION_SITES)
  // r2 - number of arguments
  // r3 - constructor?
  // sp[0] - last argument
  Label normal_sequence;
  if (mode == DONT_OVERRIDE) {
    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
    STATIC_ASSERT(FAST_ELEMENTS == 2);
    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
    STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
    STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);

    // is the low bit set? If so, we are holey and that is good.
    __ AndP(r0, r5, Operand(1));
    __ bne(&normal_sequence);
  }

  // look at the first argument
  __ LoadP(r7, MemOperand(sp, 0));
  __ CmpP(r7, Operand::Zero());
  __ beq(&normal_sequence);

  if (mode == DISABLE_ALLOCATION_SITES) {
    ElementsKind initial = GetInitialFastElementsKind();
    ElementsKind holey_initial = GetHoleyElementsKind(initial);

    ArraySingleArgumentConstructorStub stub_holey(
        masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub_holey);

    __ bind(&normal_sequence);
    ArraySingleArgumentConstructorStub stub(masm->isolate(), initial,
                                            DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    // We are going to create a holey array, but our kind is non-holey.
    // Fix kind and retry (only if we have an allocation site in the slot).
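    // Packed element kinds are even and their holey counterparts odd (see the
    // STATIC_ASSERTs above), so adding one selects the holey variant.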
    __ AddP(r5, r5, Operand(1));
    if (FLAG_debug_code) {
      __ LoadP(r7, FieldMemOperand(r4, 0));
      __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
      __ Assert(eq, kExpectedAllocationSite);
    }

    // Save the resulting elements kind in type info. We can't just store r5
    // in the AllocationSite::transition_info field because elements kind is
    // restricted to a portion of the field...upper bits need to be left alone.
    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
    __ LoadP(r6, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));
    __ AddSmiLiteral(r6, r6, Smi::FromInt(kFastElementsKindPackedToHoley), r0);
    __ StoreP(r6, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));

    __ bind(&normal_sequence);
    int last_index =
        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ CmpP(r5, Operand(kind));
      ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}

template <class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
  int to_index =
      GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= to_index; ++i) {
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    T stub(isolate, kind);
    stub.GetCode();
    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
      stub1.GetCode();
    }
  }
}

void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
      isolate);
  ArrayNArgumentsConstructorStub stub(isolate);
  stub.GetCode();
  ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
  for (int i = 0; i < 2; i++) {
    // For internal arrays we only need a few things
    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
    stubh1.GetCode();
    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
    stubh2.GetCode();
  }
}

void ArrayConstructorStub::GenerateDispatchToArrayStub(
    MacroAssembler* masm, AllocationSiteOverrideMode mode) {
  Label not_zero_case, not_one_case;
  __ CmpP(r2, Operand::Zero());
  __ bne(&not_zero_case);
  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);

  __ bind(&not_zero_case);
  __ CmpP(r2, Operand(1));
  __ bgt(&not_one_case);
  CreateArrayDispatchOneArgument(masm, mode);

  __ bind(&not_one_case);
  ArrayNArgumentsConstructorStub stub(masm->isolate());
  __ TailCallStub(&stub);
}

void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2 : argc (only if argument_count() == ANY)
  //  -- r3 : constructor
  //  -- r4 : AllocationSite or undefined
  //  -- r5 : new target
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ LoadP(r6, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    __ TestIfSmi(r6);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
    __ CompareObjectType(r6, r6, r7, MAP_TYPE);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);

    // We should either have undefined in r4 or a valid AllocationSite
    __ AssertUndefinedOrAllocationSite(r4, r6);
  }

  // Enter the context of the Array function.
  __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));

  Label subclassing;
  __ CmpP(r5, r3);
  __ bne(&subclassing, Label::kNear);

  Label no_info;
  // Get the elements kind and case on that.
  __ CompareRoot(r4, Heap::kUndefinedValueRootIndex);
  __ beq(&no_info);

  __ LoadP(r5, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(r5);
  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
  __ AndP(r5, Operand(AllocationSite::ElementsKindBits::kMask));
  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);

  __ bind(&no_info);
  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);

  __ bind(&subclassing);
  __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
  __ StoreP(r3, MemOperand(sp, r1));
  __ AddP(r2, r2, Operand(3));
  __ Push(r5, r4);
  __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}

void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
                                                ElementsKind kind) {
  __ CmpLogicalP(r2, Operand(1));

  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
  __ TailCallStub(&stub0, lt);

  ArrayNArgumentsConstructorStub stubN(isolate());
  __ TailCallStub(&stubN, gt);

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array
    // look at the first argument
    __ LoadP(r5, MemOperand(sp, 0));
    __ CmpP(r5, Operand::Zero());

    InternalArraySingleArgumentConstructorStub stub1_holey(
        isolate(), GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey, ne);
  }

  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
  __ TailCallStub(&stub1);
}

void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2 : argc
  //  -- r3 : constructor
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    __ TestIfSmi(r5);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
    __ CompareObjectType(r5, r5, r6, MAP_TYPE);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
  }

  // Figure out the right elements kind
  __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
  // Load the map's "bit field 2" into |result|.
  __ LoadlB(r5, FieldMemOperand(r5, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ DecodeField<Map::ElementsKindBits>(r5);

  if (FLAG_debug_code) {
    Label done;
    __ CmpP(r5, Operand(FAST_ELEMENTS));
    __ beq(&done);
    __ CmpP(r5, Operand(FAST_HOLEY_ELEMENTS));
    __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
    __ bind(&done);
  }

  Label fast_elements_case;
  __ CmpP(r5, Operand(FAST_ELEMENTS));
  __ beq(&fast_elements_case);
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}

void FastNewObjectStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r3 : target
  //  -- r5 : new target
  //  -- cp : context
  //  -- lr : return address
  // -----------------------------------
  __ AssertFunction(r3);
  __ AssertReceiver(r5);

  // Verify that the new target is a JSFunction.
  Label new_object;
  __ CompareObjectType(r5, r4, r4, JS_FUNCTION_TYPE);
  __ bne(&new_object);

  // Load the initial map and verify that it's in fact a map.
  __ LoadP(r4, FieldMemOperand(r5, JSFunction::kPrototypeOrInitialMapOffset));
  __ JumpIfSmi(r4, &new_object);
  __ CompareObjectType(r4, r2, r2, MAP_TYPE);
  __ bne(&new_object);

  // Fall back to runtime if the target differs from the new target's
  // initial map constructor.
  __ LoadP(r2, FieldMemOperand(r4, Map::kConstructorOrBackPointerOffset));
  __ CmpP(r2, r3);
  __ bne(&new_object);

  // Allocate the JSObject on the heap.
  Label allocate, done_allocate;
  __ LoadlB(r6, FieldMemOperand(r4, Map::kInstanceSizeOffset));
  __ Allocate(r6, r2, r7, r8, &allocate, SIZE_IN_WORDS);
  __ bind(&done_allocate);

  // Initialize the JSObject fields.
  __ StoreP(r4, FieldMemOperand(r2, JSObject::kMapOffset));
  __ LoadRoot(r5, Heap::kEmptyFixedArrayRootIndex);
  __ StoreP(r5, FieldMemOperand(r2, JSObject::kPropertiesOffset));
  __ StoreP(r5, FieldMemOperand(r2, JSObject::kElementsOffset));
  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
  __ AddP(r3, r2, Operand(JSObject::kHeaderSize - kHeapObjectTag));

  // ----------- S t a t e -------------
  //  -- r2 : result (tagged)
  //  -- r3 : result fields (untagged)
  //  -- r7 : result end (untagged)
  //  -- r4 : initial map
  //  -- cp : context
  //  -- lr : return address
  // -----------------------------------

  // Perform in-object slack tracking if requested.
  Label slack_tracking;
  STATIC_ASSERT(Map::kNoSlackTracking == 0);
  __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
  __ LoadlW(r5, FieldMemOperand(r4, Map::kBitField3Offset));
  __ DecodeField<Map::ConstructionCounter>(r9, r5);
  __ LoadAndTestP(r9, r9);
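  // A non-zero construction counter means in-object slack tracking is still
  // active for this map (Map::kNoSlackTracking is zero).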
  __ bne(&slack_tracking);
  {
    // Initialize all in-object fields with undefined.
    __ InitializeFieldsWithFiller(r3, r7, r8);

    __ Ret();
  }
  __ bind(&slack_tracking);
  {
    // Decrease generous allocation count.
    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
    __ Add32(r5, r5, Operand(-(1 << Map::ConstructionCounter::kShift)));
    __ StoreW(r5, FieldMemOperand(r4, Map::kBitField3Offset));

    // Initialize the in-object fields with undefined.
    __ LoadlB(r6, FieldMemOperand(r4, Map::kUnusedPropertyFieldsOffset));
    __ ShiftLeftP(r6, r6, Operand(kPointerSizeLog2));
    __ SubP(r6, r7, r6);
    __ InitializeFieldsWithFiller(r3, r6, r8);

    // Initialize the remaining (reserved) fields with one pointer filler map.
    __ LoadRoot(r8, Heap::kOnePointerFillerMapRootIndex);
    __ InitializeFieldsWithFiller(r3, r7, r8);

    // Check if we can finalize the instance size.
    __ CmpP(r9, Operand(Map::kSlackTrackingCounterEnd));
    __ Ret(ne);

    // Finalize the instance size.
    {
      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
      __ Push(r2, r4);
      __ CallRuntime(Runtime::kFinalizeInstanceSize);
      __ Pop(r2);
    }
    __ Ret();
  }

  // Fall back to %AllocateInNewSpace.
  __ bind(&allocate);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    STATIC_ASSERT(kSmiTag == 0);
    __ ShiftLeftP(r6, r6,
                  Operand(kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
    __ Push(r4, r6);
    __ CallRuntime(Runtime::kAllocateInNewSpace);
    __ Pop(r4);
  }
  __ LoadlB(r7, FieldMemOperand(r4, Map::kInstanceSizeOffset));
  __ ShiftLeftP(r7, r7, Operand(kPointerSizeLog2));
  __ AddP(r7, r2, r7);
  __ SubP(r7, r7, Operand(kHeapObjectTag));
  __ b(&done_allocate);

  // Fall back to %NewObject.
  __ bind(&new_object);
  __ Push(r3, r5);
  __ TailCallRuntime(Runtime::kNewObject);
}

void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r3 : function
  //  -- cp : context
  //  -- fp : frame pointer
  //  -- lr : return address
  // -----------------------------------
  __ AssertFunction(r3);

  // Make r4 point to the JavaScript frame.
  __ LoadRR(r4, fp);
  if (skip_stub_frame()) {
    // For Ignition we need to skip the handler/stub frame to reach the
    // JavaScript frame for the function.
    __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
  }
  if (FLAG_debug_code) {
    Label ok;
    __ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
    __ CmpP(ip, r3);
    __ beq(&ok, Label::kNear);
    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
    __ bind(&ok);
  }

  // Check if we have rest parameters (only possible if we have an
  // arguments adaptor frame below the function frame).
  Label no_rest_parameters;
  __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
  __ LoadP(ip, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
  __ bne(&no_rest_parameters);

  // Check if the arguments adaptor frame contains more arguments than
  // specified by the function's internal formal parameter count.
  Label rest_parameters;
  __ LoadP(r2, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ LoadP(r5, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
  __ LoadW(
      r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
#if V8_TARGET_ARCH_S390X
  __ SmiTag(r5);
#endif
  __ SubP(r2, r2, r5);
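  // r2 = actual argument count - formal parameter count; a positive result
  // means there are rest parameters to collect.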
  __ bgt(&rest_parameters);

  // Return an empty rest parameter array.
  __ bind(&no_rest_parameters);
  {
    // ----------- S t a t e -------------
    //  -- cp : context
    //  -- lr : return address
    // -----------------------------------

    // Allocate an empty rest parameter array.
    Label allocate, done_allocate;
    __ Allocate(JSArray::kSize, r2, r3, r4, &allocate, NO_ALLOCATION_FLAGS);
    __ bind(&done_allocate);

    // Setup the rest parameter array in r2.
    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r3);
    __ StoreP(r3, FieldMemOperand(r2, JSArray::kMapOffset), r0);
    __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
    __ StoreP(r3, FieldMemOperand(r2, JSArray::kPropertiesOffset), r0);
    __ StoreP(r3, FieldMemOperand(r2, JSArray::kElementsOffset), r0);
    __ LoadImmP(r3, Operand::Zero());
    __ StoreP(r3, FieldMemOperand(r2, JSArray::kLengthOffset), r0);
    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
    __ Ret();

    // Fall back to %AllocateInNewSpace.
    __ bind(&allocate);
    {
      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
      __ Push(Smi::FromInt(JSArray::kSize));
      __ CallRuntime(Runtime::kAllocateInNewSpace);
    }
    __ b(&done_allocate);
  }

  __ bind(&rest_parameters);
  {
    // Compute the pointer to the first rest parameter (skipping the receiver).
    __ SmiToPtrArrayOffset(r8, r2);
    __ AddP(r4, r4, r8);
    __ AddP(r4, r4, Operand(StandardFrameConstants::kCallerSPOffset));

    // ----------- S t a t e -------------
    //  -- cp : context
    //  -- r2 : number of rest parameters (tagged)
    //  -- r3 : function
    //  -- r4 : pointer just past first rest parameters
    //  -- r8 : size of rest parameters
    //  -- lr : return address
    // -----------------------------------

    // Allocate space for the rest parameter array plus the backing store.
    Label allocate, done_allocate;
    __ mov(r9, Operand(JSArray::kSize + FixedArray::kHeaderSize));
    __ AddP(r9, r9, r8);
    __ Allocate(r9, r5, r6, r7, &allocate, NO_ALLOCATION_FLAGS);
    __ bind(&done_allocate);

    // Setup the elements array in r5.
    __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
    __ StoreP(r3, FieldMemOperand(r5, FixedArray::kMapOffset), r0);
    __ StoreP(r2, FieldMemOperand(r5, FixedArray::kLengthOffset), r0);
    __ AddP(r6, r5,
            Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
    {
      Label loop;
      __ SmiUntag(r1, r2);
      // __ mtctr(r0);
      __ bind(&loop);
      __ lay(r4, MemOperand(r4, -kPointerSize));
      __ LoadP(ip, MemOperand(r4));
      __ la(r6, MemOperand(r6, kPointerSize));
      __ StoreP(ip, MemOperand(r6));
      // __ bdnz(&loop);
      __ BranchOnCount(r1, &loop);
      __ AddP(r6, r6, Operand(kPointerSize));
    }

    // Setup the rest parameter array in r6.
    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r3);
    __ StoreP(r3, MemOperand(r6, JSArray::kMapOffset));
    __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
    __ StoreP(r3, MemOperand(r6, JSArray::kPropertiesOffset));
    __ StoreP(r5, MemOperand(r6, JSArray::kElementsOffset));
    __ StoreP(r2, MemOperand(r6, JSArray::kLengthOffset));
    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
    __ AddP(r2, r6, Operand(kHeapObjectTag));
    __ Ret();

    // Fall back to %AllocateInNewSpace (if not too big).
    Label too_big_for_new_space;
    __ bind(&allocate);
    __ CmpP(r9, Operand(kMaxRegularHeapObjectSize));
    __ bgt(&too_big_for_new_space);
    {
      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
      __ SmiTag(r9);
      __ Push(r2, r4, r9);
      __ CallRuntime(Runtime::kAllocateInNewSpace);
      __ LoadRR(r5, r2);
      __ Pop(r2, r4);
    }
    __ b(&done_allocate);

    // Fall back to %NewRestParameter.
    __ bind(&too_big_for_new_space);
    __ push(r3);
    __ TailCallRuntime(Runtime::kNewRestParameter);
  }
}

void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r3 : function
  //  -- cp : context
  //  -- fp : frame pointer
  //  -- lr : return address
  // -----------------------------------
  __ AssertFunction(r3);

  // Make r9 point to the JavaScript frame.
  __ LoadRR(r9, fp);
  if (skip_stub_frame()) {
    // For Ignition we need to skip the handler/stub frame to reach the
    // JavaScript frame for the function.
    __ LoadP(r9, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
  }
  if (FLAG_debug_code) {
    Label ok;
    __ LoadP(ip, MemOperand(r9, StandardFrameConstants::kFunctionOffset));
    __ CmpP(ip, r3);
    __ beq(&ok, Label::kNear);
    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
    __ bind(&ok);
  }

  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
  __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
  __ LoadW(
      r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
#if V8_TARGET_ARCH_S390X
  __ SmiTag(r4);
#endif
  __ SmiToPtrArrayOffset(r5, r4);
  __ AddP(r5, r9, r5);
  __ AddP(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));

  // r3 : function
  // r4 : number of parameters (tagged)
  // r5 : parameters pointer
  // r9 : JavaScript frame pointer
  // Registers used over whole function:
  // r7 : arguments count (tagged)
  // r8 : mapped parameter count (tagged)

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor_frame, try_allocate, runtime;
  __ LoadP(r6, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
  __ LoadP(r2, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ CmpSmiLiteral(r2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
  __ beq(&adaptor_frame);

  // No adaptor, parameter count = argument count.
  __ LoadRR(r7, r4);
  __ LoadRR(r8, r4);
  __ b(&try_allocate);

  // We have an adaptor frame. Patch the parameters pointer.
  __ bind(&adaptor_frame);
  __ LoadP(r7, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiToPtrArrayOffset(r5, r7);
  __ AddP(r5, r5, r6);
  __ AddP(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));

  // r7 = argument count (tagged)
  // r8 = parameter count (tagged)
  // Compute the mapped parameter count = min(r4, r7) in r8.
  __ CmpP(r4, r7);
  Label skip;
  __ LoadRR(r8, r4);
  __ blt(&skip);
  __ LoadRR(r8, r7);
  __ bind(&skip);
4103 
4104   __ bind(&try_allocate);
4105 
4106   // Compute the sizes of backing store, parameter map, and arguments object.
4107   // 1. Parameter map, has 2 extra words containing context and backing store.
4108   const int kParameterMapHeaderSize =
4109       FixedArray::kHeaderSize + 2 * kPointerSize;
4110   // If there are no mapped parameters, we do not need the parameter_map.
4111   __ CmpSmiLiteral(r8, Smi::kZero, r0);
4112   Label skip2, skip3;
4113   __ bne(&skip2);
4114   __ LoadImmP(r1, Operand::Zero());
4115   __ b(&skip3);
4116   __ bind(&skip2);
4117   __ SmiToPtrArrayOffset(r1, r8);
4118   __ AddP(r1, r1, Operand(kParameterMapHeaderSize));
4119   __ bind(&skip3);
4120 
4121   // 2. Backing store.
4122   __ SmiToPtrArrayOffset(r6, r7);
4123   __ AddP(r1, r1, r6);
4124   __ AddP(r1, r1, Operand(FixedArray::kHeaderSize));
4125 
4126   // 3. Arguments object.
4127   __ AddP(r1, r1, Operand(JSSloppyArgumentsObject::kSize));
4128 
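  // A hedged C++ sketch of the size just accumulated in r1 (mapped_count and
  // arg_count are the untagged equivalents of r8 and r7; constants as above):
  //
  //   int size = (mapped_count == 0)
  //                  ? 0
  //                  : kParameterMapHeaderSize + mapped_count * kPointerSize;
  //   size += FixedArray::kHeaderSize + arg_count * kPointerSize;  // store
  //   size += JSSloppyArgumentsObject::kSize;  // the arguments object itself
  //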
  // Do the allocation of all three objects in one go.
  __ Allocate(r1, r2, r1, r6, &runtime, NO_ALLOCATION_FLAGS);

  // r2 = address of new object(s) (tagged)
  // r4 = parameter count (smi-tagged)
  // Get the arguments boilerplate from the current native context into r3.
  const int kNormalOffset =
      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
  const int kAliasedOffset =
      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);

  __ LoadP(r6, NativeContextMemOperand());
  __ CmpP(r8, Operand::Zero());
  Label skip4, skip5;
  __ bne(&skip4);
  __ LoadP(r6, MemOperand(r6, kNormalOffset));
  __ b(&skip5);
  __ bind(&skip4);
  __ LoadP(r6, MemOperand(r6, kAliasedOffset));
  __ bind(&skip5);

  // r2 = address of new object (tagged)
  // r4 = parameter count (smi-tagged)
  // r6 = address of arguments map (tagged)
  // r8 = mapped parameter count (tagged)
  __ StoreP(r6, FieldMemOperand(r2, JSObject::kMapOffset), r0);
  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
  __ StoreP(r1, FieldMemOperand(r2, JSObject::kPropertiesOffset), r0);
  __ StoreP(r1, FieldMemOperand(r2, JSObject::kElementsOffset), r0);

  // Set up the callee in-object property.
  __ AssertNotSmi(r3);
  __ StoreP(r3, FieldMemOperand(r2, JSSloppyArgumentsObject::kCalleeOffset),
            r0);

  // Use the length (smi tagged) and set that as an in-object property too.
  __ AssertSmi(r7);
  __ StoreP(r7, FieldMemOperand(r2, JSSloppyArgumentsObject::kLengthOffset),
            r0);

  // Set up the elements pointer in the allocated arguments object.
  // If we allocated a parameter map, r6 will point there, otherwise
  // it will point to the backing store.
  __ AddP(r6, r2, Operand(JSSloppyArgumentsObject::kSize));
  __ StoreP(r6, FieldMemOperand(r2, JSObject::kElementsOffset), r0);

  // r2 = address of new object (tagged)
  // r4 = parameter count (tagged)
  // r6 = address of parameter map or backing store (tagged)
  // r8 = mapped parameter count (tagged)
  // Initialize parameter map. If there are no mapped arguments, we're done.
  Label skip_parameter_map;
  __ CmpSmiLiteral(r8, Smi::kZero, r0);
  Label skip6;
  __ bne(&skip6);
  // Move backing store address to r3, because it is
  // expected there when filling in the unmapped arguments.
  __ LoadRR(r3, r6);
  __ b(&skip_parameter_map);
  __ bind(&skip6);

  __ LoadRoot(r7, Heap::kSloppyArgumentsElementsMapRootIndex);
  __ StoreP(r7, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
  __ AddSmiLiteral(r7, r8, Smi::FromInt(2), r0);
  __ StoreP(r7, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
  __ StoreP(cp, FieldMemOperand(r6, FixedArray::kHeaderSize + 0 * kPointerSize),
            r0);
  __ SmiToPtrArrayOffset(r7, r8);
  __ AddP(r7, r7, r6);
  __ AddP(r7, r7, Operand(kParameterMapHeaderSize));
  __ StoreP(r7, FieldMemOperand(r6, FixedArray::kHeaderSize + 1 * kPointerSize),
            r0);

  // Copy the parameter slots and the holes in the arguments.
  // We need to fill in mapped_parameter_count slots. They index the context,
  // where parameters are stored in reverse order, at
  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
  // The mapped parameters thus need to get indices
  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
  // We loop from right to left.
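  // Worked example (illustrative): parameter_count == 3, mapped count == 2.
  // The loop index starts at MIN_CONTEXT_SLOTS + 3 - 2 = MIN_CONTEXT_SLOTS + 1
  // and the map is filled right to left, so parameter 1 maps to context slot
  // MIN_CONTEXT_SLOTS + 1 and parameter 0 to MIN_CONTEXT_SLOTS + 2, matching
  // the reverse order noted above; the corresponding backing-store slots are
  // filled with the hole.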
  Label parameters_loop;
  __ LoadRR(r7, r8);
  __ AddSmiLiteral(r1, r4, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
  __ SubP(r1, r1, r8);
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ SmiToPtrArrayOffset(r3, r7);
  __ AddP(r3, r3, r6);
  __ AddP(r3, r3, Operand(kParameterMapHeaderSize));

  // r3 = address of backing store (tagged)
  // r6 = address of parameter map (tagged)
  // r7 = temporary scratch (e.g. for address calculation)
  // r9 = temporary scratch (e.g. for address calculation)
  // ip = the hole value
  __ SmiUntag(r7);
  __ push(r4);
  __ LoadRR(r4, r7);
  __ ShiftLeftP(r7, r7, Operand(kPointerSizeLog2));
  __ AddP(r9, r3, r7);
  __ AddP(r7, r6, r7);
  __ AddP(r9, r9, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ AddP(r7, r7, Operand(kParameterMapHeaderSize - kHeapObjectTag));

  __ bind(&parameters_loop);
  __ StoreP(r1, MemOperand(r7, -kPointerSize));
  __ lay(r7, MemOperand(r7, -kPointerSize));
  __ StoreP(ip, MemOperand(r9, -kPointerSize));
  __ lay(r9, MemOperand(r9, -kPointerSize));
  __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
  __ BranchOnCount(r4, &parameters_loop);
  __ pop(r4);

  // Restore r7 = argument count (tagged).
  __ LoadP(r7, FieldMemOperand(r2, JSSloppyArgumentsObject::kLengthOffset));

  __ bind(&skip_parameter_map);
  // r2 = address of new object (tagged)
  // r3 = address of backing store (tagged)
  // r7 = argument count (tagged)
  // r8 = mapped parameter count (tagged)
  // r1 = scratch
  // Copy arguments header and remaining slots (if there are any).
  __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
  __ StoreP(r1, FieldMemOperand(r3, FixedArray::kMapOffset), r0);
  __ StoreP(r7, FieldMemOperand(r3, FixedArray::kLengthOffset), r0);
  __ SubP(r1, r7, r8);
  __ Ret(eq);  // Done if there are no unmapped (remaining) arguments.

  Label arguments_loop;
  __ SmiUntag(r1);
  __ LoadRR(r4, r1);

  __ SmiToPtrArrayOffset(r0, r8);
  __ SubP(r5, r5, r0);
  __ AddP(r1, r3, r0);
  __ AddP(r1, r1,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));

  __ bind(&arguments_loop);
  __ LoadP(r6, MemOperand(r5, -kPointerSize));
  __ lay(r5, MemOperand(r5, -kPointerSize));
  __ StoreP(r6, MemOperand(r1, kPointerSize));
  __ la(r1, MemOperand(r1, kPointerSize));
  __ BranchOnCount(r4, &arguments_loop);

  // Return.
  __ Ret();

  // Do the runtime call to allocate the arguments object.
  // r7 = argument count (tagged)
  __ bind(&runtime);
  __ Push(r3, r5, r7);
  __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
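
// For reference, reading an element of the mapped (aliased) arguments object
// built above behaves roughly like this hedged C++ sketch (GetMappedArg is a
// hypothetical helper; the real lookup lives in the runtime and IC code):
//
//   Object* GetMappedArg(FixedArray* parameter_map, Context* ctx, int i) {
//     // Slots 0 and 1 hold the context and the backing store.
//     Object* probe = parameter_map->get(2 + i);
//     if (!probe->IsTheHole()) {
//       return ctx->get(Smi::cast(probe)->value());  // aliased with the var
//     }
//     FixedArray* backing = FixedArray::cast(parameter_map->get(1));
//     return backing->get(i);                        // unmapped argument
//   }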

void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r3 : function
  //  -- cp : context
  //  -- fp : frame pointer
  //  -- lr : return address
  // -----------------------------------
  __ AssertFunction(r3);

  // Make r4 point to the JavaScript frame.
  __ LoadRR(r4, fp);
  if (skip_stub_frame()) {
    // For Ignition we need to skip the handler/stub frame to reach the
    // JavaScript frame for the function.
    __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
  }
  if (FLAG_debug_code) {
    Label ok;
    __ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
    __ CmpP(ip, r3);
    __ beq(&ok, Label::kNear);
    __ Abort(kInvalidFrameForFastNewStrictArgumentsStub);
    __ bind(&ok);
  }

  // Check if we have an arguments adaptor frame below the function frame.
  Label arguments_adaptor, arguments_done;
  __ LoadP(r5, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
  __ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
  __ beq(&arguments_adaptor);
  {
    __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
    __ LoadW(r2, FieldMemOperand(
                     r6, SharedFunctionInfo::kFormalParameterCountOffset));
#if V8_TARGET_ARCH_S390X
    __ SmiTag(r2);
#endif
    __ SmiToPtrArrayOffset(r8, r2);
    __ AddP(r4, r4, r8);
  }
  __ b(&arguments_done);
  __ bind(&arguments_adaptor);
  {
    __ LoadP(r2, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
    __ SmiToPtrArrayOffset(r8, r2);
    __ AddP(r4, r5, r8);
  }
  __ bind(&arguments_done);
  __ AddP(r4, r4, Operand(StandardFrameConstants::kCallerSPOffset));

  // ----------- S t a t e -------------
  //  -- cp : context
  //  -- r2 : number of arguments (tagged)
  //  -- r3 : function
  //  -- r4 : pointer just past first argument
  //  -- r8 : size of arguments
  //  -- lr : return address
  // -----------------------------------

  // Allocate space for the strict arguments object plus the backing store.
  Label allocate, done_allocate;
  __ mov(r9, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
  __ AddP(r9, r9, r8);
  __ Allocate(r9, r5, r6, r7, &allocate, NO_ALLOCATION_FLAGS);
  __ bind(&done_allocate);

  // Set up the elements array in r5.
  __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
  __ StoreP(r3, FieldMemOperand(r5, FixedArray::kMapOffset), r0);
  __ StoreP(r2, FieldMemOperand(r5, FixedArray::kLengthOffset), r0);
  __ AddP(r6, r5,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
  {
    Label loop, done_loop;
    __ SmiUntag(r1, r2);
    __ LoadAndTestP(r1, r1);
    __ beq(&done_loop);
    __ bind(&loop);
    __ lay(r4, MemOperand(r4, -kPointerSize));
    __ LoadP(ip, MemOperand(r4));
    __ la(r6, MemOperand(r6, kPointerSize));
    __ StoreP(ip, MemOperand(r6));
    __ BranchOnCount(r1, &loop);
    __ bind(&done_loop);
    __ AddP(r6, r6, Operand(kPointerSize));
  }

  // Set up the strict arguments object in r6.
  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r3);
  __ StoreP(r3, MemOperand(r6, JSStrictArgumentsObject::kMapOffset));
  __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
  __ StoreP(r3, MemOperand(r6, JSStrictArgumentsObject::kPropertiesOffset));
  __ StoreP(r5, MemOperand(r6, JSStrictArgumentsObject::kElementsOffset));
  __ StoreP(r2, MemOperand(r6, JSStrictArgumentsObject::kLengthOffset));
  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
  __ AddP(r2, r6, Operand(kHeapObjectTag));
  __ Ret();

  // Fall back to %AllocateInNewSpace (if not too big).
  Label too_big_for_new_space;
  __ bind(&allocate);
  __ CmpP(r9, Operand(kMaxRegularHeapObjectSize));
  __ bgt(&too_big_for_new_space);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ SmiTag(r9);
    __ Push(r2, r4, r9);
    __ CallRuntime(Runtime::kAllocateInNewSpace);
    __ LoadRR(r5, r2);
    __ Pop(r2, r4);
  }
  __ b(&done_allocate);

  // Fall back to %NewStrictArguments.
  __ bind(&too_big_for_new_space);
  __ push(r3);
  __ TailCallRuntime(Runtime::kNewStrictArguments);
}
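
// This stub mirrors FastNewRestParameterStub above: a single Allocate()
// covers the JSStrictArgumentsObject plus its FixedArray backing store, and
// the copy loop (this time guarded against zero arguments via LoadAndTestP)
// fills the elements in call order before the object header is written.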

static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}

// Calls an API function. Allocates HandleScope, extracts the returned value
// from the handle, and propagates exceptions. Restores context. stack_space
// - space to be unwound on exit (includes the JS call arguments space and
// the additional space allocated for the fast call).
static void CallApiFunctionAndReturn(MacroAssembler* masm,
                                     Register function_address,
                                     ExternalReference thunk_ref,
                                     int stack_space,
                                     MemOperand* stack_space_operand,
                                     MemOperand return_value_operand,
                                     MemOperand* context_restore_operand) {
  Isolate* isolate = masm->isolate();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate);
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate), next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate), next_address);

  // Additional parameter is the address of the actual callback.
  DCHECK(function_address.is(r3) || function_address.is(r4));
  Register scratch = r5;

  __ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate)));
  __ LoadlB(scratch, MemOperand(scratch, 0));
  __ CmpP(scratch, Operand::Zero());

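  // If profiling is enabled, route the call through the thunk so the
  // profiler can intercept the callback; otherwise call the function
  // address directly. Either way, scratch holds the call target afterwards.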
  Label profiler_disabled;
  Label end_profiler_check;
  __ beq(&profiler_disabled, Label::kNear);
  __ mov(scratch, Operand(thunk_ref));
  __ b(&end_profiler_check, Label::kNear);
  __ bind(&profiler_disabled);
  __ LoadRR(scratch, function_address);
  __ bind(&end_profiler_check);

  // Allocate HandleScope in callee-save registers.
  // r9 - next_address
  // r6 - next_address->kNextOffset
  // r7 - next_address->kLimitOffset
  // r8 - next_address->kLevelOffset
  __ mov(r9, Operand(next_address));
  __ LoadP(r6, MemOperand(r9, kNextOffset));
  __ LoadP(r7, MemOperand(r9, kLimitOffset));
  __ LoadlW(r8, MemOperand(r9, kLevelOffset));
  __ AddP(r8, Operand(1));
  __ StoreW(r8, MemOperand(r9, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, r2);
    __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on stack (could have moved after GC).
  // DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate);
  stub.GenerateCall(masm, scratch);

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, r2);
    __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  __ LoadP(r2, return_value_operand);
  __ bind(&return_value_loaded);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ StoreP(r6, MemOperand(r9, kNextOffset));
  if (__ emit_debug_code()) {
    __ LoadlW(r3, MemOperand(r9, kLevelOffset));
    __ CmpP(r3, r8);
    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
  }
  __ SubP(r8, Operand(1));
  __ StoreW(r8, MemOperand(r9, kLevelOffset));
  __ CmpP(r7, MemOperand(r9, kLimitOffset));
  __ bne(&delete_allocated_handles, Label::kNear);

  // Leave the API exit frame.
  __ bind(&leave_exit_frame);
  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    __ LoadP(cp, *context_restore_operand);
  }
  // LeaveExitFrame expects unwind space to be in a register.
  if (stack_space_operand != NULL) {
    __ l(r6, *stack_space_operand);
  } else {
    __ mov(r6, Operand(stack_space));
  }
  __ LeaveExitFrame(false, r6, !restore_context, stack_space_operand != NULL);

  // Check if the function scheduled an exception.
  __ mov(r7, Operand(ExternalReference::scheduled_exception_address(isolate)));
  __ LoadP(r7, MemOperand(r7));
  __ CompareRoot(r7, Heap::kTheHoleValueRootIndex);
  __ bne(&promote_scheduled_exception, Label::kNear);

  __ b(r14);

  // Re-throw by promoting a scheduled exception.
  __ bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException);

  // HandleScope limit has changed. Delete allocated extensions.
  __ bind(&delete_allocated_handles);
  __ StoreP(r7, MemOperand(r9, kLimitOffset));
  __ LoadRR(r6, r2);
  __ PrepareCallCFunction(1, r7);
  __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
                   1);
  __ LoadRR(r2, r6);
  __ b(&leave_exit_frame, Label::kNear);
}
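
// The bookkeeping above follows the C++ HandleScope protocol, roughly (a
// hedged sketch; HandleScopeData is the struct behind next/limit/level):
//
//   data->level++;                      // on entry
//   ... call the API function ...
//   data->next = saved_next;            // drop handles made by the callee
//   data->level--;
//   if (data->limit != saved_limit) {   // the callee grew the scope:
//     data->limit = saved_limit;        // shrink it back and free the
//     DeleteExtensions(isolate);        // overflow blocks
//   }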

void CallApiCallbackStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2                  : callee
  //  -- r6                  : call_data
  //  -- r4                  : holder
  //  -- r3                  : api_function_address
  //  -- cp                  : context
  //  --
  //  -- sp[0]               : last argument
  //  -- ...
  //  -- sp[(argc - 1) * 4]  : first argument
  //  -- sp[argc * 4]        : receiver
  // -----------------------------------

  Register callee = r2;
  Register call_data = r6;
  Register holder = r4;
  Register api_function_address = r3;
  Register context = cp;

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
  STATIC_ASSERT(FCA::kArgsLength == 8);

  // new target
  __ PushRoot(Heap::kUndefinedValueRootIndex);

  // context save
  __ push(context);
  if (!is_lazy()) {
    // load context from callee
    __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
  }

  // callee
  __ push(callee);

  // call data
  __ push(call_data);

  Register scratch = call_data;
  if (!call_data_undefined()) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  }
  // return value
  __ push(scratch);
  // return value default
  __ push(scratch);
  // isolate
  __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
  __ push(scratch);
  // holder
  __ push(holder);
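
  // The eight pushes above build FunctionCallbackArguments::implicit_args on
  // the stack so that slot i matches the indices asserted earlier: sp[0] is
  // the holder (kHolderIndex == 0), sp[1] the isolate, and so on up to
  // sp[7], the new.target (kNewTargetIndex == 7).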

  // Prepare arguments.
  __ LoadRR(scratch, sp);

  // Allocate the v8::FunctionCallbackInfo structure in the arguments' space,
  // since it's not controlled by GC.
  // S390 LINUX ABI:
  //
  // Create 4 extra slots on stack:
  //    [0] space for DirectCEntryStub's LR save
  //    [1-3] FunctionCallbackInfo
  const int kApiStackSpace = 4;
  const int kFunctionCallbackInfoOffset =
      (kStackFrameExtraParamSlot + 1) * kPointerSize;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  DCHECK(!api_function_address.is(r2) && !scratch.is(r2));
  // r2 = FunctionCallbackInfo&
  // Arguments is after the return address.
  __ AddP(r2, sp, Operand(kFunctionCallbackInfoOffset));
  // FunctionCallbackInfo::implicit_args_
  __ StoreP(scratch, MemOperand(r2, 0 * kPointerSize));
  // FunctionCallbackInfo::values_
  __ AddP(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
  __ StoreP(ip, MemOperand(r2, 1 * kPointerSize));
  // FunctionCallbackInfo::length_ = argc
  __ LoadImmP(ip, Operand(argc()));
  __ StoreW(ip, MemOperand(r2, 2 * kPointerSize));
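
  // The three stores above fill in what the C++ side reads as (sketch of the
  // internal layout; see FunctionCallbackInfo in include/v8.h):
  //
  //   struct FunctionCallbackInfo {
  //     Object** implicit_args_;  // -> the 8 implicit args pushed above
  //     Object** values_;         // -> the first JS argument; operator[]
  //                               //    then walks down in memory
  //     int length_;              // == argc
  //   };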

  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(masm->isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Stores return the first JS argument.
  int return_value_offset = 0;
  if (is_store()) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
  // The unwind space is known statically here: the explicit JS arguments,
  // the eight implicit ones (FCA::kArgsLength), and the receiver.
  int stack_space = argc() + FCA::kArgsLength + 1;
  MemOperand* stack_space_operand = NULL;
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                           stack_space_operand, return_value_operand,
                           &context_restore_operand);
}

void CallApiGetterStub::Generate(MacroAssembler* masm) {
  int arg0Slot = 0;
  int accessorInfoSlot = 0;
  int apiStackSpace = 0;
  // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
  // property name below the exit frame to make the GC aware of them.
  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);

  Register receiver = ApiGetterDescriptor::ReceiverRegister();
  Register holder = ApiGetterDescriptor::HolderRegister();
  Register callback = ApiGetterDescriptor::CallbackRegister();
  Register scratch = r6;
  DCHECK(!AreAliased(receiver, holder, callback, scratch));

  Register api_function_address = r4;

  __ push(receiver);
  // Push data from AccessorInfo.
  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
  __ push(scratch);
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Push(scratch, scratch);
  __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
  __ Push(scratch, holder);
  __ Push(Smi::kZero);  // should_throw_on_error -> false
  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
  __ push(scratch);
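
  // Stack layout at this point (sketch): sp[0] holds the name handle and
  // sp[1]..sp[7] form PropertyCallbackArguments::args_ in the index order
  // asserted above, from should_throw_on_error at sp[1] up to the receiver
  // at sp[7].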

  // v8::PropertyCallbackInfo::args_ array and name handle.
  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
  __ LoadRR(r2, sp);                           // r2 = Handle<Name>
  __ AddP(r3, r2, Operand(1 * kPointerSize));  // r3 = v8::PCI::args_

  // If ABI passes Handles (pointer-sized struct) in a register:
  //
  // Create 2 extra slots on stack:
  //    [0] space for DirectCEntryStub's LR save
  //    [1] AccessorInfo&
  //
  // Otherwise:
  //
  // Create 3 extra slots on stack:
  //    [0] space for DirectCEntryStub's LR save
  //    [1] copy of Handle (first arg)
  //    [2] AccessorInfo&
  if (ABI_PASSES_HANDLES_IN_REGS) {
    accessorInfoSlot = kStackFrameExtraParamSlot + 1;
    apiStackSpace = 2;
  } else {
    arg0Slot = kStackFrameExtraParamSlot + 1;
    accessorInfoSlot = arg0Slot + 1;
    apiStackSpace = 3;
  }

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, apiStackSpace);

  if (!ABI_PASSES_HANDLES_IN_REGS) {
    // Pass the first argument by reference.
    __ StoreP(r2, MemOperand(sp, arg0Slot * kPointerSize));
    __ AddP(r2, sp, Operand(arg0Slot * kPointerSize));
  }

  // Create the v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
  __ StoreP(r3, MemOperand(sp, accessorInfoSlot * kPointerSize));
  __ AddP(r3, sp, Operand(accessorInfoSlot * kPointerSize));
  // r3 = v8::PropertyCallbackInfo&

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());

  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
  __ LoadP(api_function_address,
           FieldMemOperand(scratch, Foreign::kForeignAddressOffset));

  // +3 is to skip the prologue, the return address, and the name handle.
  MemOperand return_value_operand(
      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, NULL, return_value_operand, NULL);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_S390